-rw-r--r-- .gitignore | 11
-rw-r--r-- cmake/make_dist.cmake.in | 16
-rw-r--r-- cmake/os/WindowsCache.cmake | 1
-rw-r--r-- debian/additions/innotop/innotop | 8
-rw-r--r-- debian/additions/innotop/innotop.1 | 2
-rw-r--r-- extra/mariabackup/innobackupex.cc | 86
-rw-r--r-- extra/mariabackup/xtrabackup.cc | 100
-rw-r--r-- libmysqld/CMakeLists.txt | 6
-rw-r--r-- mysql-test/lib/My/SafeProcess.pm | 2
-rw-r--r-- mysql-test/main/delete_use_source.result | 4
-rw-r--r-- mysql-test/main/delete_use_source.test | 1
-rw-r--r-- mysql-test/main/lock_kill.result | 36
-rw-r--r-- mysql-test/main/lock_kill.test | 59
-rw-r--r-- mysql-test/main/ps.result | 18
-rw-r--r-- mysql-test/main/ps.test | 14
-rw-r--r-- mysql-test/suite/galera/disabled.def | 1
-rw-r--r-- mysql-test/suite/galera/r/MDEV-25562.result | 10
-rw-r--r-- mysql-test/suite/galera/r/galera_nonPK_and_PA.result | 63
-rw-r--r-- mysql-test/suite/galera/t/MDEV-25562.test | 12
-rw-r--r-- mysql-test/suite/galera/t/galera_ist_mariabackup.cnf | 2
-rw-r--r-- mysql-test/suite/galera/t/galera_ist_mariabackup_innodb_flush_logs.cnf | 1
-rw-r--r-- mysql-test/suite/galera/t/galera_ist_mysqldump.cnf | 2
-rw-r--r-- mysql-test/suite/galera/t/galera_nonPK_and_PA.test | 168
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_mariabackup.cnf | 2
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_mariabackup_qpress.cnf | 1
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf | 2
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_rsync.cnf | 1
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_rsync2.cnf | 1
-rw-r--r-- mysql-test/suite/galera/t/galera_sst_rsync_data_dir.cnf | 3
-rw-r--r-- mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf | 1
-rw-r--r-- mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.cnf | 1
-rw-r--r-- mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.cnf | 3
-rw-r--r-- mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync.cnf | 3
-rw-r--r-- mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync_section.cnf | 3
-rw-r--r-- mysql-test/suite/galera_sr/r/GCF-627.result | 10
-rw-r--r-- mysql-test/suite/galera_sr/r/galera_sr_nonPK_and_PA.result | 46
-rw-r--r-- mysql-test/suite/galera_sr/t/GCF-627.test | 9
-rw-r--r-- mysql-test/suite/galera_sr/t/galera_sr_nonPK_and_PA.test | 109
-rw-r--r-- mysql-test/suite/innodb/r/index_vcol_purge_startup.result | 24
-rw-r--r-- mysql-test/suite/innodb/t/index_vcol_purge_startup.test | 28
-rw-r--r-- mysql-test/suite/innodb_gis/r/gis.result | 13
-rw-r--r-- mysql-test/suite/innodb_gis/t/gis.test | 15
-rw-r--r-- mysql-test/suite/mariabackup/xbstream.test | 2
-rw-r--r-- scripts/wsrep_sst_common.sh | 381
-rw-r--r-- scripts/wsrep_sst_mariabackup.sh | 523
-rw-r--r-- scripts/wsrep_sst_mysqldump.sh | 4
-rw-r--r-- scripts/wsrep_sst_rsync.sh | 283
-rw-r--r-- sql/CMakeLists.txt | 43
-rw-r--r-- sql/gen_lex_token.cc | 2
-rw-r--r-- sql/gen_sql_yacc_ora_yy.cmake | 15
-rw-r--r-- sql/gen_yy_files.cmake | 39
-rw-r--r-- sql/ha_partition.cc | 19
-rw-r--r-- sql/ha_partition.h | 5
-rw-r--r-- sql/handler.cc | 42
-rw-r--r-- sql/handler.h | 2
-rw-r--r-- sql/item_func.h | 2
-rw-r--r-- sql/item_sum.h | 2
-rw-r--r-- sql/mdl.h | 6
-rw-r--r-- sql/myskel.m4 | 18
-rw-r--r-- sql/sql_base.cc | 25
-rw-r--r-- sql/sql_base.h | 2
-rw-r--r-- sql/sql_delete.cc | 2
-rw-r--r-- sql/sql_digest.cc | 2
-rw-r--r-- sql/sql_lex.h | 8
-rw-r--r-- sql/sql_parse.cc | 4
-rw-r--r-- sql/sql_priv.h | 2
-rw-r--r-- sql/sql_trigger.cc | 9
-rw-r--r-- sql/sql_update.cc | 2
-rw-r--r-- sql/sql_yacc.yy | 61
-rw-r--r-- storage/connect/tabrest.cpp | 6
-rw-r--r-- storage/innobase/buf/buf0buf.cc | 3
-rw-r--r-- storage/innobase/buf/buf0flu.cc | 10
-rw-r--r-- storage/innobase/handler/ha_innodb.cc | 60
-rw-r--r-- storage/innobase/handler/ha_innodb.h | 1
-rw-r--r-- storage/innobase/ibuf/ibuf0ibuf.cc | 11
-rw-r--r-- storage/innobase/page/page0cur.cc | 2
-rw-r--r-- storage/innobase/row/row0purge.cc | 2
-rw-r--r-- storage/innobase/row/row0sel.cc | 145
-rw-r--r-- storage/perfschema/CMakeLists.txt | 3
-rw-r--r-- storage/perfschema/my_thread.h | 25
-rw-r--r-- storage/perfschema/pfs_config.h.cmake | 1
m--------- wsrep-lib | 0
82 files changed, 1746 insertions, 926 deletions
diff --git a/.gitignore b/.gitignore
index 4fee2deaea5..2cf801d6290 100644
--- a/.gitignore
+++ b/.gitignore
@@ -197,11 +197,12 @@ sql/lex_hash.h
sql/mysql_tzinfo_to_sql
sql/mysqld
sql/sql_builtin.cc
-sql/sql_yacc.cc
-sql/sql_yacc.hh
-sql/sql_yacc_ora.cc
-sql/sql_yacc_ora.hh
-sql/sql_yacc_ora.yy
+sql/yy_mariadb.cc
+sql/yy_mariadb.hh
+sql/yy_mariadb.yy
+sql/yy_oracle.cc
+sql/yy_oracle.hh
+sql/yy_oracle.yy
storage/heap/hp_test1
storage/heap/hp_test2
storage/maria/aria_chk
diff --git a/cmake/make_dist.cmake.in b/cmake/make_dist.cmake.in
index 9e9eb555951..4f6988cfed2 100644
--- a/cmake/make_dist.cmake.in
+++ b/cmake/make_dist.cmake.in
@@ -125,15 +125,15 @@ IF(NOT GIT_EXECUTABLE)
ENDIF()
# Copy bison output
-CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc.hh
- ${PACKAGE_DIR}/sql/sql_yacc.hh COPYONLY)
-CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc.cc
- ${PACKAGE_DIR}/sql/sql_yacc.cc COPYONLY)
+CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/yy_mariadb.hh
+ ${PACKAGE_DIR}/sql/yy_mariadb.hh COPYONLY)
+CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/yy_mariadb.cc
+ ${PACKAGE_DIR}/sql/yy_mariadb.cc COPYONLY)
# Copy bison output
-CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.hh
- ${PACKAGE_DIR}/sql/sql_yacc_ora.hh COPYONLY)
-CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.cc
- ${PACKAGE_DIR}/sql/sql_yacc_ora.cc COPYONLY)
+CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/yy_oracle.hh
+ ${PACKAGE_DIR}/sql/yy_oracle.hh COPYONLY)
+CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/yy_oracle.cc
+ ${PACKAGE_DIR}/sql/yy_oracle.cc COPYONLY)
# Add documentation, if user has specified where to find them
IF(MYSQL_DOCS_LOCATION)
diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake
index 4c7d69bc131..9b855dbb42b 100644
--- a/cmake/os/WindowsCache.cmake
+++ b/cmake/os/WindowsCache.cmake
@@ -321,6 +321,7 @@ SET(HAVE_SNAPPY_H CACHE INTERNAL "")
SET(HAVE_SCHED_GETCPU CACHE INTERNAL "")
SET(HAVE_PTHREAD_THREADID_NP CACHE INTERNAL "")
SET(HAVE_SYS_GETTID CACHE INTERNAL "")
+SET(HAVE_GETTID CACHE INTERNAL "")
SET(HAVE_INTEGER_PTHREAD_SELF CACHE INTERNAL "")
SET(HAVE_PTHREAD_GETTHREADID_NP CACHE INTERNAL "")
SET(HAVE_TIMER_DELETE CACHE INTERNAL "")
diff --git a/debian/additions/innotop/innotop b/debian/additions/innotop/innotop
index f1648f4ce60..5d81980341d 100644
--- a/debian/additions/innotop/innotop
+++ b/debian/additions/innotop/innotop
@@ -466,7 +466,7 @@ sub parse_status_text {
# too many locks to print, the output might be truncated)
my $time_text;
- if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[012]\./) ) {
+ if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[0-9]\./) ) {
( $time_text ) = $fulltext =~ m/^([0-9-]* [0-9:]*) [0-9a-fx]* INNODB MONITOR OUTPUT/m;
$innodb_data{'ts'} = [ parse_innodb_timestamp_56( $time_text ) ];
} else {
@@ -634,7 +634,7 @@ sub parse_fk_section {
return 0 unless $fulltext;
my ( $ts, $type );
- if ( ($mysqlversion =~ /^5.[67]\./) || ($mysqlversion =~ /^10.[012]\./) ) {
+ if ( ($mysqlversion =~ /^5.[67]\./) || ($mysqlversion =~ /^10.[0-9]\./) ) {
( $ts, $type ) = $fulltext =~ m/^([0-9-]* [0-9:]*)\s[0-9a-fx]*\s+(\w+)/m;
$section->{'ts'} = [ parse_innodb_timestamp_56( $ts ) ];
} else {
@@ -894,7 +894,7 @@ sub parse_dl_section {
my ( $ts ) = $fulltext =~ m/^$s$/m;
return 0 unless $ts;
- if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[012]\./) ) {
+ if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[0-9]\./) ) {
$dl->{'ts'} = [ parse_innodb_timestamp_56( $ts ) ];
}
else {
@@ -12065,7 +12065,7 @@ This data is from the TRANSACTIONS section of SHOW INNODB STATUS.
=item IO_THREADS
-This data is from the list of threads in the the FILE I/O section of SHOW INNODB
+This data is from the list of threads in the FILE I/O section of SHOW INNODB
STATUS.
=item INNODB_LOCKS
diff --git a/debian/additions/innotop/innotop.1 b/debian/additions/innotop/innotop.1
index 65da5dc0dfb..62a9aed69ee 100644
--- a/debian/additions/innotop/innotop.1
+++ b/debian/additions/innotop/innotop.1
@@ -2048,7 +2048,7 @@ the processlist.
This data is from the \s-1TRANSACTIONS\s0 section of \s-1SHOW INNODB STATUS.\s0
.IP "\s-1IO_THREADS\s0" 4
.IX Item "IO_THREADS"
-This data is from the list of threads in the the \s-1FILE I/O\s0 section of \s-1SHOW INNODB
+This data is from the list of threads in the \s-1FILE I/O\s0 section of \s-1SHOW INNODB
STATUS.\s0
.IP "\s-1INNODB_LOCKS\s0" 4
.IX Item "INNODB_LOCKS"
diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc
index aaa1ca8de91..99440faf864 100644
--- a/extra/mariabackup/innobackupex.cc
+++ b/extra/mariabackup/innobackupex.cc
@@ -216,7 +216,7 @@ ibx_mode_t ibx_mode = IBX_MODE_BACKUP;
static struct my_option ibx_long_options[] =
{
- {"version", 'v', "print xtrabackup version information",
+ {"version", 'v', "print version information",
(uchar *) &opt_ibx_version, (uchar *) &opt_ibx_version, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -259,12 +259,11 @@ static struct my_option ibx_long_options[] =
(uchar *) &opt_ibx_slave_info, (uchar *) &opt_ibx_slave_info, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"incremental", OPT_INCREMENTAL, "This option tells xtrabackup to "
- "create an incremental backup, rather than a full one. It is passed "
- "to the xtrabackup child process. When this option is specified, "
+ {"incremental", OPT_INCREMENTAL,
+ "Create an incremental backup, rather than a full one. When this option is specified, "
"either --incremental-lsn or --incremental-basedir can also be given. "
- "If neither option is given, option --incremental-basedir is passed "
- "to xtrabackup by default, set to the first timestamped backup "
+ "If neither option is given, option --incremental-basedir is used "
+ "by default, set to the first timestamped backup "
"directory in the backup base directory.",
(uchar *) &opt_ibx_incremental, (uchar *) &opt_ibx_incremental, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -379,14 +378,14 @@ static struct my_option ibx_long_options[] =
{"incremental-history-name", OPT_INCREMENTAL_HISTORY_NAME,
"This option specifies the name of the backup series stored in the "
"PERCONA_SCHEMA.xtrabackup_history history record to base an "
- "incremental backup on. Xtrabackup will search the history table "
+ "incremental backup on. Backup will search the history table "
"looking for the most recent (highest innodb_to_lsn), successful "
"backup in the series and take the to_lsn value to use as the "
"starting lsn for the incremental backup. This will be mutually "
"exclusive with --incremental-history-uuid, --incremental-basedir "
"and --incremental-lsn. If no valid lsn can be found (no series by "
- "that name, no successful backups by that name) xtrabackup will "
- "return with an error. It is used with the --incremental option.",
+ "that name, no successful backups by that name), "
+ "an error will be returned. It is used with the --incremental option.",
(uchar*) &opt_ibx_incremental_history_name,
(uchar*) &opt_ibx_incremental_history_name, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -396,8 +395,8 @@ static struct my_option ibx_long_options[] =
"stored in the PERCONA_SCHEMA.xtrabackup_history to base an "
"incremental backup on. --incremental-history-name, "
"--incremental-basedir and --incremental-lsn. If no valid lsn can be "
- "found (no success record with that uuid) xtrabackup will return "
- "with an error. It is used with the --incremental option.",
+ "found (no success record with that uuid), an error will be returned."
+ " It is used with the --incremental option.",
(uchar*) &opt_ibx_incremental_history_uuid,
(uchar*) &opt_ibx_incremental_history_uuid, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -426,7 +425,7 @@ static struct my_option ibx_long_options[] =
{"include", OPT_INCLUDE,
"This option is a regular expression to be matched against table "
"names in databasename.tablename format. It is passed directly to "
- "xtrabackup's --tables option. See the xtrabackup documentation for "
+ "--tables option. See the documentation for "
"details.",
(uchar*) &opt_ibx_include,
(uchar*) &opt_ibx_include, 0, GET_STR,
@@ -476,12 +475,6 @@ static struct my_option ibx_long_options[] =
(uchar*) &opt_ibx_lock_wait_threshold, 0, GET_UINT,
REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
- {"debug-sleep-before-unlock", OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
- "This is a debug-only option used by the XtraBackup test suite.",
- (uchar*) &opt_ibx_debug_sleep_before_unlock,
- (uchar*) &opt_ibx_debug_sleep_before_unlock, 0, GET_UINT,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-
{"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
"How many seconds --safe-slave-backup should wait for "
"Slave_open_temp_tables to become zero. (default 300)",
@@ -494,22 +487,20 @@ static struct my_option ibx_long_options[] =
We put them here with only purpose for them to showup in
innobackupex --help output */
- {"close_files", OPT_CLOSE_FILES, "Do not keep files opened. This "
- "option is passed directly to xtrabackup. Use at your own risk.",
+ {"close_files", OPT_CLOSE_FILES, "Do not keep files opened."
+ " Use at your own risk.",
(uchar*) &ibx_xb_close_files, (uchar*) &ibx_xb_close_files, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"compress", OPT_COMPRESS, "This option instructs xtrabackup to "
- "compress backup copies of InnoDB data files. It is passed directly "
- "to the xtrabackup child process. Try 'xtrabackup --help' for more "
- "details.", (uchar*) &ibx_xtrabackup_compress_alg,
+ {"compress", OPT_COMPRESS, "This option instructs backup to "
+ "compress backup copies of InnoDB data files."
+ , (uchar*) &ibx_xtrabackup_compress_alg,
(uchar*) &ibx_xtrabackup_compress_alg, 0,
GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"compress-threads", OPT_COMPRESS_THREADS,
"This option specifies the number of worker threads that will be used "
- "for parallel compression. It is passed directly to the xtrabackup "
- "child process. Try 'xtrabackup --help' for more details.",
+ "for parallel compression.",
(uchar*) &ibx_xtrabackup_compress_threads,
(uchar*) &ibx_xtrabackup_compress_threads,
0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
@@ -520,17 +511,15 @@ static struct my_option ibx_long_options[] =
(uchar*) &ibx_xtrabackup_compress_chunk_size,
0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
- {"export", OPT_EXPORT, "This option is passed directly to xtrabackup's "
- "--export option. It enables exporting individual tables for import "
- "into another server. See the xtrabackup documentation for details.",
+ {"export", OPT_EXPORT, " enables exporting individual tables for import "
+ "into another server.",
(uchar*) &ibx_xtrabackup_export, (uchar*) &ibx_xtrabackup_export,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"extra-lsndir", OPT_EXTRA_LSNDIR, "This option specifies the "
"directory in which to save an extra copy of the "
"\"xtrabackup_checkpoints\" file. The option accepts a string "
- "argument. It is passed directly to xtrabackup's --extra-lsndir "
- "option. See the xtrabackup documentation for details.",
+ "argument.",
(uchar*) &ibx_xtrabackup_extra_lsndir,
(uchar*) &ibx_xtrabackup_extra_lsndir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -552,7 +541,7 @@ static struct my_option ibx_long_options[] =
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"incremental-force-scan", OPT_INCREMENTAL_FORCE_SCAN,
- "This options tells xtrabackup to perform full scan of data files "
+ "Perform full scan of data files "
"for taking an incremental backup even if full changed page bitmap "
"data is available to enable the backup without the full scan.",
(uchar*)&ibx_xtrabackup_incremental_force_scan,
@@ -578,10 +567,8 @@ static struct my_option ibx_long_options[] =
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"parallel", OPT_PARALLEL, "On backup, this option specifies the "
- "number of threads the xtrabackup child process should use to back "
- "up files concurrently. The option accepts an integer argument. It "
- "is passed directly to xtrabackup's --parallel option. See the "
- "xtrabackup documentation for details.",
+ "number of threads to use to back "
+ "up files concurrently. The option accepts an integer argument.",
(uchar*) &ibx_xtrabackup_parallel, (uchar*) &ibx_xtrabackup_parallel,
0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0},
@@ -589,23 +576,21 @@ static struct my_option ibx_long_options[] =
{"stream", OPT_STREAM, "This option specifies the format in which to "
"do the streamed backup. The option accepts a string argument. The "
"backup will be done to STDOUT in the specified format. Currently, "
- "the only supported formats are tar and mbstream/xbstream. This "
- "option is passed directly to xtrabackup's --stream option.",
+ "the only supported formats are tar and mbstream/xbstream.",
(uchar*) &ibx_xtrabackup_stream_str,
(uchar*) &ibx_xtrabackup_stream_str, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tables-file", OPT_TABLES_FILE, "This option specifies the file in "
"which there are a list of names of the form database. The option "
- "accepts a string argument.table, one per line. The option is passed "
- "directly to xtrabackup's --tables-file option.",
+ "accepts a string argument.table, one per line.",
(uchar*) &ibx_xtrabackup_tables_file,
(uchar*) &ibx_xtrabackup_tables_file,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"throttle", OPT_THROTTLE, "This option specifies a number of I/O "
"operations (pairs of read+write) per second. It accepts an integer "
- "argument. It is passed directly to xtrabackup's --throttle option.",
+ "argument.",
(uchar*) &ibx_xtrabackup_throttle, (uchar*) &ibx_xtrabackup_throttle,
0, GET_LONG, REQUIRED_ARG, 0, 0, LONG_MAX, 0, 1, 0},
@@ -617,11 +602,10 @@ static struct my_option ibx_long_options[] =
0, 0, 0, 0, 0, 0},
{"use-memory", OPT_USE_MEMORY, "This option accepts a string argument "
- "that specifies the amount of memory in bytes for xtrabackup to use "
+ "that specifies the amount of memory in bytes to use "
"for crash recovery while preparing a backup. Multiples are supported "
"providing the unit (e.g. 1MB, 1GB). It is used only with the option "
- "--apply-log. It is passed directly to xtrabackup's --use-memory "
- "option. See the xtrabackup documentation for details.",
+ "--apply-log.",
(uchar*) &ibx_xtrabackup_use_memory,
(uchar*) &ibx_xtrabackup_use_memory,
0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
@@ -643,7 +627,7 @@ static struct my_option ibx_long_options[] =
static void usage(void)
{
- puts("Open source backup tool for InnoDB and XtraDB\n\
+ puts("Open source backup tool\n\
\n\
Copyright (C) 2009-2015 Percona LLC and/or its affiliates.\n\
Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.\n\
@@ -694,7 +678,7 @@ innobackupex [--decompress]\n\
\n\
DESCRIPTION\n\
\n\
-The first command line above makes a hot backup of a MySQL database.\n\
+The first command line above makes a hot backup of a database.\n\
By default it creates a backup directory (named by the current date\n\
and time) in the given backup root directory. With the --no-timestamp\n\
option it does not create a time-stamped backup directory, but it puts\n\
@@ -704,22 +688,18 @@ indexes in all databases or in all of the databases specified with the\n\
--databases option. The created backup contains .frm, .MRG, .MYD,\n\
.MYI, .MAD, .MAI, .TRG, .TRN, .ARM, .ARZ, .CSM, CSV, .opt, .par, and\n\
InnoDB data and log files. The MY.CNF options file defines the\n\
-location of the database. This command connects to the MySQL server\n\
-using the mysql client program, and runs xtrabackup as a child\n\
-process.\n\
+location of the database.\n\
\n\
The --apply-log command prepares a backup for starting a MySQL\n\
server on the backup. This command recovers InnoDB data files as specified\n\
in BACKUP-DIR/backup-my.cnf using BACKUP-DIR/ib_logfile0,\n\
and creates new InnoDB log files as specified in BACKUP-DIR/backup-my.cnf.\n\
-The BACKUP-DIR should be the path to a backup directory created by\n\
-xtrabackup. This command runs xtrabackup as a child process, but it does not \n\
-connect to the database server.\n\
+The BACKUP-DIR should be the path to a backup directory\n\
\n\
The --copy-back command copies data, index, and log files\n\
from the backup directory back to their original locations.\n\
The MY.CNF options file defines the original location of the database.\n\
-The BACKUP-DIR is the path to a backup directory created by xtrabackup.\n\
+The BACKUP-DIR is the path to a backup directory.\n\
\n\
The --move-back command is similar to --copy-back with the only difference that\n\
it moves files to their original locations rather than copies them. As this\n\
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 4ed2a74006e..9d1e9a22b4a 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -253,9 +253,6 @@ static char* innobase_ignored_opt;
char* innobase_data_home_dir;
char* innobase_data_file_path;
-#ifndef _WIN32
-static char *xtrabackup_debug_sync = NULL;
-#endif
my_bool xtrabackup_incremental_force_scan = FALSE;
@@ -992,7 +989,7 @@ enum options_xtrabackup
struct my_option xb_client_options[]= {
{"verbose", 'V', "display verbose output", (G_PTR *) &verbose,
(G_PTR *) &verbose, 0, GET_BOOL, NO_ARG, FALSE, 0, 0, 0, 0, 0},
- {"version", 'v', "print xtrabackup version information",
+ {"version", 'v', "print version information",
(G_PTR *) &xtrabackup_version, (G_PTR *) &xtrabackup_version, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"target-dir", OPT_XTRA_TARGET_DIR, "destination directory",
@@ -1283,8 +1280,8 @@ struct my_option xb_client_options[]= {
"starting lsn for the incremental backup. This will be mutually "
"exclusive with --incremental-history-uuid, --incremental-basedir "
"and --incremental-lsn. If no valid lsn can be found (no series by "
- "that name, no successful backups by that name) xtrabackup will "
- "return with an error. It is used with the --incremental option.",
+ "that name, no successful backups by that name), an error will be returned."
+ " It is used with the --incremental option.",
(uchar *) &opt_incremental_history_name,
(uchar *) &opt_incremental_history_name, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
@@ -1294,8 +1291,8 @@ struct my_option xb_client_options[]= {
"stored in the PERCONA_SCHEMA.xtrabackup_history to base an "
"incremental backup on. --incremental-history-name, "
"--incremental-basedir and --incremental-lsn. If no valid lsn can be "
- "found (no success record with that uuid) xtrabackup will return "
- "with an error. It is used with the --incremental option.",
+ "found (no success record with that uuid), an error will be returned."
+ " It is used with the --incremental option.",
(uchar *) &opt_incremental_history_uuid,
(uchar *) &opt_incremental_history_uuid, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
@@ -1352,11 +1349,6 @@ struct my_option xb_client_options[]= {
(uchar *) &opt_lock_wait_threshold, (uchar *) &opt_lock_wait_threshold, 0,
GET_UINT, REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
- {"debug-sleep-before-unlock", OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
- "This is a debug-only option used by the XtraBackup test suite.",
- (uchar *) &opt_debug_sleep_before_unlock,
- (uchar *) &opt_debug_sleep_before_unlock, 0, GET_UINT, REQUIRED_ARG, 0, 0,
- 0, 0, 0, 0},
{"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
"How many seconds --safe-slave-backup should wait for "
@@ -1366,9 +1358,9 @@ struct my_option xb_client_options[]= {
0, 0, 0, 0, 0},
{"binlog-info", OPT_BINLOG_INFO,
- "This option controls how XtraBackup should retrieve server's binary log "
+ "This option controls how backup should retrieve server's binary log "
"coordinates corresponding to the backup. Possible values are OFF, ON, "
- "LOCKLESS and AUTO. See the XtraBackup manual for more information",
+ "LOCKLESS and AUTO.",
&opt_binlog_info, &opt_binlog_info, &binlog_info_typelib, GET_ENUM,
OPT_ARG, BINLOG_INFO_AUTO, 0, 0, 0, 0, 0},
@@ -1534,13 +1526,6 @@ struct my_option xb_server_options[] =
&dbug_option, &dbug_option, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
#endif
-#ifndef __WIN__
- {"debug-sync", OPT_XTRA_DEBUG_SYNC,
- "Debug sync point. This is only used by the xtrabackup test suite",
- (G_PTR*) &xtrabackup_debug_sync,
- (G_PTR*) &xtrabackup_debug_sync,
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"innodb_checksum_algorithm", OPT_INNODB_CHECKSUM_ALGORITHM,
"The algorithm InnoDB uses for page checksumming. [CRC32, STRICT_CRC32, "
@@ -1579,7 +1564,7 @@ struct my_option xb_server_options[] =
REQUIRED_ARG, 0, 0, UINT_MAX, 0, 1, 0},
{"lock-ddl-per-table", OPT_LOCK_DDL_PER_TABLE, "Lock DDL for each table "
- "before xtrabackup starts to copy it and until the backup is completed.",
+ "before backup starts to copy it and until the backup is completed.",
(uchar*) &opt_lock_ddl_per_table, (uchar*) &opt_lock_ddl_per_table, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -1622,60 +1607,6 @@ struct my_option xb_server_options[] =
uint xb_server_options_count = array_elements(xb_server_options);
-#ifndef __WIN__
-static int debug_sync_resumed;
-
-static void sigcont_handler(int sig);
-
-static void sigcont_handler(int sig __attribute__((unused)))
-{
- debug_sync_resumed= 1;
-}
-#endif
-
-static inline
-void
-debug_sync_point(const char *name)
-{
-#ifndef __WIN__
- FILE *fp;
- pid_t pid;
- char pid_path[FN_REFLEN];
-
- if (xtrabackup_debug_sync == NULL) {
- return;
- }
-
- if (strcmp(xtrabackup_debug_sync, name)) {
- return;
- }
-
- pid = getpid();
-
- snprintf(pid_path, sizeof(pid_path), "%s/xtrabackup_debug_sync",
- xtrabackup_target_dir);
- fp = fopen(pid_path, "w");
- if (fp == NULL) {
- die("Can't open open %s", pid_path);
- }
- fprintf(fp, "%u\n", (uint) pid);
- fclose(fp);
-
- msg("mariabackup: DEBUG: Suspending at debug sync point '%s'. "
- "Resume with 'kill -SIGCONT %u'.", name, (uint) pid);
-
- debug_sync_resumed= 0;
- kill(pid, SIGSTOP);
- while (!debug_sync_resumed) {
- sleep(1);
- }
-
- /* On resume */
- msg("mariabackup: DEBUG: removing the pid file.");
- my_delete(pid_path, MYF(MY_WME));
-#endif
-}
-
static std::set<std::string> tables_for_export;
@@ -3013,8 +2944,6 @@ static bool xtrabackup_copy_logfile(bool last = false)
log_copy_scanned_lsn = start_lsn;
pthread_cond_broadcast(&scanned_lsn_cond);
pthread_mutex_unlock(&backup_mutex);
-
- debug_sync_point("xtrabackup_copy_logfile_pause");
return(false);
}
@@ -3136,8 +3065,6 @@ static void data_copy_thread_func(data_thread_ctxt_t *ctxt) /* thread context */
*/
my_thread_init();
- debug_sync_point("data_copy_thread_func");
-
while ((node = datafiles_iter_next(ctxt->it)) != NULL) {
DBUG_MARIABACKUP_EVENT("before_copy", node->space->name());
DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy",
@@ -3905,7 +3832,6 @@ xb_load_tablespaces()
return(err);
}
- debug_sync_point("xtrabackup_load_tablespaces_pause");
DBUG_MARIABACKUP_EVENT("after_load_tablespaces", {});
return(DB_SUCCESS);
}
@@ -4016,7 +3942,7 @@ xb_register_filter_entry(
strncpy(dbname, name, p - name);
dbname[p - name] = 0;
- if (databases_hash) {
+ if (databases_hash && databases_hash->array) {
HASH_SEARCH(name_hash, databases_hash,
ut_fold_string(dbname),
xb_filter_entry_t*,
@@ -4679,8 +4605,6 @@ fail_before_log_copying_thread_start:
if (!flush_changed_page_bitmaps()) {
goto fail;
}
- debug_sync_point("xtrabackup_suspend_at_start");
-
ut_a(xtrabackup_parallel > 0);
@@ -6875,12 +6799,6 @@ static int main_low(char** argv)
}
}
-#ifndef __WIN__
- if (xtrabackup_debug_sync) {
- signal(SIGCONT, sigcont_handler);
- }
-#endif
-
/* --backup */
if (xtrabackup_backup && !xtrabackup_backup_func()) {
return(EXIT_FAILURE);
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index e74a6fbc6c2..efdf7277e5b 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -31,9 +31,9 @@ ${SSL_INTERNAL_INCLUDE_DIRS}
SET(GEN_SOURCES
${CMAKE_BINARY_DIR}/sql/sql_yacc.hh
-${CMAKE_BINARY_DIR}/sql/sql_yacc.cc
-${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.hh
-${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.cc
+${CMAKE_BINARY_DIR}/sql/yy_mariadb.cc
+${CMAKE_BINARY_DIR}/sql/yy_oracle.hh
+${CMAKE_BINARY_DIR}/sql/yy_oracle.cc
${CMAKE_BINARY_DIR}/sql/lex_hash.h
)
diff --git a/mysql-test/lib/My/SafeProcess.pm b/mysql-test/lib/My/SafeProcess.pm
index 19f2893ea51..b5a7660ed3e 100644
--- a/mysql-test/lib/My/SafeProcess.pm
+++ b/mysql-test/lib/My/SafeProcess.pm
@@ -622,7 +622,7 @@ sub self2str {
sub _verbose {
return unless $_verbose;
- print STDERR " ## ". @_. "\n";
+ print STDERR " ## @_\n";
}
diff --git a/mysql-test/main/delete_use_source.result b/mysql-test/main/delete_use_source.result
index 22972d0aa4a..0990a551db6 100644
--- a/mysql-test/main/delete_use_source.result
+++ b/mysql-test/main/delete_use_source.result
@@ -65,8 +65,8 @@ rollback;
start transaction;
explain delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL c1 NULL NULL NULL 670 Using where
-2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 167 Using index
+1 PRIMARY t1 ALL c1 NULL NULL NULL # Using where
+2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 # Using index
delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 ;
affected rows: 500
select count(*) from v1 where c1=0;
diff --git a/mysql-test/main/delete_use_source.test b/mysql-test/main/delete_use_source.test
index ed4c62b090c..4aed00da375 100644
--- a/mysql-test/main/delete_use_source.test
+++ b/mysql-test/main/delete_use_source.test
@@ -64,6 +64,7 @@ rollback;
--echo #
start transaction;
+--replace_column 9 #
explain delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500;
--enable_info ONCE
delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 ;
diff --git a/mysql-test/main/lock_kill.result b/mysql-test/main/lock_kill.result
new file mode 100644
index 00000000000..727dd66da7f
--- /dev/null
+++ b/mysql-test/main/lock_kill.result
@@ -0,0 +1,36 @@
+#
+# MDEV-17749 Kill during LOCK TABLE ; ALTER TABLE causes assert
+#
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+connect con1,localhost,root,,test;
+LOCK TABLE t1 WRITE;
+ALTER TABLE t1 ADD COLUMN b INT;
+connection default;
+Killing connection
+connection con1;
+connection default;
+disconnect con1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+CREATE TABLE t2 (a INT) ENGINE=InnoDB;
+connect con1,localhost,root,,test;
+LOCK TABLE t1 WRITE, t2 WRITE;
+ALTER TABLE t1 ADD COLUMN b INT;
+connection default;
+Killing connection
+connection con1;
+connection default;
+disconnect con1;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (id int(11)) ENGINE=InnoDB;
+LOCK TABLES t1 WRITE;
+SET max_statement_time= 0.0001;
+CREATE TRIGGER tr16 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO t1 VALUES (1);
+SET max_statement_time= default;
+DROP TRIGGER IF EXISTS trg16;
+Warnings:
+Note 1360 Trigger does not exist
+DROP TABLE t1;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/lock_kill.test b/mysql-test/main/lock_kill.test
new file mode 100644
index 00000000000..d0b83fe1413
--- /dev/null
+++ b/mysql-test/main/lock_kill.test
@@ -0,0 +1,59 @@
+--source include/have_innodb.inc
+
+# This test file is for testing killing of queries that are under LOCK TABLES
+
+--echo #
+--echo # MDEV-17749 Kill during LOCK TABLE ; ALTER TABLE causes assert
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+--connect (con1,localhost,root,,test)
+LOCK TABLE t1 WRITE;
+--let $conid= `SELECT CONNECTION_ID()`
+--send ALTER TABLE t1 ADD COLUMN b INT
+--connection default
+--disable_query_log
+--echo Killing connection
+eval KILL $conid;
+--enable_query_log
+--connection con1
+--error 0,2013
+reap;
+--connection default
+--disconnect con1
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+CREATE TABLE t2 (a INT) ENGINE=InnoDB;
+--connect (con1,localhost,root,,test)
+LOCK TABLE t1 WRITE, t2 WRITE;
+--let $conid= `SELECT CONNECTION_ID()`
+--send ALTER TABLE t1 ADD COLUMN b INT
+--connection default
+--disable_query_log
+--echo Killing connection
+eval KILL $conid;
+--enable_query_log
+--connection con1
+--error 0,2013
+reap;
+--connection default
+--disconnect con1
+DROP TABLE t1, t2;
+
+# Similar test for CREATE TRIGGER, which also failed
+
+CREATE TABLE t1 (id int(11)) ENGINE=InnoDB;
+LOCK TABLES t1 WRITE;
+SET max_statement_time= 0.0001;
+--error 0,1969
+--disable_warnings
+CREATE TRIGGER tr16 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO t1 VALUES (1);
+--enable_warnings
+SET max_statement_time= default;
+DROP TRIGGER IF EXISTS trg16;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index fd9fd7fff1c..c1334b0b9a7 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5551,6 +5551,24 @@ Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
#
+# MDEV-25576: The statement EXPLAIN running as regular statement and
+# as prepared statement produces different results for
+# UPDATE with subquery
+#
+CREATE TABLE t1 (c1 INT KEY) ENGINE=MyISAM;
+CREATE TABLE t2 (c2 INT) ENGINE=MyISAM;
+CREATE TABLE t3 (c3 INT) ENGINE=MyISAM;
+EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 );
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )";
+EXECUTE stmt;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2, t3;
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index 2a468d33ace..837fa6f2b6e 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -4975,7 +4975,21 @@ SHOW WARNINGS;
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-25576: The statement EXPLAIN running as regular statement and
+--echo # as prepared statement produces different results for
+--echo # UPDATE with subquery
--echo #
+CREATE TABLE t1 (c1 INT KEY) ENGINE=MyISAM;
+CREATE TABLE t2 (c2 INT) ENGINE=MyISAM;
+CREATE TABLE t3 (c3 INT) ENGINE=MyISAM;
+EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 );
+PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2, t3;
+
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 740abcc9314..c933a22aab1 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -50,4 +50,3 @@ pxc-421: wsrep_provider is read-only for security reasons
query_cache: MDEV-15805 Test failure on galera.query_cache
versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
-MDEV-25562: MDEV-25562 FIXME: lock wait timeout exceeded
diff --git a/mysql-test/suite/galera/r/MDEV-25562.result b/mysql-test/suite/galera/r/MDEV-25562.result
index b0d77af374b..ab8340af8a9 100644
--- a/mysql-test/suite/galera/r/MDEV-25562.result
+++ b/mysql-test/suite/galera/r/MDEV-25562.result
@@ -4,13 +4,3 @@ SET SESSION WSREP_ON=0;
FLUSH TABLES WITH READ LOCK AND DISABLE CHECKPOINT;
SET SESSION WSREP_ON=1;
UNLOCK TABLES;
-SET GLOBAL wsrep_ignore_apply_errors=1;
-CREATE TABLE t1 (a CHAR(1)) engine=innodb;
-CREATE TABLE t1 (a CHAR(1)) engine=innodb;
-ERROR 42S01: Table 't1' already exists
-SHOW PROCEDURE STATUS WHERE db = 'test';
-Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
-SET GLOBAL read_only=1;
-SET GLOBAL wsrep_ignore_apply_errors=DEFAULT;
-SET GLOBAL read_only=DEFAULT;
-DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_nonPK_and_PA.result b/mysql-test/suite/galera/r/galera_nonPK_and_PA.result
new file mode 100644
index 00000000000..5ad55417fd1
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_nonPK_and_PA.result
@@ -0,0 +1,63 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 VARCHAR(32) NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 (f1) VALUES ('0e66c5227a8a');
+INSERT INTO t1 (f1) VALUES ('c6c112992c9');
+CREATE TABLE t2 (i int primary key);
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_slave_threads = 2;
+***************************************************************
+scenario 1, conflicting UPDATE
+***************************************************************
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_slave_enter_sync';
+connection node_1;
+START TRANSACTION;
+UPDATE t1 SET f1='5ffceebfada' WHERE t1.f1 = 'c6c112992c9';
+COMMIT;
+connection node_2;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+connection node_1;
+START TRANSACTION;
+UPDATE t1 SET f1='4ffceebfcdc' WHERE t1.f1 = '0e66c5227a8a';
+COMMIT;
+connection node_2;
+distance
+1
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+***************************************************************
+scenario 2, conflicting DELETE
+***************************************************************
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_slave_enter_sync';
+connection node_1;
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+DELETE FROM t1 WHERE f1='5ffceebfada';
+COMMIT;
+connection node_2;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+connection node_1;
+START TRANSACTION;
+INSERT INTO t2 VALUES (2);
+DELETE FROM t1 WHERE f1='4ffceebfcdc';
+COMMIT;
+connection node_2;
+distance
+1
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+DROP TABLE t1;
+DROP TABLE t2;
+connection node_2;
+SET GLOBAL wsrep_slave_threads = DEFAULT;
diff --git a/mysql-test/suite/galera/t/MDEV-25562.test b/mysql-test/suite/galera/t/MDEV-25562.test
index 01729936b08..b4552643089 100644
--- a/mysql-test/suite/galera/t/MDEV-25562.test
+++ b/mysql-test/suite/galera/t/MDEV-25562.test
@@ -9,15 +9,3 @@ SET SESSION WSREP_ON=0;
FLUSH TABLES WITH READ LOCK AND DISABLE CHECKPOINT;
SET SESSION WSREP_ON=1;
UNLOCK TABLES;
-
-SET GLOBAL wsrep_ignore_apply_errors=1;
-CREATE TABLE t1 (a CHAR(1)) engine=innodb;
---error ER_TABLE_EXISTS_ERROR
-CREATE TABLE t1 (a CHAR(1)) engine=innodb;
-SHOW PROCEDURE STATUS WHERE db = 'test';
-SET GLOBAL read_only=1;
-
-SET GLOBAL wsrep_ignore_apply_errors=DEFAULT;
-SET GLOBAL read_only=DEFAULT;
-DROP TABLE t1;
-
diff --git a/mysql-test/suite/galera/t/galera_ist_mariabackup.cnf b/mysql-test/suite/galera/t/galera_ist_mariabackup.cnf
index 2392071ccfe..f03527dc808 100644
--- a/mysql-test/suite/galera/t/galera_ist_mariabackup.cnf
+++ b/mysql-test/suite/galera/t/galera_ist_mariabackup.cnf
@@ -12,4 +12,4 @@ wsrep_provider_options='base_port=@mysqld.2.#galera_port;pc.ignore_sb=true'
[sst]
transferfmt=@ENV.MTR_GALERA_TFMT
-streamfmt=xbstream
+streamfmt=mbstream
diff --git a/mysql-test/suite/galera/t/galera_ist_mariabackup_innodb_flush_logs.cnf b/mysql-test/suite/galera/t/galera_ist_mariabackup_innodb_flush_logs.cnf
index e98f37654cc..e4f52aaad0a 100644
--- a/mysql-test/suite/galera/t/galera_ist_mariabackup_innodb_flush_logs.cnf
+++ b/mysql-test/suite/galera/t/galera_ist_mariabackup_innodb_flush_logs.cnf
@@ -14,4 +14,3 @@ wsrep_provider_options='base_port=@mysqld.2.#galera_port;pc.ignore_sb=true'
[sst]
transferfmt=@ENV.MTR_GALERA_TFMT
-streamfmt=xbstream
diff --git a/mysql-test/suite/galera/t/galera_ist_mysqldump.cnf b/mysql-test/suite/galera/t/galera_ist_mysqldump.cnf
index 357c8678658..807829f047f 100644
--- a/mysql-test/suite/galera/t/galera_ist_mysqldump.cnf
+++ b/mysql-test/suite/galera/t/galera_ist_mysqldump.cnf
@@ -8,5 +8,3 @@ wsrep_provider_options='base_port=@mysqld.1.#galera_port;pc.ignore_sb=true'
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;pc.ignore_sb=true'
-
-
diff --git a/mysql-test/suite/galera/t/galera_nonPK_and_PA.test b/mysql-test/suite/galera/t/galera_nonPK_and_PA.test
new file mode 100644
index 00000000000..8a5173f576b
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_nonPK_and_PA.test
@@ -0,0 +1,168 @@
+#
+# This test is a modified version of Gabor Orosz (GOro) test in jira tracker:
+# https://jira.mariadb.org/browse/MDEV-25551
+#
+# The underlying problem with MDEV-25551 turned out to be that
+# transactions having changes for tables with no primary key
+# were not safe to apply in parallel. This is due to excessive locking
+# on the InnoDB side, where even unrelated row modifications could end up
+# in lock conflicts during applying.
+#
+# The test creates a table with no primary key definition and executes two
+# transactions (in node1) modifying separate rows in the table. In node2
+# the first applier is paused before the commit phase, and the second transaction
+# is then submitted to see if it can interfere with the first transaction.
+# The fix for MDEV-25551 has disabled parallel applying for tables with no PK,
+# and in the test, applying of the second transaction should not even start before
+# the first transaction is released from the sync point.
+# The test also verifies that the certification dependency status reflects the fact
+# that the two transactions depend on each other.
+#
+# The test has two scenarios where both UPDATE and DELETE statements are verified
+# to disable parallel applying
+#
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+
+# Setup
+
+CREATE TABLE t1 (f1 VARCHAR(32) NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 (f1) VALUES ('0e66c5227a8a');
+INSERT INTO t1 (f1) VALUES ('c6c112992c9');
+
+CREATE TABLE t2 (i int primary key);
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT COUNT(*)=2 FROM t1;
+--source include/wait_condition.inc
+
+# Ensure that we have enough applier threads to process transactions in parallel
+SET GLOBAL wsrep_slave_threads = 2;
+
+--echo ***************************************************************
+--echo scenario 1, conflicting UPDATE
+--echo ***************************************************************
+
+# Set up a synchronization point to catch the first transaction
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+# Invoke the first transaction
+START TRANSACTION;
+UPDATE t1 SET f1='5ffceebfada' WHERE t1.f1 = 'c6c112992c9';
+COMMIT;
+
+--connection node_2
+# Wait for the first transaction to apply until commit phase
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# remember status for received replication counter and certification dependency distance
+--let $expected_wsrep_received = `SELECT VARIABLE_VALUE+1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_received'`
+--let $cert_deps_distance = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'`
+
+--connection node_1
+# Invoke the second transaction
+START TRANSACTION;
+UPDATE t1 SET f1='4ffceebfcdc' WHERE t1.f1 = '0e66c5227a8a';
+COMMIT;
+
+# The sleep is probably obsolete here, but it is good to give the latter update time to
+# proceed with applying on node 2. In a buggy version the update will start applying
+# and cause a conflict there.
+--sleep 5
+
+--connection node_2
+# Wait for the second transaction to appear in the replication queue
+--let $wait_condition = SELECT VARIABLE_VALUE= $expected_wsrep_received FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_received';
+--source include/wait_condition.inc
+
+# verify that certification dependency distance has dropped
+--disable_query_log
+--eval SELECT VARIABLE_VALUE < $cert_deps_distance as 'distance' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'
+--enable_query_log
+
+# if deps distance dropped, it is indirect evidence that parallel applying was not approved
+
+# Let the first transaction proceed
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+# second applier should now hit sync point
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+
+--echo ***************************************************************
+--echo scenario 2, conflicting DELETE
+--echo ***************************************************************
+
+# Set up a synchronization point to catch the first transaction
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+# Invoke the first transaction, mix this with insert to table having PK
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+DELETE FROM t1 WHERE f1='5ffceebfada';
+COMMIT;
+
+--connection node_2
+# Wait for the first transaction to apply until commit phase
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# remember status for received replication counter and certification dependency distance
+--let $expected_wsrep_received = `SELECT VARIABLE_VALUE+1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_received'`
+--let $cert_deps_distance = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'`
+
+--connection node_1
+# Invoke the second transaction, again mix this with insert to table having PK
+START TRANSACTION;
+INSERT INTO t2 VALUES (2);
+DELETE FROM t1 WHERE f1='4ffceebfcdc';
+COMMIT;
+
+# The sleep is probably obsolete here, but it is good to give the latter update time to
+# proceed with applying on node 2. In a buggy version the update will start applying
+# and cause a conflict there.
+--sleep 5
+
+--connection node_2
+# Wait for the second transaction to appear in the replication queue
+--let $wait_condition = SELECT VARIABLE_VALUE= $expected_wsrep_received FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_received';
+--source include/wait_condition.inc
+
+# verify that certification dependency distance has dropped
+--disable_query_log
+--eval SELECT VARIABLE_VALUE < $cert_deps_distance as 'distance' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'
+--enable_query_log
+
+# if deps distance dropped, it is indirect evidence that parallel applying was not approved
+
+# Let the first transaction proceed
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+# second applier should now hit sync point
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# Teardown
+--connection node_1
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+
+DROP TABLE t1;
+DROP TABLE t2;
+--connection node_2
+SET GLOBAL wsrep_slave_threads = DEFAULT;
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
index be2ca0d1f09..857a4101406 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
@@ -13,4 +13,4 @@ wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore
[sst]
transferfmt=@ENV.MTR_GALERA_TFMT
-streamfmt=xbstream
+streamfmt=mbstream
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_qpress.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_qpress.cnf
index 0dba13ecffd..e99c819d2e8 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_qpress.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_qpress.cnf
@@ -10,3 +10,4 @@ transferfmt=@ENV.MTR_GALERA_TFMT
compress=quicklz
compress-threads=2
compress-chunk-size=32768
+backup-threads=2
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
index 337f4f2d2d9..d3fff4fcb0b 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
@@ -14,4 +14,4 @@ wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore
[sst]
transferfmt=@ENV.MTR_GALERA_TFMT
-streamfmt=xbstream
+streamfmt=mbstream
diff --git a/mysql-test/suite/galera/t/galera_sst_rsync.cnf b/mysql-test/suite/galera/t/galera_sst_rsync.cnf
index 93981d9daa7..29d3a1e6219 100644
--- a/mysql-test/suite/galera/t/galera_sst_rsync.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_rsync.cnf
@@ -8,4 +8,3 @@ wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
-
diff --git a/mysql-test/suite/galera/t/galera_sst_rsync2.cnf b/mysql-test/suite/galera/t/galera_sst_rsync2.cnf
index 34e67c66403..0159596f99b 100644
--- a/mysql-test/suite/galera/t/galera_sst_rsync2.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_rsync2.cnf
@@ -12,4 +12,3 @@ log_bin_index=@ENV.MYSQLTEST_VARDIR/tmp/server1_binlog_index.index
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
log_bin=@ENV.MYSQLTEST_VARDIR/server2_binlog
log_bin_index=@ENV.MYSQLTEST_VARDIR/tmp/server2_binlog_index.index
-
diff --git a/mysql-test/suite/galera/t/galera_sst_rsync_data_dir.cnf b/mysql-test/suite/galera/t/galera_sst_rsync_data_dir.cnf
index afe9796a11a..4cc3442bd87 100644
--- a/mysql-test/suite/galera/t/galera_sst_rsync_data_dir.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_rsync_data_dir.cnf
@@ -9,3 +9,6 @@ wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore
[mysqld.2]
innodb_data_home_dir=@ENV.MYSQL_TMP_DIR/rsync_test_2
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
+
+[sst]
+backup_threads=2
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
index 8432b1c368c..3b0f0173b35 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
@@ -34,4 +34,3 @@ bind-address=::
[SST]
transferfmt=@ENV.MTR_GALERA_TFMT
streamfmt=xbstream
-sockopt=",pf=ip6"
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.cnf
index aa3da690416..a3ab9a85707 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.cnf
@@ -38,4 +38,5 @@ bind-address=::
[SST]
transferfmt=@ENV.MTR_GALERA_TFMT
streamfmt=xbstream
+# Not needed, but left here for debugging:
sockopt=",pf=ip6"
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.cnf
index 5e77a45210b..c163f798335 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.cnf
@@ -24,6 +24,3 @@ wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
bind-address=::
-
-[SST]
-sockopt=",pf=ip6"
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync.cnf
index 5e77a45210b..c163f798335 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync.cnf
@@ -24,6 +24,3 @@ wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
bind-address=::
-
-[SST]
-sockopt=",pf=ip6"
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync_section.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync_section.cnf
index 809b83bb782..ee92efeff1d 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync_section.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_rsync_section.cnf
@@ -29,6 +29,3 @@ wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
bind-address=::
-
-[SST]
-sockopt=",pf=ip6"
diff --git a/mysql-test/suite/galera_sr/r/GCF-627.result b/mysql-test/suite/galera_sr/r/GCF-627.result
index 891cf4af5a9..65d8c95ad08 100644
--- a/mysql-test/suite/galera_sr/r/GCF-627.result
+++ b/mysql-test/suite/galera_sr/r/GCF-627.result
@@ -16,11 +16,9 @@ connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
INSERT INTO t1 VALUES (2);
ERROR 42S02: Table 'test.t1' doesn't exist
connection node_1;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
connection node_2;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_nonPK_and_PA.result b/mysql-test/suite/galera_sr/r/galera_sr_nonPK_and_PA.result
new file mode 100644
index 00000000000..b7e9cf49f95
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_nonPK_and_PA.result
@@ -0,0 +1,46 @@
+connection node_2;
+connection node_1;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_slave_threads = 2;
+flush status;
+connection node_1;
+CREATE TABLE t1 (f1 int, f2 int) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 int primary key, f2 int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,0);
+INSERT INTO t1 VALUES (2,0);
+INSERT INTO t2 VALUES (1,0);
+INSERT INTO t2 VALUES (2,0);
+connection node_2;
+connection node_1;
+set session wsrep_trx_fragment_size=1;
+START TRANSACTION;
+UPDATE t1 SET f2=1 where f1=1;
+connection node_2;
+distance
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_slave_enter_sync';
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+update t2 set f2=1 where f1=1;
+connection node_2;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_1;
+UPDATE t2 set f2=2 where f1=2;
+connection node_2;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+COMMIT;
+connection node_1;
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+DROP TABLE t1;
+DROP TABLE t2;
+connection node_2;
+SET GLOBAL wsrep_slave_threads = DEFAULT;
diff --git a/mysql-test/suite/galera_sr/t/GCF-627.test b/mysql-test/suite/galera_sr/t/GCF-627.test
index 86637ad8e7f..ad351eb9da6 100644
--- a/mysql-test/suite/galera_sr/t/GCF-627.test
+++ b/mysql-test/suite/galera_sr/t/GCF-627.test
@@ -22,9 +22,14 @@ COMMIT;
INSERT INTO t1 VALUES (2);
--connection node_1
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+SELECT * FROM mysql.wsrep_streaming_log;
--connection node_2
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+SELECT * FROM mysql.wsrep_streaming_log;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_nonPK_and_PA.test b/mysql-test/suite/galera_sr/t/galera_sr_nonPK_and_PA.test
new file mode 100644
index 00000000000..c343cd202bf
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_nonPK_and_PA.test
@@ -0,0 +1,109 @@
+#
+# This test is a modified version of the Gabor Orosz (GOro) test in the jira tracker:
+# https://jira.mariadb.org/browse/MDEV-25551
+#
+# The underlying problem with MDEV-25551 turned out to be that
+# transactions containing changes for tables with no primary key
+# were not safe to apply in parallel. This is due to excessive locking
+# on the InnoDB side, where even unrelated row modifications could end
+# up in a lock conflict during applying.
+#
+# The test verifies that a transaction executing streaming replication
+# will disable parallel applying if it modifies a table with no primary key.
+# And, if PA was disabled temporarily, the restriction is relaxed if the next
+# fragment contains changes for a table with a primary key.
+#
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+
+# Setup
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+
+# Ensure that we have enough applier threads to process transactions in parallel
+SET GLOBAL wsrep_slave_threads = 2;
+
+flush status;
+
+--connection node_1
+CREATE TABLE t1 (f1 int, f2 int) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 int primary key, f2 int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,0);
+INSERT INTO t1 VALUES (2,0);
+
+INSERT INTO t2 VALUES (1,0);
+INSERT INTO t2 VALUES (2,0);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*)=2 FROM t2;
+--source include/wait_condition.inc
+
+# remember the current value of the certification dependency distance status variable
+--let $cert_deps_distance = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'`
+
+--connection node_1
+# Invoke the first transaction
+set session wsrep_trx_fragment_size=1;
+START TRANSACTION;
+UPDATE t1 SET f2=1 where f1=1;
+
+--connection node_2
+# verify that certification dependency distance has dropped
+--disable_query_log
+--eval SELECT VARIABLE_VALUE < $cert_deps_distance as 'distance' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cert_deps_distance'
+--enable_query_log
+
+# If the deps distance dropped, it is indirect evidence that parallel applying was not approved
+
+# Next, check that the PA restriction is relaxed if the next fragment updates table t2, which has a primary key.
+# wsrep_cert_deps_distance cannot be trusted in this test phase, so we verify parallel applying
+# by setting a sync point for the applier thread
+
+# Set up a synchronization point to catch the update on t2
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+update t2 set f2=1 where f1=1;
+
+--connection node_2
+# Wait for the update on t2 to be applied up to the commit phase
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# Set up a synchronization point to catch the SR transaction being applied
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+# Continue the SR transaction, and now update t2, which has a PK
+UPDATE t2 set f2=2 where f1=2;
+
+--connection node_2
+# Wait until the appliers have reached the apply and commit monitor sync points
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# Let the first transaction proceed
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+--connection node_1
+COMMIT;
+
+# Teardown
+--connection node_1
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+
+DROP TABLE t1;
+DROP TABLE t2;
+--connection node_2
+SET GLOBAL wsrep_slave_threads = DEFAULT;
diff --git a/mysql-test/suite/innodb/r/index_vcol_purge_startup.result b/mysql-test/suite/innodb/r/index_vcol_purge_startup.result
new file mode 100644
index 00000000000..5603c14acf3
--- /dev/null
+++ b/mysql-test/suite/innodb/r/index_vcol_purge_startup.result
@@ -0,0 +1,24 @@
+call mtr.add_suppression('SQL_ERROR_LOG');
+call mtr.add_suppression('Failed to initialize plugins');
+call mtr.add_suppression('Aborting');
+create table t1(a int primary key, b int as (100-a*a), index(b)) engine=innodb;
+insert t1 (a) values(1),(2),(3);
+start transaction;
+select * from t1 order by a;
+a b
+1 99
+2 96
+3 91
+connect con1, localhost, root;
+delete from t1 where a=2;
+flush tables;
+connection default;
+# Kill the server
+# Failed restart
+# Start the server
+# restart
+select * from t1 order by a;
+a b
+1 99
+3 91
+drop table t1;
diff --git a/mysql-test/suite/innodb/t/index_vcol_purge_startup.test b/mysql-test/suite/innodb/t/index_vcol_purge_startup.test
new file mode 100644
index 00000000000..ccc07fe7314
--- /dev/null
+++ b/mysql-test/suite/innodb/t/index_vcol_purge_startup.test
@@ -0,0 +1,28 @@
+source include/have_innodb.inc;
+# need to restart server
+source include/not_embedded.inc;
+
+call mtr.add_suppression('SQL_ERROR_LOG');
+call mtr.add_suppression('Failed to initialize plugins');
+call mtr.add_suppression('Aborting');
+
+if (!$SQL_ERRLOG_SO) {
+ skip No SQL_ERROR_LOG plugin;
+}
+
+create table t1(a int primary key, b int as (100-a*a), index(b)) engine=innodb;
+insert t1 (a) values(1),(2),(3);
+start transaction;
+select * from t1 order by a;
+connect(con1, localhost, root);
+delete from t1 where a=2;
+flush tables;
+connection default;
+source include/kill_mysqld.inc;
+echo # Failed restart;
+error 1;
+exec $MYSQLD_LAST_CMD --plugin-load-add=$SQL_ERRLOG_SO --sql-error-log=FORCE --sql-error-log-filename=$MYSQLTEST_VARDIR/nonexistent/nonexistent;
+echo # Start the server;
+source include/start_mysqld.inc;
+select * from t1 order by a;
+drop table t1;
diff --git a/mysql-test/suite/innodb_gis/r/gis.result b/mysql-test/suite/innodb_gis/r/gis.result
index 3a71e0a8558..e673d92f5c6 100644
--- a/mysql-test/suite/innodb_gis/r/gis.result
+++ b/mysql-test/suite/innodb_gis/r/gis.result
@@ -1480,9 +1480,20 @@ FROM buildings, bridges
WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
count(*)
1
-DROP DATABASE gis_ogs;
#
# Bug#13362660 ASSERTION `FIELD_POS < FIELD_COUNT' FAILED. IN PROTOCOL_TEXT::STORE
#
SELECT ST_Union('', ''), md5(1);
ERROR HY000: Illegal parameter data type varchar for operation 'st_union'
+#
+# MDEV-25758 InnoDB spatial indexes miss large geometry
+# fields after MDEV-25459
+#
+CREATE TABLE t1(l LINESTRING NOT NULL, SPATIAL INDEX(l))ENGINE=InnoDB;
+SELECT GROUP_CONCAT(CONCAT(seq, ' ', seq) SEPARATOR ',') INTO @g FROM seq_0_to_504;
+INSERT INTO t1 SET l=ST_GeomFromText(CONCAT('LINESTRING(',@g,',0 0)'));
+SELECT COUNT(*) FROM t1 WHERE MBRIntersects(GeomFromText('Polygon((0 0,0 10,10 10,10 0,0 0))'), l);
+COUNT(*)
+1
+DROP TABLE t1;
+DROP DATABASE gis_ogs;
diff --git a/mysql-test/suite/innodb_gis/t/gis.test b/mysql-test/suite/innodb_gis/t/gis.test
index 629bb94b8c5..2963c033d80 100644
--- a/mysql-test/suite/innodb_gis/t/gis.test
+++ b/mysql-test/suite/innodb_gis/t/gis.test
@@ -2,6 +2,7 @@
--source include/have_innodb.inc
-- source include/have_geometry.inc
+--source include/have_sequence.inc
SET default_storage_engine=InnoDB;
@@ -1424,11 +1425,21 @@ WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
#FROM lakes
#WHERE lakes.name = 'Blue Lake';
-DROP DATABASE gis_ogs;
-
--echo #
--echo # Bug#13362660 ASSERTION `FIELD_POS < FIELD_COUNT' FAILED. IN PROTOCOL_TEXT::STORE
--echo #
--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
SELECT ST_Union('', ''), md5(1);
+
+--echo #
+--echo # MDEV-25758 InnoDB spatial indexes miss large geometry
+--echo # fields after MDEV-25459
+--echo #
+CREATE TABLE t1(l LINESTRING NOT NULL, SPATIAL INDEX(l))ENGINE=InnoDB;
+SELECT GROUP_CONCAT(CONCAT(seq, ' ', seq) SEPARATOR ',') INTO @g FROM seq_0_to_504;
+INSERT INTO t1 SET l=ST_GeomFromText(CONCAT('LINESTRING(',@g,',0 0)'));
+SELECT COUNT(*) FROM t1 WHERE MBRIntersects(GeomFromText('Polygon((0 0,0 10,10 10,10 0,0 0))'), l);
+DROP TABLE t1;
+
+DROP DATABASE gis_ogs;
diff --git a/mysql-test/suite/mariabackup/xbstream.test b/mysql-test/suite/mariabackup/xbstream.test
index 9161d227b20..212ac598064 100644
--- a/mysql-test/suite/mariabackup/xbstream.test
+++ b/mysql-test/suite/mariabackup/xbstream.test
@@ -8,7 +8,7 @@ mkdir $targetdir;
let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.xb;
echo # xtrabackup backup to stream;
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --databases-exclude=foobar --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log;
echo # xbstream extract;
--disable_result_log
exec $XBSTREAM -x -C $targetdir < $streamfile;
diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh
index 952a37f75d2..05944ef6035 100644
--- a/scripts/wsrep_sst_common.sh
+++ b/scripts/wsrep_sst_common.sh
@@ -29,7 +29,9 @@ WSREP_SST_OPT_USER="${WSREP_SST_OPT_USER:-}"
WSREP_SST_OPT_PSWD="${WSREP_SST_OPT_PSWD:-}"
WSREP_SST_OPT_REMOTE_AUTH="${WSREP_SST_OPT_REMOTE_AUTH:-}"
WSREP_SST_OPT_DEFAULT=""
+WSREP_SST_OPT_DEFAULTS=""
WSREP_SST_OPT_EXTRA_DEFAULT=""
+WSREP_SST_OPT_EXTRA_DEFAULTS=""
WSREP_SST_OPT_SUFFIX_DEFAULT=""
WSREP_SST_OPT_SUFFIX_VALUE=""
WSREP_SST_OPT_MYSQLD=""
@@ -152,10 +154,12 @@ case "$1" in
;;
'--defaults-file')
readonly WSREP_SST_OPT_DEFAULT="$1=$2"
+ readonly WSREP_SST_OPT_DEFAULTS="$1='$2'"
shift
;;
'--defaults-extra-file')
readonly WSREP_SST_OPT_EXTRA_DEFAULT="$1=$2"
+ readonly WSREP_SST_OPT_EXTRA_DEFAULTS="$1='$2'"
shift
;;
'--defaults-group-suffix')
@@ -295,7 +299,7 @@ case "$1" in
value="$1"
fi
fi
- if [ $option == 'h' ]; then
+ if [ $option = 'h' ]; then
if [ -z "$WSREP_SST_OPT_DATA" ]; then
MYSQLD_OPT_DATADIR="${value%/}"
fi
@@ -611,24 +615,54 @@ else
MYSQLDUMP="$(command -v mysqldump)"
fi
+wsrep_log()
+{
+ # echo everything to stderr so that it gets into common error log
+ # deliberately made to look different from the rest of the log
+ local readonly tst="$(date +%Y%m%d\ %H:%M:%S.%N | cut -b -21)"
+ echo "WSREP_SST: $* ($tst)" >&2
+}
+
+wsrep_log_error()
+{
+ wsrep_log "[ERROR] $*"
+}
+
+wsrep_log_warning()
+{
+ wsrep_log "[WARNING] $*"
+}
+
+wsrep_log_info()
+{
+ wsrep_log "[INFO] $*"
+}
+
if [ -x "$SCRIPTS_DIR/my_print_defaults" ]; then
MY_PRINT_DEFAULTS="$SCRIPTS_DIR/my_print_defaults"
elif [ -x "$EXTRA_DIR/my_print_defaults" ]; then
MY_PRINT_DEFAULTS="$EXTRA_DIR/my_print_defaults"
else
MY_PRINT_DEFAULTS="$(command -v my_print_defaults)"
+ if [ -z "$MY_PRINT_DEFAULTS" ]; then
+ wsrep_log_error "my_print_defaults not found in path"
+ exit 2
+ fi
fi
+readonly MY_PRINT_DEFAULTS
+
+wsrep_defaults="$WSREP_SST_OPT_DEFAULTS"
+wsrep_defaults="$wsrep_defaults${wsrep_defaults:+ }$WSREP_SST_OPT_EXTRA_DEFAULTS"
+wsrep_defaults="$wsrep_defaults${wsrep_defaults:+ }$WSREP_SST_OPT_SUFFIX_DEFAULT"
+
+readonly WSREP_SST_OPT_CONF="$wsrep_defaults"
+
wsrep_defaults="$WSREP_SST_OPT_DEFAULT"
-if [ -n "$wsrep_defaults" ]; then
- wsrep_defaults="$wsrep_defaults "
-fi
-wsrep_defaults="$wsrep_defaults$WSREP_SST_OPT_EXTRA_DEFAULT"
-if [ -n "$wsrep_defaults" ]; then
- wsrep_defaults="$wsrep_defaults "
-fi
-readonly WSREP_SST_OPT_CONF="$wsrep_defaults$WSREP_SST_OPT_SUFFIX_DEFAULT"
-readonly MY_PRINT_DEFAULTS="$MY_PRINT_DEFAULTS $WSREP_SST_OPT_CONF"
+wsrep_defaults="$wsrep_defaults${wsrep_defaults:+ }$WSREP_SST_OPT_EXTRA_DEFAULT"
+wsrep_defaults="$wsrep_defaults${wsrep_defaults:+ }$WSREP_SST_OPT_SUFFIX_DEFAULT"
+
+readonly WSREP_SST_OPT_CONF_UNQUOTED="$wsrep_defaults"
#
# User can specify mariabackup specific settings that will be used during sst
@@ -663,13 +697,21 @@ parse_cnf()
# If the group name is the same as the "mysqld" without "--" prefix,
# then try to use it together with the group suffix:
if [ "$group" = 'mysqld' -a -n "$WSREP_SST_OPT_SUFFIX_VALUE" ]; then
- reval=$($MY_PRINT_DEFAULTS "mysqld$WSREP_SST_OPT_SUFFIX_VALUE" | awk "$pattern")
+ reval=$("$MY_PRINT_DEFAULTS" \
+ ${WSREP_SST_OPT_DEFAULT:+"$WSREP_SST_OPT_DEFAULT"} \
+ ${WSREP_SST_OPT_EXTRA_DEFAULT:+"$WSREP_SST_OPT_EXTRA_DEFAULT"} \
+ ${WSREP_SST_OPT_SUFFIX_DEFAULT:+"$WSREP_SST_OPT_SUFFIX_DEFAULT"} \
+ "mysqld$WSREP_SST_OPT_SUFFIX_VALUE" | awk "$pattern")
if [ -n "$reval" ]; then
break
fi
fi
# Let's try to use the group name as it is:
- reval=$($MY_PRINT_DEFAULTS "$group" | awk "$pattern")
+ reval=$("$MY_PRINT_DEFAULTS" \
+ ${WSREP_SST_OPT_DEFAULT:+"$WSREP_SST_OPT_DEFAULT"} \
+ ${WSREP_SST_OPT_EXTRA_DEFAULT:+"$WSREP_SST_OPT_EXTRA_DEFAULT"} \
+ ${WSREP_SST_OPT_SUFFIX_DEFAULT:+"$WSREP_SST_OPT_SUFFIX_DEFAULT"} \
+ "$group" | awk "$pattern")
if [ -n "$reval" ]; then
break
fi
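
For illustration, with hypothetical option values the quoted invocation above expands to something like the following (a sketch only; the real paths and group suffix come from the --defaults-* options passed to the SST script):

    # Hypothetical expansion: read the [mysqld.1] group from the server's
    # configuration files and filter it with the awk pattern built above
    "$MY_PRINT_DEFAULTS" \
        "--defaults-file=/etc/my.cnf" \
        "--defaults-extra-file=/etc/my.extra.cnf" \
        "--defaults-group-suffix=.1" \
        "mysqld.1" | awk "$pattern"
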
@@ -710,13 +752,21 @@ in_config()
# If the group name is the same as the "mysqld" without "--" prefix,
# then try to use it together with the group suffix:
if [ "$group" = 'mysqld' -a -n "$WSREP_SST_OPT_SUFFIX_VALUE" ]; then
- found=$($MY_PRINT_DEFAULTS "mysqld$WSREP_SST_OPT_SUFFIX_VALUE" | awk "$pattern")
+ found=$("$MY_PRINT_DEFAULTS" \
+ ${WSREP_SST_OPT_DEFAULT:+"$WSREP_SST_OPT_DEFAULT"} \
+ ${WSREP_SST_OPT_EXTRA_DEFAULT:+"$WSREP_SST_OPT_EXTRA_DEFAULT"} \
+ ${WSREP_SST_OPT_SUFFIX_DEFAULT:+"$WSREP_SST_OPT_SUFFIX_DEFAULT"} \
+ "mysqld$WSREP_SST_OPT_SUFFIX_VALUE" | awk "$pattern")
if [ $found -ne 0 ]; then
break
fi
fi
# Let's try to use the group name as it is:
- found=$($MY_PRINT_DEFAULTS "$group" | awk "$pattern")
+ found=$($MY_PRINT_DEFAULTS \
+ ${WSREP_SST_OPT_DEFAULT:+"$WSREP_SST_OPT_DEFAULT"} \
+ ${WSREP_SST_OPT_EXTRA_DEFAULT:+"$WSREP_SST_OPT_EXTRA_DEFAULT"} \
+ ${WSREP_SST_OPT_SUFFIX_DEFAULT:+"$WSREP_SST_OPT_SUFFIX_DEFAULT"} \
+ "$group" | awk "$pattern")
if [ $found -ne 0 ]; then
break
fi
@@ -797,29 +847,6 @@ else
SST_PROGRESS_FILE=""
fi
-wsrep_log()
-{
- # echo everything to stderr so that it gets into common error log
- # deliberately made to look different from the rest of the log
- local readonly tst="$(date +%Y%m%d\ %H:%M:%S.%N | cut -b -21)"
- echo "WSREP_SST: $* ($tst)" >&2
-}
-
-wsrep_log_error()
-{
- wsrep_log "[ERROR] $*"
-}
-
-wsrep_log_warning()
-{
- wsrep_log "[WARNING] $*"
-}
-
-wsrep_log_info()
-{
- wsrep_log "[INFO] $*"
-}
-
wsrep_cleanup_progress_file()
{
[ -n "$SST_PROGRESS_FILE" -a \
@@ -960,35 +987,139 @@ check_sockets_utils()
}
#
+# Check whether the port is in the "listen" state.
+# The first parameter is the PID of the process that should
+# listen on the port; if it is not known, you can specify
+# an empty string or zero.
+# The second parameter is the port number.
+# The third parameter is a list of the names of utilities
+# (separated by "|") that may listen on this port during the
+# state transfer.
+#
+check_port()
+{
+ local pid="$1"
+ local port="$2"
+ local utils="$3"
+
+ [ -z "$pid" ] || [ $pid -eq 0 ] && pid='[0-9]+'
+
+ local rc=1
+
+ if [ $lsof_available -ne 0 ]; then
+ lsof -Pnl -i ":$port" 2>/dev/null | \
+ grep -q -E "^($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*\\(LISTEN\\)" && rc=0
+ elif [ $sockstat_available -ne 0 ]; then
+ sockstat -p "$port" 2>/dev/null | \
+ grep -q -E "[[:space:]]+($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*[[:space:]]LISTEN" && rc=0
+ elif [ $ss_available -ne 0 ]; then
+ ss -nlpH "( sport = :$port )" 2>/dev/null | \
+ grep -q -E "users:\\(.*\\(\"($utils)[^[:space:]]*\"[^)]*,pid=$pid(,[^)]*)?\\)" && rc=0
+ else
+ wsrep_log_error "unknown sockets utility"
+ exit 2 # ENOENT
+ fi
+
+ return $rc
+}
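
A minimal usage sketch of check_port (the port number and utility list are illustrative):

    # Succeeds only if some socat or nc process is listening on port 4444
    if check_port "" 4444 'socat|nc'; then
        wsrep_log_info "SST streamer is already listening"
    fi
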
+
+#
# If the ssl_dhparams variable is already set, uses that as a source
# of dh parameters for OpenSSL. Otherwise, looks for dhparams.pem in
# the datadir, and creates it there if it can't find the file.
#
check_for_dhparams()
{
- if [ -z "$ssl_dhparams" ]; then
- ssl_dhparams="$DATA/dhparams.pem"
- if [ ! -r "$ssl_dhparams" ]; then
- get_openssl
- if [ -n "$OPENSSL_BINARY" ]; then
- wsrep_log_info "Could not find dhparams file, creating $ssl_dhparams"
- if ! "$OPENSSL_BINARY" dhparam -out "$ssl_dhparams" 2048 >/dev/null 2>&1
- then
- wsrep_log_error "******** ERROR *****************************************"
- wsrep_log_error "* Could not create the dhparams.pem file with OpenSSL. *"
- wsrep_log_error "********************************************************"
- ssl_dhparams=""
- fi
- else
- # Rollback: if openssl is not installed, then use
- # the default parameters:
+ ssl_dhparams="$DATA/dhparams.pem"
+ if [ ! -r "$ssl_dhparams" ]; then
+ get_openssl
+ if [ -n "$OPENSSL_BINARY" ]; then
+ wsrep_log_info "Could not find dhparams file, creating $ssl_dhparams"
+ if ! "$OPENSSL_BINARY" dhparam -out "$ssl_dhparams" 2048 >/dev/null 2>&1
+ then
+ wsrep_log_error "******** ERROR *****************************************"
+ wsrep_log_error "* Could not create the dhparams.pem file with OpenSSL. *"
+ wsrep_log_error "********************************************************"
ssl_dhparams=""
- fi
+ fi
+ else
+ # Rollback: if openssl is not installed, then use
+ # the default parameters:
+ ssl_dhparams=""
fi
fi
}
#
+# Verifies that the certificate was issued by the given CA.
+# Doing this check here lets us generate better error messages.
+#
+# 1st param: path to the CA file.
+# 2nd param: path to the certificate.
+#
+verify_ca_matches_cert()
+{
+ local ca_path="$1"
+ local cert_path="$2"
+
+ # If the openssl utility is not installed, then
+ # we will not do this certificate check:
+ get_openssl
+ if [ -z "$OPENSSL_BINARY" ]; then
+ return
+ fi
+
+ if ! "$OPENSSL_BINARY" verify -verbose -CAfile "$ca_path" "$cert_path" >/dev/null 2>&1
+ then
+ wsrep_log_error "******** FATAL ERROR ********************************************"
+ wsrep_log_error "* The certifcate and CA (certificate authority) do not match. *"
+ wsrep_log_error "* It does not appear that the certificate was issued by the CA. *"
+ wsrep_log_error "* Please check your certificate and CA files. *"
+ wsrep_log_error "*****************************************************************"
+ exit 22
+ fi
+}
+
+#
+# Verifies that the certificate matches the private key.
+# Doing this saves us from having to wait for a timeout that would
+# otherwise occur.
+#
+# 1st param: path to the certificate.
+# 2nd param: path to the private key.
+#
+verify_cert_matches_key()
+{
+ local cert_path="$1"
+ local key_path="$2"
+
+ # If the diff utility is not installed, then
+ # we will not do this certificate check:
+ if [ -z "$(command -v diff)" ]; then
+ return
+ fi
+
+ # If the openssl utility is not installed, then
+ # we will not do this certificate check:
+ get_openssl
+ if [ -z "$OPENSSL_BINARY" ]; then
+ return
+ fi
+
+ # Generate the public key from the cert and the key.
+ # They should match (otherwise we can't create an SSL connection).
+ if ! diff <("$OPENSSL_BINARY" x509 -in "$cert_path" -pubkey -noout 2>/dev/null) \
+ <("$OPENSSL_BINARY" pkey -in "$key_path" -pubout 2>/dev/null) >/dev/null 2>&1
+ then
+ wsrep_log_error "******************* FATAL ERROR ****************"
+ wsrep_log_error "* The certifcate and private key do not match. *"
+ wsrep_log_error "* Please check your certificate and key files. *"
+ wsrep_log_error "************************************************"
+ exit 22
+ fi
+}
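
Both helpers are intended to be called before the transfer command is assembled, for example (file paths are illustrative):

    # Fail fast with a clear message instead of a later SSL handshake timeout
    verify_ca_matches_cert "$DATA/ca.pem" "$DATA/server-cert.pem"
    verify_cert_matches_key "$DATA/server-cert.pem" "$DATA/server-key.pem"
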
+
+#
# Compares two version strings.
# The first parameter is the version to be checked;
# The second parameter is the minimum version required;
@@ -996,22 +1127,22 @@ check_for_dhparams()
#
check_for_version()
{
- y1=${1#*.}
+ y1="${1#*.}"
[ "$y1" = "$1" ] && y1=""
z1=${y1#*.}
[ "$z1" = "$y1" ] && z1=""
- x1=${1%%.*}
- y1=${y1%%.*}
- z1=${z1%%.*}
+ x1="${1%%.*}"
+ y1="${y1%%.*}"
+ z1="${z1%%.*}"
[ -z "$y1" ] && y1=0
[ -z "$z1" ] && z1=0
- y2=${2#*.}
+ y2="${2#*.}"
[ "$y2" = "$2" ] && y2=""
- z2=${y2#*.}
+ z2="${y2#*.}"
[ "$z2" = "$y2" ] && z2=""
- x2=${2%%.*}
- y2=${y2%%.*}
- z2=${z2%%.*}
+ x2="${2%%.*}"
+ y2="${y2%%.*}"
+ z2="${z2%%.*}"
[ -z "$y2" ] && y2=0
[ -z "$z2" ] && z2=0
[ $x1 -lt $x2 ] && return 1
@@ -1021,3 +1152,127 @@ check_for_version()
[ $z1 -lt $z2 ] && return 1
return 0
}
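
check_for_version returns 0 when the first version is at least the second, so callers usually negate it; the mariabackup script below uses it like this:

    # socat releases older than 1.7.3 ship 512-bit dhparams, so generate
    # a 2048-bit dhparams file instead
    if ! check_for_version "$SOCAT_VERSION" '1.7.3'; then
        check_for_dhparams
    fi
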
+
+trim_string()
+{
+ if [ -n "$BASH_VERSION" ]; then
+ local pattern="[![:space:]${2:-}]"
+ local x="${1#*$pattern}"
+ local z=${#1}
+ x=${#x}
+ if [ $x -ne $z ]; then
+ local y="${1%$pattern*}"
+ y=${#y}
+ x=$(( z-x-1 ))
+ y=$(( y-x+1 ))
+ printf '%s' "${1:$x:$y}"
+ else
+ printf ''
+ fi
+ else
+ local pattern="[[:space:]${2:-}]"
+ echo "$1" | sed -E "s/^$pattern+|$pattern+\$//g"
+ fi
+}
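
trim_string removes surrounding whitespace plus any extra characters passed in the second argument; the mariabackup script below uses it to normalize sockopt:

    # Strip stray spaces/commas, then re-add a single leading comma
    # only if a non-empty option string remains
    sockopt=$(trim_string "$sockopt" ',')
    [ -n "$sockopt" ] && sockopt=",$sockopt"
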
+
+#
+# Check whether a process is still running.
+# The first parameter contains the name of the PID file.
+# The second parameter is a flag indicating whether the PID
+# file should be deleted: if it is set to 1 and the process
+# is no longer running, the corresponding PID file is removed.
+# This function also sets the CHECK_PID variable to zero
+# if the process has already exited, or writes the PID
+# of the process there if it is still running.
+#
+check_pid()
+{
+ local pid_file="$1"
+ local remove=${2:-0}
+ if [ -r "$pid_file" ]; then
+ local pid=$(cat "$pid_file" 2>/dev/null)
+ if [ -n "$pid" ]; then
+ if [ $pid -ne 0 ]; then
+ if ps -p "$pid" >/dev/null 2>&1; then
+ CHECK_PID=$pid
+ return 0
+ fi
+ fi
+ fi
+ if [ $remove -eq 1 ]; then
+ rm -f "$pid_file"
+ fi
+ fi
+ CHECK_PID=0
+ return 1
+}
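
A short usage sketch (the PID file path is illustrative); note that CHECK_PID is set as a side effect:

    if check_pid "$DATA/xtrabackup_pid" 0; then
        wsrep_log_info "backup still running, PID $CHECK_PID"
    fi
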
+
+#
+# Check whether the process with the specified PID is still
+# running and, if so, kill it by sending SIGTERM (using the
+# "kill" operation).
+# The first parameter contains the PID of the process.
+# The second and third parameters (both optional) are the names
+# of the PID and the configuration files, which should be removed
+# after the process ends.
+# If the first parameter (PID of the process) is zero, then
+# the function immediately deletes the PID and the configuration
+# files (if specified), without any additional checks.
+#
+cleanup_pid()
+{
+ local pid="$1"
+ local pid_file="${2:-}"
+ local config="${3:-}"
+
+ if [ $pid -ne 0 ]; then
+ if ps -p $pid >/dev/null 2>&1; then
+ if kill $pid >/dev/null 2>&1; then
+ sleep 0.5
+ local round=0
+ local force=0
+ while ps -p $pid >/dev/null 2>&1; do
+ sleep 1
+ round=$(( round+1 ))
+ if [ $round -eq 16 ]; then
+ if [ $force -eq 0 ]; then
+ round=8
+ force=1
+ kill -9 $pid >/dev/null 2>&1
+ else
+ return 1;
+ fi
+ fi
+ done
+ elif ps -p $pid >/dev/null 2>&1; then
+ wsrep_log_warning "Unable to kill PID=$pid ($pid_file)"
+ return 1
+ fi
+ fi
+ fi
+
+ [ -n "$pid_file" ] && [ -f "$pid_file" ] && rm -f "$pid_file"
+ [ -n "$config" ] && [ -f "$config" ] && rm -f "$config"
+
+ return 0
+}
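
Combined, the two helpers give the cleanup pattern used by the mariabackup script later in this patch:

    if [ -n "$BACKUP_PID" ]; then
        if check_pid "$BACKUP_PID" 1; then
            wsrep_log_error "mariabackup process is still running. Killing..."
            cleanup_pid $CHECK_PID "$BACKUP_PID"
        fi
    fi
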
+
+nproc=""
+
+get_proc()
+{
+ if [ -z "$nproc" ]; then
+ set +e
+ if [ "$OS" = 'Linux' ]; then
+ nproc=$(grep -c processor /proc/cpuinfo 2>/dev/null)
+ elif [ "$OS" = 'Darwin' -o "$OS" = 'FreeBSD' ]; then
+ nproc=$(sysctl -n hw.ncpu)
+ fi
+ if [ -z "$nproc" ] || [ $nproc -eq 0 ]; then
+ nproc=1
+ fi
+ set -e
+ fi
+}
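
get_proc computes nproc only once and caches it; the rsync script later in this patch falls back to it when backup-threads is not configured:

    backup_threads=$(parse_cnf "--mysqld|sst" 'backup-threads')
    if [ -z "$backup_threads" ]; then
        get_proc
        backup_threads=$nproc
    fi
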
diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh
index 5618c704dbc..7f97d9e8dea 100644
--- a/scripts/wsrep_sst_mariabackup.sh
+++ b/scripts/wsrep_sst_mariabackup.sh
@@ -29,13 +29,10 @@ eformat=""
ekey=""
ekeyfile=""
encrypt=0
-nproc=1
ecode=0
ssyslog=""
ssystag=""
-MARIABACKUP_PID=""
-SST_PORT=""
-REMOTEIP=""
+BACKUP_PID=""
tcert=""
tpem=""
tkey=""
@@ -55,7 +52,7 @@ ib_home_dir=""
ib_log_dir=""
ib_undo_dir=""
-sfmt="tar"
+sfmt=""
strmcmd=""
tfmt=""
tcmd=""
@@ -79,6 +76,11 @@ compress='none'
compress_chunk=""
compress_threads=""
+backup_threads=""
+
+encrypt_threads=""
+encrypt_chunk=""
+
readonly SECRET_TAG="secret"
# Required for backup locks
@@ -92,12 +94,11 @@ fi
pcmd="pv $pvopts"
declare -a RC
-MARIABACKUP_BIN="$(command -v mariabackup)"
-if [ ! -x "$MARIABACKUP_BIN" ]; then
- wsrep_log_error 'mariabackup binary not found in $PATH'
+BACKUP_BIN="$(command -v mariabackup)"
+if [ ! -x "$BACKUP_BIN" ]; then
+ wsrep_log_error 'mariabackup binary not found in path'
exit 42
fi
-MBSTREAM_BIN=mbstream
DATA="$WSREP_SST_OPT_DATA"
INFO_FILE="xtrabackup_galera_info"
@@ -111,7 +112,8 @@ INNOBACKUPLOG="$DATA/mariabackup.backup.log"
# Setting the path for ss and ip
export PATH="/usr/sbin:/sbin:$PATH"
-timeit(){
+timeit()
+{
local stage="$1"
shift
local cmd="$@"
@@ -201,6 +203,12 @@ get_keys()
else
ecmd="xbcrypt --encrypt-algo='$ealgo' --encrypt-key='$ekey'"
fi
+ if [ -n "$encrypt_threads" ]; then
+ ecmd="$ecmd --encrypt-threads=$encrypt_threads"
+ fi
+ if [ -n "$encrypt_chunk" ]; then
+ ecmd="$ecmd --encrypt-chunk-size=$encrypt_chunk"
+ fi
else
wsrep_log_error "Unknown encryption format='$eformat'"
exit 2
@@ -215,8 +223,6 @@ get_keys()
get_transfer()
{
- TSST_PORT="$SST_PORT"
-
if [ $tfmt = 'nc' ]; then
wsrep_log_info "Using netcat as streamer"
wsrep_check_programs nc
@@ -238,7 +244,7 @@ get_transfer()
wsrep_log_info "Using traditional netcat as streamer"
tcmd="$tcmd -l -p"
fi
- tcmd="$tcmd $TSST_PORT"
+ tcmd="$tcmd $SST_PORT"
else
# Check to see if netcat supports the '-N' flag.
# -N Shutdown the network socket after EOF on stdin
@@ -260,7 +266,7 @@ get_transfer()
wsrep_log_info "Using traditional netcat as streamer"
tcmd="$tcmd -q0"
fi
- tcmd="$tcmd $WSREP_SST_OPT_HOST_UNESCAPED $TSST_PORT"
+ tcmd="$tcmd $WSREP_SST_OPT_HOST_UNESCAPED $SST_PORT"
fi
else
tfmt='socat'
@@ -268,25 +274,68 @@ get_transfer()
wsrep_log_info "Using socat as streamer"
wsrep_check_programs socat
- if [ $encrypt -eq 2 -o $encrypt -eq 3 ] && ! socat -V | grep -q -F 'WITH_OPENSSL 1'; then
- wsrep_log_error "Encryption requested, but socat is not OpenSSL enabled (encrypt=$encrypt)"
- exit 2
+ if [ -n "$sockopt" ]; then
+ sockopt=$(trim_string "$sockopt" ',')
+ if [ -n "$sockopt" ]; then
+ sockopt=",$sockopt"
+ fi
fi
- # Determine the socat version
- SOCAT_VERSION=$(socat -V 2>&1 | grep -m1 -oe '[0-9]\.[0-9][\.0-9]*')
- if [ -z "$SOCAT_VERSION" ]; then
- wsrep_log_error "******** FATAL ERROR ******************"
- wsrep_log_error "* Cannot determine the socat version. *"
- wsrep_log_error "***************************************"
+ # Add an option for ipv6 if needed:
+ if [ $WSREP_SST_OPT_HOST_IPv6 -eq 1 ]; then
+ # If sockopt contains 'pf=ip6' somewhere in the middle,
+ # this will not interfere with socat, but exclude the trivial
+ # cases when sockopt contains 'pf=ip6' as prefix or suffix:
+ if [ "$sockopt" = "${sockopt#,pf=ip6}" -a \
+ "$sockopt" = "${sockopt%,pf=ip6}" ]
+ then
+ sockopt=",pf=ip6$sockopt"
+ fi
+ fi
+
+ if [ $encrypt -lt 2 ]; then
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ tcmd="socat -u TCP-LISTEN:$SST_PORT,reuseaddr$sockopt stdio"
+ else
+ tcmd="socat -u stdio TCP:$REMOTEIP:$SST_PORT$sockopt"
+ fi
+ return
+ fi
+
+ if ! socat -V | grep -q -F 'WITH_OPENSSL 1'; then
+ wsrep_log_error "******** FATAL ERROR ************************************************ "
+ wsrep_log_error "* Encryption requested, but socat is not OpenSSL enabled (encrypt=$encrypt) *"
+ wsrep_log_error "********************************************************************* "
exit 2
fi
- if ! check_for_version "$SOCAT_VERSION" "1.7.3"; then
- # socat versions < 1.7.3 will have 512-bit dhparams (too small)
- # so create 2048-bit dhparams and send that as a parameter:
- check_for_dhparams
- sockopt=",dhparam='$ssl_dhparams'$sockopt"
+ local action='Decrypting'
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ tcmd="socat -u openssl-listen:$SST_PORT,reuseaddr"
+ else
+ tcmd="socat -u stdio openssl-connect:$REMOTEIP:$SST_PORT"
+ action='Encrypting'
+ fi
+
+ if [ "${sockopt#*,dhparam=}" != "$sockopt" ]; then
+ if [ -z "$ssl_dhparams" ]; then
+ # Determine the socat version
+ SOCAT_VERSION=$(socat -V 2>&1 | grep -m1 -oe '[0-9]\.[0-9][\.0-9]*')
+ if [ -z "$SOCAT_VERSION" ]; then
+ wsrep_log_error "******** FATAL ERROR ******************"
+ wsrep_log_error "* Cannot determine the socat version. *"
+ wsrep_log_error "***************************************"
+ exit 2
+ fi
+ if ! check_for_version "$SOCAT_VERSION" '1.7.3'; then
+ # socat versions < 1.7.3 will have 512-bit dhparams (too small)
+ # so create 2048-bit dhparams and send that as a parameter:
+ check_for_dhparams
+ fi
+ fi
+ if [ -n "$ssl_dhparams" ]; then
+ tcmd="$tcmd,dhparam='$ssl_dhparams'"
+ fi
fi
if [ $encrypt -eq 2 ]; then
@@ -295,52 +344,60 @@ get_transfer()
wsrep_log_error "Both PEM and CRT files required"
exit 22
fi
- stagemsg="$stagemsg-OpenSSL-Encrypted-2"
- if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- wsrep_log_info "Decrypting with cert=${tpem}, cafile=${tcert}"
- tcmd="socat -u openssl-listen:$TSST_PORT,reuseaddr,cert='$tpem',cafile='$tcert'$sockopt stdio"
- else
- wsrep_log_info "Encrypting with cert=${tpem}, cafile=${tcert}"
- tcmd="socat -u stdio openssl-connect:$REMOTEIP:$TSST_PORT,cert='$tpem',cafile='$tcert'$sockopt"
+ if [ ! -r "$tpem" -o ! -r "$tcert" ]; then
+ wsrep_log_error "Both PEM and CRT files must be readable"
+ exit 22
fi
- elif [ $encrypt -eq 3 ]; then
+ verify_ca_matches_cert "$tcert" "$tpem"
+ tcmd="$tcmd,cert='$tpem',cafile='$tcert'$sockopt"
+ stagemsg="$stagemsg-OpenSSL-Encrypted-2"
+ wsrep_log_info "$action with cert=$tpem, cafile=$tcert"
+ elif [ $encrypt -eq 3 -o $encrypt -eq 4 ]; then
wsrep_log_info "Using openssl based encryption with socat: with key and crt"
if [ -z "$tpem" -o -z "$tkey" ]; then
wsrep_log_error "Both certificate and key files required"
exit 22
fi
+ if [ ! -r "$tpem" -o ! -r "$tkey" ]; then
+ wsrep_log_error "Both certificate and key files must be readable"
+ exit 22
+ fi
+ verify_cert_matches_key "$tpem" "$tkey"
stagemsg="$stagemsg-OpenSSL-Encrypted-3"
if [ -z "$tcert" ]; then
- # no verification
- if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- wsrep_log_info "Decrypting with cert=${tpem}, key=${tkey}, verify=0"
- tcmd="socat -u openssl-listen:$TSST_PORT,reuseaddr,cert='$tpem',key='$tkey',verify=0$sockopt stdio"
- else
- wsrep_log_info "Encrypting with cert=${tpem}, key=${tkey}, verify=0"
- tcmd="socat -u stdio openssl-connect:$REMOTEIP:$TSST_PORT,cert='$tpem',key='$tkey',verify=0$sockopt"
+ if [ $encrypt -eq 4 ]; then
+ wsrep_log_error "Peer certificate required if encrypt=4"
+ exit 22
fi
+ # no verification
+ tcmd="$tcmd,cert='$tpem',key='$tkey',verify=0$sockopt"
+ wsrep_log_info "$action with cert=$tpem, key=$tkey, verify=0"
else
# CA verification
- if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- wsrep_log_info "Decrypting with cert=${tpem}, key=${tkey}, cafile=${tcert}"
- tcmd="socat -u openssl-listen:$TSST_PORT,reuseaddr,cert='$tpem',key='$tkey',cafile='$tcert'$sockopt stdio"
+ if [ ! -r "$tcert" ]; then
+ wsrep_log_error "Certificate file must be readable"
+ exit 22
+ fi
+ verify_ca_matches_cert "$tcert" "$tpem"
+ if [ -n "$WSREP_SST_OPT_REMOTE_USER" ]; then
+ CN_option=",commonname='$WSREP_SST_OPT_REMOTE_USER'"
+ elif [ $encrypt -eq 4 ]; then
+ CN_option=",commonname=''"
+ elif is_local_ip "$WSREP_SST_OPT_HOST_UNESCAPED"; then
+ CN_option=',commonname=localhost'
else
- CN_option=""
- if [ -n "$WSREP_SST_OPT_REMOTE_USER" ]; then
- CN_option=",commonname='$WSREP_SST_OPT_REMOTE_USER'"
- elif is_local_ip "$WSREP_SST_OPT_HOST_UNESCAPED"; then
- CN_option=',commonname=localhost'
- fi
- wsrep_log_info "Encrypting with cert=${tpem}, key=${tkey}, cafile=${tcert}"
- tcmd="socat -u stdio openssl-connect:$REMOTEIP:$TSST_PORT,cert='$tpem',key='$tkey',cafile='$tcert'$CN_option$sockopt"
+ CN_option=",commonname='$WSREP_SST_OPT_HOST_UNSECAPED'"
fi
+ tcmd="$tcmd,cert='$tpem',key='$tkey',cafile='$tcert'$CN_option$sockopt"
+ wsrep_log_info "$action with cert=$tpem, key=$tkey, cafile=$tcert"
fi
else
- if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- tcmd="socat -u TCP-LISTEN:$TSST_PORT,reuseaddr$sockopt stdio"
- else
- tcmd="socat -u stdio TCP:$REMOTEIP:$TSST_PORT$sockopt"
- fi
+ wsrep_log_info "Unknown encryption mode: encrypt=$encrypt"
+ exit 22
+ fi
+
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ tcmd="$tcmd stdio"
fi
fi
}
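
For example, with encrypt=2 on the joiner side the command assembled above ends up roughly as follows (the port and certificate paths are illustrative):

    socat -u openssl-listen:4444,reuseaddr,cert='/etc/ssl/galera/server.pem',cafile='/etc/ssl/galera/ca.pem' stdio
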
@@ -348,7 +405,7 @@ get_transfer()
get_footprint()
{
pushd "$WSREP_SST_OPT_DATA" 1>/dev/null
- payload=$(find . -regex '.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' -type f -print0 | du --files0-from=- --block-size=1 -c | awk 'END { print $1 }')
+ payload=$(find . -regex '.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' -type f -print0 | du --files0-from=- --block-size=1 -c -s | awk 'END { print $1 }')
if [ "$compress" != 'none' ]; then
# QuickLZ has around 50% compression ratio
# When compression/compaction used, the progress is only an approximate.
@@ -412,10 +469,10 @@ read_cnf()
tpem=$(parse_cnf 'sst' 'tcert')
tkey=$(parse_cnf 'sst' 'tkey')
fi
- if [ "$tmode" != 'DISABLED' ]
- then # backward-incompatible behavior
- if [ -z "$tpem" -a -z "$tkey" -a -z "$tcert" ]
- then # no old-style SSL config in [sst]
+ if [ "$tmode" != 'DISABLED' ]; then
+ # backward-incompatible behavior
+ if [ -z "$tpem" -a -z "$tkey" -a -z "$tcert" ]; then
+ # no old-style SSL config in [sst]
check_server_ssl_config
fi
if [ 0 -eq $encrypt -a -n "$tpem" -a -n "$tkey" ]
@@ -441,7 +498,7 @@ read_cnf()
sockopt=$(parse_cnf sst sockopt "")
progress=$(parse_cnf sst progress "")
ttime=$(parse_cnf sst time 0)
- cpat='.*galera\.cache$\|.*sst_in_progress$\|.*\.sst$\|.*gvwstate\.dat$\|.*grastate\.dat$\|.*\.err$\|.*\.log$\|.*RPM_UPGRADE_MARKER$\|.*RPM_UPGRADE_HISTORY$'
+ cpat='.*\.pem$\|.*galera\.cache$\|.*sst_in_progress$\|.*\.sst$\|.*gvwstate\.dat$\|.*grastate\.dat$\|.*\.err$\|.*\.log$\|.*RPM_UPGRADE_MARKER$\|.*RPM_UPGRADE_HISTORY$'
[ "$OS" = 'FreeBSD' ] && cpat=$(echo "$cpat" | sed 's/\\|/|/g')
cpat=$(parse_cnf sst cpat "$cpat")
scomp=$(parse_cnf sst compressor "")
@@ -476,34 +533,38 @@ read_cnf()
compress_threads=$(parse_cnf "$encgroups" 'compress-threads')
fi
fi
+
+ backup_threads=$(parse_cnf "$encgroups" 'backup-threads')
+
+ if [ "$eformat" = 'xbcrypt' ]; then
+ encrypt_threads=$(parse_cnf "$encgroups" 'encrypt-threads')
+ encrypt_chunk=$(parse_cnf "$encgroups" 'encrypt-chunk-size')
+ fi
}
get_stream()
{
if [ "$sfmt" = 'mbstream' -o "$sfmt" = 'xbstream' ]; then
- wsrep_log_info "Streaming with ${sfmt}"
+ sfmt='mbstream'
+ STREAM_BIN="$(command -v mbstream)"
+ if [ -z "$STREAM_BIN" ]; then
+ wsrep_log_error "Streaming with $sfmt, but $sfmt not found in path"
+ exit 42
+ fi
if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- strmcmd="$MBSTREAM_BIN -x"
+ strmcmd="'$STREAM_BIN' -x"
else
- strmcmd="$MBSTREAM_BIN -c '$INFO_FILE'"
+ strmcmd="'$STREAM_BIN' -c '$INFO_FILE'"
fi
else
- sfmt="tar"
- wsrep_log_info "Streaming with tar"
- if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]]; then
- strmcmd="tar xfi -"
+ sfmt='tar'
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ strmcmd='tar xfi -'
else
strmcmd="tar cf - '$INFO_FILE'"
fi
fi
-}
-
-get_proc()
-{
- set +e
- nproc=$(grep -c processor /proc/cpuinfo)
- [ -z $nproc -o $nproc -eq 0 ] && nproc=1
- set -e
+ wsrep_log_info "Streaming with $sfmt"
}
sig_joiner_cleanup()
@@ -512,48 +573,7 @@ sig_joiner_cleanup()
[ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
}
-cleanup_joiner()
-{
- # Since this is invoked just after exit NNN
- local estatus=$?
- if [ $estatus -ne 0 ]; then
- wsrep_log_error "Cleanup after exit with status:$estatus"
- elif [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
- wsrep_log_info "Removing the sst_in_progress file"
- wsrep_cleanup_progress_file
- fi
- if [ -n "$progress" -a -p "$progress" ]; then
- wsrep_log_info "Cleaning up fifo file $progress"
- rm "$progress"
- fi
-
- if [ -n "$STATDIR" ]; then
- [ -d "$STATDIR" ] && rm -rf "$STATDIR"
- fi
-
- # Final cleanup
- pgid=$(ps -o pgid= $$ | grep -o '[0-9]*')
-
- # This means no setsid done in mysqld.
- # We don't want to kill mysqld here otherwise.
- if [ $$ -eq $pgid ]; then
- # This means a signal was delivered to the process.
- # So, more cleanup.
- if [ $estatus -ge 128 ]; then
- kill -KILL -$$ || true
- fi
- fi
-
- exit $estatus
-}
-
-check_pid()
-{
- local pid_file="$1"
- [ -r "$pid_file" ] && ps -p $(cat "$pid_file") 2>&1 >/dev/null
-}
-
-cleanup_donor()
+cleanup_at_exit()
{
# Since this is invoked just after exit NNN
local estatus=$?
@@ -561,16 +581,19 @@ cleanup_donor()
wsrep_log_error "Cleanup after exit with status:$estatus"
fi
- if [ -n "$MARIABACKUP_PID" ]; then
- if check_pid $MARIABACKUP_PID
- then
- wsrep_log_error "mariabackup process is still running. Killing..."
- kill_mariabackup
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ wsrep_log_info "Removing the sst_in_progress file"
+ wsrep_cleanup_progress_file
+ else
+ if [ -n "$BACKUP_PID" ]; then
+ if check_pid "$BACKUP_PID" 1; then
+ wsrep_log_error "mariabackup process is still running. Killing..."
+ cleanup_pid $CHECK_PID "$BACKUP_PID"
+ fi
fi
+ [ -f "$DATA/$IST_FILE" ] && rm -f "$DATA/$IST_FILE"
fi
- [ -f "$DATA/$IST_FILE" ] && rm -f "$DATA/$IST_FILE"
-
if [ -n "$progress" -a -p "$progress" ]; then
wsrep_log_info "Cleaning up fifo file $progress"
rm -f "$progress" || true
@@ -578,8 +601,14 @@ cleanup_donor()
wsrep_log_info "Cleaning up temporary directories"
- [ -n "$xtmpdir" -a -d "$xtmpdir" ] && rm -rf "$xtmpdir" || true
- [ -n "$itmpdir" -a -d "$itmpdir" ] && rm -rf "$itmpdir" || true
+ if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
+ if [ -n "$STATDIR" ]; then
+ [ -d "$STATDIR" ] && rm -rf "$STATDIR"
+ fi
+ else
+ [ -n "$xtmpdir" -a -d "$xtmpdir" ] && rm -rf "$xtmpdir" || true
+ [ -n "$itmpdir" -a -d "$itmpdir" ] && rm -rf "$itmpdir" || true
+ fi
# Final cleanup
pgid=$(ps -o pgid= $$ | grep -o '[0-9]*')
@@ -590,21 +619,13 @@ cleanup_donor()
# This means a signal was delivered to the process.
# So, more cleanup.
if [ $estatus -ge 128 ]; then
- kill -KILL -$$ || true
+ kill -KILL -- -$$ || true
fi
fi
exit $estatus
}
-kill_mariabackup()
-{
- local PID=$(cat "$MARIABACKUP_PID")
- [ -n "$PID" -a "0" != "$PID" ] && kill $PID && (kill $PID && kill -9 $PID) || :
- wsrep_log_info "Removing mariabackup pid file ($MARIABACKUP_PID)"
- rm -f "$MARIABACKUP_PID" || true
-}
-
setup_ports()
{
SST_PORT="$WSREP_SST_OPT_PORT"
@@ -615,51 +636,17 @@ setup_ports()
fi
}
-check_port()
-{
- local PORT="$1"
- local UTILS="$2"
-
- local port_info is_util
-
- if [ $lsof_available -ne 0 ]; then
- port_info=$(lsof -i ":$PORT" -Pn 2>/dev/null | \
- grep -F '(LISTEN)')
- is_util=$(echo "$port_info" | \
- grep -E "^($UTILS)[^[:space:]]*[[:space:]]+[0-9]+[[:space:]]+")
- elif [ $sockstat_available -ne 0 ]; then
- port_info=$(sockstat -p "$PORT" 2>/dev/null | \
- grep -F 'LISTEN')
- is_util=$(echo "$port_info" | \
- grep -E "[[:space:]]+($UTILS)[^[:space:]]*[[:space:]]+[0-9]+[[:space:]]+")
- elif [ $ss_available -ne 0 ]; then
- port_info=$(ss -H -p -n -l "( sport = :$PORT )" 2>/dev/null)
- is_util=$(echo "$port_info" | \
- grep -E "users:\\(.*\\(\"($UTILS)[^[:space:]]*\".*\<pid=[0-9]+\>.*\\)")
- else
- wsrep_log_error "unknown sockets utility"
- exit 2 # ENOENT
- fi
-
- if [ -z "$is_util" ]; then
- return 1
- fi
-
- return 0
-}
-
-# waits ~10 seconds for nc to open the port and then reports ready
-# (regardless of timeout)
+#
+# Waits ~30 seconds for socat or nc to open the port and
+# then reports ready, regardless of timeout.
+#
wait_for_listen()
{
local PORT="$1"
local ADDR="$2"
local MODULE="$3"
-
- for i in {1..50}
- do
- if check_port "$PORT" 'socat|nc'
- then
+ for i in {1..150}; do
+ if check_port "" "$PORT" 'socat|nc'; then
break
fi
sleep 0.2
@@ -675,8 +662,8 @@ check_extra()
if [ "$thread_handling" = 'pool-of-threads' ]; then
local eport=$(parse_cnf '--mysqld' 'extra-port')
if [ -n "$eport" ]; then
- # mariabackup works only locally, hence,
- # setting host to 127.0.0.1 unconditionally:
+ # mariabackup works only locally.
+ # Hence, setting host to 127.0.0.1 unconditionally:
wsrep_log_info "SST through extra_port $eport"
INNOEXTRA="$INNOEXTRA --host=127.0.0.1 --port=$eport"
use_socket=0
@@ -792,30 +779,29 @@ monitor_process()
local sst_stream_pid=$1
while true ; do
- if ! ps -p "$WSREP_SST_OPT_PARENT" &>/dev/null; then
+ if ! ps -p "$WSREP_SST_OPT_PARENT" >/dev/null 2>&1; then
wsrep_log_error "Parent mysqld process (PID: $WSREP_SST_OPT_PARENT) terminated unexpectedly."
+ kill -- -"$WSREP_SST_OPT_PARENT"
exit 32
fi
- if ! ps -p "$sst_stream_pid" &>/dev/null; then
+ if ! ps -p "$sst_stream_pid" >/dev/null 2>&1; then
break
fi
sleep 0.1
done
}
-wsrep_check_programs "$MARIABACKUP_BIN"
-
[ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
if [ "$WSREP_SST_OPT_ROLE" != 'joiner' -a "$WSREP_SST_OPT_ROLE" != 'donor' ]; then
- wsrep_log_error "Invalid role ${WSREP_SST_OPT_ROLE}"
+ wsrep_log_error "Invalid role '$WSREP_SST_OPT_ROLE'"
exit 22
fi
read_cnf
setup_ports
-if "$MARIABACKUP_BIN" --help 2>/dev/null | grep -qw -- '--version-check'; then
+if "$BACKUP_BIN" --help 2>/dev/null | grep -qw -- '--version-check'; then
disver='--no-version-check'
fi
@@ -838,7 +824,6 @@ INNODB_DATA_HOME_DIR=$(pwd -P)
cd "$OLD_PWD"
if [ $ssyslog -eq 1 ]; then
-
if [ -n "$(command -v logger)" ]; then
wsrep_log_info "Logging all stderr of SST/mariabackup to syslog"
@@ -856,70 +841,65 @@ if [ $ssyslog -eq 1 ]; then
else
wsrep_log_error "logger not in path: $PATH. Ignoring"
fi
-
INNOAPPLY="2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-apply"
INNOMOVE="2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move"
INNOBACKUP="2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)"
-
else
+ if [ $sstlogarchive -eq 1 ]
+ then
+ ARCHIVETIMESTAMP=$(date "+%Y.%m.%d-%H.%M.%S.%N")
-if [ $sstlogarchive -eq 1 ]
-then
- ARCHIVETIMESTAMP=$(date "+%Y.%m.%d-%H.%M.%S.%N")
-
- if [ -n "$sstlogarchivedir" ]; then
- if [ ! -d "$sstlogarchivedir" ]; then
- mkdir -p "$sstlogarchivedir"
+ if [ -n "$sstlogarchivedir" ]; then
+ if [ ! -d "$sstlogarchivedir" ]; then
+ mkdir -p "$sstlogarchivedir"
+ fi
fi
- fi
- if [ -e "$INNOAPPLYLOG" ]
- then
- if [ -n "$sstlogarchivedir" ]
+ if [ -e "$INNOAPPLYLOG" ]
then
- newfile=$(basename "$INNOAPPLYLOG")
- newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
- else
- newfile="$INNOAPPLYLOG.$ARCHIVETIMESTAMP"
+ if [ -n "$sstlogarchivedir" ]
+ then
+ newfile=$(basename "$INNOAPPLYLOG")
+ newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
+ else
+ newfile="$INNOAPPLYLOG.$ARCHIVETIMESTAMP"
+ fi
+ wsrep_log_info "Moving '$INNOAPPLYLOG' to '$newfile'"
+ mv "$INNOAPPLYLOG" "$newfile"
+ gzip "$newfile"
fi
- wsrep_log_info "Moving '$INNOAPPLYLOG' to '$newfile'"
- mv "$INNOAPPLYLOG" "$newfile"
- gzip "$newfile"
- fi
- if [ -e "$INNOMOVELOG" ]
- then
- if [ -n "$sstlogarchivedir" ]
+ if [ -e "$INNOMOVELOG" ]
then
- newfile=$(basename "$INNOMOVELOG")
- newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
- else
- newfile="$INNOMOVELOG.$ARCHIVETIMESTAMP"
+ if [ -n "$sstlogarchivedir" ]
+ then
+ newfile=$(basename "$INNOMOVELOG")
+ newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
+ else
+ newfile="$INNOMOVELOG.$ARCHIVETIMESTAMP"
+ fi
+ wsrep_log_info "Moving '$INNOMOVELOG' to '$newfile'"
+ mv "$INNOMOVELOG" "$newfile"
+ gzip "$newfile"
fi
- wsrep_log_info "Moving '$INNOMOVELOG' to '$newfile'"
- mv "$INNOMOVELOG" "$newfile"
- gzip "$newfile"
- fi
- if [ -e "$INNOBACKUPLOG" ]
- then
- if [ -n "$sstlogarchivedir" ]
+ if [ -e "$INNOBACKUPLOG" ]
then
- newfile=$(basename "$INNOBACKUPLOG")
- newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
- else
- newfile="$INNOBACKUPLOG.$ARCHIVETIMESTAMP"
+ if [ -n "$sstlogarchivedir" ]
+ then
+ newfile=$(basename "$INNOBACKUPLOG")
+ newfile="$sstlogarchivedir/$newfile.$ARCHIVETIMESTAMP"
+ else
+ newfile="$INNOBACKUPLOG.$ARCHIVETIMESTAMP"
+ fi
+ wsrep_log_info "Moving '$INNOBACKUPLOG' to '$newfile'"
+ mv "$INNOBACKUPLOG" "$newfile"
+ gzip "$newfile"
fi
- wsrep_log_info "Moving '$INNOBACKUPLOG' to '$newfile'"
- mv "$INNOBACKUPLOG" "$newfile"
- gzip "$newfile"
fi
-fi
-
- INNOAPPLY="&> '$INNOAPPLYLOG'"
- INNOMOVE="&> '$INNOMOVELOG'"
+ INNOAPPLY="> '$INNOAPPLYLOG' 2>&1"
+ INNOMOVE="> '$INNOMOVELOG' 2>&1"
INNOBACKUP="2> '$INNOBACKUPLOG'"
-
fi
setup_commands()
@@ -928,9 +908,9 @@ setup_commands()
if [ -n "$WSREP_SST_OPT_MYSQLD" ]; then
mysqld_args="--mysqld-args $WSREP_SST_OPT_MYSQLD"
fi
- INNOAPPLY="$MARIABACKUP_BIN --prepare $disver $iapts $INNOEXTRA --target-dir='$DATA' --datadir='$DATA' $mysqld_args $INNOAPPLY"
- INNOMOVE="$MARIABACKUP_BIN $WSREP_SST_OPT_CONF --move-back $disver $impts --force-non-empty-directories --target-dir='$DATA' --datadir='${TDATA:-$DATA}' $INNOMOVE"
- INNOBACKUP="$MARIABACKUP_BIN $WSREP_SST_OPT_CONF --backup $disver $iopts $tmpopts $INNOEXTRA --galera-info --stream='$sfmt' --target-dir='$itmpdir' --datadir='$DATA' $mysqld_args $INNOBACKUP"
+ INNOAPPLY="$BACKUP_BIN --prepare $disver $iapts $INNOEXTRA --target-dir='$DATA' --datadir='$DATA' $mysqld_args $INNOAPPLY"
+ INNOMOVE="$BACKUP_BIN $WSREP_SST_OPT_CONF --move-back $disver $impts --force-non-empty-directories --target-dir='$DATA' --datadir='${TDATA:-$DATA}' $INNOMOVE"
+ INNOBACKUP="$BACKUP_BIN $WSREP_SST_OPT_CONF --backup $disver $iopts $tmpopts $INNOEXTRA --galera-info --stream=$sfmt --target-dir='$itmpdir' --datadir='$DATA' $mysqld_args $INNOBACKUP"
}
get_stream
@@ -938,7 +918,7 @@ get_transfer
if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]
then
- trap cleanup_donor EXIT
+ trap cleanup_at_exit EXIT
if [ $WSREP_SST_OPT_BYPASS -eq 0 ]
then
@@ -951,12 +931,15 @@ then
tmpdir=$(parse_cnf "$encgroups" 'tmpdir')
if [ -z "$tmpdir" ]; then
xtmpdir="$(mktemp -d)"
- tmpopts="--tmpdir='$xtmpdir'"
- wsrep_log_info "Using $xtmpdir as mariabackup temporary directory"
+ else
+ xtmpdir=$(mktemp '-d' "--tmpdir=$tmpdir")
fi
+ wsrep_log_info "Using '$xtmpdir' as mariabackup temporary directory"
+ tmpopts="--tmpdir='$xtmpdir'"
+
itmpdir="$(mktemp -d)"
- wsrep_log_info "Using $itmpdir as mariabackup temporary directory"
+ wsrep_log_info "Using '$itmpdir' as mariabackup working directory"
usrst=0
if [ -n "$WSREP_SST_OPT_USER" ]; then
@@ -997,9 +980,9 @@ then
send_donor "$DATA" "$stagemsg-gtid"
+    # Restore the transport command to its original state
tcmd="$ttcmd"
- # Restore the transport commmand to its original state
if [ -n "$progress" ]; then
get_footprint
tcmd="$pcmd | $tcmd"
@@ -1011,7 +994,7 @@ then
wsrep_log_info "Sleeping before data transfer for SST"
sleep 10
- wsrep_log_info "Streaming the backup to joiner at ${REMOTEIP}:${SST_PORT}"
+ wsrep_log_info "Streaming the backup to joiner at $REMOTEIP:$SST_PORT"
# Add compression to the head of the stream (if specified)
if [ -n "$scomp" ]; then
@@ -1023,33 +1006,37 @@ then
tcmd="$ecmd | $tcmd"
fi
- iopts="$iopts --databases-exclude='lost+found'"
+ iopts="--databases-exclude='lost+found' $iopts"
if [ ${FORCE_FTWRL:-0} -eq 1 ]; then
- wsrep_log_info "Forcing FTWRL due to environment variable FORCE_FTWRL equal to $FORCE_FTWRL"
- iopts="$iopts --no-backup-locks"
+ wsrep_log_info "Forcing FTWRL due to environment variable FORCE_FTWRL equal to $FORCE_FTWRL"
+ iopts="--no-backup-locks $iopts"
fi
# if compression is enabled for backup files, then add the
# appropriate options to the mariabackup command line:
if [ "$compress" != 'none' ]; then
- iopts="$iopts --compress${compress:+=$compress}"
+ iopts="--compress${compress:+=$compress} $iopts"
if [ -n "$compress_threads" ]; then
- iopts="$iopts --compress-threads=$compress_threads"
+ iopts="--compress-threads=$compress_threads $iopts"
fi
if [ -n "$compress_chunk" ]; then
- iopts="$iopts --compress-chunk-size=$compress_chunk"
+ iopts="--compress-chunk-size=$compress_chunk $iopts"
fi
fi
+ if [ -n "$backup_threads" ]; then
+ iopts="--parallel=$backup_threads $iopts"
+ fi
+
setup_commands
set +e
timeit "$stagemsg-SST" "$INNOBACKUP | $tcmd; RC=( "\${PIPESTATUS[@]}" )"
set -e
if [ ${RC[0]} -ne 0 ]; then
- wsrep_log_error "${MARIABACKUP_BIN} finished with error: ${RC[0]}. " \
- "Check syslog or ${INNOBACKUPLOG} for details"
+ wsrep_log_error "mariabackup finished with error: ${RC[0]}. " \
+ "Check syslog or '$INNOBACKUPLOG' for details"
exit 22
elif [ ${RC[$(( ${#RC[@]}-1 ))]} -eq 1 ]; then
wsrep_log_error "$tcmd finished with error: ${RC[1]}"
@@ -1057,7 +1044,7 @@ then
fi
# mariabackup implicitly writes PID to fixed location in $xtmpdir
- MARIABACKUP_PID="$xtmpdir/xtrabackup_pid"
+ BACKUP_PID="$xtmpdir/xtrabackup_pid"
else # BYPASS FOR IST
@@ -1109,6 +1096,10 @@ then
ib_undo_dir="$INNODB_UNDO_DIR"
+ if [ -n "$backup_threads" ]; then
+ impts="--parallel=$backup_threads $impts"
+ fi
+
stagemsg='Joiner-Recv'
sencrypted=1
@@ -1148,7 +1139,7 @@ then
fi
trap sig_joiner_cleanup HUP PIPE INT TERM
- trap cleanup_joiner EXIT
+ trap cleanup_at_exit EXIT
if [ -n "$progress" ]; then
adjust_progress
@@ -1171,7 +1162,7 @@ then
recv_joiner "$STATDIR" "$stagemsg-gtid" $stimeout 1 1
- if ! ps -p "$WSREP_SST_OPT_PARENT" &>/dev/null
+ if ! ps -p "$WSREP_SST_OPT_PARENT" >/dev/null 2>&1
then
wsrep_log_error "Parent mysqld process (PID: $WSREP_SST_OPT_PARENT) terminated unexpectedly."
exit 32
@@ -1181,7 +1172,7 @@ then
then
if [ -d "$DATA/.sst" ]; then
- wsrep_log_info "WARNING: Stale temporary SST directory: ${DATA}/.sst from previous state transfer. Removing"
+ wsrep_log_info "WARNING: Stale temporary SST directory: '$DATA/.sst' from previous state transfer. Removing"
rm -rf "$DATA/.sst"
fi
mkdir -p "$DATA/.sst"
@@ -1291,37 +1282,37 @@ then
fi
- wsrep_log_info "Preparing the backup at ${DATA}"
+ wsrep_log_info "Preparing the backup at $DATA"
setup_commands
timeit "mariabackup prepare stage" "$INNOAPPLY"
if [ $? -ne 0 ]; then
- wsrep_log_error "${MARIABACKUP_BIN} apply finished with errors. Check syslog or ${INNOAPPLYLOG} for details"
+ wsrep_log_error "mariabackup apply finished with errors. Check syslog or '$INNOAPPLYLOG' for details"
exit 22
fi
MAGIC_FILE="$TDATA/$INFO_FILE"
- wsrep_log_info "Moving the backup to ${TDATA}"
- timeit "mariabackup move stage" "$INNOMOVE"
+ wsrep_log_info "Moving the backup to $TDATA"
+ timeit "mariabackup move stage" "$INNOMOVE"
if [ $? -eq 0 ]; then
- wsrep_log_info "Move successful, removing ${DATA}"
+ wsrep_log_info "Move successful, removing $DATA"
rm -rf "$DATA"
DATA="$TDATA"
else
- wsrep_log_error "Move failed, keeping ${DATA} for further diagnosis"
- wsrep_log_error "Check syslog or ${INNOMOVELOG} for details"
+ wsrep_log_error "Move failed, keeping '$DATA' for further diagnosis"
+ wsrep_log_error "Check syslog or '$INNOMOVELOG' for details"
exit 22
fi
else
- wsrep_log_info "${IST_FILE} received from donor: Running IST"
+ wsrep_log_info "'$IST_FILE' received from donor: Running IST"
fi
if [ ! -r "$MAGIC_FILE" ]; then
- wsrep_log_error "SST magic file ${MAGIC_FILE} not found/readable"
+ wsrep_log_error "SST magic file '$MAGIC_FILE' not found/readable"
exit 2
fi
diff --git a/scripts/wsrep_sst_mysqldump.sh b/scripts/wsrep_sst_mysqldump.sh
index 4aa3f8e63d8..798bee1ac10 100644
--- a/scripts/wsrep_sst_mysqldump.sh
+++ b/scripts/wsrep_sst_mysqldump.sh
@@ -103,7 +103,7 @@ then
DROP PREPARE stmt;"
fi
-MYSQL="$MYSQL_CLIENT $WSREP_SST_OPT_CONF "\
+MYSQL="$MYSQL_CLIENT $WSREP_SST_OPT_CONF_UNQUOTED "\
"$AUTH -h$WSREP_SST_OPT_HOST_UNESCAPED "\
"-P$WSREP_SST_OPT_PORT --disable-reconnect --connect_timeout=10"
@@ -140,7 +140,7 @@ then
fi
# NOTE: we don't use --routines here because we're dumping mysql.proc table
-MYSQLDUMP="$MYSQLDUMP $WSREP_SST_OPT_CONF $AUTH -S$WSREP_SST_OPT_SOCKET \
+MYSQLDUMP="$MYSQLDUMP $WSREP_SST_OPT_CONF_UNQUOTED $AUTH -S$WSREP_SST_OPT_SOCKET \
--add-drop-database --add-drop-table --skip-add-locks --create-options \
--disable-keys --extended-insert --skip-lock-tables --quick --set-charset \
--skip-comments --flush-privileges --all-databases --events"
diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh
index 92f77eec331..19a4d19fded 100644
--- a/scripts/wsrep_sst_rsync.sh
+++ b/scripts/wsrep_sst_rsync.sh
@@ -1,7 +1,7 @@
#!/bin/bash -ue
-# Copyright (C) 2010-2014 Codership Oy
# Copyright (C) 2017-2021 MariaDB
+# Copyright (C) 2010-2014 Codership Oy
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,9 +19,8 @@
# This is a reference script for rsync-based state snapshot transfer
-RSYNC_PID= # rsync pid file
-RSYNC_CONF= # rsync configuration file
-RSYNC_REAL_PID= # rsync process id
+RSYNC_REAL_PID=0 # rsync process id
+STUNNEL_REAL_PID=0 # stunnel process id
OS="$(uname)"
[ "$OS" = 'Darwin' ] && export -n LD_LIBRARY_PATH
@@ -36,95 +35,95 @@ wsrep_check_programs rsync
cleanup_joiner()
{
- wsrep_log_info "Joiner cleanup. rsync PID: $RSYNC_REAL_PID"
- [ "0" != "$RSYNC_REAL_PID" ] && \
- kill $RSYNC_REAL_PID && \
- sleep 0.5 && \
- kill -9 $RSYNC_REAL_PID >/dev/null 2>&1 || :
- [ -f "$RSYNC_CONF" ] && rm -f "$RSYNC_CONF"
- [ -f "$STUNNEL_CONF" ] && rm -f "$STUNNEL_CONF"
- [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID"
- [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
- [ -f "$RSYNC_PID" ] && rm -f "$RSYNC_PID"
+ local failure=0
+
+ wsrep_log_info "Joiner cleanup: rsync PID=$RSYNC_REAL_PID, stunnel PID=$STUNNEL_REAL_PID"
+
+ if [ -n "$STUNNEL" ]; then
+ if cleanup_pid $STUNNEL_REAL_PID "$STUNNEL_PID" "$STUNNEL_CONF"; then
+ if [ $RSYNC_REAL_PID -eq 0 ]; then
+ if [ -r "$RSYNC_PID" ]; then
+ RSYNC_REAL_PID=$(cat "$RSYNC_PID" 2>/dev/null)
+ if [ -z "$RSYNC_REAL_PID" ]; then
+ RSYNC_REAL_PID=0
+ fi
+ fi
+ fi
+ else
+ wsrep_log_warning "stunnel cleanup failed."
+ failure=1
+ fi
+ fi
+
+ if [ $failure -eq 0 ]; then
+ if cleanup_pid $RSYNC_REAL_PID "$RSYNC_PID" "$RSYNC_CONF"; then
+ [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
+ else
+ wsrep_log_warning "rsync cleanup failed."
+ fi
+ fi
+
wsrep_log_info "Joiner cleanup done."
+
if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then
wsrep_cleanup_progress_file
fi
}
-# Check whether rsync process is still running.
-check_pid()
-{
- local pid_file="$1"
- [ -r "$pid_file" ] && ps -p $(cat "$pid_file") 2>&1 >/dev/null
-}
-
check_pid_and_port()
{
local pid_file="$1"
- local rsync_pid=$2
- local rsync_addr="$3"
- local rsync_port="$4"
+ local pid=$2
+ local addr="$3"
+ local port="$4"
- if [ -z "$rsync_port" -o -z "$rsync_addr" -o -z "$rsync_pid" ]; then
- wsrep_log_error "check_pid_and_port(): bad arguments"
- exit 2 # ENOENT
- fi
+ local utils='rsync|stunnel'
- local port_info is_rsync
-
- if [ $lsof_available -ne 0 ]; then
- port_info=$(lsof -i ":$rsync_port" -Pn 2>/dev/null | \
- grep -F '(LISTEN)')
- is_rsync=$(echo "$port_info" | \
- grep -E "^(rsync|stunnel)[^[:space:]]*[[:space:]]+$rsync_pid[[:space:]]+")
- elif [ $sockstat_available -ne 0 ]; then
- port_info=$(sockstat -p "$rsync_port" 2>/dev/null | \
- grep -F 'LISTEN')
- is_rsync=$(echo "$port_info" | \
- grep -E "[[:space:]]+(rsync|stunnel)[^[:space:]]*[[:space:]]+$rsync_pid[[:space:]]+")
- elif [ $ss_available -ne 0 ]; then
- port_info=$(ss -H -p -n -l "( sport = :$rsync_port )" 2>/dev/null)
- is_rsync=$(echo "$port_info" | \
- grep -E "users:\\(.*\\(\"(rsync|stunnel)[^[:space:]]*\".*\<pid=$rsync_pid\>.*\\)")
- else
- wsrep_log_error "unknown sockets utility"
- exit 2 # ENOENT
- fi
+ if ! check_port "$pid" "$port" "$utils"; then
+ local port_info
+ local busy=0
- if [ -z "$is_rsync" ]; then
- local is_listening_all
if [ $lsof_available -ne 0 ]; then
- is_listening_all=$(echo "$port_info" | \
- grep -E "[[:space:]](\\*|\\[?::\\]?):$rsync_port[[:space:]]")
+ port_info=$(lsof -Pnl -i ":$port" 2>/dev/null | \
+ grep -F '(LISTEN)')
+ echo "$port_info" | \
+ grep -q -E "[[:space:]](\\*|\\[?::\\]?):$port[[:space:]]" && busy=1
else
- if [ $sockstat_available -eq 0 ]; then
- port_info=$(echo "$port_info" | grep -q -F 'users:(')
+ local filter='([^[:space:]]+[[:space:]]+){4}[^[:space:]]+'
+ if [ $sockstat_available -eq 1 ]; then
+ port_info=$(sockstat -p "$port" 2>/dev/null | \
+ grep -E '[[:space:]]LISTEN' | grep -o -E "$filter")
+ else
+ port_info=$(ss -nlpH "( sport = :$port )" 2>/dev/null | \
+ grep -F 'users:(' | grep -o -E "$filter")
fi
- port_info=$(echo "$port_info" | \
- grep -E "[^[:space:]]+[[:space:]]+[^[:space:]]+[[:space:]]+[^[:space:]]+[[:space:]]+[^[:space:]]+[[:space:]]+[^[:space:]]+" -o)
- is_listening_all=$(echo "$port_info" | \
- grep -E "[[:space:]](\\*|\\[?::\\]?):$rsync_port\$")
+ echo "$port_info" | \
+ grep -q -E "[[:space:]](\\*|\\[?::\\]?):$port\$" && busy=1
fi
- local is_listening_addr=$(echo "$port_info" | \
- grep -w -F -- "$rsync_addr:$rsync_port")
- if [ -z "$is_listening_addr" ]; then
- is_listening_addr=$(echo "$port_info" | \
- grep -w -F "[$rsync_addr]:$rsync_port")
+
+ if [ $busy -eq 0 ]; then
+ if echo "$port_info" | grep -qw -F "[$addr]:$port" || \
+ echo "$port_info" | grep -qw -F -- "$addr:$port"
+ then
+ busy=1
+ fi
+ fi
+
+ if [ $busy -eq 0 ]; then
+ return 1
fi
- if [ -n "$is_listening_all" -o -n "$is_listening_addr" ]; then
- wsrep_log_error "rsync or stunnel daemon port '$rsync_port' " \
+
+ if ! check_port "$pid" "$port" "$utils"; then
+ wsrep_log_error "rsync or stunnel daemon port '$port' " \
"has been taken by another program"
exit 16 # EBUSY
fi
- return 1
fi
- check_pid "$pid_file" && [ $(cat "$pid_file") -eq $rsync_pid ]
+ check_pid "$pid_file" && [ $CHECK_PID -eq $pid ]
}
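
A usage sketch (assuming the RSYNC_ADDR and RSYNC_PORT variables set elsewhere in this script; the caller polls until the expected listener appears):

    # Wait until an rsync/stunnel process with the recorded PID listens
    # on the SST address and port
    until check_pid_and_port "$RSYNC_PID" $RSYNC_REAL_PID "$RSYNC_ADDR" "$RSYNC_PORT"; do
        sleep 0.2
    done
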
STUNNEL_CONF="$WSREP_SST_OPT_DATA/stunnel.conf"
-
STUNNEL_PID="$WSREP_SST_OPT_DATA/stunnel.pid"
MAGIC_FILE="$WSREP_SST_OPT_DATA/rsync_sst_complete"
@@ -201,6 +200,8 @@ FILTER="-f '- /lost+found'
-f '- /.zfs'
-f '- /.fseventsd'
-f '- /.Trashes'
+ -f '- /.pid'
+ -f '- /.conf'
-f '+ /wsrep_sst_binlog.tar'
-f '- $INNODB_DATA_HOME_DIR/ib_lru_dump'
-f '- $INNODB_DATA_HOME_DIR/ibdata*'
@@ -250,19 +251,31 @@ else
CAFILE_OPT=""
fi
+VERIFY_OPT=""
+CHECK_OPT=""
+CHECK_OPT_LOCAL=""
if [ "${SSLMODE#VERIFY}" != "$SSLMODE" ]
then
case "$SSLMODE" in
'VERIFY_IDENTITY')
VERIFY_OPT='verifyPeer = yes'
- CHECK_OPT=""
;;
'VERIFY_CA')
VERIFY_OPT='verifyChain = yes'
- if is_local_ip "$WSREP_SST_OPT_HOST_UNESCAPED"; then
- CHECK_OPT='checkHost = localhost'
+ if [ -n "$WSREP_SST_OPT_REMOTE_USER" ]; then
+ CHECK_OPT="checkHost = $WSREP_SST_OPT_REMOTE_USER"
else
- CHECK_OPT='checkHost = $WSREP_SST_OPT_HOST_UNESCAPED'
+ # check if the address is an IP address (v4 or v6):
+ if echo "$WSREP_SST_OPT_HOST_UNESCAPED" | \
+ grep -q -E '^([0-9]+(\.[0-9]+){3}|[0-9a-fA-F]*(\:[0-9a-fA-F]*)+)$'
+ then
+ CHECK_OPT="checkIP = $WSREP_SST_OPT_HOST_UNESCAPED"
+ else
+ CHECK_OPT="checkHost = $WSREP_SST_OPT_HOST"
+ fi
+ if is_local_ip "$WSREP_SST_OPT_HOST_UNESCAPED"; then
+ CHECK_OPT_LOCAL="checkHost = localhost"
+ fi
fi
;;
*)
@@ -273,9 +286,6 @@ then
wsrep_log_error "Can't have ssl-mode='$SSLMODE' without CA file"
exit 22 # EINVAL
fi
-else
- VERIFY_OPT=""
- CHECK_OPT=""
fi
STUNNEL=""
@@ -294,10 +304,10 @@ then
[ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
[ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE"
+ [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID"
if [ -n "$STUNNEL" ]
then
- [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID"
cat << EOF > "$STUNNEL_CONF"
key = $SSTKEY
cert = $SSTCERT
@@ -310,7 +320,10 @@ connect = $WSREP_SST_OPT_HOST_UNESCAPED:$WSREP_SST_OPT_PORT
TIMEOUTclose = 0
${VERIFY_OPT}
${CHECK_OPT}
+${CHECK_OPT_LOCAL}
EOF
+ else
+ [ -f "$STUNNEL_CONF" ] && rm -f "$STUNNEL_CONF"
fi
if [ $WSREP_SST_OPT_BYPASS -eq 0 ]
@@ -319,13 +332,8 @@ EOF
FLUSHED="$WSREP_SST_OPT_DATA/tables_flushed"
ERROR="$WSREP_SST_OPT_DATA/sst_error"
- rm -rf "$FLUSHED"
- rm -rf "$ERROR"
-
- # Use deltaxfer only for WAN
- inv=$(basename "$0")
- [ "$inv" = "wsrep_sst_rsync_wan" ] && WHOLE_FILE_OPT="" \
- || WHOLE_FILE_OPT="--whole-file"
+ [ -f "$FLUSHED" ] && rm -f "$FLUSHED"
+ [ -f "$ERROR" ] && rm -f "$ERROR"
echo "flush tables"
@@ -340,15 +348,14 @@ EOF
if [ -f "$ERROR" ]
then
# Flush tables operation failed.
- rm -rf "$ERROR"
+ rm -f "$ERROR"
exit 255
fi
-
sleep 0.2
done
STATE=$(cat "$FLUSHED")
- rm -rf "$FLUSHED"
+ rm -f "$FLUSHED"
sync
@@ -375,6 +382,13 @@ EOF
cd "$OLD_PWD"
fi
+ # Use deltaxfer only for WAN
+ inv=$(basename "$0")
+ WHOLE_FILE_OPT=""
+ if [ "${inv%wsrep_sst_rsync_wan*}" != "$inv" ]; then
+ WHOLE_FILE_OPT="--whole-file"
+ fi
+
# first, the normal directories, so that we can detect incompatible protocol
RC=0
eval rsync ${STUNNEL:+"'--rsh=$STUNNEL'"} \
@@ -426,16 +440,18 @@ EOF
fi
# then, we parallelize the transfer of database directories,
- # use . so that path concatenation works:
+ # use '.' so that path concatenation works:
cd "$WSREP_SST_OPT_DATA"
- count=1
- [ "$OS" = 'Linux' ] && count=$(grep -c processor /proc/cpuinfo)
- [ "$OS" = 'Darwin' -o "$OS" = 'FreeBSD' ] && count=$(sysctl -n hw.ncpu)
+ backup_threads=$(parse_cnf "--mysqld|sst" 'backup-threads')
+ if [ -z "$backup_threads" ]; then
+ get_proc
+ backup_threads=$nproc
+ fi
find . -maxdepth 1 -mindepth 1 -type d -not -name 'lost+found' \
- -not -name '.zfs' -print0 | xargs -I{} -0 -P $count \
+ -not -name '.zfs' -print0 | xargs -I{} -0 -P $backup_threads \
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --recursive --delete --quiet \
@@ -474,35 +490,52 @@ EOF
echo "done $STATE"
+ if [ -n "$STUNNEL" ]; then
+ [ -f "$STUNNEL_CONF" ] && rm -f "$STUNNEL_CONF"
+ [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID"
+ fi
+
elif [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]
then
check_sockets_utils
- touch "$SST_PROGRESS_FILE"
- MYSQLD_PID="$WSREP_SST_OPT_PARENT"
+ # give some time for lingering stunnel from previous SST to complete
+ check_round=0
+ while check_pid "$STUNNEL_PID" 1
+ do
+ wsrep_log_info "lingering stunnel daemon found at startup, waiting for it to exit"
+ check_round=$(( check_round + 1 ))
+ if [ $check_round -eq 10 ]; then
+ wsrep_log_error "stunnel daemon already running."
+ exit 114 # EALREADY
+ fi
+ sleep 1
+ done
MODULE="rsync_sst"
-
RSYNC_PID="$WSREP_SST_OPT_DATA/$MODULE.pid"
+ RSYNC_CONF="$WSREP_SST_OPT_DATA/$MODULE.conf"
+
# give some time for lingering rsync from previous SST to complete
check_round=0
- while check_pid "$RSYNC_PID" && [ $check_round -lt 10 ]
+ while check_pid "$RSYNC_PID" 1
do
wsrep_log_info "lingering rsync daemon found at startup, waiting for it to exit"
check_round=$(( check_round + 1 ))
+ if [ $check_round -eq 10 ]; then
+ wsrep_log_error "rsync daemon already running."
+ exit 114 # EALREADY
+ fi
sleep 1
done
- if check_pid "$RSYNC_PID"
- then
- wsrep_log_error "rsync daemon already running."
- exit 114 # EALREADY
- fi
-
- [ -f "$RSYNC_PID" ] && rm -f "$RSYNC_PID"
[ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
[ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE"
+ if [ -z "$STUNNEL" ]; then
+ [ -f "$STUNNEL_CONF" ] && rm -f "$STUNNEL_CONF"
+ fi
+
ADDR="$WSREP_SST_OPT_ADDR"
RSYNC_PORT="$WSREP_SST_OPT_PORT"
RSYNC_ADDR="$WSREP_SST_OPT_HOST"
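Both waits above (for a lingering stunnel and for a lingering rsync from a previous SST) follow the same pattern: poll once per second and give up with EALREADY after ten rounds. A compact sketch of that retry policy; the `daemon_still_running` callback stands in for check_pid and is not part of the script:

```python
import sys
import time

def wait_for_lingering_daemon(daemon_still_running, what, rounds=10, delay=1.0):
    """Wait for a daemon left over from a previous SST to exit.

    `daemon_still_running` is a zero-argument callable (the shell script
    uses check_pid on the daemon's pid file).  Exits with EALREADY (114)
    if the daemon is still there after `rounds` attempts.
    """
    attempts = 0
    while daemon_still_running():
        print(f"lingering {what} daemon found at startup, "
              "waiting for it to exit", file=sys.stderr)
        attempts += 1
        if attempts == rounds:
            print(f"{what} daemon already running.", file=sys.stderr)
            sys.exit(114)  # EALREADY
        time.sleep(delay)

# Example: wait_for_lingering_daemon(lambda: False, "rsync") returns at once.
```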
@@ -512,7 +545,7 @@ then
trap "exit 3" INT TERM ABRT
trap cleanup_joiner EXIT
- RSYNC_CONF="$WSREP_SST_OPT_DATA/$MODULE.conf"
+ touch "$SST_PROGRESS_FILE"
if [ -n "${MYSQL_TMP_DIR:-}" ]; then
SILENT="log file = $MYSQL_TMP_DIR/rsyncd.log"
@@ -535,18 +568,18 @@ $SILENT
path = $INNODB_DATA_HOME_DIR
EOF
-# rm -rf "$DATA"/ib_logfile* # we don't want old logs around
+# rm -rf "$DATA/ib_logfile"* # we don't want old logs around
- # If the IP is local listen only in it
+ # If the IP is local, listen only on it:
if is_local_ip "$RSYNC_ADDR_UNESCAPED"
then
RSYNC_EXTRA_ARGS="--address $RSYNC_ADDR_UNESCAPED"
STUNNEL_ACCEPT="$RSYNC_ADDR_UNESCAPED:$RSYNC_PORT"
else
- # Not local, possibly a NAT, listen on all interfaces
+ # Not local, possibly a NAT, listen on all interfaces:
RSYNC_EXTRA_ARGS=""
STUNNEL_ACCEPT="$RSYNC_PORT"
- # Overwrite address with all
+ # Overwrite address with all:
RSYNC_ADDR="*"
fi
@@ -554,8 +587,9 @@ EOF
then
rsync --daemon --no-detach --port "$RSYNC_PORT" --config "$RSYNC_CONF" $RSYNC_EXTRA_ARGS &
RSYNC_REAL_PID=$!
+ TRANSFER_REAL_PID="$RSYNC_REAL_PID"
+ TRANSFER_PID=$RSYNC_PID
else
- [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID"
# Check whether the path to the config file contains a space:
if [ "${RSYNC_CONF#* }" = "$RSYNC_CONF" ]; then
cat << EOF > "$STUNNEL_CONF"
@@ -566,6 +600,9 @@ foreground = yes
pid = $STUNNEL_PID
debug = warning
client = no
+${VERIFY_OPT}
+${CHECK_OPT}
+${CHECK_OPT_LOCAL}
[rsync]
accept = $STUNNEL_ACCEPT
exec = $(command -v rsync)
@@ -583,6 +620,9 @@ foreground = yes
pid = $STUNNEL_PID
debug = warning
client = no
+${VERIFY_OPT}
+${CHECK_OPT}
+${CHECK_OPT_LOCAL}
[rsync]
accept = $STUNNEL_ACCEPT
exec = $SHELL
@@ -590,15 +630,11 @@ execargs = $SHELL -c \$RSYNC_CMD
EOF
fi
stunnel "$STUNNEL_CONF" &
- RSYNC_REAL_PID=$!
- RSYNC_PID="$STUNNEL_PID"
+ STUNNEL_REAL_PID=$!
+ TRANSFER_REAL_PID="$STUNNEL_REAL_PID"
+ TRANSFER_PID=$STUNNEL_PID
fi
- until check_pid_and_port "$RSYNC_PID" "$RSYNC_REAL_PID" "$RSYNC_ADDR_UNESCAPED" "$RSYNC_PORT"
- do
- sleep 0.2
- done
-
if [ "${SSLMODE#VERIFY}" != "$SSLMODE" ]
then # backward-incompatible behavior
CN=""
@@ -619,19 +655,26 @@ EOF
ADDR="$CN:$MY_SECRET@$WSREP_SST_OPT_HOST"
else
MY_SECRET="" # for check down in recv_joiner()
- ADDR=$WSREP_SST_OPT_HOST
+ ADDR="$WSREP_SST_OPT_HOST"
fi
+ until check_pid_and_port "$TRANSFER_PID" $TRANSFER_REAL_PID "$RSYNC_ADDR_UNESCAPED" "$RSYNC_PORT"
+ do
+ sleep 0.2
+ done
+
echo "ready $ADDR:$RSYNC_PORT/$MODULE"
+ MYSQLD_PID="$WSREP_SST_OPT_PARENT"
+
# wait for SST to complete by monitoring magic file
- while [ ! -r "$MAGIC_FILE" ] && check_pid "$RSYNC_PID" && \
- ps -p $MYSQLD_PID >/dev/null
+ while [ ! -r "$MAGIC_FILE" ] && check_pid "$TRANSFER_PID" && \
+ ps -p $MYSQLD_PID >/dev/null 2>&1
do
sleep 1
done
- if ! ps -p $MYSQLD_PID >/dev/null
+ if ! ps -p $MYSQLD_PID >/dev/null 2>&1
then
wsrep_log_error \
"Parent mysqld process (PID: $MYSQLD_PID) terminated unexpectedly."
@@ -682,7 +725,7 @@ EOF
echo "rsync process ended without creating '$MAGIC_FILE'"
fi
- wsrep_cleanup_progress_file
+# wsrep_cleanup_progress_file
# cleanup_joiner
else
wsrep_log_error "Unrecognized role: '$WSREP_SST_OPT_ROLE'"
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 5e6153202f7..ccab25adb2a 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -66,16 +66,18 @@ ADD_CUSTOM_COMMAND(
)
ADD_CUSTOM_COMMAND(
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.yy
- COMMAND ${CMAKE_COMMAND}
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.yy
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.yy
+ COMMAND ${CMAKE_COMMAND} "-DVAL1=ORACLE" "-DVAL2=MARIADB"
+ "-DOUT1=${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.yy"
+ "-DOUT2=${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.yy"
"-DIN=${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.yy"
- "-DOUT=${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.yy"
- -P ${CMAKE_CURRENT_SOURCE_DIR}/gen_sql_yacc_ora_yy.cmake
+ -P ${CMAKE_CURRENT_SOURCE_DIR}/gen_yy_files.cmake
+ COMMENT "Building yy_mariadb.yy and yy_oracle.yy from sql_yacc.yy"
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.yy
+ ${CMAKE_CURRENT_SOURCE_DIR}/gen_yy_files.cmake
)
-ADD_CUSTOM_TARGET(gen_sql_yacc_ora_yy DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.yy)
-
ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER)
IF(SSL_DEFINES)
@@ -83,8 +85,8 @@ IF(SSL_DEFINES)
ENDIF()
SET (SQL_SOURCE
- ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc
- ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.cc
../sql-common/client.c compat56.cc derror.cc des_key_file.cc
discover.cc ../sql-common/errmsg.c
field.cc field_conv.cc field_comp.cc
@@ -331,7 +333,7 @@ FIND_PACKAGE(BISON 2.0)
# there
IF (NOT BISON_FOUND)
IF (NOT ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR})
- FOREACH(file sql_yacc.cc sql_yacc.hh sql_yacc_ora.cc sql_yacc_ora.hh)
+ FOREACH(file yy_mariadb.cc yy_mariadb.hh yy_oracle.cc yy_oracle.hh)
IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${file} AND (NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${file}))
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/${file}
${CMAKE_CURRENT_BINARY_DIR}/${file} COPYONLY)
@@ -339,7 +341,7 @@ IF (NOT BISON_FOUND)
ENDFOREACH()
ENDIF()
- IF(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc)
+ IF(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.cc)
# Output files are missing, bail out.
SET(ERRMSG
"Bison (GNU parser generator) is required to build MySQL."
@@ -354,15 +356,17 @@ IF (NOT BISON_FOUND)
MESSAGE(FATAL_ERROR ${ERRMSG})
ENDIF()
ELSE()
- BISON_TARGET(gen_sql_yacc ${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.yy ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc
- COMPILE_FLAGS "-p MYSQL")
- BISON_TARGET(gen_sql_yacc_ora ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.yy ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc
- COMPILE_FLAGS "-p ORA")
+ BISON_TARGET(gen_mariadb_cc_hh ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.yy
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.cc
+ COMPILE_FLAGS "-p MYSQL -S ${CMAKE_CURRENT_SOURCE_DIR}/myskel.m4")
+ BISON_TARGET(gen_oracle_cc_hh ${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.yy
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.cc
+ COMPILE_FLAGS "-p ORA -S ${CMAKE_CURRENT_SOURCE_DIR}/myskel.m4")
ENDIF()
IF(NOT CMAKE_CROSSCOMPILING OR DEFINED CMAKE_CROSSCOMPILING_EMULATOR)
ADD_EXECUTABLE(gen_lex_token gen_lex_token.cc
- ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.hh)
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.hh)
ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
ENDIF()
@@ -381,11 +385,10 @@ ADD_CUSTOM_TARGET(
DEPENDS
${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
- ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/yy_oracle.cc
)
-ADD_DEPENDENCIES(GenServerSource gen_sql_yacc_ora_yy)
-
IF(WIN32 OR HAVE_DLOPEN AND NOT DISABLE_SHARED)
ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def)
SET_TARGET_PROPERTIES(udf_example PROPERTIES PREFIX "")
@@ -398,8 +401,8 @@ CONFIGURE_FILE(
ADD_CUSTOM_TARGET(dist
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_BINARY_DIR}/make_dist.cmake
- DEPENDS ${CMAKE_BINARY_DIR}/sql/sql_yacc.cc ${CMAKE_BINARY_DIR}/sql/sql_yacc.hh
- DEPENDS ${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.cc ${CMAKE_BINARY_DIR}/sql/sql_yacc_ora.hh
+ DEPENDS ${CMAKE_BINARY_DIR}/sql/yy_mariadb.cc ${CMAKE_BINARY_DIR}/sql/yy_mariadb.hh
+ DEPENDS ${CMAKE_BINARY_DIR}/sql/yy_oracle.cc ${CMAKE_BINARY_DIR}/sql/yy_oracle.hh
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
diff --git a/sql/gen_lex_token.cc b/sql/gen_lex_token.cc
index 0ca03b0bf7b..40145459917 100644
--- a/sql/gen_lex_token.cc
+++ b/sql/gen_lex_token.cc
@@ -19,7 +19,7 @@
/* We only need the tokens here */
#define YYSTYPE_IS_DECLARED
-#include <sql_yacc.hh>
+#include <yy_mariadb.hh>
#include <lex.h>
#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
diff --git a/sql/gen_sql_yacc_ora_yy.cmake b/sql/gen_sql_yacc_ora_yy.cmake
deleted file mode 100644
index 3fdd5d43f8d..00000000000
--- a/sql/gen_sql_yacc_ora_yy.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-
-file(READ "${IN}" yytmp)
-
-# Comment out sql_mode=DEFAULT rules and directives (e.g. %expect, %type)
-string(REPLACE "/* Start SQL_MODE_DEFAULT_SPECIFIC */"
- "/* Start SQL_MODE_DEFAULT_SPECIFIC" yytmp "${yytmp}")
-string(REPLACE "/* End SQL_MODE_DEFAULT_SPECIFIC */"
- "End SQL_MODE_DEFAULT_SPECIFIC */" yytmp "${yytmp}")
-
-# Uncomment sql_mode=ORACLE rules and directives
-string(REPLACE "/* Start SQL_MODE_ORACLE_SPECIFIC"
- "/* Start SQL_MODE_ORACLE_SPECIFIC */" yytmp "${yytmp}")
-string(REPLACE "End SQL_MODE_ORACLE_SPECIFIC */"
- "/* End SQL_MODE_ORACLE_SPECIFIC */" yytmp "${yytmp}")
-file(WRITE "${OUT}" "${yytmp}")
diff --git a/sql/gen_yy_files.cmake b/sql/gen_yy_files.cmake
new file mode 100644
index 00000000000..da63c72c37c
--- /dev/null
+++ b/sql/gen_yy_files.cmake
@@ -0,0 +1,39 @@
+if(POLICY CMP0054)
+ cmake_policy(SET CMP0054 NEW)
+endif()
+file(READ "${IN}" data)
+file(WRITE "${OUT1}" "")
+file(WRITE "${OUT2}" "")
+set(where 0)
+string(REGEX REPLACE "/\\* sql_yacc\\.yy \\*/" "/* DON'T EDIT THIS FILE. IT'S GENERATED. EDIT sql_yacc.yy INSTEAD */" data "${data}")
+while(NOT data STREQUAL "")
+ string(REGEX MATCH "^(%[ie][^\n]*\n)|((%[^ie\n]|[^%\n])[^\n]*\n)+|\n+" line "${data}")
+ string(LENGTH "${line}" ll)
+ string(SUBSTRING "${data}" ${ll} -1 data)
+
+ if (line MATCHES "^%ifdef +${VAL1} *\n")
+ set(where 1)
+ set(line "\n")
+ elseif(line MATCHES "^%ifdef +${VAL2} *\n")
+ set(where 2)
+ set(line "\n")
+ elseif(line MATCHES "^%else( *| +.*)\n" AND where GREATER 0)
+ math(EXPR where "3-${where}")
+ set(line "\n")
+ elseif(line MATCHES "^%endif( *| +.*)\n")
+ set(where 0)
+ set(line "\n")
+ endif()
+ if(where STREQUAL 1)
+ file(APPEND "${OUT1}" "${line}")
+ string(REGEX REPLACE "[^\n]+" "" line "${line}")
+ file(APPEND "${OUT2}" "${line}")
+ elseif(where STREQUAL 2)
+ file(APPEND "${OUT2}" "${line}")
+ string(REGEX REPLACE "[^\n]+" "" line "${line}")
+ file(APPEND "${OUT1}" "${line}")
+ else()
+ file(APPEND "${OUT1}" "${line}")
+ file(APPEND "${OUT2}" "${line}")
+ endif()
+endwhile()
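gen_yy_files.cmake splits the single annotated sql_yacc.yy into yy_mariadb.yy and yy_oracle.yy: lines inside `%ifdef MARIADB` / `%ifdef ORACLE` blocks go to one output and are replaced by blank lines in the other, so Bison's line numbers in both generated grammars keep pointing at the original file. A rough Python equivalent of that line-oriented split (function and tag handling are illustrative; the real build uses the CMake script above):

```python
def split_grammar(src_lines, tag_a="MARIADB", tag_b="ORACLE"):
    """Split an annotated grammar into (mariadb_lines, oracle_lines).

    Lines guarded by %ifdef MARIADB / %ifdef ORACLE / %else / %endif are
    kept in one output and replaced by empty lines in the other, so both
    generated files preserve the original line numbering."""
    out = {tag_a: [], tag_b: []}
    active = None                      # None = common section
    for line in src_lines:
        stripped = line.strip()
        if stripped.startswith("%ifdef"):
            active = stripped.split()[1]
            line = ""
        elif stripped.startswith("%else") and active is not None:
            active = tag_b if active == tag_a else tag_a
            line = ""
        elif stripped.startswith("%endif"):
            active = None
            line = ""
        for tag in (tag_a, tag_b):
            out[tag].append(line if active in (None, tag) else "")
    return out[tag_a], out[tag_b]

mariadb, oracle = split_grammar(
    ["%ifdef MARIADB", "%expect 67", "%else", "%expect 69", "%endif", "%%"])
assert mariadb == ["", "%expect 67", "", "", "", "%%"]
assert oracle  == ["", "", "", "%expect 69", "", "%%"]
```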
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index dd9fea5937a..84867f057ed 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2399,25 +2399,6 @@ void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
}
}
-/*
- Change comments specific to handler
-
- SYNOPSIS
- update_table_comment()
- comment Original comment
-
- RETURN VALUE
- new comment
-
- DESCRIPTION
- No comment changes so far
-*/
-
-char *ha_partition::update_table_comment(const char *comment)
-{
- return (char*) comment; /* Nothing to change */
-}
-
/**
Handle delete and rename table
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 15513b96b97..90993cf694c 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -527,10 +527,6 @@ public:
Meta data routines to CREATE, DROP, RENAME table and often used at
ALTER TABLE (update_create_info used from ALTER TABLE and SHOW ..).
- update_table_comment is used in SHOW TABLE commands to provide a
- chance for the handler to add any interesting comments to the table
- comments not provided by the users comment.
-
create_partitioning_metadata is called before opening a new handler object
with openfrm to call create. It is used to create any local handler
object needed in opening the object in openfrm
@@ -546,7 +542,6 @@ public:
override;
bool check_if_updates_are_ignored(const char *op) const override;
void update_create_info(HA_CREATE_INFO *create_info) override;
- char *update_table_comment(const char *comment) override;
int change_partitions(HA_CREATE_INFO *create_info, const char *path,
ulonglong * const copied, ulonglong * const deleted,
const uchar *pack_frm_data, size_t pack_frm_len)
diff --git a/sql/handler.cc b/sql/handler.cc
index 01f5e761385..dbfdae5e6d0 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -7307,10 +7307,24 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
error= binlog_log_row(table, old_data, new_data, log_func);
}
#ifdef WITH_WSREP
- if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE &&
- ht->flags & HTON_WSREP_REPLICATION &&
- !error && (error= wsrep_after_row(ha_thd())))
- return error;
+ THD *thd= ha_thd();
+ if (WSREP_NNULL(thd))
+ {
+ /* For streaming replication, the following wsrep_after_row()
+ may replicate a fragment, so we have to declare the transaction
+ as potentially PA unsafe before that call. */
+ if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd))
+ {
+ WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key);
+ if (thd->wsrep_cs().mark_transaction_pa_unsafe())
+ WSREP_DEBUG("session does not have active transaction,"
+ " can not mark as PA unsafe");
+ }
+
+ if (!error && table_share->tmp_table == NO_TMP_TABLE &&
+ ht->flags & HTON_WSREP_REPLICATION)
+ error= wsrep_after_row(thd);
+ }
#endif /* WITH_WSREP */
}
return error;
@@ -7371,11 +7385,23 @@ int handler::ha_delete_row(const uchar *buf)
error= binlog_log_row(table, buf, 0, log_func);
}
#ifdef WITH_WSREP
- if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE &&
- ht->flags & HTON_WSREP_REPLICATION &&
- !error && (error= wsrep_after_row(ha_thd())))
+ THD *thd= ha_thd();
+ if (WSREP_NNULL(thd))
{
- return error;
+ /* For streaming replication, the following wsrep_after_row()
+ may replicate a fragment, so we have to declare the transaction
+ as potentially PA unsafe before that call. */
+ if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd))
+ {
+ WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key);
+ if (thd->wsrep_cs().mark_transaction_pa_unsafe())
+ WSREP_DEBUG("session does not have active transaction,"
+ " can not mark as PA unsafe");
+ }
+
+ if (!error && table_share->tmp_table == NO_TMP_TABLE &&
+ ht->flags & HTON_WSREP_REPLICATION)
+ error= wsrep_after_row(thd);
}
#endif /* WITH_WSREP */
}
diff --git a/sql/handler.h b/sql/handler.h
index 18a1a4abf40..71804a4e98c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -4165,8 +4165,6 @@ public:
/* end of the list of admin commands */
virtual int indexes_are_disabled(void) {return 0;}
- virtual char *update_table_comment(const char * comment)
- { return (char*) comment;}
virtual void append_create_info(String *packet) {}
/**
If index == MAX_KEY then a check for table is made and if index <
diff --git a/sql/item_func.h b/sql/item_func.h
index 3293a5893a3..d323dc27fcc 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -3071,7 +3071,7 @@ public:
{ return get_item_copy<Item_func_udf_str>(thd, this); }
};
-#else /* Dummy functions to get sql_yacc.cc compiled */
+#else /* Dummy functions to get yy_*.cc files compiled */
class Item_func_udf_float :public Item_real_func
{
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 414658fd830..82cfe5c72ea 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -1828,7 +1828,7 @@ public:
{ return get_item_copy<Item_sum_udf_decimal>(thd, this); }
};
-#else /* Dummy functions to get sql_yacc.cc compiled */
+#else /* Dummy functions to get yy_*.cc files compiled */
class Item_sum_udf_float :public Item_sum_double
{
diff --git a/sql/mdl.h b/sql/mdl.h
index 45d28cc3068..52c48768329 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -1,7 +1,7 @@
#ifndef MDL_H
#define MDL_H
/* Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2020, MariaDB
+ Copyright (c) 2020, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -905,6 +905,10 @@ public:
m_tickets[MDL_TRANSACTION].is_empty() &&
m_tickets[MDL_EXPLICIT].is_empty());
}
+ bool has_explicit_locks() const
+ {
+ return !m_tickets[MDL_EXPLICIT].is_empty();
+ }
inline bool has_transactional_locks() const
{
return !m_tickets[MDL_TRANSACTION].is_empty();
diff --git a/sql/myskel.m4 b/sql/myskel.m4
new file mode 100644
index 00000000000..b26fe46d342
--- /dev/null
+++ b/sql/myskel.m4
@@ -0,0 +1,18 @@
+#
+# fix the #line directives in the generated .cc files
+# to refer to the original sql_yacc.yy
+#
+m4_define([yyfile],m4_bpatsubst(__file__,[[a-z.0-9]+$],sql_yacc.yy))
+
+m4_define([b4_syncline],
+[m4_if(m4_index([$2],[.yy]),[-1],
+[b4_sync_start([$1], [$2])[]dnl
+
+],[b4_sync_start([$1], ["yyfile"])[]dnl
+
+])])
+
+# try both paths for different bison versions
+m4_sinclude(skeletons/c-skel.m4)
+m4_sinclude(c-skel.m4)
+
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 5982d8a92e1..160ca1f5f2f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2556,7 +2556,17 @@ unlink_all_closed_tables(THD *thd, MYSQL_LOCK *lock, size_t reopen_count)
/* If no tables left, do an automatic UNLOCK TABLES */
if (thd->lock && thd->lock->table_count == 0)
+ {
+ /*
+ We have to roll back any open transactions here.
+ This is required in the case where the server has been killed
+ but some transactions are still open (as part of locked tables).
+ If we don't do this, we will get an assert in unlock_locked_tables().
+ */
+ ha_rollback_trans(thd, FALSE);
+ ha_rollback_trans(thd, TRUE);
unlock_locked_tables(thd);
+ }
}
@@ -9261,6 +9271,21 @@ int dynamic_column_error_message(enum_dyncol_func_result rc)
return rc;
}
+
+/**
+ Turn on the SELECT_DESCRIBE flag for the primary SELECT_LEX of the statement
+ being processed in case the statement is EXPLAIN UPDATE/DELETE.
+
+ @param lex current LEX
+*/
+
+void promote_select_describe_flag_if_needed(LEX *lex)
+{
+ if (lex->describe)
+ lex->first_select_lex()->options|= SELECT_DESCRIBE;
+}
+
+
/**
@} (end of group Data_Dictionary)
*/
diff --git a/sql/sql_base.h b/sql/sql_base.h
index cafb5967480..19713a051bf 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -518,6 +518,8 @@ bool extend_table_list(THD *thd, TABLE_LIST *tables,
Prelocking_strategy *prelocking_strategy,
bool has_prelocking_list);
+void promote_select_describe_flag_if_needed(LEX *lex);
+
/**
A context of open_tables() function, used to recover
from a failed open_table() or open_routine() attempt.
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 8c83e83f992..c54851c295b 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -366,6 +366,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
query_plan.select_lex= thd->lex->first_select_lex();
query_plan.table= table;
+ promote_select_describe_flag_if_needed(thd->lex);
+
if (mysql_prepare_delete(thd, table_list, &conds, &delete_while_scanning))
DBUG_RETURN(TRUE);
diff --git a/sql/sql_digest.cc b/sql/sql_digest.cc
index e00069366cd..36a6b398ad3 100644
--- a/sql/sql_digest.cc
+++ b/sql/sql_digest.cc
@@ -32,7 +32,7 @@
#include "sql_get_diagnostics.h"
/* Generated code */
-#include "sql_yacc.hh"
+#include "yy_mariadb.hh"
#define LEX_TOKEN_WITH_DEFINITION
#include "lex_token.h"
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 196d0d80b04..94575c074ef 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -356,7 +356,7 @@ void binlog_unsafe_map_init();
#ifdef MYSQL_SERVER
/*
- The following hack is needed because mysql_yacc.cc does not define
+ The following hack is needed because yy_*.cc do not define
YYSTYPE before including this file
*/
#ifdef MYSQL_YACC
@@ -364,10 +364,10 @@ void binlog_unsafe_map_init();
#else
#include "lex_symbol.h"
#ifdef MYSQL_LEX
-#include "item_func.h" /* Cast_target used in sql_yacc.hh */
-#include "sql_get_diagnostics.h" /* Types used in sql_yacc.hh */
+#include "item_func.h" /* Cast_target used in yy_mariadb.hh */
+#include "sql_get_diagnostics.h" /* Types used in yy_mariadb.hh */
#include "sp_pcontext.h"
-#include "sql_yacc.hh"
+#include "yy_mariadb.hh"
#define LEX_YYSTYPE YYSTYPE *
#else
#define LEX_YYSTYPE void *
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index da72bc8e7de..04af54ae8d9 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -10304,8 +10304,8 @@ bool check_host_name(LEX_CSTRING *str)
}
-extern int MYSQLparse(THD *thd); // from sql_yacc.cc
-extern int ORAparse(THD *thd); // from sql_yacc_ora.cc
+extern int MYSQLparse(THD *thd); // from yy_mariadb.cc
+extern int ORAparse(THD *thd); // from yy_oracle.cc
/**
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index 07f07a7150f..2206f71c060 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -392,7 +392,7 @@ enum enum_yes_no_unknown
*/
-/* sql_yacc.cc */
+/* yy_*.cc */
#ifndef DBUG_OFF
extern void turn_parser_debug_on_MYSQLparse();
extern void turn_parser_debug_on_ORAparse();
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 474dd413f52..81d6fedb4e3 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -719,8 +719,13 @@ end:
thd->lex->spname->m_name.str, static_cast<uint>(thd->lex->spname->m_name.length));
}
- if (mdl_request_for_trn.ticket)
- thd->mdl_context.release_lock(mdl_request_for_trn.ticket);
+ /* In Locked_tables_list::reopen_tables(),
+ MDL_context::set_transaction_duration_for_all_locks() may have been invoked,
+ converting our explicit MDL to transaction scope. In that case, we will not
+ release the lock, to avoid a debug assertion failure. */
+ if (MDL_ticket *ticket= mdl_request_for_trn.ticket)
+ if (thd->mdl_context.has_explicit_locks())
+ thd->mdl_context.release_lock(ticket);
DBUG_RETURN(result);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 80fae87133f..738b3e0781a 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -468,6 +468,8 @@ int mysql_update(THD *thd,
want_privilege= (table_list->view ? UPDATE_ACL :
table_list->grant.want_privilege);
#endif
+ promote_select_describe_flag_if_needed(thd->lex);
+
if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
DBUG_RETURN(1);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 6c553e4878f..b91b657dea3 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -166,7 +166,7 @@ static void yyerror(THD *thd, const char *s)
void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
{
/*
- MYSQLdebug is in sql/sql_yacc.cc, in bison generated code.
+ MYSQLdebug is in sql/yy_*.cc, in bison generated code.
Turning this option on is **VERY** verbose, and should be
used when investigating a syntax error problem only.
@@ -355,14 +355,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
We should not introduce any further shift/reduce conflicts.
*/
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
%expect 67
-/* End SQL_MODE_DEFAULT_SPECIFIC */
-
-
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%else
%expect 69
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif
/*
@@ -1801,7 +1798,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <vers_history_point> history_point
%type <vers_column_versioning> with_or_without_system
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
%type <NONE> sp_tail_standalone
%type <NONE> sp_unlabeled_block_not_atomic
%type <NONE> sp_proc_stmt_in_returns_clause
@@ -1813,10 +1810,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <spblock> sp_decl_variable_list
%type <spblock> sp_decl_variable_list_anchored
%type <kwd> reserved_keyword_udt_param_type
-/* End SQL_MODE_DEFAULT_SPECIFIC */
-
-
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%else
%type <NONE> set_assign
%type <spvar_mode> sp_opt_inout
%type <NONE> sp_tail_standalone
@@ -1851,7 +1845,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <lex> package_routine_lex
%type <lex> package_specification_function
%type <lex> package_specification_procedure
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif ORACLE
%%
@@ -10346,12 +10340,11 @@ function_call_nonkeyword:
MYSQL_YYABORT;
}
| ROWNUM_SYM
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
'(' ')'
-/* End SQL_MODE_DEFAULT_SPECIFIC */
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%else
optional_braces
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif ORACLE
{
$$= new (thd->mem_root) Item_func_rownum(thd);
if (unlikely($$ == NULL))
@@ -10390,13 +10383,13 @@ End SQL_MODE_ORACLE_SPECIFIC */
if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5))))
MYSQL_YYABORT;
}
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%ifdef ORACLE
| SYSDATE
{
if (unlikely(!($$= Lex->make_item_func_sysdate(thd, 0))))
MYSQL_YYABORT;
}
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif
| SYSDATE '(' ')'
{
if (unlikely(!($$= Lex->make_item_func_sysdate(thd, 0))))
@@ -12023,7 +12016,7 @@ table_primary_derived:
if (!($$= Lex->parsed_derived_table($1->master_unit(), $2, $3)))
MYSQL_YYABORT;
}
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%ifdef ORACLE
| subquery
opt_for_system_time_clause
{
@@ -12032,7 +12025,7 @@ table_primary_derived:
!($$= Lex->parsed_derived_table($1->master_unit(), $2, &alias)))
MYSQL_YYABORT;
}
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif
;
opt_outer:
@@ -16059,9 +16052,9 @@ keyword_sp_var_and_label:
| MICROSECOND_SYM
| MIGRATE_SYM
| MINUTE_SYM
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
| MINUS_ORACLE_SYM
-/* End SQL_MODE_DEFAULT_SPECIFIC */
+%endif
| MINVALUE_SYM
| MIN_ROWS
| MODIFY_SYM
@@ -16146,9 +16139,9 @@ keyword_sp_var_and_label:
| ROWTYPE_MARIADB_SYM
| ROW_COUNT_SYM
| ROW_FORMAT_SYM
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
| ROWNUM_SYM
-/* End SQL_MODE_DEFAULT_SPECIFIC */
+%endif
| RTREE_SYM
| SCHEDULE_SYM
| SCHEMA_NAME_SYM
@@ -16184,9 +16177,9 @@ keyword_sp_var_and_label:
| SUSPEND_SYM
| SWAPS_SYM
| SWITCHES_SYM
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
| SYSDATE
-/* End SQL_MODE_DEFAULT_SPECIFIC */
+%endif
| SYSTEM
| SYSTEM_TIME_SYM
| TABLE_NAME_SYM
@@ -16354,9 +16347,9 @@ reserved_keyword_udt_not_param_type:
| MINUTE_MICROSECOND_SYM
| MINUTE_SECOND_SYM
| MIN_SYM
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%ifdef ORACLE
| MINUS_ORACLE_SYM
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif
| MODIFIES_SYM
| MOD_SYM
| NATURAL
@@ -18162,7 +18155,7 @@ uninstall:
}
;
-/* Avoid compiler warning from sql_yacc.cc where yyerrlab1 is not used */
+/* Avoid compiler warning from yy_*.cc where yyerrlab1 is not used */
keep_gcc_happy:
IMPOSSIBLE_ACTION
{
@@ -18174,7 +18167,7 @@ _empty:
/* Empty */
;
-/* Start SQL_MODE_DEFAULT_SPECIFIC */
+%ifdef MARIADB
statement:
@@ -18575,10 +18568,10 @@ sp_unlabeled_block_not_atomic:
;
-/* End SQL_MODE_DEFAULT_SPECIFIC */
+%endif MARIADB
-/* Start SQL_MODE_ORACLE_SPECIFIC
+%ifdef ORACLE
statement:
verb_clause
@@ -19589,7 +19582,7 @@ sp_block_statements_and_exceptions:
}
;
-End SQL_MODE_ORACLE_SPECIFIC */
+%endif ORACLE
/**
@} (end of group Parser)
diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp
index 4b6bb6a9e62..6679ccd55b0 100644
--- a/storage/connect/tabrest.cpp
+++ b/storage/connect/tabrest.cpp
@@ -60,12 +60,12 @@ int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
if (Uri) {
if (*Uri == '/' || Http[strlen(Http) - 1] == '/')
- sprintf(buf, "%s%s", Http, Uri);
+ my_snprintf(buf, sizeof(buf)-1, "%s%s", Http, Uri);
else
- sprintf(buf, "%s/%s", Http, Uri);
+ my_snprintf(buf, sizeof(buf)-1, "%s/%s", Http, Uri);
} else
- strcpy(buf, Http);
+ my_snprintf(buf, sizeof(buf)-1, "%s", Http);
#if defined(__WIN__)
char cmd[1024];
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 24e6e549da1..1c506968696 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -3406,7 +3406,8 @@ loop:
/* Delete possible entries for the page from the insert buffer:
such can exist if the page belonged to an index which was dropped */
- if (!recv_recovery_is_on())
+ if (page_id < page_id_t{SRV_SPACE_ID_UPPER_BOUND, 0} &&
+ !recv_recovery_is_on())
ibuf_merge_or_delete_for_page(nullptr, page_id, zip_size);
static_assert(FIL_PAGE_PREV + 4 == FIL_PAGE_NEXT, "adjacent");
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index b29122ea158..47bf72837eb 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -2313,14 +2313,8 @@ void buf_flush_sync()
{
const ulint n_flushed= buf_flush_lists(srv_max_io_capacity, LSN_MAX);
buf_flush_wait_batch_end_acquiring_mutex(false);
- if (!n_flushed)
- {
- mysql_mutex_lock(&buf_pool.flush_list_mutex);
- const auto len= UT_LIST_GET_LEN(buf_pool.flush_list);
- mysql_mutex_unlock(&buf_pool.flush_list_mutex);
- if (!len)
- return;
- }
+ if (!n_flushed && !buf_flush_list_length())
+ return;
}
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 0afd02a0c3d..5f7bef3ae70 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -14818,66 +14818,6 @@ ha_innobase::check(
DBUG_RETURN(is_ok ? HA_ADMIN_OK : HA_ADMIN_CORRUPT);
}
-/*************************************************************//**
-Adds information about free space in the InnoDB tablespace to a table comment
-which is printed out when a user calls SHOW TABLE STATUS. Adds also info on
-foreign keys.
-@return table comment + InnoDB free space + info on foreign keys */
-char *ha_innobase::update_table_comment(const char *comment)
-{
- uint length = (uint) strlen(comment);
- char* str=0;
- size_t flen;
- std::string fk_str;
-
- /* We do not know if MySQL can call this function before calling
- external_lock(). To be safe, update the thd of the current table
- handle. */
-
- if (length > 64000 - 3) {
- return((char*) comment); /* string too long */
- }
-
- update_thd(ha_thd());
-
- m_prebuilt->trx->op_info = "returning table comment";
-
-#define SSTR( x ) reinterpret_cast< std::ostringstream & >( \
- ( std::ostringstream() << std::dec << x ) ).str()
-
- if (m_prebuilt->table->space) {
- fk_str.append("InnoDB free: ");
- fk_str.append(SSTR(fsp_get_available_space_in_free_extents(
- *m_prebuilt->table->space)));
- }
-
- fk_str.append(dict_print_info_on_foreign_keys(
- FALSE, m_prebuilt->trx,
- m_prebuilt->table));
-
- flen = fk_str.length();
-
- if (length + flen + 3 > 64000) {
- flen = 64000 - 3 - length;
- }
- /* allocate buffer for the full string */
- str = (char*) my_malloc(PSI_INSTRUMENT_ME, length + flen + 3, MYF(0));
- if (str) {
- char* pos = str + length;
- if (length) {
- memcpy(str, comment, length);
- *pos++ = ';';
- *pos++ = ' ';
- }
- memcpy(pos, fk_str.c_str(), flen);
- pos[flen] = 0;
- }
-
- m_prebuilt->trx->op_info = (char*)"";
-
- return(str ? str : (char*) comment);
-}
-
/*******************************************************************//**
Gets the foreign key create info for a table stored in InnoDB.
@return own: character string in the form which can be inserted to the
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 53698162bda..431e93f7021 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -213,7 +213,6 @@ public:
int rename_table(const char* from, const char* to) override;
inline int defragment_table();
int check(THD* thd, HA_CHECK_OPT* check_opt) override;
- char* update_table_comment(const char* comment) override;
inline void reload_statistics();
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 178f94684f2..1519c69cf42 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -4153,6 +4153,10 @@ subsequently was dropped.
void ibuf_merge_or_delete_for_page(buf_block_t *block, const page_id_t page_id,
ulint zip_size)
{
+ if (trx_sys_hdr_page(page_id)) {
+ return;
+ }
+
btr_pcur_t pcur;
#ifdef UNIV_IBUF_DEBUG
ulint volume = 0;
@@ -4167,11 +4171,8 @@ void ibuf_merge_or_delete_for_page(buf_block_t *block, const page_id_t page_id,
ut_ad(!block || page_id == block->page.id());
ut_ad(!block || block->page.state() == BUF_BLOCK_FILE_PAGE);
ut_ad(!block || block->page.status == buf_page_t::NORMAL);
-
- if (trx_sys_hdr_page(page_id)
- || fsp_is_system_temporary(page_id.space())) {
- return;
- }
+ ut_ad(!trx_sys_hdr_page(page_id));
+ ut_ad(page_id < page_id_t(SRV_SPACE_ID_UPPER_BOUND, 0));
const ulint physical_size = zip_size ? zip_size : srv_page_size;
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index 025d4b9b967..f75d1a8be77 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -2443,7 +2443,7 @@ corrupted:
if (UNIV_UNLIKELY(free_rec - fextra_size < heap_bot))
goto corrupted;
const ulint fdata_size= rec_get_data_size_old(free_rec);
- if (UNIV_UNLIKELY(free_rec + data_size > heap_top))
+ if (UNIV_UNLIKELY(free_rec + fdata_size > heap_top))
goto corrupted;
if (UNIV_UNLIKELY(extra_size + data_size > fextra_size + fdata_size))
goto corrupted;
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index ad4ecbe9289..db895b9a184 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -1060,7 +1060,7 @@ already_locked:
if (!mysqld_server_started) {
node->close_table();
- if (srv_shutdown_state > SRV_SHUTDOWN_INITIATED) {
+ if (srv_shutdown_state > SRV_SHUTDOWN_NONE) {
return(false);
}
std::this_thread::sleep_for(std::chrono::seconds(1));
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index e9ad7270f82..79da2b522d7 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -152,6 +152,76 @@ row_sel_sec_rec_is_for_blob(
return(!cmp_data_data(mtype, prtype, buf, len, sec_field, sec_len));
}
+/** Read the secondary spatial index record, calculate the minimum
+bounding rectangles for the clustered index record and the secondary
+index record, and compare them.
+@param sec_rec secondary index record
+@param sec_index spatial secondary index
+@param clust_rec clustered index record
+@param clust_index clustered index
+@retval DB_SUCCESS_LOCKED_REC if the secondary record is equal to the
+ corresponding fields in the clustered record, when compared with
+ collation;
+@retval DB_SUCCESS if not equal */
+static
+dberr_t
+row_sel_spatial_sec_rec_is_for_clust_rec(
+ const rec_t *sec_rec, const dict_index_t *sec_index,
+ const rec_t *clust_rec, dict_index_t *clust_index)
+{
+ mem_heap_t *heap= mem_heap_create(256);
+ rec_offs clust_offsets_[REC_OFFS_NORMAL_SIZE];
+ rec_offs *clust_offs= clust_offsets_;
+ ulint clust_len;
+
+ rec_offs_init(clust_offsets_);
+ ulint clust_pos= dict_col_get_clust_pos(
+ dict_index_get_nth_col(sec_index, 0), clust_index);
+ clust_offs= rec_get_offsets(clust_rec, clust_index, clust_offs,
+ clust_index->n_core_fields, clust_pos + 1,
+ &heap);
+ ut_ad(sec_index->n_user_defined_cols == 1);
+ const byte *clust_field= rec_get_nth_field(clust_rec, clust_offs,
+ clust_pos, &clust_len);
+ if (clust_len == UNIV_SQL_NULL || clust_len < GEO_DATA_HEADER_SIZE)
+ {
+ ut_ad("corrupted geometry column" == 0);
+err_exit:
+ mem_heap_free(heap);
+ return DB_SUCCESS;
+ }
+
+ /* For externally stored field, we need to get full
+ geo data to generate the MBR for comparing. */
+ if (rec_offs_nth_extern(clust_offs, clust_pos))
+ {
+ clust_field= btr_copy_externally_stored_field(
+ &clust_len, clust_field, sec_index->table->space->zip_size(),
+ clust_len, heap);
+ if (clust_field == NULL)
+ {
+ ut_ad("corrupted geometry blob" == 0);
+ goto err_exit;
+ }
+ }
+
+ ut_ad(clust_len >= GEO_DATA_HEADER_SIZE);
+ rtr_mbr_t tmp_mbr;
+ rtr_mbr_t sec_mbr;
+
+ rtree_mbr_from_wkb(
+ clust_field + GEO_DATA_HEADER_SIZE,
+ static_cast<uint>(clust_len - GEO_DATA_HEADER_SIZE),
+ SPDIMS, reinterpret_cast<double*>(&tmp_mbr));
+
+ rtr_read_mbr(sec_rec, &sec_mbr);
+
+ mem_heap_free(heap);
+ return MBR_EQUAL_CMP(&sec_mbr, &tmp_mbr)
+ ? DB_SUCCESS_LOCKED_REC
+ : DB_SUCCESS;
+}
+
/** Returns TRUE if the user-defined column values in a secondary index record
are alphabetically the same as the corresponding columns in the clustered
index record.
@@ -179,12 +249,31 @@ row_sel_sec_rec_is_for_clust_rec(
dict_index_t* clust_index,
que_thr_t* thr)
{
+ if (rec_get_deleted_flag(clust_rec,
+ dict_table_is_comp(clust_index->table))) {
+ /* In delete-marked records, DB_TRX_ID must
+ always refer to an existing undo log record. */
+ ut_ad(rec_get_trx_id(clust_rec, clust_index));
+
+ /* The clustered index record is delete-marked;
+ it is not visible in the read view. Besides,
+ if there are any externally stored columns,
+ some of them may have already been purged. */
+ return DB_SUCCESS;
+ }
+
+ if (dict_index_is_spatial(sec_index)) {
+ return row_sel_spatial_sec_rec_is_for_clust_rec(
+ sec_rec, sec_index, clust_rec,
+ clust_index);
+ }
+
const byte* sec_field;
ulint sec_len;
const byte* clust_field;
ulint n;
ulint i;
- mem_heap_t* heap = NULL;
+ mem_heap_t* heap = mem_heap_create(256);
rec_offs clust_offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs sec_offsets_[REC_OFFS_SMALL_SIZE];
rec_offs* clust_offs = clust_offsets_;
@@ -193,20 +282,7 @@ row_sel_sec_rec_is_for_clust_rec(
rec_offs_init(clust_offsets_);
rec_offs_init(sec_offsets_);
- if (rec_get_deleted_flag(clust_rec,
- dict_table_is_comp(clust_index->table))) {
- /* In delete-marked records, DB_TRX_ID must
- always refer to an existing undo log record. */
- ut_ad(rec_get_trx_id(clust_rec, clust_index));
-
- /* The clustered index record is delete-marked;
- it is not visible in the read view. Besides,
- if there are any externally stored columns,
- some of them may have already been purged. */
- return DB_SUCCESS;
- }
- heap = mem_heap_create(256);
ib_vcol_row vc(heap);
clust_offs = rec_get_offsets(clust_rec, clust_index, clust_offs,
@@ -314,43 +390,10 @@ check_for_blob:
}
}
- /* For spatial index, the first field is MBR, we check
- if the MBR is equal or not. */
- if (dict_index_is_spatial(sec_index) && i == 0) {
- rtr_mbr_t tmp_mbr;
- rtr_mbr_t sec_mbr;
- byte* dptr =
- const_cast<byte*>(clust_field);
-
- ut_ad(clust_len != UNIV_SQL_NULL);
-
- /* For externally stored field, we need to get full
- geo data to generate the MBR for comparing. */
- if (rec_offs_nth_extern(clust_offs, clust_pos)) {
- dptr = btr_copy_externally_stored_field(
- &clust_len, dptr,
- clust_index->table->space->zip_size(),
- len, heap);
- }
-
- rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE,
- static_cast<uint>(clust_len
- - GEO_DATA_HEADER_SIZE),
- SPDIMS,
- reinterpret_cast<double*>(
- &tmp_mbr));
- rtr_read_mbr(sec_field, &sec_mbr);
-
- if (!MBR_EQUAL_CMP(&sec_mbr, &tmp_mbr)) {
- return DB_SUCCESS;
- }
- } else {
-
- if (0 != cmp_data_data(col->mtype, col->prtype,
- clust_field, len,
- sec_field, sec_len)) {
- return DB_SUCCESS;
- }
+ if (0 != cmp_data_data(col->mtype, col->prtype,
+ clust_field, len,
+ sec_field, sec_len)) {
+ return DB_SUCCESS;
}
}
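The new row_sel_spatial_sec_rec_is_for_clust_rec() reduces the spatial consistency check to one question: does the MBR stored in the secondary index record equal the MBR recomputed from the clustered record's geometry? The bounding-box idea, stripped of InnoDB's WKB parsing and rtr_* helpers, looks roughly like this (pure illustration, not the InnoDB code path):

```python
from typing import Iterable, Tuple

Point = Tuple[float, float]

def mbr_from_points(points: Iterable[Point]):
    """Minimum bounding rectangle (xmin, xmax, ymin, ymax) of a geometry."""
    xs, ys = zip(*points)
    return (min(xs), max(xs), min(ys), max(ys))

def sec_rec_matches_clust_rec(sec_mbr, clust_points) -> bool:
    """True if the MBR stored in the secondary index record equals the MBR
    recomputed from the clustered record's geometry (the DB_SUCCESS_LOCKED_REC
    case in the diff); False means the secondary record does not match."""
    return mbr_from_points(clust_points) == tuple(sec_mbr)

# A geometry with vertices (1,1), (2,3), (4,2) has MBR (1, 4, 1, 3):
assert sec_rec_matches_clust_rec((1, 4, 1, 3), [(1, 1), (2, 3), (4, 2)])
```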
diff --git a/storage/perfschema/CMakeLists.txt b/storage/perfschema/CMakeLists.txt
index 1884256407f..dfb41a5b1c1 100644
--- a/storage/perfschema/CMakeLists.txt
+++ b/storage/perfschema/CMakeLists.txt
@@ -290,6 +290,9 @@ int main(int ac, char **av)
}"
HAVE_PTHREAD_THREADID_NP)
+# gettid() library function (glibc-2.30+)
+CHECK_SYMBOL_EXISTS(gettid unistd.h HAVE_GETTID)
+
# Check for gettid() system call
CHECK_C_SOURCE_COMPILES("
#include <sys/types.h>
diff --git a/storage/perfschema/my_thread.h b/storage/perfschema/my_thread.h
index b9f3f7775ff..5ddea9b1303 100644
--- a/storage/perfschema/my_thread.h
+++ b/storage/perfschema/my_thread.h
@@ -17,7 +17,17 @@
typedef pthread_key_t thread_local_key_t;
typedef pthread_t my_thread_handle;
typedef pthread_attr_t my_thread_attr_t;
+#if defined(HAVE_PTHREAD_THREADID_NP) || defined(HAVE_GETTID) || defined(HAVE_SYS_GETTID) || defined(HAVE_GETTHRID)
+typedef pid_t my_thread_os_id_t;
+#elif defined(_WIN32)
typedef uint32 my_thread_os_id_t;
+#elif defined(HAVE_PTHREAD_GETTHREADID_NP)
+typedef int my_thread_os_id_t;
+#elif defined(HAVE_INTEGER_PTHREAD_SELF)
+typedef uintptr_t my_thread_os_id_t;
+#else
+typedef unsigned long long my_thread_os_id_t;
+#endif
#define LOCK_plugin_delete LOCK_plugin
@@ -50,12 +60,14 @@ static inline my_thread_os_id_t my_thread_os_id()
pthread_threadid_np(nullptr, &tid64);
return (pid_t)tid64;
#else
+#ifdef HAVE_GETTID
+ /* Linux glibc-2.30+ */
+ return gettid();
+#else
#ifdef HAVE_SYS_GETTID
/*
- Linux.
+ Linux before glibc-2.30
See man gettid
- See GLIBC Bug 6399 - gettid() should have a wrapper
- https://sourceware.org/bugzilla/show_bug.cgi?id=6399
*/
return syscall(SYS_gettid);
#else
@@ -72,8 +84,8 @@ static inline my_thread_os_id_t my_thread_os_id()
return getthrid();
#else
#ifdef HAVE_INTEGER_PTHREAD_SELF
- /* Unknown platform, fallback. */
- return pthread_self();
+ /* NetBSD, and perhaps something else, fallback. */
+ return (my_thread_os_id_t) pthread_self();
#else
/* Feature not available. */
return 0;
@@ -82,7 +94,8 @@ static inline my_thread_os_id_t my_thread_os_id()
#endif /* HAVE_PTHREAD_GETTHREADID_NP */
#endif /* _WIN32 */
#endif /* HAVE_SYS_GETTID */
-#endif /* HAVE_SYS_THREAD_SELFID */
+#endif /* HAVE_GETTID */
+#endif /* HAVE_PTHREAD_THREADID_NP */
}
#define CHANNEL_NAME_LENGTH MAX_CONNECTION_NAME
diff --git a/storage/perfschema/pfs_config.h.cmake b/storage/perfschema/pfs_config.h.cmake
index 1b518fe3211..2b61b7e170e 100644
--- a/storage/perfschema/pfs_config.h.cmake
+++ b/storage/perfschema/pfs_config.h.cmake
@@ -1,5 +1,6 @@
#cmakedefine HAVE_PTHREAD_THREADID_NP 1
#cmakedefine HAVE_SYS_GETTID 1
+#cmakedefine HAVE_GETTID
#cmakedefine HAVE_GETTHRID 1
#cmakedefine HAVE_PTHREAD_GETTHREADID_NP 1
#cmakedefine HAVE_INTEGER_PTHREAD_SELF 1
diff --git a/wsrep-lib b/wsrep-lib
-Subproject f271ad0c6e3c647df83c1d5ec9cd26d77cef233
+Subproject 85b815032145ef5c18b7a8931d84becf6df8cd1