48 files changed, 608 insertions, 515 deletions
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index c0026a1b2dd..1714c1a5868 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -17,8 +17,15 @@ export DEB_BUILD_OPTIONS="nocheck" # Debian policy and targeting Debian Sid. Then case-by-case run in autobake-deb.sh # tests for backwards compatibility and strip away parts on older builders. -CODENAME="$(lsb_release -sc)" -case "${CODENAME}" in +LSBID="$(lsb_release -si | tr '[:upper:]' '[:lower:]')" +LSBVERSION="$(lsb_release -sr | sed -e "s#\.##g")" +LSBNAME="$(lsb_release -sc)" + +if [ -z "${LSBID}" ] +then + LSBID="unknown" +fi +case "${LSBNAME}" in stretch) # MDEV-28022 libzstd-dev-1.1.3 minimum version sed -i -e '/libzstd-dev/d' debian/control @@ -55,10 +62,11 @@ UPSTREAM="${MYSQL_VERSION_MAJOR}.${MYSQL_VERSION_MINOR}.${MYSQL_VERSION_PATCH}${ PATCHLEVEL="+maria" LOGSTRING="MariaDB build" EPOCH="1:" +VERSION="${EPOCH}${UPSTREAM}${PATCHLEVEL}~${LSBID:0:3}${LSBVERSION}" -dch -b -D ${CODENAME} -v "${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME}" "Automatic build with ${LOGSTRING}." +dch -b -D ${LSBNAME} -v "${VERSION}" "Automatic build with ${LOGSTRING}." -echo "Creating package version ${EPOCH}${UPSTREAM}${PATCHLEVEL}~${CODENAME} ... " +echo "Creating package version ${VERSION} ... " # Build the package # Pass -I so that .git and other unnecessary temporary and source control files diff --git a/debian/control b/debian/control index e3938488fef..85d131bbd11 100644 --- a/debian/control +++ b/debian/control @@ -195,7 +195,8 @@ Depends: libmariadb-dev (= ${binary:Version}), libmariadbd19 (= ${binary:Version}), ${misc:Depends}, ${shlibs:Depends} -Breaks: libmysqld-dev +Breaks: libmysqld-dev, + libmariadbd-dev (<= 10.2) Replaces: libmysqld-dev Description: MariaDB embedded database, development files MariaDB is a fast, stable and true multi-user, multi-threaded SQL database diff --git a/mysql-test/main/key_cache.result b/mysql-test/main/key_cache.result index 322a2be6a4e..1964588edd5 100644 --- a/mysql-test/main/key_cache.result +++ b/mysql-test/main/key_cache.result @@ -834,3 +834,25 @@ set global keycache2.key_buffer_size=0; set global key_buffer_size=@save_key_buffer_size; set global key_cache_segments=@save_key_cache_segments; set global key_cache_file_hash_size=@save_key_cache_file_hash_size; +# +# SIGSEGV in flush_all_key_blocks when changing +# key_buffer_size / ASAN: heap-use-after-free in flush_all_key_blocks +# +SET GLOBAL keycache1.key_cache_segments=7; +SET GLOBAL keycache1.key_buffer_size=1*1024*1024; +SET GLOBAL keycache1.key_buffer_size=0; +SET GLOBAL keycache1.key_buffer_size=128*1024; +create table t1 (p int primary key, a char(10)) delay_key_write=1; +cache index t1 key (`primary`) in keycache1; +Table Op Msg_type Msg_text +test.t1 assign_to_keycache status OK +insert into t1 values (1, 'qqqq'), (11, 'yyyy'); +select * from t1; +p a +1 qqqq +11 yyyy +drop table t1; +SET GLOBAL keycache1.key_buffer_size=0; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/key_cache.test b/mysql-test/main/key_cache.test index fb6ae497e74..a99bb4ea04c 100644 --- a/mysql-test/main/key_cache.test +++ b/mysql-test/main/key_cache.test @@ -538,3 +538,24 @@ set global key_cache_segments=@save_key_cache_segments; set global key_cache_file_hash_size=@save_key_cache_file_hash_size; # End of 5.2 tests + +--echo # +--echo # SIGSEGV in flush_all_key_blocks when changing +--echo # key_buffer_size / ASAN: heap-use-after-free in flush_all_key_blocks +--echo # + +SET GLOBAL keycache1.key_cache_segments=7; +SET GLOBAL 
keycache1.key_buffer_size=1*1024*1024; +SET GLOBAL keycache1.key_buffer_size=0; +SET GLOBAL keycache1.key_buffer_size=128*1024; +create table t1 (p int primary key, a char(10)) delay_key_write=1; +cache index t1 key (`primary`) in keycache1; +insert into t1 values (1, 'qqqq'), (11, 'yyyy'); +select * from t1; +drop table t1; +SET GLOBAL keycache1.key_buffer_size=0; + + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/kill.result b/mysql-test/main/kill.result index cbb0598485f..5d38bd90059 100644 --- a/mysql-test/main/kill.result +++ b/mysql-test/main/kill.result @@ -197,20 +197,6 @@ f2 connection default; SET DEBUG_SYNC = 'RESET'; DROP TABLE t1, t2; -connection con1; -connection con2; -connection con1; -SET SESSION optimizer_search_depth=0; -SET DEBUG_SYNC= 'before_join_optimize SIGNAL in_sync'; -PREPARE stmt FROM 'EXPLAIN SELECT * FROM t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22,t23,t24,t25,t26,t27,t28,t29,t30,t31,t32,t33,t34,t35,t36,t37,t38,t39,t40 WHERE a1=a2 AND a2=a3 AND a3=a4 AND a4=a5 AND a5=a6 AND a6=a7 AND a7=a8 AND a8=a9 AND a9=a10 AND a10=a11 AND a11=a12 AND a12=a13 AND a13=a14 AND a14=a15 AND a15=a16 AND a16=a17 AND a17=a18 AND a18=a19 AND a19=a20 AND a20=a21 AND a21=a22 AND a22=a23 AND a23=a24 AND a24=a25 AND a25=a26 AND a26=a27 AND a27=a28 AND a28=a29 AND a29=a30 AND a30=a31 AND a31=a32 AND a32=a33 AND a33=a34 AND a34=a35 AND a35=a36 AND a36=a37 AND a37=a38 AND a38=a39 AND a39=a40 '; -EXECUTE stmt; -connection con2; -SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; -KILL QUERY @id; -connection con1; -ERROR 70100: Query execution was interrupted -connection default; -SET DEBUG_SYNC = 'RESET'; # # Bug#19723: kill of active connection yields different error code # depending on platform. diff --git a/mysql-test/main/kill.test b/mysql-test/main/kill.test index 05e70ff8651..80076afda1a 100644 --- a/mysql-test/main/kill.test +++ b/mysql-test/main/kill.test @@ -272,74 +272,6 @@ connection default; SET DEBUG_SYNC = 'RESET'; DROP TABLE t1, t2; -# -# Bug#28598: mysqld crash when killing a long-running explain query. -# -connection con1; -let $ID= `SELECT @id := CONNECTION_ID()`; -connection con2; -let $ignore= `SELECT @id := $ID`; -connection con1; ---disable_query_log -let $tab_count= 40; - ---disable_query_log -begin; -let $i= $tab_count; -while ($i) -{ - eval CREATE TABLE t$i (a$i INT, KEY(a$i)); - eval INSERT INTO t$i VALUES (1),(2),(3),(4),(5),(6),(7); - dec $i ; -} - -commit; ---enable_query_log - -SET SESSION optimizer_search_depth=0; - -let $i=$tab_count; -while ($i) -{ - let $a= a$i; - let $t= t$i; - dec $i; - if ($i) - { - let $comma=,; - let $from=$comma$t$from; - let $where=a$i=$a $and $where; - } - if (!$i) - { - let $from=FROM $t$from; - let $where=WHERE $where; - } - let $and=AND; -} - ---enable_query_log -SET DEBUG_SYNC= 'before_join_optimize SIGNAL in_sync'; -eval PREPARE stmt FROM 'EXPLAIN SELECT * $from $where'; -send EXECUTE stmt; - -connection con2; -SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; -KILL QUERY @id; -connection con1; ---error 1317 -reap; ---disable_query_log -let $i= $tab_count; -while ($i) -{ - eval DROP TABLE t$i; - dec $i ; -} ---enable_query_log -connection default; -SET DEBUG_SYNC = 'RESET'; - --echo # --echo # Bug#19723: kill of active connection yields different error code --echo # depending on platform. 
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 53e5707a328..83b275bee3f 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -3221,17 +3221,21 @@ sub do_before_run_mysqltest($) if ($^O eq "MSWin32") { push @cmd, '--binary'; } - push @cmd, (qw/-r - -f -s -o/, $dest, $base_result, $resfile); + push @cmd, (qw/-r - -f -s -o/, $dest . $$, $base_result, $resfile); if (-w $resdir) { # don't rebuild a file if it's up to date unless (-e $dest and -M $dest < -M $resfile and -M $dest < -M $base_result) { run_system(@cmd); + rename $cmd[-3], $dest or unlink $cmd[-3]; } } else { - $cmd[-3] = $dest = $opt_tmpdir . '/' . basename($dest); + $dest = $opt_tmpdir . '/' . basename($dest); + $cmd[-3] = $dest . $$; run_system(@cmd); + rename $cmd[-3], $dest or unlink $cmd[-3]; } + $tinfo->{result_file} = $dest; } diff --git a/mysql-test/suite/innodb/r/stats_persistent.result b/mysql-test/suite/innodb/r/stats_persistent.result index 3ae10e31cb3..7e9c038d6f7 100644 --- a/mysql-test/suite/innodb/r/stats_persistent.result +++ b/mysql-test/suite/innodb/r/stats_persistent.result @@ -6,9 +6,9 @@ SET DEBUG_SYNC='dict_stats_update_persistent SIGNAL stop WAIT_FOR go'; ANALYZE TABLE t1; connect con1, localhost, root; SET DEBUG_SYNC='now WAIT_FOR stop'; -SELECT ENGINE,SUM(DATA_LENGTH+INDEX_LENGTH),COUNT(ENGINE),SUM(DATA_LENGTH),SUM(INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB'; -ENGINE SUM(DATA_LENGTH+INDEX_LENGTH) COUNT(ENGINE) SUM(DATA_LENGTH) SUM(INDEX_LENGTH) -InnoDB 114688 4 65536 49152 +SELECT SUM(DATA_LENGTH+INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB'; +SUM(DATA_LENGTH+INDEX_LENGTH) +SUM SET DEBUG_SYNC='now SIGNAL go'; disconnect con1; connection default; diff --git a/mysql-test/suite/innodb/t/stats_persistent.test b/mysql-test/suite/innodb/t/stats_persistent.test index ac412d56d0d..8561298c4d3 100644 --- a/mysql-test/suite/innodb/t/stats_persistent.test +++ b/mysql-test/suite/innodb/t/stats_persistent.test @@ -14,7 +14,8 @@ SET DEBUG_SYNC='dict_stats_update_persistent SIGNAL stop WAIT_FOR go'; --connect(con1, localhost, root) SET DEBUG_SYNC='now WAIT_FOR stop'; -SELECT ENGINE,SUM(DATA_LENGTH+INDEX_LENGTH),COUNT(ENGINE),SUM(DATA_LENGTH),SUM(INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB'; +--replace_column 1 SUM +SELECT SUM(DATA_LENGTH+INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB'; SET DEBUG_SYNC='now SIGNAL go'; --disconnect con1 diff --git a/mysql-test/suite/parts/inc/part_alter_values.inc b/mysql-test/suite/parts/inc/part_alter_values.inc index ca18faa5758..d3b63a4610f 100644 --- a/mysql-test/suite/parts/inc/part_alter_values.inc +++ b/mysql-test/suite/parts/inc/part_alter_values.inc @@ -78,3 +78,11 @@ if (`SELECT IF('$engine' != 'InnoDB', 1, 0)`) --remove_files_wildcard $MYSQLTEST_VARDIR/tmp/mdev_27065 * --rmdir $MYSQLTEST_VARDIR/tmp/mdev_27065 + +--echo # +--echo # MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... 
REBUILD PARTITION +--echo # +--eval CREATE TABLE t1 (c INT) ENGINE=$engine PARTITION BY KEY(c) PARTITIONS 4; +LOCK TABLES t1 WRITE, t1 AS a READ; +ALTER TABLE t1 REBUILD PARTITION p0; +DROP TABLE t1; diff --git a/mysql-test/suite/parts/inc/partition_auto_increment.inc b/mysql-test/suite/parts/inc/partition_auto_increment.inc index 4392d04db8a..2997dd9de4f 100644 --- a/mysql-test/suite/parts/inc/partition_auto_increment.inc +++ b/mysql-test/suite/parts/inc/partition_auto_increment.inc @@ -873,5 +873,16 @@ UPDATE t1 SET pk = 0; DROP TABLE t1; } +if (!$skip_update) +{ +--echo # +--echo # MDEV-21027 Assertion `part_share->auto_inc_initialized || !can_use_for_auto_inc_init()' +--echo # ha_partition::set_auto_increment_if_higher +--echo # +eval CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=$engine PARTITION BY HASH (a) PARTITIONS 3; +REPLACE INTO t1 PARTITION (p0) VALUES (3); +DROP TABLE t1; +} + --echo ############################################################################## } diff --git a/mysql-test/suite/parts/r/partition_alter_innodb.result b/mysql-test/suite/parts/r/partition_alter_innodb.result index ae3caaa4981..6afa133f989 100644 --- a/mysql-test/suite/parts/r/partition_alter_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter_innodb.result @@ -61,3 +61,10 @@ PARTITION p1 VALUES LESS THAN MAXVALUE Warnings: Warning 1618 <DATA DIRECTORY> table option of old schema is ignored DROP TABLE t1; +# +# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION +# +CREATE TABLE t1 (c INT) ENGINE=InnoDB PARTITION BY KEY(c) PARTITIONS 4;; +LOCK TABLES t1 WRITE, t1 AS a READ; +ALTER TABLE t1 REBUILD PARTITION p0; +DROP TABLE t1; diff --git a/mysql-test/suite/parts/r/partition_alter_maria.result b/mysql-test/suite/parts/r/partition_alter_maria.result index 358ffbdfbe7..eca8378430f 100644 --- a/mysql-test/suite/parts/r/partition_alter_maria.result +++ b/mysql-test/suite/parts/r/partition_alter_maria.result @@ -95,3 +95,10 @@ PARTITION p1 VALUES LESS THAN MAXVALUE Warnings: Warning 1618 <INDEX DIRECTORY> table option of old schema is ignored DROP TABLE t2; +# +# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION +# +CREATE TABLE t1 (c INT) ENGINE=Aria PARTITION BY KEY(c) PARTITIONS 4;; +LOCK TABLES t1 WRITE, t1 AS a READ; +ALTER TABLE t1 REBUILD PARTITION p0; +DROP TABLE t1; diff --git a/mysql-test/suite/parts/r/partition_alter_myisam.result b/mysql-test/suite/parts/r/partition_alter_myisam.result index 9d76881fdfa..ba1a0fe05c4 100644 --- a/mysql-test/suite/parts/r/partition_alter_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter_myisam.result @@ -68,6 +68,13 @@ PARTITION p1 VALUES LESS THAN MAXVALUE Warnings: Warning 1618 <INDEX DIRECTORY> table option of old schema is ignored DROP TABLE t2; +# +# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... 
REBUILD PARTITION +# +CREATE TABLE t1 (c INT) ENGINE=MyISAM PARTITION BY KEY(c) PARTITIONS 4;; +LOCK TABLES t1 WRITE, t1 AS a READ; +ALTER TABLE t1 REBUILD PARTITION p0; +DROP TABLE t1; create table t1 ( c1 int, c2 int, c3 varchar(100)) delay_key_write=1 partition by key(c1) ( partition p01 data directory = 'MYSQL_TMP_DIR' diff --git a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result index 76f1ddfceae..e5414c81616 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result @@ -1109,4 +1109,11 @@ CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam INSERT INTO t1 VALUES (1,1),(2,2); UPDATE t1 SET pk = 0; DROP TABLE t1; +# +# MDEV-21027 Assertion `part_share->auto_inc_initialized || !can_use_for_auto_inc_init()' +# ha_partition::set_auto_increment_if_higher +# +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE='InnoDB' PARTITION BY HASH (a) PARTITIONS 3; +REPLACE INTO t1 PARTITION (p0) VALUES (3); +DROP TABLE t1; ############################################################################## diff --git a/mysql-test/suite/parts/r/partition_auto_increment_maria.result b/mysql-test/suite/parts/r/partition_auto_increment_maria.result index 5a3902475a9..ad041735ebb 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_maria.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_maria.result @@ -1156,4 +1156,11 @@ CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam INSERT INTO t1 VALUES (1,1),(2,2); UPDATE t1 SET pk = 0; DROP TABLE t1; +# +# MDEV-21027 Assertion `part_share->auto_inc_initialized || !can_use_for_auto_inc_init()' +# ha_partition::set_auto_increment_if_higher +# +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE='Aria' PARTITION BY HASH (a) PARTITIONS 3; +REPLACE INTO t1 PARTITION (p0) VALUES (3); +DROP TABLE t1; ############################################################################## diff --git a/mysql-test/suite/parts/r/partition_auto_increment_memory.result b/mysql-test/suite/parts/r/partition_auto_increment_memory.result index c395f8ed0c9..d2d1fb6831c 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_memory.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_memory.result @@ -1137,4 +1137,11 @@ CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam INSERT INTO t1 VALUES (1,1),(2,2); UPDATE t1 SET pk = 0; DROP TABLE t1; +# +# MDEV-21027 Assertion `part_share->auto_inc_initialized || !can_use_for_auto_inc_init()' +# ha_partition::set_auto_increment_if_higher +# +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE='Memory' PARTITION BY HASH (a) PARTITIONS 3; +REPLACE INTO t1 PARTITION (p0) VALUES (3); +DROP TABLE t1; ############################################################################## diff --git a/mysql-test/suite/parts/r/partition_auto_increment_myisam.result b/mysql-test/suite/parts/r/partition_auto_increment_myisam.result index 792423096b5..f92a6ed18c6 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_myisam.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_myisam.result @@ -1156,4 +1156,11 @@ CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam INSERT INTO t1 VALUES (1,1),(2,2); UPDATE t1 SET pk = 0; DROP TABLE t1; +# +# MDEV-21027 Assertion `part_share->auto_inc_initialized || 
!can_use_for_auto_inc_init()' +# ha_partition::set_auto_increment_if_higher +# +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE='MyISAM' PARTITION BY HASH (a) PARTITIONS 3; +REPLACE INTO t1 PARTITION (p0) VALUES (3); +DROP TABLE t1; ############################################################################## diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 07dd8f3e1ef..f09739ae8af 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -701,7 +701,7 @@ int prepare_resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, keycache->in_resize= 1; /* Need to flush only if keycache is enabled. */ - if (keycache->can_be_used) + if (keycache->can_be_used && keycache->disk_blocks != -1) { /* Start the flush phase. */ keycache->resize_in_flush= 1; diff --git a/scripts/wsrep_sst_backup.sh b/scripts/wsrep_sst_backup.sh index 301739905b6..4f98ad41dd5 100644 --- a/scripts/wsrep_sst_backup.sh +++ b/scripts/wsrep_sst_backup.sh @@ -72,7 +72,7 @@ then # (c) ERROR file, in case flush tables operation failed. while [ ! -r "$FLUSHED" ] && \ - ! grep -q -F ':' -- "$FLUSHED" >/dev/null 2>&1 + ! grep -q -F ':' -- "$FLUSHED" do # Check whether ERROR file exists. if [ -f "$ERROR" ]; then @@ -105,4 +105,5 @@ else # joiner fi +wsrep_log_info "$WSREP_METHOD $WSREP_TRANSFER_TYPE completed on $WSREP_SST_OPT_ROLE" exit 0 diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 3d0a132f3fc..44c6a7b4476 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -80,6 +80,7 @@ to_minuses() } WSREP_SST_OPT_BYPASS=0 +WSREP_SST_OPT_PROGRESS=0 WSREP_SST_OPT_BINLOG="" WSREP_SST_OPT_BINLOG_INDEX="" WSREP_SST_OPT_LOG_BASENAME="" @@ -187,6 +188,10 @@ case "$1" in '--bypass') readonly WSREP_SST_OPT_BYPASS=1 ;; + '--progress') + readonly WSREP_SST_OPT_PROGRESS=$(( $2 )) + shift + ;; '--datadir') # Let's remove the trailing slash: readonly WSREP_SST_OPT_DATA=$(trim_dir "$2") @@ -246,11 +251,11 @@ case "$1" in shift ;; '--local-port') - readonly WSREP_SST_OPT_LPORT="$2" + readonly WSREP_SST_OPT_LPORT=$(( $2 )) shift ;; '--parent') - readonly WSREP_SST_OPT_PARENT="$2" + readonly WSREP_SST_OPT_PARENT=$(( $2 )) shift ;; '--password') @@ -258,7 +263,7 @@ case "$1" in shift ;; '--port') - readonly WSREP_SST_OPT_PORT="$2" + readonly WSREP_SST_OPT_PORT=$(( $2 )) shift ;; '--role') @@ -531,6 +536,8 @@ else readonly WSREP_SST_OPT_ROLE='donor' fi +readonly WSREP_SST_OPT_PROGRESS + # The same argument can be present on the command line several # times, in this case we must take its last value: if [ -n "${MYSQLD_OPT_INNODB_DATA_HOME_DIR:-}" -a \ @@ -1180,28 +1187,27 @@ check_port() check_for_dhparams() { ssl_dhparams="$DATA/dhparams.pem" - if [ ! -r "$ssl_dhparams" ]; then - get_openssl - if [ -n "$OPENSSL_BINARY" ]; then - wsrep_log_info \ - "Could not find dhparams file, creating $ssl_dhparams" - local bug=0 - local errmsg - errmsg=$("$OPENSSL_BINARY" \ - dhparam -out "$ssl_dhparams" 2048 2>&1) || bug=1 - if [ $bug -ne 0 ]; then - wsrep_log_info "run: \"$OPENSSL_BINARY\" dhparam -out \"$ssl_dhparams\" 2048" - wsrep_log_info "output: $errmsg" - wsrep_log_error "******** ERROR *****************************************" - wsrep_log_error "* Could not create the dhparams.pem file with OpenSSL. 
*" - wsrep_log_error "********************************************************" - ssl_dhparams="" - fi - else - # Rollback: if openssl is not installed, then use - # the default parameters: + get_openssl + if [ -n "$OPENSSL_BINARY" ]; then + wsrep_log_info \ + "Could not find dhparams file, creating $ssl_dhparams" + local bug=0 + local errmsg + errmsg=$("$OPENSSL_BINARY" \ + dhparam -out "$ssl_dhparams" -dsaparam 2048 2>&1) || bug=1 + if [ $bug -ne 0 ]; then + wsrep_log_info "run: \"$OPENSSL_BINARY\" dhparam"\ + "-out \"$ssl_dhparams\" -dsaparam 2048" + wsrep_log_info "output: $errmsg" + wsrep_log_error "******** ERROR *****************************************" + wsrep_log_error "* Could not create the dhparams.pem file with OpenSSL. *" + wsrep_log_error "********************************************************" ssl_dhparams="" fi + else + # Rollback: if openssl is not installed, then use + # the default parameters: + ssl_dhparams="" fi } @@ -1303,29 +1309,39 @@ verify_cert_matches_key() # check_for_version() { - y1="${1#*.}" + local y1="${1#*.}" [ "$y1" = "$1" ] && y1="" - z1=${y1#*.} + local z1="${y1#*.}" [ "$z1" = "$y1" ] && z1="" - x1="${1%%.*}" + local w1="${z1#*.}" + [ "$w1" = "$z1" ] && w1="" + local x1="${1%%.*}" y1="${y1%%.*}" z1="${z1%%.*}" + w1="${w1%%.*}" [ -z "$y1" ] && y1=0 [ -z "$z1" ] && z1=0 - y2="${2#*.}" + [ -z "$w1" ] && w1=0 + local y2="${2#*.}" [ "$y2" = "$2" ] && y2="" - z2="${y2#*.}" + local z2="${y2#*.}" [ "$z2" = "$y2" ] && z2="" - x2="${2%%.*}" + local w2="${z2#*.}" + [ "$w2" = "$z2" ] && w2="" + local x2="${2%%.*}" y2="${y2%%.*}" z2="${z2%%.*}" + w2="${w2%%.*}" [ -z "$y2" ] && y2=0 [ -z "$z2" ] && z2=0 + [ -z "$w2" ] && w2=0 [ $x1 -lt $x2 ] && return 1 [ $x1 -gt $x2 ] && return 0 [ $y1 -lt $y2 ] && return 1 [ $y1 -gt $y2 ] && return 0 [ $z1 -lt $z2 ] && return 1 + [ $z1 -gt $z2 ] && return 0 + [ $w1 -lt $w2 ] && return 1 return 0 } diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index a66a792b5ea..5521dd51098 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -86,15 +86,12 @@ encrypt_threads="" encrypt_chunk="" readonly SECRET_TAG='secret' +readonly TOTAL_TAG='total' # Required for backup locks # For backup locks it is 1 sent by joiner sst_ver=1 -if [ -n "$(commandex pv)" ] && pv --help | grep -qw -F -- '-F'; then - pvopts="$pvopts $pvformat" -fi -pcmd="pv $pvopts" declare -a RC BACKUP_BIN=$(commandex 'mariabackup') @@ -121,18 +118,19 @@ timeit() if [ $ttime -eq 1 ]; then x1=$(date +%s) - wsrep_log_info "Evaluating $cmd" - eval "$cmd" - extcode=$? + fi + + wsrep_log_info "Evaluating $cmd" + eval $cmd + extcode=$? + + if [ $ttime -eq 1 ]; then x2=$(date +%s) took=$(( x2-x1 )) wsrep_log_info "NOTE: $stage took $took seconds" totime=$(( totime+took )) - else - wsrep_log_info "Evaluating $cmd" - eval "$cmd" - extcode=$? fi + return $extcode } @@ -220,6 +218,21 @@ get_keys() stagemsg="$stagemsg-XB-Encrypted" } +get_socat_ver() +{ + [ -n "${SOCAT_VERSION+x}" ] && return + # Determine the socat version + SOCAT_VERSION=$(socat -V 2>&1 | \ + grep -m1 -owE '[0-9]+(\.[0-9]+)+' | \ + head -n1 || :) + if [ -z "$SOCAT_VERSION" ]; then + wsrep_log_error "******** FATAL ERROR ******************" + wsrep_log_error "* Cannot determine the socat version. 
*" + wsrep_log_error "***************************************" + exit 2 + fi +} + get_transfer() { if [ "$tfmt" = 'nc' ]; then @@ -285,7 +298,7 @@ get_transfer() # If sockopt contains 'pf=ip6' somewhere in the middle, # this will not interfere with socat, but exclude the trivial # cases when sockopt contains 'pf=ip6' as prefix or suffix: - if [ "$sockopt" = "${sockopt#,pf=ip6}" -a \ + if [ "$sockopt" = "${sockopt#,pf=ip6,}" -a \ "$sockopt" = "${sockopt%,pf=ip6}" ] then sockopt=",pf=ip6$sockopt" @@ -312,22 +325,25 @@ get_transfer() if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then tcmd="socat -u openssl-listen:$SST_PORT,reuseaddr" else - tcmd="socat -u stdio openssl-connect:$REMOTEIP:$SST_PORT" + local addr="$REMOTEIP:$SST_PORT" + tcmd="socat -u stdio openssl-connect:$addr" action='Encrypting' + get_socat_ver + if ! check_for_version "$SOCAT_VERSION" '1.7.4.1'; then + if check_for_version "$SOCAT_VERSION" '1.7.3.3'; then + # Workaround for a bug known as 'Red Hat issue 1870279' + # (connection reset by peer) in socat versions 1.7.3.3 + # to 1.7.4.0: + tcmd="socat stdio openssl-connect:$addr,linger=10" + wsrep_log_info \ + "Use workaround for socat $SOCAT_VERSION bug" + fi + fi fi - if [ "${sockopt#*,dhparam=}" != "$sockopt" ]; then + if [ "${sockopt#*,dhparam=}" = "$sockopt" ]; then if [ -z "$ssl_dhparams" ]; then - # Determine the socat version - SOCAT_VERSION=$(socat -V 2>&1 | \ - grep -m1 -owE '[0-9]+(\.[0-9]+)+' | \ - head -n1 || :) - if [ -z "$SOCAT_VERSION" ]; then - wsrep_log_error "******** FATAL ERROR ******************" - wsrep_log_error "* Cannot determine the socat version. *" - wsrep_log_error "***************************************" - exit 2 - fi + get_socat_ver if ! check_for_version "$SOCAT_VERSION" '1.7.3'; then # socat versions < 1.7.3 will have 512-bit dhparams (too small) # so create 2048-bit dhparams and send that as a parameter: @@ -419,44 +435,90 @@ get_transfer() get_footprint() { cd "$DATA_DIR" - payload=$(find . -regex '.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' \ - -type f -print0 | du --files0-from=- --block-size=1 -c -s | \ - awk 'END { print $1 }') + local payload_data=$(find . \ + -regex '.*undo[0-9]+$\|.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' \ + -type f -print0 | du --files0-from=- --block-size=1 -c -s | \ + awk 'END { print $1 }') + + local payload_undo=0 + if [ -n "$ib_undo_dir" -a -d "$ib_undo_dir" ]; then + cd "$ib_undo_dir" + payload_undo=$(find . -regex '.*undo[0-9]+$' -type f -print0 | \ + du --files0-from=- --block-size=1 -c -s | awk 'END { print $1 }') + fi + cd "$OLD_PWD" + + wsrep_log_info \ + "SST footprint estimate: data: $payload_data, undo: $payload_undo" + + payload=$(( payload_data + payload_undo )) + if [ "$compress" != 'none' ]; then # QuickLZ has around 50% compression ratio # When compression/compaction used, the progress is only an approximate. 
payload=$(( payload*1/2 )) fi - cd "$OLD_PWD" - pcmd="$pcmd -s $payload" + + if [ $WSREP_SST_OPT_PROGRESS -eq 1 ]; then + # report to parent the total footprint of the SST + echo "$TOTAL_TAG $payload" + fi + adjust_progress } adjust_progress() { - if [ -z "$(commandex pv)" ]; then - wsrep_log_error "pv not found in path: $PATH" - wsrep_log_error "Disabling all progress/rate-limiting" - pcmd="" - rlimit="" - progress="" - return - fi + pcmd="" + rcmd="" - if [ -n "$progress" -a "$progress" != '1' ]; then - if [ -e "$progress" ]; then - pcmd="$pcmd 2>>'$progress'" - else - pcmd="$pcmd 2>'$progress'" - fi - elif [ -z "$progress" -a -n "$rlimit" ]; then - # When rlimit is non-zero - pcmd='pv -q' - fi + [ "$progress" = 'none' ] && return + rlimitopts="" if [ -n "$rlimit" -a "$WSREP_SST_OPT_ROLE" = 'donor' ]; then wsrep_log_info "Rate-limiting SST to $rlimit" - pcmd="$pcmd -L \$rlimit" + rlimitopts=" -L $rlimit" + fi + + if [ -n "$progress" ]; then + + # Backward compatibility: user-configured progress output + pcmd="pv $pvopts$rlimitopts" + + if [ -z "${PV_FORMAT+x}" ]; then + PV_FORMAT=0 + pv --help | grep -qw -F -- '-F' && PV_FORMAT=1 + fi + if [ $PV_FORMAT -eq 1 ]; then + pcmd="$pcmd $pvformat" + fi + + if [ $payload -ne 0 ]; then + pcmd="$pcmd -s $payload" + fi + + if [ "$progress" != '1' ]; then + if [ -e "$progress" ]; then + pcmd="$pcmd 2>>'$progress'" + else + pcmd="$pcmd 2>'$progress'" + fi + fi + + elif [ $WSREP_SST_OPT_PROGRESS -eq 1 ]; then + + # Default progress output parseable by parent + pcmd="pv -f -i 1 -n -b$rlimitopts" + + # read progress data, add tag and post to stdout + # for the parent + rcmd="stdbuf -oL tr '\r' '\n' | xargs -n1 echo complete" + + elif [ -n "$rlimitopts" ]; then + + # Rate-limiting only, when rlimit is non-zero + pcmd="pv -q$rlimitopts" + fi } @@ -512,6 +574,10 @@ read_cnf() "CERT='$tpem', KEY='$tkey', MODE='$tmode'," \ "encrypt='$encrypt'" + if [ $encrypt -ge 2 ]; then + ssl_dhparams=$(parse_cnf "$encgroups" 'ssl-dhparams') + fi + sockopt=$(parse_cnf sst sockopt "") progress=$(parse_cnf sst progress "") ttime=$(parse_cnf sst time 0) @@ -765,18 +831,28 @@ recv_joiner() wsrep_log_info $(ls -l "$dir/"*) exit 32 fi - # Select the "secret" tag whose value does not start - # with a slash symbol. All new tags must to start with - # the space and the slash symbol after the word "secret" - - # to be removed by older versions of the SST scripts: - SECRET=$(grep -m1 -E "^$SECRET_TAG[[:space:]]+[^/]" \ - -- "$MAGIC_FILE" || :) - # Check donor supplied secret: - SECRET=$(trim_string "${SECRET#$SECRET_TAG}") - if [ "$SECRET" != "$MY_SECRET" ]; then - wsrep_log_error "Donor does not know my secret!" - wsrep_log_info "Donor: '$SECRET', my: '$MY_SECRET'" - exit 32 + + if [ -n "$MY_SECRET" ]; then + # Check donor supplied secret: + SECRET=$(grep -m1 -E "^$SECRET_TAG[[:space:]]" "$MAGIC_FILE" || :) + SECRET=$(trim_string "${SECRET#$SECRET_TAG}") + if [ "$SECRET" != "$MY_SECRET" ]; then + wsrep_log_error "Donor does not know my secret!" 
+ wsrep_log_info "Donor: '$SECRET', my: '$MY_SECRET'" + exit 32 + fi + fi + + if [ $WSREP_SST_OPT_PROGRESS -eq 1 ]; then + # check total SST footprint + payload=$(grep -m1 -E "^$TOTAL_TAG[[:space:]]" "$MAGIC_FILE" || :) + if [ -n "$payload" ]; then + payload=$(trim_string "${payload#$TOTAL_TAG}") + if [ $payload -ge 0 ]; then + # report to parent + echo "$TOTAL_TAG $payload" + fi + fi fi fi } @@ -825,6 +901,14 @@ monitor_process() read_cnf setup_ports +if [ "$progress" = 'none' ]; then + wsrep_log_info "All progress/rate-limiting disabled by configuration" +elif [ -z "$(commandex pv)" ]; then + wsrep_log_info "Progress reporting tool pv not found in path: $PATH" + wsrep_log_info "Disabling all progress/rate-limiting" + progress='none' +fi + if "$BACKUP_BIN" --help 2>/dev/null | grep -qw -F -- '--version-check'; then disver=' --no-version-check' fi @@ -980,6 +1064,14 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then check_extra + if [ -n "$progress" -o $WSREP_SST_OPT_PROGRESS -eq 1 ]; then + wsrep_log_info "Estimating total transfer size" + get_footprint + wsrep_log_info "To transfer: $payload" + else + adjust_progress + fi + wsrep_log_info "Streaming GTID file before SST" # Store donor's wsrep GTID (state ID) and wsrep_gtid_domain_id @@ -991,6 +1083,11 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then echo "$SECRET_TAG $WSREP_SST_OPT_REMOTE_PSWD" >> "$MAGIC_FILE" fi + if [ $WSREP_SST_OPT_PROGRESS -eq 1 ]; then + # Tell joiner what to expect: + echo "$TOTAL_TAG $payload" >> "$MAGIC_FILE" + fi + ttcmd="$tcmd" if [ -n "$scomp" ]; then @@ -1007,12 +1104,14 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then # Restore the transport commmand to its original state tcmd="$ttcmd" - if [ -n "$progress" ]; then - get_footprint - tcmd="$pcmd | $tcmd" - elif [ -n "$rlimit" ]; then - adjust_progress - tcmd="$pcmd | $tcmd" + if [ -n "$pcmd" ]; then + if [ -n "$rcmd" ]; then + # redirect pv stderr to rcmd for tagging and output to parent + tcmd="{ $pcmd 2>&3 | $tcmd; } 3>&1 | $rcmd" + else + # use user-configured pv output + tcmd="$pcmd | $tcmd" + fi fi wsrep_log_info "Sleeping before data transfer for SST" @@ -1214,13 +1313,6 @@ else # joiner MY_SECRET="" # for check down in recv_joiner() fi - trap cleanup_at_exit EXIT - - if [ -n "$progress" ]; then - adjust_progress - tcmd="$tcmd | $pcmd" - fi - get_keys if [ $encrypt -eq 1 ]; then strmcmd="$ecmd | $strmcmd" @@ -1232,6 +1324,8 @@ else # joiner check_sockets_utils + trap cleanup_at_exit EXIT + STATDIR="$(mktemp -d)" MAGIC_FILE="$STATDIR/$INFO_FILE" @@ -1245,6 +1339,17 @@ else # joiner if [ ! -r "$STATDIR/$IST_FILE" ]; then + adjust_progress + if [ -n "$pcmd" ]; then + if [ -n "$rcmd" ]; then + # redirect pv stderr to rcmd for tagging and output to parent + strmcmd="{ $pcmd 2>&3 | $strmcmd; } 3>&1 | $rcmd" + else + # use user-configured pv output + strmcmd="$pcmd | $strmcmd" + fi + fi + if [ -d "$DATA/.sst" ]; then wsrep_log_info \ "WARNING: Stale temporary SST directory:" \ @@ -1265,13 +1370,13 @@ else # joiner cd "$DATA" wsrep_log_info "Cleaning the old binary logs" # If there is a file with binlogs state, delete it: - [ -f "$binlog_base.state" ] && rm -f "$binlog_base.state" >&2 + [ -f "$binlog_base.state" ] && rm "$binlog_base.state" >&2 # Clean up the old binlog files and index: if [ -f "$binlog_index" ]; then while read bin_file || [ -n "$bin_file" ]; do rm -f "$bin_file" >&2 || : done < "$binlog_index" - rm -f "$binlog_index" >&2 + rm "$binlog_index" >&2 fi if [ -n "$binlog_dir" -a "$binlog_dir" != '.' 
-a \ -d "$binlog_dir" ] @@ -1335,16 +1440,14 @@ else # joiner dcmd="xargs -n 2 qpress -dT$nproc" - if [ -n "$progress" ] && \ + if [ -n "$progress" -a "$progress" != 'none' ] && \ pv --help | grep -qw -F -- '--line-mode' then - count=$(find "$DATA" -type f -name '*.qp' | wc -l) + count=$(find "$DATA" -maxdepth 1 -type f -name '*.qp' | wc -l) count=$(( count*2 )) - pvopts="-f -s $count -l -N Decompression" - if pv --help | grep -qw -F -- '-F'; then - pvopts="$pvopts -F '%N => Rate:%r Elapsed:%t %e Progress: [%b/$count]'" - fi - pcmd="pv $pvopts" + pvopts='-f -l -N Decompression' + pvformat="-F '%N => Rate:%r Elapsed:%t %e Progress: [%b/$count]'" + payload=$count adjust_progress dcmd="$pcmd | $dcmd" fi @@ -1442,7 +1545,7 @@ else # joiner fi # Remove special tags from the magic file, and from the output: - coords=$(grep -v -E "^$SECRET_TAG[[:space:]]" -- "$MAGIC_FILE") + coords=$(head -n1 "$MAGIC_FILE") wsrep_log_info "Galera co-ords from recovery: $coords" echo "$coords" # Output : UUID:seqno wsrep_gtid_domain_id diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index 994347d6f73..7096bb4b330 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -65,21 +65,21 @@ cleanup_joiner() if [ $failure -eq 0 ]; then if cleanup_pid $RSYNC_REAL_PID "$RSYNC_PID" "$RSYNC_CONF"; then - [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE" - [ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE" + [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE" || : + [ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE" || : else wsrep_log_warning "rsync cleanup failed." fi fi - wsrep_log_info "Joiner cleanup done." - if [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]; then wsrep_cleanup_progress_file fi [ -f "$SST_PID" ] && rm -f "$SST_PID" || : + wsrep_log_info "Joiner cleanup done." + exit $estatus } @@ -318,7 +318,7 @@ if [ -n "$SSLMODE" -a "$SSLMODE" != 'DISABLED' ]; then fi readonly SECRET_TAG='secret' -readonly BYPASS_TAG='secret /bypass' +readonly BYPASS_TAG='bypass' SST_PID="$WSREP_SST_OPT_DATA/wsrep_sst.pid" @@ -371,10 +371,11 @@ done [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE" [ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE" +RC=0 + if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then - if [ -n "$STUNNEL" ] - then + if [ -n "$STUNNEL" ]; then cat << EOF > "$STUNNEL_CONF" key = $SSTKEY cert = $SSTCERT @@ -392,8 +393,6 @@ ${CHECK_OPT_LOCAL} EOF fi - RC=0 - if [ $WSREP_SST_OPT_BYPASS -eq 0 ]; then FLUSHED="$WSREP_SST_OPT_DATA/tables_flushed" @@ -410,19 +409,19 @@ EOF # (c) ERROR file, in case flush tables operation failed. while [ ! -r "$FLUSHED" ] && \ - ! grep -q -F ':' -- "$FLUSHED" 2>/dev/null + ! grep -q -F ':' -- "$FLUSHED" do # Check whether ERROR file exists. if [ -f "$ERROR" ]; then # Flush tables operation failed. - rm -f "$ERROR" + rm "$ERROR" exit 255 fi sleep 0.2 done STATE=$(cat "$FLUSHED") - rm -f "$FLUSHED" + rm "$FLUSHED" sync @@ -629,6 +628,8 @@ FILTER="-f '- /lost+found' wsrep_log_info "Transfer of data done" + [ -f "$BINLOG_TAR_FILE" ] && rm "$BINLOG_TAR_FILE" + else # BYPASS wsrep_log_info "Bypassing state dump." @@ -657,6 +658,8 @@ FILTER="-f '- /lost+found' --archive --quiet --checksum "$MAGIC_FILE" \ "rsync://$WSREP_SST_OPT_ADDR" >&2 || RC=$? 
+ rm "$MAGIC_FILE" + if [ $RC -ne 0 ]; then wsrep_log_error "rsync $MAGIC_FILE returned code $RC:" exit 255 # unknown error @@ -665,8 +668,8 @@ FILTER="-f '- /lost+found' echo "done $STATE" if [ -n "$STUNNEL" ]; then - [ -f "$STUNNEL_CONF" ] && rm -f "$STUNNEL_CONF" - [ -f "$STUNNEL_PID" ] && rm -f "$STUNNEL_PID" + rm "$STUNNEL_CONF" + [ -f "$STUNNEL_PID" ] && rm "$STUNNEL_PID" fi else # joiner @@ -704,8 +707,7 @@ $SILENT EOF # If the IP is local, listen only on it: - if is_local_ip "$RSYNC_ADDR_UNESCAPED" - then + if is_local_ip "$RSYNC_ADDR_UNESCAPED"; then RSYNC_EXTRA_ARGS="--address $RSYNC_ADDR_UNESCAPED" STUNNEL_ACCEPT="$RSYNC_ADDR_UNESCAPED:$RSYNC_PORT" else @@ -826,13 +828,8 @@ EOF fi if [ -n "$MY_SECRET" ]; then - # Select the "secret" tag whose value does not start - # with a slash symbol. All new tags must to start with - # the space and the slash symbol after the word "secret" - - # to be removed by older versions of the SST scripts: - SECRET=$(grep -m1 -E "^$SECRET_TAG[[:space:]]+[^/]" \ - -- "$MAGIC_FILE" || :) # Check donor supplied secret: + SECRET=$(grep -m1 -E "^$SECRET_TAG[[:space:]]" "$MAGIC_FILE" || :) SECRET=$(trim_string "${SECRET#$SECRET_TAG}") if [ "$SECRET" != "$MY_SECRET" ]; then wsrep_log_error "Donor does not know my secret!" @@ -842,7 +839,7 @@ EOF fi if [ $WSREP_SST_OPT_BYPASS -eq 0 ]; then - if grep -m1 -qE "^$BYPASS_TAG([[:space:]]+.*)?\$" -- "$MAGIC_FILE"; then + if grep -m1 -qE "^$BYPASS_TAG([[:space:]]+.*)?\$" "$MAGIC_FILE"; then readonly WSREP_SST_OPT_BYPASS=1 readonly WSREP_TRANSFER_TYPE='IST' fi @@ -850,10 +847,10 @@ EOF binlog_tar_present=0 if [ -f "$BINLOG_TAR_FILE" ]; then + binlog_tar_present=1 if [ $WSREP_SST_OPT_BYPASS -ne 0 ]; then wsrep_log_warning "tar with binlogs transferred in the IST mode" fi - binlog_tar_present=1 fi if [ $WSREP_SST_OPT_BYPASS -eq 0 -a -n "$WSREP_SST_OPT_BINLOG" ]; then @@ -867,7 +864,7 @@ EOF while read bin_file || [ -n "$bin_file" ]; do rm -f "$bin_file" || : done < "$binlog_index" - rm -f "$binlog_index" + rm "$binlog_index" fi binlog_cd=0 # Change the directory to binlog base (if possible): @@ -902,7 +899,6 @@ EOF fi # Extracting binlog files: wsrep_log_info "Extracting binlog files:" - RC=0 if tar --version | grep -qw -E '^bsdtar'; then tar -tf "$BINLOG_TAR_FILE" > "$tmpfile" && \ tar -xvf "$BINLOG_TAR_FILE" > /dev/null || RC=$? 
@@ -912,7 +908,7 @@ EOF fi if [ $RC -ne 0 ]; then wsrep_log_error "Error unpacking tar file with binlog files" - rm -f "$tmpfile" + rm "$tmpfile" exit 32 fi # Rebuild binlog index: @@ -920,18 +916,16 @@ EOF while read bin_file || [ -n "$bin_file" ]; do echo "$binlog_dir${binlog_dir:+/}$bin_file" >> "$binlog_index" done < "$tmpfile" - rm -f "$tmpfile" + rm "$tmpfile" cd "$OLD_PWD" fi fi # Remove special tags from the magic file, and from the output: - coords=$(grep -v -E "^$SECRET_TAG[[:space:]]" -- "$MAGIC_FILE") + coords=$(head -n1 "$MAGIC_FILE") wsrep_log_info "Galera co-ords from recovery: $coords" echo "$coords" # Output : UUID:seqno wsrep_gtid_domain_id fi -[ -f "$BINLOG_TAR_FILE" ] && rm -f "$BINLOG_TAR_FILE" - wsrep_log_info "$WSREP_METHOD $WSREP_TRANSFER_TYPE completed on $WSREP_SST_OPT_ROLE" exit 0 diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index bf430a00f18..8dc5a508343 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3663,7 +3663,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) } else { - check_insert_autoincrement(); + check_insert_or_replace_autoincrement(); if (unlikely((error= open_read_partitions(name_buff, sizeof(name_buff))))) goto err_handler; m_num_locks= m_file_sample->lock_count(); @@ -8707,7 +8707,7 @@ int ha_partition::change_partitions_to_open(List<String> *partition_names) return 0; } - check_insert_autoincrement(); + check_insert_or_replace_autoincrement(); if (bitmap_cmp(&m_opened_partitions, &m_part_info->read_partitions) != 0) return 0; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 2d618f4afe0..00ef8e34ead 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -1401,15 +1401,16 @@ private: unlock_auto_increment(); } - void check_insert_autoincrement() + void check_insert_or_replace_autoincrement() { /* - If we INSERT into the table having the AUTO_INCREMENT column, + If we INSERT or REPLACE into the table having the AUTO_INCREMENT column, we have to read all partitions for the next autoincrement value unless we already did it. */ if (!part_share->auto_inc_initialized && - ha_thd()->lex->sql_command == SQLCOM_INSERT && + (ha_thd()->lex->sql_command == SQLCOM_INSERT || + ha_thd()->lex->sql_command == SQLCOM_REPLACE) && table->found_next_number_field) bitmap_set_all(&m_part_info->read_partitions); } diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 098d881eca8..e9bb1dc0436 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -82,6 +82,12 @@ static inline void output_core_info() my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); my_close(fd, MYF(0)); } + if ((fd= my_open("/proc/version", O_RDONLY, MYF(0))) >= 0) + { + len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); + my_close(fd, MYF(0)); + } #endif #elif defined(__APPLE__) || defined(__FreeBSD__) char buff[PATH_MAX]; @@ -90,6 +96,10 @@ static inline void output_core_info() { my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); } + if (sysctlbyname("kern.version", buff, &len, NULL, 0) == 0) + { + my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); + } #else char buff[80]; my_getwd(buff, sizeof(buff), 0); diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 7977653ae56..487800df1ab 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2005, 2017, Oracle and/or its affiliates. 
- Copyright (c) 2009, 2018, MariaDB + Copyright (c) 2009, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -6913,14 +6913,29 @@ static bool alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt) static int alter_close_table(ALTER_PARTITION_PARAM_TYPE *lpt) { + THD *thd= lpt->thd; + TABLE_SHARE *share= lpt->table->s; DBUG_ENTER("alter_close_table"); - if (lpt->table->db_stat) - { - mysql_lock_remove(lpt->thd, lpt->thd->lock, lpt->table); - lpt->table->file->ha_close(); - lpt->table->db_stat= 0; // Mark file closed - } + TABLE *table= thd->open_tables; + do { + table= find_locked_table(table, share->db.str, share->table_name.str); + if (!table) + { + DBUG_RETURN(0); + } + + if (table->db_stat) + { + mysql_lock_remove(thd, thd->lock, table); + if (int error= table->file->ha_close()) + { + DBUG_RETURN(error); + } + table->db_stat= 0; // Mark file closed + } + } while ((table= table->next)); + DBUG_RETURN(0); } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index eb3ee3d5f74..267630df9b9 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -7039,19 +7039,18 @@ static int get_check_constraints_record(THD *thd, TABLE_LIST *tables, #ifndef NO_EMBEDDED_ACCESS_CHECKS TABLE_LIST table_acl_check; bzero((char*) &table_acl_check, sizeof(table_acl_check)); + + if (!(thd->col_access & TABLE_ACLS)) + { + table_acl_check.db= *db_name; + table_acl_check.table_name= *table_name; + table_acl_check.grant.privilege= thd->col_access; + if (check_grant(thd, TABLE_ACLS, &table_acl_check, FALSE, 1, TRUE)) + DBUG_RETURN(res); + } #endif for (uint i= 0; i < tables->table->s->table_check_constraints; i++) { -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (!(thd->col_access & TABLE_ACLS)) - { - table_acl_check.db= *db_name; - table_acl_check.table_name= *table_name; - table_acl_check.grant.privilege= thd->col_access; - if (check_grant(thd, TABLE_ACLS, &table_acl_check, FALSE, 1, TRUE)) - continue; - } -#endif Virtual_column_info *check= tables->table->check_constraints[i]; table->field[0]->store(STRING_WITH_LEN("def"), system_charset_info); table->field[3]->store(check->name.str, check->name.length, diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 84923b3a200..babd46499d6 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -1067,12 +1067,14 @@ static ssize_t sst_prepare_other (const char* method, WSREP_SST_OPT_ADDR " '%s' " WSREP_SST_OPT_DATA " '%s' " "%s" - WSREP_SST_OPT_PARENT " '%d'" + WSREP_SST_OPT_PARENT " %d " + WSREP_SST_OPT_PROGRESS " %d" "%s" "%s", method, addr_in, mysql_real_data_home, wsrep_defaults_file, (int)getpid(), + 0, binlog_opt_val, binlog_index_opt_val); my_free(binlog_opt_val); @@ -1849,16 +1851,18 @@ static int sst_donate_other (const char* method, "wsrep_sst_%s " WSREP_SST_OPT_ROLE " 'donor' " WSREP_SST_OPT_ADDR " '%s' " - WSREP_SST_OPT_LPORT " '%u' " + WSREP_SST_OPT_LPORT " %u " WSREP_SST_OPT_SOCKET " '%s' " + WSREP_SST_OPT_PROGRESS " %d " WSREP_SST_OPT_DATA " '%s' " "%s" WSREP_SST_OPT_GTID " '%s:%lld' " - WSREP_SST_OPT_GTID_DOMAIN_ID " '%d'" + WSREP_SST_OPT_GTID_DOMAIN_ID " %d" "%s" "%s" "%s", method, addr, mysqld_port, mysqld_unix_port, + 0, mysql_real_data_home, wsrep_defaults_file, uuid_oss.str().c_str(), gtid.seqno().get(), wsrep_gtid_domain_id, diff --git a/sql/wsrep_sst.h b/sql/wsrep_sst.h index 2389db4abe7..462db7a159e 100644 --- a/sql/wsrep_sst.h +++ b/sql/wsrep_sst.h @@ -32,6 +32,7 @@ #define WSREP_SST_OPT_PARENT "--parent" #define WSREP_SST_OPT_BINLOG "--binlog" #define 
WSREP_SST_OPT_BINLOG_INDEX "--binlog-index" +#define WSREP_SST_OPT_PROGRESS "--progress" #define WSREP_SST_OPT_MYSQLD "--mysqld-args" // mysqldump-specific options diff --git a/storage/connect/mysql-test/connect/r/mysql.result b/storage/connect/mysql-test/connect/r/mysql.result index 3ac23394cbc..4377d2ad014 100644 --- a/storage/connect/mysql-test/connect/r/mysql.result +++ b/storage/connect/mysql-test/connect/r/mysql.result @@ -294,3 +294,15 @@ a 30 DROP TABLE t2; DROP TABLE t1; +# +# MDEV-27766 CONNECT Engine Support for INSERT IGNORE with Mysql Table type +# +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (10),(20),(30); +CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='mysql://root@localhost:PORT/test/t1' OPTION_LIST="Delayed=1,Ignored=1"; +INSERT INTO t2 VALUES (10),(20),(30),(40); +DROP TABLE t2; +DROP TABLE t1; +# +# End of 10.3 tests +# diff --git a/storage/connect/mysql-test/connect/t/mysql.test b/storage/connect/mysql-test/connect/t/mysql.test index 7585c202b8b..451de29c0b0 100644 --- a/storage/connect/mysql-test/connect/t/mysql.test +++ b/storage/connect/mysql-test/connect/t/mysql.test @@ -470,3 +470,19 @@ SELECT * FROM t2; DROP TABLE t2; DROP TABLE t1; + +--echo # +--echo # MDEV-27766 CONNECT Engine Support for INSERT IGNORE with Mysql Table type +--echo # + +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (10),(20),(30); +--replace_result $PORT PORT +--eval CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='mysql://root@localhost:$PORT/test/t1' OPTION_LIST="Delayed=1,Ignored=1" +INSERT INTO t2 VALUES (10),(20),(30),(40); +DROP TABLE t2; +DROP TABLE t1; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index c65e2924ada..eab1d3ec669 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -94,6 +94,7 @@ MYSQLDEF::MYSQLDEF(void) Isview = false; Bind = false; Delayed = false; + Ignored = false; //Xsrc = false; Huge = false; } // end of MYSQLDEF constructor @@ -321,6 +322,9 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int) Desc = "MySQL Table"; + Delayed = !!GetIntCatInfo("Delayed", 0); + Ignored = !!GetIntCatInfo("Ignored", 0); + if (stricmp(am, "MYPRX")) { // Normal case of specific MYSQL table url = GetStringCatInfo(g, "Connect", NULL); @@ -339,7 +343,6 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int) return true; Bind = !!GetIntCatInfo("Bind", 0); - Delayed = !!GetIntCatInfo("Delayed", 0); } else { // MYSQL access from a PROXY table TABLE_SHARE* s; @@ -425,6 +428,7 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBEXT(tdp) Isview = tdp->Isview; Prep = tdp->Bind; Delayed = tdp->Delayed; + Ignored = tdp->Ignored; Myc.m_Use = tdp->Huge; } else { Host = NULL; @@ -440,6 +444,7 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBEXT(tdp) Isview = false; Prep = false; Delayed = false; + Ignored = false; } // endif tdp Bind = NULL; @@ -466,6 +471,7 @@ TDBMYSQL::TDBMYSQL(PTDBMY tdbp) : TDBEXT(tdbp) Isview = tdbp->Isview; Prep = tdbp->Prep; Delayed = tdbp->Delayed; + Ignored = tdbp->Ignored; Bind = NULL; //Query = tdbp->Query; Fetched = tdbp->Fetched; @@ -623,11 +629,13 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g) len += (strlen(TableName) + 40); Query = new(g) STRING(g, len); + Query->Set("INSERT "); if (Delayed) - Query->Set("INSERT DELAYED INTO "); - else - Query->Set("INSERT INTO "); + Query->Append("DELAYED "); + if (Ignored) + Query->Append("IGNORE "); + Query->Append("INTO "); Query->Append(tk); Query->Append(TableName); Query->Append("` ("); diff --git 
a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 4b61c7eb762..b5af77de50b 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -60,6 +60,7 @@ class MYSQLDEF : public EXTDEF {/* Logical table description */ bool Isview; /* true if this table is a MySQL view */ bool Bind; /* Use prepared statement on insert */ bool Delayed; /* Delayed insert */ + bool Ignored; /* Use insert IGNORE */ //bool Xsrc; /* Execution type */ bool Huge; /* True for big table */ }; // end of MYSQLDEF @@ -132,6 +133,7 @@ class TDBMYSQL : public TDBEXT { bool Isview; // True if this table is a MySQL view bool Prep; // Use prepared statement on insert bool Delayed; // Use delayed insert + bool Ignored; // Use insert IGNORE int m_Rc; // Return code from command //int AftRows; // The number of affected rows int N; // The current table index diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index b42964b8315..940e6d5d8ab 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -66,6 +66,7 @@ Created 11/5/1995 Heikki Tuuri #include "buf0dump.h" #include <map> #include <sstream> +#include "log.h" #ifdef UNIV_LINUX #include <stdlib.h> @@ -1241,171 +1242,35 @@ buf_madvise_do_dump() } #endif +static inline byte hex_to_ascii(byte hex_digit) +{ + return hex_digit <= 9 ? '0' + hex_digit : ('a' - 10) + hex_digit; +} + /** Dump a page to stderr. @param[in] read_buf database page @param[in] zip_size compressed page size, or 0 */ -void buf_page_print(const byte* read_buf, ulint zip_size) +ATTRIBUTE_COLD +void buf_page_print(const byte *read_buf, ulint zip_size) { - dict_index_t* index; - #ifndef UNIV_DEBUG - const ulint size = zip_size ? zip_size : srv_page_size; - ib::info() << "Page dump in ascii and hex (" - << size << " bytes):"; + const size_t size = zip_size ? zip_size : srv_page_size; + const byte * const end= read_buf + size; + sql_print_information("InnoDB: Page dump (%zu bytes):", size); - ut_print_buf(stderr, read_buf, size); - fputs("\nInnoDB: End of page dump\n", stderr); -#endif + do + { + byte row[64]; - if (zip_size) { - /* Print compressed page. 
 	*/
-		ib::info() << "Compressed page type ("
-			<< fil_page_get_type(read_buf)
-			<< "); stored checksum in field1 "
-			<< mach_read_from_4(
-				read_buf + FIL_PAGE_SPACE_OR_CHKSUM)
-			<< "; calculated checksums for field1: "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_CRC32)
-			<< " "
-			<< page_zip_calc_checksum(
-				read_buf, zip_size,
-				SRV_CHECKSUM_ALGORITHM_CRC32)
-			<< ", "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_INNODB)
-			<< " "
-			<< page_zip_calc_checksum(
-				read_buf, zip_size,
-				SRV_CHECKSUM_ALGORITHM_INNODB)
-			<< ", "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_NONE)
-			<< " "
-			<< page_zip_calc_checksum(
-				read_buf, zip_size,
-				SRV_CHECKSUM_ALGORITHM_NONE)
-			<< "; page LSN "
-			<< mach_read_from_8(read_buf + FIL_PAGE_LSN)
-			<< "; page number (if stored to page"
-			<< " already) "
-			<< mach_read_from_4(read_buf + FIL_PAGE_OFFSET)
-			<< "; space id (if stored to page already) "
-			<< mach_read_from_4(
-				read_buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+    for (byte *r= row; r != &row[64]; r+= 2, read_buf++)
+      r[0]= hex_to_ascii(*read_buf >> 4), r[1]= hex_to_ascii(*read_buf & 15);
-	} else {
-		const uint32_t	crc32 = buf_calc_page_crc32(read_buf);
-		ulint page_type = fil_page_get_type(read_buf);
-
-		ib::info() << "Uncompressed page, stored checksum in field1 "
-			<< mach_read_from_4(
-				read_buf + FIL_PAGE_SPACE_OR_CHKSUM)
-			<< ", calculated checksums for field1: "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_CRC32) << " "
-			<< crc32
-			<< ", "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_INNODB) << " "
-			<< buf_calc_page_new_checksum(read_buf)
-			<< ", "
-			<< " page type " << page_type << " == "
-			<< fil_get_page_type_name(page_type) << "."
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_NONE) << " "
-			<< BUF_NO_CHECKSUM_MAGIC
-			<< ", stored checksum in field2 "
-			<< mach_read_from_4(read_buf + srv_page_size
-					    - FIL_PAGE_END_LSN_OLD_CHKSUM)
-			<< ", calculated checksums for field2: "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_CRC32) << " "
-			<< crc32
-			<< ", "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_INNODB) << " "
-			<< buf_calc_page_old_checksum(read_buf)
-			<< ", "
-			<< buf_checksum_algorithm_name(
-				SRV_CHECKSUM_ALGORITHM_NONE) << " "
-			<< BUF_NO_CHECKSUM_MAGIC
-			<< ", page LSN "
-			<< mach_read_from_4(read_buf + FIL_PAGE_LSN)
-			<< " "
-			<< mach_read_from_4(read_buf + FIL_PAGE_LSN + 4)
-			<< ", low 4 bytes of LSN at page end "
-			<< mach_read_from_4(read_buf + srv_page_size
-					    - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)
-			<< ", page number (if stored to page already) "
-			<< mach_read_from_4(read_buf + FIL_PAGE_OFFSET)
-			<< ", space id (if created with >= MySQL-4.1.1"
-			   " and stored already) "
-			<< mach_read_from_4(
-				read_buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
-	}
-
-	switch (fil_page_get_type(read_buf)) {
-		index_id_t	index_id;
-	case FIL_PAGE_INDEX:
-	case FIL_PAGE_TYPE_INSTANT:
-	case FIL_PAGE_RTREE:
-		index_id = btr_page_get_index_id(read_buf);
-		ib::info() << "Page may be an index page where"
-			" index id is " << index_id;
+    sql_print_information("InnoDB: %.*s", 64, row);
+  }
+  while (read_buf != end);
-		index = dict_index_find_on_id_low(index_id);
-		if (index) {
-			ib::info()
-				<< "Index " << index_id
-				<< " is " << index->name
-				<< " in table " << index->table->name;
-		}
-		break;
-	case FIL_PAGE_UNDO_LOG:
-		fputs("InnoDB: Page may be an undo log page\n", stderr);
-		break;
-	case FIL_PAGE_INODE:
-		fputs("InnoDB: Page may be an 'inode' page\n", stderr);
-		break;
-	case FIL_PAGE_IBUF_FREE_LIST:
-		fputs("InnoDB: Page may be an insert buffer free list page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_ALLOCATED:
-		fputs("InnoDB: Page may be a freshly allocated page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_IBUF_BITMAP:
-		fputs("InnoDB: Page may be an insert buffer bitmap page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_SYS:
-		fputs("InnoDB: Page may be a system page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_TRX_SYS:
-		fputs("InnoDB: Page may be a transaction system page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_FSP_HDR:
-		fputs("InnoDB: Page may be a file space header page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_XDES:
-		fputs("InnoDB: Page may be an extent descriptor page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_BLOB:
-		fputs("InnoDB: Page may be a BLOB page\n",
-		      stderr);
-		break;
-	case FIL_PAGE_TYPE_ZBLOB:
-	case FIL_PAGE_TYPE_ZBLOB2:
-		fputs("InnoDB: Page may be a compressed BLOB page\n",
-		      stderr);
-		break;
-	}
+  sql_print_information("InnoDB: End of page dump");
+#endif
 }

 # ifdef PFS_GROUP_BUFFER_SYNC
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 8357732e73b..29ab3c7fe70 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -2,7 +2,7 @@

 Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
 Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2021, MariaDB Corporation.
+Copyright (c) 2013, 2022, MariaDB Corporation.

 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -1226,45 +1226,6 @@ dict_table_find_index_on_id(
 	return(NULL);
 }

-/**********************************************************************//**
-Looks for an index with the given id. NOTE that we do not reserve
-the dictionary mutex: this function is for emergency purposes like
-printing info of a corrupt database page!
-@return index or NULL if not found in cache */
-dict_index_t*
-dict_index_find_on_id_low(
-/*======================*/
-	index_id_t	id)	/*!< in: index id */
-{
-	if (!dict_sys.is_initialised()) return NULL;
-
-	dict_table_t*	table;
-
-	for (table = UT_LIST_GET_FIRST(dict_sys.table_LRU);
-	     table != NULL;
-	     table = UT_LIST_GET_NEXT(table_LRU, table)) {
-
-		dict_index_t*	index = dict_table_find_index_on_id(table, id);
-
-		if (index != NULL) {
-			return(index);
-		}
-	}
-
-	for (table = UT_LIST_GET_FIRST(dict_sys.table_non_LRU);
-	     table != NULL;
-	     table = UT_LIST_GET_NEXT(table_LRU, table)) {
-
-		dict_index_t*	index = dict_table_find_index_on_id(table, id);
-
-		if (index != NULL) {
-			return(index);
-		}
-	}
-
-	return(NULL);
-}
-
 /** Function object to remove a foreign key constraint from the
 referenced_set of the referenced table. The foreign key object is
 also removed from the dictionary cache. The foreign key constraint
@@ -4754,9 +4715,19 @@ dict_index_get_if_in_cache_low(
 /*===========================*/
 	index_id_t	index_id)	/*!< in: index id */
 {
-	ut_ad(mutex_own(&dict_sys.mutex));
+  ut_ad(mutex_own(&dict_sys.mutex));
+
+  for (dict_table_t *table= UT_LIST_GET_FIRST(dict_sys.table_LRU);
+       table; table= UT_LIST_GET_NEXT(table_LRU, table))
+    if (dict_index_t *index= dict_table_find_index_on_id(table, index_id))
+      return index;
+
+  for (dict_table_t *table = UT_LIST_GET_FIRST(dict_sys.table_non_LRU);
+       table; table= UT_LIST_GET_NEXT(table_LRU, table))
+    if (dict_index_t *index= dict_table_find_index_on_id(table, index_id))
+      return index;

-	return(dict_index_find_on_id_low(index_id));
+  return nullptr;
 }

 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index d21c0cefec0..57e69926264 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -1,7 +1,7 @@
 /*****************************************************************************

 Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2021, MariaDB Corporation.
+Copyright (c) 2014, 2022, MariaDB Corporation.

 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -1405,9 +1405,8 @@ i_s_cmp_per_index_fill_low(

 	for (iter = snap.begin(), i = 0; iter != snap.end(); iter++, i++) {

-		dict_index_t*	index = dict_index_find_on_id_low(iter->first);
-
-		if (index != NULL) {
+		if (dict_index_t* index
+		    = dict_index_get_if_in_cache_low(iter->first)) {
 			char	db_utf8[MAX_DB_UTF8_LEN];
 			char	table_utf8[MAX_TABLE_UTF8_LEN];
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 2326f00599b..f70297121fd 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -2,7 +2,7 @@

 Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
 Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2021, MariaDB Corporation.
+Copyright (c) 2013, 2022, MariaDB Corporation.

 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -934,16 +934,6 @@ dict_table_copy_types(
 	const dict_table_t*	table)	/*!< in: table */
 	MY_ATTRIBUTE((nonnull));
 /**********************************************************************//**
-Looks for an index with the given id. NOTE that we do not reserve
-the dictionary mutex: this function is for emergency purposes like
-printing info of a corrupt database page!
-@return index or NULL if not found from cache */
-dict_index_t*
-dict_index_find_on_id_low(
-/*======================*/
-	index_id_t	id)	/*!< in: index id */
-	MY_ATTRIBUTE((warn_unused_result));
-/**********************************************************************//**
 Make room in the table cache by evicting an unused table. The unused table
 should not be part of FK relationship and currently not used in any
 user transaction. There is no guarantee that it will remove a table.
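
Reviewer note on the buf0buf.cc hunk above: the long per-algorithm checksum report is replaced by a plain hex dump of the page, printed 32 bytes (64 hex digits) per sql_print_information() call. The standalone C++ sketch below shows the same loop shape only; hex_to_ascii() is written out locally as an assumed stand-in for the helper the patched code relies on, and printf() stands in for the server log call.

// Sketch only, not the patch itself: dump a page as rows of 64 hex digits.
#include <cstdio>
#include <cstdint>
#include <cstddef>

static char hex_to_ascii(uint8_t nibble)   // assumed stand-in: 0..15 -> '0'..'9','a'..'f'
{
  return nibble < 10 ? char('0' + nibble) : char('a' + nibble - 10);
}

static void dump_page(const uint8_t *read_buf, size_t page_size)
{
  const uint8_t *end = read_buf + page_size;
  do {
    char row[64];                           // 64 hex digits = 32 bytes per printed row
    for (char *r = row; r != &row[64]; r += 2, read_buf++) {
      r[0] = hex_to_ascii(*read_buf >> 4);  // high nibble
      r[1] = hex_to_ascii(*read_buf & 15);  // low nibble
    }
    std::printf("InnoDB: %.*s\n", 64, row); // stands in for sql_print_information()
  } while (read_buf != end);
  std::printf("InnoDB: End of page dump\n");
}

The do/while form assumes the page size is a multiple of 32 bytes, which holds for every InnoDB page size (4096 to 65536).
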
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index eb4052abdf5..559b315118f 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -1111,9 +1111,6 @@ THR_LOCK_DATA **ha_spider::store_lock(
       case TL_READ_HIGH_PRIORITY:
         high_priority = TRUE;
         break;
-      case TL_WRITE_DELAYED:
-        insert_delayed = TRUE;
-        break;
       case TL_WRITE_LOW_PRIORITY:
         low_priority = TRUE;
         break;
@@ -1223,7 +1220,6 @@ THR_LOCK_DATA **ha_spider::store_lock(
       lock_type = TL_READ;
     if (
       lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE &&
-      lock_type != TL_WRITE_DELAYED &&
       !thd->in_lock_tables && !thd_tablespace_op(thd)
     )
       lock_type = TL_WRITE_ALLOW_WRITE;
@@ -1805,7 +1801,6 @@ int ha_spider::reset()
   insert_with_update = FALSE;
   low_priority = FALSE;
   high_priority = FALSE;
-  insert_delayed = FALSE;
   use_pre_call = FALSE;
   use_pre_action = FALSE;
   pre_bitmap_checked = FALSE;
@@ -9716,7 +9711,6 @@ ulonglong ha_spider::table_flags() const
     HA_CAN_FULLTEXT |
     HA_CAN_SQL_HANDLER |
     HA_FILE_BASED |
-    HA_CAN_INSERT_DELAYED |
     HA_CAN_BIT_FIELD |
     HA_NO_COPY_ON_ALTER |
     HA_BINLOG_ROW_CAPABLE |
@@ -14121,7 +14115,6 @@ int ha_spider::sync_from_clone_source(
     update_request = spider->update_request;
     lock_mode = spider->lock_mode;
     high_priority = spider->high_priority;
-    insert_delayed = spider->insert_delayed;
     low_priority = spider->low_priority;
     memcpy(conns, spider->conns,
       sizeof(SPIDER_CONN *) * share->link_count);
@@ -14163,7 +14156,6 @@ int ha_spider::sync_from_clone_source(
     update_request = spider->update_request;
     lock_mode = spider->lock_mode;
     high_priority = spider->high_priority;
-    insert_delayed = spider->insert_delayed;
     low_priority = spider->low_priority;

     if ((error_num = spider_check_trx_and_get_conn(spider->trx->thd,
diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h
index cb0a2abcc06..543b92629da 100644
--- a/storage/spider/ha_spider.h
+++ b/storage/spider/ha_spider.h
@@ -169,7 +169,6 @@ public:
   bool insert_with_update;
   bool low_priority;
   bool high_priority;
-  bool insert_delayed;
   bool use_pre_call;
   bool use_pre_action;
   bool pre_bitmap_checked;
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_28854.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_28854.result
new file mode 100644
index 00000000000..e5cee8d4ee2
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_28854.result
@@ -0,0 +1,32 @@
+#
+# MDEV-28854 Spider: Disallow INSERT DELAYED on Spider table
+#
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+connection child2_1;
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+CREATE TABLE tbl_a (id INT);
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+CREATE TABLE tbl_a (
+id INT
+) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"';
+connection master_1;
+INSERT DELAYED INTO tbl_a VALUES (1);
+ERROR HY000: DELAYED option not supported for table 'tbl_a'
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.cnf
new file mode 100644
index 00000000000..05dfd8a0bce
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.cnf
@@ -0,0 +1,3 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.test
new file mode 100644
index 00000000000..47f4e57d4fc
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_28854.test
@@ -0,0 +1,36 @@
+--echo #
+--echo # MDEV-28854 Spider: Disallow INSERT DELAYED on Spider table
+--echo #
+
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+
+--connection child2_1
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+CREATE TABLE tbl_a (id INT);
+
+--connection master_1
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+eval CREATE TABLE tbl_a (
+  id INT
+) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"';
+
+--connection master_1
+--error ER_DELAYED_NOT_SUPPORTED
+INSERT DELAYED INTO tbl_a VALUES (1);
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_query_log
+--enable_result_log
diff --git a/storage/spider/spd_db_conn.h b/storage/spider/spd_db_conn.h
index e820851d257..2abe62fc001 100644
--- a/storage/spider/spd_db_conn.h
+++ b/storage/spider/spd_db_conn.h
@@ -34,7 +34,6 @@
 #define SPIDER_DB_INSERT_IGNORE (1 << 1)
 #define SPIDER_DB_INSERT_LOW_PRIORITY (1 << 2)
 #define SPIDER_DB_INSERT_HIGH_PRIORITY (1 << 3)
-#define SPIDER_DB_INSERT_DELAYED (1 << 4)

 #define SPIDER_SQL_OPEN_PAREN_STR "("
 #define SPIDER_SQL_OPEN_PAREN_LEN (sizeof(SPIDER_SQL_OPEN_PAREN_STR) - 1)
@@ -76,8 +75,6 @@
 #define SPIDER_SQL_HIGH_PRIORITY_LEN (sizeof(SPIDER_SQL_HIGH_PRIORITY_STR) - 1)
 #define SPIDER_SQL_LOW_PRIORITY_STR "low_priority "
 #define SPIDER_SQL_LOW_PRIORITY_LEN (sizeof(SPIDER_SQL_LOW_PRIORITY_STR) - 1)
-#define SPIDER_SQL_SQL_DELAYED_STR "delayed "
-#define SPIDER_SQL_SQL_DELAYED_LEN (sizeof(SPIDER_SQL_SQL_DELAYED_STR) - 1)
 #define SPIDER_SQL_SQL_IGNORE_STR "ignore "
 #define SPIDER_SQL_SQL_IGNORE_LEN (sizeof(SPIDER_SQL_SQL_IGNORE_STR) - 1)
 #define SPIDER_SQL_FROM_STR " from "
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 85f910aa661..782e7efac9d 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -9550,7 +9550,6 @@ int spider_mbase_handler::append_insert(
   spider_string *str,
   int link_idx
 ) {
-  SPIDER_SHARE *share = spider->share;
   DBUG_ENTER("spider_mbase_handler::append_insert");
   direct_insert_kind = SPIDER_SQL_DIRECT_INSERT_KIND_INSERT;
   if (
@@ -9577,15 +9576,6 @@ int spider_mbase_handler::append_insert(
       DBUG_RETURN(HA_ERR_OUT_OF_MEM);
     str->q_append(SPIDER_SQL_LOW_PRIORITY_STR, SPIDER_SQL_LOW_PRIORITY_LEN);
   }
-  else if (spider->insert_delayed)
-  {
-    if (share->internal_delayed)
-    {
-      if (str->reserve(SPIDER_SQL_SQL_DELAYED_LEN))
-        DBUG_RETURN(HA_ERR_OUT_OF_MEM);
-      str->q_append(SPIDER_SQL_SQL_DELAYED_STR, SPIDER_SQL_SQL_DELAYED_LEN);
-    }
-  }
   else if (
     spider->lock_type >= TL_WRITE &&
     !spider->write_can_replace &&
@@ -16270,12 +16260,6 @@ int spider_mbase_copy_table::append_insert_str(
       DBUG_RETURN(HA_ERR_OUT_OF_MEM);
     sql.q_append(SPIDER_SQL_LOW_PRIORITY_STR, SPIDER_SQL_LOW_PRIORITY_LEN);
   }
-  else if (insert_flg & SPIDER_DB_INSERT_DELAYED)
-  {
-    if (sql.reserve(SPIDER_SQL_SQL_DELAYED_LEN))
-      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
-    sql.q_append(SPIDER_SQL_SQL_DELAYED_STR, SPIDER_SQL_SQL_DELAYED_LEN);
-  }
   else if (insert_flg & SPIDER_DB_INSERT_HIGH_PRIORITY)
   {
     if (sql.reserve(SPIDER_SQL_HIGH_PRIORITY_LEN))
diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h
index 95064fa4c72..81d812280d7 100644
--- a/storage/spider/spd_include.h
+++ b/storage/spider/spd_include.h
@@ -942,7 +942,6 @@ typedef struct st_spider_share
   int selupd_lock_mode;
   int query_cache;
   int query_cache_sync;
-  int internal_delayed;
   int bulk_size;
   int bulk_update_mode;
   int bulk_update_size;
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index 9d3846cb43b..7a00061e3a2 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -2043,7 +2043,6 @@ int spider_parse_connect_info(
   share->selupd_lock_mode = -1;
   share->query_cache = -1;
   share->query_cache_sync = -1;
-  share->internal_delayed = -1;
   share->bulk_size = -1;
   share->bulk_update_mode = -1;
   share->bulk_update_size = -1;
@@ -2266,7 +2265,6 @@ int spider_parse_connect_info(
         SPIDER_PARAM_STR_LIST("hws", hs_write_socks);
 #endif
         SPIDER_PARAM_INT("isa", init_sql_alloc_size, 0);
-        SPIDER_PARAM_INT_WITH_MAX("idl", internal_delayed, 0, 1);
         SPIDER_PARAM_LONGLONG("ilm", internal_limit, 0);
         SPIDER_PARAM_LONGLONG("ios", internal_offset, 0);
         SPIDER_PARAM_INT_WITH_MAX("iom", internal_optimize, 0, 1);
@@ -2492,8 +2490,6 @@ int spider_parse_connect_info(
         SPIDER_PARAM_INT_WITH_MAX(
           "selupd_lock_mode", selupd_lock_mode, 0, 2);
         SPIDER_PARAM_INT_WITH_MAX(
-          "internal_delayed", internal_delayed, 0, 1);
-        SPIDER_PARAM_INT_WITH_MAX(
           "table_count_mode", table_count_mode, 0, 3);
         SPIDER_PARAM_INT_WITH_MAX(
           "use_pushdown_udf", use_pushdown_udf, 0, 1);
@@ -3817,8 +3813,6 @@ int spider_set_connect_info_default(
     share->query_cache = 0;
   if (share->query_cache_sync == -1)
     share->query_cache_sync = 0;
-  if (share->internal_delayed == -1)
-    share->internal_delayed = 0;
   if (share->bulk_size == -1)
     share->bulk_size = 16000;
   if (share->bulk_update_mode == -1)
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
index 14b6052a7d3..9b79cc21875 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
@@ -1123,5 +1123,12 @@ CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam
 INSERT INTO t1 VALUES (1,1),(2,2);
 UPDATE t1 SET pk = 0;
 DROP TABLE t1;
+#
+# MDEV-21027 Assertion `part_share->auto_inc_initialized || !can_use_for_auto_inc_init()'
+# ha_partition::set_auto_increment_if_higher
+#
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE='TokuDB' PARTITION BY HASH (a) PARTITIONS 3;
+REPLACE INTO t1 PARTITION (p0) VALUES (3);
+DROP TABLE t1;
 ##############################################################################
 SET GLOBAL tokudb_prelock_empty = @tokudb_prelock_empty_saved;
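
Reviewer note on the Spider hunks above: insert_delayed, SPIDER_DB_INSERT_DELAYED, the "delayed " SQL fragment and the internal_delayed table option are all removed, but the behaviour checked by the new mdev_28854 test comes from dropping HA_CAN_INSERT_DELAYED out of ha_spider::table_flags(): a handler that does not advertise that flag makes the server refuse INSERT DELAYED with ER_DELAYED_NOT_SUPPORTED. The sketch below is illustrative only; the flag values and the check are simplified stand-ins, not the real server definitions.

// Toy model of the table_flags() contract; values and check are made up.
#include <cstdio>
#include <cstdint>

static const uint64_t HA_CAN_BIT_FIELD      = 1ULL << 0;  // stand-in value
static const uint64_t HA_CAN_INSERT_DELAYED = 1ULL << 1;  // stand-in value

struct handler_stub {
  virtual uint64_t table_flags() const = 0;
  virtual ~handler_stub() = default;
};

struct spider_like_handler : handler_stub {
  uint64_t table_flags() const override {
    // HA_CAN_INSERT_DELAYED is intentionally no longer OR-ed in,
    // mirroring the ha_spider::table_flags() hunk above.
    return HA_CAN_BIT_FIELD;
  }
};

static bool delayed_insert_allowed(const handler_stub &h) {
  return (h.table_flags() & HA_CAN_INSERT_DELAYED) != 0;
}

int main() {
  spider_like_handler h;
  if (!delayed_insert_allowed(h))
    std::puts("ERROR HY000: DELAYED option not supported for table 'tbl_a'");
}

Keeping the rejection in the table_flags() contract rather than in Spider's own SQL generation presumably is the point of MDEV-28854: the statement fails up front on the local server instead of Spider forwarding a DELAYED clause to the data node.
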