author:    Oleksandr Byelkin <sanja@mariadb.com>  2023-03-31 21:32:41 +0200
committer: Oleksandr Byelkin <sanja@mariadb.com>  2023-03-31 21:32:41 +0200
commit:    ac5a534a4caa6c86762e721dfe7183be2fee29ca (patch)
tree:      a3d40e82beeef165e5965aec282a458b1febf23a
parent:    e093e5abbed1a7883b8a78935c11505bd0bcb0d6 (diff)
parent:    eaebe8b5600b144c51a9405de42a70bd4b710987 (diff)
download:  mariadb-git-ac5a534a4caa6c86762e721dfe7183be2fee29ca.tar.gz
Merge remote-tracking branch '10.4' into 10.5
306 files changed, 10915 insertions, 6610 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000000..39dae0facb8
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,494 @@
+---
+# This Gitlab-CI pipeline offers basic validation that a commit did not
+# introduce easily detectable regressions. Builds run primarily on a new Fedora,
+# which has all the latest upstream build dependencies and thus is the primary
+# testing target, as eventually everything in Fedora becomes the next CentOS and
+# Red Hat releases.
+#
+# In addition, test building on CentOS 7 and 8 to ensure that the code base
+# remains reasonably backwards compatible.
+#
+# This is now intentionally simple, to keep it fast and accurate with minimal
+# false positive failures. If one wants to extend it, see debian/salsa-ci.yml
+# for inspiration on more integration tests to run.
+#
+# Also make sure the pipeline stays within the bounds of what CI workers on
+# Gitlab-CI are capable of executing, thus ensuring that any potential
+# contributor can at any point in time fork to their own Gitlab account and
+# start working towards meaningful contributions!
+#
+# NOTE TO MERGERS: Most of the contents of the Gitlab-CI configuration have been
+# tailored for a specific release of MariaDB. As a general rule, do not merge
+# changes in this file across MariaDB branches to avoid breaking the CI. Updates
+# to the Gitlab-CI pipeline are most of the time better done manually per major
+# release branch.
+
+stages:
+  - build
+  - test
+  - Salsa-CI
+
+default:
+  # Base image for builds and tests unless otherwise defined
+  image: fedora:latest
+  # Extend build jobs to have a longer timeout as the default GitLab
+  # timeout (1h) is often not enough
+  timeout: 3h
+
+# Define common CMAKE_FLAGS for all builds. Slim down the build by omitting all
+# submodules (a commit in this repo does not affect their builds anyway) and
+# many components that are otherwise slow to build.
+variables:
+  CMAKE_FLAGS: "-DPLUGIN_COLUMNSTORE=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_S3=NO -DPLUGIN_MROONGA=NO -DPLUGIN_CONNECT=NO -DPLUGIN_MROONGA=NO -DPLUGIN_TOKUDB=NO -DPLUGIN_PERFSCHEMA=NO -DWITH_WSREP=OFF"
+  # Major version dictates which branches share the same ccache. E.g. 10.6-abc
+  # and 10.6-xyz will have the same cache.
+  MARIADB_MAJOR_VERSION: "10.6"
+  # NOTE! Currently ccache is only used on the Centos8 build. As each job has
+  # sufficiently different environments they are unable to benefit from each
+  # other's ccaches. As each build generates about 1 GB of ccache, having
+  # multiple caches would quickly consume all free storage on Gitlab-CI and
+  # grind all builds to a halt. Also the network overhead of download/upload
+  # decreases the benefit of ccache in Gitlab-CI, and current cache:when and
+  # cache:policy are not flexible enough to have a system where the cache is
+  # uploaded only once a week and not on every build. Having ccache on at least
+  # one build still helps ensure that ccache compatibility is at least tested,
+  # and if the Centos 8 build is always significantly faster than all other
+  # builds (e.g. on self-hosted Gitlab instances) then users would at least be
+  # able to discover it.
+  #
+  # Most steps don't need the source code, only artifacts
+  GIT_STRATEGY: none
+  # Hack to satisfy directory name length requirement by CPackRPM in CMake 3.x
+  # https://cmake.org/cmake/help/v3.7/module/CPackRPM.html#variable:CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX
+  GIT_CLONE_PATH: $CI_BUILDS_DIR/CPACK_BUILD_SOURCE_DIRS_LONG_NAME_REQUIREMENT
+
+# Define once, use many times
+.rpm_listfiles: &rpm_listfiles
+  - |
+    echo "Generating rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log ..."
+    for package in *.rpm
+    do
+      echo "$package"
+      rpm -qlpv "$package" | awk '{print $1 " " $3 "/" $4 " ." $9 " " $10 " " $11}' | sort -k 3
+      echo "------------------------------------------------"
+    done >> "../rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log"
+  # CPackRPM lists contents in build log, so no need to show the output of this,
+  # just store it as a build artifact that can be downloaded and diffed against
+  # other builds to detect which files were added/removed/moved
+
+fedora:
+  stage: build
+  variables:
+    GIT_STRATEGY: fetch
+    GIT_SUBMODULE_STRATEGY: normal
+  script:
+    - yum install -y yum-utils rpm-build openssl-devel graphviz clang gnutls-devel
+    # Accelerate builds with unsafe disk access, as we can afford to lose the entire build anyway
+    - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm
+    # This repository does not have any .spec files, so install dependencies based on Fedora spec file
+    - yum-builddep -y mariadb-server
+    - mkdir builddir; cd builddir
+    - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=bundled .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log
+    - cmake --graphviz=../dependencies.dot .. && dot -Tpng -o ../dependencies.png ../dependencies.dot
+    - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log
+    # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just
+    # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968
+    - make test
+    # - make test-force # mysql-test-runner takes too long, run MTR in a separate job instead
+    - *rpm_listfiles
+    - mkdir ../rpm; mv *.rpm ../rpm
+  artifacts:
+    when: always # Must be able to see logs
+    paths:
+      - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log
+      - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log
+      - rpm
+      - builddir/_CPack_Packages/Linux/RPM/SPECS/
+      - dependencies.dot
+      - dependencies.png
+
+fedora-ninja:
+  stage: build
+  variables:
+    GIT_STRATEGY: fetch
+    GIT_SUBMODULE_STRATEGY: normal
+  script:
+    - yum install -y yum-utils rpm-build openssl-devel graphviz ninja-build gnutls-devel
+    # Accelerate builds with unsafe disk access, as we can afford to lose the entire build anyway
+    - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm
+    # This repository does not have any .spec files, so install dependencies based on Fedora spec file
+    - yum-builddep -y mariadb-server
+    - mkdir builddir; cd builddir
+    - cmake -DRPM=generic $CMAKE_FLAGS -DWITH_SSL=bundled -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -G Ninja ..
2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - ninja -t graph > ../dependencies.dot && dot -Tpng -o ../dependencies.png ../dependencies.dot + - eatmydata ninja package -j 2 --verbose 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + # @TODO: Unlike other builds, the Ninja builds using Gitlab.com runners don't get stuck, but they do get + # stuck on runners with more processors, see https://jira.mariadb.org/browse/MDEV-25968. + # Thus, use the same limitation on Ninja builds as well to ensure it never gets stuck due to this bug. + - ninja test + - *rpm_listfiles + - mkdir ../rpm; mv *.rpm ../rpm + artifacts: + when: always # Must be able to see logs + paths: + - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpm + - builddir/_CPack_Packages/Linux/RPM/SPECS/ + - dependencies.dot + - dependencies.png + +fedora-clang: + stage: build + variables: + GIT_STRATEGY: fetch + GIT_SUBMODULE_STRATEGY: normal + script: + - yum install -y yum-utils rpm-build openssl-devel graphviz clang gnutls-devel + # Accelerate builds with unsafe disk access, as we can afford to loose the entire build anyway + - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm + # This repository does not have any .spec files, so install dependencies based on Fedora spec file + - yum-builddep -y mariadb-server + - mkdir builddir; cd builddir + - export CXX=${CXX:-clang++} + - export CC=${CC:-clang} + - export CXX_FOR_BUILD=${CXX_FOR_BUILD:-clang++} + - export CC_FOR_BUILD=${CC_FOR_BUILD:-clang} + - export CFLAGS='-Wno-unused-command-line-argument' + - export CXXFLAGS='-Wno-unused-command-line-argument' + - cmake -DRPM=generic $CMAKE_FLAGS -DWITH_SSL=bundled .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake --graphviz=../dependencies.dot .. && dot -Tpng -o ../dependencies.png ../dependencies.dot + - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just + # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968 + - make test + # - make test-force # mysql-test-runner takes too long, run MTr in a separate job instead + - *rpm_listfiles + - mkdir ../rpm; mv *.rpm ../rpm + artifacts: + when: always # Must be able to see logs + paths: + - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpm + - builddir/_CPack_Packages/Linux/RPM/SPECS/ + - dependencies.dot + - dependencies.png + +fedora-sanitizer: + stage: build + variables: + GIT_STRATEGY: fetch + GIT_SUBMODULE_STRATEGY: normal + script: + - yum install -y yum-utils rpm-build openssl-devel clang gnutls-devel + - yum install -y libasan libtsan libubsan + # This repository does not have any .spec files, so install dependencies based on Fedora spec file + - yum-builddep -y mariadb-server + - mkdir builddir; cd builddir + - export CXX=${CXX:-clang++} + - export CC=${CC:-clang} + - export CXX_FOR_BUILD=${CXX_FOR_BUILD:-clang++} + - export CC_FOR_BUILD=${CC_FOR_BUILD:-clang} + - export CFLAGS='-Wno-unused-command-line-argument' + - export CXXFLAGS='-Wno-unused-command-line-argument' + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=bundled $SANITIZER .. 
2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + # @TODO: the build will fail consistently at 24% when trying to make using eatmydata + - make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - *rpm_listfiles + - mkdir ../rpm; mv *.rpm ../rpm + artifacts: + when: always # Must be able to see logs + paths: + - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpm + - builddir/_CPack_Packages/Linux/RPM/SPECS/ + parallel: + matrix: + - SANITIZER: [-DWITH_ASAN=YES, -DWITH_TSAN=YES, -DWITH_UBSAN=YES, -DWITH_MSAN=YES] + +centos8: + stage: build + image: quay.io/centos/centos:stream8 # CentOS 8 is deprecated, use this Stream8 instead + variables: + GIT_STRATEGY: fetch + GIT_SUBMODULE_STRATEGY: normal + script: + - yum install -y yum-utils rpm-build openssl-devel pcre2-devel + - yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + # dnf --enablerepo=powertools install Judy-devel #--> not found + - dnf config-manager --set-enabled powertools + # Error: + # Problem: conflicting requests + # - package Judy-devel-1.0.5-18.module_el8.3.0+757+d382997d.i686 is filtered out by modular filtering + # - package Judy-devel-1.0.5-18.module_el8.3.0+757+d382997d.x86_64 is filtered out by modular filtering + # Solution: install Judy-devel directly from downloaded rpm file: + - yum install -y http://vault.centos.org/centos/8/PowerTools/x86_64/os/Packages/Judy-devel-1.0.5-18.module_el8.3.0+757+d382997d.x86_64.rpm + # Use eatmydata to speed up build + - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm + - yum install -y ccache # From EPEL + - source /etc/profile.d/ccache.sh + - export CCACHE_DIR="$(pwd)/.ccache"; ccache --zero-stats + # This repository does not have any .spec files, so install dependencies based on CentOS spec file + - yum-builddep -y mariadb-server + - mkdir builddir; cd builddir + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=system .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just + # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968 + - make test + # - make test-force # mysql-test-runner takes too long, run it MTR a separate job instead + - *rpm_listfiles + - mkdir ../rpm; mv *.rpm ../rpm + - ccache -s + artifacts: + when: always # Must be able to see logs + paths: + - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpm + - builddir/_CPack_Packages/Linux/RPM/SPECS/ + cache: + key: $MARIADB_MAJOR_VERSION + paths: + - .ccache + +centos7: + stage: build + image: centos:7 + variables: + GIT_STRATEGY: fetch + GIT_SUBMODULE_STRATEGY: normal + script: + # This repository does not have any .spec files, so install dependencies based on Fedora spec file + - yum-builddep -y mariadb-server + # ..with a few extra ones, as CentOS 7 is very old and these are added in newer MariaDB releases + - yum install -y yum-utils rpm-build gcc gcc-c++ bison libxml2-devel libevent-devel openssl-devel pcre2-devel + - mkdir builddir; cd builddir + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=system .. 
2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just + # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968 + - make test + # - make test-force # mysql-test-runner takes too long, run it in a separate job instead + - *rpm_listfiles + - mkdir ../rpm; mv *.rpm ../rpm + artifacts: + when: always # Must be able to see logs + paths: + - build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpmlist-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - rpm + - builddir/_CPack_Packages/Linux/RPM/SPECS/ + +.mysql-test-run: &mysql-test-run-def + stage: test + script: + # Install packages so tests and the dependencies install + # @TODO: RPM missing 'patch' and 'diff' as dependency, so installing it manually for now + - yum install -y rpm/*.rpm patch diffutils + # @TODO: Fix on packaging level for /usr/share/mariadb to work and errormsg.sys be found + - rm -rf /usr/share/mariadb; ln -s /usr/share/mysql /usr/share/mariadb + # mtr expects to be launched in-place and with write access to it's own directories + - cd /usr/share/mysql-test + # Skip failing tests + - | + echo " + main.mysqldump : Field separator argument is not what is expected; check the manual when executing 'SELECT INTO OUTFILE' + main.flush_logs_not_windows : query 'flush logs' succeeded - should have failed with error ER_CANT_CREATE_FILE (1004) + main.mysql_upgrade_noengine : upgrade output order does not match the expected + " > skiplist + - ./mtr --suite=main --force --parallel=auto --xml-report=$CI_PROJECT_DIR/junit.xml --skip-test-list=skiplist $RESTART_POLICY + +mysql-test-run: + stage: test + dependencies: + - fedora + needs: + - fedora + <<: *mysql-test-run-def + artifacts: + when: always # Also show results when tests fail + reports: + junit: + - junit.xml + +# Duplicate of the above jobs, except we use sanitizer build jobs as a dependency. This is so we can keep +# sanitizer errors separate from functional test failures. Currently, there is no way to run the same +# job for different dependencies. +# +# Additionally, for each sanitizer MTR job, we enable --force-restart so that +# sanitizer errors can be traced to individual tests. The difference in test +# suite runtime as a result of this flag is negligable (~30s for the entire test suite). 
+# (see https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_MYSQL_TEST_RUN_PL.html) +mysql-test-run-asan: + stage: test + variables: + RESTART_POLICY: "--force-restart" + dependencies: + - "fedora-sanitizer: [-DWITH_ASAN=YES]" + needs: + - "fedora-sanitizer: [-DWITH_ASAN=YES]" + <<: *mysql-test-run-def + artifacts: + when: always # Also show results when tests fail + reports: + junit: + - junit.xml + +mysql-test-run-tsan: + stage: test + variables: + RESTART_POLICY: "--force-restart" + dependencies: + - "fedora-sanitizer: [-DWITH_TSAN=YES]" + needs: + - "fedora-sanitizer: [-DWITH_TSAN=YES]" + <<: *mysql-test-run-def + allow_failure: true + artifacts: + when: always # Also show results when tests fail + reports: + junit: + - junit.xml + +mysql-test-run-ubsan: + stage: test + variables: + RESTART_POLICY: "--force-restart" + dependencies: + - "fedora-sanitizer: [-DWITH_UBSAN=YES]" + needs: + - "fedora-sanitizer: [-DWITH_UBSAN=YES]" + <<: *mysql-test-run-def + allow_failure: true + artifacts: + when: always # Also show results when tests fail + reports: + junit: + - junit.xml + +mysql-test-run-msan: + stage: test + variables: + RESTART_POLICY: "--force-restart" + dependencies: + - "fedora-sanitizer: [-DWITH_MSAN=YES]" + needs: + - "fedora-sanitizer: [-DWITH_MSAN=YES]" + <<: *mysql-test-run-def + allow_failure: true + artifacts: + when: always # Also show results when tests fail + reports: + junit: + - junit.xml + +rpmlint: + stage: test + dependencies: + - fedora + needs: + - fedora + script: + - yum install -y rpmlint + - rm -f rpm/*debuginfo* # Not relevant in this test + # Limit output to 1000 lines as Gitlab-CI max output is 4194304 bytes + # Save everything in a log file so it can be viewed in full via artifacts + - rpmlint --info rpm/*.rpm | tee -a rpmlint-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + artifacts: + when: always # Also show results when tests fail + paths: + - rpmlint-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + allow_failure: true + # @TODO: The package is not rpmlint clean, must allow failure for now + +fedora install: + stage: test + dependencies: + - fedora + needs: + - fedora + script: + - rm -f rpm/*debuginfo* # Not relevant in this test + # Nothing provides galera-4 on Fedora, so this step fails if built with wsrep + - yum install -y rpm/*.rpm + # Fedora does not support running services in Docker (like Debian packages do) so start it manually + - /usr/bin/mariadb-install-db -u mysql + - sudo -u mysql /usr/sbin/mariadbd & sleep 10 + # Dump database contents as is before upgrade + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > installed-database.sql + # Since we did a manual start, we also need to run upgrade manually + - /usr/bin/mariadb-upgrade -u root + # Dump database contents as is after upgrade + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > upgraded-database.sql + - | + mariadb --skip-column-names -e "SELECT @@version, @@version_comment" | tee /tmp/version + grep $MARIADB_MAJOR_VERSION /tmp/version || echo "MariaDB didn't install properly" + - mariadb --table -e "SELECT * FROM mysql.global_priv; SHOW CREATE USER root@localhost; SHOW CREATE USER 'mariadb.sys'@localhost" + - mariadb --table -e "SELECT * FROM mysql.plugin; SHOW PLUGINS" + - mariadb -e "SHUTDOWN;" + - rm -rf /var/lib/mysql/* # Clear datadir before next run + # Start database without install-db step + - sudo -u mysql /usr/sbin/mariadbd --skip-network --skip-grant & sleep 10 + # Dump database 
contents in initial state + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > empty-database.sql + artifacts: + paths: + - installed-database.sql + - upgraded-database.sql + +fedora upgrade: + stage: test + dependencies: + - fedora + needs: + - fedora + script: + - dnf install -y mariadb-server + # Fedora does not support running services in Docker (like Debian packages do) so start it manually + - /usr/libexec/mariadb-check-socket + - /usr/libexec/mariadb-prepare-db-dir + - sudo -u mysql /usr/libexec/mariadbd --basedir=/usr & sleep 10 + # Dump database contents in installed state + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > old-installed-database.sql + - /usr/libexec/mariadb-check-upgrade + # Dump database contents in upgraded state + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > old-upgraded-database.sql + - mariadb --skip-column-names -e "SELECT @@version, @@version_comment" # Show version + # @TODO: Upgrade from Fedora 33 MariaDB 10.4 to MariaDB.org latest does not work + # so do this manual step to remove conflicts until packaging is fixed + - yum remove -y mariadb-server-utils mariadb-gssapi-server mariadb-cracklib-password-check mariadb-backup mariadb-connector-c-config + - rm -f rpm/*debuginfo* # Not relevant in this test + - yum install -y rpm/*.rpm + # nothing provides galera-4 on Fedora, so this step fails if built with wsrep + - mysql -e "SHUTDOWN;" + - /usr/bin/mariadb-install-db # This step should not do anything on upgrades, just exit + - sudo -u mysql /usr/sbin/mariadbd & sleep 10 + # Dump database contents in installed state + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > new-installed-database.sql || true + # The step above fails on: mariadb-dump: Couldn't execute 'show events': Cannot proceed, because event scheduler is disabled (1577) + # @TODO: Since we did a manual start, we also need to run upgrade manually + - /usr/bin/mariadb-upgrade + # Dump database contents in upgraded state + - mariadb-dump --all-databases --all-tablespaces --triggers --routines --events --skip-extended-insert > new-upgraded-database.sql + - | + mariadb --skip-column-names -e "SELECT @@version, @@version_comment" | tee /tmp/version + grep $MARIADB_MAJOR_VERSION /tmp/version || echo "MariaDB didn't upgrade properly" + - mariadb --table -e "SELECT * FROM mysql.global_priv; SHOW CREATE USER root@localhost; SHOW CREATE USER 'mariadb.sys'@localhost" + - mariadb --table -e "SELECT * FROM mysql.plugin; SHOW PLUGINS" + artifacts: + paths: + - old-installed-database.sql + - old-upgraded-database.sql + - new-installed-database.sql + - new-upgraded-database.sql + +# Once all RPM builds and tests have passed, also run the DEB builds and tests +# @NOTE: This is likely to work well only on salsa.debian.org as the Gitlab.com +# runners are too small for everything this stage does. 
+# build_deb: +# stage: Salsa-CI +# trigger: +# include: debian/salsa-ci.yml diff --git a/.gitmodules b/.gitmodules index 3847bf6bff3..18bcb465fa2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,25 +1,19 @@ [submodule "libmariadb"] path = libmariadb url = https://github.com/MariaDB/mariadb-connector-c.git - ignore = all [submodule "storage/rocksdb/rocksdb"] path = storage/rocksdb/rocksdb url = https://github.com/facebook/rocksdb.git - ignore = all [submodule "wsrep-lib"] path = wsrep-lib url = https://github.com/codership/wsrep-lib.git branch = master - ignore = all [submodule "extra/wolfssl/wolfssl"] path = extra/wolfssl/wolfssl url = https://github.com/wolfSSL/wolfssl.git - ignore = all [submodule "storage/maria/libmarias3"] path = storage/maria/libmarias3 url = https://github.com/mariadb-corporation/libmarias3.git - ignore = all [submodule "storage/columnstore/columnstore"] path = storage/columnstore/columnstore url = https://github.com/mariadb-corporation/mariadb-columnstore-engine.git - ignore = all diff --git a/client/mysql.cc b/client/mysql.cc index b1d2bc8def6..e47527af390 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -5369,6 +5369,7 @@ static void init_username() full_username=my_strdup(PSI_NOT_INSTRUMENTED, cur[0],MYF(MY_WME)); part_username=my_strdup(PSI_NOT_INSTRUMENTED, strtok(cur[0],"@"),MYF(MY_WME)); (void) mysql_fetch_row(result); // Read eof + mysql_free_result(result); } } diff --git a/client/mysql_plugin.c b/client/mysql_plugin.c index f6a08558722..73fa43752a9 100644 --- a/client/mysql_plugin.c +++ b/client/mysql_plugin.c @@ -102,7 +102,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ plugin_data.name= 0; /* initialize name */ - + /* The following operations comprise the method for enabling or disabling a plugin. We begin by processing the command options then check the @@ -110,15 +110,15 @@ int main(int argc,char *argv[]) --plugin-ini (if specified). If the directories are Ok, we then look for the mysqld executable and the plugin soname. Finally, we build a bootstrap command file for use in bootstraping the server. - + If any step fails, the method issues an error message and the tool exits. - + 1) Parse, execute, and verify command options. 2) Check access to directories. 3) Look for mysqld executable. 4) Look for the plugin. 5) Build a bootstrap file with commands to enable or disable plugin. - + */ if ((error= process_options(argc, argv, operation)) || (error= check_access()) || @@ -126,11 +126,11 @@ int main(int argc,char *argv[]) (error= find_plugin(tp_path)) || (error= build_bootstrap_file(operation, bootstrap))) goto exit; - + /* Dump the bootstrap file if --verbose specified. */ if (opt_verbose && ((error= dump_bootstrap_file(bootstrap)))) goto exit; - + /* Start the server in bootstrap mode and execute bootstrap commands */ error= bootstrap_server(server_path, bootstrap); @@ -238,7 +238,7 @@ static int run_command(char* cmd, const char *mode) #ifdef __WIN__ /** Check to see if there are spaces in a path. - + @param[in] path The Windows path to examine. @retval int spaces found = 1, no spaces = 0 @@ -253,7 +253,7 @@ static int has_spaces(const char *path) /** Convert a Unix path to a Windows path. - + @param[in] path The Windows path to examine. 
@returns string containing path with / changed to \\ @@ -335,12 +335,12 @@ static int get_default_values() #ifdef __WIN__ { char *format_str= 0; - + if (has_spaces(tool_path) || has_spaces(defaults_file)) format_str = "\"%s --mysqld > %s\""; else format_str = "%s --mysqld > %s"; - + snprintf(defaults_cmd, sizeof(defaults_cmd), format_str, add_quotes(tool_path), add_quotes(defaults_file)); if (opt_verbose) @@ -675,7 +675,7 @@ static int load_plugin_data(char *plugin_name, char *config_file) { reason= "Bad format in plugin configuration file."; fclose(file_ptr); - goto error; + goto error; } break; } @@ -709,7 +709,7 @@ static int load_plugin_data(char *plugin_name, char *config_file) } } } - + fclose(file_ptr); return 0; @@ -740,7 +740,7 @@ static int check_options(int argc, char **argv, char *operation) int num_found= 0; /* number of options found (shortcut loop) */ char config_file[FN_REFLEN]; /* configuration file name */ char plugin_name[FN_REFLEN]; /* plugin name */ - + /* Form prefix strings for the options. */ const char *basedir_prefix = "--basedir="; size_t basedir_len= strlen(basedir_prefix); @@ -815,7 +815,7 @@ static int check_options(int argc, char **argv, char *operation) return 1; } /* If a plugin was specified, read the config file. */ - else if (strlen(plugin_name) > 0) + else if (strlen(plugin_name) > 0) { if (load_plugin_data(plugin_name, config_file)) { @@ -847,22 +847,22 @@ static int check_options(int argc, char **argv, char *operation) /** Parse, execute, and verify command options. - + This method handles all of the option processing including the optional features for displaying data (--print-defaults, --help ,etc.) that do not result in an attempt to ENABLE or DISABLE of a plugin. - + @param[in] arc Count of arguments @param[in] argv Array of arguments @param[out] operation Operation (ENABLE or DISABLE) - + @retval int error = 1, success = 0, exit program = -1 */ static int process_options(int argc, char *argv[], char *operation) { int error= 0; - + /* Parse and execute command-line options */ if ((error= handle_options(&argc, &argv, my_long_options, get_one_option))) return error; @@ -881,7 +881,7 @@ static int process_options(int argc, char *argv[], char *operation) char buff[FN_REFLEN]; if (basedir_len + 2 > FN_REFLEN) return -1; - + memcpy(buff, opt_basedir, basedir_len); buff[basedir_len]= '/'; buff[basedir_len + 1]= '\0'; @@ -890,7 +890,7 @@ static int process_options(int argc, char *argv[], char *operation) opt_basedir= my_strdup(PSI_NOT_INSTRUMENTED, buff, MYF(MY_FAE)); } } - + /* If the user did not specify the option to skip loading defaults from a config file and the required options are not present or there was an error @@ -925,18 +925,18 @@ static int process_options(int argc, char *argv[], char *operation) /** Check access - + This method checks to ensure all of the directories (opt_basedir, opt_plugin_dir, opt_datadir, and opt_plugin_ini) are accessible by the user. - + @retval int error = 1, success = 0 */ static int check_access() { int error= 0; - + if ((error= my_access(opt_basedir, F_OK))) { fprintf(stderr, "ERROR: Cannot access basedir at '%s'.\n", @@ -1048,13 +1048,13 @@ static int find_plugin(char *tp_path) /** Build the bootstrap file. - + Create a new file and populate it with SQL commands to ENABLE or DISABLE the plugin via REPLACE and DELETE operations on the mysql.plugin table. 
param[in] operation The type of operation (ENABLE or DISABLE) param[out] bootstrap A FILE* pointer - + @retval int error = 1, success = 0 */ @@ -1062,7 +1062,7 @@ static int build_bootstrap_file(char *operation, char *bootstrap) { int error= 0; FILE *file= 0; - + /* Perform plugin operation : ENABLE or DISABLE @@ -1073,10 +1073,10 @@ static int build_bootstrap_file(char *operation, char *bootstrap) <plugin_name>.ini configuration file. Once the file is built, a call to mysqld is made in read only, bootstrap modes to read the SQL statements and execute them. - + Note: Replace was used so that if a user loads a newer version of a library with a different library name, the new library name is - used for symbols that match. + used for symbols that match. */ if ((error= make_tempfile(bootstrap, "sql"))) { @@ -1123,7 +1123,7 @@ static int build_bootstrap_file(char *operation, char *bootstrap) printf("# Disabling %s...\n", plugin_data.name); } } - + exit: fclose(file); return error; @@ -1132,11 +1132,11 @@ exit: /** Dump bootstrap file. - + Read the contents of the bootstrap file and print it out. - + @param[in] bootstrap_file Name of bootstrap file to read - + @retval int error = 1, success = 0 */ @@ -1173,7 +1173,7 @@ exit: /** Bootstrap the server - + Create a command line sequence to launch mysqld in bootstrap mode. This will allow mysqld to launch a minimal server instance to read and execute SQL commands from a file piped in (the bootstrap file). We use @@ -1194,47 +1194,39 @@ exit: static int bootstrap_server(char *server_path, char *bootstrap_file) { - char bootstrap_cmd[FN_REFLEN]; + char bootstrap_cmd[FN_REFLEN]= {0}; + char lc_messages_dir_str[FN_REFLEN]= {0}; int error= 0; #ifdef __WIN__ char *format_str= 0; const char *verbose_str= NULL; - - +#endif + + if (opt_lc_messages_dir != NULL) + snprintf(lc_messages_dir_str, sizeof(lc_messages_dir_str), "--lc-messages-dir=%s", + opt_lc_messages_dir); + +#ifdef __WIN__ if (opt_verbose) verbose_str= "--console"; else verbose_str= ""; + if (has_spaces(opt_datadir) || has_spaces(opt_basedir) || - has_spaces(bootstrap_file)) - { - if (opt_lc_messages_dir != NULL) - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s <%s\""; - else - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s <%s\""; - } + has_spaces(bootstrap_file) || has_spaces(lc_messages_dir_str)) + format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s %s <%s\""; else - { - if (opt_lc_messages_dir != NULL) - format_str= "\"%s %s --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s <%s\""; - else - format_str= "%s %s --bootstrap --datadir=%s --basedir=%s <%s"; - } + format_str= "%s %s --bootstrap --datadir=%s --basedir=%s %s <%s"; + snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), format_str, add_quotes(convert_path(server_path)), verbose_str, add_quotes(opt_datadir), add_quotes(opt_basedir), - add_quotes(bootstrap_file)); + add_quotes(lc_messages_dir_str), add_quotes(bootstrap_file)); #else - if (opt_lc_messages_dir != NULL) - snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), - "%s --no-defaults --bootstrap --datadir=%s --basedir=%s --lc-messages-dir=%s" - " <%s", server_path, opt_datadir, opt_basedir, opt_lc_messages_dir, bootstrap_file); - else - snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), - "%s --no-defaults --bootstrap --datadir=%s --basedir=%s" - " <%s", server_path, opt_datadir, opt_basedir, bootstrap_file); - + snprintf(bootstrap_cmd, sizeof(bootstrap_cmd), + "%s --no-defaults --bootstrap --datadir=%s --basedir=%s %s" + " <%s", 
server_path, opt_datadir, opt_basedir, lc_messages_dir_str, bootstrap_file); #endif /* Execute the command */ @@ -1247,6 +1239,6 @@ static int bootstrap_server(char *server_path, char *bootstrap_file) fprintf(stderr, "ERROR: Unexpected result from bootstrap. Error code: %d.\n", error); - + return error; } diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index ad54b06101e..598045a013b 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -76,6 +76,8 @@ char upgrade_from_version[sizeof("10.20.456-MariaDB")+30]; static my_bool opt_write_binlog; +static void print_conn_args(const char *tool_name); + #define OPT_SILENT OPT_MAX_CLIENT_OPTION static struct my_option my_long_options[]= @@ -155,7 +157,10 @@ static struct my_option my_long_options[]= GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"user", 'u', "User for login.", &opt_user, &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"verbose", 'v', "Display more output about the process; Using it twice will print connection argument; Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase.", + {"verbose", 'v', "Display more output about the process; Using it twice will print connection argument;" + "Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase;" + "Using it 4 times (added in MariaDB 10.0.14) will also write out all mariadb-check commands used;" + "Using it 5 times will print all the mariadb commands used and their results while running mysql_fix_privilege_tables script.", &opt_not_used, &opt_not_used, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -210,6 +215,7 @@ static void die(const char *fmt, ...) DBUG_ENTER("die"); /* Print the error message */ + print_conn_args("mariadb-check"); fflush(stdout); va_start(args, fmt); if (fmt) @@ -640,6 +646,7 @@ static int run_query(const char *query, DYNAMIC_STRING *ds_res, "--database=mysql", "--batch", /* Turns off pager etc. */ force ? "--force": "--skip-force", + opt_verbose >= 5 ? "--verbose" : "", ds_res || opt_silent ? "--silent": "", "<", query_file_path, @@ -866,8 +873,7 @@ static int upgrade_already_done(int silent) "There is no need to run mysql_upgrade again for %s.", upgrade_from_version, version); if (!opt_check_upgrade) - verbose("You can use --force if you still want to run mysql_upgrade", - upgrade_from_version, version); + verbose("You can use --force if you still want to run mysql_upgrade"); } return 0; } @@ -1329,9 +1335,7 @@ static int run_sql_fix_privilege_tables(void) dynstr_append(&ds_script, *query_ptr); } - run_query(ds_script.str, - &ds_result, /* Collect result */ - TRUE); + run_query(ds_script.str, (opt_verbose >= 5) ? 
NULL : &ds_result, TRUE); { /* @@ -1502,6 +1506,7 @@ int main(int argc, char **argv) DBUG_ASSERT(phase == phases_total); end: + print_conn_args("mariadb-check"); free_used_memory(); my_end(my_end_arg); exit(0); diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 85043f03b53..8f29c83a441 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -1593,7 +1593,8 @@ static void print_relative_row_vert(MYSQL_RES *result __attribute__((unused)), llstr((tmp - last_values[row]), buff)); /* Find the minimum row length needed to output the relative value */ - if ((length=(uint) strlen(buff) > ex_val_max_len[row]) && ex_status_printed) + length=(uint) strlen(buff); + if (length > ex_val_max_len[row] && ex_status_printed) ex_val_max_len[row] = length; last_values[row] = tmp; } diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index ced13b402b0..9dd31c14e81 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -1832,15 +1832,20 @@ static void cleanup() my_free_open_file_info(); load_processor.destroy(); mysql_server_end(); + if (opt_flashback) + { + delete_dynamic(&binlog_events); + delete_dynamic(&events_in_stmt); + } DBUG_VOID_RETURN; } -static void die() +static void die(int err) { cleanup(); my_end(MY_DONT_FREE_DBUG); - exit(1); + exit(err); } @@ -1877,7 +1882,7 @@ static my_time_t convert_str_to_timestamp(const char* str) l_time.time_type != MYSQL_TIMESTAMP_DATETIME || status.warnings) { error("Incorrect date and time argument: %s", str); - die(); + die(1); } /* Note that Feb 30th, Apr 31st cause no error messages and are mapped to @@ -1940,7 +1945,7 @@ get_one_option(const struct my_option *opt, const char *argument, const char *) opt->name)) <= 0) { sf_leaking_memory= 1; /* no memory leak reports here */ - die(); + die(1); } break; #ifdef WHEN_FLASHBACK_REVIEW_READY @@ -1961,7 +1966,7 @@ get_one_option(const struct my_option *opt, const char *argument, const char *) opt->name)) <= 0) { sf_leaking_memory= 1; /* no memory leak reports here */ - die(); + die(1); } opt_base64_output_mode= (enum_base64_output_mode)(val - 1); break; @@ -2046,7 +2051,7 @@ static int parse_args(int *argc, char*** argv) if ((ho_error=handle_options(argc, argv, my_options, get_one_option))) { - die(); + die(ho_error); } if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; @@ -3003,6 +3008,12 @@ int main(int argc, char** argv) my_set_max_open_files(open_files_limit); + if (opt_flashback && opt_raw_mode) + { + error("The --raw mode is not allowed with --flashback mode"); + die(1); + } + if (opt_flashback) { my_init_dynamic_array(PSI_NOT_INSTRUMENTED, &binlog_events, @@ -3018,7 +3029,7 @@ int main(int argc, char** argv) if (!remote_opt) { error("The --raw mode only works with --read-from-remote-server"); - die(); + die(1); } if (one_database) warning("The --database option is ignored in raw mode"); @@ -3040,7 +3051,7 @@ int main(int argc, char** argv) O_WRONLY | O_BINARY, MYF(MY_WME)))) { error("Could not create log file '%s'", result_file_name); - die(); + die(1); } } else @@ -3129,7 +3140,7 @@ int main(int argc, char** argv) /* Set delimiter back to semicolon */ if (retval != ERROR_STOP) { - if (!stop_event_string.is_empty()) + if (!stop_event_string.is_empty() && result_file) fprintf(result_file, "%s", stop_event_string.ptr()); if (!opt_raw_mode && opt_flashback) fprintf(result_file, "DELIMITER ;\n"); diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index 147ebb81231..0803aa3224d 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -947,6 +947,7 @@ static int 
handle_request_for_tables(char *tables, size_t length, DBUG_RETURN(1); if (dont_quote) { + DBUG_ASSERT(op); DBUG_ASSERT(strlen(op)+strlen(tables)+strlen(options)+8+1 <= query_size); /* No backticks here as we added them before */ diff --git a/client/mysqldump.c b/client/mysqldump.c index e236fdf82f2..d649684b9d7 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -2527,7 +2527,10 @@ static uint dump_events_for_db(char *db) /* Get database collation. */ if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) + { + mysql_free_result(event_list_res); DBUG_RETURN(1); + } } if (switch_character_set_results(mysql, "binary")) @@ -3329,7 +3332,10 @@ static uint get_table_structure(const char *table, const char *db, char *table_t if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) + { + mysql_free_result(result); DBUG_RETURN(0); + } write_header(sql_file, db); } @@ -3730,7 +3736,7 @@ static int dump_triggers_for_table(char *table_name, char *db_name) char name_buff[NAME_LEN*4+3]; char query_buff[QUERY_LENGTH]; uint old_opt_compatible_mode= opt_compatible_mode; - MYSQL_RES *show_triggers_rs; + MYSQL_RES *show_triggers_rs= NULL; MYSQL_ROW row; FILE *sql_file= md_result_file; @@ -3814,8 +3820,6 @@ static int dump_triggers_for_table(char *table_name, char *db_name) } skip: - mysql_free_result(show_triggers_rs); - if (switch_character_set_results(mysql, default_charset)) goto done; @@ -3830,7 +3834,7 @@ skip: done: if (path) my_fclose(sql_file, MYF(0)); - + mysql_free_result(show_triggers_rs); DBUG_RETURN(ret); } @@ -3936,7 +3940,7 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, uint num_fields; size_t total_length, init_length; - MYSQL_RES *res; + MYSQL_RES *res= NULL; MYSQL_FIELD *field; MYSQL_ROW row; DBUG_ENTER("dump_table"); @@ -4130,6 +4134,8 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, fprintf(stderr,"%s: Error in field count for table: %s ! 
Aborting.\n", my_progname_short, result_table); error= EX_CONSCHECK; + if (!quick) + mysql_free_result(res); goto err; } @@ -4438,6 +4444,7 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, err: dynstr_free(&query_string); maybe_exit(error); + mysql_free_result(res); DBUG_VOID_RETURN; } /* dump_table */ @@ -4703,7 +4710,11 @@ static int dump_all_users_roles_and_grants() " '@', QUOTE(DEFAULT_ROLE_HOST))) as r," " CONCAT(QUOTE(mu.USER),'@',QUOTE(mu.HOST)) as u " "FROM mysql.user mu LEFT JOIN mysql.default_roles using (USER, HOST)")) + { + mysql_free_result(tableres); return 1; + } + while ((row= mysql_fetch_row(tableres))) { if (dump_grants(row[1])) @@ -5780,7 +5791,8 @@ static int get_sys_var_lower_case_table_names() lower_case_table_names= atoi(row[1]); mysql_free_result(table_res); } - + if (!row) + mysql_free_result(table_res); return lower_case_table_names; } @@ -6023,7 +6035,11 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos, } if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 1)) + { + mysql_free_result(master); return 1; + } + } /* SHOW MASTER STATUS reports file and position */ @@ -6145,7 +6161,10 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid, { char gtid_pos[MAX_GTID_LENGTH]; if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0)) + { + mysql_free_result(slave); return 1; + } if (opt_comments) fprintf(md_result_file, "\n--\n-- Gtid position to start replication " "from\n--\n\n"); @@ -6341,7 +6360,7 @@ static ulong find_set(TYPELIB *lib, const char *x, size_t length, { const char *end= x + length; ulong found= 0; - uint find; + int find; char buff[255]; *err_pos= 0; /* No error yet */ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index f5253533921..c27e3f4010d 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -1773,6 +1773,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) uint x; struct timeval start_time, end_time; thread_context con; + int error; pthread_t mainthread; /* Thread descriptor */ pthread_attr_t attr; /* Thread attributes */ DBUG_ENTER("run_scheduler"); @@ -1781,8 +1782,11 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) con.limit= limit; pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, - PTHREAD_CREATE_DETACHED); + if ((error= pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))) + { + printf("Got error: %d from pthread_attr_setdetachstate\n", error); + exit(1); + } pthread_mutex_lock(&counter_mutex); thread_counter= 0; diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 97a19f2128c..08e18be3f1b 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -3564,9 +3564,11 @@ void do_system(struct st_command *command) /* returns TRUE if path is inside a sandbox */ bool is_sub_path(const char *path, size_t plen, const char *sandbox) { - size_t len= strlen(sandbox); - if (!sandbox || !len || plen <= len || memcmp(path, sandbox, len - 1) - || path[len] != '/') + size_t len; + if (!sandbox) + return false; + len= strlen(sandbox); + if (plen <= len || memcmp(path, sandbox, len-1) || path[len] != '/') return false; return true; } @@ -11752,7 +11754,7 @@ void dynstr_append_sorted(DYNAMIC_STRING* ds, DYNAMIC_STRING *ds_input, /* Sort array */ qsort(lines.buffer, lines.elements, - sizeof(char**), (qsort_cmp)comp_lines); + sizeof(uchar *), (qsort_cmp)comp_lines); /* Create new result */ for (i= 0; i < lines.elements ; i++) diff --git a/extra/mariabackup/backup_copy.cc 
b/extra/mariabackup/backup_copy.cc index 76300fc2c17..27c4ba29c91 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -1815,13 +1815,28 @@ apply_log_finish() return(true); } +class Copy_back_dst_dir +{ + std::string buf; + +public: + const char *make(const char *path) + { + if (!path || !path[0]) + return mysql_data_home; + if (is_absolute_path(path)) + return path; + return buf.assign(mysql_data_home).append(path).c_str(); + } +}; + bool copy_back() { bool ret = false; datadir_iter_t *it = NULL; datadir_node_t node; - char *dst_dir; + const char *dst_dir; memset(&node, 0, sizeof(node)); @@ -1873,9 +1888,9 @@ copy_back() /* copy undo tablespaces */ + Copy_back_dst_dir dst_dir_buf; - dst_dir = (srv_undo_dir && *srv_undo_dir) - ? srv_undo_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(srv_undo_dir); ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); @@ -1896,8 +1911,7 @@ copy_back() /* copy redo logs */ - dst_dir = (srv_log_group_home_dir && *srv_log_group_home_dir) - ? srv_log_group_home_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(srv_log_group_home_dir); /* --backup generates a single LOG_FILE_NAME, which we must copy if it exists. */ @@ -1925,8 +1939,7 @@ copy_back() /* copy innodb system tablespace(s) */ - dst_dir = (innobase_data_home_dir && *innobase_data_home_dir) - ? innobase_data_home_dir : mysql_data_home; + dst_dir = dst_dir_buf.make(innobase_data_home_dir); ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc index cee76e5f3d7..588a15eb791 100644 --- a/extra/mariabackup/xbcloud.cc +++ b/extra/mariabackup/xbcloud.cc @@ -2534,7 +2534,7 @@ swift_keystone_auth_v3(const char *auth_url, swift_auth_info *info) } else if (opt_swift_project != NULL) { snprintf(scope, sizeof(scope), ",\"scope\":{\"project\":{\"name\":\"%s\"%s}}", - opt_swift_project_id, domain); + opt_swift_project, domain); } snprintf(payload, sizeof(payload), "{\"auth\":{\"identity\":" diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index c57c2685c94..7d45337bb18 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -1058,7 +1058,8 @@ struct my_option xb_client_options[]= { (G_PTR *) &xtrabackup_print_param, (G_PTR *) &xtrabackup_print_param, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"use-memory", OPT_XTRA_USE_MEMORY, - "The value is used instead of buffer_pool_size", + "The value is used in place of innodb_buffer_pool_size. 
" + "This option is only relevant when the --prepare option is specified.", (G_PTR *) &xtrabackup_use_memory, (G_PTR *) &xtrabackup_use_memory, 0, GET_LL, REQUIRED_ARG, 100 * 1024 * 1024L, 1024 * 1024L, LONGLONG_MAX, 0, 1024 * 1024L, 0}, @@ -1785,6 +1786,12 @@ static void print_version(void) my_progname, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); } +static void concatenate_default_groups(std::vector<const char*> &backup_load_groups, const char **default_groups) +{ + for ( ; *default_groups ; default_groups++) + backup_load_groups.push_back(*default_groups); +} + static void usage(void) { puts("Open source backup tool for InnoDB and XtraDB\n\ @@ -1805,7 +1812,11 @@ GNU General Public License for more details.\n\ You can download full text of the license on http://www.gnu.org/licenses/gpl-2.0.txt\n"); printf("Usage: %s [--defaults-file=#] [--backup | --prepare | --copy-back | --move-back] [OPTIONS]\n",my_progname); - print_defaults("my", load_default_groups); + std::vector<const char*> backup_load_default_groups; + concatenate_default_groups(backup_load_default_groups, backup_default_groups); + concatenate_default_groups(backup_load_default_groups, load_default_groups); + backup_load_default_groups.push_back(nullptr); + print_defaults("my", &backup_load_default_groups[0]); my_print_help(xb_client_options); my_print_help(xb_server_options); my_print_variables(xb_server_options); @@ -6694,6 +6705,7 @@ int main(int argc, char **argv) */ if (strcmp(argv[1], "--mysqld") == 0) { + srv_operation= SRV_OPERATION_EXPORT_RESTORED; extern int mysqld_main(int argc, char **argv); argc--; argv++; diff --git a/include/mysql/service_encryption.h b/include/mysql/service_encryption.h index 280b9c69e35..4963940758c 100644 --- a/include/mysql/service_encryption.h +++ b/include/mysql/service_encryption.h @@ -30,8 +30,6 @@ #ifndef __cplusplus #define inline __inline #endif -#else -#include <stdlib.h> #endif #endif diff --git a/include/ssl_compat.h b/include/ssl_compat.h index 6db1baab9b5..bea3484a1f2 100644 --- a/include/ssl_compat.h +++ b/include/ssl_compat.h @@ -19,7 +19,8 @@ /* OpenSSL version specific definitions */ #if defined(OPENSSL_VERSION_NUMBER) -#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \ + !(defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x30500000L) #define HAVE_OPENSSL11 1 #define SSL_LIBRARY OpenSSL_version(OPENSSL_VERSION) #define ERR_remove_state(X) ERR_clear_error() diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c index 9a0858fd067..4fb5212d6e3 100644 --- a/libmysqld/libmysql.c +++ b/libmysqld/libmysql.c @@ -2941,7 +2941,8 @@ my_bool STDCALL mysql_stmt_bind_param(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) break; default: strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, + snprintf(stmt->last_error, + sizeof(stmt->last_error), ER(stmt->last_errno= CR_UNSUPPORTED_PARAM_TYPE), param->buffer_type, count); DBUG_RETURN(1); @@ -3028,7 +3029,9 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, { /* Long data handling should be used only for string/binary types */ strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, ER(stmt->last_errno= CR_INVALID_BUFFER_USE), + snprintf(stmt->last_error, + sizeof(stmt->last_error), + ER(stmt->last_errno= CR_INVALID_BUFFER_USE), param->param_number); DBUG_RETURN(1); } @@ -4159,7 +4162,8 @@ my_bool STDCALL mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) if (setup_one_fetch_function(param, field)) { 
strmov(stmt->sqlstate, unknown_sqlstate); - sprintf(stmt->last_error, + snprintf(stmt->last_error, + sizeof(stmt->last_error), ER(stmt->last_errno= CR_UNSUPPORTED_PARAM_TYPE), field->type, param_count); DBUG_RETURN(1); diff --git a/man/mysql_upgrade.1 b/man/mysql_upgrade.1 index f6beca90c80..3014ed52df5 100644 --- a/man/mysql_upgrade.1 +++ b/man/mysql_upgrade.1 @@ -656,7 +656,8 @@ The MariaDB user name to use when connecting to the server and not using the cur Display more output about the process\&. Using it twice will print connection arguments; using it 3 times will print out all CHECK, RENAME and ALTER TABLE commands used during the check phase; using it 4 times (added in MariaDB 10.0.14) -will also write out all mysqlcheck commands used\&. +will also write out all mariadb-check commands used; using it 5 times will print all +the mariadb commands used and their results while running mysql_fix_privilege_tables script\&. .RE .sp .RS 4 diff --git a/mysql-test/include/expect_crash.inc b/mysql-test/include/expect_crash.inc index af8b0908104..b4bd9828a08 100644 --- a/mysql-test/include/expect_crash.inc +++ b/mysql-test/include/expect_crash.inc @@ -1,5 +1,5 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect # There should be a debug crash after using this .inc file --exec echo "wait" > $_expect_file_name diff --git a/mysql-test/include/explain_non_select.inc b/mysql-test/include/explain_non_select.inc index d22310c9813..bd0962d3876 100644 --- a/mysql-test/include/explain_non_select.inc +++ b/mysql-test/include/explain_non_select.inc @@ -788,6 +788,22 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5); DROP TABLE t1; +--echo #75 + +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +--let $query = INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +--source include/explain_utils.inc +DROP TABLE t1; + +--echo #76 + +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +CREATE TABLE t2 (a INT, b INT); +INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11); +--let $query = INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +--source include/explain_utils.inc +DROP TABLE t1,t2; + --echo # --echo # Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH --echo # EXPLAIN UPDATE/DEL/INS diff --git a/mysql-test/include/kill_galera.inc b/mysql-test/include/kill_galera.inc index d7f665df6c7..aba672d8a89 100644 --- a/mysql-test/include/kill_galera.inc +++ b/mysql-test/include/kill_galera.inc @@ -1,8 +1,8 @@ --echo Killing server ... 
# Write file to make mysql-test-run.pl expect the crash, but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Kill the connected server diff --git a/mysql-test/include/kill_mysqld.inc b/mysql-test/include/kill_mysqld.inc index 86ee048a0f1..01ee7f82bdc 100644 --- a/mysql-test/include/kill_mysqld.inc +++ b/mysql-test/include/kill_mysqld.inc @@ -1,5 +1,5 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --echo # Kill the server --exec echo "wait" > $_expect_file_name diff --git a/mysql-test/include/shutdown_mysqld.inc b/mysql-test/include/shutdown_mysqld.inc index db0cfb82c68..fc2972560c3 100644 --- a/mysql-test/include/shutdown_mysqld.inc +++ b/mysql-test/include/shutdown_mysqld.inc @@ -22,8 +22,8 @@ if ($rpl_inited) } # Write file to make mysql-test-run.pl expect the "crash", but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Avoid warnings from connection threads that does not have time to exit diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index ce0d695797b..eb1220f9a56 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -6442,6 +6442,23 @@ IS_IPV4('10.0.0.1') 1 SET NAMES utf8; # +# MDEV-30746 Regression in ucs2_general_mysql500_ci +# +SET NAMES utf8mb3; +CREATE TABLE t1 (a VARCHAR(32) CHARACTER SET ucs2 COLLATE ucs2_general_mysql500_ci); +INSERT INTO t1 VALUES ('s'),('z'),(_latin1 0xDF); +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +DROP TABLE t1; +# # End of 10.4 tests # # diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test index 77a717c11a9..86be0d6da25 100644 --- a/mysql-test/main/ctype_ucs.test +++ b/mysql-test/main/ctype_ucs.test @@ -1137,6 +1137,18 @@ SELECT IS_IPV4('10.0.0.1'); SET NAMES utf8; --echo # +--echo # MDEV-30746 Regression in ucs2_general_mysql500_ci +--echo # + +SET NAMES utf8mb3; +CREATE TABLE t1 (a VARCHAR(32) CHARACTER SET ucs2 COLLATE ucs2_general_mysql500_ci); +INSERT INTO t1 VALUES ('s'),('z'),(_latin1 0xDF); +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +DROP TABLE t1; + + +--echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/ctype_upgrade.result b/mysql-test/main/ctype_upgrade.result index 960d44f5937..b995e9f6265 100644 --- a/mysql-test/main/ctype_upgrade.result +++ b/mysql-test/main/ctype_upgrade.result @@ -399,3 +399,56 @@ DROP TABLE maria050313_utf8_croatian_ci; DROP TABLE maria050533_xxx_croatian_ci; DROP TABLE maria100004_xxx_croatian_ci; DROP TABLE mysql050614_xxx_croatian_ci; +# +# Start of 10.4 tests +# +# +# MDEV-30746 Regression in ucs2_general_mysql500_ci 
+# +SET NAMES utf8mb3; +SHOW CREATE TABLE t1; +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +SELECT * FROM t1; +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +SELECT * FROM t1 IGNORE KEY(a); +ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.t1` FORCE" or dump/reload to fix it! +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair status OK +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 FORCE KEY(a) ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 IGNORE KEY(a) ORDER BY a; +a HEX(a) HEX(WEIGHT_STRING(a)) +s 0073 0053 +z 007A 005A +ß 00DF 00DF +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +SELECT GROUP_CONCAT(a) FROM t1 IGNORE KEY(a) GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +SELECT GROUP_CONCAT(a) FROM t1 FORCE KEY(a) GROUP BY a ORDER BY a; +GROUP_CONCAT(a) +s +z +ß +DROP TABLE t1; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/ctype_upgrade.test b/mysql-test/main/ctype_upgrade.test index fee962e7ceb..7cb1ec9b69d 100644 --- a/mysql-test/main/ctype_upgrade.test +++ b/mysql-test/main/ctype_upgrade.test @@ -203,3 +203,38 @@ DROP TABLE maria050313_utf8_croatian_ci; DROP TABLE maria050533_xxx_croatian_ci; DROP TABLE maria100004_xxx_croatian_ci; DROP TABLE mysql050614_xxx_croatian_ci; + + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-30746 Regression in ucs2_general_mysql500_ci +--echo # + +SET NAMES utf8mb3; + +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm $MYSQLD_DATADIR/test/t1.frm; +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD $MYSQLD_DATADIR/test/t1.MYD; +copy_file std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI $MYSQLD_DATADIR/test/t1.MYI; + +--error ER_TABLE_NEEDS_REBUILD +SHOW CREATE TABLE t1; +--error ER_TABLE_NEEDS_REBUILD +SELECT * FROM t1; +--error ER_TABLE_NEEDS_REBUILD +SELECT * FROM t1 IGNORE KEY(a); +CHECK TABLE t1; +REPAIR TABLE t1; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 FORCE KEY(a) ORDER BY a; +SELECT a, HEX(a), HEX(WEIGHT_STRING(a)) FROM t1 IGNORE KEY(a) ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 GROUP BY a ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 IGNORE KEY(a) GROUP BY a ORDER BY a; +SELECT GROUP_CONCAT(a) FROM t1 FORCE KEY(a) GROUP BY a ORDER BY a; +DROP TABLE t1; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result index 15a7784c890..c367b882e7f 100644 --- a/mysql-test/main/derived_view.result +++ b/mysql-test/main/derived_view.result @@ -3689,3 +3689,485 @@ drop procedure sp2; drop view v, v2; drop table t1,t2; # End of 10.2 tests +# +# MDEV-30706: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); +CREATE TABLE t2 (a INT PRIMARY KEY, b INT); +INSERT INTO t2 VALUES (1,1), (3,3); +CREATE TABLE t3 (a INT PRIMARY KEY, b INT); +INSERT INTO t3 VALUES (2,2), (4,4), (7,7); +CREATE TABLE 
t4 (a INT PRIMARY KEY, b INT); +INSERT INTO t4 VALUES (2,2), (5,5), (7,7); +CREATE VIEW v AS SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +m +9 +SELECT * FROM v; +m +9 +WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte; +m +9 +EXPLAIN SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 +2 SUBQUERY t4 ALL NULL NULL NULL NULL 3 Using where +2 SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +3 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +EXPLAIN WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 +2 DERIVED t4 ALL NULL NULL NULL NULL 3 Using where +2 DERIVED t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1 Using index +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort +3 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using index +PREPARE stmt FROM "SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT +(SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 +WHERE t3.a = t4.b ) SELECT * FROM cte"; +execute stmt; +m +9 +execute stmt; +m +9 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2,t3,t4; +# +# MDEV-29224: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (f1 INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f2 int); +INSERT INTO t2 VALUES (3); +CREATE VIEW v AS SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +( SELECT MAX(f1) FROM t2 ) +2 +SELECT * FROM v; +( SELECT MAX(f1) FROM t2 ) +2 +WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) SELECT * FROM cte; +( SELECT MAX(f1) FROM t2 ) +2 +EXPLAIN SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +2 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 +2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 +3 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +EXPLAIN WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) 
SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 2 +3 DEPENDENT SUBQUERY t2 system NULL NULL NULL NULL 1 +PREPARE stmt FROM "SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1 ) SELECT * FROM cte"; +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +execute stmt; +( SELECT MAX(f1) FROM t2 ) +2 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-28573: view defined as select with implicit grouping and +# a set function used in a subquery +# +CREATE TABLE t1 (a INTEGER, b INTEGER); +CREATE TABLE t2 (c INTEGER); +INSERT INTO t1 VALUES (1,11), (2,22), (2,22); +INSERT INTO t2 VALUES (1), (2); +CREATE VIEW v1 AS SELECT (SELECT COUNT(b) FROM t2) FROM t1; +CREATE VIEW v2 AS SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; +SELECT (SELECT COUNT(b) FROM t2) FROM t1; +ERROR 21000: Subquery returns more than 1 row +SELECT * FROM v1; +ERROR 21000: Subquery returns more than 1 row +WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte; +ERROR 21000: Subquery returns more than 1 row +SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +SELECT * FROM v2; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1 ) SELECT * FROM cte; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +EXPLAIN SELECT (SELECT COUNT(b) FROM t2) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +EXPLAIN SELECT * FROM v1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 +2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +EXPLAIN WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 +2 DERIVED t1 ALL NULL NULL NULL NULL 3 +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 +PREPARE stmt FROM "SELECT (SELECT COUNT(b) FROM t2) FROM t1"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v1"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2) FROM t1 ) SELECT * FROM cte"; +execute stmt; +ERROR 21000: Subquery returns more than 1 row +execute stmt; +ERROR 21000: Subquery returns more than 1 row +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v2"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE 
stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1 ) SELECT * FROM cte"; +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +execute stmt; +(SELECT COUNT(b) FROM t2 WHERE c > 1) +3 +DEALLOCATE PREPARE stmt; +DROP VIEW v1,v2; +DROP TABLE t1,t2; +# +# MDEV-28570: VIEW with WHERE containing subquery +# with set function aggregated in query +# +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (c int, d int); +INSERT INTO t1 VALUES +(1,10), (2,10), (1,20), (2,20), (3,20), (2,30), (4,40); +INSERT INTO t2 VALUES +(2,10), (2,20), (4,10), (5,10), (3,20), (2,40); +CREATE VIEW v AS SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +a +2 +4 +SELECT * FROM v; +a +2 +4 +WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte; +a +2 +4 +EXPLAIN SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 7 +2 DERIVED t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +EXPLAIN WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 7 +2 DERIVED t1 ALL NULL NULL NULL NULL 7 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 6 +PREPARE stmt FROM "SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20)"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT a FROM t1 GROUP BY a +HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20) ) SELECT * FROM cte"; +execute stmt; +a +2 +4 +execute stmt; +a +2 +4 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-28571: VIEW with select list containing subquery +# with set function aggregated in query +# +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); +CREATE VIEW v AS SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +c +2 +3 +1,1 +SELECT * FROM v; +c +2 +3 +1,1 +WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte; +c +2 +3 +1,1 +EXPLAIN SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +EXPLAIN SELECT * FROM v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6 +2 DERIVED t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +3 
DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +EXPLAIN WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6 +2 DERIVED t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +PREPARE stmt FROM "SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +PREPARE stmt FROM "WITH cte AS ( SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a ) SELECT * FROM cte"; +execute stmt; +c +2 +3 +1,1 +execute stmt; +c +2 +3 +1,1 +DEALLOCATE PREPARE stmt; +DROP VIEW v; +DROP TABLE t1,t2; +# +# MDEV-30668: VIEW with WHERE containing nested subquery +# with set function aggregated in outer subquery +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (2), (1), (4), (7); +create table t3 (a int, b int); +insert into t3 values (2,10), (7,30), (2,30), (1,10), (7,40); +create view v as select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +a +7 +select * from v; +a +7 +with cte as ( select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte; +a +7 +explain select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 +2 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +explain select * from v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 test.t1.a 1 +3 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +4 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +explain with cte as ( select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 test.t1.a 1 +3 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using temporary +4 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where +prepare stmt from "select * from t1 +where t1.a in (select t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b)))"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +prepare stmt from "select * from v"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +prepare stmt from "with cte as ( select * from t1 +where t1.a in (select 
t3.a from t3 group by t3.a +having t3.a > any (select t2.b from t2 +where t2.b*10 < sum(t3.b))) ) select * from cte"; +execute stmt; +a +7 +execute stmt; +a +7 +deallocate prepare stmt; +drop view v; +drop table t1,t2,t3; +# End of 10.4 tests diff --git a/mysql-test/main/derived_view.test b/mysql-test/main/derived_view.test index caccc7dafa1..5422fbcfd1d 100644 --- a/mysql-test/main/derived_view.test +++ b/mysql-test/main/derived_view.test @@ -2455,3 +2455,300 @@ drop view v, v2; drop table t1,t2; --echo # End of 10.2 tests + +--echo # +--echo # MDEV-30706: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); +CREATE TABLE t2 (a INT PRIMARY KEY, b INT); +INSERT INTO t2 VALUES (1,1), (3,3); +CREATE TABLE t3 (a INT PRIMARY KEY, b INT); +INSERT INTO t3 VALUES (2,2), (4,4), (7,7); +CREATE TABLE t4 (a INT PRIMARY KEY, b INT); +INSERT INTO t4 VALUES (2,2), (5,5), (7,7); + +let $q= +SELECT + (SELECT SUM(t4.b) FROM t1, t2 WHERE t1.a = t2.b GROUP BY t1.a) AS m +FROM t3, t4 + WHERE t3.a = t4.b; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2,t3,t4; + +--echo # +--echo # MDEV-29224: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (f1 INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f2 int); +INSERT INTO t2 VALUES (3); + +let $q= +SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28573: view defined as select with implicit grouping and +--echo # a set function used in a subquery +--echo # + +CREATE TABLE t1 (a INTEGER, b INTEGER); +CREATE TABLE t2 (c INTEGER); +INSERT INTO t1 VALUES (1,11), (2,22), (2,22); +INSERT INTO t2 VALUES (1), (2); + +let $q1= +SELECT (SELECT COUNT(b) FROM t2) FROM t1; +let $q2= +SELECT (SELECT COUNT(b) FROM t2 WHERE c > 1) FROM t1; + +eval CREATE VIEW v1 AS $q1; +eval CREATE VIEW v2 AS $q2; + +--error ER_SUBQUERY_NO_1_ROW +eval $q1; +--error ER_SUBQUERY_NO_1_ROW +SELECT * FROM v1; +--error ER_SUBQUERY_NO_1_ROW +eval WITH cte AS ( $q1 ) SELECT * FROM cte; +eval $q2; +SELECT * FROM v2; +eval WITH cte AS ( $q2 ) SELECT * FROM cte; + +eval EXPLAIN $q1; +EXPLAIN SELECT * FROM v1; +eval EXPLAIN WITH cte AS ( $q1 ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q1"; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v1"; 
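The MDEV-30706/MDEV-29224/MDEV-28573 cases above all hinge on "implicit grouping": a select list that uses a set function but has no GROUP BY aggregates the whole table into a single row, and that property has to survive when the query body is wrapped in a view, a derived table or a CTE. A minimal sketch of the term itself, not of the fix:

    CREATE TABLE t (a INT);
    INSERT INTO t VALUES (1),(2),(3);
    SELECT COUNT(*) FROM t;              -- implicit grouping: exactly one row, value 3
    SELECT COUNT(*) FROM t GROUP BY a;   -- explicit grouping: one row per group (three rows of 1)
    DROP TABLE t;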
+--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q1 ) SELECT * FROM cte"; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +--error ER_SUBQUERY_NO_1_ROW +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "$q2"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v2"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q2 ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v1,v2; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28570: VIEW with WHERE containing subquery +--echo # with set function aggregated in query +--echo # + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (c int, d int); + +INSERT INTO t1 VALUES + (1,10), (2,10), (1,20), (2,20), (3,20), (2,30), (4,40); +INSERT INTO t2 VALUES + (2,10), (2,20), (4,10), (5,10), (3,20), (2,40); + +let $q= +SELECT a FROM t1 GROUP BY a + HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-28571: VIEW with select list containing subquery +--echo # with set function aggregated in query +--echo # + +CREATE TABLE t1 (a int, b int); +CREATE TABLE t2 (m int, n int); +INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); +INSERT INTO t2 VALUES (1,11), (2,22), (3,32), (4,44), (4,44); + +let $q= +SELECT (SELECT GROUP_CONCAT(COUNT(a)) FROM t2 WHERE m = a) AS c +FROM t1 +GROUP BY a; + +eval CREATE VIEW v AS $q; + +eval $q; +SELECT * FROM v; +eval WITH cte AS ( $q ) SELECT * FROM cte; + +eval EXPLAIN $q; +EXPLAIN SELECT * FROM v; +eval EXPLAIN WITH cte AS ( $q ) SELECT * FROM cte; + +eval PREPARE stmt FROM "$q"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "SELECT * FROM v"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +eval PREPARE stmt FROM "WITH cte AS ( $q ) SELECT * FROM cte"; +execute stmt; +execute stmt; +DEALLOCATE PREPARE stmt; + +DROP VIEW v; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-30668: VIEW with WHERE containing nested subquery +--echo # with set function aggregated in outer subquery +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +create table t2 (b int); +insert into t2 values (2), (1), (4), (7); + +create table t3 (a int, b int); +insert into t3 values (2,10), (7,30), (2,30), (1,10), (7,40); + +let $q= +select * from t1 + where t1.a in (select t3.a from t3 group by t3.a + having t3.a > any (select t2.b from t2 + where t2.b*10 < sum(t3.b))); +eval create view v as $q; + +eval $q; +eval select * from v; +eval with cte as ( $q ) select * from cte; + +eval explain $q; +eval explain select * from v; +eval explain with cte as ( $q ) select * from cte; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "select * from v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval 
prepare stmt from "with cte as ( $q ) select * from cte"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +drop view v; +drop table t1,t2,t3; + +--echo # End of 10.4 tests diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index 888d3143f2c..0c5eccad754 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1071,6 +1071,43 @@ UNION drop table t1; End of 5.5 tests # +# MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result +# +create table t1 (c int, d int); +insert into t1 values (5, 1), (0, 3); +select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 2 +1 1 +1 0 +select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 2 c +1 1 0 +1 0 5 +insert into t1 values (6,6); +select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 5 +1 1 +1 0 +select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c; +sum(distinct 1) sum(t1.d) > 5 c +1 1 0 +1 0 5 +1 1 6 +set @@sort_buffer_size=1024; +insert into t1 select -seq,-seq from seq_1_to_100; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ; +sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 +1 0 0 +1 1 0 +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2; +sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 c +1 0 0 -1 +1 1 0 0 +1 1 0 5 +1 1 0 6 +drop table t1; +# End of 10.4 tests +# # MDEV-27382: OFFSET is ignored when it is combined with the DISTINCT, IN() and JOIN # CREATE TABLE t1 ( diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 32e189da98a..9039edff20f 100644 --- a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -4,6 +4,7 @@ # --source include/default_optimizer_switch.inc +--source include/have_sequence.inc --disable_warnings drop table if exists t1,t2,t3; --enable_warnings @@ -820,6 +821,28 @@ drop table t1; --echo End of 5.5 tests --echo # +--echo # MDEV-20057 Distinct SUM on CROSS JOIN and grouped returns wrong result +--echo # + +create table t1 (c int, d int); +insert into t1 values (5, 1), (0, 3); +select distinct sum(distinct 1), sum(t1.d) > 2 from (t1 e join t1) group by t1.c; +select distinct sum(distinct 1), sum(t1.d) > 2, t1.c from (t1 e join t1) group by t1.c; + +insert into t1 values (6,6); +select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c; +select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c; + +# Force usage of remove_dup_with_compare() algorithm +set @@sort_buffer_size=1024; +insert into t1 select -seq,-seq from seq_1_to_100; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ; +select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2; +drop table t1; + +--echo # End of 10.4 tests + +--echo # --echo # MDEV-27382: OFFSET is ignored when it is combined with the DISTINCT, IN() and JOIN --echo # CREATE TABLE t1 ( diff --git a/mysql-test/main/explain_non_select.result b/mysql-test/main/explain_non_select.result index 111b4c8ae50..d60f10f85c8 100644 --- 
a/mysql-test/main/explain_non_select.result +++ b/mysql-test/main/explain_non_select.result @@ -157,9 +157,13 @@ id select_type table partitions type possible_keys key key_len ref rows Extra explain extended update t2 set b=3 where a in (3,4); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`b` = 3 where `test`.`t2`.`a` in (3,4) explain extended delete from t2 where a in (3,4); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`a` in (3,4) drop table t1,t2; # # Check the special case where partition pruning removed all partitions diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result index 244fdb2856f..4a2011283c8 100644 --- a/mysql-test/main/func_group.result +++ b/mysql-test/main/func_group.result @@ -1443,16 +1443,11 @@ FROM derived1 AS X WHERE X.int_nokey < 61 GROUP BY pk -LIMIT 1) +LIMIT 1) AS m FROM D AS X WHERE X.int_key < 13 GROUP BY int_nokey LIMIT 1; -(SELECT COUNT( int_nokey ) -FROM derived1 AS X -WHERE -X.int_nokey < 61 -GROUP BY pk -LIMIT 1) +m 1 DROP TABLE derived1; DROP TABLE D; diff --git a/mysql-test/main/func_group.test b/mysql-test/main/func_group.test index e5ae33f7208..446154e517b 100644 --- a/mysql-test/main/func_group.test +++ b/mysql-test/main/func_group.test @@ -579,8 +579,6 @@ DROP TABLE t1; # # Bug #16792 query with subselect, join, and group not returning proper values # -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (a INT, b INT); INSERT INTO t1 VALUES (1,1),(1,2),(2,3); @@ -591,7 +589,6 @@ SELECT AVG(2), BIT_AND(2), BIT_OR(2), BIT_XOR(2), COUNT(*), COUNT(12), COUNT(DISTINCT 12), MIN(2),MAX(2),STD(2), VARIANCE(2),SUM(2), GROUP_CONCAT(2),GROUP_CONCAT(DISTINCT 2); DROP TABLE t1; ---enable_view_protocol # End of 4.1 tests @@ -623,13 +620,10 @@ drop table t1, t2, t3; # # BUG#3190, WL#1639: Standard Deviation STDDEV - 2 different calculations # -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (id int(11),value1 float(10,2)); INSERT INTO t1 VALUES (1,0.00),(1,1.00), (1,2.00), (2,10.00), (2,11.00), (2,12.00), (2,13.00); select id, stddev_pop(value1), var_pop(value1), stddev_samp(value1), var_samp(value1) from t1 group by id; DROP TABLE t1; ---enable_view_protocol # # BUG#8464 decimal AVG returns incorrect result @@ -966,22 +960,19 @@ INSERT INTO D VALUES (83,45,4,repeat(' X', 42)), (105,53,12,NULL); -#enable after fix MDEV-27871 ---disable_view_protocol SELECT (SELECT COUNT( int_nokey ) FROM derived1 AS X WHERE X.int_nokey < 61 GROUP BY pk - LIMIT 1) + LIMIT 1) AS m FROM D AS X WHERE X.int_key < 13 GROUP BY int_nokey LIMIT 1; DROP TABLE derived1; DROP TABLE D; ---enable_view_protocol # # Bug #39656: Behaviour different for agg functions with & without where - diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index b9df193d3bd..44d50261de4 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1449,6 +1449,13 @@ JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) [{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, 
"analyzing_index_merge_union": [], "test_one_line_array": ["123"]}] drop table t200; # +# MDEV-24538: JSON_LENGTH does not return error upon wrong number of parameters +# +SELECT JSON_LENGTH('{"a":"b"}','$','$', 'foo'); +ERROR 42000: Incorrect parameter count in the call to native function 'json_length' +SELECT JSON_LENGTH(); +ERROR 42000: Incorrect parameter count in the call to native function 'JSON_LENGTH' +# # End of 10.4 tests # # diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 0a5d1638717..257d3ee375e 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -665,6 +665,7 @@ SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest'); SELECT NULL; SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a'); + --echo # --echo # End of 10.3 tests --echo # @@ -918,6 +919,14 @@ select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200 drop table t200; --echo # +--echo # MDEV-24538: JSON_LENGTH does not return error upon wrong number of parameters +--echo # +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT JSON_LENGTH('{"a":"b"}','$','$', 'foo'); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT JSON_LENGTH(); + +--echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result index e0b2aeacb5d..55ccaee1e7e 100644 --- a/mysql-test/main/func_str.result +++ b/mysql-test/main/func_str.result @@ -5242,6 +5242,15 @@ DROP TABLE t1; # Start of 10.4 tests # # +# MDEV-30351 crash in Item_func_left::val_str +# +SELECT WEIGHT_STRING('aa') IN (LEFT(WEIGHT_STRING('aaa'),4),'bbb') as expect_1; +expect_1 +1 +SELECT UNHEX('0032') in (LEFT(UNHEX('003200'), 2),'dog') as expect_1; +expect_1 +1 +# # MDEV-21841 CONV() function truncates the result type to 21 symbol. # CREATE TABLE t1(i BIGINT); diff --git a/mysql-test/main/func_str.test b/mysql-test/main/func_str.test index b8bb8a54090..1bf9301c1f2 100644 --- a/mysql-test/main/func_str.test +++ b/mysql-test/main/func_str.test @@ -2277,17 +2277,23 @@ CREATE TABLE crash_test_2 ( --echo # Cleanup DROP TABLE t1; - --echo # --echo # End of 10.3 tests --echo # - --echo # --echo # Start of 10.4 tests --echo # --echo # +--echo # MDEV-30351 crash in Item_func_left::val_str +--echo # + +SELECT WEIGHT_STRING('aa') IN (LEFT(WEIGHT_STRING('aaa'),4),'bbb') as expect_1; + +SELECT UNHEX('0032') in (LEFT(UNHEX('003200'), 2),'dog') as expect_1; + +--echo # --echo # MDEV-21841 CONV() function truncates the result type to 21 symbol. 
--echo # diff --git a/mysql-test/main/grant5.result b/mysql-test/main/grant5.result index 1038198bd02..c3a4baad259 100644 --- a/mysql-test/main/grant5.result +++ b/mysql-test/main/grant5.result @@ -453,4 +453,13 @@ insert mysql.host values (1); flush privileges; ERROR HY000: Fatal error: mysql.host table is damaged or in unsupported 3.20 format drop table mysql.host; +# +# MDEV-30826 Invalid data on mysql.host segfaults the server after an upgrade to 10.4 +# +create table mysql.host (host char(60) binary default '' not null, db char(64) binary default '' not null, select_priv enum('n','y') collate utf8_general_ci default 'n' not null, insert_priv enum('n','y') collate utf8_general_ci default 'n' not null, update_priv enum('n','y') collate utf8_general_ci default 'n' not null, delete_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_priv enum('n','y') collate utf8_general_ci default 'n' not null, drop_priv enum('n','y') collate utf8_general_ci default 'n' not null, grant_priv enum('n','y') collate utf8_general_ci default 'n' not null, references_priv enum('n','y') collate utf8_general_ci default 'n' not null, index_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_tmp_table_priv enum('n','y') collate utf8_general_ci default 'n' not null, lock_tables_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, show_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, execute_priv enum('n','y') collate utf8_general_ci default 'n' not null, trigger_priv enum('n','y') collate utf8_general_ci default 'n' not null, primary key /*host*/ (host,db)) engine=myisam character set utf8 collate utf8_bin comment='host privileges; merged with database privileges'; +insert mysql.host values('10.5.0.0/255.255.0.0','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','N'); +flush privileges; +drop table mysql.host; +# # End of 10.4 tests +# diff --git a/mysql-test/main/grant5.test b/mysql-test/main/grant5.test index c4a302fca86..49e0ab1abf1 100644 --- a/mysql-test/main/grant5.test +++ b/mysql-test/main/grant5.test @@ -408,4 +408,16 @@ insert mysql.host values (1); flush privileges; drop table mysql.host; +--echo # +--echo # MDEV-30826 Invalid data on mysql.host segfaults the server after an upgrade to 10.4 +--echo # + +# from mysql_system_tables.sql in 10.3: +create table mysql.host (host char(60) binary default '' not null, db char(64) binary default '' not null, select_priv enum('n','y') collate utf8_general_ci default 'n' not null, insert_priv enum('n','y') collate utf8_general_ci default 'n' not null, update_priv enum('n','y') collate utf8_general_ci default 'n' not null, delete_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_priv enum('n','y') collate utf8_general_ci default 'n' not null, drop_priv enum('n','y') collate utf8_general_ci default 'n' not null, grant_priv enum('n','y') collate utf8_general_ci default 'n' not null, references_priv enum('n','y') collate utf8_general_ci default 'n' not null, index_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_tmp_table_priv enum('n','y') collate 
utf8_general_ci default 'n' not null, lock_tables_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, show_view_priv enum('n','y') collate utf8_general_ci default 'n' not null, create_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, alter_routine_priv enum('n','y') collate utf8_general_ci default 'n' not null, execute_priv enum('n','y') collate utf8_general_ci default 'n' not null, trigger_priv enum('n','y') collate utf8_general_ci default 'n' not null, primary key /*host*/ (host,db)) engine=myisam character set utf8 collate utf8_bin comment='host privileges; merged with database privileges'; +insert mysql.host values('10.5.0.0/255.255.0.0','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','N'); +flush privileges; +drop table mysql.host; + +--echo # --echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result index ba403fa8b73..b7bbab6dd47 100644 --- a/mysql-test/main/group_by.result +++ b/mysql-test/main/group_by.result @@ -1036,8 +1036,9 @@ FROM t1 AS t1_outer GROUP BY t1_outer.b; 21 21 SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +AS m FROM t1 AS t1_outer; -(SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +m 3 3 3 @@ -1277,12 +1278,9 @@ a select avg ( (select (select sum(outr.a + innr.a) from t1 as innr limit 1) as tt -from t1 as outr order by outr.a limit 1)) +from t1 as outr order by outr.a limit 1)) as m from t1 as most_outer; -avg ( -(select -(select sum(outr.a + innr.a) from t1 as innr limit 1) as tt -from t1 as outr order by outr.a limit 1)) +m 29.0000 select avg ( (select ( diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test index 357959d4071..7f81c787c36 100644 --- a/mysql-test/main/group_by.test +++ b/mysql-test/main/group_by.test @@ -783,11 +783,9 @@ SELECT 1 FROM t1 as t1_outer GROUP BY a SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner LIMIT 1) FROM t1 AS t1_outer GROUP BY t1_outer.b; -#enable after fix MDEV-27871 ---disable_view_protocol SELECT (SELECT SUM(t1_inner.a) FROM t1 AS t1_inner GROUP BY t1_inner.b LIMIT 1) +AS m FROM t1 AS t1_outer; ---enable_view_protocol --error ER_WRONG_FIELD_WITH_GROUP SELECT (SELECT SUM(t1_outer.a) FROM t1 AS t1_inner LIMIT 1) @@ -863,6 +861,7 @@ DROP TABLE t1; --echo # --echo # Bug#27219: Aggregate functions in ORDER BY. 
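The group_by.result/group_by.test changes above mostly just give the scalar-subquery columns an explicit alias (AS m); without an alias the column is named after the full subquery text, which is what the old expected output had to spell out. A tiny sketch of that naming behaviour:

    SELECT (SELECT 1 + 1);        -- column heading is the literal text: (SELECT 1 + 1)
    SELECT (SELECT 1 + 1) AS m;   -- column heading is simply: m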
--echo # + SET @save_sql_mode=@@sql_mode; SET @@sql_mode='ONLY_FULL_GROUP_BY'; @@ -884,6 +883,8 @@ SELECT 1 FROM t1 ORDER BY SUM(a) + 1; --error 1140 SELECT 1 FROM t1 ORDER BY SUM(a), b; +--disable_service_connection + --error 1140 SELECT a FROM t1 ORDER BY COUNT(b); @@ -896,9 +897,6 @@ SELECT t1.a FROM t1 ORDER BY (SELECT SUM(t2.a) FROM t2 ORDER BY t2.a); --error 1140 SELECT t1.a FROM t1 ORDER BY (SELECT t2.a FROM t2 ORDER BY SUM(t2.b) LIMIT 1); -#enable after fix MDEV-28570 ---disable_view_protocol - --error 1140 SELECT t1.a FROM t1 WHERE t1.a = (SELECT t2.a FROM t2 ORDER BY SUM(t2.b) LIMIT 1); @@ -936,7 +934,7 @@ SELECT 1 FROM t1 GROUP BY t1.a SELECT 1 FROM t1 GROUP BY t1.a HAVING (SELECT AVG(t1.b + t2.b) FROM t2 ORDER BY t2.a LIMIT 1); ---enable_view_protocol +--enable_service_connection # Both SUMs are aggregated in the subquery, no mixture: SELECT t1.a FROM t1 @@ -961,18 +959,17 @@ SELECT t1.a, SUM(t1.b) FROM t1 ORDER BY SUM(t2.b + t1.a) LIMIT 1) GROUP BY t1.a; -#enable after fix MDEV-28570, MDEV-28571 ---disable_view_protocol - SELECT t1.a FROM t1 GROUP BY t1.a HAVING (1, 1) = (SELECT SUM(t1.a), t1.a FROM t2 LIMIT 1); select avg ( (select (select sum(outr.a + innr.a) from t1 as innr limit 1) as tt - from t1 as outr order by outr.a limit 1)) + from t1 as outr order by outr.a limit 1)) as m from t1 as most_outer; +--disable_service_connection + --error 1140 select avg ( (select ( @@ -980,7 +977,7 @@ select avg ( from t1 as outr order by count(outr.a) limit 1)) as tt from t1 as most_outer; ---enable_view_protocol +--enable_service_connection select (select sum(outr.a + t1.a) from t1 limit 1) as tt from t1 as outr order by outr.a; @@ -1378,7 +1375,7 @@ DROP TABLE t1; --echo # Bug#11765254 (58200): Assertion failed: param.sort_length when grouping --echo # by functions --echo # -#createing view adds one new warning +#creating view adds one new warning --disable_view_protocol set tmp_memory_table_size=0; diff --git a/mysql-test/main/insert_returning.result b/mysql-test/main/insert_returning.result index 1976c1ca02e..b2ed9c90e51 100644 --- a/mysql-test/main/insert_returning.result +++ b/mysql-test/main/insert_returning.result @@ -63,6 +63,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1(id1,val1) VALUES(9,'j') RETURNING id1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(id1,val1) values (9,'j') EXPLAIN FORMAT="json" INSERT INTO t1(id1,val1) VALUES (10,'k') RETURNING id1; EXPLAIN { @@ -151,6 +153,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 VALUES (17,'s'),(18,'t') RETURNING *; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1` values (17,'s'),(18,'t') EXPLAIN FORMAT="json" INSERT INTO t1 VALUES(19,'u'),(20,'v') RETURNING id1; EXPLAIN { @@ -236,6 +240,8 @@ EXPLAIN EXTENDED INSERT INTO ins_duplicate(id,val) VALUES (2,'b') ON DUPLICATE KEY UPDATE val='i' RETURNING val; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT ins_duplicate ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`ins_duplicate`(id,val) values (2,'b') on duplicate key update `test`.`ins_duplicate`.`val` = 'i' EXPLAIN FORMAT="json" INSERT INTO ins_duplicate(id,val) VALUES (2,'b') ON DUPLICATE KEY 
UPDATE val='j' RETURNING val; EXPLAIN @@ -320,6 +326,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 SET id1=10, val1='j' RETURNING val1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(id1,val1) values (10,'j') EXPLAIN FORMAT="json" INSERT INTO t1 SET id1=11, val1='k' RETURNING val1; EXPLAIN { @@ -404,6 +412,8 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN EXTENDED INSERT INTO t1 SELECT * FROM t1 WHERE id1=9 RETURNING val1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 insert into `test`.`t1` select sql_buffer_result NULL AS `id1`,NULL AS `val1` from `test`.`t1` where 0 EXPLAIN FORMAT="json" INSERT INTO t1 SELECT * FROM t1 WHERE id1=10 RETURNING val1; EXPLAIN { diff --git a/mysql-test/main/kill-2.result b/mysql-test/main/kill-2.result index daaba2c092a..d735d412eeb 100644 --- a/mysql-test/main/kill-2.result +++ b/mysql-test/main/kill-2.result @@ -10,3 +10,34 @@ foo root kill user foo@'127.0.0.1'; drop user foo@'127.0.0.1'; +# +# KILL USER and missing privileges +# +create user a@'127.0.0.1'; +create user b@'127.0.0.1'; +grant process on *.* to a@'127.0.0.1'; +grant select on *.* to b@'127.0.0.1'; +connect a,127.0.0.1,a; +show grants; +Grants for a@127.0.0.1 +GRANT PROCESS ON *.* TO `a`@`127.0.0.1` +connect b,127.0.0.1,b; +show processlist; +Id User Host db Command Time State Info Progress +# b # test # # starting show processlist # +kill user a; +kill user x; +connection a; +show processlist; +Id User Host db Command Time State Info Progress +# root # test # # # # # +# a # test # # # # # +# b # test # # # # # +kill user b; +ERROR HY000: Operation KILL USER failed for b@% +connection default; +drop user a@'127.0.0.1'; +drop user b@'127.0.0.1'; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/kill-2.test b/mysql-test/main/kill-2.test index 6d40e14a700..9bc4fe03346 100644 --- a/mysql-test/main/kill-2.test +++ b/mysql-test/main/kill-2.test @@ -27,3 +27,30 @@ let $wait_condition= --source include/wait_condition.inc drop user foo@'127.0.0.1'; --enable_service_connection + +--echo # +--echo # KILL USER and missing privileges +--echo # +create user a@'127.0.0.1'; +create user b@'127.0.0.1'; +grant process on *.* to a@'127.0.0.1'; +grant select on *.* to b@'127.0.0.1'; +--connect a,127.0.0.1,a +show grants; +--connect b,127.0.0.1,b +--replace_column 1 # 3 # 5 # 6 # 9 # +show processlist; +kill user a; # existing connection, but not visible to current_user +kill user x; # not existing connection +--connection a +--replace_column 1 # 3 # 5 # 6 # 7 # 8 # 9 # +show processlist; +--error ER_KILL_DENIED_ERROR +kill user b; +--connection default +drop user a@'127.0.0.1'; +drop user b@'127.0.0.1'; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/locale.result b/mysql-test/main/locale.result index f136e9e99ab..8ff5bde39e1 100644 --- a/mysql-test/main/locale.result +++ b/mysql-test/main/locale.result @@ -306,3 +306,13 @@ date_format('2001-01-06', '%w %a %W', 'de_CH') select date_format('2001-09-01', '%c %b %M', 'de_CH'); date_format('2001-09-01', '%c %b %M', 'de_CH') 9 Sep September +# +# MDEV-30630 locale: Chinese error message for ZH_CN +# +SET lc_messages=ZH_CN; +SELECT x; +ERROR 42S22: 未知列'x'在'field list' 
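For the MDEV-30630 case above, note that lc_messages only swaps the message text; the error number and SQLSTATE are unchanged, so scripted error handling keeps working. A minimal session-scoped sketch (the column name below is hypothetical):

    SET SESSION lc_messages = 'zh_CN';   -- per session; the server-wide default needs SET GLOBAL
    SELECT no_such_column;               -- still error 1054 / SQLSTATE 42S22, only the text is localized
    SET SESSION lc_messages = DEFAULT;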
+SET lc_messages=DEFAULT; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/locale.test b/mysql-test/main/locale.test index a9a507bc387..7d9a07178ee 100644 --- a/mysql-test/main/locale.test +++ b/mysql-test/main/locale.test @@ -181,3 +181,17 @@ select date_format('2001-10-01', '%c %b %M', 'rm_CH'); select date_format('2001-12-01', '%c %b %M', 'rm_CH'); select date_format('2001-01-06', '%w %a %W', 'de_CH'); select date_format('2001-09-01', '%c %b %M', 'de_CH'); + +--echo # +--echo # MDEV-30630 locale: Chinese error message for ZH_CN +--echo # + +SET lc_messages=ZH_CN; +--error ER_BAD_FIELD_ERROR +SELECT x; + +SET lc_messages=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result index d6cf9ba685f..222c592cbce 100644 --- a/mysql-test/main/multi_update.result +++ b/mysql-test/main/multi_update.result @@ -1253,7 +1253,7 @@ EXPLAIN DROP TABLES t1, t2; # End of 10.3 tests # -# MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in +# MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in # create table t1 (c1 int, c2 int, c3 int, index idx(c2)); insert into t1 values diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test index 48e6250393b..329394e8468 100644 --- a/mysql-test/main/multi_update.test +++ b/mysql-test/main/multi_update.test @@ -1134,7 +1134,7 @@ DROP TABLES t1, t2; --echo # End of 10.3 tests --echo # ---echo # MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in +--echo # MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in --echo # create table t1 (c1 int, c2 int, c3 int, index idx(c2)); diff --git a/mysql-test/main/myisam_explain_non_select_all.result b/mysql-test/main/myisam_explain_non_select_all.result index 2ff966fdfd3..210679ba29e 100644 --- a/mysql-test/main/myisam_explain_non_select_all.result +++ b/mysql-test/main/myisam_explain_non_select_all.result @@ -17,6 +17,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a < 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` < 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -60,6 +62,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -103,6 +107,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE a = 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t1` where `test`.`t1`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -150,6 +156,8 @@ EXPLAIN EXTENDED UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where 1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 update `test`.`t1` join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -200,6 +208,8 
@@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where 1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` = 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -248,6 +258,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(1,<exists>(/* select#2 */ select 1 from `test`.`t2` where `test`.`t2`.`b` < 3 and 1)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -300,6 +312,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t2`.`b` from `test`.`t2` where `test`.`t1`.`a` < 3 and <cache>(`test`.`t1`.`a`) = `test`.`t2`.`b`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -353,6 +366,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00 1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` semi join (`test`.`t2`) join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t2`.`b` < 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -405,6 +420,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -457,6 +474,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY <derived2> system NULL NULL NULL NULL 1 100.00 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -511,6 +530,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where 1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` > 1 # Status of EXPLAIN EXTENDED query Variable_name Value 
Handler_read_key 4 @@ -555,6 +576,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a > 1 LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 1 limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -598,6 +621,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` where 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -638,6 +663,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t1` where 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -678,6 +705,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a = 3; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` = 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 5 @@ -719,6 +748,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 3; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 3 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 5 @@ -758,6 +789,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -797,6 +830,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -840,6 +875,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where @a:=`test`.`t1`.`a` order by `test`.`t1`.`a` limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -884,6 +921,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using filesort +Warnings: +Note 1003 delete from `test`.`t1` order by `test`.`t1`.`a`,`test`.`t1`.`b` limit 1 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -941,6 +980,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 
ALL NULL NULL NULL NULL 3 100.00 1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 100.00 1 SIMPLE t3 eq_ref PRIMARY PRIMARY 8 test.t2.b2,test.t1.b1 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1`,`test`.`t2`,`test`.`t3` using `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a2` = `test`.`t1`.`a1` and `test`.`t3`.`a3` = `test`.`t2`.`b2` and `test`.`t3`.`b3` = `test`.`t1`.`b1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 13 @@ -993,6 +1034,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` where <cache>(`test`.`t1`.`a`) = `test`.`t2`.`a`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1043,6 +1086,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a1`,<exists>(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`a2`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1088,6 +1133,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a1`,<exists>(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`a2`)) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1132,6 +1179,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00 +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`i` = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -1175,6 +1224,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL 5 NULL Deleting all rows +Warnings: +Note 1003 delete from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -1221,6 +1272,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1267,6 +1320,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO t2 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows 
filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 insert into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1311,6 +1366,8 @@ FLUSH TABLES; EXPLAIN EXTENDED REPLACE INTO t2 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 replace into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1351,6 +1408,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(i) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -1374,6 +1433,8 @@ FLUSH TABLES; EXPLAIN EXTENDED REPLACE INTO t1 SET i = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 replace into `test`.`t1`(i) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -1402,6 +1463,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1447,6 +1510,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1500,6 +1565,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1554,6 +1621,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1603,6 +1672,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ 
-1657,6 +1728,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -1712,6 +1785,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1765,6 +1840,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1812,6 +1889,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort +Warnings: +Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1866,6 +1945,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 6 NULL 5 100.00 +Warnings: +Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -1915,6 +1996,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -1963,6 +2046,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -2017,6 +2102,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where 
`test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2072,6 +2159,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2122,6 +2211,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2176,6 +2267,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 8 @@ -2231,6 +2324,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`i` = 123 where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2284,6 +2379,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -2332,6 +2429,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2387,6 +2486,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 index NULL a 6 NULL 5 100.00 Using buffer +Warnings: +Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2439,6 +2540,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET c2 = 
0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`c2` = 0 where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2485,6 +2588,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort +Warnings: +Note 1003 delete from `test`.`t1` where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2534,6 +2639,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+10 WHERE a > 34; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 10 where `test`.`t1`.`a` > 34 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -2581,6 +2688,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` set NULL = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2624,6 +2733,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 W id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where +Warnings: +Note 1003 update `test`.`t1` set NULL = 10 where `test`.`t1`.`c3` = 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2676,6 +2787,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1 +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`f2` = (/* select#2 */ select max(`test`.`t2`.`f4`) from `test`.`t2` where `test`.`t2`.`f3` = `test`.`t1`.`f1`) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 7 @@ -2747,6 +2859,8 @@ EXPLAIN EXTENDED UPDATE v1 SET a = 1 WHERE a > 0; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where 1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` > 0 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -2792,6 +2906,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where 1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00 +Warnings: +Note 1003 update `test`.`t1` join `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query 
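# Editor's note: the sketch below is illustrative only and is not part of the
# merged commit. The hunks above extend the expected EXPLAIN EXTENDED output
# for UPDATE and DELETE with a "Note 1003" warning carrying the rewritten
# statement; in a client session that note is read back with SHOW WARNINGS
# right after the EXPLAIN. The tiny t1/t2 schema mirrors the tables used in
# these hunks.
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (b INT);
INSERT INTO t1 VALUES (1),(2),(3);
INSERT INTO t2 VALUES (1),(2),(3);
EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT b FROM t2);
SHOW WARNINGS; # expected to include a Note 1003 with the unfolded UPDATE
DROP TABLE t1, t2;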
Variable_name Value Handler_read_key 2 @@ -2841,6 +2957,8 @@ FLUSH TABLES; EXPLAIN EXTENDED DELETE FROM v1 WHERE a < 4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ delete from `test`.`t1` where `test`.`t1`.`a` < 4 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -2892,6 +3010,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2943,6 +3063,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where 1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00 +Warnings: +Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 6 @@ -2987,6 +3109,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO v1 VALUES (10); id select_type table type possible_keys key key_len ref rows filtered Extra 1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1`(x) values (10) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 2 @@ -3027,6 +3151,8 @@ FLUSH TABLES; EXPLAIN EXTENDED INSERT INTO v1 SELECT * FROM t1; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 system NULL NULL NULL NULL 0 0.00 Const row not found +Warnings: +Note 1003 insert into `test`.`t2`(x) /* select#1 */ select NULL AS `a` from `test`.`t1` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3084,6 +3210,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where 2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 5 func 2 100.00 3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in (temporary) on key0))) # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3137,6 +3265,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1) 1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` set `test`.`t1`.`a` = 10 where `x`.`b` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3192,6 +3322,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00 4 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort 2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ update 
`test`.`t1` semi join ((/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `y` set `test`.`t1`.`a` = 10 where `x`.`b` = `test`.`t1`.`a` # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 4 @@ -3241,6 +3373,8 @@ JOIN t1 AS a12 ON a12.c1 = a11.c1 id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) DROP TABLE t1, t2, t3; #73 CREATE TABLE t1 (id INT); @@ -3269,6 +3403,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using buffer +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -3306,6 +3442,8 @@ FLUSH TABLES; EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using filesort +Warnings: +Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10 order by `test`.`t1`.`a` + 20 # Status of EXPLAIN EXTENDED query Variable_name Value Handler_read_key 3 @@ -3331,6 +3469,61 @@ Handler_read_key 4 Sort_range 1 DROP TABLE t1; +#75 +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +# +# query: INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +# select: +# +EXPLAIN INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows Extra +1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL +FLUSH STATUS; +FLUSH TABLES; +EXPLAIN EXTENDED INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL +Warnings: +Note 1003 insert into `test`.`t1` values (3,10),(7,11),(3,11) on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`) +# Status of EXPLAIN EXTENDED query +Variable_name Value +Handler_read_key 4 +# Status of testing query execution: +Variable_name Value +Handler_read_key 4 +Handler_read_rnd 1 +Handler_write 3 + +DROP TABLE t1; +#76 +CREATE TABLE t1 (id INT PRIMARY KEY, i INT); +CREATE TABLE t2 (a INT, b INT); +INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11); +# +# query: INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id); +# select: +# +EXPLAIN INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 +FLUSH STATUS; +FLUSH TABLES; +EXPLAIN EXTENDED INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 +Warnings: +Note 
1003 insert into `test`.`t1` select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`) +# Status of EXPLAIN EXTENDED query +Variable_name Value +Handler_read_key 7 +# Status of testing query execution: +Variable_name Value +Handler_read_key 7 +Handler_read_rnd 1 +Handler_read_rnd_next 5 +Handler_write 4 + +DROP TABLE t1,t2; # # Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH # EXPLAIN UPDATE/DEL/INS diff --git a/mysql-test/main/mysqlbinlog.test b/mysql-test/main/mysqlbinlog.test index b12709583e4..22a85393a35 100644 --- a/mysql-test/main/mysqlbinlog.test +++ b/mysql-test/main/mysqlbinlog.test @@ -534,7 +534,7 @@ remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog.warn; --echo # MYSQL_BINLOG std_data/master-bin.000001 --base64-output 2>&1 # The error produces the absolute path of the mysqlbinlog executable, remove it. --replace_regex /.*mariadb-binlog.*:/mariadb-binlog:/i ---error 1 +--error 5 --exec $MYSQL_BINLOG std_data/master-bin.000001 --base64-output 2>&1 --echo # diff --git a/mysql-test/main/name_resolution_cache_debug.result b/mysql-test/main/name_resolution_cache_debug.result new file mode 100644 index 00000000000..7030176c5fe --- /dev/null +++ b/mysql-test/main/name_resolution_cache_debug.result @@ -0,0 +1,25 @@ +connect con1,localhost,root; +create table t1 (a int, b int); +create table t2 (c int, d int); +create view v1 as select c+1 as e, d+1 as f from t2; +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select a from t1"; +execute stmt1; +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; +connection con1; +a +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select e from v1"; +execute stmt1; +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; +connection con1; +e +connection default; +disconnect con1; +SET DEBUG_SYNC = 'RESET'; +drop view v1; +drop table t1,t2; diff --git a/mysql-test/main/name_resolution_cache_debug.test b/mysql-test/main/name_resolution_cache_debug.test new file mode 100644 index 00000000000..362d883cbd1 --- /dev/null +++ b/mysql-test/main/name_resolution_cache_debug.test @@ -0,0 +1,36 @@ + +source include/have_debug_sync.inc; + +connect con1,localhost,root; +create table t1 (a int, b int); +create table t2 (c int, d int); +create view v1 as select c+1 as e, d+1 as f from t2; + +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select a from t1"; +--send execute stmt1 + +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; + +connection con1; +--reap + +SET DEBUG_SYNC= 'table_field_cached SIGNAL in_sync WAIT_FOR go'; +prepare stmt1 from "select e from v1"; +--send execute stmt1 + +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR in_sync'; +SET DEBUG_SYNC= 'now SIGNAL go'; + +connection con1; +--reap + +connection default; +disconnect con1; + +SET DEBUG_SYNC = 'RESET'; +drop view v1; +drop table t1,t2; diff --git a/mysql-test/main/old-mode.result b/mysql-test/main/old-mode.result index e0a3412bbdf..bb65acd54ce 100644 --- a/mysql-test/main/old-mode.result +++ b/mysql-test/main/old-mode.result @@ -221,3 +221,39 @@ a UNIX_TIMESTAMP(t1.a) a UNIX_TIMESTAMP(t2.a) DROP TABLE t1; SET time_zone=DEFAULT; SET global mysql56_temporal_format=true; +# +# MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? 
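# Editor's note: illustrative sketch, not part of the merged commit. The
# old-mode hunks that follow cover MDEV-26765: with
# old_mode=zero_date_time_cast a TIME value is cast to DATETIME using a zero
# date instead of the current date, so UNIX_TIMESTAMP() of a plain TIME
# yields NULL plus a truncation warning. Minimal reproduction, assuming the
# fixed time zone and timestamp used by the test:
SET old_mode=zero_date_time_cast;
SET @@time_zone='+00:00';
SET timestamp=1234567;
SELECT UNIX_TIMESTAMP(TIME'10:20:30'); # expected: NULL, warning 1292
SET @@time_zone=DEFAULT;
SET timestamp=DEFAULT;
SET old_mode=DEFAULT;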
+# +SET old_mode=zero_date_time_cast; +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +CURRENT_TIMESTAMP +1970-01-15 06:56:07 +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +UNIX_TIMESTAMP(CURRENT_TIME()) +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '06:56:07' +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +UNIX_TIMESTAMP(TIME'06:56:07') +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '06:56:07' +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +UNIX_TIMESTAMP(TIME'10:20:30') +NULL +Warnings: +Warning 1292 Truncated incorrect datetime value: '10:20:30' +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +UNIX_TIMESTAMP(a) +NULL +NULL +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +DROP TABLE t1; +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; diff --git a/mysql-test/main/old-mode.test b/mysql-test/main/old-mode.test index a09de1cf87d..d3fc254110d 100644 --- a/mysql-test/main/old-mode.test +++ b/mysql-test/main/old-mode.test @@ -149,3 +149,23 @@ SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 DROP TABLE t1; SET time_zone=DEFAULT; SET global mysql56_temporal_format=true; + + +--echo # +--echo # MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? +--echo # + +SET old_mode=zero_date_time_cast; +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +DROP TABLE t1; + +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index f365041b28a..cd05e24ce15 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -1016,7 +1016,6 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b { "index": "a", "used_range_estimates": false, "cause": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true @@ -1073,7 +1072,6 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b { "index": "a", "used_range_estimates": false, "cause": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true @@ -3733,7 +3731,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 { "select_id": 1, "steps": [ { - "expanded_query": "select NULL AS `NULL` from t0 join t1 where t0.a = t1.a and t1.a < 3" + "expanded_query": "delete from t0,t1 using t0 join t1 where t0.a = t1.a and t1.a < 3" } ] } @@ -3962,7 +3960,6 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 { "index": "a", "used_range_estimates": false, "cause": "not better than ref estimates", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 3.001757383, "chosen": true @@ -4098,7 +4095,7 @@ explain select * from (select rand() from t1)q { "derived": { "table": "q", "select_id": 2, - "algorithm": "merged" + "algorithm": "materialized" } }, { @@ -4112,7 +4109,7 @@ explain select * from (select rand() from t1)q { } }, { - "expanded_query": "/* select#1 */ select rand() AS `rand()` from (/* select#2 */ select rand() AS `rand()` from t1) q" + "expanded_query": "/* select#1 */ select q.`rand()` AS 
`rand()` from (/* select#2 */ select rand() AS `rand()` from t1) q" } ] } @@ -4122,14 +4119,6 @@ explain select * from (select rand() from t1)q { "select_id": 1, "steps": [ { - "derived": { - "table": "q", - "select_id": 2, - "algorithm": "materialized", - "cause": "Random function in the select" - } - }, - { "join_optimization": { "select_id": 2, "steps": [ @@ -8031,7 +8020,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "b", "used_range_estimates": false, "cause": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 20.00585794, "chosen": true @@ -8234,7 +8222,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "a", "used_range_estimates": false, "cause": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 20.00585794, "chosen": true @@ -8302,7 +8289,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "index": "a", "used_range_estimates": false, "cause": "not available", - "rowid_filter_skipped": "cost_factor <= 0", "rows": 1, "cost": 200.0585794, "chosen": true diff --git a/mysql-test/main/partition.result b/mysql-test/main/partition.result index 6f26ec02148..4042012d85b 100644 --- a/mysql-test/main/partition.result +++ b/mysql-test/main/partition.result @@ -2820,3 +2820,41 @@ DROP TABLE t1,t2; # # End of 10.1 tests # +# +# MDEV-30596: Assertion 'pushed_rowid_filter != __null ...' failed +# +create table t1 (a int); +insert into t1 values (NULL),(1),(2); +create table t2 (a int); +insert into t2 select seq from seq_1_to_1000; +create table t3 ( +a1 int, +a2 int, +b int, +c int, +filler1 char(200), +filler2 char(200), +key(a1,a2), +key(b) +) partition by hash(a1) partitions 2; +insert into t3 select seq/100, seq/100, seq, seq, seq, seq from seq_1_to_10000; +analyze table t3 persistent for all; +Table Op Msg_type Msg_text +test.t3 analyze status Engine-independent statistics collected +test.t3 analyze status OK +set @tmp_os= @@optimizer_switch; +set optimizer_switch='materialization=off'; +# Must not show "Using rowid filter": +explain +select * +from t1 +where +t1.a not in (select straight_join t3.a1 +from t2, t3 +where t3.b < 3000 and t3.a2=t2.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 1000 Using where +2 DEPENDENT SUBQUERY t3 ref_or_null a1,b a1 10 func,test.t2.a 198 Using where; Full scan on NULL key +set optimizer_switch=@tmp_os; +drop table t1,t2,t3; diff --git a/mysql-test/main/partition.test b/mysql-test/main/partition.test index 5d5dafdb206..d4a107517e8 100644 --- a/mysql-test/main/partition.test +++ b/mysql-test/main/partition.test @@ -3019,7 +3019,44 @@ INSERT INTO t2 VALUES (1),(2); UPDATE t1 SET a = 7 WHERE a = ( SELECT b FROM t2 ) ORDER BY a LIMIT 6; DROP TABLE t1,t2; - --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # MDEV-30596: Assertion 'pushed_rowid_filter != __null ...' 
failed +--echo # +--source include/have_sequence.inc +create table t1 (a int); +insert into t1 values (NULL),(1),(2); +create table t2 (a int); +insert into t2 select seq from seq_1_to_1000; + +create table t3 ( + a1 int, + a2 int, + b int, + c int, + filler1 char(200), + filler2 char(200), + key(a1,a2), + key(b) +) partition by hash(a1) partitions 2; +insert into t3 select seq/100, seq/100, seq, seq, seq, seq from seq_1_to_10000; +analyze table t3 persistent for all; + +set @tmp_os= @@optimizer_switch; +set optimizer_switch='materialization=off'; + +--echo # Must not show "Using rowid filter": +explain +select * +from t1 +where + t1.a not in (select straight_join t3.a1 + from t2, t3 + where t3.b < 3000 and t3.a2=t2.a); +set optimizer_switch=@tmp_os; + +drop table t1,t2,t3; + diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index 7cd6484d7ff..38a7fa1aeed 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5562,11 +5562,15 @@ EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 F id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )"; EXECUTE stmt; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0) DEALLOCATE PREPARE stmt; DROP TABLE t1, t2, t3; # diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result index 9860b2e9ad3..e32b738c4e5 100644 --- a/mysql-test/main/rowid_filter.result +++ b/mysql-test/main/rowid_filter.result @@ -337,8 +337,8 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition -1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter +1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND @@ -348,40 +348,40 @@ EXPLAIN "query_block": { "select_id": 1, "table": { - "table_name": "lineitem", + "table_name": "orders", 
"access_type": "range", + "possible_keys": ["PRIMARY", "i_o_totalprice"], + "key": "i_o_totalprice", + "key_length": "9", + "used_key_parts": ["o_totalprice"], + "rows": 69, + "filtered": 100, + "index_condition": "orders.o_totalprice between 200000 and 230000" + }, + "table": { + "table_name": "lineitem", + "access_type": "ref", "possible_keys": [ "PRIMARY", "i_l_shipdate", "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "i_l_shipdate", - "key_length": "4", - "used_key_parts": ["l_shipDATE"], - "rows": 98, - "filtered": 100, - "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" - }, - "table": { - "table_name": "orders", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "i_o_totalprice"], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", - "used_key_parts": ["o_orderkey"], - "ref": ["dbt3_s001.lineitem.l_orderkey"], + "used_key_parts": ["l_orderkey"], + "ref": ["dbt3_s001.orders.o_orderkey"], "rowid_filter": { "range": { - "key": "i_o_totalprice", - "used_key_parts": ["o_totalprice"] + "key": "i_l_shipdate", + "used_key_parts": ["l_shipDATE"] }, - "rows": 69, - "selectivity_pct": 4.6 + "rows": 98, + "selectivity_pct": 1.631973356 }, - "rows": 1, - "filtered": 4.599999905, - "attached_condition": "orders.o_totalprice between 200000 and 230000" + "rows": 4, + "filtered": 1.631973386, + "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" } } } @@ -390,8 +390,8 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra -1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition -1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter +1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) 0.15 (2%) 1.63 100.00 Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND @@ -403,55 +403,55 @@ ANALYZE "r_loops": 1, "r_total_time_ms": "REPLACED", "table": { - "table_name": "lineitem", + "table_name": "orders", "access_type": "range", - "possible_keys": [ - "PRIMARY", - "i_l_shipdate", - "i_l_orderkey", - "i_l_orderkey_quantity" - ], - "key": "i_l_shipdate", - "key_length": "4", - "used_key_parts": ["l_shipDATE"], + "possible_keys": ["PRIMARY", "i_o_totalprice"], + "key": "i_o_totalprice", + "key_length": "9", + "used_key_parts": ["o_totalprice"], "r_loops": 1, - "rows": 98, - "r_rows": 98, + "rows": 69, + "r_rows": 71, "r_table_time_ms": "REPLACED", "r_other_time_ms": "REPLACED", "filtered": 100, "r_filtered": 100, - "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" + "index_condition": "orders.o_totalprice between 200000 and 230000" }, "table": { - "table_name": "orders", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "i_o_totalprice"], - "key": "PRIMARY", + "table_name": 
"lineitem", + "access_type": "ref", + "possible_keys": [ + "PRIMARY", + "i_l_shipdate", + "i_l_orderkey", + "i_l_orderkey_quantity" + ], + "key": "i_l_orderkey", "key_length": "4", - "used_key_parts": ["o_orderkey"], - "ref": ["dbt3_s001.lineitem.l_orderkey"], + "used_key_parts": ["l_orderkey"], + "ref": ["dbt3_s001.orders.o_orderkey"], "rowid_filter": { "range": { - "key": "i_o_totalprice", - "used_key_parts": ["o_totalprice"] + "key": "i_l_shipdate", + "used_key_parts": ["l_shipDATE"] }, - "rows": 69, - "selectivity_pct": 4.6, - "r_rows": 71, - "r_lookups": 96, - "r_selectivity_pct": 10.41666667, + "rows": 98, + "selectivity_pct": 1.631973356, + "r_rows": 98, + "r_lookups": 476, + "r_selectivity_pct": 2.31092437, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" }, - "r_loops": 98, - "rows": 1, - "r_rows": 0.112244898, + "r_loops": 71, + "rows": 4, + "r_rows": 0.154929577, "r_table_time_ms": "REPLACED", "r_other_time_ms": "REPLACED", - "filtered": 4.599999905, + "filtered": 1.631973386, "r_filtered": 100, - "attached_condition": "orders.o_totalprice between 200000 and 230000" + "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'" } } } diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index 35520d1bd23..a18676f9d45 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index 790e94488cf..a7c4d1ec68e 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -3755,7 +3755,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index 35520d1bd23..a18676f9d45 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where +1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git 
a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result index 25caa02da3e..5220483f164 100644 --- a/mysql-test/main/selectivity.result +++ b/mysql-test/main/selectivity.result @@ -1856,7 +1856,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 @@ -1868,7 +1868,7 @@ WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter set optimizer_switch= @save_optimizer_switch; set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; diff --git a/mysql-test/main/shutdown.test b/mysql-test/main/shutdown.test index 13b7ac3f672..71f2156a17f 100644 --- a/mysql-test/main/shutdown.test +++ b/mysql-test/main/shutdown.test @@ -18,8 +18,8 @@ disconnect c1; create procedure try_shutdown() shutdown; drop procedure try_shutdown; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name --send shutdown diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 4209e2bc529..0c877b03b97 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -118,27 +118,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -228,19 +228,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a 
>= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -307,21 +314,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -330,7 +350,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* 
select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -486,8 +506,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -505,8 +528,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -879,6 +905,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -889,6 +934,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1530,8 +1589,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum 
from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1726,8 +1785,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3782,9 +3841,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3794,9 +3854,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3820,16 +3880,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6423,11 +6477,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6560,66 +6613,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set 
@@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) 
FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6665,8 +6745,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6883,7 +6965,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6893,9 +6977,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6921,7 +7007,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6931,9 +7019,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index 7620a51c296..ae072787028 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -74,13 +74,13 @@ SELECT 
ROW(1,2,3) > (SELECT 1,2,1); #enable after fix MDEV-28585 --disable_view_protocol SELECT ROW(1,2,3) = (SELECT 1,2,NULL); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); --enable_view_protocol +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; -- error ER_OPERAND_COLUMNS SELECT (SELECT * FROM (SELECT 'test' a,'test' b) a); @@ -118,11 +118,15 @@ set optimizer_switch=@tmp_optimizer_switch; select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3) order by 1 desc limit 1); select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a > t1.a) order by 1 desc limit 1); select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); -#enable afte fix MDEV-27871 ---disable_view_protocol -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; ---enable_view_protocol +select + b, + (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +explain extended +select + b, + (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; select * from t3 where exists (select * from t2 where t2.b=t3.a); select * from t3 where not exists (select * from t2 where t2.b=t3.a); select * from t3 where a in (select b from t2); @@ -155,16 +159,25 @@ delete from t2 where a=2 and b=10; select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 where t2.b=t4.b); create table t5 (a int); -#enable afte fix MDEV-27871 ---disable_view_protocol - -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; ---enable_view_protocol +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; +explain extended +select + (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, + a +from t2; -- error ER_SUBQUERY_NO_1_ROW select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; @@ -269,19 +282,19 @@ SELECT * from t2 where topic = any (SELECT topic FROM t2 GROUP BY topic HAVING t SELECT * from t2 where topic = any (SELECT SUM(topic) FROM t1); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100); -#enable after fix 
MDEV-27871 ---disable_view_protocol -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; ---enable_view_protocol +SELECT + *, + topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); SELECT * from t2 where topic <> any (SELECT SUM(topic) FROM t2); SELECT * from t2 where topic IN (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); SELECT * from t2 where topic = any (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; ---enable_view_protocol +SELECT + *, + topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; drop table t1,t2; #forumconthardwarefr7 @@ -521,11 +534,20 @@ select 10.5 IN (SELECT * from t1); select 1.5 > ALL (SELECT * from t1); select 10.5 > ALL (SELECT * from t1); select 1.5 > ANY (SELECT * from t1); +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +select 3.5 IN (SELECT * from t1); +select 10.5 IN (SELECT * from t1); +select 1.5 > ALL (SELECT * from t1); +select 10.5 > ALL (SELECT * from t1); +select 1.5 > ANY (SELECT * from t1); select 10.5 > ANY (SELECT * from t1); +--enable_view_protocol +explain extended select (select a+1) from t1; +explain extended select (select a+1) from t1; explain extended select (select a+1) from t1; select (select a+1) from t1; drop table t1; ---enable_view_protocol # # Null with keys @@ -945,10 +967,7 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -#enable after fix MDEV-27871 ---disable_view_protocol -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; ---enable_view_protocol +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; drop table t1,t2,t3; # @@ -1066,10 +1085,7 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -#enable after fix MDEV-27871 ---disable_view_protocol -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; ---enable_view_protocol +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; SELECT dbid, name FROM t3 a, t2 b, t4 WHERE 
dbid = - 1 AND primary_uid = '1' AND ((date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01')) AND t4.task_id = taskid; drop table t1,t2,t3,t4; @@ -2388,9 +2404,6 @@ SELECT a, MAX(b), MIN(b) FROM t1 GROUP BY a; SELECT * FROM t2; SELECT * FROM t3; -#enable after fix MDEV-28570 ---disable_view_protocol - SELECT a FROM t1 GROUP BY a HAVING a IN (SELECT c FROM t2 WHERE MAX(b)>20); SELECT a FROM t1 GROUP BY a @@ -2455,8 +2468,6 @@ SELECT t1.a, SUM(b) AS sum FROM t1 GROUP BY t1.a HAVING t1.a IN (SELECT t2.c FROM t2 GROUP BY t2.c HAVING t2.c+sum > 20); ---enable_view_protocol - DROP TABLE t1,t2,t3; @@ -2687,19 +2698,17 @@ DROP TABLE t1; # Bug#21540 Subqueries with no from and aggregate functions return # wrong results -#enable after fix MDEV-27871, MDEV-28573 ---disable_view_protocol - CREATE TABLE t1 (a INT, b INT); CREATE TABLE t2 (a INT); INSERT INTO t2 values (1); INSERT INTO t1 VALUES (1,1),(1,2),(2,3),(3,4); SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT + (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; SELECT COUNT(DISTINCT t1.b), (SELECT COUNT(DISTINCT t1.b)) FROM t1 GROUP BY t1.a; SELECT COUNT(DISTINCT t1.b), - (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) + (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; SELECT ( SELECT ( @@ -2713,11 +2722,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) - FROM t1 GROUP BY t1.a LIMIT 1) + FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; DROP TABLE t1,t2; ---enable_view_protocol # # Bug#21727 Correlated subquery that requires filesort: @@ -2952,8 +2960,6 @@ DROP TABLE t1,t2; # Bug#27229 GROUP_CONCAT in subselect with COUNT() as an argument # -#enable after fix MDEV-28571 ---disable_view_protocol CREATE TABLE t1 (a int, b int); CREATE TABLE t2 (m int, n int); INSERT INTO t1 VALUES (2,2), (2,2), (3,3), (3,3), (3,3), (4,4); @@ -2968,7 +2974,6 @@ SELECT COUNT(*) c, a, FROM t1 GROUP BY a; DROP table t1,t2; ---enable_view_protocol # # Bug#27321 Wrong subquery result in a grouping select @@ -2999,14 +3004,11 @@ SELECT tt.a, FROM t1 WHERE t1.a=tt.a GROUP BY a LIMIT 1) as test FROM t1 as tt GROUP BY tt.a; -#enable after fix MDEV-28571 ---disable_view_protocol SELECT tt.a, MAX( (SELECT (SELECT t.c FROM t1 AS t WHERE t1.a=t.a AND t.d=MAX(t1.b + tt.a) LIMIT 1) FROM t1 WHERE t1.a=tt.a GROUP BY a LIMIT 1)) as test FROM t1 as tt GROUP BY tt.a; ---enable_view_protocol DROP TABLE t1; @@ -3161,8 +3163,6 @@ CREATE TABLE t2 (x INTEGER); INSERT INTO t1 VALUES (1,11), (2,22), (2,22); INSERT INTO t2 VALUES (1), (2); -#enable after fix MDEV-28573 ---disable_view_protocol # wasn't failing, but should --error ER_SUBQUERY_NO_1_ROW SELECT a, COUNT(b), (SELECT COUNT(b) FROM t2) FROM t1 GROUP BY a; @@ -3172,7 +3172,6 @@ SELECT a, COUNT(b), (SELECT COUNT(b) FROM t2) FROM t1 GROUP BY a; SELECT a, COUNT(b), (SELECT COUNT(b)+0 FROM t2) FROM t1 GROUP BY a; SELECT (SELECT SUM(t1.a)/AVG(t2.x) FROM t2) FROM t1; ---enable_view_protocol DROP TABLE t1,t2; @@ -3187,8 +3186,6 @@ GROUP BY a1.a; DROP TABLE t1; #test cases from 29297 -#enable after fix MDEV-28573 ---disable_view_protocol CREATE TABLE t1 (a INT); CREATE TABLE t2 (a INT); INSERT INTO t1 VALUES (1),(2); @@ -3198,7 +3195,6 @@ SELECT (SELECT SUM(t1.a) FROM t2 WHERE 
a=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a!=0) FROM t1; SELECT (SELECT SUM(t1.a) FROM t2 WHERE a=1) FROM t1; DROP TABLE t1,t2; ---enable_view_protocol # # Bug#31884 Assertion + crash in subquery in the SELECT clause. @@ -5400,14 +5396,11 @@ INSERT INTO t2 VALUES (10,7,0), (0,7,0); CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); -#enable after fix MDEV-27871 ---disable_view_protocol SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 - WHERE t.a != 0 AND t2.a != 0) + WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; ---enable_view_protocol SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -5565,29 +5558,53 @@ INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); +SELECT + (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m + FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +SELECT + (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; set @@session.optimizer_switch=@old_optimizer_switch; # check different IN with default switches -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); ---enable_view_protocol +SELECT + (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; 
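(Editorial aside, not part of the patch: throughout these hunks the test queries gain explicit `AS m` aliases, and the diff pairs that change with removal of the `#enable after fix MDEV-27871 --disable_view_protocol` guards — presumably because a short, stable column name is what lets these statements run under the suite's view protocol, where the auto-generated name would otherwise be the full expression text. A minimal sketch of the pattern follows, using a hypothetical scratch table `t_demo` that does not appear anywhere in the test suite.)

# hypothetical illustration only -- t_demo is not part of this test suite
CREATE TABLE t_demo (a INT);
INSERT INTO t_demo VALUES (1),(2);
# without an alias, the result column is named after the whole expression text
SELECT (SELECT MAX(a) FROM t_demo) IN (SELECT a FROM t_demo);
# with an alias, the column is simply named m, no matter how the statement is
# later rewritten (for example, wrapped in a view by a view-protocol run)
SELECT (SELECT MAX(a) FROM t_demo) IN (SELECT a FROM t_demo) AS m;
DROP TABLE t_demo;

(Note also that a few of the rewritten statements in this commit attach the alias to the table instead of the expression, e.g. `... IN (SELECT f1a FROM t1) FROM t2 AS m;`, which is why the corresponding .result hunks still show the long expression text as the column header rather than `m`.)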
+SELECT + (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; +SELECT + (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +SELECT + (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; # other row operation with NULL single row subquery also should work select (null, null) = (null, null); @@ -5627,10 +5644,9 @@ INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -#enable after fix MDEV-27871 ---disable_view_protocol -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; ---enable_view_protocol +SELECT + ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; # This query just for example, it should return the same as above (1 and NULL) SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -5819,15 +5835,17 @@ INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; -#enable after fix MDEV-27871 ---disable_view_protocol EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; show status like "subquery_cache%"; @@ -5836,17 +5854,20 @@ show status like '%Handler_read%'; set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT + (SELECT MIN(b) FROM t1, t2 + WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; show status like "subquery_cache%"; show status like '%Handler_read%'; ---enable_view_protocol drop table t1, t2, t3; @@ -6100,22 +6121,16 @@ INSERT INTO t1 VALUES (1),(2); CREATE TABLE t2 (f2 int); INSERT INTO t2 VALUES (3); -#enable after fix MDEV-29224 ---disable_view_protocol SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; SELECT ( SELECT MAX(f1) FROM t2 ) FROM v1; ---enable_view_protocol INSERT INTO t2 VALUES (4); -#enable after fix MDEV-28573 ---disable_view_protocol --error ER_SUBQUERY_NO_1_ROW SELECT ( SELECT MAX(f1) FROM t2 ) FROM v1; --error ER_SUBQUERY_NO_1_ROW SELECT ( SELECT MAX(f1) FROM t2 ) FROM t1; ---enable_view_protocol drop view v1; drop table t1,t2; diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result index 3eb3c265b54..d90a2d645b8 100644 --- a/mysql-test/main/subselect4.result +++ b/mysql-test/main/subselect4.result @@ -20,8 +20,8 @@ WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3)) ORDER BY count(*); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 2 Using index -2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where -3 DEPENDENT SUBQUERY t3 system NULL NULL NULL NULL 0 Const row not found +2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where +3 DEPENDENT 
SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table # should not crash the next statement SELECT 1 FROM t1 WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3)) diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index e32e6007328..c7fc78ce28e 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -122,27 +122,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -232,19 +232,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -311,21 +318,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where 
t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -334,7 +354,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -490,8 +510,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -509,8 +532,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -883,6 +909,25 @@ 
NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -893,6 +938,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1534,8 +1593,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1730,8 +1789,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), 
'1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3785,9 +3844,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3797,9 +3857,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3823,16 +3883,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6423,11 +6477,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6560,66 +6613,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT 
f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6665,8 +6745,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6883,7 +6965,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6893,9 
+6977,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6921,7 +7007,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6931,9 +7019,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index 07755a5144a..5998be62535 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -125,27 +125,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -235,19 +235,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from 
t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -314,21 +321,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -337,7 +357,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a 
union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -493,8 +513,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -512,8 +535,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -886,6 +912,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -896,6 +941,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1537,8 +1596,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select 
a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1733,8 +1792,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3785,9 +3844,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3797,9 +3857,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3823,16 +3883,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6418,11 +6472,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6555,66 +6608,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) 
AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT 
+(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6660,8 +6740,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6877,7 +6959,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6887,9 +6971,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6915,7 +7001,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6925,9 +7013,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index 15688fc1717..4d3ecfd70e1 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -121,27 +121,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = 
ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -231,19 +231,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -310,21 +317,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain 
extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -333,7 +353,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -489,8 +509,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -508,8 +531,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -882,6 +908,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -892,6 +937,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type 
possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1533,8 +1592,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1729,8 +1788,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3781,9 +3840,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3793,9 +3853,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT 
t1.b) m 2 2 1 1 1 1 @@ -3819,16 +3879,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6414,11 +6468,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6551,66 +6604,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT 
f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6656,8 +6736,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6874,7 +6956,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6884,9 +6968,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6912,7 +6998,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 
alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6922,9 +7010,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index e3bdddbf84b..7991829cd49 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -124,27 +124,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -234,19 +234,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,(/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ 
select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -313,21 +320,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -336,7 +356,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select (/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -492,8 +512,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -511,8 +534,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic 
HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -885,6 +911,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -895,6 +940,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1536,8 +1595,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values (0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1732,8 +1791,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL 
how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3788,9 +3847,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3800,9 +3860,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3826,16 +3886,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6429,11 +6483,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6566,66 +6619,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = @@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS 
m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6671,8 +6751,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6889,7 +6971,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT 
MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6899,9 +6983,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6927,7 +7013,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 58 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6937,9 +7025,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index a06a4aef99b..aeac9d4c1ed 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -121,27 +121,27 @@ ROW(1,2,3) > (SELECT 1,2,1) SELECT ROW(1,2,3) = (SELECT 1,2,NULL); ROW(1,2,3) = (SELECT 1,2,NULL) NULL -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'a') AS m; +m 1 -SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b'); -(SELECT 1.5,2,'a') = ROW(1.5,2,'b') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,2,'b') AS m; +m 0 -SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b'); -(SELECT 1.5,2,'a') = ROW('1.5b',2,'b') +SELECT (SELECT 1.5,2,'a') = ROW('1.5b',2,'b') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: '1.5b' -SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a'); -(SELECT 'b',2,'a') = ROW(1.5,2,'a') +SELECT (SELECT 'b',2,'a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'b' -SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a'); -(SELECT 1.5,2,'a') = ROW(1.5,'2','a') +SELECT (SELECT 1.5,2,'a') = ROW(1.5,'2','a') AS m; +m 1 -SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a'); -(SELECT 1.5,'c','a') = ROW(1.5,2,'a') +SELECT (SELECT 1.5,'c','a') = ROW(1.5,2,'a') AS m; +m 0 Warnings: Warning 1292 Truncated incorrect DECIMAL value: 'c' @@ -231,19 +231,26 @@ a 2 select * from t1 where t1.a=(select t2.a from t2 where t2.b=(select max(a) from t3 where t3.a < t1.a) order by 1 desc limit 1); a -select b,(select avg(t2.a+(select 
min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; -b (select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; +b m 8 7.5000 8 4.5000 9 7.5000 -explain extended select b,(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) from t4; +explain extended +select +b, +(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2) as m +from t4; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 3 100.00 Using where Warnings: Note 1276 Field or reference 'test.t4.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `(select avg(t2.a+(select min(t3.a) from t3 where t3.a >= t4.a)) from t2)` from `test`.`t4` +Note 1003 /* select#1 */ select `test`.`t4`.`b` AS `b`,<expr_cache><`test`.`t4`.`a`>((/* select#2 */ select avg(`test`.`t2`.`a` + (/* select#3 */ select min(`test`.`t3`.`a`) from `test`.`t3` where `test`.`t3`.`a` >= `test`.`t4`.`a`)) from `test`.`t2`)) AS `m` from `test`.`t4` select * from t3 where exists (select * from t2 where t2.b=t3.a); a 7 @@ -310,21 +317,34 @@ select b,max(a) as ma from t4 group by b having b >= (select max(t2.a) from t2 w b ma 7 12 create table t5 (a int); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (5); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 insert into t5 values (2); -select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; -(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) a +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; +m a NULL 1 2 2 -explain extended select (select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a), a from t2; +explain extended +select +(select a from t1 where t1.a=t2.a union select a from t5 where t5.a=t2.a) as m, +a +from t2; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 2 DEPENDENT SUBQUERY t1 system NULL NULL NULL NULL 1 100.00 @@ -333,7 +353,7 @@ NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1 Note 1276 Field or reference 'test.t2.a' of SELECT #3 was resolved in SELECT #1 -Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `(select a from t1 where t1.a=t2.a union select a from t5 where 
t5.a=t2.a)`,`test`.`t2`.`a` AS `a` from `test`.`t2` +Note 1003 /* select#1 */ select <expr_cache><`test`.`t2`.`a`>((/* select#2 */ select 2 from dual where 2 = `test`.`t2`.`a` union /* select#3 */ select `test`.`t5`.`a` from `test`.`t5` where `test`.`t5`.`a` = `test`.`t2`.`a`)) AS `m`,`test`.`t2`.`a` AS `a` from `test`.`t2` select (select a from t1 where t1.a=t2.a union all select a from t5 where t5.a=t2.a), a from t2; ERROR 21000: Subquery returns more than 1 row create table t6 (patient_uq int, clinic_uq int, index i1 (clinic_uq)); @@ -489,8 +509,11 @@ SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING t mot topic date pseudo joce 40143 2002-10-22 joce joce 43506 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 4100) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 1 SELECT * from t2 where topic = all (SELECT SUM(topic) FROM t2); @@ -508,8 +531,11 @@ joce 40143 2002-10-22 joce SELECT * from t2 where topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000); mot topic date pseudo joce 40143 2002-10-22 joce -SELECT *, topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) from t2; -mot topic date pseudo topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) +SELECT +*, +topic = all (SELECT topic FROM t2 GROUP BY topic HAVING topic < 41000) AS m +FROM t2; +mot topic date pseudo m joce 40143 2002-10-22 joce 1 joce 43506 2002-10-22 joce 0 drop table t1,t2; @@ -882,6 +908,25 @@ NULL select 1.5 > ANY (SELECT * from t1); 1.5 > ANY (SELECT * from t1) NULL +update t1 set a=NULL where a=2.5; +select 1.5 IN (SELECT * from t1); +1.5 IN (SELECT * from t1) +1 +select 3.5 IN (SELECT * from t1); +3.5 IN (SELECT * from t1) +1 +select 10.5 IN (SELECT * from t1); +10.5 IN (SELECT * from t1) +NULL +select 1.5 > ALL (SELECT * from t1); +1.5 > ALL (SELECT * from t1) +0 +select 10.5 > ALL (SELECT * from t1); +10.5 > ALL (SELECT * from t1) +NULL +select 1.5 > ANY (SELECT * from t1); +1.5 > ANY (SELECT * from t1) +NULL select 10.5 > ANY (SELECT * from t1); 10.5 > ANY (SELECT * from t1) 1 @@ -892,6 +937,20 @@ Warnings: Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 Note 1249 Select 2 was reduced during optimization Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` +explain extended select (select a+1) from t1; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1 +Note 1249 Select 2 was reduced during optimization +Note 1003 select `test`.`t1`.`a` + 1 AS `(select a+1)` from `test`.`t1` select (select a+1) from t1; (select a+1) 2.5 @@ -1533,8 +1592,8 @@ create table t3 (a int, b int); insert into t1 values (0,100),(1,2), (1,3), (2,2), (2,7), (2,-1), (3,10); insert into t2 values 
(0,0), (1,1), (2,1), (3,1), (4,1); insert into t3 values (3,3), (2,2), (1,1); -select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) from t3; -a (select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) +select a,(select count(distinct t1.b) as sum from t1,t2 where t1.a=t2.a and t2.b > 0 and t1.a <= t3.b group by t1.a order by sum limit 1) as m from t3; +a m 3 1 2 2 1 2 @@ -1729,8 +1788,8 @@ CREATE TABLE `t3` (`taskgenid` mediumint(9) NOT NULL auto_increment,`dbid` int(1 INSERT INTO `t3` (`taskgenid`, `dbid`, `taskid`, `mon`, `tues`,`wed`, `thur`, `fri`, `sat`, `sun`, `how_often`, `userid`, `active`) VALUES (1,-1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1); CREATE TABLE `t4` (`task_id` smallint(6) NOT NULL default '0',`description` varchar(200) NOT NULL default '') ENGINE=MyISAM CHARSET=latin1; INSERT INTO `t4` (`task_id`, `description`) VALUES (1, 'Daily Check List'),(2, 'Weekly Status'); -select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; -dbid name (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') +select dbid, name, (date_format(now() , '%Y-%m-%d') - INTERVAL how_often DAY) >= ifnull((SELECT date_format(max(create_date),'%Y-%m-%d') as m FROM t1 WHERE dbid = b.db_id AND taskid = a.taskgenid), '1950-01-01') as m from t3 a, t2 b, t4 WHERE dbid = - 1 AND primary_uid = '1' AND t4.task_id = taskid; +dbid name m -1 Valid 1 -1 Valid 2 1 -1 Should Not Return 0 @@ -3781,9 +3840,10 @@ SELECT (SELECT COUNT(DISTINCT t1.b) from t2) FROM t1 GROUP BY t1.a; 2 1 1 -SELECT (SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +SELECT +(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) AS m FROM t1 GROUP BY t1.a; -(SELECT COUNT(DISTINCT t1.b) from t2 union select 1 from t2 where 12 < 3) +m 2 1 1 @@ -3793,9 +3853,9 @@ COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b)) 1 1 1 1 SELECT COUNT(DISTINCT t1.b), -(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +(SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) AS m FROM t1 GROUP BY t1.a; -COUNT(DISTINCT t1.b) (SELECT COUNT(DISTINCT t1.b) union select 1 from DUAL where 12 < 3) +COUNT(DISTINCT t1.b) m 2 2 1 1 1 1 @@ -3819,16 +3879,10 @@ SELECT ( SELECT COUNT(DISTINCT t1.b) ) ) -FROM t1 GROUP BY t1.a LIMIT 1) +FROM t1 GROUP BY t1.a LIMIT 1) AS m FROM t1 t2 GROUP BY t2.a; -( -SELECT ( -SELECT ( -SELECT COUNT(DISTINCT t1.b) -) -) -FROM t1 GROUP BY t1.a LIMIT 1) +m 2 2 2 @@ -6414,11 +6468,10 @@ CREATE TABLE t3 (a int, b int); INSERT INTO t3 VALUES (10,7), (0,7); SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +WHERE t.a != 0 AND t2.a != 0) AS m FROM (SELECT * FROM t3) AS t GROUP BY 2; -SUM(DISTINCT b) (SELECT t2.a FROM t1 JOIN t2 ON t2.c != 0 -WHERE t.a != 0 AND t2.a != 0) +SUM(DISTINCT b) m 7 NULL SELECT SUM(DISTINCT b), (SELECT t2.a FROM t1,t2 WHERE t.a != 0 or 1=2 LIMIT 1) @@ -6551,66 +6604,93 @@ CREATE TABLE t3 (f3a int default 1, f3b int default 2); INSERT INTO t3 VALUES (1,1),(2,2); set @old_optimizer_switch = 
@@session.optimizer_switch; set @@optimizer_switch='materialization=on,partial_match_rowid_merge=on,partial_match_table_scan=off,subquery_cache=off,semijoin=off'; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL set @@session.optimizer_switch=@old_optimizer_switch; -SELECT (SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) FROM t2; -(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) +SELECT +(SELECT f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) NOT IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) NOT IN (SELECT f1a, f1b FROM t1) AS m; +m NULL -SELECT (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2; +SELECT +(SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) FROM t2 AS m; (SELECT f3a FROM t3 where f3a > 3) IN (SELECT f1a FROM t1) NULL NULL -SELECT (SELECT f3a,f3a FROM t3 where f3a > 
3) IN (SELECT f1a,f1a FROM t1) FROM t2; -(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) +SELECT +(SELECT f3a,f3a FROM t3 where f3a > 3) IN (SELECT f1a,f1a FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) FROM t2; -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m +FROM t2; +m NULL NULL -SELECT (SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1); -(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) +SELECT +(SELECT f3a, f3b FROM t3 where f3a > 3) IN (SELECT f1a, f1b FROM t1) AS m; +m NULL select (null, null) = (null, null); (null, null) = (null, null) @@ -6656,8 +6736,10 @@ INSERT INTO t2 VALUES (1); CREATE TABLE t3 ( c INT ); INSERT INTO t3 VALUES (4),(5); SET optimizer_switch='subquery_cache=off'; -SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1; -( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) +SELECT +( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) AS m +FROM t1; +m 1 NULL SELECT ( SELECT b FROM t2 WHERE b = a OR b * 0) FROM t1; @@ -6874,7 +6956,9 @@ CREATE TABLE t3 (c INT); INSERT INTO t3 VALUES (8),(3); set @@expensive_subquery_limit= 0; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6884,9 +6968,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL @@ -6912,7 +6998,9 @@ Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 set @@expensive_subquery_limit= default; EXPLAIN -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY alias1 ALL NULL NULL NULL NULL 2 @@ -6922,9 +7010,11 @@ id select_type table type possible_keys key key_len ref rows Extra 2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) 3 SUBQUERY t3 ALL NULL NULL NULL NULL 2 flush status; -SELECT (SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +SELECT +(SELECT MIN(b) FROM t1, t2 +WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) AS m FROM t2 alias1, t1 alias2, t1 alias3; -(SELECT MIN(b) FROM t1, t2 WHERE b = a AND (b = alias1.b OR EXISTS (SELECT * FROM t3))) +m NULL NULL NULL diff --git a/mysql-test/main/system_mysql_db_fix50030.result b/mysql-test/main/system_mysql_db_fix50030.result index c7086d66a59..987dcbf83fc 100644 --- 
a/mysql-test/main/system_mysql_db_fix50030.result +++ b/mysql-test/main/system_mysql_db_fix50030.result @@ -166,14 +166,14 @@ show create table servers; Table Create Table servers CREATE TABLE `servers` ( `Server_name` char(64) NOT NULL DEFAULT '', - `Host` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', - `Owner` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', PRIMARY KEY (`Server_name`) ) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' show create table proc; diff --git a/mysql-test/main/system_mysql_db_fix50117.result b/mysql-test/main/system_mysql_db_fix50117.result index 84861744a70..ca817b198c6 100644 --- a/mysql-test/main/system_mysql_db_fix50117.result +++ b/mysql-test/main/system_mysql_db_fix50117.result @@ -146,14 +146,14 @@ show create table servers; Table Create Table servers CREATE TABLE `servers` ( `Server_name` char(64) NOT NULL DEFAULT '', - `Host` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', - `Owner` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', PRIMARY KEY (`Server_name`) ) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' show create table proc; diff --git a/mysql-test/main/system_mysql_db_fix50568.result b/mysql-test/main/system_mysql_db_fix50568.result new file mode 100644 index 00000000000..898b11a6ce4 --- /dev/null +++ b/mysql-test/main/system_mysql_db_fix50568.result @@ -0,0 +1,297 @@ +use test; +CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT 
NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. 
+CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. 
+CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; +CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins'; +CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table'; +CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; +CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; +CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics'; +CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories'; +CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation'; +CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords'; +CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names'; +Warnings: +Warning 1280 Name 'Name' 
ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones'; +Warnings: +Warning 1280 Name 'TzId' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions'; +Warnings: +Warning 1280 Name 'TzIdTranTime' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types'; +Warnings: +Warning 1280 Name 'TzIdTrTId' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones'; +Warnings: +Warning 1280 Name 'TranTime' ignored for PRIMARY key. +CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures'; +CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY 
(Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges'; +CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; +CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; +CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges'; +Warnings: +Warning 1280 Name 'Host' ignored for PRIMARY key. 
+show tables; +Tables_in_db +column_stats +columns_priv +db +event +func +general_log +global_priv +gtid_slave_pos +help_category +help_keyword +help_relation +help_topic +host +index_stats +innodb_index_stats +innodb_table_stats +ndb_binlog_index +plugin +proc +procs_priv +proxies_priv +roles_mapping +servers +slow_log +table_stats +tables_priv +time_zone +time_zone_leap_second +time_zone_name +time_zone_transition +time_zone_transition_type +transaction_registry +user +show create table db; +Table Create Table +db CREATE TABLE `db` ( + `Host` char(60) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(80) NOT NULL DEFAULT '', + `Select_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Insert_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Update_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Delete_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Create_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Drop_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Grant_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `References_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Index_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Alter_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Create_view_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Show_view_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Execute_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Event_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Trigger_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N', + PRIMARY KEY (`Host`,`Db`,`User`), + KEY `User` (`User`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges' +show create table user; +View Create View character_set_client collation_connection +user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS 
`Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci +show create 
table func; +Table Create Table +func CREATE TABLE `func` ( + `name` char(64) NOT NULL DEFAULT '', + `ret` tinyint(1) NOT NULL DEFAULT 0, + `dl` char(128) NOT NULL DEFAULT '', + `type` enum('function','aggregate') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL, + PRIMARY KEY (`name`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions' +show create table global_priv; +Table Create Table +global_priv CREATE TABLE `global_priv` ( + `Host` char(60) NOT NULL DEFAULT '', + `User` char(80) NOT NULL DEFAULT '', + `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)), + PRIMARY KEY (`Host`,`User`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges' +show create table tables_priv; +Table Create Table +tables_priv CREATE TABLE `tables_priv` ( + `Host` char(60) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(80) NOT NULL DEFAULT '', + `Table_name` char(64) NOT NULL DEFAULT '', + `Grantor` char(141) NOT NULL DEFAULT '', + `Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `Table_priv` set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger','Delete versioning rows') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`), + KEY `Grantor` (`Grantor`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges' +show create table columns_priv; +Table Create Table +columns_priv CREATE TABLE `columns_priv` ( + `Host` char(60) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(80) NOT NULL DEFAULT '', + `Table_name` char(64) NOT NULL DEFAULT '', + `Column_name` char(64) NOT NULL DEFAULT '', + `Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges' +show create table procs_priv; +Table Create Table +procs_priv CREATE TABLE `procs_priv` ( + `Host` char(60) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `User` char(80) NOT NULL DEFAULT '', + `Routine_name` char(64) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `Routine_type` enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, + `Grantor` char(141) NOT NULL DEFAULT '', + `Proc_priv` set('Execute','Alter Routine','Grant') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT '', + `Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`), + KEY `Grantor` (`Grantor`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges' +show create table servers; +Table Create Table +servers CREATE TABLE `servers` ( + `Server_name` char(64) NOT NULL DEFAULT '', + `Host` varchar(2048) NOT NULL DEFAULT '', + `Db` char(64) NOT NULL DEFAULT '', + `Username` char(80) NOT NULL 
DEFAULT '', + `Password` char(64) NOT NULL DEFAULT '', + `Port` int(4) NOT NULL DEFAULT 0, + `Socket` char(64) NOT NULL DEFAULT '', + `Wrapper` char(64) NOT NULL DEFAULT '', + `Owner` varchar(512) NOT NULL DEFAULT '', + PRIMARY KEY (`Server_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table' +show create table proc; +Table Create Table +proc CREATE TABLE `proc` ( + `db` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `name` char(64) NOT NULL DEFAULT '', + `type` enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, + `specific_name` char(64) NOT NULL DEFAULT '', + `language` enum('SQL') NOT NULL DEFAULT 'SQL', + `sql_data_access` enum('CONTAINS_SQL','NO_SQL','READS_SQL_DATA','MODIFIES_SQL_DATA') NOT NULL DEFAULT 'CONTAINS_SQL', + `is_deterministic` enum('YES','NO') NOT NULL DEFAULT 'NO', + `security_type` enum('INVOKER','DEFINER') NOT NULL DEFAULT 'DEFINER', + `param_list` blob NOT NULL, + `returns` longblob NOT NULL, + `body` longblob NOT NULL, + `definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '', + `comment` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `body_utf8` longblob DEFAULT NULL, + `aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE', + PRIMARY KEY (`db`,`name`,`type`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures' +show create table event; +Table Create Table +event CREATE TABLE `event` ( + `db` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `name` char(64) NOT NULL DEFAULT '', + `body` longblob NOT NULL, + `definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `execute_at` datetime DEFAULT NULL, + `interval_value` int(11) DEFAULT NULL, + `interval_field` enum('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') DEFAULT NULL, + `created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + `last_executed` datetime DEFAULT NULL, + `starts` datetime DEFAULT NULL, + `ends` datetime DEFAULT NULL, + `status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED', + 
`on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP', + `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '', + `comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '', + `originator` int(10) unsigned NOT NULL, + `time_zone` char(64) CHARACTER SET latin1 COLLATE latin1_swedish_ci NOT NULL DEFAULT 'SYSTEM', + `character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `body_utf8` longblob DEFAULT NULL, + PRIMARY KEY (`db`,`name`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events' +show create table general_log; +Table Create Table +general_log CREATE TABLE `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci COMMENT='General log' +show create table slow_log; +Table Create Table +slow_log CREATE TABLE `slow_log` ( + `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `query_time` time(6) NOT NULL, + `lock_time` time(6) NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) NOT NULL, + `last_insert_id` int(11) NOT NULL, + `insert_id` int(11) NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `sql_text` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `rows_affected` int(11) NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci COMMENT='Slow log' +show create table table_stats; +Table Create Table +table_stats CREATE TABLE `table_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `cardinality` bigint(21) unsigned DEFAULT NULL, + PRIMARY KEY (`db_name`,`table_name`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables' +show create table column_stats; +Table Create Table +column_stats CREATE TABLE `column_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `column_name` varchar(64) NOT NULL, + `min_value` varbinary(255) DEFAULT NULL, + `max_value` varbinary(255) DEFAULT NULL, + `nulls_ratio` decimal(12,4) DEFAULT NULL, + `avg_length` decimal(12,4) DEFAULT NULL, + `avg_frequency` decimal(12,4) DEFAULT NULL, + `hist_size` tinyint(3) unsigned DEFAULT NULL, + `hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB') DEFAULT NULL, + `histogram` varbinary(255) DEFAULT NULL, + PRIMARY KEY (`db_name`,`table_name`,`column_name`) +) ENGINE=Aria DEFAULT 
CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns' +show create table index_stats; +Table Create Table +index_stats CREATE TABLE `index_stats` ( + `db_name` varchar(64) NOT NULL, + `table_name` varchar(64) NOT NULL, + `index_name` varchar(64) NOT NULL, + `prefix_arity` int(11) unsigned NOT NULL, + `avg_frequency` decimal(12,4) DEFAULT NULL, + PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`) +) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes' +DROP VIEW user; +DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event, proxies_priv, general_log, slow_log, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv, ndb_binlog_index; +show tables; +Tables_in_test diff --git a/mysql-test/main/system_mysql_db_fix50568.test b/mysql-test/main/system_mysql_db_fix50568.test new file mode 100644 index 00000000000..9ecb2a23d5f --- /dev/null +++ b/mysql-test/main/system_mysql_db_fix50568.test @@ -0,0 +1,99 @@ +# Embedded server doesn't support external clients +--source include/not_embedded.inc +--source include/have_innodb.inc + +# Don't run this test if $MYSQL_FIX_PRIVILEGE_TABLES isn't set +# to the location of mysql_fix_privilege_tables.sql +if (!$MYSQL_FIX_PRIVILEGE_TABLES) +{ + skip Test needs MYSQL_FIX_PRIVILEGE_TABLES; +} + +# +# This is the test for mysql_fix_privilege_tables +# It checks that a system tables from mysql 5.5.68 +# can be upgraded to current system table format +# +# Note: If this test fails, don't be confused about the errors reported +# by mysql-test-run This shows warnings generated by +# mysql_fix_system_tables which should be ignored. 
+# Instead, concentrate on the errors in r/system_mysql_db.reject + +use test; + +# create system tables as in mysql-5.5.68 +CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; + +CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; + +CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) 
character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; + +CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; + + +CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins'; + + +CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) 
NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table'; + + +CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; + +CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; + + +CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics'; + + +CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories'; + + +CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation'; + + +CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords'; + + +CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names'; + + +CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones'; + + +CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions'; + + +CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint 
unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types'; + + +CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones'; + +CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures'; + +CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges'; + + +CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 
'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; + + +CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; + +CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges'; + + +-- disable_result_log +# Run the mysql_fix_privilege_tables.sql using "mysql --force" +--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES +-- enable_result_log + +# Dump the tables that should be compared +-- source include/system_db_struct.inc + +# Drop all tables created by this test +DROP VIEW user; +DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event, proxies_priv, general_log, slow_log, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv, ndb_binlog_index; + +# check that we dropped all system tables +show tables; + +# End of 4.1 tests diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result index b90b5c86c15..49d23264a63 100644 --- a/mysql-test/main/table_value_constr.result +++ b/mysql-test/main/table_value_constr.result @@ -3131,5 +3131,124 @@ INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE)); ERROR HY000: 'ignore' is not allowed in this context DROP TABLE t1; # +# MDEV-28603: VIEW with table value constructor used as single-value +# subquery contains subquery as its first element +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (1), (2); +create view v as select (values ((select * from t1 where a > 5))) as m from t2; +select (values ((select * from t1 where a > 5))) as m from t2; +m +7 +7 +select * from v; +m +7 +7 +with cte as ( select (values ((select * from t1 where a > 5))) as m from 
t2 ) select * from cte; +m +7 +7 +explain select (values ((select * from t1 where a > 5))) as m from t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +4 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2 +2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +3 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +explain select * from v; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +5 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +4 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +explain with cte as ( select (values ((select * from t1 where a > 5))) as m from t2 ) select * from cte; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 +5 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 +3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used +4 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where +prepare stmt from "select (values ((select * from t1 where a > 5))) as m from t2"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "select * from v"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "with cte as ( select (values ((select * from t1 where a > 5))) as m from t2 ) select * from cte"; +execute stmt; +m +7 +7 +execute stmt; +m +7 +7 +deallocate prepare stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +drop view v; +prepare stmt from "create view v as select (values ((select * from t1 where a > 5))) as m from t2"; +execute stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +select * from v; +m +7 +7 +drop view v; +execute stmt; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +select * from v; +m +7 +7 +deallocate prepare stmt; +prepare stmt from "show create view v"; +execute stmt; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +execute stmt; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5))) AS `m` from `t2` latin1 latin1_swedish_ci +deallocate prepare stmt; +drop view v; +create view v as select (values ((select * from t1 where a > 5 +union +select * from t1 where a > 7))) as m from t2; +select (values ((select * from t1 where a > 5 +union +select * from t1 where a > 7))) as m from t2; +m +7 +7 +select * from v; +m +7 +7 +show create view v; +View Create View character_set_client collation_connection +v 
CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select (values ((select `t1`.`a` from `t1` where `t1`.`a` > 5 union select `t1`.`a` from `t1` where `t1`.`a` > 7))) AS `m` from `t2` latin1 latin1_swedish_ci +drop view v; +drop table t1,t2; +# # End of 10.4 tests # diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 51acbe97c27..cfe6be43d18 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1737,5 +1737,79 @@ INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE)); DROP TABLE t1; --echo # +--echo # MDEV-28603: VIEW with table value constructor used as single-value +--echo # subquery contains subquery as its first element +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); +create table t2 (b int); +insert into t2 values (1), (2); + +let $q= +select (values ((select * from t1 where a > 5))) as m from t2; + +eval create view v as $q; + +eval $q; +eval select * from v; +eval with cte as ( $q ) select * from cte; + +eval explain $q; +eval explain select * from v; +eval explain with cte as ( $q ) select * from cte; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "select * from v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval prepare stmt from "with cte as ( $q ) select * from cte"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +show create view v; + +drop view v; + +eval prepare stmt from "create view v as $q"; +execute stmt; +show create view v; +select * from v; +drop view v; +execute stmt; +show create view v; +select * from v; +deallocate prepare stmt; + +prepare stmt from "show create view v"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +drop view v; + +let $q= +select (values ((select * from t1 where a > 5 + union + select * from t1 where a > 7))) as m from t2; + +eval create view v as $q; + +eval $q; +eval select * from v; + +show create view v; + +drop view v; +drop table t1,t2; + +--echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/type_time.result b/mysql-test/main/type_time.result index 40746345bdf..a863eaea686 100644 --- a/mysql-test/main/type_time.result +++ b/mysql-test/main/type_time.result @@ -2420,5 +2420,31 @@ SET @@global.mysql56_temporal_format=default; DROP PROCEDURE p1; SET timestamp=DEFAULT; # +# MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? +# +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +CURRENT_TIMESTAMP +1970-01-15 06:56:07 +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +UNIX_TIMESTAMP(CURRENT_TIME()) +1234567 +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +UNIX_TIMESTAMP(TIME'06:56:07') +1234567 +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +UNIX_TIMESTAMP(TIME'10:20:30') +1246830 +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +UNIX_TIMESTAMP(a) +1234567 +1246830 +DROP TABLE t1; +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; +# # End of 10.4 tests # diff --git a/mysql-test/main/type_time.test b/mysql-test/main/type_time.test index 0f67223238c..9ed5c3a73dc 100644 --- a/mysql-test/main/type_time.test +++ b/mysql-test/main/type_time.test @@ -1567,5 +1567,24 @@ DROP PROCEDURE p1; SET timestamp=DEFAULT; --echo # +--echo # MDEV-26765 UNIX_TIMESTAMP(CURRENT_TIME()) return null ?!? 
+--echo # + +SET @@time_zone='+00:00'; +SET timestamp=1234567; +SELECT CURRENT_TIMESTAMP; +SELECT UNIX_TIMESTAMP(CURRENT_TIME()); +SELECT UNIX_TIMESTAMP(TIME'06:56:07'); +SELECT UNIX_TIMESTAMP(TIME'10:20:30'); +CREATE OR REPLACE TABLE t1 (a TIME); +INSERT INTO t1 VALUES (TIME'06:56:07'),('10:20:30'); +SELECT UNIX_TIMESTAMP(a) FROM t1 ORDER BY a; +DROP TABLE t1; + +SET @@time_zone=DEFAULT; +SET TIMESTAMP=DEFAULT; + + +--echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD Binary files differnew file mode 100644 index 00000000000..77a281667b5 --- /dev/null +++ b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYD diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI Binary files differnew file mode 100644 index 00000000000..20fc1b97c00 --- /dev/null +++ b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.MYI diff --git a/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm Binary files differnew file mode 100644 index 00000000000..1eb8ac15133 --- /dev/null +++ b/mysql-test/std_data/ctype_upgrade/mariadb100428_ucs2_general_ci.frm diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result index 9148f0e8c2b..294e96e5997 100644 --- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result +++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result @@ -1,3 +1,7 @@ +# +# MDEV-30698 Cover missing test cases for mariadb-binlog options +# --raw [and] --flashback +# CREATE TABLE t1 (a int); FLUSH LOGS; INSERT INTO t1 VALUES (1); diff --git a/mysql-test/suite/binlog/r/flashback.result b/mysql-test/suite/binlog/r/flashback.result index 9c238fa2874..ebbbeef9572 100644 --- a/mysql-test/suite/binlog/r/flashback.result +++ b/mysql-test/suite/binlog/r/flashback.result @@ -702,6 +702,10 @@ include/assert.inc [Table t1 should have 0 rows.] # 6- Rows must be present upon restoring from flashback include/assert.inc [Table t1 should have six rows.] DROP TABLE t1; +# +# MDEV-30698 Cover missing test cases for mariadb-binlog options +# --raw [and] --flashback +# SET binlog_format=statement; Warnings: Warning 1105 MariaDB Galera and flashback do not support binlog format: STATEMENT diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test index f95fc0137a2..252a8577b6c 100644 --- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test @@ -5,6 +5,7 @@ # respective log file specified by --result-file, and shown on-disk. This test # ensures that the log files on disk, created by mariadb-binlog, have the most # up-to-date events from the master. +# Option --raw works only with --read-from-remote-server, otherwise returns error. 
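As background for the option constraint that the comment above and the new test below refer to: with --raw, mariadb-binlog copies binary log files verbatim instead of decoding them, and this only works when the events are fetched over the replication protocol, i.e. together with --read-from-remote-server. A minimal illustrative invocation (the host, port and output path here are placeholders chosen for the sketch, not values taken from this commit) might look like:

    mariadb-binlog --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=3306 --result-file=/tmp/binlogs/ master-bin.000001

With --raw, --result-file is used as a prefix/directory for the copied log files. Dropping --read-from-remote-server from such a command makes mariadb-binlog exit with an error, which is what the new --error 1 check in the hunk below asserts; the companion flashback test likewise expects an error when --raw is combined with --flashback (-B).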
# # Methodology: # On the master, rotate to a newly active binlog file and write an event to @@ -20,6 +21,14 @@ --source include/linux.inc --source include/have_log_bin.inc +--echo # +--echo # MDEV-30698 Cover missing test cases for mariadb-binlog options +--echo # --raw [and] --flashback +--echo # +# Test --raw format without -R (--read-from-remote-server) +--error 1 # Error 1 operation not permitted +--exec $MYSQL_BINLOG --raw --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --stop-never --result-file=$MYSQLTEST_VARDIR/tmp/ master-bin.000001 + # Create newly active log CREATE TABLE t1 (a int); FLUSH LOGS; diff --git a/mysql-test/suite/binlog/t/flashback.test b/mysql-test/suite/binlog/t/flashback.test index f8f76ae8b11..787405822fd 100644 --- a/mysql-test/suite/binlog/t/flashback.test +++ b/mysql-test/suite/binlog/t/flashback.test @@ -364,6 +364,14 @@ FLUSH LOGS; DROP TABLE t1; +--echo # +--echo # MDEV-30698 Cover missing test cases for mariadb-binlog options +--echo # --raw [and] --flashback +--echo # + +--error 1 # --raw mode and --flashback mode are not allowed +--exec $MYSQL_BINLOG -vv -B --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000003> $MYSQLTEST_VARDIR/tmp/mysqlbinlog_row_flashback_8.sql + ## Clear SET binlog_format=statement; --error ER_FLASHBACK_NOT_SUPPORTED diff --git a/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test b/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test index c2a7ec9d27a..25849c111a7 100644 --- a/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test +++ b/mysql-test/suite/binlog_encryption/rpl_binlog_errors.test @@ -1,2 +1,2 @@ --let $binlog_limit= 5,1 ---source suite/rpl/include/rpl_binlog_errors.inc +--source suite/rpl/t/rpl_binlog_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test b/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test index acbe0d59a5e..406af58c03b 100644 --- a/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test +++ b/mysql-test/suite/binlog_encryption/rpl_cant_read_event_incident.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_cant_read_event_incident.inc +--source suite/rpl/t/rpl_cant_read_event_incident.test diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum.test b/mysql-test/suite/binlog_encryption/rpl_checksum.test index ca8cdc06726..a2abd019e24 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum.test +++ b/mysql-test/suite/binlog_encryption/rpl_checksum.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_checksum.inc +--source suite/rpl/t/rpl_checksum.test diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test b/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test index 8fa44136fc2..56fb2be0ce3 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test +++ b/mysql-test/suite/binlog_encryption/rpl_checksum_cache.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_checksum_cache.inc +--source suite/rpl/t/rpl_checksum_cache.test diff --git a/mysql-test/suite/binlog_encryption/rpl_corruption.test b/mysql-test/suite/binlog_encryption/rpl_corruption.test index 1abf2c882ec..f6ba2944398 100644 --- a/mysql-test/suite/binlog_encryption/rpl_corruption.test +++ b/mysql-test/suite/binlog_encryption/rpl_corruption.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_corruption.inc +--source suite/rpl/t/rpl_corruption.test diff --git a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result index 
4e17669605f..0e066fc0418 100644 --- a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result +++ b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result @@ -558,3 +558,27 @@ a connection server_1; DROP TABLE t1; include/rpl_end.inc +# +# Start of 10.2 tests +# +# +# MDEV-10134 Add full support for DEFAULT +# +CREATE TABLE t1 (a VARCHAR(100) DEFAULT BINLOG_GTID_POS("master-bin.000001", 600)); +ERROR HY000: Function or expression 'binlog_gtid_pos()' cannot be used in the DEFAULT clause of `a` +# +# End of 10.2 tests +# +# +# Start of 10.3 tests +# +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT MASTER_GTID_WAIT(ROW(1,1),'str'); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +SELECT MASTER_GTID_WAIT('str',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +# +# End of 10.3 tests +# diff --git a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test index b9f5a18a588..b183c1d4b4e 100644 --- a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test +++ b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_gtid_basic.inc +--source suite/rpl/t/rpl_gtid_basic.test diff --git a/mysql-test/suite/binlog_encryption/rpl_incident.test b/mysql-test/suite/binlog_encryption/rpl_incident.test index b6d2a24a71e..5a707774f3c 100644 --- a/mysql-test/suite/binlog_encryption/rpl_incident.test +++ b/mysql-test/suite/binlog_encryption/rpl_incident.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_incident.inc +--source suite/rpl/t/rpl_incident.test diff --git a/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test b/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test index 872b8cd3598..532db963e63 100644 --- a/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test +++ b/mysql-test/suite/binlog_encryption/rpl_init_slave_errors.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_init_slave_errors.inc +--source suite/rpl/t/rpl_init_slave_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test b/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test index 9e0bb9598bf..35ad09647a6 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test +++ b/mysql-test/suite/binlog_encryption/rpl_loaddata_local.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_loaddata_local.inc +--source suite/rpl/t/rpl_loaddata_local.test diff --git a/mysql-test/suite/binlog_encryption/rpl_loadfile.test b/mysql-test/suite/binlog_encryption/rpl_loadfile.test index 84e6ecd7a0d..235c4a3d29e 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loadfile.test +++ b/mysql-test/suite/binlog_encryption/rpl_loadfile.test @@ -1,4 +1,4 @@ ---source suite/rpl/include/rpl_loadfile.inc +--source suite/rpl/t/rpl_loadfile.test --let $datadir= `SELECT @@datadir` diff --git a/mysql-test/suite/binlog_encryption/rpl_packet.test b/mysql-test/suite/binlog_encryption/rpl_packet.test index 43637314236..28beae1a91c 100644 --- a/mysql-test/suite/binlog_encryption/rpl_packet.test +++ b/mysql-test/suite/binlog_encryption/rpl_packet.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_packet.inc +--source suite/rpl/t/rpl_packet.test diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test b/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test index 8a26778c8f2..0c3e5386930 100644 --- a/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test +++ 
b/mysql-test/suite/binlog_encryption/rpl_parallel_ignored_errors.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_parallel_ignored_errors.inc +--source suite/rpl/t/rpl_parallel_ignored_errors.test diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test b/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test index 7bdfaaf9adb..e342e4d61ff 100644 --- a/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_show_binlog_events_purge_logs.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc +--source suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test diff --git a/mysql-test/suite/binlog_encryption/rpl_relayrotate.test b/mysql-test/suite/binlog_encryption/rpl_relayrotate.test index e52f5159fcc..a8dabdc3e8d 100644 --- a/mysql-test/suite/binlog_encryption/rpl_relayrotate.test +++ b/mysql-test/suite/binlog_encryption/rpl_relayrotate.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_relayrotate.inc +--source suite/rpl/t/rpl_relayrotate.test diff --git a/mysql-test/suite/binlog_encryption/rpl_semi_sync.test b/mysql-test/suite/binlog_encryption/rpl_semi_sync.test index dfc68b699c0..2e0907cce30 100644 --- a/mysql-test/suite/binlog_encryption/rpl_semi_sync.test +++ b/mysql-test/suite/binlog_encryption/rpl_semi_sync.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_semi_sync.inc +--source suite/rpl/t/rpl_semi_sync.test diff --git a/mysql-test/suite/binlog_encryption/rpl_skip_replication.test b/mysql-test/suite/binlog_encryption/rpl_skip_replication.test index 40992586c7c..11c5b33b362 100644 --- a/mysql-test/suite/binlog_encryption/rpl_skip_replication.test +++ b/mysql-test/suite/binlog_encryption/rpl_skip_replication.test @@ -1,2 +1,2 @@ --let $use_remote_mysqlbinlog= 1 ---source suite/rpl/include/rpl_skip_replication.inc +--source suite/rpl/t/rpl_skip_replication.test diff --git a/mysql-test/suite/binlog_encryption/rpl_special_charset.test b/mysql-test/suite/binlog_encryption/rpl_special_charset.test index eb697204860..c74f8915798 100644 --- a/mysql-test/suite/binlog_encryption/rpl_special_charset.test +++ b/mysql-test/suite/binlog_encryption/rpl_special_charset.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_special_charset.inc +--source suite/rpl/t/rpl_special_charset.test diff --git a/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test b/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test index 0dab68a4b08..1e3992dc5d9 100644 --- a/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test +++ b/mysql-test/suite/binlog_encryption/rpl_sporadic_master.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_sporadic_master.inc +--source suite/rpl/t/rpl_sporadic_master.test diff --git a/mysql-test/suite/binlog_encryption/rpl_ssl.test b/mysql-test/suite/binlog_encryption/rpl_ssl.test index 9a4788c1d2d..fb30d83ab5e 100644 --- a/mysql-test/suite/binlog_encryption/rpl_ssl.test +++ b/mysql-test/suite/binlog_encryption/rpl_ssl.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_ssl.inc +--source suite/rpl/t/rpl_ssl.test diff --git a/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test b/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test index 45d18a25410..d5a08bde969 100644 --- a/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test +++ b/mysql-test/suite/binlog_encryption/rpl_stm_relay_ign_space.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_stm_relay_ign_space.inc 
+--source suite/rpl/t/rpl_stm_relay_ign_space.test diff --git a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test index 2a16d90f9ad..c65cc202ba2 100644 --- a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_switch_stm_row_mixed.inc +--source suite/rpl/t/rpl_switch_stm_row_mixed.test diff --git a/mysql-test/suite/binlog_encryption/rpl_sync.test b/mysql-test/suite/binlog_encryption/rpl_sync.test index 8dbd6ff254b..1ff72228565 100644 --- a/mysql-test/suite/binlog_encryption/rpl_sync.test +++ b/mysql-test/suite/binlog_encryption/rpl_sync.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_sync_test.inc +--source suite/rpl/t/rpl_sync.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test index 30f5f247c23..5a9a79bad42 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_default_to_default.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test index 68afb4148ef..b9576b30f92 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mariadb53_to_mysql56.test @@ -3,4 +3,4 @@ --let $force_master_mysql56_temporal_format=false; --let $force_slave_mysql56_temporal_format=true; ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test index 96d928fcf08..7d09942814e 100644 --- a/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test +++ b/mysql-test/suite/binlog_encryption/rpl_temporal_format_mysql56_to_mariadb53.test @@ -1,4 +1,4 @@ --let $force_master_mysql56_temporal_format=true; --let $force_slave_mysql56_temporal_format=false; ---source suite/rpl/include/rpl_temporal_format_default_to_default.inc +--source suite/rpl/t/rpl_temporal_format_default_to_default.test diff --git a/mysql-test/suite/binlog_encryption/rpl_typeconv.test b/mysql-test/suite/binlog_encryption/rpl_typeconv.test index fe56a148256..6761cddfb87 100644 --- a/mysql-test/suite/binlog_encryption/rpl_typeconv.test +++ b/mysql-test/suite/binlog_encryption/rpl_typeconv.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_typeconv.inc +--source suite/rpl/t/rpl_typeconv.test diff --git a/mysql-test/suite/encryption/r/import_4k.result b/mysql-test/suite/encryption/r/import_4k.result new file mode 100644 index 00000000000..959e2498e00 --- /dev/null +++ b/mysql-test/suite/encryption/r/import_4k.result @@ -0,0 +1,10 @@ +set @save_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug; +set global innodb_limit_optimistic_insert_debug=3; +create table t1 (a INT PRIMARY KEY) engine=InnoDB ENCRYPTED=YES; +insert into t1 select * from seq_1_to_6000; +flush table t1 for export; +unlock 
tables; +alter table t1 discard tablespace; +alter table t1 import tablespace; +set global innodb_limit_optimistic_insert_debug=@save_limit; +drop table t1; diff --git a/mysql-test/suite/encryption/t/import_4k.opt b/mysql-test/suite/encryption/t/import_4k.opt new file mode 100644 index 00000000000..e5b58602036 --- /dev/null +++ b/mysql-test/suite/encryption/t/import_4k.opt @@ -0,0 +1 @@ +--innodb-page-size=4k diff --git a/mysql-test/suite/encryption/t/import_4k.test b/mysql-test/suite/encryption/t/import_4k.test new file mode 100644 index 00000000000..aef7c702d12 --- /dev/null +++ b/mysql-test/suite/encryption/t/import_4k.test @@ -0,0 +1,20 @@ +--source include/have_innodb.inc +--source include/have_sequence.inc +--source include/have_example_key_management_plugin.inc +--source include/have_debug.inc + +set @save_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug; +set global innodb_limit_optimistic_insert_debug=3; +create table t1 (a INT PRIMARY KEY) engine=InnoDB ENCRYPTED=YES; +insert into t1 select * from seq_1_to_6000; +flush table t1 for export; +--let $datadir= `select @@datadir` +--copy_file $datadir/test/t1.ibd $datadir/t1.ibd +--copy_file $datadir/test/t1.cfg $datadir/t1.cfg +unlock tables; +alter table t1 discard tablespace; +--move_file $datadir/t1.ibd $datadir/test/t1.ibd +--move_file $datadir/t1.cfg $datadir/test/t1.cfg +alter table t1 import tablespace; +set global innodb_limit_optimistic_insert_debug=@save_limit; +drop table t1; diff --git a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test index 2a16d90f9ad..c65cc202ba2 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test @@ -1 +1 @@ ---source suite/rpl/include/rpl_switch_stm_row_mixed.inc +--source suite/rpl/t/rpl_switch_stm_row_mixed.test diff --git a/mysql-test/suite/galera/include/kill_galera.inc b/mysql-test/suite/galera/include/kill_galera.inc index 56118df84f9..28a1b0f368c 100644 --- a/mysql-test/suite/galera/include/kill_galera.inc +++ b/mysql-test/suite/galera/include/kill_galera.inc @@ -6,8 +6,8 @@ if (!$kill_signal) } # Write file to make mysql-test-run.pl expect the crash, but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Kill the connected server diff --git a/mysql-test/suite/galera/include/shutdown_mysqld.inc b/mysql-test/suite/galera/include/shutdown_mysqld.inc index 54bba1318e7..793be8d76ac 100644 --- a/mysql-test/suite/galera/include/shutdown_mysqld.inc +++ b/mysql-test/suite/galera/include/shutdown_mysqld.inc @@ -8,8 +8,8 @@ if ($rpl_inited) } # Write file to make mysql-test-run.pl expect the "crash", but don't start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name # Send shutdown to the connected server diff --git a/mysql-test/suite/galera/r/galera_sequences.result b/mysql-test/suite/galera/r/galera_sequences.result index 7276cb8dbde..da669e6774e 100644 --- a/mysql-test/suite/galera/r/galera_sequences.result +++ 
b/mysql-test/suite/galera/r/galera_sequences.result @@ -1,6 +1,11 @@ connection node_2; connection node_1; connection node_1; +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +CALL mtr.add_suppression("WSREP: CREATE TABLE isolation failure"); +connection node_2; +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +connection node_1; CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 1000000 increment by 0 cache 1000 nocycle ENGINE=InnoDB; SHOW CREATE SEQUENCE seq; Table Create Table @@ -47,6 +52,58 @@ NEXT VALUE FOR Seq1_1 3001 connection node_1; DROP SEQUENCE Seq1_1; -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +connection node_1; +CREATE TABLE t2 (d CHAR(1)KEY); +SET SESSION autocommit=0; +INSERT INTO t2 VALUES(1); +CREATE TEMPORARY SEQUENCE seq1 NOCACHE ENGINE=INNODB; +CREATE SEQUENCE seq2 NOCACHE ENGINE=INNODB; +COMMIT; +SET SESSION AUTOCOMMIT=1; +SHOW CREATE TABLE seq1; +Table Create Table +seq1 CREATE TEMPORARY TABLE `seq1` ( + `next_not_cached_value` bigint(21) NOT NULL, + `minimum_value` bigint(21) NOT NULL, + `maximum_value` bigint(21) NOT NULL, + `start_value` bigint(21) NOT NULL COMMENT 'start value when sequences is created or value if RESTART is used', + `increment` bigint(21) NOT NULL COMMENT 'increment value', + `cache_size` bigint(21) unsigned NOT NULL, + `cycle_option` tinyint(1) unsigned NOT NULL COMMENT '0 if no cycles are allowed, 1 if the sequence should begin a new cycle when maximum_value is passed', + `cycle_count` bigint(21) NOT NULL COMMENT 'How many cycles have been done' +) ENGINE=InnoDB SEQUENCE=1 connection node_2; -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +SHOW CREATE SEQUENCE seq1; +ERROR 42S02: Table 'test.seq1' doesn't exist +SHOW CREATE SEQUENCE seq2; +Table Create Table +seq2 CREATE SEQUENCE `seq2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +connection node_1; +SET SESSION autocommit=1; +DROP SEQUENCE seq1; +DROP SEQUENCE seq2; +DROP TABLE t2; +SET SESSION AUTOCOMMIT=0; +SET SESSION wsrep_OSU_method='RSU'; +CREATE TABLE t1(c1 VARCHAR(10)); +INSERT INTO t1 (c1) VALUES(''); +create temporary sequence sq1 NOCACHE engine=innodb; +create sequence sq2 NOCACHE engine=innodb; +COMMIT; +SHOW CREATE SEQUENCE sq1; +Table Create Table +sq1 CREATE SEQUENCE `sq1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +SHOW CREATE SEQUENCE sq2; +Table Create Table +sq2 CREATE SEQUENCE `sq2` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 nocache nocycle ENGINE=InnoDB +connection node_2; +SHOW CREATE SEQUENCE sq1; +ERROR 42S02: Table 'test.sq1' doesn't exist +SHOW CREATE SEQUENCE sq2; +ERROR 42S02: Table 'test.sq2' doesn't exist +connection node_1; +SET SESSION AUTOCOMMIT=1; +DROP TABLE t1; +DROP SEQUENCE sq1; +DROP SEQUENCE sq2; +SET SESSION wsrep_OSU_method='TOI'; diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test index c535ac455b9..b36a0de57b6 100644 --- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test +++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test @@ -35,8 +35,8 @@ UPDATE t1 SET f2 = 'c' WHERE f1 > 2; --connection node_2 # Write file to make mysql-test-run.pl expect the crash, but don't 
start it ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name --let KILL_NODE_PIDFILE = `SELECT @@pid_file` diff --git a/mysql-test/suite/galera/t/galera_sequences.cnf b/mysql-test/suite/galera/t/galera_sequences.cnf new file mode 100644 index 00000000000..98e724fb2d0 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_sequences.cnf @@ -0,0 +1,9 @@ +!include ../galera_2nodes.cnf + +[mysqld.1] +log-bin +log-slave-updates + +[mysqld.2] +log-bin +log-slave-updates diff --git a/mysql-test/suite/galera/t/galera_sequences.test b/mysql-test/suite/galera/t/galera_sequences.test index d469cc73516..faa3b46d2a7 100644 --- a/mysql-test/suite/galera/t/galera_sequences.test +++ b/mysql-test/suite/galera/t/galera_sequences.test @@ -5,6 +5,13 @@ # --connection node_1 +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +CALL mtr.add_suppression("WSREP: CREATE TABLE isolation failure"); +--connection node_2 + +CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); + +--connection node_1 CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 1000000 increment by 0 cache 1000 nocycle ENGINE=InnoDB; SHOW CREATE SEQUENCE seq; @@ -45,8 +52,48 @@ select NEXT VALUE FOR Seq1_1; --connection node_1 DROP SEQUENCE Seq1_1; -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +# +# MDEV-24045 : Assertion client_state_.mode() != wsrep::client_state::m_toi failed in int wsrep::transaction::before_commit() +# +--connection node_1 +CREATE TABLE t2 (d CHAR(1)KEY); +SET SESSION autocommit=0; +INSERT INTO t2 VALUES(1); +CREATE TEMPORARY SEQUENCE seq1 NOCACHE ENGINE=INNODB; +CREATE SEQUENCE seq2 NOCACHE ENGINE=INNODB; +COMMIT; +SET SESSION AUTOCOMMIT=1; +SHOW CREATE TABLE seq1; --connection node_2 - -CALL mtr.add_suppression("SEQUENCES declared without `NOCACHE` will not behave correctly in galera cluster."); +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE seq1; +SHOW CREATE SEQUENCE seq2; +--connection node_1 +SET SESSION autocommit=1; +DROP SEQUENCE seq1; +DROP SEQUENCE seq2; +DROP TABLE t2; +# +# Case2 +# +SET SESSION AUTOCOMMIT=0; +SET SESSION wsrep_OSU_method='RSU'; +CREATE TABLE t1(c1 VARCHAR(10)); +INSERT INTO t1 (c1) VALUES(''); +create temporary sequence sq1 NOCACHE engine=innodb; +create sequence sq2 NOCACHE engine=innodb; +COMMIT; +SHOW CREATE SEQUENCE sq1; +SHOW CREATE SEQUENCE sq2; +--connection node_2 +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE sq1; +--error ER_NO_SUCH_TABLE +SHOW CREATE SEQUENCE sq2; +--connection node_1 +SET SESSION AUTOCOMMIT=1; +DROP TABLE t1; +DROP SEQUENCE sq1; +DROP SEQUENCE sq2; +SET SESSION wsrep_OSU_method='TOI'; diff --git a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf index 9f99adbd711..cd7a892f4c9 100644 --- a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf +++ b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf @@ -55,7 +55,7 @@ wsrep-on=1 #galera_port=@OPT.port #ist_port=@OPT.port #sst_port=@OPT.port -wsrep_cluster_address='gcomm:// +wsrep_cluster_address=gcomm:// 
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.4.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M' wsrep_node_address='127.0.0.1:@mysqld.4.#galera_port' wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test index eb7f5603452..ab8b62b969a 100644 --- a/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test +++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test @@ -15,8 +15,8 @@ --connection node_2 SET GLOBAL debug_dbug="d,crash_last_fragment_commit_after_fragment_removal"; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo "wait" > $_expect_file_name CREATE TABLE t1 (f1 VARCHAR(30)) ENGINE=InnoDB; diff --git a/mysql-test/suite/gcol/r/gcol_rollback.result b/mysql-test/suite/gcol/r/gcol_rollback.result index 5ee94d3ef44..0bbf034122b 100644 --- a/mysql-test/suite/gcol/r/gcol_rollback.result +++ b/mysql-test/suite/gcol/r/gcol_rollback.result @@ -79,10 +79,29 @@ a b c d ROLLBACK; SET DEBUG_SYNC = 'now SIGNAL dml_done'; connection con1; -disconnect con1; connection default; SELECT * FROM t; a b d 9 10 29 DROP TABLE t; SET DEBUG_SYNC = 'RESET'; +# +# MDEV-30597 Assertion `flag == 1' failed in +# row_build_index_entry_low +# +CREATE TABLE t1 ( +col1 INT PRIMARY KEY, col_text TEXT, +col_text_g TEXT GENERATED ALWAYS AS (SUBSTR(col_text,1,499)) +) ENGINE = InnoDB ROW_FORMAT = Compact; +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; +INSERT INTO t1 (col1) VALUES (1) ; +DELETE FROM t1 WHERE col1 = 1; +ALTER TABLE t1 ADD UNIQUE INDEX (col_text_g(9)); +BEGIN; +INSERT INTO t1 (col1) VALUES (1); +ROLLBACK; +disconnect con1; +DROP TABLE t1; +# End of 10.4 tests diff --git a/mysql-test/suite/gcol/t/gcol_rollback.test b/mysql-test/suite/gcol/t/gcol_rollback.test index ba88dda45d7..888e6be861e 100644 --- a/mysql-test/suite/gcol/t/gcol_rollback.test +++ b/mysql-test/suite/gcol/t/gcol_rollback.test @@ -103,7 +103,6 @@ SET DEBUG_SYNC = 'now SIGNAL dml_done'; connection con1; reap; -disconnect con1; connection default; SELECT * FROM t; @@ -111,5 +110,27 @@ SELECT * FROM t; DROP TABLE t; SET DEBUG_SYNC = 'RESET'; +--echo # +--echo # MDEV-30597 Assertion `flag == 1' failed in +--echo # row_build_index_entry_low +--echo # +CREATE TABLE t1 ( +col1 INT PRIMARY KEY, col_text TEXT, +col_text_g TEXT GENERATED ALWAYS AS (SUBSTR(col_text,1,499)) +) ENGINE = InnoDB ROW_FORMAT = Compact; +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; +INSERT INTO t1 (col1) VALUES (1) ; +DELETE FROM t1 WHERE col1 = 1; +ALTER TABLE t1 ADD UNIQUE INDEX (col_text_g(9)); +BEGIN; +INSERT INTO t1 (col1) VALUES (1); +ROLLBACK; +disconnect con1; +DROP TABLE t1; + # Wait till all disconnects are completed --source include/wait_until_count_sessions.inc + +--echo # End of 10.4 tests diff --git a/mysql-test/suite/innodb/r/full_crc32_import.result b/mysql-test/suite/innodb/r/full_crc32_import.result index 99f11548420..32964be46d4 100644 --- a/mysql-test/suite/innodb/r/full_crc32_import.result +++ b/mysql-test/suite/innodb/r/full_crc32_import.result @@ -177,6 +177,17 @@ UNLOCK TABLES; SET GLOBAL innodb_compression_algorithm=0; ALTER TABLE t1 FORCE; ALTER 
TABLE t1 DISCARD TABLESPACE; +# Display the discarded table name by using SPACE and PAGE_NO +# column in INNODB_SYS_INDEXES and discard doesn't affect the +# SPACE in INNODB_SYS_TABLES +SELECT t.NAME, t.SPACE BETWEEN 1 and 0xFFFFFFEF as SYS_TABLE_SPACE_RANGE +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t +WHERE t.TABLE_ID IN ( +SELECT i.TABLE_ID FROM +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i WHERE +i.PAGE_NO IS NULL and i.SPACE IS NULL); +NAME SYS_TABLE_SPACE_RANGE +test/t1 1 db.opt t1.frm restore: t1 .ibd and .cfg files diff --git a/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result b/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result index d9f5e4dfeed..fa17487df97 100644 --- a/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result +++ b/mysql-test/suite/innodb/r/innodb_buffer_pool_dump_pct.result @@ -2,13 +2,11 @@ CREATE TABLE tab5 (col1 int auto_increment primary key, col2 VARCHAR(25), col3 varchar(25)) ENGINE=InnoDB; CREATE INDEX idx1 ON tab5(col2(10)); CREATE INDEX idx2 ON tab5(col3(10)); -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool100; SET GLOBAL innodb_buffer_pool_dump_pct=100; SELECT variable_value INTO @IBPDS FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS'; SET GLOBAL innodb_buffer_pool_dump_now=ON; -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool1; SET GLOBAL innodb_buffer_pool_dump_pct=1; SELECT @@global.innodb_buffer_pool_dump_pct; @@global.innodb_buffer_pool_dump_pct @@ -18,5 +16,4 @@ FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS'; SET GLOBAL innodb_buffer_pool_dump_now=ON; SET GLOBAL innodb_buffer_pool_dump_pct=DEFAULT; -SET GLOBAL innodb_buffer_pool_filename=DEFAULT; DROP TABLE tab5; diff --git a/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result b/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result index 32d87b4668a..6932b8f2292 100644 --- a/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result +++ b/mysql-test/suite/innodb/r/innodb_sys_var_valgrind.result @@ -25,27 +25,6 @@ select @@innodb_ft_server_stopword_table; @@innodb_ft_server_stopword_table NULL drop table user_stopword_1, user_stopword_2; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -ib_buffer_pool -set @blah='hello'; -set global innodb_buffer_pool_filename = @blah; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -hello -set global innodb_buffer_pool_filename="bye"; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -bye -set global innodb_buffer_pool_filename=NULL; -ERROR 42000: Variable 'innodb_buffer_pool_filename' can't be set to the value of 'NULL' -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -bye -set global innodb_buffer_pool_filename=default; -select @@innodb_buffer_pool_filename; -@@innodb_buffer_pool_filename -ib_buffer_pool CREATE TABLE t1 ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, opening_line TEXT(500), author VARCHAR(200), title VARCHAR(200), FULLTEXT idx (opening_line)) ENGINE=InnoDB; diff --git a/mysql-test/suite/innodb/r/instant_alter_crash.result b/mysql-test/suite/innodb/r/instant_alter_crash.result index d15c0337c37..c16d5951c07 100644 --- a/mysql-test/suite/innodb/r/instant_alter_crash.result +++ b/mysql-test/suite/innodb/r/instant_alter_crash.result @@ -181,3 +181,27 @@ t3 CREATE TABLE `t3` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1,t2,t3; db.opt +# +# MDEV-26198 Assertion `0' failed in row_log_table_apply_op 
during +# ADD PRIMARY KEY or OPTIMIZE TABLE +# +CREATE TABLE t1(f1 year default null, f2 year default null, +f3 text, f4 year default null, f5 year default null, +f6 year default null, f7 year default null, +f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; +connect con1,localhost,root,,,; +SET DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; +connection default; +disconnect con1; +SET DEBUG_SYNC=RESET; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/alter_crash.test b/mysql-test/suite/innodb/t/alter_crash.test index 7a2f4452f4d..6f6a6dc5cbc 100644 --- a/mysql-test/suite/innodb/t/alter_crash.test +++ b/mysql-test/suite/innodb/t/alter_crash.test @@ -51,8 +51,8 @@ let $MYSQLD_DATADIR= `select @@datadir`; let datadir= `select @@datadir`; # These are from include/shutdown_mysqld.inc and allow to call start_mysqld.inc ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --echo # --echo # Bug #14669848 CRASH DURING ALTER MAKES ORIGINAL TABLE INACCESSIBLE diff --git a/mysql-test/suite/innodb/t/full_crc32_import.test b/mysql-test/suite/innodb/t/full_crc32_import.test index c50e3899fc8..b79fd95471b 100644 --- a/mysql-test/suite/innodb/t/full_crc32_import.test +++ b/mysql-test/suite/innodb/t/full_crc32_import.test @@ -199,6 +199,16 @@ SET GLOBAL innodb_compression_algorithm=0; ALTER TABLE t1 FORCE; ALTER TABLE t1 DISCARD TABLESPACE; +--echo # Display the discarded table name by using SPACE and PAGE_NO +--echo # column in INNODB_SYS_INDEXES and discard doesn't affect the +--echo # SPACE in INNODB_SYS_TABLES +SELECT t.NAME, t.SPACE BETWEEN 1 and 0xFFFFFFEF as SYS_TABLE_SPACE_RANGE +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t +WHERE t.TABLE_ID IN ( + SELECT i.TABLE_ID FROM + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i WHERE + i.PAGE_NO IS NULL and i.SPACE IS NULL); + --list_files $MYSQLD_DATADIR/test perl; do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl"; diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test index dac176f3b77..26576129a16 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test +++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test @@ -29,8 +29,8 @@ call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict let datadir= `select @@datadir`; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name=$MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL) ENGINE=innodb; SET debug_dbug='+d,innodb_alter_commit_crash_before_commit'; diff --git a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test index 129037e783b..c15a7a4cb7e 100644 --- 
a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test +++ b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test @@ -25,8 +25,8 @@ CREATE TABLE t1( INDEX(b)) ENGINE=InnoDB STATS_PERSISTENT=0; ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect # The flag innodb_change_buffering_debug is only available in debug builds. # It instructs InnoDB to try to evict pages from the buffer pool when diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test index 7bc71d87a03..cebb8ce7ec4 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test @@ -38,8 +38,8 @@ SET GLOBAL innodb_file_per_table = 1; CREATE TABLE t1 (c1 INT) ENGINE = InnoDB; INSERT INTO t1 VALUES(1),(2),(3); ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect --exec echo wait > $_expect_file_name SET SESSION debug_dbug="+d,ib_discard_before_commit_crash"; diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test index a7a414d61da..381091165ef 100644 --- a/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test +++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_dump_pct.test @@ -15,7 +15,6 @@ col2 VARCHAR(25), col3 varchar(25)) ENGINE=InnoDB; CREATE INDEX idx1 ON tab5(col2(10)); CREATE INDEX idx2 ON tab5(col3(10)); -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool100; SET GLOBAL innodb_buffer_pool_dump_pct=100; #*********************************************************** @@ -58,8 +57,7 @@ AND variable_value != @IBPDS AND variable_value like 'Buffer pool(s) dump completed at%'; --source include/wait_condition.inc ---file_exists $MYSQLD_DATADIR/ib_buffer_pool100 -SET GLOBAL innodb_buffer_pool_filename=ib_buffer_pool1; +--move_file $MYSQLD_DATADIR/ib_buffer_pool $MYSQLD_DATADIR/ib_buffer_pool100 SET GLOBAL innodb_buffer_pool_dump_pct=1; SELECT @@global.innodb_buffer_pool_dump_pct; @@ -83,17 +81,15 @@ AND variable_value != @IBPDS AND variable_value like 'Buffer pool(s) dump completed at%'; --source include/wait_condition.inc ---file_exists $MYSQLD_DATADIR/ib_buffer_pool1 +--file_exists $MYSQLD_DATADIR/ib_buffer_pool perl; -my $size1 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool1"; +my $size1 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool"; my $size100 = -s "$ENV{MYSQLD_DATADIR}/ib_buffer_pool100"; die "$size100<=$size1\n" unless $size100 > $size1; EOF SET GLOBAL innodb_buffer_pool_dump_pct=DEFAULT; -SET GLOBAL innodb_buffer_pool_filename=DEFAULT; --remove_file $MYSQLD_DATADIR/ib_buffer_pool100 ---remove_file $MYSQLD_DATADIR/ib_buffer_pool1 DROP TABLE tab5; diff --git a/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test b/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test index 2e1391355b9..4383e26877d 100644 --- a/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test +++ b/mysql-test/suite/innodb/t/innodb_sys_var_valgrind.test @@ -25,24 +25,6 @@ select @@innodb_ft_server_stopword_table; drop table user_stopword_1, user_stopword_2; -#Test innodb_buffer_pool_filename (global variable) - -select 
@@innodb_buffer_pool_filename; - -set @blah='hello'; -set global innodb_buffer_pool_filename = @blah; -select @@innodb_buffer_pool_filename; - -set global innodb_buffer_pool_filename="bye"; -select @@innodb_buffer_pool_filename; - ---error ER_WRONG_VALUE_FOR_VAR -set global innodb_buffer_pool_filename=NULL; -select @@innodb_buffer_pool_filename; - -set global innodb_buffer_pool_filename=default; -select @@innodb_buffer_pool_filename; - #Test innodb_ft_aux_table (global variable) CREATE TABLE t1 ( id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, opening_line TEXT(500), author VARCHAR(200), title VARCHAR(200), FULLTEXT idx diff --git a/mysql-test/suite/innodb/t/instant_alter_crash.test b/mysql-test/suite/innodb/t/instant_alter_crash.test index 43db8f619f3..292de8a802b 100644 --- a/mysql-test/suite/innodb/t/instant_alter_crash.test +++ b/mysql-test/suite/innodb/t/instant_alter_crash.test @@ -205,3 +205,29 @@ DROP TABLE t1,t2,t3; --remove_files_wildcard $MYSQLD_DATADIR/test #sql*.frm --list_files $MYSQLD_DATADIR/test + +--echo # +--echo # MDEV-26198 Assertion `0' failed in row_log_table_apply_op during +--echo # ADD PRIMARY KEY or OPTIMIZE TABLE +--echo # +CREATE TABLE t1(f1 year default null, f2 year default null, + f3 text, f4 year default null, f5 year default null, + f6 year default null, f7 year default null, + f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +send ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; + +connect(con1,localhost,root,,,); +SET DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; + +connection default; +reap; +disconnect con1; +SET DEBUG_SYNC=RESET; +CHECK TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/purge_thread_shutdown.test b/mysql-test/suite/innodb/t/purge_thread_shutdown.test index 5be29b7a6a3..762336cf0d1 100644 --- a/mysql-test/suite/innodb/t/purge_thread_shutdown.test +++ b/mysql-test/suite/innodb/t/purge_thread_shutdown.test @@ -12,8 +12,8 @@ select user,state from information_schema.processlist order by 2; set global debug_dbug='+d,only_kill_system_threads'; set global innodb_fast_shutdown=0; -let $_server_id= `SELECT @@server_id`; -let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect; +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect exec echo "wait" > $_expect_file_name; send shutdown; diff --git a/mysql-test/suite/innodb/t/restart.test b/mysql-test/suite/innodb/t/restart.test index 32058b3abf5..3e726c971ab 100644 --- a/mysql-test/suite/innodb/t/restart.test +++ b/mysql-test/suite/innodb/t/restart.test @@ -110,8 +110,6 @@ SET GLOBAL innodb_buffer_pool_size = @innodb_buffer_pool_size_orig; --echo # --let MYSQLD_DATADIR= `SELECT @@datadir` ---let SERVER_ID= `SELECT @@server_id` ---let EXPECT_FILE_NAME= $MYSQLTEST_VARDIR/tmp/mysqld.$SERVER_ID.expect --source include/shutdown_mysqld.inc diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index 752243e599d..12885f40a92 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -11,8 +11,8 @@ 
--source include/no_valgrind_without_big.inc --disable_query_log -call mtr.add_suppression("Can't create/write to file '/dev/null/nonexistent/ib"); call mtr.add_suppression("Can't create/write to file '' \\\(Errcode: 20 \"Not a directory\"\\\)"); +call mtr.add_suppression("Can't create/write to file '/dev/null/.*/ib"); call mtr.add_suppression("InnoDB: Unable to create temporary file"); call mtr.add_suppression("last file in setting innodb_temp_data_file_path"); call mtr.add_suppression("The table 't1' is full"); @@ -135,7 +135,7 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED'); # We cannot use include/restart_mysqld.inc in this particular test, # because SHOW STATUS would fail due to unwritable (nonexistent) tmpdir. --source include/shutdown_mysqld.inc ---exec echo "restart: --tmpdir=/dev/null/nonexistent --skip-innodb-fast-shutdown" > $_expect_file_name +--exec echo "restart: --tmpdir=/dev/null/$MYSQL_TMP_DIR --skip-innodb-fast-shutdown" > $_expect_file_name --enable_reconnect --disable_result_log --disable_query_log diff --git a/mysql-test/suite/innodb_fts/t/sync.test b/mysql-test/suite/innodb_fts/t/sync.test index 89a45a2873f..168309a5c92 100644 --- a/mysql-test/suite/innodb_fts/t/sync.test +++ b/mysql-test/suite/innodb_fts/t/sync.test @@ -104,7 +104,8 @@ disconnect con1; DROP TABLE t1; --echo # Case 3: Test insert crash recovery ---let $_expect_file_name=$MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect CREATE TABLE t1 ( FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY, diff --git a/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result b/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result index aa713643a16..1eadeb8bbce 100644 --- a/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result +++ b/mysql-test/suite/innodb_i_s/innodb_sys_indexes.result @@ -6,7 +6,7 @@ INNODB_SYS_INDEXES CREATE TEMPORARY TABLE `INNODB_SYS_INDEXES` ( `TABLE_ID` bigint(21) unsigned NOT NULL, `TYPE` int(11) NOT NULL, `N_FIELDS` int(11) NOT NULL, - `PAGE_NO` int(11) NOT NULL, - `SPACE` int(11) NOT NULL, + `PAGE_NO` int(11), + `SPACE` int(11), `MERGE_THRESHOLD` int(11) NOT NULL ) ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci diff --git a/mysql-test/suite/mariabackup/include/restart_and_restore.inc b/mysql-test/suite/mariabackup/include/restart_and_restore.inc index 2d1e5493957..aa26d28efba 100644 --- a/mysql-test/suite/mariabackup/include/restart_and_restore.inc +++ b/mysql-test/suite/mariabackup/include/restart_and_restore.inc @@ -4,5 +4,5 @@ echo # shutdown server; echo # remove datadir; rmdir $_datadir; echo # xtrabackup move back; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1 $backup_opts; --source include/start_mysqld.inc diff --git a/mysql-test/suite/mariabackup/partial_exclude.result b/mysql-test/suite/mariabackup/partial_exclude.result index 628613040e0..9f4c1042d93 100644 --- a/mysql-test/suite/mariabackup/partial_exclude.result +++ b/mysql-test/suite/mariabackup/partial_exclude.result @@ -8,8 +8,15 @@ CREATE DATABASE db2; USE db2; CREATE TABLE t1(i INT) ENGINE INNODB; USE test; +BEGIN; +INSERT INTO db2.t1 VALUES(20); +INSERT INTO test.t1 VALUES(20); +INSERT INTO test.t2 VALUES(20); # xtrabackup backup 
+COMMIT; t1.ibd DROP TABLE t1; DROP TABLE t2; DROP DATABASE db2; +NOT FOUND /Operating system error number/ in backup.log +NOT FOUND /Could not find a valid tablespace file for/ in backup.log diff --git a/mysql-test/suite/mariabackup/partial_exclude.test b/mysql-test/suite/mariabackup/partial_exclude.test index 99d14e58231..beff778e7bc 100644 --- a/mysql-test/suite/mariabackup/partial_exclude.test +++ b/mysql-test/suite/mariabackup/partial_exclude.test @@ -19,6 +19,11 @@ CREATE TABLE t1(i INT) ENGINE INNODB; USE test; +BEGIN; +INSERT INTO db2.t1 VALUES(20); +INSERT INTO test.t1 VALUES(20); +INSERT INTO test.t2 VALUES(20); + echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; @@ -26,6 +31,8 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.*2" "--databases-exclude=db2" --target-dir=$targetdir; --enable_result_log +COMMIT; + # check that only t1 table is in backup (t2 is excluded) list_files $targetdir/test *.ibd; # check that db2 database is not in the backup (excluded) @@ -46,4 +53,17 @@ DROP DATABASE db2; rmdir $MYSQLD_DATADIR/db3; rmdir $MYSQLD_DATADIR/db4; rmdir $MYSQLD_DATADIR/db5; + +--let $backup_log=$MYSQLTEST_VARDIR/tmp/backup.log +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --export --prepare --target-dir="$targetdir" > $backup_log; +--enable_result_log + +--let SEARCH_FILE=$backup_log +--let SEARCH_PATTERN=Operating system error number +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN=Could not find a valid tablespace file for +--source include/search_pattern_in_file.inc +--remove_file $backup_log + rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/relative_path.opt b/mysql-test/suite/mariabackup/relative_path.opt new file mode 100644 index 00000000000..3e3c33e44f8 --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.opt @@ -0,0 +1 @@ +--innodb-undo-tablespaces=2 diff --git a/mysql-test/suite/mariabackup/relative_path.result b/mysql-test/suite/mariabackup/relative_path.result new file mode 100644 index 00000000000..7aa0c6968f3 --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.result @@ -0,0 +1,20 @@ +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); +# xtrabackup backup +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +# shutdown server +# remove datadir +# xtrabackup move back +# restart +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t; +i +1 +DROP TABLE t; diff --git a/mysql-test/suite/mariabackup/relative_path.test b/mysql-test/suite/mariabackup/relative_path.test new file mode 100644 index 00000000000..bd25a217e71 --- /dev/null +++ b/mysql-test/suite/mariabackup/relative_path.test @@ -0,0 +1,35 @@ +--source include/have_innodb.inc + +CREATE TABLE t(i INT) ENGINE INNODB; +INSERT INTO t VALUES(1); + +echo # xtrabackup backup; +let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; +--let $backup_log=$MYSQLTEST_VARDIR/tmp/backup.log + +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir > $backup_log 2>&1; +--enable_result_log + +echo # xtrabackup prepare; +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir; + +# If MDEV-28187 is not fixed, the following tries to copy backup to data +# directory will fail, because their destination path will be the same as +# their source path + +--let 
$backup_opts=--innodb_undo_directory=./ +--source include/restart_and_restore.inc + +--let $backup_opts=--innodb_log_group_home_dir=./ +--source include/restart_and_restore.inc + +--let $backup_opts=--innodb_data_home_dir=./ +--source include/restart_and_restore.inc + +--enable_result_log + +SELECT * FROM t; +DROP TABLE t; +rmdir $targetdir; diff --git a/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result b/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result new file mode 100644 index 00000000000..8f3a17a0fc5 --- /dev/null +++ b/mysql-test/suite/perfschema/r/events_waits_current_MDEV-29091.result @@ -0,0 +1,41 @@ +SET default_storage_engine=InnoDB; +SELECT @save_instrument_enabled := ENABLED +, @save_instrument_timed := TIMED +FROM performance_schema.setup_instruments +WHERE NAME = 'wait/lock/table/sql/handler'; +@save_instrument_enabled := ENABLED @save_instrument_timed := TIMED +YES YES +SELECT @save_consumer_enabled := ENABLED +FROM performance_schema.setup_consumers +WHERE NAME = 'events_waits_current'; +@save_consumer_enabled := ENABLED +YES +UPDATE performance_schema.setup_instruments +SET ENABLED = 'YES', TIMED = 'YES' +WHERE NAME = 'wait/lock/table/sql/handler'; +UPDATE performance_schema.setup_consumers +SET ENABLED = 'YES' +WHERE NAME = 'events_waits_current'; +CREATE TABLE t1 (id1 INT(11), col1 VARCHAR (200)); +INSERT INTO t1 VALUES (1, 'aa'); +INSERT INTO t1 VALUES (2, 'bb'); +connect con1,localhost,root,,test; +connect con2,localhost,root,,test; +connection con1; +START TRANSACTION; +connection con2; +START TRANSACTION; +SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; +connection default; +SELECT event_name FROM performance_schema.events_waits_current +WHERE event_name LIKE '%wait/lock/table/sql/handler%'; +event_name +UPDATE performance_schema.setup_instruments +SET ENABLED = @save_instrument_enabled, TIMED = @save_instrument_timed +WHERE NAME = 'wait/lock/table/sql/handler'; +UPDATE performance_schema.setup_consumers +SET ENABLED = @save_consumer_enabled +WHERE NAME = 'events_waits_current'; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result index 52ece6d289d..47f4d7ba346 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result @@ -202,10 +202,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -262,10 +260,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute 
dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -328,10 +324,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -429,10 +423,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -500,10 +492,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -613,10 +603,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -689,10 +677,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -814,10 +800,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) 
object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -891,10 +875,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -965,10 +947,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1039,10 +1019,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1110,10 +1088,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1180,10 +1156,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 
execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1249,10 +1223,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1317,10 +1289,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1387,10 +1357,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1456,10 +1424,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1525,10 +1491,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1594,10 +1558,8 @@ wait/io/table/sql/handler 71 wait/lock/table/sql/handler 56 execute dump_waits_history; 
event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1663,10 +1625,8 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result index b0ea06f4254..9810d104ed9 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result @@ -211,12 +211,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -279,12 +276,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -353,12 +347,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 
+wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -462,12 +453,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -541,12 +529,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -662,12 +647,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -746,12 +728,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -879,12 +858,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) 
object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 41 TABLE test t1 +wait/lock/table/sql/handler 56 TABLE test t2 +wait/lock/table/sql/handler 78 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -964,12 +940,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1046,12 +1019,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1128,12 +1098,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1207,12 +1174,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 
+wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1285,12 +1249,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1362,12 +1323,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1438,12 +1396,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1516,12 +1471,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1593,12 +1545,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type 
object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1670,12 +1619,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1747,12 +1693,9 @@ wait/io/table/sql/handler 103 wait/lock/table/sql/handler 84 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 17 11 6 11 2 4 0 @@ -1824,12 +1767,9 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 21 TABLE test t1 -wait/lock/table/sql/handler 24 TABLE test t1 -wait/io/table/sql/handler 32 TABLE test t2 -wait/lock/table/sql/handler 28 TABLE test t2 -wait/io/table/sql/handler 50 TABLE test t3 -wait/lock/table/sql/handler 32 TABLE test t3 +wait/lock/table/sql/handler 45 TABLE test t1 +wait/lock/table/sql/handler 60 TABLE test t2 +wait/lock/table/sql/handler 82 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result index 0a6dea739e4..367a8a089eb 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result @@ -202,10 +202,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 
10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -262,10 +260,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -328,10 +324,8 @@ wait/io/table/sql/handler 23 wait/lock/table/sql/handler 24 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -429,10 +423,8 @@ wait/io/table/sql/handler 58 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -500,10 +492,8 @@ wait/io/table/sql/handler 58 wait/lock/table/sql/handler 48 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -613,10 +603,8 @@ wait/io/table/sql/handler 106 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch 
count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -689,10 +677,8 @@ wait/io/table/sql/handler 106 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -814,10 +800,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 96 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 40 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 56 TABLE test t3 +wait/lock/table/sql/handler 87 TABLE test t1 +wait/lock/table/sql/handler 176 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -891,10 +875,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -965,10 +947,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1039,10 +1019,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1110,10 +1088,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE 
test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1180,10 +1156,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1249,10 +1223,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1317,10 +1289,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1387,10 +1357,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1456,10 +1424,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type 
object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1525,10 +1491,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1594,10 +1558,8 @@ wait/io/table/sql/handler 167 wait/lock/table/sql/handler 104 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1663,10 +1625,8 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result index c2eda2b8f23..ed1ac9d979b 100644 --- a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result +++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result @@ -211,12 +211,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -279,12 +276,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 
12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -353,12 +347,9 @@ wait/io/table/sql/handler 33 wait/lock/table/sql/handler 36 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 8 TABLE test t1 -wait/lock/table/sql/handler 10 TABLE test t1 -wait/io/table/sql/handler 10 TABLE test t2 -wait/lock/table/sql/handler 12 TABLE test t2 -wait/io/table/sql/handler 15 TABLE test t3 -wait/lock/table/sql/handler 14 TABLE test t3 +wait/lock/table/sql/handler 18 TABLE test t1 +wait/lock/table/sql/handler 22 TABLE test t2 +wait/lock/table/sql/handler 29 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 6 4 2 4 1 1 0 @@ -462,12 +453,9 @@ wait/io/table/sql/handler 84 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 26 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 50 TABLE test t2 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -541,12 +529,9 @@ wait/io/table/sql/handler 84 wait/lock/table/sql/handler 72 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 18 TABLE test t1 -wait/lock/table/sql/handler 20 TABLE test t1 -wait/io/table/sql/handler 26 TABLE test t2 -wait/lock/table/sql/handler 24 TABLE test t2 -wait/io/table/sql/handler 40 TABLE test t3 -wait/lock/table/sql/handler 28 TABLE test t3 +wait/lock/table/sql/handler 38 TABLE test t1 +wait/lock/table/sql/handler 50 TABLE test t2 +wait/lock/table/sql/handler 68 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 14 9 5 9 2 3 0 @@ -662,12 +647,9 @@ wait/io/table/sql/handler 154 wait/lock/table/sql/handler 108 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 48 TABLE test t2 -wait/lock/table/sql/handler 36 TABLE test t2 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 84 TABLE test t2 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 
16 9 16 3 6 0 @@ -746,12 +728,9 @@ wait/io/table/sql/handler 154 wait/lock/table/sql/handler 108 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 31 TABLE test t1 -wait/lock/table/sql/handler 30 TABLE test t1 -wait/io/table/sql/handler 48 TABLE test t2 -wait/lock/table/sql/handler 36 TABLE test t2 -wait/io/table/sql/handler 75 TABLE test t3 -wait/lock/table/sql/handler 42 TABLE test t3 +wait/lock/table/sql/handler 61 TABLE test t1 +wait/lock/table/sql/handler 84 TABLE test t2 +wait/lock/table/sql/handler 117 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 25 16 9 16 3 6 0 @@ -879,12 +858,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 144 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 40 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 48 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 56 TABLE test t3 +wait/lock/table/sql/handler 87 TABLE test t1 +wait/lock/table/sql/handler 124 TABLE test t2 +wait/lock/table/sql/handler 176 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -964,12 +940,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1046,12 +1019,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1128,12 +1098,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 
-wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1207,12 +1174,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1285,12 +1249,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1362,12 +1323,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1438,12 +1396,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update 
count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1516,12 +1471,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1593,12 +1545,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1670,12 +1619,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1747,12 +1693,9 @@ wait/io/table/sql/handler 243 wait/lock/table/sql/handler 156 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 -wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 39 25 14 25 4 10 0 @@ -1824,12 +1767,9 @@ wait/io/table/sql/handler 0 wait/lock/table/sql/handler 0 execute dump_waits_history; event_name count(event_name) object_type object_schema object_name -wait/io/table/sql/handler 47 TABLE test t1 -wait/lock/table/sql/handler 44 TABLE test t1 -wait/io/table/sql/handler 76 TABLE test t2 
-wait/lock/table/sql/handler 52 TABLE test t2 -wait/io/table/sql/handler 120 TABLE test t3 -wait/lock/table/sql/handler 60 TABLE test t3 +wait/lock/table/sql/handler 91 TABLE test t1 +wait/lock/table/sql/handler 128 TABLE test t2 +wait/lock/table/sql/handler 180 TABLE test t3 execute dump_waits_index_io; object_type object_schema object_name index_name count_star count_read count_write count_fetch count_insert count_update count_delete TABLE test t1 NULL 0 0 0 0 0 0 0 diff --git a/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test b/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test new file mode 100644 index 00000000000..d9330ee5a9d --- /dev/null +++ b/mysql-test/suite/perfschema/t/events_waits_current_MDEV-29091.test @@ -0,0 +1,62 @@ +# +# proper event name wait/lock/table/sql/handler recorded in +# PERFORMANCE_SCHEMA.EVENTS_WAITS_CURRENT. Before this fix, it was +# labeled as wait/io/table/sql/handler. +# + +--source include/have_innodb.inc +--source include/have_perfschema.inc +--source include/not_embedded.inc + +SET default_storage_engine=InnoDB; + +SELECT @save_instrument_enabled := ENABLED +, @save_instrument_timed := TIMED +FROM performance_schema.setup_instruments +WHERE NAME = 'wait/lock/table/sql/handler'; + +SELECT @save_consumer_enabled := ENABLED +FROM performance_schema.setup_consumers +WHERE NAME = 'events_waits_current'; + +UPDATE performance_schema.setup_instruments +SET ENABLED = 'YES', TIMED = 'YES' +WHERE NAME = 'wait/lock/table/sql/handler'; + +UPDATE performance_schema.setup_consumers +SET ENABLED = 'YES' +WHERE NAME = 'events_waits_current'; + +CREATE TABLE t1 (id1 INT(11), col1 VARCHAR (200)); +INSERT INTO t1 VALUES (1, 'aa'); +INSERT INTO t1 VALUES (2, 'bb'); + +connect (con1,localhost,root,,test); +connect (con2,localhost,root,,test); + +connection con1; +START TRANSACTION; +let $wait_condition= + SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; +--source include/wait_condition.inc + +connection con2; +START TRANSACTION; +send SELECT id1 FROM t1 WHERE id1=1 FOR UPDATE; + +connection default; +SELECT event_name FROM performance_schema.events_waits_current +WHERE event_name LIKE '%wait/lock/table/sql/handler%'; + +# clean up +UPDATE performance_schema.setup_instruments +SET ENABLED = @save_instrument_enabled, TIMED = @save_instrument_timed +WHERE NAME = 'wait/lock/table/sql/handler'; + +UPDATE performance_schema.setup_consumers +SET ENABLED = @save_consumer_enabled +WHERE NAME = 'events_waits_current'; + +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/mysql-test/suite/plugins/r/feedback_plugin_install.result b/mysql-test/suite/plugins/r/feedback_plugin_install.result index c7f7a5c79f3..d2291f20b4f 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_install.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_install.result @@ -11,6 +11,6 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test uninstall plugin feedback; diff --git a/mysql-test/suite/plugins/r/feedback_plugin_load.result b/mysql-test/suite/plugins/r/feedback_plugin_load.result index 4323dcce0a6..9043f6bf4a1 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_load.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_load.result @@ -13,7 +13,7 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL 
http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK WHERE VARIABLE_NAME LIKE 'Collation used %' diff --git a/mysql-test/suite/plugins/r/feedback_plugin_send.result b/mysql-test/suite/plugins/r/feedback_plugin_send.result index 69046e16dd9..028c69c6f16 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_send.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_send.result @@ -13,7 +13,7 @@ FEEDBACK version 1.1 FEEDBACK_HTTP_PROXY FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 -FEEDBACK_URL http://mariadb.org/feedback_plugin/post +FEEDBACK_URL http://feedback.mariadb.org/rest/v1/post FEEDBACK_USER_INFO mysql-test SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK WHERE VARIABLE_NAME LIKE 'Collation used %' @@ -42,5 +42,5 @@ VARIABLE_VALUE>0 VARIABLE_NAME deallocate prepare stmt; set global sql_mode=ONLY_FULL_GROUP_BY; # restart -feedback plugin: report to 'http://mariadb.org/feedback_plugin/post' was sent +feedback plugin: report to 'http://feedback.mariadb.org/rest/v1/post' was sent feedback plugin: server replied 'ok' diff --git a/mysql-test/suite/plugins/r/locales.result b/mysql-test/suite/plugins/r/locales.result index e906d27c21e..5229b7ffaa8 100644 --- a/mysql-test/suite/plugins/r/locales.result +++ b/mysql-test/suite/plugins/r/locales.result @@ -57,7 +57,7 @@ ID NAME DESCRIPTION MAX_MONTH_NAME_LENGTH MAX_DAY_NAME_LENGTH DECIMAL_POINT THOU 53 uk_UA Ukrainian - Ukraine 8 9 , . ukrainian 54 ur_PK Urdu - Pakistan 6 6 . , english 55 vi_VN Vietnamese - Vietnam 16 11 , . english -56 zh_CN Chinese - Peoples Republic of China 3 3 . , english +56 zh_CN Chinese - Peoples Republic of China 3 3 . , chinese 57 zh_TW Chinese - Taiwan 3 2 . , english 58 ar_DZ Arabic - Algeria 6 8 . , english 59 ar_EG Arabic - Egypt 6 8 . 
, english @@ -170,7 +170,7 @@ Id Name Description Error_Message_Language 53 uk_UA Ukrainian - Ukraine ukrainian 54 ur_PK Urdu - Pakistan english 55 vi_VN Vietnamese - Vietnam english -56 zh_CN Chinese - Peoples Republic of China english +56 zh_CN Chinese - Peoples Republic of China chinese 57 zh_TW Chinese - Taiwan english 58 ar_DZ Arabic - Algeria english 59 ar_EG Arabic - Egypt english diff --git a/mysql-test/suite/roles/role_grant_propagate.result b/mysql-test/suite/roles/role_grant_propagate.result index 7804b7b7a3c..111fd4dbc28 100644 --- a/mysql-test/suite/roles/role_grant_propagate.result +++ b/mysql-test/suite/roles/role_grant_propagate.result @@ -163,5 +163,18 @@ drop role student; drop role admin; drop database crm; # +# MDEV-30526 Assertion `rights == merged->cols' failed in update_role_columns +# +create table t1 ( pk int, i int); +create role a; +grant select (i), update (pk) on t1 to a; +revoke update (pk) on t1 from a; +show grants for a; +Grants for a +GRANT USAGE ON *.* TO `a` +GRANT SELECT (`i`) ON `test`.`t1` TO `a` +drop role a; +drop table t1; +# # End of 10.3 tests # diff --git a/mysql-test/suite/roles/role_grant_propagate.test b/mysql-test/suite/roles/role_grant_propagate.test index bf20bc00809..02d451f0afd 100644 --- a/mysql-test/suite/roles/role_grant_propagate.test +++ b/mysql-test/suite/roles/role_grant_propagate.test @@ -197,5 +197,16 @@ drop role admin; drop database crm; --echo # +--echo # MDEV-30526 Assertion `rights == merged->cols' failed in update_role_columns +--echo # +create table t1 ( pk int, i int); +create role a; +grant select (i), update (pk) on t1 to a; +revoke update (pk) on t1 from a; +show grants for a; +drop role a; +drop table t1; + +--echo # --echo # End of 10.3 tests --echo # diff --git a/mysql-test/suite/rpl/include/rpl_binlog_errors.inc b/mysql-test/suite/rpl/include/rpl_binlog_errors.inc deleted file mode 100644 index bf92736a2af..00000000000 --- a/mysql-test/suite/rpl/include/rpl_binlog_errors.inc +++ /dev/null @@ -1,438 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# -# Usage: -# --let $binlog_limit= X[,Y] # optional -# -# Semantics of the value is the same as in include/show_binlog_events.inc -# which the script calls as a part of the test flow. -# The goal is to print the event demonstrating the triggered error, -# so normally Y should be 1 (print the exact event only); -# however, depending on test-specific server options, the offset X -# can be different. -# - -# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error -# when generating new name. -# -# WHY -# === -# -# We want to check whether error is reported or not when -# new_file_impl fails (this may happen when rotation is not -# possible because there is some problem finding an -# unique filename). -# -# HOW -# === -# -# Test cases are documented inline. 
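The Usage note above is easier to follow with a concrete caller in mind. A minimal, hypothetical wrapper test (the file name, suite and offset shown here are illustrative only and are not part of this commit) would look roughly like:

# hypothetical caller in the rpl or binlog_encryption suite
--let $binlog_limit= 4,1
--source suite/rpl/include/rpl_binlog_errors.inc

The value 4,1 has the same semantics as in include/show_binlog_events.inc: skip the first four events and print exactly one, so that the single event shown is the one demonstrating the injected failure.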
- --- source include/have_innodb.inc --- source include/have_debug.inc --- source include/master-slave.inc - --- echo ####################################################################### --- echo ####################### PART 1: MASTER TESTS ########################## --- echo ####################################################################### - - -### ACTION: stopping slave as it is not needed for the first part of -### the test - --- connection slave --- source include/stop_slave.inc --- connection master - -call mtr.add_suppression("Can't generate a unique log-filename"); -call mtr.add_suppression("Writing one row to the row-based binary log failed.*"); -call mtr.add_suppression("Error writing file .*"); -call mtr.add_suppression("Could not use master-bin for logging"); - -SET @old_debug= @@global.debug_dbug; - -### ACTION: create a large file (> 4096 bytes) that will be later used -### in LOAD DATA INFILE to check binlog errors in its vacinity --- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data --- let $MYSQLD_DATADIR= `select @@datadir` --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file' - -### ACTION: create a small file (< 4096 bytes) that will be later used -### in LOAD DATA INFILE to check for absence of binlog errors -### when file loading this file does not force flushing and -### rotating the binary log --- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data --- let $MYSQLD_DATADIR= `select @@datadir` --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2' - -RESET MASTER; - --- echo ###################### TEST #1 - -### ASSERTION: no problem flushing logs (should show two binlogs) -FLUSH LOGS; --- echo # assert: must show two binlogs --- source include/show_binary_logs.inc - --- echo ###################### TEST #2 - -### ASSERTION: check that FLUSH LOGS actually fails and reports -### failure back to the user if find_uniq_filename fails -### (should show just one binlog) - -RESET MASTER; -SET @@global.debug_dbug="d,error_unique_log_filename"; --- error ER_NO_UNIQUE_LOGFILE -FLUSH LOGS; --- echo # assert: must show one binlog --- source include/show_binary_logs.inc - -### ACTION: clean up and move to next test -SET @@global.debug_dbug=@old_debug; -RESET MASTER; - --- echo ###################### TEST #3 - -### ACTION: create some tables (t1, t2, t4) and insert some values in -### table t1 -CREATE TABLE t1 (a INT); -CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB; -CREATE TABLE t4 (a VARCHAR(16384)); -INSERT INTO t1 VALUES (1); -RESET MASTER; - -### ASSERTION: we force rotation of the binary log because it exceeds -### the max_binlog_size option (should show two binary -### logs) - --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 - -# shows two binary logs --- echo # assert: must show two binlog --- source include/show_binary_logs.inc - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #4 - -### ASSERTION: load the big file into a transactional table and check -### that it reports error. The table will contain the -### changes performed despite the fact that it reported an -### error. 
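Before the commands for TEST #4, it is worth spelling out why the reported error does not undo the load; the sequence, pieced together from this file's own comments (TEST #2, #3 and #6) rather than from server internals, is roughly:

# 1. LOAD DATA writes the 8 KB file into t2 and the transaction commits
# 2. the binary log has grown past max_binlog_size, so a rotation is attempted
# 3. the injected d,error_unique_log_filename fault makes find_uniq_filename fail
# 4. ER_NO_UNIQUE_LOGFILE is returned to the client, but step 1 is already committed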
- -SET @@global.debug_dbug="d,error_unique_log_filename"; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 - -# show table --- echo # assert: must show one entry -SELECT count(*) FROM t2; - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #5 - -### ASSERTION: load the small file into a transactional table and -### check that it succeeds - -SET @@global.debug_dbug="d,error_unique_log_filename"; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2 - -# show table --- echo # assert: must show one entry -SELECT count(*) FROM t2; - -# clean up the table and the binlog to be used in next part of test -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #6 - -### ASSERTION: check that even if one is using a transactional table -### and explicit transactions (no autocommit) if rotation -### fails we get the error. Transaction is not rolledback -### because rotation happens after the commit. - -SET @@global.debug_dbug="d,error_unique_log_filename"; -SET AUTOCOMMIT=0; -INSERT INTO t2 VALUES ('muse'); --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 -INSERT INTO t2 VALUES ('muse'); --- error ER_NO_UNIQUE_LOGFILE -COMMIT; - -### ACTION: Show the contents of the table after the test --- echo # assert: must show three entries -SELECT count(*) FROM t2; - -### ACTION: clean up and move to the next test -SET AUTOCOMMIT= 1; -SET @@global.debug_dbug=@old_debug; -DELETE FROM t2; -RESET MASTER; - --- echo ###################### TEST #7 - -### ASSERTION: check that on a non-transactional table, if rotation -### fails then an error is reported and an incident event -### is written to the current binary log. - -SET @@global.debug_dbug="d,error_unique_log_filename"; - -# Disable logging Annotate_rows events to preserve events count. -let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; -SET @@binlog_annotate_row_events= 0; - -SELECT count(*) FROM t4; --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 - --- echo # assert: must show 1 entry -SELECT count(*) FROM t4; - --- echo ### check that the incident event is written to the current log -SET @@global.debug_dbug=@old_debug; -if (!$binlog_limit) -{ - -- let $binlog_limit= 4,1 -} --- source include/show_binlog_events.inc - -# clean up and move to next test -DELETE FROM t4; - ---disable_query_log -eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; ---enable_query_log - -RESET MASTER; - --- echo ###################### TEST #8 - -### ASSERTION: check that statements end up in error but they succeed -### on changing the data. 
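As a side note to TEST #7 above, the incident event written on such a failure is what later stops a replica: applying it raises error 1590 with the LOST_EVENTS message, the same pattern that rpl_checksum.inc and rpl_incident.inc further down suppress and wait for. On a replica that state would look roughly like this (a sketch, not part of the include):

SHOW SLAVE STATUS\G
# Last_SQL_Errno: 1590
# Last_SQL_Error: The incident LOST_EVENTS occurred on the master.
#                 Message: error writing to the binary log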
- -SET @@global.debug_dbug="d,error_unique_log_filename"; --- echo # must show 0 entries -SELECT count(*) FROM t4; -SELECT count(*) FROM t2; - --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 --- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- error ER_NO_UNIQUE_LOGFILE --- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 --- error ER_NO_UNIQUE_LOGFILE -INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'); - --- echo # INFO: Count(*) Before Offending DELETEs --- echo # assert: must show 1 entry -SELECT count(*) FROM t4; --- echo # assert: must show 4 entries -SELECT count(*) FROM t2; - --- error ER_NO_UNIQUE_LOGFILE -DELETE FROM t4; --- error ER_NO_UNIQUE_LOGFILE -DELETE FROM t2; - --- echo # INFO: Count(*) After Offending DELETEs --- echo # assert: must show zero entries -SELECT count(*) FROM t4; -SELECT count(*) FROM t2; - -# remove fault injection -SET @@global.debug_dbug=@old_debug; - --- echo ###################### TEST #9 - -### ASSERTION: check that if we disable binlogging, then statements -### succeed. -SET @@global.debug_dbug="d,error_unique_log_filename"; -SET SQL_LOG_BIN=0; -INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd'); -INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh'); --- echo # assert: must show four entries -SELECT count(*) FROM t2; -SELECT count(*) FROM t4; -DELETE FROM t2; -DELETE FROM t4; --- echo # assert: must show zero entries -SELECT count(*) FROM t2; -SELECT count(*) FROM t4; -SET SQL_LOG_BIN=1; -SET @@global.debug_dbug=@old_debug; - --- echo ###################### TEST #10 - -### ASSERTION: check that error is reported if there is a failure -### while registering the index file and the binary log -### file or failure to write the rotate event. - -call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); -call mtr.add_suppression("Could not use .*"); - -RESET MASTER; -SHOW WARNINGS; - -# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open -SET @@global.debug_dbug="d,fault_injection_registering_index"; --- replace_regex /\.[\\\/]master/master/ --- error ER_CANT_OPEN_FILE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_NO_BINARY_LOGGING -SHOW BINARY LOGS; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - --- echo ###################### TEST #11 - -### ASSERTION: check that error is reported if there is a failure -### while opening the index file and the binary log file or -### failure to write the rotate event. 
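For TEST #10 above, the SHOW BINARY LOGS assertion translates into a concrete user-visible state: after the injected open failure the server stays up but binary logging is switched off, so an interactive session would see something like (sketch only):

SHOW BINARY LOGS;
ERROR 1381 (HY000): You are not using binary logging

Logging only comes back with the server restart performed at the start of TEST #11 below.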
- -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - -# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file -SET @@global.debug_dbug="d,fault_injection_openning_index"; --- replace_regex /\.[\\\/]master/master/ --- error ER_CANT_OPEN_FILE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_FLUSH_MASTER_BINLOG_CLOSED -RESET MASTER; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - --- echo ###################### TEST #12 - -### ASSERTION: check that error is reported if there is a failure -### while writing the rotate event when creating a new log -### file. - -# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::MYSQL_BIN_LOG::new_file_impl -SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event"; --- error ER_ERROR_ON_WRITE -FLUSH LOGS; -SET @@global.debug_dbug=@old_debug; - --- error ER_FLUSH_MASTER_BINLOG_CLOSED -RESET MASTER; - -# issue some statements and check that they don't fail -CREATE TABLE t5 (a INT); -INSERT INTO t4 VALUES ('bbbbb'); -INSERT INTO t2 VALUES ('aaaaa'); -DELETE FROM t4; -DELETE FROM t2; -DROP TABLE t5; -flush tables; - -# restart the server so that we have binlog again ---let $rpl_server_number= 1 ---source include/rpl_restart_server.inc - -## clean up -DROP TABLE t1, t2, t4; -RESET MASTER; - -# restart slave again --- connection slave --- source include/start_slave.inc --- connection master - --- echo ####################################################################### --- echo ####################### PART 2: SLAVE TESTS ########################### --- echo ####################################################################### - -### setup ---source include/rpl_reset.inc --- connection slave - -# slave suppressions - -call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*"); -call mtr.add_suppression("Error writing file .*"); -call mtr.add_suppression("Could not use .*"); -call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); -call mtr.add_suppression("Can't generate a unique log-filename .*"); --- echo ###################### TEST #13 - -#### ASSERTION: check against unique log filename error --- let $io_thd_injection_fault_flag= error_unique_log_filename --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #14 - -#### ASSERTION: check against rotate failing --- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #15 - -#### ASSERTION: check against relay log open failure --- let $io_thd_injection_fault_flag= fault_injection_registering_index --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source include/io_thd_fault_injection.inc - --- echo ###################### TEST #16 - -#### ASSERTION: check against relay log index open failure --- let $io_thd_injection_fault_flag= fault_injection_openning_index --- let $slave_io_errno= 1595 --- let $show_slave_io_error= 1 --- source 
include/io_thd_fault_injection.inc - -### clean up --- source include/stop_slave_sql.inc -RESET SLAVE; -RESET MASTER; ---remove_file $load_file ---remove_file $load_file2 ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc b/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc deleted file mode 100644 index 7dfef023947..00000000000 --- a/mysql-test/suite/rpl/include/rpl_cant_read_event_incident.inc +++ /dev/null @@ -1,83 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# -# Bug#11747416 : 32228 A disk full makes binary log corrupt. -# -# -# The test demonstrates reading from binlog error propagation to slave -# and reporting there. -# Conditions for the bug include a crash at time of the last event to -# the binlog was written partly. With the fixes the event is not sent out -# any longer, but rather the dump thread sends out a sound error message. -# -# Crash is not simulated. A binlog with partly written event in its end is installed -# and replication is started from it. -# - ---source include/have_binlog_format_mixed.inc ---source include/master-slave.inc - ---connection slave -# Make sure the slave is stopped while we are messing with master. -# Otherwise we get occasional failures as the slave manages to re-connect -# to the newly started master and we get extra events applied, causing -# conflicts. ---source include/stop_slave.inc - ---connection master -call mtr.add_suppression("Error in Log_event::read_log_event()"); ---let $datadir= `SELECT @@datadir` - ---let $rpl_server_number= 1 ---source include/rpl_stop_server.inc - ---remove_file $datadir/master-bin.000001 ---copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001 - ---let $rpl_server_number= 1 ---source include/rpl_start_server.inc - ---source include/wait_until_connected_again.inc - -# evidence of the partial binlog ---error ER_ERROR_WHEN_EXECUTING_COMMAND -show binlog events; - ---connection slave -call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log"); -reset slave; -start slave; - -# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236 ---let $slave_param=Last_IO_Errno ---let $slave_param_value=1236 ---source include/wait_for_slave_param.inc - ---let $slave_field_result_replace= / at [0-9]*/ at XXX/ ---let $status_items= Last_IO_Errno, Last_IO_Error ---source include/show_slave_status.inc - -# -# Cleanup -# - ---connection master -reset master; - ---connection slave -stop slave; -reset slave; -# Table was created from binlog, it may not be created if SQL thread is running -# slowly and IO thread reaches incident before SQL thread applies it. ---disable_warnings -drop table if exists t; ---enable_warnings -reset master; - ---echo End of the tests ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_checksum.inc b/mysql-test/suite/rpl/include/rpl_checksum.inc deleted file mode 100644 index 17a986dc308..00000000000 --- a/mysql-test/suite/rpl/include/rpl_checksum.inc +++ /dev/null @@ -1,335 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -# WL2540 replication events checksum -# Testing configuration parameters - ---source include/have_debug.inc ---source include/have_binlog_format_mixed.inc ---source include/master-slave.inc - -call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log'); -call mtr.add_suppression('Replication event checksum verification failed'); -# due to C failure simulation -call mtr.add_suppression('Relay log write failure: could not queue event from master'); -call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them'); - -# A. read/write access to the global vars: -# binlog_checksum master_verify_checksum slave_sql_verify_checksum - -connection master; - -set @master_save_binlog_checksum= @@global.binlog_checksum; -set @save_master_verify_checksum = @@global.master_verify_checksum; - -select @@global.binlog_checksum as 'must be CRC32 because of the command line option'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.binlog_checksum as 'no session var'; - -select @@global.master_verify_checksum as 'must be zero because of default'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.master_verify_checksum as 'no session var'; - -connection slave; - -set @slave_save_binlog_checksum= @@global.binlog_checksum; -set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum; - -select @@global.slave_sql_verify_checksum as 'must be one because of default'; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.slave_sql_verify_checksum as 'no session var'; - -connection master; - -source include/show_binary_logs.inc; -set @@global.binlog_checksum = NONE; -select @@global.binlog_checksum; ---echo *** must be rotations seen *** -source include/show_binary_logs.inc; - -set @@global.binlog_checksum = default; -select @@global.binlog_checksum; - -# testing lack of side-effects in non-effective update of binlog_checksum: -set @@global.binlog_checksum = CRC32; -select @@global.binlog_checksum; -set @@global.binlog_checksum = CRC32; - -set @@global.master_verify_checksum = 0; -set @@global.master_verify_checksum = default; - ---error ER_WRONG_VALUE_FOR_VAR -set @@global.binlog_checksum = ADLER32; ---error ER_WRONG_VALUE_FOR_VAR -set @@global.master_verify_checksum = 2; # the var is of bool type - -connection slave; - -set @@global.slave_sql_verify_checksum = 0; -set @@global.slave_sql_verify_checksum = default; ---error ER_WRONG_VALUE_FOR_VAR -set @@global.slave_sql_verify_checksum = 2; # the var is of bool type - -# -# B. 
Old Slave to New master conditions -# -# while master does not send a checksum-ed binlog the Old Slave can -# work with the New Master - -connection master; - -set @@global.binlog_checksum = NONE; -create table t1 (a int); - -# testing that binlog rotation preserves opt_binlog_checksum value -flush logs; -flush logs; --- source include/wait_for_binlog_checkpoint.inc -flush logs; - -sync_slave_with_master; -#connection slave; -# checking that rotation on the slave side leaves slave stable -flush logs; -flush logs; -flush logs; -select count(*) as zero from t1; - -source include/stop_slave.inc; - -connection master; -set @@global.binlog_checksum = CRC32; --- source include/wait_for_binlog_checkpoint.inc -insert into t1 values (1) /* will not be applied on slave due to simulation */; - -# instruction to the dump thread - -connection slave; -set @saved_dbug = @@global.debug_dbug; -set @@global.debug_dbug='d,simulate_slave_unaware_checksum'; -start slave; ---let $slave_io_errno= 1236 ---let $show_slave_io_error= 1 -source include/wait_for_slave_io_error.inc; - -select count(*) as zero from t1; - -set @@global.debug_dbug = @saved_dbug; - -connection slave; -source include/start_slave.inc; - -# -# C. checksum failure simulations -# - -# C1. Failure by a client thread -connection master; -set @@global.master_verify_checksum = 1; -set @save_dbug = @@session.debug_dbug; -set @@session.debug_dbug='d,simulate_checksum_test_failure'; ---error ER_ERROR_WHEN_EXECUTING_COMMAND -show binlog events; -SET debug_dbug= @save_dbug; -set @@global.master_verify_checksum = default; - -#connection master; -sync_slave_with_master; - -connection slave; -source include/stop_slave.inc; - -connection master; -create table t2 (a int); -let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1); - -connection slave; - -# C2. Failure by IO thread -# instruction to io thread -set @saved_dbug = @@global.debug_dbug; -set @@global.debug_dbug='d,simulate_checksum_test_failure'; -start slave io_thread; -# When the checksum error is detected, the slave sets error code 1913 -# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately -# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). -# So we usually get 1595, but it is occasionally possible to get 1913. ---let $slave_io_errno= 1595,1913 ---let $show_slave_io_error= 0 -source include/wait_for_slave_io_error.inc; -set @@global.debug_dbug = @saved_dbug; - -# to make IO thread re-read it again w/o the failure -start slave io_thread; -let $slave_param= Read_Master_Log_Pos; -let $slave_param_value= $pos_master; -source include/wait_for_slave_param.inc; - -# C3. Failure by SQL thread -# instruction to sql thread; -set @@global.slave_sql_verify_checksum = 1; - -set @@global.debug_dbug='d,simulate_checksum_test_failure'; - -start slave sql_thread; ---let $slave_sql_errno= 1593 ---let $show_slave_sql_error= 1 -source include/wait_for_slave_sql_error.inc; - -# resuming SQL thread to parse out the event w/o the failure - -set @@global.debug_dbug = @saved_dbug; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be zero' from t2; - -# -# D. 
Reset slave, Change-Master, Binlog & Relay-log rotations with -# random value on binlog_checksum on both master and slave -# -connection slave; -stop slave; -reset slave; - -# randomize slave server's own checksum policy -set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); -flush logs; - -connection master; -set @@global.binlog_checksum= CRC32; -reset master; -flush logs; -create table t3 (a int, b char(5)); - -connection slave; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be zero' from t3; -source include/stop_slave.inc; ---replace_result $MASTER_MYPORT MASTER_PORT -eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root'; - -connection master; -flush logs; -reset master; -insert into t3 value (1, @@global.binlog_checksum); - -connection slave; -source include/start_slave.inc; -flush logs; - -connection master; -sync_slave_with_master; - -#connection slave; -select count(*) as 'must be one' from t3; - -connection master; -set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); -insert into t3 value (1, @@global.binlog_checksum); -sync_slave_with_master; - -#connection slave; - -#clean-up - -connection master; -drop table t1, t2, t3; -set @@global.binlog_checksum = @master_save_binlog_checksum; -set @@global.master_verify_checksum = @save_master_verify_checksum; - -# -# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540 -# -# Sanity check that verifies that no assertions are triggered because -# of old FD events (generated by versions prior to server released with -# checksums feature) -# -# There is no need for query log, if something wrong this should trigger -# an assertion - ---disable_query_log - -BINLOG ' -MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== -'; - ---enable_query_log - -#connection slave; -sync_slave_with_master; - - ---echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** - ---connection master - ---source include/wait_for_binlog_checkpoint.inc -CREATE TABLE t4 (a INT PRIMARY KEY); -INSERT INTO t4 VALUES (1); - -SET sql_log_bin=0; -CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); -SET sql_log_bin=1; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET debug_dbug= '+d,binlog_inject_new_name_error'; ---error ER_NO_UNIQUE_LOGFILE -FLUSH LOGS; -SET debug_dbug= @old_dbug; - -INSERT INTO t4 VALUES (2); - ---connection slave ---let $slave_sql_errno= 1590 ---source include/wait_for_slave_sql_error.inc - -# Search the error log for the error message. -# The bug was that 4 garbage bytes were output in the middle of the error -# message; by searching for a pattern that spans that location, we can -# catch the error. -let $log_error_= `SELECT @@GLOBAL.log_error`; -if(!$log_error_) -{ - # MySQL Server on windows is started with --console and thus - # does not know the location of its .err log, use default location - let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; -} ---let SEARCH_FILE= $log_error_ ---let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590 ---source include/search_pattern_in_file.inc - -SELECT * FROM t4 ORDER BY a; -STOP SLAVE IO_THREAD; -SET sql_slave_skip_counter= 1; ---source include/start_slave.inc - ---connection master ---save_master_pos - ---connection slave ---sync_with_master -SELECT * FROM t4 ORDER BY a; - - ---connection slave -set @@global.binlog_checksum = @slave_save_binlog_checksum; -set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; - ---echo End of tests - ---connection master -DROP TABLE t4; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_checksum_cache.inc b/mysql-test/suite/rpl/include/rpl_checksum_cache.inc deleted file mode 100644 index e04f618b81e..00000000000 --- a/mysql-test/suite/rpl/include/rpl_checksum_cache.inc +++ /dev/null @@ -1,261 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - --- source include/have_innodb.inc --- source include/master-slave.inc - ---disable_warnings -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*"); -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*"); ---enable_warnings - -connection master; -set @save_binlog_cache_size = @@global.binlog_cache_size; -set @save_binlog_checksum = @@global.binlog_checksum; -set @save_master_verify_checksum = @@global.master_verify_checksum; -set @@global.binlog_cache_size = 4096; -set @@global.binlog_checksum = CRC32; -set @@global.master_verify_checksum = 1; - -# restart slave to force the dump thread to verify events (on master side) -connection slave; -source include/stop_slave.inc; -source include/start_slave.inc; - -connection master; - -# -# Testing a critical part of checksum handling dealing with transaction cache. -# The cache's buffer size is set to be less than the transaction's footprint -# in binlog. -# -# To verify combined buffer-by-buffer read out of the file and fixing crc per event -# there are the following parts: -# -# 1. the event size is much less than the cache's buffer -# 2. the event size is bigger than the cache's buffer -# 3. the event size if approximately the same as the cache's buffer -# 4. all in above - -# -# 1. the event size is much less than the cache's buffer -# - -flush status; -show status like "binlog_cache_use"; -show status like "binlog_cache_disk_use"; ---disable_warnings -drop table if exists t1; ---enable_warnings - -# -# parameter to ensure the test slightly varies binlog content -# between different invocations -# -let $deviation_size=32; -eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb; - -# Now we are going to create transaction which is long enough so its -# transaction binlog will be flushed to disk... - -delimiter |; -create procedure test.p_init (n int, size int) -begin - while n > 0 do - select round(RAND() * size) into @act_size; - set @data = repeat('a', @act_size); - insert into t1 values(n, @data ); - set n= n-1; - end while; -end| - -delimiter ;| - -let $1 = 4000; # PB2 can run it slow to time out on following sync_slave_with_master:s - -begin; ---disable_warnings -# todo: check if it is really so. 
-#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave. -eval call test.p_init($1, $deviation_size); ---enable_warnings -commit; - -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t1, slave:test.t1; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t1; -commit; - -sync_slave_with_master; - - -# -# 2. the event size is bigger than the cache's buffer -# -connection master; - -flush status; -let $t2_data_size= `select 3 * @@global.binlog_cache_size`; -let $t2_aver_size= `select 2 * @@global.binlog_cache_size`; -let $t2_max_rand= `select 1 * @@global.binlog_cache_size`; - -eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb; -let $1=100; ---disable_query_log -begin; -while ($1) -{ - eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; - set @data = repeat('a', @act_size); - insert into t2 set data = @data; - dec $1; -} -commit; ---enable_query_log -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t2, slave:test.t2; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t2; -commit; - -sync_slave_with_master; - -# -# 3. the event size if approximately the same as the cache's buffer -# - -connection master; - -flush status; -let $t3_data_size= `select 2 * @@global.binlog_cache_size`; -let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`; -let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`; - -eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb; - -let $1= 300; ---disable_query_log -begin; -while ($1) -{ - eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; - insert into t3 set data= repeat('a', @act_size); - dec $1; -} -commit; ---enable_query_log -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t3, slave:test.t3; -source include/diff_tables.inc; - -# undoing changes with verifying the above once again -connection master; - -begin; -delete from t3; -commit; - -sync_slave_with_master; - - -# -# 4. 
all in above -# - -connection master; -flush status; - -delimiter |; -eval create procedure test.p1 (n int) -begin - while n > 0 do - case (select (round(rand()*100) % 3) + 1) - when 1 then - select round(RAND() * $deviation_size) into @act_size; - set @data = repeat('a', @act_size); - insert into t1 values(n, @data); - when 2 then - begin - select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; - insert into t2 set data=repeat('a', @act_size); - end; - when 3 then - begin - select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; - insert into t3 set data= repeat('a', @act_size); - end; - end case; - set n= n-1; - end while; -end| -delimiter ;| - -let $1= 1000; -set autocommit= 0; -begin; ---disable_warnings -eval call test.p1($1); ---enable_warnings -commit; - -show status like "binlog_cache_use"; ---echo *** binlog_cache_disk_use must be non-zero *** -show status like "binlog_cache_disk_use"; - -sync_slave_with_master; - -let $diff_tables=master:test.t1, slave:test.t1; -source include/diff_tables.inc; - -let $diff_tables=master:test.t2, slave:test.t2; -source include/diff_tables.inc; - -let $diff_tables=master:test.t3, slave:test.t3; -source include/diff_tables.inc; - - -connection master; - -begin; -delete from t1; -delete from t2; -delete from t3; -commit; - -drop table t1, t2, t3; -set @@global.binlog_cache_size = @save_binlog_cache_size; -set @@global.binlog_checksum = @save_binlog_checksum; -set @@global.master_verify_checksum = @save_master_verify_checksum; -drop procedure test.p_init; -drop procedure test.p1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_corruption.inc b/mysql-test/suite/rpl/include/rpl_corruption.inc deleted file mode 100644 index c7a913af9d7..00000000000 --- a/mysql-test/suite/rpl/include/rpl_corruption.inc +++ /dev/null @@ -1,175 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -############################################################ -# Purpose: WL#5064 Testing with corrupted events. -# The test emulates the corruption at the vary stages -# of replication: -# - in binlog file -# - in network -# - in relay log -############################################################ - -# -# The tests intensively utilize @@global.debug. Note, -# Bug#11765758 - 58754, -# @@global.debug is read by the slave threads through dbug-interface. -# Hence, before a client thread set @@global.debug we have to ensure that: -# (a) the slave threads are stopped, or (b) the slave threads are in -# sync and waiting. - ---source include/have_debug.inc ---source include/master-slave.inc - -# Block legal errors for MTR -call mtr.add_suppression('Found invalid event in binary log'); -call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master'); -call mtr.add_suppression('event read from binlog did not pass crc check'); -call mtr.add_suppression('Replication event checksum verification failed'); -call mtr.add_suppression('Event crc check failed! Most likely there is event corruption'); -call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593'); - -SET @old_master_verify_checksum = @@master_verify_checksum; - -# Creating test table/data and set corruption position for testing ---echo # 1. 
Creating test table/data and set corruption position for testing ---connection master ---echo * insert/update/delete rows in table t1 * -# Corruption algorithm modifies only the first event and -# then will be reset. To avoid checking always the first event -# from binlog (usually it is FD) we randomly execute different -# statements and set position for corruption inside events. - -CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100)); ---disable_query_log -let $i=`SELECT 3+CEILING(10*RAND())`; -let $j=1; -let $pos=0; -while ($i) { - eval INSERT INTO t1 VALUES ($j, 'a', NULL); - if (`SELECT RAND() > 0.7`) - { - eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j; - } - if (`SELECT RAND() > 0.8`) - { - eval DELETE FROM t1 WHERE a = $j; - } - if (!$pos) { - let $pos= query_get_value(SHOW MASTER STATUS, Position, 1); - --sync_slave_with_master - --source include/stop_slave.inc - --disable_query_log - --connection master - } - dec $i; - inc $j; -} ---enable_query_log - - -# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing ---echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS -SET @saved_dbug = @@global.debug_dbug; -SET @@global.debug_dbug="d,corrupt_read_log_event_char"; ---echo SHOW BINLOG EVENTS; ---disable_query_log -send_eval SHOW BINLOG EVENTS FROM $pos; ---enable_query_log ---error ER_ERROR_WHEN_EXECUTING_COMMAND -reap; - -SET @@global.debug_dbug=@saved_dbug; - -# Emulate corruption on master with crc checking on master ---echo # 3. Master read a corrupted event from binlog and send the error to slave - -# We have a rare but nasty potential race here: if the dump thread on -# the master for the _old_ slave connection has not yet discovered -# that the slave has disconnected, we will inject the corrupt event on -# the wrong connection, and the test will fail -# (+d,corrupt_read_log_event2 corrupts only one event). -# So kill any lingering dump thread (we need to kill; otherwise dump thread -# could manage to send all events down the socket before seeing it close, and -# hang forever waiting for new binlog events to be created). -let $id= `select id from information_schema.processlist where command = "Binlog Dump"`; -if ($id) -{ - --disable_query_log - --error 0,1094 - eval kill $id; - --enable_query_log -} -let $wait_condition= - SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump'; ---source include/wait_condition.inc - -SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; ---connection slave -START SLAVE IO_THREAD; -let $slave_io_errno= 1236; ---let $slave_timeout= 10 ---source include/wait_for_slave_io_error.inc ---connection master -SET @@global.debug_dbug=@saved_dbug; - -# Emulate corruption on master without crc checking on master ---echo # 4. Master read a corrupted event from binlog and send it to slave ---connection master -SET GLOBAL master_verify_checksum=0; -SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; ---connection slave -START SLAVE IO_THREAD; -# When the checksum error is detected, the slave sets error code 1743 -# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately -# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). -# So we usually get 1595, but it is occasionally possible to get 1743. 
-let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE ---source include/wait_for_slave_io_error.inc ---connection master -SET @@global.debug_dbug=@saved_dbug; -SET GLOBAL master_verify_checksum=1; - -# Emulate corruption in network ---echo # 5. Slave. Corruption in network ---connection slave -SET @saved_dbug_slave = @@GLOBAL.debug_dbug; -SET @@global.debug_dbug="d,corrupt_queue_event"; -START SLAVE IO_THREAD; -let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE ---source include/wait_for_slave_io_error.inc -SET @@global.debug_dbug=@saved_dbug_slave; - -# Emulate corruption in relay log ---echo # 6. Slave. Corruption in relay log - -SET @@global.debug_dbug="d,corrupt_read_log_event_char"; - -START SLAVE SQL_THREAD; -let $slave_sql_errno= 1593; ---source include/wait_for_slave_sql_error.inc - -SET @@global.debug_dbug=@saved_dbug_slave; - -# Start normal replication and compare same table on master -# and slave ---echo # 7. Seek diff for tables on master and slave ---connection slave ---source include/start_slave.inc ---connection master ---sync_slave_with_master -let $diff_tables= master:test.t1, slave:test.t1; ---source include/diff_tables.inc - -# Clean up ---echo # 8. Clean up ---connection master -set @@global.debug_dbug = @saved_dbug; -SET GLOBAL master_verify_checksum = @old_master_verify_checksum; -DROP TABLE t1; ---sync_slave_with_master - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_gtid_basic.inc b/mysql-test/suite/rpl/include/rpl_gtid_basic.inc deleted file mode 100644 index 68a5d05ffe9..00000000000 --- a/mysql-test/suite/rpl/include/rpl_gtid_basic.inc +++ /dev/null @@ -1,572 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/have_innodb.inc ---let $rpl_topology=1->2->3->4 ---source include/rpl_init.inc - -# Set up a 4-deep replication topology, then test various fail-overs -# using GTID. 
-# -# A -> B -> C -> D - -connection server_1; ---source include/wait_for_binlog_checkpoint.inc ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---echo *** GTID position should be empty here *** ---replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); - -CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; -CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1, "m1"); -INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4"); -INSERT INTO t2 VALUES (1, "i1"); -BEGIN; -INSERT INTO t2 VALUES (2, "i2"), (3, "i3"); -INSERT INTO t2 VALUES (4, "i4"); -COMMIT; -save_master_pos; -source include/wait_for_binlog_checkpoint.inc; ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos` ---echo *** GTID position should be non-empty here *** ---replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1> -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); - -connection server_2; -sync_with_master; -source include/wait_for_binlog_checkpoint.inc; ---let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) ---let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) ---echo *** GTID position should be the same as on server_1 *** ---replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1> -eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; -save_master_pos; - -connection server_3; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; -save_master_pos; - -connection server_4; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - - ---echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A *** -connection server_4; ---source include/stop_slave.inc - -connection server_1; -INSERT INTO t1 VALUES (5, "m1a"); -INSERT INTO t2 VALUES (5, "i1a"); -save_master_pos; - -connection server_4; ---replace_result $MASTER_MYPORT MASTER_PORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - ---echo *** Now move B to D (C is still replicating from B) *** -connection server_2; ---source include/stop_slave.inc ---replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc - -connection server_4; -UPDATE t2 SET b="j1a" WHERE a=5; -save_master_pos; - -connection server_2; -sync_with_master; -SELECT * FROM t1 ORDER BY a; -SELECT * FROM t2 ORDER BY a; - ---echo *** Now move C to D, after letting it fall a little behind *** -connection server_3; ---source include/stop_slave.inc - -connection server_1; -INSERT INTO t2 VALUES (6, "i6b"); -INSERT INTO t2 VALUES (7, "i7b"); ---source include/save_master_gtid.inc - -connection server_3; ---replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, - MASTER_USE_GTID=CURRENT_POS; ---source include/start_slave.inc ---source 
include/sync_with_master_gtid.inc -SELECT * FROM t2 ORDER BY a; - ---echo *** Now change everything back to what it was, to make rpl_end.inc happy -# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled. -connection server_2; -# We need to sync up server_2 before switching. If it happened to have reached -# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to -# server_1, which is (deliberately) missing that transaction. ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc ---replace_result $MASTER_MYPORT MASTER_MYPORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT; ---source include/start_slave.inc ---source include/wait_for_slave_to_start.inc - -connection server_3; ---source include/stop_slave.inc ---replace_result $SLAVE_MYPORT SLAVE_MYPORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT; ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -connection server_4; ---source include/stop_slave.inc ---replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; ---source include/start_slave.inc - -connection server_1; -DROP TABLE t1,t2; ---source include/save_master_gtid.inc - ---echo *** A few more checks for BINLOG_GTID_POS function *** ---let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1) ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS(); ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS('a'); ---error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT -SELECT BINLOG_GTID_POS('a',1,NULL); -SELECT BINLOG_GTID_POS(1,'a'); -SELECT BINLOG_GTID_POS(NULL,NULL); -SELECT BINLOG_GTID_POS('',1); -SELECT BINLOG_GTID_POS('a',1); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615); -eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616); - - ---echo *** Some tests of @@GLOBAL.gtid_binlog_state *** ---connection server_2 ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc - ---connection server_1 -SET @old_state= @@GLOBAL.gtid_binlog_state; - ---error ER_BINLOG_MUST_BE_EMPTY -SET GLOBAL gtid_binlog_state = ''; -RESET MASTER; -SET GLOBAL gtid_binlog_state = ''; -FLUSH LOGS; ---source include/show_binary_logs.inc -SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; ---source include/show_binary_logs.inc ---let $binlog_file= master-bin.000001 ---let $binlog_start= 4 ---source include/show_binlog_events.inc -#SELECT @@GLOBAL.gtid_binlog_pos; -#SELECT @@GLOBAL.gtid_binlog_state; ---error ER_BINLOG_MUST_BE_EMPTY -SET GLOBAL gtid_binlog_state = @old_state; -RESET MASTER; -SET GLOBAL gtid_binlog_state = @old_state; - -# Check that slave can reconnect again, despite the RESET MASTER, as we -# restored the state. - -CREATE TABLE t1 (a INT PRIMARY KEY); -SET gtid_seq_no=100; -INSERT INTO t1 VALUES (1); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc -# We cannot just use sync_with_master as we've done RESET MASTER, so -# slave old-style position is wrong. -# So sync on gtid position instead. ---source include/sync_with_master_gtid.inc - -SELECT * FROM t1; -# Check that the IO gtid position in SHOW SLAVE STATUS is also correct. 
---let $status_items= Gtid_IO_Pos ---source include/show_slave_status.inc - ---echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() *** - ---connection server_1 -DROP TABLE t1; -CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; ---save_master_pos - ---connection server_2 ---sync_with_master ---source include/stop_slave.inc - ---connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SELECT @@last_gtid; -SET gtid_seq_no=110; -SELECT @@last_gtid; -BEGIN; -SELECT @@last_gtid; -INSERT INTO t1 VALUES (2); -SELECT @@last_gtid; -COMMIT; -SELECT @@last_gtid; ---let $pos= `SELECT @@gtid_binlog_pos` - ---connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -eval SET @pos= '$pos'; -# Check NULL argument. -SELECT master_gtid_wait(NULL); -# Check empty argument returns immediately. -SELECT master_gtid_wait('', NULL); -# Check this gets counted -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_time'; -# Let's check that we get a timeout -SELECT master_gtid_wait(@pos, 0.5); -SELECT * FROM t1 ORDER BY a; -# Now actually wait until the slave reaches the position -send SELECT master_gtid_wait(@pos); - ---connection server_2 ---source include/start_slave.inc - ---connection s1 -reap; -SELECT * FROM t1 ORDER BY a; - -# Test waiting on a domain that does not exist yet. ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id= 1; -INSERT INTO t1 VALUES (3); ---let $pos= `SELECT @@gtid_binlog_pos` - ---connection s1 ---replace_result $pos POS -eval SET @pos= '$pos'; -SELECT master_gtid_wait(@pos, 0); -SELECT * FROM t1 WHERE a >= 3; -send SELECT master_gtid_wait(@pos, -1); - ---connection server_2 ---source include/start_slave.inc - ---connection s1 -reap; -SELECT * FROM t1 WHERE a >= 3; -# Waiting for only part of the position. -SELECT master_gtid_wait('1-1-1', 0); - -# Now test a lot of parallel master_gtid_wait() calls, completing in different -# order, and some of which time out or get killed on the way. - ---connection s1 -send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110'); - ---connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -# This will time out. No event 0-1-1000 exists -send SELECT master_gtid_wait('0-1-1000', 0.5); - ---connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -# This one we will kill ---let $kill1_id= `SELECT connection_id()` -send SELECT master_gtid_wait('0-1-2000'); - ---connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-10'); - ---connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-6', 1); - -# This one we will kill also. ---connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,) ---let $kill2_id= `SELECT connection_id()` -send SELECT master_gtid_wait('2-1-5'); - ---connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-10'); - ---connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110'); - ---connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('2-1-2'); - ---connection server_2 -# This one completes immediately. 
-SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SELECT master_gtid_wait('1-1-1'); -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); ---replace_result $wait_time MASTER_GTID_WAIT_TIME -eval SET @a= $wait_time; -SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected")) - AS Master_gtid_wait_time_as_expected; - - ---connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -send SELECT master_gtid_wait('0-1-109'); - ---connection server_2 -# This one should time out. -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -SELECT master_gtid_wait('2-1-2', 0.5); -SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; -SHOW STATUS LIKE 'Master_gtid_wait_count'; -let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); ---replace_result $wait_time MASTER_GTID_WAIT_TIME -eval SET @a= $wait_time; -# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling -# and timer inaccuracies could introduce significant jitter. So allow a -# generous interval. -SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected; - ---replace_result $kill1_id KILL_ID -eval KILL QUERY $kill1_id; ---connection s3 ---error ER_QUERY_INTERRUPTED -reap; - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=2; -INSERT INTO t1 VALUES (4); - ---connection s9 -reap; - ---connection server_2 ---replace_result $kill2_id KILL_ID -eval KILL CONNECTION $kill2_id; - ---connection s6 ---error 2013,ER_CONNECTION_KILLED -reap; - ---connection server_1 -SET gtid_domain_id=1; -SET gtid_seq_no=4; -INSERT INTO t1 VALUES (5); -SET gtid_domain_id=2; -SET gtid_seq_no=5; -INSERT INTO t1 VALUES (6); - ---connection s8 -reap; ---connection s1 -reap; ---connection s2 -reap; ---connection s5 -reap; ---connection s10 -reap; - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=10; -INSERT INTO t1 VALUES (7); - ---connection s4 -reap; ---connection s7 -reap; - - ---echo *** Test gtid_slave_pos when used with GTID *** - ---connection server_2 ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1000; -INSERT INTO t1 VALUES (10); -INSERT INTO t1 VALUES (11); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 1; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1010; -INSERT INTO t1 VALUES (12); -INSERT INTO t1 VALUES (13); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 2; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1020; -INSERT INTO t1 VALUES (14); -INSERT INTO t1 VALUES (15); -INSERT INTO t1 VALUES (16); ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 3; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=2; -SET gtid_seq_no=1030; -# Disable logging Annotate_rows events to preserve events count. -let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; -SET @@binlog_annotate_row_events= 0; -INSERT INTO t1 VALUES (17); -INSERT INTO t1 VALUES (18); -INSERT INTO t1 VALUES (19); -eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; ---save_master_pos - ---connection server_2 -SET sql_slave_skip_counter= 5; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 10 ORDER BY a; -SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - - ---source include/stop_slave.inc - ---connection server_1 -SET gtid_domain_id=3; -SET gtid_seq_no=100; -CREATE TABLE t2 (a INT PRIMARY KEY); -DROP TABLE t2; -SET gtid_domain_id=2; -SET gtid_seq_no=1040; -INSERT INTO t1 VALUES (20); ---save_master_pos - ---connection server_2 -SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; -SET GLOBAL slave_ddl_exec_mode=STRICT; -SET sql_slave_skip_counter=1; -START SLAVE UNTIL master_gtid_pos="3-1-100"; ---let $master_pos=3-1-100 ---source include/sync_with_master_gtid.inc ---source include/wait_for_slave_to_stop.inc ---error ER_NO_SUCH_TABLE -SELECT * FROM t2; -SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -# Start the slave again, it should fail on the DROP TABLE as the table is not there. -SET sql_log_bin=0; -CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); -SET sql_log_bin=1; -START SLAVE; ---let $slave_sql_errno=1051 ---source include/wait_for_slave_sql_error.inc -SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -STOP SLAVE IO_THREAD; -SET sql_slave_skip_counter=2; ---source include/start_slave.inc ---sync_with_master - -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; -SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; - -SET GLOBAL slave_ddl_exec_mode= @saved_mode; - - ---echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. *** - -# Create an out-of-order binlog on server 2. -# Let server 3 replicate to an out-of-order point, stop it, restart it, -# and check that it replicates correctly despite the out-of-order. 
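
As a point of orientation for the GTID positions compared throughout these tests: a MariaDB GTID has the form domain_id-server_id-seq_no, and the relevant positions can be read directly (the values shown in the comments are only illustrative):

    SELECT @@gtid_binlog_pos;    # on the master, e.g. '0-1-110,2-1-10' (one GTID per domain)
    SELECT @@gtid_slave_pos;     # on the slave: last GTID applied per replication domain
    SELECT @@gtid_current_pos;   # roughly the combination of the two, for a slave that also binlogs
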
- ---connection server_1 -SET gtid_domain_id= @@GLOBAL.gtid_domain_id; -INSERT INTO t1 VALUES (31); ---save_master_pos - ---connection server_2 ---sync_with_master -SET gtid_domain_id= @@GLOBAL.gtid_domain_id; -INSERT INTO t1 VALUES (32); - ---connection server_1 -INSERT INTO t1 VALUES (33); ---save_master_pos - ---connection server_2 ---sync_with_master ---save_master_pos - ---connection server_3 ---sync_with_master ---source include/stop_slave.inc - ---connection server_1 -INSERT INTO t1 VALUES (34); ---save_master_pos - ---connection server_2 ---sync_with_master ---save_master_pos - ---connection server_3 ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 30 ORDER BY a; ---save_master_pos - ---connection server_4 ---sync_with_master -SELECT * FROM t1 WHERE a >= 30 ORDER BY a; - - -# Clean up. ---connection server_1 -DROP TABLE t1; - - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_incident.inc b/mysql-test/suite/rpl/include/rpl_incident.inc deleted file mode 100644 index 75d28d6a6c6..00000000000 --- a/mysql-test/suite/rpl/include/rpl_incident.inc +++ /dev/null @@ -1,61 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/have_debug.inc ---source include/master-slave.inc - -SET @old_binlog_checksum=@@binlog_checksum; -SET GLOBAL BINLOG_CHECKSUM=none; -connection slave; -SET @old_binlog_checksum=@@binlog_checksum; -SET GLOBAL BINLOG_CHECKSUM=none; -connection master; - ---echo **** On Master **** -CREATE TABLE t1 (a INT); - -INSERT INTO t1 VALUES (1),(2),(3); -SELECT * FROM t1; - -set @saved_dbug = @@global.debug_dbug; -SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*'; - -# This will generate an incident log event and store it in the binary -# log before the replace statement. -REPLACE INTO t1 VALUES (4); ---save_master_pos -SELECT * FROM t1; - -set @@global.debug_dbug = @saved_dbug; - -connection slave; -# Wait until SQL thread stops with error LOST_EVENT on master -call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590"); -let $slave_sql_errno= 1590; -let $show_slave_sql_error= 1; -source include/wait_for_slave_sql_error.inc; - -# The 4 should not be inserted into the table, since the incident log -# event should have stop the slave. ---echo **** On Slave **** -SELECT * FROM t1; - -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; -START SLAVE; ---sync_with_master - -# Now, we should have inserted the row into the table and the slave -# should be running. We should also have rotated to a new binary log. - -SELECT * FROM t1; -source include/check_slave_is_running.inc; - -connection master; -SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; -DROP TABLE t1; ---sync_slave_with_master -SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc b/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc deleted file mode 100644 index 46673ea4764..00000000000 --- a/mysql-test/suite/rpl/include/rpl_init_slave_errors.inc +++ /dev/null @@ -1,96 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -###################################################################### -# Some errors that cause the slave SQL thread to stop are not shown in -# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error -# is only in the server's error log. -# -# Two failures and their respective reporting are verified: -# -# 1 - Failures during slave thread initialization -# 2 - Failures while processing queries passed through the init_slave -# option. -# -# In order to check the first type of failure, we inject a fault in the -# SQL/IO Threads through SET GLOBAL debug. -# -# To check the second type, we set @@global.init_slave to an invalid -# command thus preventing the initialization of the SQL Thread. -# -# Obs: -# 1 - Note that testing failures while initializing the relay log position -# is hard as the same function is called before the code reaches the point -# that we want to test. -# -# 2 - This test does not target failures that are reported while applying -# events such as duplicate keys, errors while reading the relay-log.bin*, -# etc. Such errors are already checked on other tests. -###################################################################### - -###################################################################### -# Configuring the Environment -###################################################################### -source include/have_debug.inc; -source include/have_log_bin.inc; -source include/master-slave.inc; - -connection slave; - ---disable_warnings -stop slave; ---enable_warnings -reset slave; - -###################################################################### -# Injecting faults in the threads' initialization -###################################################################### -connection slave; - -# Set debug flags on slave to force errors to occur -set @saved_dbug = @@global.debug_dbug; -SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init"; - -start slave; - -# -# slave is going to stop because of emulated failures -# but there won't be any crashes nor asserts hit. -# -# 1593 = ER_SLAVE_FATAL_ERROR ---let $slave_sql_errno= 1593 ---let $show_slave_sql_error= 1 ---source include/wait_for_slave_sql_error.inc - -call mtr.add_suppression("Failed during slave.* thread initialization"); - -set @@global.debug_dbug = @saved_dbug; - -###################################################################### -# Injecting faults in the init_slave option -###################################################################### -connection slave; - -reset slave; - -SET GLOBAL init_slave= "garbage"; - -start slave; -# 1064 = ER_PARSE_ERROR ---let $slave_sql_errno= 1064 ---let $show_slave_sql_error= 1 ---source include/wait_for_slave_sql_error.inc - -###################################################################### -# Clean up -###################################################################### -SET GLOBAL init_slave= ""; - -# Clean up Last_SQL_Error ---source include/stop_slave_io.inc -RESET SLAVE; ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_loadfile.inc b/mysql-test/suite/rpl/include/rpl_loadfile.inc deleted file mode 100644 index 9cd64530690..00000000000 --- a/mysql-test/suite/rpl/include/rpl_loadfile.inc +++ /dev/null @@ -1,120 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -############################################################################# -# Original Author: JBM # -# Original Date: Aug/18/2005 # -############################################################################# -# TEST: To test the LOAD_FILE() in rbr # -############################################################################# -# Change Author: JBM -# Change Date: 2006-01-16 -########## - -# Includes --- source include/have_binlog_format_mixed_or_row.inc --- source include/master-slave.inc - --- source suite/rpl/include/rpl_loadfile.test - -# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE -# -# DESCRIPTION -# -# Problem: when using load_file string function and mixed binlogging format -# there was no switch to row based binlogging format. This leads -# to scenarios on which the slave replicates the statement and it -# will try to load the file from local file system, which in most -# likely it will not exist. -# -# Solution: -# Marking this function as unsafe for statement format, makes the -# statement using it to be logged in row based format. As such, data -# replicated from the master, becomes the content of the loaded file. -# Consequently, the slave receives the necessary data to complete -# the load_file instruction correctly. -# -# IMPLEMENTATION -# -# The test is implemented as follows: -# -# On Master, -# i) write to file the desired content. -# ii) create table and stored procedure with load_file -# iii) stop slave -# iii) execute load_file -# iv) remove file -# -# On Slave, -# v) start slave -# vi) sync it with master so that it gets the updates from binlog (which -# should have bin logged in row format). -# -# If the the binlog format does not change to row, then the assertion -# done in the following step fails. This happens because tables differ -# since the file does not exist anymore, meaning that when slave -# attempts to execute LOAD_FILE statement it inserts NULL on table -# instead of the same contents that the master loaded when it executed -# the procedure (which was executed when file existed). -# -# vii) assert that the contents of master and slave -# table are the same - ---source include/rpl_reset.inc - -connection master; -let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data; - ---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---eval SELECT repeat('x',20) INTO OUTFILE '$file' - -disable_warnings; -DROP TABLE IF EXISTS t1; -enable_warnings; - -CREATE TABLE t1 (t text); -DELIMITER |; -CREATE PROCEDURE p(file varchar(4096)) - BEGIN - INSERT INTO t1 VALUES (LOAD_FILE(file)); - END| -DELIMITER ;| - -# stop slave before issuing the load_file on master -connection slave; -source include/stop_slave.inc; - -connection master; - -# test: check that logging falls back to rbr. 
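
A minimal stand-alone sketch of the fallback described above, assuming a server running with binlog_format=MIXED; the table name and file path are only examples, and the file must be readable by the server (FILE privilege, secure_file_priv), otherwise LOAD_FILE() simply returns NULL:

    CREATE TABLE t_loadfile_demo (t TEXT);
    SET SESSION binlog_format = MIXED;
    INSERT INTO t_loadfile_demo VALUES (LOAD_FILE('/tmp/demo.txt'));
    # LOAD_FILE() is unsafe for statement-based logging, so under MIXED this insert is
    # written to the binary log as row events (Table_map + Write_rows) and the slave
    # receives the file contents instead of trying to re-read the file locally.
    DROP TABLE t_loadfile_demo;
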
---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---eval CALL p('$file') - -# test: remove the file from the filesystem and assert that slave still -# gets the loaded file -remove_file $file; - -# now that the file is removed it is safe (regarding what we want to test) -# to start slave -connection slave; -source include/start_slave.inc; - -connection master; -sync_slave_with_master; - -# assertion: assert that the slave got the updates even -# if the file was removed before the slave started, -# meaning that contents were indeed transfered -# through binlog (in row format) -let $diff_tables= master:t1, slave:t1; -source include/diff_tables.inc; - -# CLEAN UP ---connection master -DROP TABLE t1; -DROP PROCEDURE p; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_packet.inc b/mysql-test/suite/rpl/include/rpl_packet.inc deleted file mode 100644 index cbde486bcbb..00000000000 --- a/mysql-test/suite/rpl/include/rpl_packet.inc +++ /dev/null @@ -1,184 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# ==== Purpose ==== -# -# Check replication protocol packet size handling -# -# ==== Related bugs ==== -# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave -# BUG#23755: Replicated event larger that max_allowed_packet infinitely re-transmits -# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors -# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET - -# max-out size db name -source include/have_binlog_format_row.inc; -source include/master-slave.inc; - -call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); -call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); -let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; -disable_warnings; -eval drop database if exists $db; -enable_warnings; -eval create database $db; - -connection master; -let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`; -let $old_net_buffer_length= `SELECT @@global.net_buffer_length`; -let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`; -SET @@global.max_allowed_packet=1024; -SET @@global.net_buffer_length=1024; - -sync_slave_with_master; -# Restart slave for setting to take effect -source include/stop_slave.inc; -source include/start_slave.inc; - -# Reconnect to master for new setting to take effect -disconnect master; - -# alas, can't use eval here; if db name changed apply the change here -connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________); - -connection master; -select @@net_buffer_length, @@max_allowed_packet; - -create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM; - -INSERT INTO `t1`(`f1`) VALUES 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023'); -sync_slave_with_master; - -eval select count(*) from `$db`.`t1` /* must be 1 */; - -SHOW STATUS LIKE 'Slave_running'; -select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING'; -connection master; -eval drop database $db; -sync_slave_with_master; - -# -# Bug #23755: Replicated event larger that max_allowed_packet infinitely re-transmits -# -# Check that a situation when the size of event on the master is greater than -# max_allowed_packet on the slave does not lead to infinite re-transmits. - -connection master; - -# Change the max packet size on master - -SET @@global.max_allowed_packet=4096; -SET @@global.net_buffer_length=4096; - -# Restart slave for new setting to take effect -connection slave; -source include/stop_slave.inc; -source include/start_slave.inc; - -# Reconnect to master for new setting to take effect -disconnect master; -connect (master, localhost, root); -connection master; - -CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM; - -sync_slave_with_master; - -connection master; - -INSERT INTO `t1`(`f1`) VALUES 
('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); - - -# -# Bug#42914: The slave I/O thread must stop after trying to read the above -# event, However there is no Last_IO_Error report. -# - -# The slave I/O thread must stop after trying to read the above event -connection slave; -# 1153 = ER_NET_PACKET_TOO_LARGE ---let $slave_io_errno= 1153 ---let $show_slave_io_error= 1 ---source include/wait_for_slave_io_error.inc - -# TODO: this is needed because of BUG#55790. Remove once that is fixed. ---source include/stop_slave_sql.inc - -# -# Bug#42914: On the master, if a binary log event is larger than -# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG -# is sent to a slave when it requests a dump from the master, thus leading the -# I/O thread to stop. However, there is no Last_IO_Error reported. -# - ---let $rpl_only_running_threads= 1 ---source include/rpl_reset.inc ---connection master -DROP TABLE t1; ---sync_slave_with_master - - -connection master; -CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM; -sync_slave_with_master; - -connection master; -INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet)); - -connection slave; -# The slave I/O thread must stop after receiving -# 1153 = ER_NET_PACKET_TOO_LARGE ---let $slave_io_errno= 1153 ---let $show_slave_io_error= 1 ---source include/wait_for_slave_io_error.inc - -# Remove the bad binlog and clear error status on slave. 
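
For orientation, the variables this test deliberately shrinks can be inspected and resized as shown below; the 16MB and 1MB figures are example values, not recommendations:

    SHOW GLOBAL VARIABLES LIKE '%allowed_packet';   # max_allowed_packet and slave_max_allowed_packet
    SHOW GLOBAL VARIABLES LIKE 'net_buffer_length';
    SET GLOBAL max_allowed_packet = 16 * 1024 * 1024;
    SET GLOBAL net_buffer_length  = 1024 * 1024;
    # An event larger than slave_max_allowed_packet stops the slave I/O thread with
    # error 1153 (ER_NET_PACKET_TOO_LARGE), which is exactly what this test provokes.
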
-STOP SLAVE; -RESET SLAVE; ---connection master -RESET MASTER; - - -# -# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET -# -# In BUG#55322, @@session.max_allowed_packet increased each time SHOW -# BINLOG EVENTS was issued. To verify that this bug is fixed, we -# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet -# never changes. We turn off the result log because we don't care -# about the contents of the binlog. - ---disable_result_log -SET @max_allowed_packet_0= @@session.max_allowed_packet; -SHOW BINLOG EVENTS; -SET @max_allowed_packet_1= @@session.max_allowed_packet; -SHOW BINLOG EVENTS; -SET @max_allowed_packet_2= @@session.max_allowed_packet; ---enable_result_log -if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`) -{ - --echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS - --source include/show_rpl_debug_info.inc - SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2; - --die @max_allowed_packet changed after executing SHOW BINLOG EVENTS -} - - ---echo ==== clean up ==== -connection master; -DROP TABLE t1; -eval SET @@global.max_allowed_packet= $old_max_allowed_packet; -eval SET @@global.net_buffer_length= $old_net_buffer_length; -eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet; -# slave is stopped -connection slave; -DROP TABLE t1; - -# Clear Last_IO_Error -RESET SLAVE; - ---source include/rpl_end.inc -# End of tests diff --git a/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc b/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc deleted file mode 100644 index 7a6a758a508..00000000000 --- a/mysql-test/suite/rpl/include/rpl_parallel_ignored_errors.inc +++ /dev/null @@ -1,112 +0,0 @@ -# ==== Purpose ==== -# -# Test verifies that, in parallel replication, transaction failure notification -# is propagated to all the workers. Workers should abort the execution of -# transaction event groups, whose event positions are higher than the failing -# transaction group. -# -# ==== Implementation ==== -# -# Steps: -# 0 - Create a table t1 on master which has a primary key. Enable parallel -# replication on slave with slave_parallel_mode='optimistic' and -# slave_parallel_threads=3. -# 1 - On slave start a transaction and execute a local INSERT statement -# which will insert value 32. This is done to block the INSERT coming -# from master. -# 2 - On master execute an INSERT statement with value 32, so that it is -# blocked on slave. -# 3 - On slave enable a debug sync point such that it holds the worker thread -# execution as soon as work is scheduled to it. -# 4 - INSERT value 33 on master. It will be held on slave by other worker -# thread due to debug simulation. -# 5 - INSERT value 34 on master. -# 6 - On slave, enusre that INSERT 34 has reached a state where it waits for -# its prior transactions to commit. -# 7 - Commit the local INSERT 32 on slave server so that first worker will -# error out. -# 8 - Now send a continue signal to second worker processing 33. It should -# wakeup and propagate the error to INSERT 34. -# 9 - Upon slave stop due to error, check that no rows are found after the -# failed INSERT 32. -# -# ==== References ==== -# -# MDEV-20645: Replication consistency is broken as workers miss the error -# notification from an earlier failed group. 
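
Condensed from the setup steps above, the slave-side configuration this scenario relies on looks roughly like this (three worker threads is simply the value the test picks):

    STOP SLAVE;
    SET GLOBAL slave_parallel_mode    = 'optimistic';
    SET GLOBAL slave_parallel_threads = 3;
    CHANGE MASTER TO master_use_gtid = slave_pos;
    START SLAVE;
    SHOW GLOBAL VARIABLES LIKE 'slave_parallel%';
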
-# - ---source include/have_innodb.inc ---source include/have_debug.inc ---source include/have_debug_sync.inc ---source include/have_binlog_format_statement.inc ---source include/master-slave.inc - ---enable_connect_log ---connection server_2 ---source include/stop_slave.inc -SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; -SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; -SET @old_debug= @@GLOBAL.debug_dbug; -SET GLOBAL slave_parallel_mode='optimistic'; -SET GLOBAL slave_parallel_threads= 3; -CHANGE MASTER TO master_use_gtid=slave_pos; -CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); ---source include/start_slave.inc - ---connection server_1 -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc - ---connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -BEGIN; -INSERT INTO t1 VALUES (32); - ---connection server_1 -INSERT INTO t1 VALUES (32); - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)" ---source include/wait_condition.inc -SET GLOBAL debug_dbug="+d,hold_worker_on_schedule"; -SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker"; - ---connection server_1 -SET gtid_seq_no=100; -INSERT INTO t1 VALUES (33); - ---connection server_2 -SET debug_sync='now WAIT_FOR reached_pause'; - ---connection server_1 -INSERT INTO t1 VALUES (34); - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit" ---source include/wait_condition.inc ---connection con_temp2 -COMMIT; - -# Clean up. ---connection server_2 ---source include/stop_slave.inc ---let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32 ---let $assert_text= table t1 should have zero rows where a>32 ---source include/assert.inc -SELECT * FROM t1 WHERE a>32; -DELETE FROM t1 WHERE a=32; - -SET GLOBAL slave_parallel_threads=@old_parallel_threads; -SET GLOBAL slave_parallel_mode=@old_parallel_mode; -SET GLOBAL debug_dbug=@old_debug; -SET DEBUG_SYNC= 'RESET'; ---source include/start_slave.inc - ---connection server_1 -DROP TABLE t1; ---disable_connect_log ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc b/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc deleted file mode 100644 index cddc9286bd2..00000000000 --- a/mysql-test/suite/rpl/include/rpl_parallel_show_binlog_events_purge_logs.inc +++ /dev/null @@ -1,38 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER -# -# The function mysql_show_binlog_events has a local stack variable -# 'LOG_INFO linfo;', which is assigned to thd->current_linfo, however -# this variable goes out of scope and is destroyed before clean -# thd->current_linfo. -# -# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure -# that with the fix local variable linfo is valid along all -# mysql_show_binlog_events function scope. 
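
Outside the debug-sync machinery, the race being exercised is simply these two statements running concurrently from two ordinary client sessions (the binary log file name is hypothetical):

    # session 1
    SHOW BINLOG EVENTS IN 'master-bin.000001' LIMIT 10;
    # session 2, at the same time
    FLUSH LOGS;   # rotates the binary log while session 1 is still iterating over it
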
-# ---source include/have_debug.inc ---source include/have_debug_sync.inc ---source include/master-slave.inc - ---connection slave -SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end'; ---send SHOW BINLOG EVENTS - ---connection slave1 -SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events'; -FLUSH LOGS; -SET DEBUG_SYNC= 'now SIGNAL end'; - ---connection slave ---disable_result_log ---reap ---enable_result_log -SET DEBUG_SYNC= 'RESET'; - ---connection master ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_relayrotate.inc b/mysql-test/suite/rpl/include/rpl_relayrotate.inc deleted file mode 100644 index 4de554d3143..00000000000 --- a/mysql-test/suite/rpl/include/rpl_relayrotate.inc +++ /dev/null @@ -1,18 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -####################################################### -# Wrapper for rpl_relayrotate.test to allow multi # -# Engines to reuse test code. By JBM 2006-02-15 # -####################################################### --- source include/have_innodb.inc -# Slow test, don't run during staging part --- source include/not_staging.inc --- source include/master-slave.inc - -let $engine_type=innodb; --- source suite/rpl/include/rpl_relayrotate.test ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_semi_sync.inc b/mysql-test/suite/rpl/include/rpl_semi_sync.inc deleted file mode 100644 index c3cd918b5fc..00000000000 --- a/mysql-test/suite/rpl/include/rpl_semi_sync.inc +++ /dev/null @@ -1,525 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -source include/not_embedded.inc; -source include/have_innodb.inc; -source include/master-slave.inc; - -let $engine_type= InnoDB; - -# Suppress warnings that might be generated during the test -connection master; -call mtr.add_suppression("Timeout waiting for reply of binlog"); -call mtr.add_suppression("Read semi-sync reply"); -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); -call mtr.add_suppression("mysqld: Got an error reading communication packets"); -connection slave; -call mtr.add_suppression("Master server does not support semi-sync"); -call mtr.add_suppression("Semi-sync slave .* reply"); -call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group"); -connection master; - -# wait for dying connections (if any) to disappear -let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed'; ---source include/wait_condition.inc - -# After fix of BUG#45848, semi-sync slave should not create any extra -# connections on master, save the count of connections before start -# semi-sync slave for comparison below. 
-let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); - ---echo # ---echo # Uninstall semi-sync plugins on master and slave ---echo # -connection slave; -source include/stop_slave.inc; -reset slave; -set global rpl_semi_sync_master_enabled= 0; -set global rpl_semi_sync_slave_enabled= 0; - -connection master; -reset master; -set global rpl_semi_sync_master_enabled= 0; -set global rpl_semi_sync_slave_enabled= 0; - ---echo # ---echo # Main test of semi-sync replication start here ---echo # - -connection master; - -set global rpl_semi_sync_master_timeout= 60000; # 60s - -echo [ default state of semi-sync on master should be OFF ]; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ enable semi-sync on master ]; -set global rpl_semi_sync_master_enabled = 1; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ status of semi-sync on master should be ON even without any semi-sync slaves ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed ---echo # BUG#45673 Semisynch reports correct operation even if no slave is connected ---echo # - -# BUG#45672 When semi-sync is enabled on master, it would allocate -# transaction node even without semi-sync slave connected, and would -# finally result in transaction node allocation error. -# -# Semi-sync master will pre-allocate 'max_connections' transaction -# nodes, so here we do more than that much transactions to check if it -# will fail or not. -# select @@global.max_connections + 1; -let $i= `select @@global.max_connections + 1`; -disable_query_log; -eval create table t1 (a int) engine=$engine_type; -while ($i) -{ - eval insert into t1 values ($i); - dec $i; -} -drop table t1; -enable_query_log; - -# BUG#45673 -echo [ status of semi-sync on master should be OFF ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# reset master to make sure the following test will start with a clean environment -reset master; - -connection slave; - -echo [ default state of semi-sync on slave should be OFF ]; -show variables like 'rpl_semi_sync_slave_enabled'; - -echo [ enable semi-sync on slave ]; -set global rpl_semi_sync_slave_enabled = 1; -show variables like 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; - -connection master; - -# NOTE: Rpl_semi_sync_master_client will only be updated when -# semi-sync slave has started binlog dump request -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; - -echo [ initial master state after the semi-sync slave connected ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1(a int) engine = $engine_type; - -echo [ master state after CREATE TABLE statement ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# After fix of BUG#45848, semi-sync slave should not create any extra -# connections on master. 
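
For reference, the semi-sync switches used throughout this file are ordinary global variables in the MariaDB versions this suite targets (the script sets them directly, with no INSTALL PLUGIN step); a minimal sketch:

    # on the master
    SET GLOBAL rpl_semi_sync_master_enabled = ON;
    SET GLOBAL rpl_semi_sync_master_timeout = 10000;   # milliseconds before falling back to async
    # on the slave; the setting is picked up when the I/O thread (re)connects
    SET GLOBAL rpl_semi_sync_slave_enabled = ON;
    STOP SLAVE IO_THREAD;
    START SLAVE IO_THREAD;
    SHOW STATUS LIKE 'Rpl_semi_sync%';
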
-let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); -replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE; -eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0'; - -echo [ insert records to table ]; -insert t1 values (10); -insert t1 values (9); -insert t1 values (8); -insert t1 values (7); -insert t1 values (6); -insert t1 values (5); -insert t1 values (4); -insert t1 values (3); -insert t1 values (2); -insert t1 values (1); - -echo [ master status after inserts ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -sync_slave_with_master; - -echo [ slave status after replicated inserts ]; -show status like 'Rpl_semi_sync_slave_status'; - -select count(distinct a) from t1; -select min(a) from t1; -select max(a) from t1; - ---echo ---echo # BUG#50157 ---echo # semi-sync replication crashes when replicating a transaction which ---echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ; - -connection master; -SET SESSION AUTOCOMMIT= 0; -CREATE TABLE t2(c1 INT) ENGINE=innodb; -sync_slave_with_master; - -connection master; -BEGIN; ---echo ---echo # Even though it is in a transaction, this statement is binlogged into binlog ---echo # file immediately. ---disable_warnings -CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1; ---enable_warnings ---echo ---echo # These statements will not be binlogged until the transaction is committed -INSERT INTO t2 VALUES(11); -INSERT INTO t2 VALUES(22); -COMMIT; - -DROP TABLE t2, t3; -SET SESSION AUTOCOMMIT= 1; -sync_slave_with_master; - - ---echo # ---echo # Test semi-sync master will switch OFF after one transaction ---echo # timeout waiting for slave reply. ---echo # -connection slave; -source include/stop_slave.inc; - -connection master; ---source include/kill_binlog_dump_threads.inc -set global rpl_semi_sync_master_timeout= 5000; - -# The first semi-sync check should be on because after slave stop, -# there are no transactions on the master. -echo [ master status should be ON ]; - -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; - -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 0; -source include/wait_for_status_var.inc; - -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -echo [ semi-sync replication of these transactions will fail ]; -insert into t1 values (500); - -# Wait for the semi-sync replication of this transaction to timeout -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= OFF; -source include/wait_for_status_var.inc; - -# The second semi-sync check should be off because one transaction -# times out during waiting. -echo [ master status should be OFF ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -# Semi-sync status on master is now OFF, so all these transactions -# will be replicated asynchronously. 
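
The fallback behaviour verified here can be summarised as: if no slave acknowledges within rpl_semi_sync_master_timeout, the master flips Rpl_semi_sync_master_status to OFF and continues asynchronously. A small sketch (5 seconds is the value the test uses):

    SET GLOBAL rpl_semi_sync_master_timeout = 5000;    # milliseconds
    SHOW STATUS LIKE 'Rpl_semi_sync_master_status';    # ON while replies arrive in time, OFF after a timeout
    SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';     # transactions committed without an acknowledgement
    SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';    # transactions acknowledged by a semi-sync slave
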
-delete from t1 where a=10; -delete from t1 where a=9; -delete from t1 where a=8; -delete from t1 where a=7; -delete from t1 where a=6; -delete from t1 where a=5; -delete from t1 where a=4; -delete from t1 where a=3; -delete from t1 where a=2; -delete from t1 where a=1; - -insert into t1 values (100); - -echo [ master status should be OFF ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Test semi-sync status on master will be ON again when slave catches up ---echo # - -# Save the master position for later use. -save_master_pos; - -connection slave; - -echo [ slave status should be OFF ]; -show status like 'Rpl_semi_sync_slave_status'; -source include/start_slave.inc; -sync_with_master; - -echo [ slave status should be ON ]; -show status like 'Rpl_semi_sync_slave_status'; - -select count(distinct a) from t1; -select min(a) from t1; -select max(a) from t1; - -connection master; - -# The master semi-sync status should be on again after slave catches up. -echo [ master status should be ON again after slave catches up ]; - -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; -show status like 'Rpl_semi_sync_master_clients'; - ---echo # ---echo # Test disable/enable master semi-sync on the fly. ---echo # - -drop table t1; -sync_slave_with_master; - -source include/stop_slave.inc; - ---echo # ---echo # Flush status ---echo # -connection master; -echo [ Semi-sync master status variables before FLUSH STATUS ]; -SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; -SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; -# Do not write the FLUSH STATUS to binlog, to make sure we'll get a -# clean status after this. -FLUSH NO_WRITE_TO_BINLOG STATUS; -echo [ Semi-sync master status variables after FLUSH STATUS ]; -SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; -SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; - -connection master; - -source include/show_master_logs.inc; -show variables like 'rpl_semi_sync_master_enabled'; - -echo [ disable semi-sync on the fly ]; -set global rpl_semi_sync_master_enabled=0; -show variables like 'rpl_semi_sync_master_enabled'; -show status like 'Rpl_semi_sync_master_status'; - -echo [ enable semi-sync on the fly ]; -set global rpl_semi_sync_master_enabled=1; -show variables like 'rpl_semi_sync_master_enabled'; -show status like 'Rpl_semi_sync_master_status'; - ---echo # ---echo # Test RESET MASTER/SLAVE ---echo # - -connection slave; - -source include/start_slave.inc; - -connection master; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1 (a int) engine = $engine_type; -drop table t1; - -sync_slave_with_master; - -echo [ test reset master ]; -connection master; - -reset master; - -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - -connection slave; - -source include/stop_slave.inc; -reset slave; - -# Kill the dump thread on master for previous slave connection and ---source include/kill_binlog_dump_threads.inc - -connection slave; -source include/start_slave.inc; - -connection master; - -# Wait for dump thread to start, Rpl_semi_sync_master_clients will be -# 1 after dump thread started. 
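
One way to observe the binlog dump thread the comment above waits for, on a live master (purely illustrative, not part of the test flow):

    SELECT id, user, host, state
      FROM information_schema.PROCESSLIST
     WHERE command = 'Binlog Dump';
    SHOW STATUS LIKE 'Rpl_semi_sync_master_clients';   # number of connected semi-sync slaves
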
-let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; - -replace_result $engine_type ENGINE_TYPE; -eval create table t1 (a int) engine = $engine_type; -insert into t1 values (1); -insert into t1 values (2), (3); - -sync_slave_with_master; - -select * from t1; - -connection master; - -echo [ master semi-sync status should be ON ]; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Start semi-sync replication without SUPER privilege ---echo # -connection slave; -source include/stop_slave.inc; -reset slave; -connection master; -reset master; - -# Kill the dump thread on master for previous slave connection and wait for it to exit ---source include/kill_binlog_dump_threads.inc - -# Do not binlog the following statement because it will generate -# different events for ROW and STATEMENT format -set sql_log_bin=0; -grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; -flush privileges; -set sql_log_bin=1; -connection slave; -grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; -flush privileges; -change master to master_user='rpl',master_password='rpl_password'; -source include/start_slave.inc; -show status like 'Rpl_semi_sync_slave_status'; -connection master; - -# Wait for the semi-sync binlog dump thread to start -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; -echo [ master semi-sync should be ON ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; -insert into t1 values (4); -insert into t1 values (5); -echo [ master semi-sync should be ON ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -show status like 'Rpl_semi_sync_master_no_tx'; -show status like 'Rpl_semi_sync_master_yes_tx'; - ---echo # ---echo # Test semi-sync slave connect to non-semi-sync master ---echo # - -# Disable semi-sync on master -connection slave; -source include/stop_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - -connection master; - -# Kill the dump thread on master for previous slave connection and wait for it to exit ---source include/kill_binlog_dump_threads.inc - -echo [ Semi-sync status on master should be ON ]; -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 0; -source include/wait_for_status_var.inc; -show status like 'Rpl_semi_sync_master_status'; -let $status_var= Rpl_semi_sync_master_status; -let $status_var_value= ON; -source include/wait_for_status_var.inc; -set global rpl_semi_sync_master_enabled= 0; - -connection slave; -SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; -connection master; -insert into t1 values (8); -let $status_var= Rpl_semi_sync_master_clients; -let $status_var_value= 1; -source include/wait_for_status_var.inc; -echo [ master semi-sync clients should be 1, status should be OFF ]; -show status like 'Rpl_semi_sync_master_clients'; -show status like 'Rpl_semi_sync_master_status'; -sync_slave_with_master; -show status like 'Rpl_semi_sync_slave_status'; - -# Uninstall semi-sync plugin on master -connection slave; -source include/stop_slave.inc; -connection master; -set global rpl_semi_sync_master_enabled= 0; - -connection slave; 
-SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -source include/start_slave.inc; - -connection master; -insert into t1 values (10); -sync_slave_with_master; - ---echo # ---echo # Test non-semi-sync slave connect to semi-sync master ---echo # - -connection master; -set global rpl_semi_sync_master_timeout= 5000; # 5s -set global rpl_semi_sync_master_enabled= 1; - -connection slave; -source include/stop_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - -echo [ uninstall semi-sync slave plugin ]; -set global rpl_semi_sync_slave_enabled= 0; - -echo [ reinstall semi-sync slave plugin and disable semi-sync ]; -SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; -source include/start_slave.inc; -SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; - ---echo # ---echo # Clean up ---echo # - -connection slave; -source include/stop_slave.inc; -set global rpl_semi_sync_slave_enabled= 0; - -connection master; -set global rpl_semi_sync_master_enabled= 0; - -connection slave; -change master to master_user='root',master_password=''; -source include/start_slave.inc; - -connection master; -drop table t1; -sync_slave_with_master; - -connection master; -drop user rpl@127.0.0.1; -flush privileges; -set global rpl_semi_sync_master_timeout= default; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_skip_replication.inc b/mysql-test/suite/rpl/include/rpl_skip_replication.inc deleted file mode 100644 index 97fc961d438..00000000000 --- a/mysql-test/suite/rpl/include/rpl_skip_replication.inc +++ /dev/null @@ -1,402 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it. -# -# Usage: -# -# --let $use_remote_mysqlbinlog= 1 # optional -# --source suite/rpl/include/rpl_skip_replication.inc -# -# The script uses MYSQLBINLOG to verify certain results. -# By default, it uses binary logs directly. If it is undesirable, -# this behavior can be overridden by setting $use_remote_binlog -# as shown above. -# The value will be unset after every execution of the script, -# so if it is needed, it should be set explicitly before each call. -# - ---source include/have_innodb.inc ---source include/master-slave.inc - -connection slave; -# Test that SUPER is required to change @@replicate_events_marked_for_skip. 
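
For context on the variable exercised in the steps that follow: it is a slave-side global with three values, and, as the test also verifies, it can only be changed while the slave threads are stopped. A hedged sketch:

    SELECT @@global.replicate_events_marked_for_skip;   # REPLICATE (default), FILTER_ON_SLAVE or FILTER_ON_MASTER
    STOP SLAVE;
    SET GLOBAL replicate_events_marked_for_skip = FILTER_ON_MASTER;
    START SLAVE;
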
-CREATE USER 'nonsuperuser'@'127.0.0.1'; -GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE, - SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1'; -connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,); -connection nonpriv; ---error ER_SPECIFIC_ACCESS_DENIED_ERROR -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -disconnect nonpriv; -connection slave; -DROP USER'nonsuperuser'@'127.0.0.1'; - -SELECT @@global.replicate_events_marked_for_skip; ---error ER_SLAVE_MUST_STOP -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -SELECT @@global.replicate_events_marked_for_skip; -STOP SLAVE; ---error ER_GLOBAL_VARIABLE -SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER; -SELECT @@global.replicate_events_marked_for_skip; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -SELECT @@global.replicate_events_marked_for_skip; -START SLAVE; - -connection master; -SELECT @@skip_replication; ---error ER_LOCAL_VARIABLE -SET GLOBAL skip_replication=1; -SELECT @@skip_replication; - -CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb; -INSERT INTO t1(a) VALUES (1); -INSERT INTO t2(a) VALUES (1); - - -# Test that master-side filtering works. -SET skip_replication=1; - -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t1(a) VALUES (2); -INSERT INTO t2(a) VALUES (2); - -# Inject a rotate event in the binlog stream sent to slave (otherwise we will -# fail sync_slave_with_master as the last event on the master is not present -# on the slave). -FLUSH NO_WRITE_TO_BINLOG LOGS; - -sync_slave_with_master; -connection slave; -SHOW TABLES; -SELECT * FROM t1; -SELECT * FROM t2; - -connection master; -DROP TABLE t3; - -FLUSH NO_WRITE_TO_BINLOG LOGS; -sync_slave_with_master; - - -# Test that slave-side filtering works. -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE; - -connection master; -SET skip_replication=1; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t1(a) VALUES (3); -INSERT INTO t2(a) VALUES (3); - -# Inject a rotate event in the binlog stream sent to slave (otherwise we will -# fail sync_slave_with_master as the last event on the master is not present -# on the slave). -FLUSH NO_WRITE_TO_BINLOG LOGS; - -sync_slave_with_master; -connection slave; -SHOW TABLES; -SELECT * FROM t1; -SELECT * FROM t2; - -connection master; -DROP TABLE t3; - -FLUSH NO_WRITE_TO_BINLOG LOGS; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; - - -# Test that events with @@skip_replication=1 are not filtered when filtering is -# not set on slave. -connection master; -SET skip_replication=1; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; -INSERT INTO t3(a) VALUES(2); -sync_slave_with_master; -connection slave; -SELECT * FROM t3; -connection master; -DROP TABLE t3; - -# -# Test that the slave will preserve the @@skip_replication flag in its -# own binlog. -# - -TRUNCATE t1; -sync_slave_with_master; -connection slave; -RESET MASTER; - -connection master; -SET skip_replication=0; -INSERT INTO t1 VALUES (1,0); -SET skip_replication=1; -INSERT INTO t1 VALUES (2,0); -SET skip_replication=0; -INSERT INTO t1 VALUES (3,0); - -sync_slave_with_master; -connection slave; -# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have -# applied all events. 
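
The flag driving all of this is a session variable set on the master; a minimal sketch (the table name is hypothetical):

    CREATE TABLE t_skip_demo (a INT PRIMARY KEY);
    SET SESSION skip_replication = 1;     # following statements are binlogged but flagged for skipping
    INSERT INTO t_skip_demo VALUES (1);   # dropped by a master or slave configured to filter flagged events
    SET SESSION skip_replication = 0;
    INSERT INTO t_skip_demo VALUES (2);   # replicated normally
    DROP TABLE t_skip_demo;
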
-SELECT * FROM t1 ORDER by a; - -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -let $SLAVE_DATADIR= `select @@datadir`; - -connection master; -TRUNCATE t1; - -# Now apply the slave binlog to the master, to check that both the slave -# and mysqlbinlog will preserve the @@skip_replication flag. - ---let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001 -if ($use_remote_mysqlbinlog) -{ - --let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001 - --let $use_remote_mysqlbinlog= 0 -} ---exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog ---exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog - -# The master should have all three events. -SELECT * FROM t1 ORDER by a; - -# The slave should be missing event 2, which is marked with the -# @@skip_replication flag. - -connection slave; -START SLAVE; - -connection master; -sync_slave_with_master; - -connection slave; -SELECT * FROM t1 ORDER by a; - -# -# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication -# events. -# - -connection master; -TRUNCATE t1; - -sync_slave_with_master; -connection slave; -STOP SLAVE; -# We will skip two INSERTs (in addition to any skipped due to -# @@skip_replication). Since from 5.5 every statement is wrapped in -# BEGIN ... END, we need to skip 6 events for this. -SET GLOBAL sql_slave_skip_counter=6; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE; - -connection master; -# Need to fix @@binlog_format to get consistent event count. -SET @old_binlog_format= @@binlog_format; -SET binlog_format= statement; -SET skip_replication=0; -INSERT INTO t1 VALUES (1,5); -SET skip_replication=1; -INSERT INTO t1 VALUES (2,5); -SET skip_replication=0; -INSERT INTO t1 VALUES (3,5); -INSERT INTO t1 VALUES (4,5); -SET binlog_format= @old_binlog_format; - -sync_slave_with_master; -connection slave; - -# The slave should have skipped the first three inserts (number 1 and 3 due -# to @@sql_slave_skip_counter=2, number 2 due to -# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4 -# should be left. -SELECT * FROM t1; - - -# -# Check that BINLOG statement preserves the @@skip_replication flag. -# -connection slave; -# Need row @@binlog_format for BINLOG statements containing row events. ---source include/stop_slave.inc -SET @old_slave_binlog_format= @@global.binlog_format; -SET GLOBAL binlog_format= row; ---source include/start_slave.inc - -connection master; -TRUNCATE t1; - -SET @old_binlog_format= @@binlog_format; -SET binlog_format= row; -# Format description log event. 
-BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAA371saA=='; -# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1 -BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC -wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA='; -# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0 -BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC -wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA='; -SET binlog_format= @old_binlog_format; - -SELECT * FROM t1 ORDER BY a; -sync_slave_with_master; -connection slave; -# Slave should have only the second insert, the first should be ignored due to -# the @@skip_replication flag. -SELECT * FROM t1 ORDER by a; - ---source include/stop_slave.inc -SET GLOBAL binlog_format= @old_slave_binlog_format; ---source include/start_slave.inc - - -# Test that it is not possible to change @@skip_replication inside a -# transaction or statement, thereby replicating only parts of statements -# or transactions. -connection master; -SET skip_replication=0; - -BEGIN; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=0; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -ROLLBACK; -SET skip_replication=1; -BEGIN; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=0; ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -COMMIT; -SET autocommit=0; -INSERT INTO t2(a) VALUES(100); ---error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET skip_replication=1; -ROLLBACK; -SET autocommit=1; - -SET skip_replication=1; ---delimiter | -CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END| -CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END| -CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END| ---delimiter ; ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SELECT foo(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SELECT baz(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET @a= foo(1); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -SET @a= baz(1); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -UPDATE t2 SET b=foo(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -UPDATE t2 SET b=baz(0); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -INSERT INTO t1 VALUES (101, foo(1)); ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION -INSERT INTO t1 VALUES (101, baz(0)); -SELECT @@skip_replication; -CALL bar(0); -SELECT @@skip_replication; -CALL bar(1); -SELECT @@skip_replication; -DROP FUNCTION foo; -DROP PROCEDURE bar; -DROP FUNCTION baz; - - -# Test that master-side filtering happens on the master side, and that -# slave-side filtering happens on the slave. - -# First test that events do not reach the slave when master-side filtering -# is configured. Do this by replicating first with only the IO thread running -# and master-side filtering; then change to no filtering and start the SQL -# thread. This should still skip the events, as master-side filtering -# means the events never reached the slave. 
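
Separately from the filtering-side distinction above, the BEGIN/COMMIT block earlier in this file shows that the flag cannot be switched while a transaction (or a stored routine) is active; in isolation:

    SET skip_replication = 0;
    BEGIN;
    SET skip_replication = 1;   # rejected: ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
    ROLLBACK;
    SET skip_replication = 1;   # allowed again once no transaction is open
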
-connection master; -SET skip_replication= 0; -TRUNCATE t1; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; -START SLAVE IO_THREAD; -connection master; -SET skip_replication= 1; -INSERT INTO t1(a) VALUES (1); -SET skip_replication= 0; -INSERT INTO t1(a) VALUES (2); ---source include/save_master_pos.inc -connection slave; ---source include/sync_io_with_master.inc -STOP SLAVE IO_THREAD; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; -connection master; -sync_slave_with_master; -connection slave; -# Now only the second insert of (2) should be visible, as the first was -# filtered on the master, so even though the SQL thread ran without skipping -# events, it will never see the event in the first place. -SELECT * FROM t1; - -# Now tests that when slave-side filtering is configured, events _do_ reach -# the slave. -connection master; -SET skip_replication= 0; -TRUNCATE t1; -sync_slave_with_master; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; -START SLAVE IO_THREAD; -connection master; -SET skip_replication= 1; -INSERT INTO t1(a) VALUES (1); -SET skip_replication= 0; -INSERT INTO t1(a) VALUES (2); ---source include/save_master_pos.inc -connection slave; ---source include/sync_io_with_master.inc -STOP SLAVE IO_THREAD; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; -connection master; -sync_slave_with_master; -connection slave; -# Now both inserts should be visible. Since filtering was configured to be -# slave-side, the event is in the relay log, and when the SQL thread ran we -# had disabled filtering again. -SELECT * FROM t1 ORDER BY a; - - -# Clean up. -connection master; -SET skip_replication=0; -DROP TABLE t1,t2; -connection slave; -STOP SLAVE; -SET GLOBAL replicate_events_marked_for_skip=REPLICATE; -START SLAVE; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_special_charset.inc b/mysql-test/suite/rpl/include/rpl_special_charset.inc deleted file mode 100644 index 641aa483d32..00000000000 --- a/mysql-test/suite/rpl/include/rpl_special_charset.inc +++ /dev/null @@ -1,32 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -################################################################################ -# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS -# Problem: IO thread fails to connect to master if servers are configured with -# special character sets like utf16, utf32, ucs2. -# -# Analysis: MySQL server does not support few special character sets like -# utf16,utf32 and ucs2 as "client's character set"(eg: utf16,utf32, ucs2). -# When IO thread is trying to connect to Master, it sets server's character -# set as client's character set. When Slave server is started with these -# special character sets, IO thread (a connection to Master) fails because -# of the above said reason. -# -# Fix: If server's character set is not supported as client's character set, -# then set default's client character set(latin1) as client's character set. 
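A quick illustration of the client-side restriction described above (not part of the deleted file; the exact error text varies slightly between versions):

SET NAMES utf16;    # rejected: utf16 cannot be used as character_set_client
SET NAMES ucs2;     # rejected likewise (as is utf32)
SET NAMES utf8mb4;  # any client-capable character set is accepted as usual

Because the IO thread sets the server's own character set as its client character set when it connects to the master, a server running with utf16, utf32 or ucs2 needed the latin1 fallback that this test verifies.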
-############################################################################### ---source include/master-slave.inc -call mtr.add_suppression("'utf16' can not be used as client character set"); -CREATE TABLE t1(i VARCHAR(20)); -INSERT INTO t1 VALUES (0xFFFF); ---sync_slave_with_master ---let diff_tables=master:t1, slave:t1 ---source include/diff_tables.inc -# Cleanup ---connection master -DROP TABLE t1; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_sporadic_master.inc b/mysql-test/suite/rpl/include/rpl_sporadic_master.inc deleted file mode 100644 index ad4c44cbf74..00000000000 --- a/mysql-test/suite/rpl/include/rpl_sporadic_master.inc +++ /dev/null @@ -1,32 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# test to see if replication can continue when master sporadically fails on -# COM_BINLOG_DUMP and additionally limits the number of events per dump - -source include/master-slave.inc; - -create table t2(n int); -create table t1(n int not null auto_increment primary key); -insert into t1 values (NULL),(NULL); -truncate table t1; -# We have to use 4 in the following to make this test work with all table types -insert into t1 values (4),(NULL); -sync_slave_with_master; ---source include/stop_slave.inc ---source include/start_slave.inc -connection master; -insert into t1 values (NULL),(NULL); -flush logs; -truncate table t1; -insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL); -sync_slave_with_master; -select * from t1 ORDER BY n; -connection master; -drop table t1,t2; -sync_slave_with_master; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_ssl.inc b/mysql-test/suite/rpl/include/rpl_ssl.inc deleted file mode 100644 index 59a2af9f137..00000000000 --- a/mysql-test/suite/rpl/include/rpl_ssl.inc +++ /dev/null @@ -1,116 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -source include/have_ssl_communication.inc; -source include/master-slave.inc; -source include/no_valgrind_without_big.inc; - -# create a user for replication that requires ssl encryption -connection master; -create user replssl@localhost; -grant replication slave on *.* to replssl@localhost require ssl; -create table t1 (t int auto_increment, KEY(t)); - -sync_slave_with_master; - -# Set slave to use SSL for connection to master -stop slave; ---replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR -eval change master to - master_user='replssl', - master_password='', - master_ssl=1, - master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem', - master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem', - master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem'; -start slave; - -# Switch to master and insert one record, then sync it to slave -connection master; -insert into t1 values(1); -sync_slave_with_master; - -# The record should now be on slave -select * from t1; - -# The slave is synced and waiting/reading from master -# SHOW SLAVE STATUS will show "Waiting for master to send event" -let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key; -source include/show_slave_status.inc; -source include/check_slave_is_running.inc; - -# Stop the slave, as reported in bug#21871 it would hang -STOP SLAVE; - -select * from t1; - -# Do the same thing a number of times -disable_query_log; -disable_result_log; -# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows -# After discussions with Engineering, I'm disabling this part of the test to avoid it causing -# red trees. -disable_parsing; -let $i= 100; -while ($i) -{ - start slave; - connection master; - insert into t1 values (NULL); - select * from t1; # Some variance - connection slave; - select * from t1; # Some variance - stop slave; - dec $i; -} -enable_parsing; -START SLAVE; -enable_query_log; -enable_result_log; -connection master; -# INSERT one more record to make sure -# the sync has something to do -insert into t1 values (NULL); -let $master_count= `select count(*) from t1`; - -sync_slave_with_master; ---source include/wait_for_slave_to_start.inc -source include/show_slave_status.inc; -source include/check_slave_is_running.inc; - -let $slave_count= `select count(*) from t1`; - -if ($slave_count != $master_count) -{ - echo master and slave differed in number of rows; - echo master: $master_count; - echo slave: $slave_count; - - connection master; - select count(*) t1; - select * from t1; - connection slave; - select count(*) t1; - select * from t1; - query_vertical show slave status; -} - -connection master; -drop user replssl@localhost; -drop table t1; -sync_slave_with_master; - ---source include/stop_slave.inc -CHANGE MASTER TO - master_user = 'root', - master_ssl = 0, - master_ssl_ca = '', - master_ssl_cert = '', - master_ssl_key = ''; - ---echo End of 5.0 tests ---let $rpl_only_running_threads= 1 ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc b/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc deleted file mode 100644 index 654a5d47cb9..00000000000 --- a/mysql-test/suite/rpl/include/rpl_stm_relay_ign_space.inc +++ /dev/null @@ -1,107 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - -# -# BUG#12400313 / BUG#64503 test case -# -# -# Description -# ----------- -# -# This test case starts the slave server with: -# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096 -# -# Then it issues some queries that will cause the slave to reach -# relay-log-space-limit. We lock the table so that the SQL thread is -# not able to purge the log and then we issue some more statements. -# -# The purpose is to show that the IO thread will honor the limits -# while the SQL thread is not able to purge the relay logs, which did -# not happen before this patch. In addition we assert that while -# ignoring the limit (SQL thread needs to rotate before purging), the -# IO thread does not do it in an uncontrolled manner. - ---source include/have_binlog_format_statement.inc ---source include/have_innodb.inc ---source include/master-slave.inc - ---disable_query_log -CREATE TABLE t1 (c1 TEXT) engine=InnoDB; - -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); - ---sync_slave_with_master - -# wait for the SQL thread to sleep ---let $show_statement= SHOW PROCESSLIST ---let $field= State ---let $condition= = 'Slave has read all relay log; waiting for more updates' ---source include/wait_show_condition.inc - -# now the io thread has set rli->ignore_space_limit -# lets lock the table so that once the SQL thread awakes -# it blocks there and does not set rli->ignore_space_limit -# back to zero -LOCK TABLE t1 WRITE; - -# now issue more statements that will overflow the -# rli->log_space_limit (in this case ~10K) ---connection master - -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); -INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); - ---connection slave - -# ASSERT that the IO thread waits for the SQL thread to release some -# space before continuing ---let $show_statement= SHOW PROCESSLIST ---let $field= State ---let $condition= LIKE 'Waiting for %' -# before the patch (IO would have transfered everything) -#--let $condition= = 'Waiting for master to send event' -# after the patch (now it waits for space to be freed) -#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space' ---source include/wait_show_condition.inc - -# without the patch we can uncomment the following two lines and -# watch the IO thread synchronize with the master, thus writing -# relay logs way over the space limit -#--connection master -#--source include/sync_slave_io_with_master.inc - -## ASSERT that the IO thread has honored the limit+few bytes required to be able to purge ---let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1) ---let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1) ---let $assert_text= Assert that relay log space is close to the limit ---let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15 ---source include/assert.inc - -# unlock the table and let SQL thread continue applying events -UNLOCK TABLES; - ---connection master ---sync_slave_with_master ---let $diff_tables=master:test.t1,slave:test.t1 ---source include/diff_tables.inc - ---connection master -DROP TABLE t1; ---enable_query_log ---sync_slave_with_master - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc b/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc deleted file mode 100644 index 31b80732c60..00000000000 --- a/mysql-test/suite/rpl/include/rpl_switch_stm_row_mixed.inc +++ /dev/null @@ -1,633 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -# -# rpl_switch_stm_row_mixed tests covers -# -# - Master is switching explicitly between STATEMENT, ROW, and MIXED -# binlog format showing when it is possible and when not. 
-# - Master switching from MIXED to RBR implicitly listing all use -# cases, e.g a query invokes UUID(), thereafter to serve as the -# definition of MIXED binlog format -# - correctness of execution - - --- source include/have_binlog_format_mixed_or_row.inc --- source include/master-slave.inc - -# Since this test generates row-based events in the binary log, the -# slave SQL thread cannot be in STATEMENT mode to execute this test, -# so we only execute it for MIXED and ROW as default value of -# BINLOG_FORMAT. - -connection slave; - -connection master; ---disable_warnings -drop database if exists mysqltest1; -create database mysqltest1; ---enable_warnings -use mysqltest1; - -# Save binlog format -set @my_binlog_format= @@global.binlog_format; - -# play with switching -set session binlog_format=mixed; -show session variables like "binlog_format%"; -set session binlog_format=statement; -show session variables like "binlog_format%"; -set session binlog_format=row; -show session variables like "binlog_format%"; - -set global binlog_format=DEFAULT; -show global variables like "binlog_format%"; -set global binlog_format=MIXED; -show global variables like "binlog_format%"; -set global binlog_format=STATEMENT; -show global variables like "binlog_format%"; -set global binlog_format=ROW; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; - -CREATE TABLE t1 (a varchar(100)); - -prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency_1_"; -insert into t1 values("work_2_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work_3_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_4_",UUID())); -insert into t1 select "yesterday_5_"; - -# verify that temp tables prevent a switch to SBR -create temporary table tmp(a char(100)); -insert into tmp values("see_6_"); ---error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR -set binlog_format=statement; -insert into t1 select * from tmp; -drop temporary table tmp; - -# Now we go to SBR -set binlog_format=statement; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; -set global binlog_format=statement; -show global variables like "binlog_format%"; -show session variables like "binlog_format%"; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency_7_"; -insert into t1 values("work_8_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work_9_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values("for_10_"); -insert into t1 select "yesterday_11_"; - -# test statement (is not default after wl#3368) -set binlog_format=statement; -select @@global.binlog_format, @@session.binlog_format; -set global binlog_format=statement; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select ?'; -set @string="emergency_12_"; -insert into t1 values("work_13_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values("work_14_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values("for_15_"); 
-insert into t1 select "yesterday_16_"; - -# and now the mixed mode - -set global binlog_format=mixed; -select @@global.binlog_format, @@session.binlog_format; -set binlog_format=default; -select @@global.binlog_format, @@session.binlog_format; - -prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; -set @string="emergency_17_"; -insert into t1 values("work_18_"); -execute stmt1 using @string; -deallocate prepare stmt1; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work_19_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_20_",UUID())); -insert into t1 select "yesterday_21_"; - -prepare stmt1 from 'insert into t1 select ?'; -insert into t1 values(concat(UUID(),"work_22_")); -execute stmt1 using @string; -deallocate prepare stmt1; - -insert into t1 values(concat("for_23_",UUID())); -insert into t1 select "yesterday_24_"; - -# Test of CREATE TABLE SELECT - -create table t2 ENGINE=MyISAM select rpad(UUID(),100,' '); -create table t3 select 1 union select UUID(); ---disable_warnings -SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR -create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3); ---enable_warnings -SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR -create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); -# what if UUID() is first: ---disable_warnings -insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); ---enable_warnings - -# inside a stored procedure - -delimiter |; -create procedure foo() -begin -insert into t1 values("work_25_"); -insert into t1 values(concat("for_26_",UUID())); -insert into t1 select "yesterday_27_"; -end| -create procedure foo2() -begin -insert into t1 values(concat("emergency_28_",UUID())); -insert into t1 values("work_29_"); -insert into t1 values(concat("for_30_",UUID())); -set session binlog_format=row; # accepted for stored procs -insert into t1 values("more work_31_"); -set session binlog_format=mixed; -end| -create function foo3() returns bigint unsigned -begin - set session binlog_format=row; # rejected for stored funcs - insert into t1 values("alarm"); - return 100; -end| -create procedure foo4(x varchar(100)) -begin -insert into t1 values(concat("work_250_",x)); -insert into t1 select "yesterday_270_"; -end| -delimiter ;| -call foo(); -call foo2(); -call foo4("hello"); -call foo4(UUID()); -call foo4("world"); - -# test that can't SET in a stored function ---error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT -select foo3(); -select * from t1 where a="alarm"; - -# Tests of stored functions/triggers/views for BUG#20930 "Mixed -# binlogging mode does not work with stored functions, triggers, -# views" - -# Function which calls procedure -drop function foo3; -delimiter |; -create function foo3() returns bigint unsigned -begin - insert into t1 values("foo3_32_"); - call foo(); - return 100; -end| -delimiter ;| -insert into t2 select foo3(); - -prepare stmt1 from 'insert into t2 select foo3()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# Test if stored function calls stored function which calls procedure -# which requires row-based. 
- -delimiter |; -create function foo4() returns bigint unsigned -begin - insert into t2 select foo3(); - return 100; -end| -delimiter ;| -select foo4(); - -prepare stmt1 from 'select foo4()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# A simple stored function -delimiter |; -create function foo5() returns bigint unsigned -begin - insert into t2 select UUID(); - return 100; -end| -delimiter ;| -select foo5(); - -prepare stmt1 from 'select foo5()'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# A simple stored function where UUID() is in the argument -delimiter |; -create function foo6(x varchar(100)) returns bigint unsigned -begin - insert into t2 select x; - return 100; -end| -delimiter ;| -select foo6("foo6_1_"); -select foo6(concat("foo6_2_",UUID())); - -prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))'; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - - -# Test of views using UUID() - -create view v1 as select uuid(); -create table t11 (data varchar(255)); -insert into t11 select * from v1; -# Test of querying INFORMATION_SCHEMA which parses the view's body, -# to verify that it binlogs statement-based (is not polluted by -# the parsing of the view's body). -insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); -prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; -execute stmt1; -execute stmt1; -deallocate prepare stmt1; - -# Test of triggers with UUID() -delimiter |; -create trigger t11_bi before insert on t11 for each row -begin - set NEW.data = concat(NEW.data,UUID()); -end| -delimiter ;| -insert into t11 values("try_560_"); - -# Test that INSERT DELAYED works in mixed mode (BUG#20649) -insert delayed into t2 values("delay_1_"); -insert delayed into t2 values(concat("delay_2_",UUID())); -insert delayed into t2 values("delay_6_"); - -# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not -# replicate fine in statement-based ; we test that in mixed mode it -# works). -insert delayed into t2 values(rand()); -set @a=2.345; -insert delayed into t2 values(@a); - -# With INSERT DELAYED, rows are written to the binlog after they are -# written to the table. Therefore, it is not enough to wait until the -# rows make it to t2 on the master (the rows may not be in the binlog -# at that time, and may still not be in the binlog when -# sync_slave_with_master is later called). Instead, we wait until the -# rows make it to t2 on the slave. We first call -# sync_slave_with_master, so that we are sure that t2 has been created -# on the slave. -sync_slave_with_master; -let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2; ---source include/wait_condition.inc -connection master; - -# If you want to do manual testing of the mixed mode regarding UDFs (not -# testable automatically as quite platform- and compiler-dependent), -# you just need to set the variable below to 1, and to -# "make udf_example.so" in sql/, and to copy sql/udf_example.so to -# MYSQL_TEST_DIR/lib/mysql. 
-let $you_want_to_test_UDF=0; -if ($you_want_to_test_UDF) -{ - CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so'; - prepare stmt1 from 'insert into t1 select metaphon(?)'; - set @string="emergency_133_"; - insert into t1 values("work_134_"); - execute stmt1 using @string; - deallocate prepare stmt1; - prepare stmt1 from 'insert into t1 select ?'; - insert into t1 values(metaphon("work_135_")); - execute stmt1 using @string; - deallocate prepare stmt1; - insert into t1 values(metaphon("for_136_")); - insert into t1 select "yesterday_137_"; - create table t6 select metaphon("for_138_"); - create table t7 select 1 union select metaphon("for_139_"); - create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3); - create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); -} - -create table t20 select * from t1; # save for comparing later -create table t21 select * from t2; -create table t22 select * from t3; -drop table t1,t2,t3; - -# This tests the fix to -# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog -# We verify that under the mixed binlog mode, a stored function -# modifying at least two tables having an auto_increment column, -# is binlogged row-based. Indeed in statement-based binlogging, -# only the auto_increment value generated for the first table -# is recorded in the binlog, the value generated for the 2nd table -# lacking. - -create table t1 (a int primary key auto_increment, b varchar(100)); -create table t2 (a int primary key auto_increment, b varchar(100)); -create table t3 (b varchar(100)); -delimiter |; -create function f (x varchar(100)) returns int deterministic -begin - insert into t1 values(null,x); - insert into t2 values(null,x); - return 1; -end| -delimiter ;| -select f("try_41_"); -# Two operations which compensate each other except that their net -# effect is that they advance the auto_increment counter of t2 on slave: -sync_slave_with_master; -use mysqltest1; -insert into t2 values(2,null),(3,null),(4,null); -delete from t2 where a>=2; - -connection master; -# this is the call which didn't replicate well -select f("try_42_"); -sync_slave_with_master; - -# now use prepared statement and test again, just to see that the RBB -# mode isn't set at PREPARE but at EXECUTE. 
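The comment above makes a point worth spelling out: under MIXED format the statement-versus-row decision is taken when a statement is executed, not when it is prepared. A hedged sketch with a hypothetical table t, separate from the t1/t2 used by this test:

PREPARE ins FROM 'INSERT INTO t(b) VALUES (UUID())';  # nothing is logged or decided yet
EXECUTE ins;            # UUID() makes the statement unsafe, so this execution is logged row-based
EXECUTE ins;            # the decision is taken again for every execution
DEALLOCATE PREPARE ins;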
- -insert into t2 values(3,null),(4,null); -delete from t2 where a>=3; - -connection master; -prepare stmt1 from 'select f(?)'; -set @string="try_43_"; -insert into t1 values(null,"try_44_"); # should be SBB -execute stmt1 using @string; # should be RBB -deallocate prepare stmt1; -sync_slave_with_master; - -# verify that if only one table has auto_inc, it does not trigger RBB -# (we'll check in binlog further below) - -connection master; -create table t12 select * from t1; # save for comparing later -drop table t1; -create table t1 (a int, b varchar(100), key(a)); -select f("try_45_"); - -# restore table's key -create table t13 select * from t1; -drop table t1; -create table t1 (a int primary key auto_increment, b varchar(100)); - -# now test if it's two functions, each of them inserts in one table - -drop function f; -# we need a unique key to have sorting of rows by mysqldump -create table t14 (unique (a)) select * from t2; -truncate table t2; -delimiter |; -create function f1 (x varchar(100)) returns int deterministic -begin - insert into t1 values(null,x); - return 1; -end| -create function f2 (x varchar(100)) returns int deterministic -begin - insert into t2 values(null,x); - return 1; -end| -delimiter ;| -select f1("try_46_"),f2("try_47_"); - -sync_slave_with_master; -insert into t2 values(2,null),(3,null),(4,null); -delete from t2 where a>=2; - -connection master; -# Test with SELECT and INSERT -select f1("try_48_"),f2("try_49_"); -insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); -sync_slave_with_master; - -# verify that if f2 does only read on an auto_inc table, this does not -# switch to RBB -connection master; -drop function f2; -delimiter |; -create function f2 (x varchar(100)) returns int deterministic -begin - declare y int; - insert into t1 values(null,x); - set y = (select count(*) from t2); - return y; -end| -delimiter ;| -select f1("try_53_"),f2("try_54_"); -sync_slave_with_master; - -# And now, a normal statement with a trigger (no stored functions) - -connection master; -drop function f2; -delimiter |; -create trigger t1_bi before insert on t1 for each row -begin - insert into t2 values(null,"try_55_"); -end| -delimiter ;| -insert into t1 values(null,"try_56_"); -# and now remove one auto_increment and verify SBB -alter table t1 modify a int, drop primary key; -insert into t1 values(null,"try_57_"); -sync_slave_with_master; - -# Test for BUG#20499 "mixed mode with temporary table breaks binlog" -# Slave used to have only 2 rows instead of 3. 
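Before the BUG#20499 hunk that follows, a short sketch of the behaviour it checks, using illustrative table names rather than the t15/t16 of the test and assuming MIXED format on the master:

CREATE TEMPORARY TABLE tmp1 SELECT UUID() AS u;  # unsafe statement: the session switches to row
                                                 # format, and the temporary table's content is
                                                 # never replicated to the slave
CREATE TABLE base1 LIKE tmp1;
INSERT INTO base1 SELECT * FROM tmp1;   # row-logged: the slave has no tmp1 to select from
INSERT INTO base1 VALUES ('plain');     # the hunk below checks this is still row-logged (RBB)
DROP TEMPORARY TABLE tmp1;
INSERT INTO base1 VALUES ('plain2');    # and that this one is statement-logged again (SBB)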
-connection master; -CREATE TEMPORARY TABLE t15 SELECT UUID(); -create table t16 like t15; -INSERT INTO t16 SELECT * FROM t15; -# we'll verify that this one is done RBB -insert into t16 values("try_65_"); -drop table t15; -# we'll verify that this one is done SBB -insert into t16 values("try_66_"); -sync_slave_with_master; - -# and now compare: - -connection master; - -# first check that data on master is sensible -select count(*) from t1; -select count(*) from t2; -select count(*) from t3; -select count(*) from t4; -select count(*) from t5; -select count(*) from t11; -select count(*) from t20; -select count(*) from t21; -select count(*) from t22; -select count(*) from t12; -select count(*) from t13; -select count(*) from t14; -select count(*) from t16; -if ($you_want_to_test_UDF) -{ - select count(*) from t6; - select count(*) from t7; - select count(*) from t8; - select count(*) from t9; -} - -sync_slave_with_master; - -# -# Bug#20863 If binlog format is changed between update and unlock of -# tables, wrong binlog -# - -connection master; -DROP TABLE IF EXISTS t11; -SET SESSION BINLOG_FORMAT=STATEMENT; -CREATE TABLE t11 (song VARCHAR(255)); -LOCK TABLES t11 WRITE; -SET SESSION BINLOG_FORMAT=ROW; -INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); -SET SESSION BINLOG_FORMAT=STATEMENT; -INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); -UNLOCK TABLES; - ---query_vertical SELECT * FROM t11 -sync_slave_with_master; -USE mysqltest1; ---query_vertical SELECT * FROM t11 - -connection master; -DROP TABLE IF EXISTS t12; -SET SESSION BINLOG_FORMAT=MIXED; -CREATE TABLE t12 (data LONG); -LOCK TABLES t12 WRITE; -INSERT INTO t12 VALUES(UUID()); -UNLOCK TABLES; -sync_slave_with_master; - -# -# BUG#28086: SBR of USER() becomes corrupted on slave -# - -connection master; - -# Just to get something that is non-trivial, albeit still simple, we -# stuff the result of USER() and CURRENT_USER() into a variable. ---delimiter $$ -CREATE FUNCTION my_user() - RETURNS CHAR(64) -BEGIN - DECLARE user CHAR(64); - SELECT USER() INTO user; - RETURN user; -END $$ ---delimiter ; - ---delimiter $$ -CREATE FUNCTION my_current_user() - RETURNS CHAR(64) -BEGIN - DECLARE user CHAR(64); - SELECT CURRENT_USER() INTO user; - RETURN user; -END $$ ---delimiter ; - -DROP TABLE IF EXISTS t13; -CREATE TABLE t13 (data CHAR(64)); -INSERT INTO t13 VALUES (USER()); -INSERT INTO t13 VALUES (my_user()); -INSERT INTO t13 VALUES (CURRENT_USER()); -INSERT INTO t13 VALUES (my_current_user()); - -sync_slave_with_master; - -# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID ---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql ---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql - -# Let's compare. Note: If they match test will pass, if they do not match -# the test will show that the diff statement failed and not reject file -# will be created. 
You will need to go to the mysql-test dir and diff -# the files your self to see what is not matching - -diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; - -connection master; - -# Now test that mysqlbinlog works fine on a binlog generated by the -# mixed mode - -# BUG#11312 "DELIMITER is not written to the binary log that causes -# syntax error" makes that mysqlbinlog will fail if we pass it the -# text of queries; this forces us to use --base64-output here. - -# BUG#20929 "BINLOG command causes invalid free plus assertion -# failure" makes mysqld segfault when receiving --base64-output - -# So I can't enable this piece of test -# SIGH - -if ($enable_when_11312_or_20929_fixed) -{ ---exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql -drop database mysqltest1; ---exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql ---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql -# the old mysqldump output on slave is the same as what it was on -# master before restoring on master. -diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; -} - -drop database mysqltest1; -sync_slave_with_master; - -connection master; -# Restore binlog format setting -set global binlog_format =@my_binlog_format; ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_sync_test.inc b/mysql-test/suite/rpl/include/rpl_sync_test.inc deleted file mode 100644 index 1e2ec2ca83b..00000000000 --- a/mysql-test/suite/rpl/include/rpl_sync_test.inc +++ /dev/null @@ -1,159 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - -######################################################################################## -# This test verifies the options --sync-relay-log-info and --relay-log-recovery by -# crashing the slave in two different situations: -# (case-1) - Corrupt the relay log with changes which were not processed by -# the SQL Thread and crashes it. -# (case-2) - Corrupt the master.info with wrong coordinates and crashes it. -# -# Case 1: -# 1 - Stops the SQL Thread -# 2 - Inserts new records into the master. -# 3 - Corrupts the relay-log.bin* which most likely has such changes. -# 4 - Crashes the slave -# 5 - Verifies if the slave is sync with the master which means that the information -# loss was circumvented by the recovery process. -# -# Case 2: -# 1 - Stops the SQL/IO Threads -# 2 - Inserts new records into the master. -# 3 - Corrupts the master.info with wrong coordinates. -# 4 - Crashes the slave -# 5 - Verifies if the slave is sync with the master which means that the information -# loss was circumvented by the recovery process. 
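The two options named in this description are server options on the replica side; the test drives them by crashing the server (via debug_dbug) and restarting it. Purely as an illustration of what they control, on a running replica they can be inspected like this (not part of the deleted file):

SHOW GLOBAL VARIABLES LIKE 'relay_log_recovery';   # ON: discard unprocessed relay logs on restart
                                                   # and re-fetch from the last executed position
SHOW GLOBAL VARIABLES LIKE 'sync_relay_log_info';  # how often relay-log.info is synced to disk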
-######################################################################################## - -######################################################################################## -# Configuring the environment -######################################################################################## ---echo =====Configuring the enviroment=======; ---source include/not_embedded.inc ---source include/not_valgrind.inc ---source include/have_debug.inc ---source include/have_innodb.inc ---source include/not_crashrep.inc ---source include/master-slave.inc - -call mtr.add_suppression('Attempting backtrace'); -call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001"); -# Use innodb so we do not get "table should be repaired" issues. -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -flush tables; -CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb; - -insert into t1(a) values(1); -insert into t1(a) values(2); -insert into t1(a) values(3); - -######################################################################################## -# Case 1: Corrupt a relay-log.bin* -######################################################################################## ---echo =====Inserting data on the master but without the SQL Thread being running=======; -sync_slave_with_master; - -connection slave; -let $MYSQLD_SLAVE_DATADIR= `select @@datadir`; ---replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR ---copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup ---source include/stop_slave_sql.inc - -connection master; -insert into t1(a) values(4); -insert into t1(a) values(5); -insert into t1(a) values(6); - ---echo =====Removing relay log files and crashing/recoverying the slave=======; -connection slave; ---source include/stop_slave_io.inc - -let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1); - ---let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file -perl; -$file= $ENV{'FILE_TO_CORRUPT'}; -open(FILE, ">$file") || die "Unable to open $file."; -truncate(FILE,0); -print FILE "failure"; -close ($file); -EOF - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect -SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; ---error 2013 -FLUSH LOGS; - ---let $rpl_server_number= 2 ---source include/rpl_reconnect.inc - ---echo =====Dumping and comparing tables=======; ---source include/start_slave.inc - -connection master; -sync_slave_with_master; - -let $diff_tables=master:t1,slave:t1; -source include/diff_tables.inc; - -######################################################################################## -# Case 2: Corrupt a master.info -######################################################################################## ---echo =====Corrupting the master.info=======; -connection slave; ---source include/stop_slave.inc - -connection master; -FLUSH LOGS; - -insert into t1(a) values(7); -insert into t1(a) values(8); -insert into t1(a) values(9); - -connection slave; -let MYSQLD_SLAVE_DATADIR=`select @@datadir`; - ---perl -use strict; -use warnings; -my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup"; -my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info"; -open(FILE, "<", $src) or die; -my @content= <FILE>; -close FILE; -open(FILE, ">", $dst) or die; -binmode FILE; -print FILE @content; -close FILE; -EOF - ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect -SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; ---error 2013 -FLUSH LOGS; - ---let $rpl_server_number= 2 ---source include/rpl_reconnect.inc - ---echo 
=====Dumping and comparing tables=======; ---source include/start_slave.inc - -connection master; -sync_slave_with_master; - -let $diff_tables=master:t1,slave:t1; -source include/diff_tables.inc; - -######################################################################################## -# Clean up -######################################################################################## ---echo =====Clean up=======; -connection master; -drop table t1; - ---remove_file $MYSQLD_SLAVE_DATADIR/master.backup ---source include/rpl_end.inc - diff --git a/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc b/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc deleted file mode 100644 index 6728ff55d6f..00000000000 --- a/mysql-test/suite/rpl/include/rpl_temporal_format_default_to_default.inc +++ /dev/null @@ -1,82 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). -# Please check all dependent tests after modifying it -# - ---source include/master-slave.inc - -if ($force_master_mysql56_temporal_format) -{ - connection master; - eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format; -} - -if ($force_slave_mysql56_temporal_format) -{ - connection slave; - eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format; -} - -connection master; -SELECT @@global.mysql56_temporal_format AS on_master; -connection slave; -SELECT @@global.mysql56_temporal_format AS on_slave; -connection master; - -CREATE TABLE t1 -( - c0 TIME(0), - c1 TIME(1), - c2 TIME(2), - c3 TIME(3), - c4 TIME(4), - c5 TIME(5), - c6 TIME(6) -); -CREATE TABLE t2 -( - c0 TIMESTAMP(0), - c1 TIMESTAMP(1), - c2 TIMESTAMP(2), - c3 TIMESTAMP(3), - c4 TIMESTAMP(4), - c5 TIMESTAMP(5), - c6 TIMESTAMP(6) -); - -CREATE TABLE t3 -( - c0 DATETIME(0), - c1 DATETIME(1), - c2 DATETIME(2), - c3 DATETIME(3), - c4 DATETIME(4), - c5 DATETIME(5), - c6 DATETIME(6) -); -INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111'); -INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); -INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); -SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES -WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; -sync_slave_with_master; - -connection slave; ---query_vertical SELECT * FROM t1; ---query_vertical SELECT * FROM t2; ---query_vertical SELECT * FROM t3; -SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES -WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; - -connection master; -DROP TABLE t1; -DROP TABLE t2; -DROP TABLE t3; - -connection slave; -SET @@global.mysql56_temporal_format=DEFAULT; -connection master; -SET @@global.mysql56_temporal_format=DEFAULT; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_typeconv.inc b/mysql-test/suite/rpl/include/rpl_typeconv.inc deleted file mode 100644 index 9e566258882..00000000000 --- a/mysql-test/suite/rpl/include/rpl_typeconv.inc +++ /dev/null @@ -1,78 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption suite). 
-# Please check all dependent tests after modifying it -# - ---source include/have_binlog_format_row.inc ---source include/master-slave.inc - -connection slave; -set @saved_slave_type_conversions = @@global.slave_type_conversions; -CREATE TABLE type_conversions ( - TestNo INT AUTO_INCREMENT PRIMARY KEY, - Source TEXT, - Target TEXT, - Flags TEXT, - On_Master LONGTEXT, - On_Slave LONGTEXT, - Expected LONGTEXT, - Compare INT, - Error TEXT); - -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; -SELECT @@global.slave_type_conversions; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; -SELECT @@global.slave_type_conversions; ---error ER_WRONG_VALUE_FOR_VAR -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT'; -SELECT @@global.slave_type_conversions; - -# Checking strict interpretation of type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; -source suite/rpl/include/type_conversions.test; - -# Checking lossy integer type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; -source suite/rpl/include/type_conversions.test; - -# Checking non-lossy integer type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; -source suite/rpl/include/type_conversions.test; - -# Checking all type conversions -connection slave; -SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; -source suite/rpl/include/type_conversions.test; - -connection slave; ---echo **** Result of conversions **** -disable_query_log; -SELECT RPAD(Source, 15, ' ') AS Source_Type, - RPAD(Target, 15, ' ') AS Target_Type, - RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags, - IF(Compare IS NULL AND Error IS NOT NULL, '<Correct error>', - IF(Compare, '<Correct value>', - CONCAT("'", On_Slave, "' != '", Expected, "'"))) - AS Value_On_Slave - FROM type_conversions; -enable_query_log; -DROP TABLE type_conversions; - -call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. 
cannot be converted from type.* error.* 1677"); - -connection master; -DROP TABLE t1; -sync_slave_with_master; - -set global slave_type_conversions = @saved_slave_type_conversions; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_loaddatalocal.result b/mysql-test/suite/rpl/r/rpl_loaddata_local.result index f0d24df2cb2..f0d24df2cb2 100644 --- a/mysql-test/suite/rpl/r/rpl_loaddatalocal.result +++ b/mysql-test/suite/rpl/r/rpl_loaddata_local.result diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result new file mode 100644 index 00000000000..48672651c36 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_error_stop.result @@ -0,0 +1,81 @@ +include/rpl_init.inc [topology=1->2] +call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit"); +call mtr.add_suppression("Slave: Duplicate entry '99'"); +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1,1); +INSERT INTO t1 VALUES(2,1); +INSERT INTO t1 VALUES(3,1); +INSERT INTO t1 VALUES(4,1); +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET @old_debug_dbug = @@global.debug_dbug; +SET @@global.debug_dbug = "d,hold_worker2_favor_worker3"; +SET GLOBAL slave_parallel_threads=4; +CHANGE MASTER TO master_use_gtid=slave_pos; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='optimistic'; +connection server_1; +SET @@gtid_seq_no = 2001; +BEGIN; +UPDATE t1 SET b = 11 WHERE a = 4; +UPDATE t1 SET b = 11 WHERE a = 3; +UPDATE t1 SET a = 99 WHERE a = 1; +COMMIT; +UPDATE t1 SET b = 2 WHERE a = 2; +UPDATE t1 SET b = 3 WHERE a = 3; +DROP TABLE IF EXISTS phantom_1; +Warnings: +Note 1051 Unknown table 'test.phantom_1' +include/save_master_gtid.inc +connect slave_local_0, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 set b = 11 where a = 4; +connect slave_local_1, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +INSERT INTO t1 VALUES (99, 11); +connect slave_local_2, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 SET b = 12 WHERE a = 2; +connect slave_local_3, 127.0.0.1, root,, test, $SLAVE_MYPORT,; +begin; +UPDATE t1 SET b = 13 WHERE a = 3; +connection server_2; +include/start_slave.inc +# W4 is waiting to start its DROP +connection slave_local_3; +rollback; +connection slave_local_0; +rollback; +SELECT count(*) = 0 as "W3 undid its commit state" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%"; +W3 undid its commit state +1 +connection slave_local_2; +rollback; +connection slave_local_1; +commit; +SELECT COUNT(*) = 1 as "W4 remains with the same status" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%"; +W4 remains with the same status +1 +# Slave_SQL_Running YES = Yes +# while W2 is held back ... 
+SET DEBUG_SYNC = 'now SIGNAL cont_worker2'; +include/wait_for_slave_sql_error.inc [errno=1062] +DELETE FROM t1 WHERE a=99; +include/start_slave.inc +include/sync_with_master_gtid.inc +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET @@global.debug_dbug = @old_debug_dbug; +SET debug_sync = RESET; +include/start_slave.inc +connection server_1; +DROP TABLE t1; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result index a0ea06afa89..c303abc672c 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result @@ -7,8 +7,6 @@ SET @@global.rpl_semi_sync_master_timeout = 60000; SET @@global.rpl_semi_sync_master_wait_no_slave = 1; # It's okay to see "Killed" but we should not see "Timeout" in the log. call mtr.add_suppression("Killed waiting for reply of binlog"); -call mtr.add_suppression("Run function 'after_commit' in plugin 'rpl_semi_sync_master' failed"); -call mtr.add_suppression("Run function 'after_sync' in plugin 'rpl_semi_sync_master' failed"); # # Test wait point = AFTER_COMMIT # diff --git a/mysql-test/suite/rpl/t/rpl_binlog_errors.test b/mysql-test/suite/rpl/t/rpl_binlog_errors.test index 30faaf79613..bf92736a2af 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_errors.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_errors.test @@ -1 +1,438 @@ ---source include/rpl_binlog_errors.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# +# Usage: +# --let $binlog_limit= X[,Y] # optional +# +# Semantics of the value is the same as in include/show_binlog_events.inc +# which the script calls as a part of the test flow. +# The goal is to print the event demonstrating the triggered error, +# so normally Y should be 1 (print the exact event only); +# however, depending on test-specific server options, the offset X +# can be different. +# + +# BUG#46166: MYSQL_BIN_LOG::new_file_impl is not propagating error +# when generating new name. +# +# WHY +# === +# +# We want to check whether error is reported or not when +# new_file_impl fails (this may happen when rotation is not +# possible because there is some problem finding an +# unique filename). +# +# HOW +# === +# +# Test cases are documented inline. 
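As the usage note above says, a caller can pre-set $binlog_limit before sourcing this file to match its own server options; when it is left unset the file falls back to 4,1 (see TEST #7 below). A minimal sketch of such a wrapper in another suite, with an illustrative path and offset that are not taken from this commit:

    # hypothetical wrapper test, e.g. in the binlog_encryption suite
    --let $binlog_limit= 5,1
    --source suite/rpl/t/rpl_binlog_errors.test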
+ +-- source include/have_innodb.inc +-- source include/have_debug.inc +-- source include/master-slave.inc + +-- echo ####################################################################### +-- echo ####################### PART 1: MASTER TESTS ########################## +-- echo ####################################################################### + + +### ACTION: stopping slave as it is not needed for the first part of +### the test + +-- connection slave +-- source include/stop_slave.inc +-- connection master + +call mtr.add_suppression("Can't generate a unique log-filename"); +call mtr.add_suppression("Writing one row to the row-based binary log failed.*"); +call mtr.add_suppression("Error writing file .*"); +call mtr.add_suppression("Could not use master-bin for logging"); + +SET @old_debug= @@global.debug_dbug; + +### ACTION: create a large file (> 4096 bytes) that will be later used +### in LOAD DATA INFILE to check binlog errors in its vacinity +-- let $load_file= $MYSQLTEST_VARDIR/tmp/bug_46166.data +-- let $MYSQLD_DATADIR= `select @@datadir` +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval SELECT repeat('x',8192) INTO OUTFILE '$load_file' + +### ACTION: create a small file (< 4096 bytes) that will be later used +### in LOAD DATA INFILE to check for absence of binlog errors +### when file loading this file does not force flushing and +### rotating the binary log +-- let $load_file2= $MYSQLTEST_VARDIR/tmp/bug_46166-2.data +-- let $MYSQLD_DATADIR= `select @@datadir` +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval SELECT repeat('x',10) INTO OUTFILE '$load_file2' + +RESET MASTER; + +-- echo ###################### TEST #1 + +### ASSERTION: no problem flushing logs (should show two binlogs) +FLUSH LOGS; +-- echo # assert: must show two binlogs +-- source include/show_binary_logs.inc + +-- echo ###################### TEST #2 + +### ASSERTION: check that FLUSH LOGS actually fails and reports +### failure back to the user if find_uniq_filename fails +### (should show just one binlog) + +RESET MASTER; +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +-- echo # assert: must show one binlog +-- source include/show_binary_logs.inc + +### ACTION: clean up and move to next test +SET @@global.debug_dbug=@old_debug; +RESET MASTER; + +-- echo ###################### TEST #3 + +### ACTION: create some tables (t1, t2, t4) and insert some values in +### table t1 +CREATE TABLE t1 (a INT); +CREATE TABLE t2 (a VARCHAR(16384)) Engine=InnoDB; +CREATE TABLE t4 (a VARCHAR(16384)); +INSERT INTO t1 VALUES (1); +RESET MASTER; + +### ASSERTION: we force rotation of the binary log because it exceeds +### the max_binlog_size option (should show two binary +### logs) + +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 + +# shows two binary logs +-- echo # assert: must show two binlog +-- source include/show_binary_logs.inc + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #4 + +### ASSERTION: load the big file into a transactional table and check +### that it reports error. The table will contain the +### changes performed despite the fact that it reported an +### error. 
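The 4096-byte boundary behind the two data files above matters because the master in this test runs with a small max_binlog_size, so loading the larger file forces a binlog rotation (and therefore trips the injected unique-filename failure) while the smaller file does not. That option would normally come from the test's .opt file; the value below is illustrative only and not quoted from this commit:

    # rpl_binlog_errors-master.opt (hypothetical content)
    --max-binlog-size=4096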
+ +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 + +# show table +-- echo # assert: must show one entry +SELECT count(*) FROM t2; + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #5 + +### ASSERTION: load the small file into a transactional table and +### check that it succeeds + +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file2' INTO TABLE t2 + +# show table +-- echo # assert: must show one entry +SELECT count(*) FROM t2; + +# clean up the table and the binlog to be used in next part of test +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #6 + +### ASSERTION: check that even if one is using a transactional table +### and explicit transactions (no autocommit) if rotation +### fails we get the error. Transaction is not rolledback +### because rotation happens after the commit. + +SET @@global.debug_dbug="d,error_unique_log_filename"; +SET AUTOCOMMIT=0; +INSERT INTO t2 VALUES ('muse'); +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 +INSERT INTO t2 VALUES ('muse'); +-- error ER_NO_UNIQUE_LOGFILE +COMMIT; + +### ACTION: Show the contents of the table after the test +-- echo # assert: must show three entries +SELECT count(*) FROM t2; + +### ACTION: clean up and move to the next test +SET AUTOCOMMIT= 1; +SET @@global.debug_dbug=@old_debug; +DELETE FROM t2; +RESET MASTER; + +-- echo ###################### TEST #7 + +### ASSERTION: check that on a non-transactional table, if rotation +### fails then an error is reported and an incident event +### is written to the current binary log. + +SET @@global.debug_dbug="d,error_unique_log_filename"; + +# Disable logging Annotate_rows events to preserve events count. +let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; +SET @@binlog_annotate_row_events= 0; + +SELECT count(*) FROM t4; +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 + +-- echo # assert: must show 1 entry +SELECT count(*) FROM t4; + +-- echo ### check that the incident event is written to the current log +SET @@global.debug_dbug=@old_debug; +if (!$binlog_limit) +{ + -- let $binlog_limit= 4,1 +} +-- source include/show_binlog_events.inc + +# clean up and move to next test +DELETE FROM t4; + +--disable_query_log +eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; +--enable_query_log + +RESET MASTER; + +-- echo ###################### TEST #8 + +### ASSERTION: check that statements end up in error but they succeed +### on changing the data. 
+ +SET @@global.debug_dbug="d,error_unique_log_filename"; +-- echo # must show 0 entries +SELECT count(*) FROM t4; +SELECT count(*) FROM t2; + +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t4 +-- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +-- error ER_NO_UNIQUE_LOGFILE +-- eval LOAD DATA INFILE '$load_file' INTO TABLE t2 +-- error ER_NO_UNIQUE_LOGFILE +INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'); + +-- echo # INFO: Count(*) Before Offending DELETEs +-- echo # assert: must show 1 entry +SELECT count(*) FROM t4; +-- echo # assert: must show 4 entries +SELECT count(*) FROM t2; + +-- error ER_NO_UNIQUE_LOGFILE +DELETE FROM t4; +-- error ER_NO_UNIQUE_LOGFILE +DELETE FROM t2; + +-- echo # INFO: Count(*) After Offending DELETEs +-- echo # assert: must show zero entries +SELECT count(*) FROM t4; +SELECT count(*) FROM t2; + +# remove fault injection +SET @@global.debug_dbug=@old_debug; + +-- echo ###################### TEST #9 + +### ASSERTION: check that if we disable binlogging, then statements +### succeed. +SET @@global.debug_dbug="d,error_unique_log_filename"; +SET SQL_LOG_BIN=0; +INSERT INTO t2 VALUES ('aaa'), ('bbb'), ('ccc'), ('ddd'); +INSERT INTO t4 VALUES ('eee'), ('fff'), ('ggg'), ('hhh'); +-- echo # assert: must show four entries +SELECT count(*) FROM t2; +SELECT count(*) FROM t4; +DELETE FROM t2; +DELETE FROM t4; +-- echo # assert: must show zero entries +SELECT count(*) FROM t2; +SELECT count(*) FROM t4; +SET SQL_LOG_BIN=1; +SET @@global.debug_dbug=@old_debug; + +-- echo ###################### TEST #10 + +### ASSERTION: check that error is reported if there is a failure +### while registering the index file and the binary log +### file or failure to write the rotate event. + +call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); +call mtr.add_suppression("Could not use .*"); + +RESET MASTER; +SHOW WARNINGS; + +# +d,fault_injection_registering_index => injects fault on MYSQL_BIN_LOG::open +SET @@global.debug_dbug="d,fault_injection_registering_index"; +-- replace_regex /\.[\\\/]master/master/ +-- error ER_CANT_OPEN_FILE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_NO_BINARY_LOGGING +SHOW BINARY LOGS; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +-- echo ###################### TEST #11 + +### ASSERTION: check that error is reported if there is a failure +### while opening the index file and the binary log file or +### failure to write the rotate event. 
+ +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +# +d,fault_injection_openning_index => injects fault on MYSQL_BIN_LOG::open_index_file +SET @@global.debug_dbug="d,fault_injection_openning_index"; +-- replace_regex /\.[\\\/]master/master/ +-- error ER_CANT_OPEN_FILE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_FLUSH_MASTER_BINLOG_CLOSED +RESET MASTER; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +-- echo ###################### TEST #12 + +### ASSERTION: check that error is reported if there is a failure +### while writing the rotate event when creating a new log +### file. + +# +d,fault_injection_new_file_rotate_event => injects fault on MYSQL_BIN_LOG::MYSQL_BIN_LOG::new_file_impl +SET @@global.debug_dbug="d,fault_injection_new_file_rotate_event"; +-- error ER_ERROR_ON_WRITE +FLUSH LOGS; +SET @@global.debug_dbug=@old_debug; + +-- error ER_FLUSH_MASTER_BINLOG_CLOSED +RESET MASTER; + +# issue some statements and check that they don't fail +CREATE TABLE t5 (a INT); +INSERT INTO t4 VALUES ('bbbbb'); +INSERT INTO t2 VALUES ('aaaaa'); +DELETE FROM t4; +DELETE FROM t2; +DROP TABLE t5; +flush tables; + +# restart the server so that we have binlog again +--let $rpl_server_number= 1 +--source include/rpl_restart_server.inc + +## clean up +DROP TABLE t1, t2, t4; +RESET MASTER; + +# restart slave again +-- connection slave +-- source include/start_slave.inc +-- connection master + +-- echo ####################################################################### +-- echo ####################### PART 2: SLAVE TESTS ########################### +-- echo ####################################################################### + +### setup +--source include/rpl_reset.inc +-- connection slave + +# slave suppressions + +call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*"); +call mtr.add_suppression("Error writing file .*"); +call mtr.add_suppression("Could not use .*"); +call mtr.add_suppression("MYSQL_BIN_LOG::open failed to sync the index file."); +call mtr.add_suppression("Can't generate a unique log-filename .*"); +-- echo ###################### TEST #13 + +#### ASSERTION: check against unique log filename error +-- let $io_thd_injection_fault_flag= error_unique_log_filename +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #14 + +#### ASSERTION: check against rotate failing +-- let $io_thd_injection_fault_flag= fault_injection_new_file_rotate_event +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #15 + +#### ASSERTION: check against relay log open failure +-- let $io_thd_injection_fault_flag= fault_injection_registering_index +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source include/io_thd_fault_injection.inc + +-- echo ###################### TEST #16 + +#### ASSERTION: check against relay log index open failure +-- let $io_thd_injection_fault_flag= fault_injection_openning_index +-- let $slave_io_errno= 1595 +-- let $show_slave_io_error= 1 +-- source 
include/io_thd_fault_injection.inc + +### clean up +-- source include/stop_slave_sql.inc +RESET SLAVE; +RESET MASTER; +--remove_file $load_file +--remove_file $load_file2 +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test b/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test index 573c1d111fc..7dfef023947 100644 --- a/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test +++ b/mysql-test/suite/rpl/t/rpl_cant_read_event_incident.test @@ -1 +1,83 @@ ---source include/rpl_cant_read_event_incident.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# +# Bug#11747416 : 32228 A disk full makes binary log corrupt. +# +# +# The test demonstrates reading from binlog error propagation to slave +# and reporting there. +# Conditions for the bug include a crash at time of the last event to +# the binlog was written partly. With the fixes the event is not sent out +# any longer, but rather the dump thread sends out a sound error message. +# +# Crash is not simulated. A binlog with partly written event in its end is installed +# and replication is started from it. +# + +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--connection slave +# Make sure the slave is stopped while we are messing with master. +# Otherwise we get occasional failures as the slave manages to re-connect +# to the newly started master and we get extra events applied, causing +# conflicts. +--source include/stop_slave.inc + +--connection master +call mtr.add_suppression("Error in Log_event::read_log_event()"); +--let $datadir= `SELECT @@datadir` + +--let $rpl_server_number= 1 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master-bin.000001 +--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001 + +--let $rpl_server_number= 1 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +# evidence of the partial binlog +--error ER_ERROR_WHEN_EXECUTING_COMMAND +show binlog events; + +--connection slave +call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log"); +reset slave; +start slave; + +# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236 +--let $slave_param=Last_IO_Errno +--let $slave_param_value=1236 +--source include/wait_for_slave_param.inc + +--let $slave_field_result_replace= / at [0-9]*/ at XXX/ +--let $status_items= Last_IO_Errno, Last_IO_Error +--source include/show_slave_status.inc + +# +# Cleanup +# + +--connection master +reset master; + +--connection slave +stop slave; +reset slave; +# Table was created from binlog, it may not be created if SQL thread is running +# slowly and IO thread reaches incident before SQL thread applies it. +--disable_warnings +drop table if exists t; +--enable_warnings +reset master; + +--echo End of the tests +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum.test b/mysql-test/suite/rpl/t/rpl_checksum.test index 0edf8fda7f3..17a986dc308 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum.test +++ b/mysql-test/suite/rpl/t/rpl_checksum.test @@ -1 +1,335 @@ ---source include/rpl_checksum.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +# WL2540 replication events checksum +# Testing configuration parameters + +--source include/have_debug.inc +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +call mtr.add_suppression('Slave can not handle replication events with the checksum that master is configured to log'); +call mtr.add_suppression('Replication event checksum verification failed'); +# due to C failure simulation +call mtr.add_suppression('Relay log write failure: could not queue event from master'); +call mtr.add_suppression('Master is configured to log replication events with checksum, but will not send such events to slaves that cannot process them'); + +# A. read/write access to the global vars: +# binlog_checksum master_verify_checksum slave_sql_verify_checksum + +connection master; + +set @master_save_binlog_checksum= @@global.binlog_checksum; +set @save_master_verify_checksum = @@global.master_verify_checksum; + +select @@global.binlog_checksum as 'must be CRC32 because of the command line option'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.binlog_checksum as 'no session var'; + +select @@global.master_verify_checksum as 'must be zero because of default'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.master_verify_checksum as 'no session var'; + +connection slave; + +set @slave_save_binlog_checksum= @@global.binlog_checksum; +set @save_slave_sql_verify_checksum = @@global.slave_sql_verify_checksum; + +select @@global.slave_sql_verify_checksum as 'must be one because of default'; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.slave_sql_verify_checksum as 'no session var'; + +connection master; + +source include/show_binary_logs.inc; +set @@global.binlog_checksum = NONE; +select @@global.binlog_checksum; +--echo *** must be rotations seen *** +source include/show_binary_logs.inc; + +set @@global.binlog_checksum = default; +select @@global.binlog_checksum; + +# testing lack of side-effects in non-effective update of binlog_checksum: +set @@global.binlog_checksum = CRC32; +select @@global.binlog_checksum; +set @@global.binlog_checksum = CRC32; + +set @@global.master_verify_checksum = 0; +set @@global.master_verify_checksum = default; + +--error ER_WRONG_VALUE_FOR_VAR +set @@global.binlog_checksum = ADLER32; +--error ER_WRONG_VALUE_FOR_VAR +set @@global.master_verify_checksum = 2; # the var is of bool type + +connection slave; + +set @@global.slave_sql_verify_checksum = 0; +set @@global.slave_sql_verify_checksum = default; +--error ER_WRONG_VALUE_FOR_VAR +set @@global.slave_sql_verify_checksum = 2; # the var is of bool type + +# +# B. 
Old Slave to New master conditions +# +# while master does not send a checksum-ed binlog the Old Slave can +# work with the New Master + +connection master; + +set @@global.binlog_checksum = NONE; +create table t1 (a int); + +# testing that binlog rotation preserves opt_binlog_checksum value +flush logs; +flush logs; +-- source include/wait_for_binlog_checkpoint.inc +flush logs; + +sync_slave_with_master; +#connection slave; +# checking that rotation on the slave side leaves slave stable +flush logs; +flush logs; +flush logs; +select count(*) as zero from t1; + +source include/stop_slave.inc; + +connection master; +set @@global.binlog_checksum = CRC32; +-- source include/wait_for_binlog_checkpoint.inc +insert into t1 values (1) /* will not be applied on slave due to simulation */; + +# instruction to the dump thread + +connection slave; +set @saved_dbug = @@global.debug_dbug; +set @@global.debug_dbug='d,simulate_slave_unaware_checksum'; +start slave; +--let $slave_io_errno= 1236 +--let $show_slave_io_error= 1 +source include/wait_for_slave_io_error.inc; + +select count(*) as zero from t1; + +set @@global.debug_dbug = @saved_dbug; + +connection slave; +source include/start_slave.inc; + +# +# C. checksum failure simulations +# + +# C1. Failure by a client thread +connection master; +set @@global.master_verify_checksum = 1; +set @save_dbug = @@session.debug_dbug; +set @@session.debug_dbug='d,simulate_checksum_test_failure'; +--error ER_ERROR_WHEN_EXECUTING_COMMAND +show binlog events; +SET debug_dbug= @save_dbug; +set @@global.master_verify_checksum = default; + +#connection master; +sync_slave_with_master; + +connection slave; +source include/stop_slave.inc; + +connection master; +create table t2 (a int); +let $pos_master= query_get_value(SHOW MASTER STATUS, Position, 1); + +connection slave; + +# C2. Failure by IO thread +# instruction to io thread +set @saved_dbug = @@global.debug_dbug; +set @@global.debug_dbug='d,simulate_checksum_test_failure'; +start slave io_thread; +# When the checksum error is detected, the slave sets error code 1913 +# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately +# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). +# So we usually get 1595, but it is occasionally possible to get 1913. +--let $slave_io_errno= 1595,1913 +--let $show_slave_io_error= 0 +source include/wait_for_slave_io_error.inc; +set @@global.debug_dbug = @saved_dbug; + +# to make IO thread re-read it again w/o the failure +start slave io_thread; +let $slave_param= Read_Master_Log_Pos; +let $slave_param_value= $pos_master; +source include/wait_for_slave_param.inc; + +# C3. Failure by SQL thread +# instruction to sql thread; +set @@global.slave_sql_verify_checksum = 1; + +set @@global.debug_dbug='d,simulate_checksum_test_failure'; + +start slave sql_thread; +--let $slave_sql_errno= 1593 +--let $show_slave_sql_error= 1 +source include/wait_for_slave_sql_error.inc; + +# resuming SQL thread to parse out the event w/o the failure + +set @@global.debug_dbug = @saved_dbug; +source include/start_slave.inc; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be zero' from t2; + +# +# D. 
Reset slave, Change-Master, Binlog & Relay-log rotations with +# random value on binlog_checksum on both master and slave +# +connection slave; +stop slave; +reset slave; + +# randomize slave server's own checksum policy +set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); +flush logs; + +connection master; +set @@global.binlog_checksum= CRC32; +reset master; +flush logs; +create table t3 (a int, b char(5)); + +connection slave; +source include/start_slave.inc; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be zero' from t3; +source include/stop_slave.inc; +--replace_result $MASTER_MYPORT MASTER_PORT +eval change master to master_host='127.0.0.1',master_port=$MASTER_MYPORT, master_user='root'; + +connection master; +flush logs; +reset master; +insert into t3 value (1, @@global.binlog_checksum); + +connection slave; +source include/start_slave.inc; +flush logs; + +connection master; +sync_slave_with_master; + +#connection slave; +select count(*) as 'must be one' from t3; + +connection master; +set @@global.binlog_checksum= IF(floor((rand()*1000)%2), "CRC32", "NONE"); +insert into t3 value (1, @@global.binlog_checksum); +sync_slave_with_master; + +#connection slave; + +#clean-up + +connection master; +drop table t1, t2, t3; +set @@global.binlog_checksum = @master_save_binlog_checksum; +set @@global.master_verify_checksum = @save_master_verify_checksum; + +# +# BUG#58564: flush_read_lock fails in mysql-trunk-bugfixing after merging with WL#2540 +# +# Sanity check that verifies that no assertions are triggered because +# of old FD events (generated by versions prior to server released with +# checksums feature) +# +# There is no need for query log, if something wrong this should trigger +# an assertion + +--disable_query_log + +BINLOG ' +MfmqTA8BAAAAZwAAAGsAAAABAAQANS41LjctbTMtZGVidWctbG9nAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== +'; + +--enable_query_log + +#connection slave; +sync_slave_with_master; + + +--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** + +--connection master + +--source include/wait_for_binlog_checkpoint.inc +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); + +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +--error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +SET debug_dbug= @old_dbug; + +INSERT INTO t4 VALUES (2); + +--connection slave +--let $slave_sql_errno= 1590 +--source include/wait_for_slave_sql_error.inc + +# Search the error log for the error message. +# The bug was that 4 garbage bytes were output in the middle of the error +# message; by searching for a pattern that spans that location, we can +# catch the error. +let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE= $log_error_ +--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + +SELECT * FROM t4 ORDER BY a; +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc + +--connection master +--save_master_pos + +--connection slave +--sync_with_master +SELECT * FROM t4 ORDER BY a; + + +--connection slave +set @@global.binlog_checksum = @slave_save_binlog_checksum; +set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; + +--echo End of tests + +--connection master +DROP TABLE t4; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum_cache.test b/mysql-test/suite/rpl/t/rpl_checksum_cache.test index 59b338d2556..e04f618b81e 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum_cache.test +++ b/mysql-test/suite/rpl/t/rpl_checksum_cache.test @@ -1 +1,261 @@ ---source include/rpl_checksum_cache.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +-- source include/have_innodb.inc +-- source include/master-slave.inc + +--disable_warnings +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t2 set data=repeat.*'a', @act_size.*"); +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. .*Statement: insert into t1 values.* NAME_CONST.*'n',.*, @data .*"); +--enable_warnings + +connection master; +set @save_binlog_cache_size = @@global.binlog_cache_size; +set @save_binlog_checksum = @@global.binlog_checksum; +set @save_master_verify_checksum = @@global.master_verify_checksum; +set @@global.binlog_cache_size = 4096; +set @@global.binlog_checksum = CRC32; +set @@global.master_verify_checksum = 1; + +# restart slave to force the dump thread to verify events (on master side) +connection slave; +source include/stop_slave.inc; +source include/start_slave.inc; + +connection master; + +# +# Testing a critical part of checksum handling dealing with transaction cache. +# The cache's buffer size is set to be less than the transaction's footprint +# in binlog. +# +# To verify combined buffer-by-buffer read out of the file and fixing crc per event +# there are the following parts: +# +# 1. the event size is much less than the cache's buffer +# 2. the event size is bigger than the cache's buffer +# 3. the event size if approximately the same as the cache's buffer +# 4. all in above + +# +# 1. the event size is much less than the cache's buffer +# + +flush status; +show status like "binlog_cache_use"; +show status like "binlog_cache_disk_use"; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# parameter to ensure the test slightly varies binlog content +# between different invocations +# +let $deviation_size=32; +eval create table t1 (a int PRIMARY KEY, b CHAR($deviation_size)) engine=innodb; + +# Now we are going to create transaction which is long enough so its +# transaction binlog will be flushed to disk... 
+ +delimiter |; +create procedure test.p_init (n int, size int) +begin + while n > 0 do + select round(RAND() * size) into @act_size; + set @data = repeat('a', @act_size); + insert into t1 values(n, @data ); + set n= n-1; + end while; +end| + +delimiter ;| + +let $1 = 4000; # PB2 can run it slow to time out on following sync_slave_with_master:s + +begin; +--disable_warnings +# todo: check if it is really so. +#+Note 1592 Unsafe statement binlogged in statement format since BINLOG_FORMAT = STATEMENT. Reason for unsafeness: Statement uses a system function whose value may differ on slave. +eval call test.p_init($1, $deviation_size); +--enable_warnings +commit; + +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t1, slave:test.t1; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t1; +commit; + +sync_slave_with_master; + + +# +# 2. the event size is bigger than the cache's buffer +# +connection master; + +flush status; +let $t2_data_size= `select 3 * @@global.binlog_cache_size`; +let $t2_aver_size= `select 2 * @@global.binlog_cache_size`; +let $t2_max_rand= `select 1 * @@global.binlog_cache_size`; + +eval create table t2(a int auto_increment primary key, data VARCHAR($t2_data_size)) ENGINE=Innodb; +let $1=100; +--disable_query_log +begin; +while ($1) +{ + eval select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; + set @data = repeat('a', @act_size); + insert into t2 set data = @data; + dec $1; +} +commit; +--enable_query_log +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t2, slave:test.t2; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t2; +commit; + +sync_slave_with_master; + +# +# 3. the event size if approximately the same as the cache's buffer +# + +connection master; + +flush status; +let $t3_data_size= `select 2 * @@global.binlog_cache_size`; +let $t3_aver_size= `select (9 * @@global.binlog_cache_size) / 10`; +let $t3_max_rand= `select (2 * @@global.binlog_cache_size) / 10`; + +eval create table t3(a int auto_increment primary key, data VARCHAR($t3_data_size)) engine=innodb; + +let $1= 300; +--disable_query_log +begin; +while ($1) +{ + eval select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; + insert into t3 set data= repeat('a', @act_size); + dec $1; +} +commit; +--enable_query_log +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t3, slave:test.t3; +source include/diff_tables.inc; + +# undoing changes with verifying the above once again +connection master; + +begin; +delete from t3; +commit; + +sync_slave_with_master; + + +# +# 4. 
all in above +# + +connection master; +flush status; + +delimiter |; +eval create procedure test.p1 (n int) +begin + while n > 0 do + case (select (round(rand()*100) % 3) + 1) + when 1 then + select round(RAND() * $deviation_size) into @act_size; + set @data = repeat('a', @act_size); + insert into t1 values(n, @data); + when 2 then + begin + select round($t2_aver_size + RAND() * $t2_max_rand) into @act_size; + insert into t2 set data=repeat('a', @act_size); + end; + when 3 then + begin + select round($t3_aver_size + RAND() * $t3_max_rand) into @act_size; + insert into t3 set data= repeat('a', @act_size); + end; + end case; + set n= n-1; + end while; +end| +delimiter ;| + +let $1= 1000; +set autocommit= 0; +begin; +--disable_warnings +eval call test.p1($1); +--enable_warnings +commit; + +show status like "binlog_cache_use"; +--echo *** binlog_cache_disk_use must be non-zero *** +show status like "binlog_cache_disk_use"; + +sync_slave_with_master; + +let $diff_tables=master:test.t1, slave:test.t1; +source include/diff_tables.inc; + +let $diff_tables=master:test.t2, slave:test.t2; +source include/diff_tables.inc; + +let $diff_tables=master:test.t3, slave:test.t3; +source include/diff_tables.inc; + + +connection master; + +begin; +delete from t1; +delete from t2; +delete from t3; +commit; + +drop table t1, t2, t3; +set @@global.binlog_cache_size = @save_binlog_cache_size; +set @@global.binlog_checksum = @save_binlog_checksum; +set @@global.master_verify_checksum = @save_master_verify_checksum; +drop procedure test.p_init; +drop procedure test.p1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_corruption.test b/mysql-test/suite/rpl/t/rpl_corruption.test index e51d1c65e95..c7a913af9d7 100644 --- a/mysql-test/suite/rpl/t/rpl_corruption.test +++ b/mysql-test/suite/rpl/t/rpl_corruption.test @@ -1 +1,175 @@ ---source include/rpl_corruption.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +############################################################ +# Purpose: WL#5064 Testing with corrupted events. +# The test emulates the corruption at the vary stages +# of replication: +# - in binlog file +# - in network +# - in relay log +############################################################ + +# +# The tests intensively utilize @@global.debug. Note, +# Bug#11765758 - 58754, +# @@global.debug is read by the slave threads through dbug-interface. +# Hence, before a client thread set @@global.debug we have to ensure that: +# (a) the slave threads are stopped, or (b) the slave threads are in +# sync and waiting. + +--source include/have_debug.inc +--source include/master-slave.inc + +# Block legal errors for MTR +call mtr.add_suppression('Found invalid event in binary log'); +call mtr.add_suppression('Slave I/O: Relay log write failure: could not queue event from master'); +call mtr.add_suppression('event read from binlog did not pass crc check'); +call mtr.add_suppression('Replication event checksum verification failed'); +call mtr.add_suppression('Event crc check failed! Most likely there is event corruption'); +call mtr.add_suppression('Slave SQL: Error initializing relay log position: I/O error reading event at position .*, error.* 1593'); + +SET @old_master_verify_checksum = @@master_verify_checksum; + +# Creating test table/data and set corruption position for testing +--echo # 1. 
Creating test table/data and set corruption position for testing +--connection master +--echo * insert/update/delete rows in table t1 * +# Corruption algorithm modifies only the first event and +# then will be reset. To avoid checking always the first event +# from binlog (usually it is FD) we randomly execute different +# statements and set position for corruption inside events. + +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c VARCHAR(100)); +--disable_query_log +let $i=`SELECT 3+CEILING(10*RAND())`; +let $j=1; +let $pos=0; +while ($i) { + eval INSERT INTO t1 VALUES ($j, 'a', NULL); + if (`SELECT RAND() > 0.7`) + { + eval UPDATE t1 SET c = REPEAT('a', 20) WHERE a = $j; + } + if (`SELECT RAND() > 0.8`) + { + eval DELETE FROM t1 WHERE a = $j; + } + if (!$pos) { + let $pos= query_get_value(SHOW MASTER STATUS, Position, 1); + --sync_slave_with_master + --source include/stop_slave.inc + --disable_query_log + --connection master + } + dec $i; + inc $j; +} +--enable_query_log + + +# Emulate corruption in binlog file when SHOW BINLOG EVENTS is executing +--echo # 2. Corruption in master binlog and SHOW BINLOG EVENTS +SET @saved_dbug = @@global.debug_dbug; +SET @@global.debug_dbug="d,corrupt_read_log_event_char"; +--echo SHOW BINLOG EVENTS; +--disable_query_log +send_eval SHOW BINLOG EVENTS FROM $pos; +--enable_query_log +--error ER_ERROR_WHEN_EXECUTING_COMMAND +reap; + +SET @@global.debug_dbug=@saved_dbug; + +# Emulate corruption on master with crc checking on master +--echo # 3. Master read a corrupted event from binlog and send the error to slave + +# We have a rare but nasty potential race here: if the dump thread on +# the master for the _old_ slave connection has not yet discovered +# that the slave has disconnected, we will inject the corrupt event on +# the wrong connection, and the test will fail +# (+d,corrupt_read_log_event2 corrupts only one event). +# So kill any lingering dump thread (we need to kill; otherwise dump thread +# could manage to send all events down the socket before seeing it close, and +# hang forever waiting for new binlog events to be created). +let $id= `select id from information_schema.processlist where command = "Binlog Dump"`; +if ($id) +{ + --disable_query_log + --error 0,1094 + eval kill $id; + --enable_query_log +} +let $wait_condition= + SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE command = 'Binlog Dump'; +--source include/wait_condition.inc + +SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; +--connection slave +START SLAVE IO_THREAD; +let $slave_io_errno= 1236; +--let $slave_timeout= 10 +--source include/wait_for_slave_io_error.inc +--connection master +SET @@global.debug_dbug=@saved_dbug; + +# Emulate corruption on master without crc checking on master +--echo # 4. Master read a corrupted event from binlog and send it to slave +--connection master +SET GLOBAL master_verify_checksum=0; +SET @@global.debug_dbug="d,corrupt_read_log_event2_set"; +--connection slave +START SLAVE IO_THREAD; +# When the checksum error is detected, the slave sets error code 1743 +# (ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE) in queue_event(), then immediately +# sets error 1595 (ER_SLAVE_RELAY_LOG_WRITE_FAILURE) in handle_slave_io(). +# So we usually get 1595, but it is occasionally possible to get 1743. 
+let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE +--source include/wait_for_slave_io_error.inc +--connection master +SET @@global.debug_dbug=@saved_dbug; +SET GLOBAL master_verify_checksum=1; + +# Emulate corruption in network +--echo # 5. Slave. Corruption in network +--connection slave +SET @saved_dbug_slave = @@GLOBAL.debug_dbug; +SET @@global.debug_dbug="d,corrupt_queue_event"; +START SLAVE IO_THREAD; +let $slave_io_errno= 1595,1743; # ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE +--source include/wait_for_slave_io_error.inc +SET @@global.debug_dbug=@saved_dbug_slave; + +# Emulate corruption in relay log +--echo # 6. Slave. Corruption in relay log + +SET @@global.debug_dbug="d,corrupt_read_log_event_char"; + +START SLAVE SQL_THREAD; +let $slave_sql_errno= 1593; +--source include/wait_for_slave_sql_error.inc + +SET @@global.debug_dbug=@saved_dbug_slave; + +# Start normal replication and compare same table on master +# and slave +--echo # 7. Seek diff for tables on master and slave +--connection slave +--source include/start_slave.inc +--connection master +--sync_slave_with_master +let $diff_tables= master:test.t1, slave:test.t1; +--source include/diff_tables.inc + +# Clean up +--echo # 8. Clean up +--connection master +set @@global.debug_dbug = @saved_dbug; +SET GLOBAL master_verify_checksum = @old_master_verify_checksum; +DROP TABLE t1; +--sync_slave_with_master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test index 004003ea524..5975c6f03c3 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test @@ -1,4 +1,575 @@ ---source include/rpl_gtid_basic.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/have_innodb.inc +--let $rpl_topology=1->2->3->4 +--source include/rpl_init.inc + +# Set up a 4-deep replication topology, then test various fail-overs +# using GTID. 
+# +# A -> B -> C -> D + +connection server_1; +--source include/wait_for_binlog_checkpoint.inc +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--echo *** GTID position should be empty here *** +--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); + +CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; +CREATE TABLE t2 (a INT PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, "m1"); +INSERT INTO t1 VALUES (2, "m2"), (3, "m3"), (4, "m4"); +INSERT INTO t2 VALUES (1, "i1"); +BEGIN; +INSERT INTO t2 VALUES (2, "i2"), (3, "i3"); +INSERT INTO t2 VALUES (4, "i4"); +COMMIT; +save_master_pos; +source include/wait_for_binlog_checkpoint.inc; +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--let $gtid_pos_server_1 = `SELECT @@gtid_binlog_pos` +--echo *** GTID position should be non-empty here *** +--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1> +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); + +connection server_2; +sync_with_master; +source include/wait_for_binlog_checkpoint.inc; +--let $binlog_file = query_get_value(SHOW MASTER STATUS,File,1) +--let $binlog_pos = query_get_value(SHOW MASTER STATUS,Position,1) +--echo *** GTID position should be the same as on server_1 *** +--replace_result $binlog_file <BINLOG_FILE> $binlog_pos <BINLOG_POS> $gtid_pos_server_1 <GTID_POS_SERVER_1> +eval SELECT BINLOG_GTID_POS('$binlog_file',$binlog_pos); +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; +save_master_pos; + +connection server_3; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; +save_master_pos; + +connection server_4; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + + +--echo *** Now take out D, let it fall behind a bit, and then test re-attaching it to A *** +connection server_4; +--source include/stop_slave.inc + +connection server_1; +INSERT INTO t1 VALUES (5, "m1a"); +INSERT INTO t2 VALUES (5, "i1a"); +save_master_pos; + +connection server_4; +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + +--echo *** Now move B to D (C is still replicating from B) *** +connection server_2; +--source include/stop_slave.inc +--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc + +connection server_4; +UPDATE t2 SET b="j1a" WHERE a=5; +save_master_pos; + +connection server_2; +sync_with_master; +SELECT * FROM t1 ORDER BY a; +SELECT * FROM t2 ORDER BY a; + +--echo *** Now move C to D, after letting it fall a little behind *** +connection server_3; +--source include/stop_slave.inc + +connection server_1; +INSERT INTO t2 VALUES (6, "i6b"); +INSERT INTO t2 VALUES (7, "i7b"); +--source include/save_master_gtid.inc + +connection server_3; +--replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4, + MASTER_USE_GTID=CURRENT_POS; +--source include/start_slave.inc +--source 
include/sync_with_master_gtid.inc +SELECT * FROM t2 ORDER BY a; + +--echo *** Now change everything back to what it was, to make rpl_end.inc happy +# Also check that MASTER_USE_GTID=CURRENT_POS is still enabled. +connection server_2; +# We need to sync up server_2 before switching. If it happened to have reached +# the point 'UPDATE t2 SET b="j1a" WHERE a=5' it will fail to connect to +# server_1, which is (deliberately) missing that transaction. +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +--replace_result $MASTER_MYPORT MASTER_MYPORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT; +--source include/start_slave.inc +--source include/wait_for_slave_to_start.inc + +connection server_3; +--source include/stop_slave.inc +--replace_result $SLAVE_MYPORT SLAVE_MYPORT +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SLAVE_MYPORT; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +connection server_4; +--source include/stop_slave.inc +--replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 +eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; +--source include/start_slave.inc + +connection server_1; +DROP TABLE t1,t2; +--source include/save_master_gtid.inc + +--echo *** A few more checks for BINLOG_GTID_POS function *** +--let $valid_binlog_name = query_get_value(SHOW BINARY LOGS,Log_name,1) +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS(); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS('a'); +--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT +SELECT BINLOG_GTID_POS('a',1,NULL); +SELECT BINLOG_GTID_POS(1,'a'); +SELECT BINLOG_GTID_POS(NULL,NULL); +SELECT BINLOG_GTID_POS('',1); +SELECT BINLOG_GTID_POS('a',1); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',-1); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',0); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551615); +eval SELECT BINLOG_GTID_POS('$valid_binlog_name',18446744073709551616); + + +--echo *** Some tests of @@GLOBAL.gtid_binlog_state *** +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc + +--connection server_1 +SET @old_state= @@GLOBAL.gtid_binlog_state; + +--error ER_BINLOG_MUST_BE_EMPTY +SET GLOBAL gtid_binlog_state = ''; +RESET MASTER; +SET GLOBAL gtid_binlog_state = ''; +FLUSH LOGS; +--source include/show_binary_logs.inc +SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; +--source include/show_binary_logs.inc +--let $binlog_file= master-bin.000001 +--let $binlog_start= 4 +--source include/show_binlog_events.inc +#SELECT @@GLOBAL.gtid_binlog_pos; +#SELECT @@GLOBAL.gtid_binlog_state; +--error ER_BINLOG_MUST_BE_EMPTY +SET GLOBAL gtid_binlog_state = @old_state; +RESET MASTER; +SET GLOBAL gtid_binlog_state = @old_state; + +# Check that slave can reconnect again, despite the RESET MASTER, as we +# restored the state. + +CREATE TABLE t1 (a INT PRIMARY KEY); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (1); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +# We cannot just use sync_with_master as we've done RESET MASTER, so +# slave old-style position is wrong. +# So sync on gtid position instead. +--source include/sync_with_master_gtid.inc + +SELECT * FROM t1; +# Check that the IO gtid position in SHOW SLAVE STATUS is also correct. 
+--let $status_items= Gtid_IO_Pos +--source include/show_slave_status.inc + +--echo *** Test @@LAST_GTID and MASTER_GTID_WAIT() *** + +--connection server_1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +--connect (m1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SELECT @@last_gtid; +SET gtid_seq_no=110; +SELECT @@last_gtid; +BEGIN; +SELECT @@last_gtid; +INSERT INTO t1 VALUES (2); +SELECT @@last_gtid; +COMMIT; +SELECT @@last_gtid; +--let $pos= `SELECT @@gtid_binlog_pos` + +--connect (s1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +eval SET @pos= '$pos'; +# Check NULL argument. +SELECT master_gtid_wait(NULL); +# Check empty argument returns immediately. +SELECT master_gtid_wait('', NULL); +# Check this gets counted +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_time'; +# Let's check that we get a timeout +SELECT master_gtid_wait(@pos, 0.5); +SELECT * FROM t1 ORDER BY a; +# Now actually wait until the slave reaches the position +send SELECT master_gtid_wait(@pos); + +--connection server_2 +--source include/start_slave.inc + +--connection s1 +reap; +SELECT * FROM t1 ORDER BY a; + +# Test waiting on a domain that does not exist yet. +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id= 1; +INSERT INTO t1 VALUES (3); +--let $pos= `SELECT @@gtid_binlog_pos` + +--connection s1 +--replace_result $pos POS +eval SET @pos= '$pos'; +SELECT master_gtid_wait(@pos, 0); +SELECT * FROM t1 WHERE a >= 3; +send SELECT master_gtid_wait(@pos, -1); + +--connection server_2 +--source include/start_slave.inc + +--connection s1 +reap; +SELECT * FROM t1 WHERE a >= 3; +# Waiting for only part of the position. +SELECT master_gtid_wait('1-1-1', 0); + +# Now test a lot of parallel master_gtid_wait() calls, completing in different +# order, and some of which time out or get killed on the way. + +--connection s1 +send SELECT master_gtid_wait('2-1-1,1-1-4,0-1-110'); + +--connect (s2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +# This will time out. No event 0-1-1000 exists +send SELECT master_gtid_wait('0-1-1000', 0.5); + +--connect (s3,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +# This one we will kill +--let $kill1_id= `SELECT connection_id()` +send SELECT master_gtid_wait('0-1-2000'); + +--connect (s4,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-10'); + +--connect (s5,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-6', 1); + +# This one we will kill also. +--connect (s6,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +--let $kill2_id= `SELECT connection_id()` +send SELECT master_gtid_wait('2-1-5'); + +--connect (s7,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-10'); + +--connect (s8,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-5,1-1-4,0-1-110'); + +--connect (s9,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('2-1-2'); + +--connection server_2 +# This one completes immediately. 
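For readers following the send/reap pairs above and below: master_gtid_wait() blocks until the slave has applied the requested GTID position and then returns 0, returns -1 when the optional timeout (in seconds) expires first, and returns NULL for a NULL position, which is what the timeout, kill and status-counter checks in this test rely on. A condensed sketch with arbitrary example positions:

    SELECT master_gtid_wait('0-1-100');        # blocks, returns 0 once 0-1-100 is applied
    SELECT master_gtid_wait('0-1-100', 0.5);   # returns -1 if not reached within 0.5 seconds
    SELECT master_gtid_wait(NULL);             # returns NULL immediately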
+SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SELECT master_gtid_wait('1-1-1'); +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); +--replace_result $wait_time MASTER_GTID_WAIT_TIME +eval SET @a= $wait_time; +SELECT IF(@a <= 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " is larger than expected")) + AS Master_gtid_wait_time_as_expected; + + +--connect (s10,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +send SELECT master_gtid_wait('0-1-109'); + +--connection server_2 +# This one should time out. +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +SELECT master_gtid_wait('2-1-2', 0.5); +SHOW STATUS LIKE 'Master_gtid_wait_timeouts'; +SHOW STATUS LIKE 'Master_gtid_wait_count'; +let $wait_time = query_get_value(SHOW STATUS LIKE 'Master_gtid_wait_time', Value, 1); +--replace_result $wait_time MASTER_GTID_WAIT_TIME +eval SET @a= $wait_time; +# We expect a wait time of just a bit over 0.5 seconds. But thread scheduling +# and timer inaccuracies could introduce significant jitter. So allow a +# generous interval. +SELECT IF(@a BETWEEN 0.4*1000*1000 AND 100*1000*1000, "OK", CONCAT("Error: wait time ", @a, " not as expected")) AS Master_gtid_wait_time_as_expected; + +--replace_result $kill1_id KILL_ID +eval KILL QUERY $kill1_id; +--connection s3 +--error ER_QUERY_INTERRUPTED +reap; + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=2; +INSERT INTO t1 VALUES (4); + +--connection s9 +reap; + +--connection server_2 +--replace_result $kill2_id KILL_ID +eval KILL CONNECTION $kill2_id; + +--connection s6 +--error 2013,ER_CONNECTION_KILLED +reap; + +--connection server_1 +SET gtid_domain_id=1; +SET gtid_seq_no=4; +INSERT INTO t1 VALUES (5); +SET gtid_domain_id=2; +SET gtid_seq_no=5; +INSERT INTO t1 VALUES (6); + +--connection s8 +reap; +--connection s1 +reap; +--connection s2 +reap; +--connection s5 +reap; +--connection s10 +reap; + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=10; +INSERT INTO t1 VALUES (7); + +--connection s4 +reap; +--connection s7 +reap; + + +--echo *** Test gtid_slave_pos when used with GTID *** + +--connection server_2 +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 2; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 3; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +# Disable logging Annotate_rows events to preserve events count. +let $binlog_annotate_row_events_saved= `SELECT @@binlog_annotate_row_events`; +SET @@binlog_annotate_row_events= 0; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +eval SET @@binlog_annotate_row_events= $binlog_annotate_row_events_saved; +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 5; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +--save_master_pos + +--connection server_2 +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +--let $master_pos=3-1-100 +--source include/sync_with_master_gtid.inc +--source include/wait_for_slave_to_stop.inc +--error ER_NO_SUCH_TABLE +SELECT * FROM t2; +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +# Start the slave again, it should fail on the DROP TABLE as the table is not there. +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +--let $slave_sql_errno=1051 +--source include/wait_for_slave_sql_error.inc +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +SET GLOBAL slave_ddl_exec_mode= @saved_mode; + + +--echo *** Test GTID-connecting to a master with out-of-order sequence numbers in the binlog. *** + +# Create an out-of-order binlog on server 2. +# Let server 3 replicate to an out-of-order point, stop it, restart it, +# and check that it replicates correctly despite the out-of-order. 
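+#
+# As a minimal illustration (an aside the checks below do not depend on):
+# "out of order" arises because server 2 interleaves its own writes with
+# events replicated from server 1 in the same replication domain, so the
+# seq_no part of the GTIDs written to server 2's binlog stops being
+# monotonically increasing. The position a server has reached can always be
+# read back with:
+SELECT @@GLOBAL.gtid_binlog_pos;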
+ +--connection server_1 +SET gtid_domain_id= @@GLOBAL.gtid_domain_id; +INSERT INTO t1 VALUES (31); +--save_master_pos + +--connection server_2 +--sync_with_master +SET gtid_domain_id= @@GLOBAL.gtid_domain_id; +INSERT INTO t1 VALUES (32); + +--connection server_1 +INSERT INTO t1 VALUES (33); +--save_master_pos + +--connection server_2 +--sync_with_master +--save_master_pos + +--connection server_3 +--sync_with_master +--source include/stop_slave.inc + +--connection server_1 +INSERT INTO t1 VALUES (34); +--save_master_pos + +--connection server_2 +--sync_with_master +--save_master_pos + +--connection server_3 +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 30 ORDER BY a; +--save_master_pos + +--connection server_4 +--sync_with_master +SELECT * FROM t1 WHERE a >= 30 ORDER BY a; + + +# Clean up. +--connection server_1 +DROP TABLE t1; + + +--source include/rpl_end.inc --echo # --echo # Start of 10.2 tests diff --git a/mysql-test/suite/rpl/t/rpl_incident.test b/mysql-test/suite/rpl/t/rpl_incident.test index 4bb6477ca98..75d28d6a6c6 100644 --- a/mysql-test/suite/rpl/t/rpl_incident.test +++ b/mysql-test/suite/rpl/t/rpl_incident.test @@ -1 +1,61 @@ ---source include/rpl_incident.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/have_debug.inc +--source include/master-slave.inc + +SET @old_binlog_checksum=@@binlog_checksum; +SET GLOBAL BINLOG_CHECKSUM=none; +connection slave; +SET @old_binlog_checksum=@@binlog_checksum; +SET GLOBAL BINLOG_CHECKSUM=none; +connection master; + +--echo **** On Master **** +CREATE TABLE t1 (a INT); + +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; + +set @saved_dbug = @@global.debug_dbug; +SET GLOBAL debug_dbug= '+d,incident_database_resync_on_replace,*'; + +# This will generate an incident log event and store it in the binary +# log before the replace statement. +REPLACE INTO t1 VALUES (4); +--save_master_pos +SELECT * FROM t1; + +set @@global.debug_dbug = @saved_dbug; + +connection slave; +# Wait until SQL thread stops with error LOST_EVENT on master +call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occurred on the master.* 1590"); +let $slave_sql_errno= 1590; +let $show_slave_sql_error= 1; +source include/wait_for_slave_sql_error.inc; + +# The 4 should not be inserted into the table, since the incident log +# event should have stop the slave. +--echo **** On Slave **** +SELECT * FROM t1; + +SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; +START SLAVE; +--sync_with_master + +# Now, we should have inserted the row into the table and the slave +# should be running. We should also have rotated to a new binary log. + +SELECT * FROM t1; +source include/check_slave_is_running.inc; + +connection master; +SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; +DROP TABLE t1; +--sync_slave_with_master +SET GLOBAL BINLOG_CHECKSUM=@old_binlog_checksum; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test index 6f6ab7e8d7c..46673ea4764 100644 --- a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test +++ b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test @@ -1 +1,96 @@ ---source include/rpl_init_slave_errors.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +###################################################################### +# Some errors that cause the slave SQL thread to stop are not shown in +# the Slave_SQL_Error column of "SHOW SLAVE STATUS". Instead, the error +# is only in the server's error log. +# +# Two failures and their respective reporting are verified: +# +# 1 - Failures during slave thread initialization +# 2 - Failures while processing queries passed through the init_slave +# option. +# +# In order to check the first type of failure, we inject a fault in the +# SQL/IO Threads through SET GLOBAL debug. +# +# To check the second type, we set @@global.init_slave to an invalid +# command thus preventing the initialization of the SQL Thread. +# +# Obs: +# 1 - Note that testing failures while initializing the relay log position +# is hard as the same function is called before the code reaches the point +# that we want to test. +# +# 2 - This test does not target failures that are reported while applying +# events such as duplicate keys, errors while reading the relay-log.bin*, +# etc. Such errors are already checked on other tests. +###################################################################### + +###################################################################### +# Configuring the Environment +###################################################################### +source include/have_debug.inc; +source include/have_log_bin.inc; +source include/master-slave.inc; + +connection slave; + +--disable_warnings +stop slave; +--enable_warnings +reset slave; + +###################################################################### +# Injecting faults in the threads' initialization +###################################################################### +connection slave; + +# Set debug flags on slave to force errors to occur +set @saved_dbug = @@global.debug_dbug; +SET GLOBAL debug_dbug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init"; + +start slave; + +# +# slave is going to stop because of emulated failures +# but there won't be any crashes nor asserts hit. 
+# +# 1593 = ER_SLAVE_FATAL_ERROR +--let $slave_sql_errno= 1593 +--let $show_slave_sql_error= 1 +--source include/wait_for_slave_sql_error.inc + +call mtr.add_suppression("Failed during slave.* thread initialization"); + +set @@global.debug_dbug = @saved_dbug; + +###################################################################### +# Injecting faults in the init_slave option +###################################################################### +connection slave; + +reset slave; + +SET GLOBAL init_slave= "garbage"; + +start slave; +# 1064 = ER_PARSE_ERROR +--let $slave_sql_errno= 1064 +--let $show_slave_sql_error= 1 +--source include/wait_for_slave_sql_error.inc + +###################################################################### +# Clean up +###################################################################### +SET GLOBAL init_slave= ""; + +# Clean up Last_SQL_Error +--source include/stop_slave_io.inc +RESET SLAVE; +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_loaddata_local.inc b/mysql-test/suite/rpl/t/rpl_loaddata_local.test index 20962d74e98..20962d74e98 100644 --- a/mysql-test/suite/rpl/include/rpl_loaddata_local.inc +++ b/mysql-test/suite/rpl/t/rpl_loaddata_local.test diff --git a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test b/mysql-test/suite/rpl/t/rpl_loaddatalocal.test deleted file mode 100644 index 712041467ab..00000000000 --- a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test +++ /dev/null @@ -1 +0,0 @@ ---source include/rpl_loaddata_local.inc diff --git a/mysql-test/suite/rpl/t/rpl_loadfile.test b/mysql-test/suite/rpl/t/rpl_loadfile.test index 10fecf1f653..9cd64530690 100644 --- a/mysql-test/suite/rpl/t/rpl_loadfile.test +++ b/mysql-test/suite/rpl/t/rpl_loadfile.test @@ -1 +1,120 @@ ---source include/rpl_loadfile.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +############################################################################# +# Original Author: JBM # +# Original Date: Aug/18/2005 # +############################################################################# +# TEST: To test the LOAD_FILE() in rbr # +############################################################################# +# Change Author: JBM +# Change Date: 2006-01-16 +########## + +# Includes +-- source include/have_binlog_format_mixed_or_row.inc +-- source include/master-slave.inc + +-- source suite/rpl/include/rpl_loadfile.test + +# BUG#39701: Mixed binlog format does not switch to row mode on LOAD_FILE +# +# DESCRIPTION +# +# Problem: when using load_file string function and mixed binlogging format +# there was no switch to row based binlogging format. This leads +# to scenarios on which the slave replicates the statement and it +# will try to load the file from local file system, which in most +# likely it will not exist. +# +# Solution: +# Marking this function as unsafe for statement format, makes the +# statement using it to be logged in row based format. As such, data +# replicated from the master, becomes the content of the loaded file. +# Consequently, the slave receives the necessary data to complete +# the load_file instruction correctly. +# +# IMPLEMENTATION +# +# The test is implemented as follows: +# +# On Master, +# i) write to file the desired content. 
+# ii) create table and stored procedure with load_file
+# iii) stop slave
+# iv) execute load_file
+# v) remove file
+#
+# On Slave,
+# vi) start slave
+# vii) sync it with master so that it gets the updates from binlog (which
+# should have been binlogged in row format).
+#
+# If the binlog format does not change to row, then the assertion
+# done in the following step fails. This happens because tables differ
+# since the file does not exist anymore, meaning that when the slave
+# attempts to execute the LOAD_FILE statement it inserts NULL into the table
+# instead of the same contents that the master loaded when it executed
+# the procedure (which was executed when file existed).
+#
+# viii) assert that the contents of master and slave
+# table are the same
+
+--source include/rpl_reset.inc
+
+connection master;
+let $file= $MYSQLTEST_VARDIR/tmp/bug_39701.data;
+
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--eval SELECT repeat('x',20) INTO OUTFILE '$file'
+
+disable_warnings;
+DROP TABLE IF EXISTS t1;
+enable_warnings;
+
+CREATE TABLE t1 (t text);
+DELIMITER |;
+CREATE PROCEDURE p(file varchar(4096))
+  BEGIN
+    INSERT INTO t1 VALUES (LOAD_FILE(file));
+  END|
+DELIMITER ;|
+
+# stop slave before issuing the load_file on master
+connection slave;
+source include/stop_slave.inc;
+
+connection master;
+
+# test: check that logging falls back to rbr.
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--eval CALL p('$file')
+
+# test: remove the file from the filesystem and assert that slave still
+# gets the loaded file
+remove_file $file;
+
+# now that the file is removed it is safe (regarding what we want to test)
+# to start slave
+connection slave;
+source include/start_slave.inc;
+
+connection master;
+sync_slave_with_master;
+
+# assertion: assert that the slave got the updates even
+# if the file was removed before the slave started,
+# meaning that contents were indeed transferred
+# through binlog (in row format)
+let $diff_tables= master:t1, slave:t1;
+source include/diff_tables.inc;
+
+# CLEAN UP
+--connection master
+DROP TABLE t1;
+DROP PROCEDURE p;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_packet.test b/mysql-test/suite/rpl/t/rpl_packet.test
index 1bf99c2366b..cbde486bcbb 100644
--- a/mysql-test/suite/rpl/t/rpl_packet.test
+++ b/mysql-test/suite/rpl/t/rpl_packet.test
@@ -1 +1,184 @@
---source include/rpl_packet.inc
+#
+# This include file is used by more than one test suite
+# (currently rpl and binlog_encryption).
+# Please check all dependent tests after modifying it +# + +# ==== Purpose ==== +# +# Check replication protocol packet size handling +# +# ==== Related bugs ==== +# Bug#19402 SQL close to the size of the max_allowed_packet fails on slave +# BUG#23755: Replicated event larger that max_allowed_packet infinitely re-transmits +# BUG#42914: No LAST_IO_ERROR for max_allowed_packet errors +# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET + +# max-out size db name +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); +call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); +let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; +disable_warnings; +eval drop database if exists $db; +enable_warnings; +eval create database $db; + +connection master; +let $old_max_allowed_packet= `SELECT @@global.max_allowed_packet`; +let $old_net_buffer_length= `SELECT @@global.net_buffer_length`; +let $old_slave_max_allowed_packet= `SELECT @@global.slave_max_allowed_packet`; +SET @@global.max_allowed_packet=1024; +SET @@global.net_buffer_length=1024; + +sync_slave_with_master; +# Restart slave for setting to take effect +source include/stop_slave.inc; +source include/start_slave.inc; + +# Reconnect to master for new setting to take effect +disconnect master; + +# alas, can't use eval here; if db name changed apply the change here +connect (master,localhost,root,,DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________); + +connection master; +select @@net_buffer_length, @@max_allowed_packet; + +create table `t1` (`f1` LONGTEXT) ENGINE=MyISAM; + +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1023'); +sync_slave_with_master; + +eval select count(*) from `$db`.`t1` /* must be 1 */; + +SHOW STATUS LIKE 'Slave_running'; +select * from information_schema.session_status where variable_name= 'SLAVE_RUNNING'; +connection master; +eval drop database $db; +sync_slave_with_master; + +# +# Bug #23755: Replicated event larger that max_allowed_packet infinitely re-transmits +# +# Check that a situation when the size of event on the master is greater than +# max_allowed_packet on the slave does not lead to infinite re-transmits. 
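+#
+# As a minimal illustration (an aside the steps below do not depend on): the
+# slave I/O thread enforces slave_max_allowed_packet for replication events,
+# independently of the regular max_allowed_packet limit, so an oversized event
+# is reported once as error 1153 (ER_NET_PACKET_TOO_LARGE) instead of being
+# re-requested forever. The limits involved can be inspected with:
+SELECT @@global.max_allowed_packet, @@global.net_buffer_length, @@global.slave_max_allowed_packet;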
+ +connection master; + +# Change the max packet size on master + +SET @@global.max_allowed_packet=4096; +SET @@global.net_buffer_length=4096; + +# Restart slave for new setting to take effect +connection slave; +source include/stop_slave.inc; +source include/start_slave.inc; + +# Reconnect to master for new setting to take effect +disconnect master; +connect (master, localhost, root); +connection master; + +CREATE TABLE `t1` (`f1` LONGTEXT) ENGINE=MyISAM; + +sync_slave_with_master; + +connection master; + +INSERT INTO `t1`(`f1`) VALUES ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2048'); + + +# +# Bug#42914: The slave I/O thread must stop after trying to read the above +# event, However there is no Last_IO_Error report. +# + +# The slave I/O thread must stop after trying to read the above event +connection slave; +# 1153 = ER_NET_PACKET_TOO_LARGE +--let $slave_io_errno= 1153 +--let $show_slave_io_error= 1 +--source include/wait_for_slave_io_error.inc + +# TODO: this is needed because of BUG#55790. Remove once that is fixed. +--source include/stop_slave_sql.inc + +# +# Bug#42914: On the master, if a binary log event is larger than +# max_allowed_packet, the error message ER_MASTER_FATAL_ERROR_READING_BINLOG +# is sent to a slave when it requests a dump from the master, thus leading the +# I/O thread to stop. However, there is no Last_IO_Error reported. 
+# + +--let $rpl_only_running_threads= 1 +--source include/rpl_reset.inc +--connection master +DROP TABLE t1; +--sync_slave_with_master + + +connection master; +CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM; +sync_slave_with_master; + +connection master; +INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet)); + +connection slave; +# The slave I/O thread must stop after receiving +# 1153 = ER_NET_PACKET_TOO_LARGE +--let $slave_io_errno= 1153 +--let $show_slave_io_error= 1 +--source include/wait_for_slave_io_error.inc + +# Remove the bad binlog and clear error status on slave. +STOP SLAVE; +RESET SLAVE; +--connection master +RESET MASTER; + + +# +# BUG#55322: SHOW BINLOG EVENTS increases @@SESSION.MAX_ALLOWED_PACKET +# +# In BUG#55322, @@session.max_allowed_packet increased each time SHOW +# BINLOG EVENTS was issued. To verify that this bug is fixed, we +# execute SHOW BINLOG EVENTS twice and check that max_allowed_packet +# never changes. We turn off the result log because we don't care +# about the contents of the binlog. + +--disable_result_log +SET @max_allowed_packet_0= @@session.max_allowed_packet; +SHOW BINLOG EVENTS; +SET @max_allowed_packet_1= @@session.max_allowed_packet; +SHOW BINLOG EVENTS; +SET @max_allowed_packet_2= @@session.max_allowed_packet; +--enable_result_log +if (`SELECT NOT(@max_allowed_packet_0 = @max_allowed_packet_1 AND @max_allowed_packet_1 = @max_allowed_packet_2)`) +{ + --echo ERROR: max_allowed_packet changed after executing SHOW BINLOG EVENTS + --source include/show_rpl_debug_info.inc + SELECT @max_allowed_packet_0, @max_allowed_packet_1, @max_allowed_packet_2; + --die @max_allowed_packet changed after executing SHOW BINLOG EVENTS +} + + +--echo ==== clean up ==== +connection master; +DROP TABLE t1; +eval SET @@global.max_allowed_packet= $old_max_allowed_packet; +eval SET @@global.net_buffer_length= $old_net_buffer_length; +eval SET @@global.slave_max_allowed_packet= $old_slave_max_allowed_packet; +# slave is stopped +connection slave; +DROP TABLE t1; + +# Clear Last_IO_Error +RESET SLAVE; + +--source include/rpl_end.inc +# End of tests diff --git a/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test b/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test index 90f09a76546..7a6a758a508 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_ignored_errors.test @@ -1 +1,112 @@ ---source include/rpl_parallel_ignored_errors.inc +# ==== Purpose ==== +# +# Test verifies that, in parallel replication, transaction failure notification +# is propagated to all the workers. Workers should abort the execution of +# transaction event groups, whose event positions are higher than the failing +# transaction group. +# +# ==== Implementation ==== +# +# Steps: +# 0 - Create a table t1 on master which has a primary key. Enable parallel +# replication on slave with slave_parallel_mode='optimistic' and +# slave_parallel_threads=3. +# 1 - On slave start a transaction and execute a local INSERT statement +# which will insert value 32. This is done to block the INSERT coming +# from master. +# 2 - On master execute an INSERT statement with value 32, so that it is +# blocked on slave. +# 3 - On slave enable a debug sync point such that it holds the worker thread +# execution as soon as work is scheduled to it. +# 4 - INSERT value 33 on master. It will be held on slave by other worker +# thread due to debug simulation. 
+# 5 - INSERT value 34 on master.
+# 6 - On slave, ensure that INSERT 34 has reached a state where it waits for
+# its prior transactions to commit.
+# 7 - Commit the local INSERT 32 on the slave server so that the first worker will
+# error out.
+# 8 - Now send a continue signal to the second worker processing 33. It should
+# wake up and propagate the error to INSERT 34.
+# 9 - Upon slave stop due to error, check that no rows are found after the
+# failed INSERT 32.
+#
+# ==== References ====
+#
+# MDEV-20645: Replication consistency is broken as workers miss the error
+# notification from an earlier failed group.
+#
+
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_statement.inc
+--source include/master-slave.inc
+
+--enable_connect_log
+--connection server_2
+--source include/stop_slave.inc
+SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
+SET @old_debug= @@GLOBAL.debug_dbug;
+SET GLOBAL slave_parallel_mode='optimistic';
+SET GLOBAL slave_parallel_threads= 3;
+CHANGE MASTER TO master_use_gtid=slave_pos;
+CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
+--source include/start_slave.inc
+
+--connection server_1
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+
+--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
+BEGIN;
+INSERT INTO t1 VALUES (32);
+
+--connection server_1
+INSERT INTO t1 VALUES (32);
+
+--connection server_2
+--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)"
+--source include/wait_condition.inc
+SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
+SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
+
+--connection server_1
+SET gtid_seq_no=100;
+INSERT INTO t1 VALUES (33);
+
+--connection server_2
+SET debug_sync='now WAIT_FOR reached_pause';
+
+--connection server_1
+INSERT INTO t1 VALUES (34);
+
+--connection server_2
+--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit"
+--source include/wait_condition.inc
+--connection con_temp2
+COMMIT;
+
+# Clean up.
+--connection server_2 +--source include/stop_slave.inc +--let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32 +--let $assert_text= table t1 should have zero rows where a>32 +--source include/assert.inc +SELECT * FROM t1 WHERE a>32; +DELETE FROM t1 WHERE a=32; + +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL debug_dbug=@old_debug; +SET DEBUG_SYNC= 'RESET'; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; +--disable_connect_log +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test new file mode 100644 index 00000000000..27f38d47bdb --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_error_stop.test @@ -0,0 +1,180 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit"); +call mtr.add_suppression("Slave: Duplicate entry '99'"); + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1,1); # hit a dup entry on slave +INSERT INTO t1 VALUES(2,1); # races to "win" the last exit +INSERT INTO t1 VALUES(3,1); +INSERT INTO t1 VALUES(4,1); # make W3 race over W1 +--save_master_pos + +--connection server_2 +--sync_with_master +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET @old_debug_dbug = @@global.debug_dbug; +# In a group of W1,W2,W3 of the same batch W2 simulates slowness. +SET @@global.debug_dbug = "d,hold_worker2_favor_worker3"; +SET GLOBAL slave_parallel_threads=4; +CHANGE MASTER TO master_use_gtid=slave_pos; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='optimistic'; + +# MDEV-30780 optimistic parallel slave hangs after hit an error +# Test workers hang scenario to prove it's no more neither +# out-of-order access to the active gco list. +# +# The test provides how to reproduce on the OLD version, false by default. +# That branch approximates the original hang with an assert that +# confirms the OLD version indeed could access already reclaimed gco. +--let $old_version_regression=0 + + +--connection server_1 + +# Let W1,W2,W3,W4 parallel workers that are going to execute +# the following transaction. +# W1 holds on with the 1st statement +# then crashes W3 with the 2nd into retry, +# finally hits with the 3rd a dup entry, on slave. +SET @@gtid_seq_no = 2001; +BEGIN; + UPDATE t1 SET b = 11 WHERE a = 4; + UPDATE t1 SET b = 11 WHERE a = 3; + UPDATE t1 SET a = 99 WHERE a = 1; +COMMIT; +# In the buggy version W2 races to "win" the exit last (of W1..3) +# and by that to access last a gco struct, garbage-collected. +UPDATE t1 SET b = 2 WHERE a = 2; +# W3 garbage-collects the gco struct in the buggy version. +UPDATE t1 SET b = 3 WHERE a = 3; +# W4 resides in following "singleton" batch to a W2 replacement +# in the buggy version to allow W3 reclaim the batch's gco. 
+DROP TABLE IF EXISTS phantom_1;
+
+--source include/save_master_gtid.inc
+
+--connect (slave_local_0, 127.0.0.1, root,, test, $SLAVE_MYPORT,)
+begin;
+ UPDATE t1 set b = 11 where a = 4;
+--connect (slave_local_1, 127.0.0.1, root,, test, $SLAVE_MYPORT,)
+begin;
+ INSERT INTO t1 VALUES (99, 11);
+
+--connect (slave_local_2, 127.0.0.1, root,, test, $SLAVE_MYPORT,)
+begin;
+ UPDATE t1 SET b = 12 WHERE a = 2;
+
+--connect (slave_local_3, 127.0.0.1, root,, test, $SLAVE_MYPORT,)
+begin;
+ UPDATE t1 SET b = 13 WHERE a = 3;
+
+--connection server_2
+--source include/start_slave.inc
+
+--echo # W4 is waiting to start its DROP
+
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%"
+--source include/wait_condition.inc
+
+--connection slave_local_3
+# make W3 set E.cc <- 1
+ rollback;
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%"
+--source include/wait_condition.inc
+
+--connection slave_local_0
+# make W3 go into retry and delay it to let W1 hit a duplicate error first,
+# see 'commit' by slave_local_1.
+ rollback;
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "debug sync point: now"
+--source include/wait_condition.inc
+SELECT count(*) = 0 as "W3 undid its commit state" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%";
+
+
+--connection slave_local_2
+ rollback;
+# wait for W2 to start committing E.cc <- 2
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit"
+--source include/wait_condition.inc
+
+--connection slave_local_1
+
+# W1 errors out
+# A. to alert W3
+# B. W3 will *not* wake up W4 in the fixed version, having to wait for W2's demise.
+# C. W2 will notify W3, which releases W4 as it would in normal cases.
+commit;
+
+if (!$old_version_regression)
+{
+# A. In the fixed version show-processlist W4 is still in the ordered waiting
+SELECT COUNT(*) = 1 as "W4 remains with the same status" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit%";
+--let $status= query_get_value("show slave status", Slave_SQL_Running, 1)
+--echo # Slave_SQL_Running YES = $status
+
+# B. In the fixed version W3 is waiting for W2,...
+--let $wait_condition= SELECT count(*) = 1 as "W4 is waiting" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%"
+--source include/wait_condition.inc
+--echo # while W2 is held back ...
+--let $wait_condition= SELECT count(*) = 1 as "W2 simulates slowness" FROM information_schema.processlist WHERE state LIKE "debug sync point: now"
+--source include/wait_condition.inc
+
+# C. # ...until NOW.
+SET DEBUG_SYNC = 'now SIGNAL cont_worker2';
+
+}
+
+# To reproduce the hang on the OLD version ...
+if ($old_version_regression)
+{
+ # replace the actual fixes block with a check that W3,W4 have actually committed,
+ # followed by signaling W2 as if on behalf of W4, which would end up in the hang.
+ --let $wait_condition= SELECT COUNT(*) = 0 as "W4 has moved on" FROM information_schema.processlist WHERE state like "Waiting for prior transaction to start commit" + --source include/wait_condition.inc + --let $wait_condition= SELECT count(*) = 0 as "W3 does not wait on W2" FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit%" +--source include/wait_condition.inc + + --let $wait_condition= SELECT count(*) = 1 as "W2 simulates slowness" FROM information_schema.processlist WHERE state LIKE "debug sync point: now" + --source include/wait_condition.inc + + # Like above, but signaling is done after W4 is done to violate the commit order + # that must fire a debug assert. + SET DEBUG_SYNC = 'now SIGNAL cont_worker2'; +} + +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +# Restore the slave data and resume with replication +DELETE FROM t1 WHERE a=99; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +# +# Clean up. +# +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET @@global.debug_dbug = @old_debug_dbug; +SET debug_sync = RESET; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test b/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test index 8c8892d5370..cddc9286bd2 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test @@ -1 +1,38 @@ ---source include/rpl_parallel_show_binlog_events_purge_logs.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# BUG#13979418: SHOW BINLOG EVENTS MAY CRASH THE SERVER +# +# The function mysql_show_binlog_events has a local stack variable +# 'LOG_INFO linfo;', which is assigned to thd->current_linfo, however +# this variable goes out of scope and is destroyed before clean +# thd->current_linfo. +# +# This test case runs SHOW BINLOG EVENTS and FLUSH LOGS to make sure +# that with the fix local variable linfo is valid along all +# mysql_show_binlog_events function scope. +# +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection slave +SET DEBUG_SYNC= 'after_show_binlog_events SIGNAL on_show_binlog_events WAIT_FOR end'; +--send SHOW BINLOG EVENTS + +--connection slave1 +SET DEBUG_SYNC= 'now WAIT_FOR on_show_binlog_events'; +FLUSH LOGS; +SET DEBUG_SYNC= 'now SIGNAL end'; + +--connection slave +--disable_result_log +--reap +--enable_result_log +SET DEBUG_SYNC= 'RESET'; + +--connection master +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_relayrotate.test b/mysql-test/suite/rpl/t/rpl_relayrotate.test index 720739e14c0..4de554d3143 100644 --- a/mysql-test/suite/rpl/t/rpl_relayrotate.test +++ b/mysql-test/suite/rpl/t/rpl_relayrotate.test @@ -1 +1,18 @@ ---source include/rpl_relayrotate.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +####################################################### +# Wrapper for rpl_relayrotate.test to allow multi # +# Engines to reuse test code. By JBM 2006-02-15 # +####################################################### +-- source include/have_innodb.inc +# Slow test, don't run during staging part +-- source include/not_staging.inc +-- source include/master-slave.inc + +let $engine_type=innodb; +-- source suite/rpl/include/rpl_relayrotate.test +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync.test b/mysql-test/suite/rpl/t/rpl_semi_sync.test index 5c17bcb2344..c3cd918b5fc 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync.test @@ -1 +1,525 @@ ---source include/rpl_semi_sync.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +source include/not_embedded.inc; +source include/have_innodb.inc; +source include/master-slave.inc; + +let $engine_type= InnoDB; + +# Suppress warnings that might be generated during the test +connection master; +call mtr.add_suppression("Timeout waiting for reply of binlog"); +call mtr.add_suppression("Read semi-sync reply"); +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); +call mtr.add_suppression("mysqld: Got an error reading communication packets"); +connection slave; +call mtr.add_suppression("Master server does not support semi-sync"); +call mtr.add_suppression("Semi-sync slave .* reply"); +call mtr.add_suppression("Slave SQL.*Request to stop slave SQL Thread received while applying a group that has non-transactional changes; waiting for completion of the group"); +connection master; + +# wait for dying connections (if any) to disappear +let $wait_condition= select count(*) = 0 from information_schema.processlist where command='killed'; +--source include/wait_condition.inc + +# After fix of BUG#45848, semi-sync slave should not create any extra +# connections on master, save the count of connections before start +# semi-sync slave for comparison below. 
+let $_connections_normal_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); + +--echo # +--echo # Uninstall semi-sync plugins on master and slave +--echo # +connection slave; +source include/stop_slave.inc; +reset slave; +set global rpl_semi_sync_master_enabled= 0; +set global rpl_semi_sync_slave_enabled= 0; + +connection master; +reset master; +set global rpl_semi_sync_master_enabled= 0; +set global rpl_semi_sync_slave_enabled= 0; + +--echo # +--echo # Main test of semi-sync replication start here +--echo # + +connection master; + +set global rpl_semi_sync_master_timeout= 60000; # 60s + +echo [ default state of semi-sync on master should be OFF ]; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ enable semi-sync on master ]; +set global rpl_semi_sync_master_enabled = 1; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ status of semi-sync on master should be ON even without any semi-sync slaves ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # BUG#45672 Semisync repl: ActiveTranx:insert_tranx_node: transaction node allocation failed +--echo # BUG#45673 Semisynch reports correct operation even if no slave is connected +--echo # + +# BUG#45672 When semi-sync is enabled on master, it would allocate +# transaction node even without semi-sync slave connected, and would +# finally result in transaction node allocation error. +# +# Semi-sync master will pre-allocate 'max_connections' transaction +# nodes, so here we do more than that much transactions to check if it +# will fail or not. +# select @@global.max_connections + 1; +let $i= `select @@global.max_connections + 1`; +disable_query_log; +eval create table t1 (a int) engine=$engine_type; +while ($i) +{ + eval insert into t1 values ($i); + dec $i; +} +drop table t1; +enable_query_log; + +# BUG#45673 +echo [ status of semi-sync on master should be OFF ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# reset master to make sure the following test will start with a clean environment +reset master; + +connection slave; + +echo [ default state of semi-sync on slave should be OFF ]; +show variables like 'rpl_semi_sync_slave_enabled'; + +echo [ enable semi-sync on slave ]; +set global rpl_semi_sync_slave_enabled = 1; +show variables like 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; + +connection master; + +# NOTE: Rpl_semi_sync_master_client will only be updated when +# semi-sync slave has started binlog dump request +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; + +echo [ initial master state after the semi-sync slave connected ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1(a int) engine = $engine_type; + +echo [ master state after CREATE TABLE statement ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# After fix of BUG#45848, semi-sync slave should not create any extra +# connections on master. 
+let $_connections_semisync_slave= query_get_value(SHOW STATUS LIKE 'Threads_connected', Value, 1); +replace_result $_connections_normal_slave CONNECTIONS_NORMAL_SLAVE $_connections_semisync_slave CONNECTIONS_SEMISYNC_SLAVE; +eval select $_connections_semisync_slave - $_connections_normal_slave as 'Should be 0'; + +echo [ insert records to table ]; +insert t1 values (10); +insert t1 values (9); +insert t1 values (8); +insert t1 values (7); +insert t1 values (6); +insert t1 values (5); +insert t1 values (4); +insert t1 values (3); +insert t1 values (2); +insert t1 values (1); + +echo [ master status after inserts ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +sync_slave_with_master; + +echo [ slave status after replicated inserts ]; +show status like 'Rpl_semi_sync_slave_status'; + +select count(distinct a) from t1; +select min(a) from t1; +select max(a) from t1; + +--echo +--echo # BUG#50157 +--echo # semi-sync replication crashes when replicating a transaction which +--echo # include 'CREATE TEMPORARY TABLE `MyISAM_t` SELECT * FROM `Innodb_t` ; + +connection master; +SET SESSION AUTOCOMMIT= 0; +CREATE TABLE t2(c1 INT) ENGINE=innodb; +sync_slave_with_master; + +connection master; +BEGIN; +--echo +--echo # Even though it is in a transaction, this statement is binlogged into binlog +--echo # file immediately. +--disable_warnings +CREATE TEMPORARY TABLE t3 SELECT c1 FROM t2 where 1=1; +--enable_warnings +--echo +--echo # These statements will not be binlogged until the transaction is committed +INSERT INTO t2 VALUES(11); +INSERT INTO t2 VALUES(22); +COMMIT; + +DROP TABLE t2, t3; +SET SESSION AUTOCOMMIT= 1; +sync_slave_with_master; + + +--echo # +--echo # Test semi-sync master will switch OFF after one transaction +--echo # timeout waiting for slave reply. +--echo # +connection slave; +source include/stop_slave.inc; + +connection master; +--source include/kill_binlog_dump_threads.inc +set global rpl_semi_sync_master_timeout= 5000; + +# The first semi-sync check should be on because after slave stop, +# there are no transactions on the master. +echo [ master status should be ON ]; + +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; + +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +echo [ semi-sync replication of these transactions will fail ]; +insert into t1 values (500); + +# Wait for the semi-sync replication of this transaction to timeout +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= OFF; +source include/wait_for_status_var.inc; + +# The second semi-sync check should be off because one transaction +# times out during waiting. +echo [ master status should be OFF ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +# Semi-sync status on master is now OFF, so all these transactions +# will be replicated asynchronously. 
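+#
+# As a minimal illustration (an aside the checks below do not depend on):
+# during this asynchronous fallback Rpl_semi_sync_master_status reports OFF
+# even though rpl_semi_sync_master_enabled is still 1; the configuration and
+# the operational state can be compared directly:
+show variables like 'rpl_semi_sync_master_enabled';
+show status like 'Rpl_semi_sync_master_status';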
+delete from t1 where a=10; +delete from t1 where a=9; +delete from t1 where a=8; +delete from t1 where a=7; +delete from t1 where a=6; +delete from t1 where a=5; +delete from t1 where a=4; +delete from t1 where a=3; +delete from t1 where a=2; +delete from t1 where a=1; + +insert into t1 values (100); + +echo [ master status should be OFF ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Test semi-sync status on master will be ON again when slave catches up +--echo # + +# Save the master position for later use. +save_master_pos; + +connection slave; + +echo [ slave status should be OFF ]; +show status like 'Rpl_semi_sync_slave_status'; +source include/start_slave.inc; +sync_with_master; + +echo [ slave status should be ON ]; +show status like 'Rpl_semi_sync_slave_status'; + +select count(distinct a) from t1; +select min(a) from t1; +select max(a) from t1; + +connection master; + +# The master semi-sync status should be on again after slave catches up. +echo [ master status should be ON again after slave catches up ]; + +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; +show status like 'Rpl_semi_sync_master_clients'; + +--echo # +--echo # Test disable/enable master semi-sync on the fly. +--echo # + +drop table t1; +sync_slave_with_master; + +source include/stop_slave.inc; + +--echo # +--echo # Flush status +--echo # +connection master; +echo [ Semi-sync master status variables before FLUSH STATUS ]; +SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; +SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; +# Do not write the FLUSH STATUS to binlog, to make sure we'll get a +# clean status after this. +FLUSH NO_WRITE_TO_BINLOG STATUS; +echo [ Semi-sync master status variables after FLUSH STATUS ]; +SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx'; +SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx'; + +connection master; + +source include/show_master_logs.inc; +show variables like 'rpl_semi_sync_master_enabled'; + +echo [ disable semi-sync on the fly ]; +set global rpl_semi_sync_master_enabled=0; +show variables like 'rpl_semi_sync_master_enabled'; +show status like 'Rpl_semi_sync_master_status'; + +echo [ enable semi-sync on the fly ]; +set global rpl_semi_sync_master_enabled=1; +show variables like 'rpl_semi_sync_master_enabled'; +show status like 'Rpl_semi_sync_master_status'; + +--echo # +--echo # Test RESET MASTER/SLAVE +--echo # + +connection slave; + +source include/start_slave.inc; + +connection master; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1 (a int) engine = $engine_type; +drop table t1; + +sync_slave_with_master; + +echo [ test reset master ]; +connection master; + +reset master; + +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +connection slave; + +source include/stop_slave.inc; +reset slave; + +# Kill the dump thread on master for previous slave connection and +--source include/kill_binlog_dump_threads.inc + +connection slave; +source include/start_slave.inc; + +connection master; + +# Wait for dump thread to start, Rpl_semi_sync_master_clients will be +# 1 after dump thread started. 
+let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; + +replace_result $engine_type ENGINE_TYPE; +eval create table t1 (a int) engine = $engine_type; +insert into t1 values (1); +insert into t1 values (2), (3); + +sync_slave_with_master; + +select * from t1; + +connection master; + +echo [ master semi-sync status should be ON ]; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Start semi-sync replication without SUPER privilege +--echo # +connection slave; +source include/stop_slave.inc; +reset slave; +connection master; +reset master; + +# Kill the dump thread on master for previous slave connection and wait for it to exit +--source include/kill_binlog_dump_threads.inc + +# Do not binlog the following statement because it will generate +# different events for ROW and STATEMENT format +set sql_log_bin=0; +grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; +flush privileges; +set sql_log_bin=1; +connection slave; +grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password'; +flush privileges; +change master to master_user='rpl',master_password='rpl_password'; +source include/start_slave.inc; +show status like 'Rpl_semi_sync_slave_status'; +connection master; + +# Wait for the semi-sync binlog dump thread to start +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; +echo [ master semi-sync should be ON ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; +insert into t1 values (4); +insert into t1 values (5); +echo [ master semi-sync should be ON ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_no_tx'; +show status like 'Rpl_semi_sync_master_yes_tx'; + +--echo # +--echo # Test semi-sync slave connect to non-semi-sync master +--echo # + +# Disable semi-sync on master +connection slave; +source include/stop_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +connection master; + +# Kill the dump thread on master for previous slave connection and wait for it to exit +--source include/kill_binlog_dump_threads.inc + +echo [ Semi-sync status on master should be ON ]; +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; +show status like 'Rpl_semi_sync_master_status'; +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +set global rpl_semi_sync_master_enabled= 0; + +connection slave; +SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; +connection master; +insert into t1 values (8); +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; +echo [ master semi-sync clients should be 1, status should be OFF ]; +show status like 'Rpl_semi_sync_master_clients'; +show status like 'Rpl_semi_sync_master_status'; +sync_slave_with_master; +show status like 'Rpl_semi_sync_slave_status'; + +# Uninstall semi-sync plugin on master +connection slave; +source include/stop_slave.inc; +connection master; +set global rpl_semi_sync_master_enabled= 0; + +connection slave; 
+SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +source include/start_slave.inc; + +connection master; +insert into t1 values (10); +sync_slave_with_master; + +--echo # +--echo # Test non-semi-sync slave connect to semi-sync master +--echo # + +connection master; +set global rpl_semi_sync_master_timeout= 5000; # 5s +set global rpl_semi_sync_master_enabled= 1; + +connection slave; +source include/stop_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +echo [ uninstall semi-sync slave plugin ]; +set global rpl_semi_sync_slave_enabled= 0; + +echo [ reinstall semi-sync slave plugin and disable semi-sync ]; +SHOW VARIABLES LIKE 'rpl_semi_sync_slave_enabled'; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; +source include/start_slave.inc; +SHOW STATUS LIKE 'Rpl_semi_sync_slave_status'; + +--echo # +--echo # Clean up +--echo # + +connection slave; +source include/stop_slave.inc; +set global rpl_semi_sync_slave_enabled= 0; + +connection master; +set global rpl_semi_sync_master_enabled= 0; + +connection slave; +change master to master_user='root',master_password=''; +source include/start_slave.inc; + +connection master; +drop table t1; +sync_slave_with_master; + +connection master; +drop user rpl@127.0.0.1; +flush privileges; +set global rpl_semi_sync_master_timeout= default; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test b/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test index dcff4030fdb..5eae91a55f2 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_wait_point.test @@ -23,8 +23,6 @@ SET @@global.rpl_semi_sync_master_wait_no_slave = 1; --echo # It's okay to see "Killed" but we should not see "Timeout" in the log. call mtr.add_suppression("Killed waiting for reply of binlog"); -call mtr.add_suppression("Run function 'after_commit' in plugin 'rpl_semi_sync_master' failed"); -call mtr.add_suppression("Run function 'after_sync' in plugin 'rpl_semi_sync_master' failed"); --echo # --echo # Test wait point = AFTER_COMMIT diff --git a/mysql-test/suite/rpl/t/rpl_skip_replication.test b/mysql-test/suite/rpl/t/rpl_skip_replication.test index 66fdbb8915a..97fc961d438 100644 --- a/mysql-test/suite/rpl/t/rpl_skip_replication.test +++ b/mysql-test/suite/rpl/t/rpl_skip_replication.test @@ -1 +1,402 @@ ---source include/rpl_skip_replication.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it. +# +# Usage: +# +# --let $use_remote_mysqlbinlog= 1 # optional +# --source suite/rpl/include/rpl_skip_replication.inc +# +# The script uses MYSQLBINLOG to verify certain results. +# By default, it uses binary logs directly. If it is undesirable, +# this behavior can be overridden by setting $use_remote_binlog +# as shown above. +# The value will be unset after every execution of the script, +# so if it is needed, it should be set explicitly before each call. +# + +--source include/have_innodb.inc +--source include/master-slave.inc + +connection slave; +# Test that SUPER is required to change @@replicate_events_marked_for_skip. 
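+# (As orientation only: the variable accepts REPLICATE, the default, plus
+# FILTER_ON_MASTER and FILTER_ON_SLAVE, all of which are exercised later in
+# this test; being a global variable, any connection can read it back.)
+SELECT @@global.replicate_events_marked_for_skip;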
+CREATE USER 'nonsuperuser'@'127.0.0.1'; +GRANT ALTER,CREATE,DELETE,DROP,EVENT,INSERT,PROCESS,REPLICATION SLAVE, + SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1'; +connect(nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,); +connection nonpriv; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +disconnect nonpriv; +connection slave; +DROP USER'nonsuperuser'@'127.0.0.1'; + +SELECT @@global.replicate_events_marked_for_skip; +--error ER_SLAVE_MUST_STOP +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +SELECT @@global.replicate_events_marked_for_skip; +STOP SLAVE; +--error ER_GLOBAL_VARIABLE +SET SESSION replicate_events_marked_for_skip=FILTER_ON_MASTER; +SELECT @@global.replicate_events_marked_for_skip; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +SELECT @@global.replicate_events_marked_for_skip; +START SLAVE; + +connection master; +SELECT @@skip_replication; +--error ER_LOCAL_VARIABLE +SET GLOBAL skip_replication=1; +SELECT @@skip_replication; + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=innodb; +INSERT INTO t1(a) VALUES (1); +INSERT INTO t2(a) VALUES (1); + + +# Test that master-side filtering works. +SET skip_replication=1; + +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t1(a) VALUES (2); +INSERT INTO t2(a) VALUES (2); + +# Inject a rotate event in the binlog stream sent to slave (otherwise we will +# fail sync_slave_with_master as the last event on the master is not present +# on the slave). +FLUSH NO_WRITE_TO_BINLOG LOGS; + +sync_slave_with_master; +connection slave; +SHOW TABLES; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; +DROP TABLE t3; + +FLUSH NO_WRITE_TO_BINLOG LOGS; +sync_slave_with_master; + + +# Test that slave-side filtering works. +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE; + +connection master; +SET skip_replication=1; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t1(a) VALUES (3); +INSERT INTO t2(a) VALUES (3); + +# Inject a rotate event in the binlog stream sent to slave (otherwise we will +# fail sync_slave_with_master as the last event on the master is not present +# on the slave). +FLUSH NO_WRITE_TO_BINLOG LOGS; + +sync_slave_with_master; +connection slave; +SHOW TABLES; +SELECT * FROM t1; +SELECT * FROM t2; + +connection master; +DROP TABLE t3; + +FLUSH NO_WRITE_TO_BINLOG LOGS; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; + + +# Test that events with @@skip_replication=1 are not filtered when filtering is +# not set on slave. +connection master; +SET skip_replication=1; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=myisam; +INSERT INTO t3(a) VALUES(2); +sync_slave_with_master; +connection slave; +SELECT * FROM t3; +connection master; +DROP TABLE t3; + +# +# Test that the slave will preserve the @@skip_replication flag in its +# own binlog. +# + +TRUNCATE t1; +sync_slave_with_master; +connection slave; +RESET MASTER; + +connection master; +SET skip_replication=0; +INSERT INTO t1 VALUES (1,0); +SET skip_replication=1; +INSERT INTO t1 VALUES (2,0); +SET skip_replication=0; +INSERT INTO t1 VALUES (3,0); + +sync_slave_with_master; +connection slave; +# Since slave has @@replicate_events_marked_for_skip=REPLICATE, it should have +# applied all events. 
+SELECT * FROM t1 ORDER by a; + +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +let $SLAVE_DATADIR= `select @@datadir`; + +connection master; +TRUNCATE t1; + +# Now apply the slave binlog to the master, to check that both the slave +# and mysqlbinlog will preserve the @@skip_replication flag. + +--let $mysqlbinlog_args= $SLAVE_DATADIR/slave-bin.000001 +if ($use_remote_mysqlbinlog) +{ + --let $mysqlbinlog_args= --read-from-remote-server --protocol=tcp --host=127.0.0.1 --port=$SLAVE_MYPORT -uroot slave-bin.000001 + --let $use_remote_mysqlbinlog= 0 +} +--exec $MYSQL_BINLOG $mysqlbinlog_args > $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog +--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/rpl_skip_replication.binlog + +# The master should have all three events. +SELECT * FROM t1 ORDER by a; + +# The slave should be missing event 2, which is marked with the +# @@skip_replication flag. + +connection slave; +START SLAVE; + +connection master; +sync_slave_with_master; + +connection slave; +SELECT * FROM t1 ORDER by a; + +# +# Test that @@sql_slave_skip_counter does not count skipped @@skip_replication +# events. +# + +connection master; +TRUNCATE t1; + +sync_slave_with_master; +connection slave; +STOP SLAVE; +# We will skip two INSERTs (in addition to any skipped due to +# @@skip_replication). Since from 5.5 every statement is wrapped in +# BEGIN ... END, we need to skip 6 events for this. +SET GLOBAL sql_slave_skip_counter=6; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE; + +connection master; +# Need to fix @@binlog_format to get consistent event count. +SET @old_binlog_format= @@binlog_format; +SET binlog_format= statement; +SET skip_replication=0; +INSERT INTO t1 VALUES (1,5); +SET skip_replication=1; +INSERT INTO t1 VALUES (2,5); +SET skip_replication=0; +INSERT INTO t1 VALUES (3,5); +INSERT INTO t1 VALUES (4,5); +SET binlog_format= @old_binlog_format; + +sync_slave_with_master; +connection slave; + +# The slave should have skipped the first three inserts (number 1 and 3 due +# to @@sql_slave_skip_counter=2, number 2 due to +# @@replicate_events_marked_for_skip=FILTER_ON_SLAVE). So only number 4 +# should be left. +SELECT * FROM t1; + + +# +# Check that BINLOG statement preserves the @@skip_replication flag. +# +connection slave; +# Need row @@binlog_format for BINLOG statements containing row events. +--source include/stop_slave.inc +SET @old_slave_binlog_format= @@global.binlog_format; +SET GLOBAL binlog_format= row; +--source include/start_slave.inc + +connection master; +TRUNCATE t1; + +SET @old_binlog_format= @@binlog_format; +SET binlog_format= row; +# Format description log event. 
+BINLOG 'wlZOTw8BAAAA8QAAAPUAAAAAAAQANS41LjIxLU1hcmlhREItZGVidWctbG9nAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAEzgNAAgAEgAEBAQEEgAA2QAEGggAAAAICAgCAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAA371saA=='; +# INSERT INTO t1 VALUES (1,8) # with @@skip_replication=1 +BINLOG 'wlZOTxMBAAAAKgAAAGMBAAAAgCkAAAAAAAEABHRlc3QAAnQxAAIDAwAC +wlZOTxcBAAAAJgAAAIkBAAAAgCkAAAAAAAEAAv/8AQAAAAgAAAA='; +# INSERT INTO t1 VALUES (2,8) # with @@skip_replication=0 +BINLOG 'wlZOTxMBAAAAKgAAADwCAAAAACkAAAAAAAEABHRlc3QAAnQxAAIDAwAC +wlZOTxcBAAAAJgAAAGICAAAAACkAAAAAAAEAAv/8AgAAAAgAAAA='; +SET binlog_format= @old_binlog_format; + +SELECT * FROM t1 ORDER BY a; +sync_slave_with_master; +connection slave; +# Slave should have only the second insert, the first should be ignored due to +# the @@skip_replication flag. +SELECT * FROM t1 ORDER by a; + +--source include/stop_slave.inc +SET GLOBAL binlog_format= @old_slave_binlog_format; +--source include/start_slave.inc + + +# Test that it is not possible to change @@skip_replication inside a +# transaction or statement, thereby replicating only parts of statements +# or transactions. +connection master; +SET skip_replication=0; + +BEGIN; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=0; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +ROLLBACK; +SET skip_replication=1; +BEGIN; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=0; +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +COMMIT; +SET autocommit=0; +INSERT INTO t2(a) VALUES(100); +--error ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET skip_replication=1; +ROLLBACK; +SET autocommit=1; + +SET skip_replication=1; +--delimiter | +CREATE FUNCTION foo (x INT) RETURNS INT BEGIN SET SESSION skip_replication=x; RETURN x; END| +CREATE PROCEDURE bar(x INT) BEGIN SET SESSION skip_replication=x; END| +CREATE FUNCTION baz (x INT) RETURNS INT BEGIN CALL bar(x); RETURN x; END| +--delimiter ; +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SELECT foo(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SELECT baz(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET @a= foo(1); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +SET @a= baz(1); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +UPDATE t2 SET b=foo(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +UPDATE t2 SET b=baz(0); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +INSERT INTO t1 VALUES (101, foo(1)); +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION +INSERT INTO t1 VALUES (101, baz(0)); +SELECT @@skip_replication; +CALL bar(0); +SELECT @@skip_replication; +CALL bar(1); +SELECT @@skip_replication; +DROP FUNCTION foo; +DROP PROCEDURE bar; +DROP FUNCTION baz; + + +# Test that master-side filtering happens on the master side, and that +# slave-side filtering happens on the slave. + +# First test that events do not reach the slave when master-side filtering +# is configured. Do this by replicating first with only the IO thread running +# and master-side filtering; then change to no filtering and start the SQL +# thread. This should still skip the events, as master-side filtering +# means the events never reached the slave. 
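As an illustration (not part of the test itself), the three modes of @@global.replicate_events_marked_for_skip that the surrounding sections exercise can be summarised in one hedged sketch; the variable can only be changed while the slave is stopped, as the ER_SLAVE_MUST_STOP check earlier in this file shows:

    STOP SLAVE;
    -- REPLICATE        : apply events that were logged with @@skip_replication=1 (the default)
    -- FILTER_ON_SLAVE  : the events reach the relay log, but the SQL thread skips them
    -- FILTER_ON_MASTER : the master never sends the marked events to this slave
    SET GLOBAL replicate_events_marked_for_skip = FILTER_ON_MASTER;
    START SLAVE;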
+connection master; +SET skip_replication= 0; +TRUNCATE t1; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER; +START SLAVE IO_THREAD; +connection master; +SET skip_replication= 1; +INSERT INTO t1(a) VALUES (1); +SET skip_replication= 0; +INSERT INTO t1(a) VALUES (2); +--source include/save_master_pos.inc +connection slave; +--source include/sync_io_with_master.inc +STOP SLAVE IO_THREAD; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; +connection master; +sync_slave_with_master; +connection slave; +# Now only the second insert of (2) should be visible, as the first was +# filtered on the master, so even though the SQL thread ran without skipping +# events, it will never see the event in the first place. +SELECT * FROM t1; + +# Now tests that when slave-side filtering is configured, events _do_ reach +# the slave. +connection master; +SET skip_replication= 0; +TRUNCATE t1; +sync_slave_with_master; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE; +START SLAVE IO_THREAD; +connection master; +SET skip_replication= 1; +INSERT INTO t1(a) VALUES (1); +SET skip_replication= 0; +INSERT INTO t1(a) VALUES (2); +--source include/save_master_pos.inc +connection slave; +--source include/sync_io_with_master.inc +STOP SLAVE IO_THREAD; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; +connection master; +sync_slave_with_master; +connection slave; +# Now both inserts should be visible. Since filtering was configured to be +# slave-side, the event is in the relay log, and when the SQL thread ran we +# had disabled filtering again. +SELECT * FROM t1 ORDER BY a; + + +# Clean up. +connection master; +SET skip_replication=0; +DROP TABLE t1,t2; +connection slave; +STOP SLAVE; +SET GLOBAL replicate_events_marked_for_skip=REPLICATE; +START SLAVE; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_special_charset.test b/mysql-test/suite/rpl/t/rpl_special_charset.test index fa19a17b1e4..641aa483d32 100644 --- a/mysql-test/suite/rpl/t/rpl_special_charset.test +++ b/mysql-test/suite/rpl/t/rpl_special_charset.test @@ -1 +1,32 @@ ---source include/rpl_special_charset.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +################################################################################ +# Bug#19855907 IO THREAD AUTHENTICATION ISSUE WITH SOME CHARACTER SETS +# Problem: IO thread fails to connect to master if servers are configured with +# special character sets like utf16, utf32, ucs2. +# +# Analysis: MySQL server does not support few special character sets like +# utf16,utf32 and ucs2 as "client's character set"(eg: utf16,utf32, ucs2). +# When IO thread is trying to connect to Master, it sets server's character +# set as client's character set. When Slave server is started with these +# special character sets, IO thread (a connection to Master) fails because +# of the above said reason. +# +# Fix: If server's character set is not supported as client's character set, +# then set default's client character set(latin1) as client's character set. 
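As a quick illustration of the behaviour described above (a hedged sketch, not part of the test): these character sets are valid as server or column character sets but are rejected on the client side, which is exactly the limitation the slave's IO thread ran into before the fix.

    -- Accepted as a column character set (cs_demo is a hypothetical table):
    CREATE TABLE cs_demo (c CHAR(10) CHARACTER SET utf16);
    -- Rejected as a client character set, the same restriction that broke the IO thread:
    SET NAMES utf16;   -- fails: utf16 cannot be used as a client character set
    DROP TABLE cs_demo;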
+############################################################################### +--source include/master-slave.inc +call mtr.add_suppression("'utf16' can not be used as client character set"); +CREATE TABLE t1(i VARCHAR(20)); +INSERT INTO t1 VALUES (0xFFFF); +--sync_slave_with_master +--let diff_tables=master:t1, slave:t1 +--source include/diff_tables.inc +# Cleanup +--connection master +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sporadic_master.test b/mysql-test/suite/rpl/t/rpl_sporadic_master.test index 397756af396..ad4c44cbf74 100644 --- a/mysql-test/suite/rpl/t/rpl_sporadic_master.test +++ b/mysql-test/suite/rpl/t/rpl_sporadic_master.test @@ -1 +1,32 @@ ---source include/rpl_sporadic_master.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# test to see if replication can continue when master sporadically fails on +# COM_BINLOG_DUMP and additionally limits the number of events per dump + +source include/master-slave.inc; + +create table t2(n int); +create table t1(n int not null auto_increment primary key); +insert into t1 values (NULL),(NULL); +truncate table t1; +# We have to use 4 in the following to make this test work with all table types +insert into t1 values (4),(NULL); +sync_slave_with_master; +--source include/stop_slave.inc +--source include/start_slave.inc +connection master; +insert into t1 values (NULL),(NULL); +flush logs; +truncate table t1; +insert into t1 values (10),(NULL),(NULL),(NULL),(NULL),(NULL); +sync_slave_with_master; +select * from t1 ORDER BY n; +connection master; +drop table t1,t2; +sync_slave_with_master; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_ssl.test b/mysql-test/suite/rpl/t/rpl_ssl.test index c4a534b9294..59a2af9f137 100644 --- a/mysql-test/suite/rpl/t/rpl_ssl.test +++ b/mysql-test/suite/rpl/t/rpl_ssl.test @@ -1 +1,116 @@ ---source include/rpl_ssl.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +source include/have_ssl_communication.inc; +source include/master-slave.inc; +source include/no_valgrind_without_big.inc; + +# create a user for replication that requires ssl encryption +connection master; +create user replssl@localhost; +grant replication slave on *.* to replssl@localhost require ssl; +create table t1 (t int auto_increment, KEY(t)); + +sync_slave_with_master; + +# Set slave to use SSL for connection to master +stop slave; +--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR +eval change master to + master_user='replssl', + master_password='', + master_ssl=1, + master_ssl_ca ='$MYSQL_TEST_DIR/std_data/cacert.pem', + master_ssl_cert='$MYSQL_TEST_DIR/std_data/client-cert.pem', + master_ssl_key='$MYSQL_TEST_DIR/std_data/client-key.pem'; +start slave; + +# Switch to master and insert one record, then sync it to slave +connection master; +insert into t1 values(1); +sync_slave_with_master; + +# The record should now be on slave +select * from t1; + +# The slave is synced and waiting/reading from master +# SHOW SLAVE STATUS will show "Waiting for master to send event" +let $status_items= Master_SSL_Allowed, Master_SSL_CA_Path, Master_SSL_CA_File, Master_SSL_Crl, Master_SSL_Crlpath, Master_SSL_Cert, Master_SSL_Key; +source include/show_slave_status.inc; +source include/check_slave_is_running.inc; + +# Stop the slave, as reported in bug#21871 it would hang +STOP SLAVE; + +select * from t1; + +# Do the same thing a number of times +disable_query_log; +disable_result_log; +# 2007-11-27 mats Bug #32756 Starting and stopping the slave in a loop can lose rows +# After discussions with Engineering, I'm disabling this part of the test to avoid it causing +# red trees. +disable_parsing; +let $i= 100; +while ($i) +{ + start slave; + connection master; + insert into t1 values (NULL); + select * from t1; # Some variance + connection slave; + select * from t1; # Some variance + stop slave; + dec $i; +} +enable_parsing; +START SLAVE; +enable_query_log; +enable_result_log; +connection master; +# INSERT one more record to make sure +# the sync has something to do +insert into t1 values (NULL); +let $master_count= `select count(*) from t1`; + +sync_slave_with_master; +--source include/wait_for_slave_to_start.inc +source include/show_slave_status.inc; +source include/check_slave_is_running.inc; + +let $slave_count= `select count(*) from t1`; + +if ($slave_count != $master_count) +{ + echo master and slave differed in number of rows; + echo master: $master_count; + echo slave: $slave_count; + + connection master; + select count(*) t1; + select * from t1; + connection slave; + select count(*) t1; + select * from t1; + query_vertical show slave status; +} + +connection master; +drop user replssl@localhost; +drop table t1; +sync_slave_with_master; + +--source include/stop_slave.inc +CHANGE MASTER TO + master_user = 'root', + master_ssl = 0, + master_ssl_ca = '', + master_ssl_cert = '', + master_ssl_key = ''; + +--echo End of 5.0 tests +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test b/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test index b4e53358712..654a5d47cb9 100644 --- a/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test +++ b/mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test @@ -1 +1,107 @@ ---source include/rpl_stm_relay_ign_space.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). 
+# Please check all dependent tests after modifying it +# + +# +# BUG#12400313 / BUG#64503 test case +# +# +# Description +# ----------- +# +# This test case starts the slave server with: +# --relay-log-space-limit=8192 --relay-log-purge --max-relay-log-size=4096 +# +# Then it issues some queries that will cause the slave to reach +# relay-log-space-limit. We lock the table so that the SQL thread is +# not able to purge the log and then we issue some more statements. +# +# The purpose is to show that the IO thread will honor the limits +# while the SQL thread is not able to purge the relay logs, which did +# not happen before this patch. In addition we assert that while +# ignoring the limit (SQL thread needs to rotate before purging), the +# IO thread does not do it in an uncontrolled manner. + +--source include/have_binlog_format_statement.inc +--source include/have_innodb.inc +--source include/master-slave.inc + +--disable_query_log +CREATE TABLE t1 (c1 TEXT) engine=InnoDB; + +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); + +--sync_slave_with_master + +# wait for the SQL thread to sleep +--let $show_statement= SHOW PROCESSLIST +--let $field= State +--let $condition= = 'Slave has read all relay log; waiting for more updates' +--source include/wait_show_condition.inc + +# now the io thread has set rli->ignore_space_limit +# lets lock the table so that once the SQL thread awakes +# it blocks there and does not set rli->ignore_space_limit +# back to zero +LOCK TABLE t1 WRITE; + +# now issue more statements that will overflow the +# rli->log_space_limit (in this case ~10K) +--connection master + +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); +INSERT INTO t1 VALUES 
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); + +--connection slave + +# ASSERT that the IO thread waits for the SQL thread to release some +# space before continuing +--let $show_statement= SHOW PROCESSLIST +--let $field= State +--let $condition= LIKE 'Waiting for %' +# before the patch (IO would have transfered everything) +#--let $condition= = 'Waiting for master to send event' +# after the patch (now it waits for space to be freed) +#--let $condition= = 'Waiting for the slave SQL thread to free enough relay log space' +--source include/wait_show_condition.inc + +# without the patch we can uncomment the following two lines and +# watch the IO thread synchronize with the master, thus writing +# relay logs way over the space limit +#--connection master +#--source include/sync_slave_io_with_master.inc + +## ASSERT that the IO thread has honored the limit+few bytes required to be able to purge +--let $relay_log_space_while_sql_is_executing = query_get_value(SHOW SLAVE STATUS, Relay_Log_Space, 1) +--let $relay_log_space_limit = query_get_value(SHOW VARIABLES LIKE "relay_log_space_limit", Value, 1) +--let $assert_text= Assert that relay log space is close to the limit +--let $assert_cond= $relay_log_space_while_sql_is_executing <= $relay_log_space_limit * 1.15 +--source include/assert.inc + +# unlock the table and let SQL thread continue applying events +UNLOCK TABLES; + +--connection master +--sync_slave_with_master +--let $diff_tables=master:test.t1,slave:test.t1 +--source include/diff_tables.inc + +--connection master +DROP TABLE t1; +--enable_query_log +--sync_slave_with_master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test index 2625508515b..31b80732c60 100644 --- a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test @@ -1 +1,633 @@ ---source include/rpl_switch_stm_row_mixed.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +# +# rpl_switch_stm_row_mixed tests covers +# +# - Master is switching explicitly between STATEMENT, ROW, and MIXED +# binlog format showing when it is possible and when not. 
+# - Master switching from MIXED to RBR implicitly listing all use +# cases, e.g a query invokes UUID(), thereafter to serve as the +# definition of MIXED binlog format +# - correctness of execution + + +-- source include/have_binlog_format_mixed_or_row.inc +-- source include/master-slave.inc + +# Since this test generates row-based events in the binary log, the +# slave SQL thread cannot be in STATEMENT mode to execute this test, +# so we only execute it for MIXED and ROW as default value of +# BINLOG_FORMAT. + +connection slave; + +connection master; +--disable_warnings +drop database if exists mysqltest1; +create database mysqltest1; +--enable_warnings +use mysqltest1; + +# Save binlog format +set @my_binlog_format= @@global.binlog_format; + +# play with switching +set session binlog_format=mixed; +show session variables like "binlog_format%"; +set session binlog_format=statement; +show session variables like "binlog_format%"; +set session binlog_format=row; +show session variables like "binlog_format%"; + +set global binlog_format=DEFAULT; +show global variables like "binlog_format%"; +set global binlog_format=MIXED; +show global variables like "binlog_format%"; +set global binlog_format=STATEMENT; +show global variables like "binlog_format%"; +set global binlog_format=ROW; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; + +CREATE TABLE t1 (a varchar(100)); + +prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; +set @string="emergency_1_"; +insert into t1 values("work_2_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(UUID(),"work_3_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_4_",UUID())); +insert into t1 select "yesterday_5_"; + +# verify that temp tables prevent a switch to SBR +create temporary table tmp(a char(100)); +insert into tmp values("see_6_"); +--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR +set binlog_format=statement; +insert into t1 select * from tmp; +drop temporary table tmp; + +# Now we go to SBR +set binlog_format=statement; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; +set global binlog_format=statement; +show global variables like "binlog_format%"; +show session variables like "binlog_format%"; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select ?'; +set @string="emergency_7_"; +insert into t1 values("work_8_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values("work_9_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values("for_10_"); +insert into t1 select "yesterday_11_"; + +# test statement (is not default after wl#3368) +set binlog_format=statement; +select @@global.binlog_format, @@session.binlog_format; +set global binlog_format=statement; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select ?'; +set @string="emergency_12_"; +insert into t1 values("work_13_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values("work_14_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values("for_15_"); 
+insert into t1 select "yesterday_16_"; + +# and now the mixed mode + +set global binlog_format=mixed; +select @@global.binlog_format, @@session.binlog_format; +set binlog_format=default; +select @@global.binlog_format, @@session.binlog_format; + +prepare stmt1 from 'insert into t1 select concat(UUID(),?)'; +set @string="emergency_17_"; +insert into t1 values("work_18_"); +execute stmt1 using @string; +deallocate prepare stmt1; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(UUID(),"work_19_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_20_",UUID())); +insert into t1 select "yesterday_21_"; + +prepare stmt1 from 'insert into t1 select ?'; +insert into t1 values(concat(UUID(),"work_22_")); +execute stmt1 using @string; +deallocate prepare stmt1; + +insert into t1 values(concat("for_23_",UUID())); +insert into t1 select "yesterday_24_"; + +# Test of CREATE TABLE SELECT + +create table t2 ENGINE=MyISAM select rpad(UUID(),100,' '); +create table t3 select 1 union select UUID(); +--disable_warnings +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3); +--enable_warnings +SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR +create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); +# what if UUID() is first: +--disable_warnings +insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4); +--enable_warnings + +# inside a stored procedure + +delimiter |; +create procedure foo() +begin +insert into t1 values("work_25_"); +insert into t1 values(concat("for_26_",UUID())); +insert into t1 select "yesterday_27_"; +end| +create procedure foo2() +begin +insert into t1 values(concat("emergency_28_",UUID())); +insert into t1 values("work_29_"); +insert into t1 values(concat("for_30_",UUID())); +set session binlog_format=row; # accepted for stored procs +insert into t1 values("more work_31_"); +set session binlog_format=mixed; +end| +create function foo3() returns bigint unsigned +begin + set session binlog_format=row; # rejected for stored funcs + insert into t1 values("alarm"); + return 100; +end| +create procedure foo4(x varchar(100)) +begin +insert into t1 values(concat("work_250_",x)); +insert into t1 select "yesterday_270_"; +end| +delimiter ;| +call foo(); +call foo2(); +call foo4("hello"); +call foo4(UUID()); +call foo4("world"); + +# test that can't SET in a stored function +--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT +select foo3(); +select * from t1 where a="alarm"; + +# Tests of stored functions/triggers/views for BUG#20930 "Mixed +# binlogging mode does not work with stored functions, triggers, +# views" + +# Function which calls procedure +drop function foo3; +delimiter |; +create function foo3() returns bigint unsigned +begin + insert into t1 values("foo3_32_"); + call foo(); + return 100; +end| +delimiter ;| +insert into t2 select foo3(); + +prepare stmt1 from 'insert into t2 select foo3()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test if stored function calls stored function which calls procedure +# which requires row-based. 
+ +delimiter |; +create function foo4() returns bigint unsigned +begin + insert into t2 select foo3(); + return 100; +end| +delimiter ;| +select foo4(); + +prepare stmt1 from 'select foo4()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function +delimiter |; +create function foo5() returns bigint unsigned +begin + insert into t2 select UUID(); + return 100; +end| +delimiter ;| +select foo5(); + +prepare stmt1 from 'select foo5()'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# A simple stored function where UUID() is in the argument +delimiter |; +create function foo6(x varchar(100)) returns bigint unsigned +begin + insert into t2 select x; + return 100; +end| +delimiter ;| +select foo6("foo6_1_"); +select foo6(concat("foo6_2_",UUID())); + +prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))'; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + + +# Test of views using UUID() + +create view v1 as select uuid(); +create table t11 (data varchar(255)); +insert into t11 select * from v1; +# Test of querying INFORMATION_SCHEMA which parses the view's body, +# to verify that it binlogs statement-based (is not polluted by +# the parsing of the view's body). +insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11'); +prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')"; +execute stmt1; +execute stmt1; +deallocate prepare stmt1; + +# Test of triggers with UUID() +delimiter |; +create trigger t11_bi before insert on t11 for each row +begin + set NEW.data = concat(NEW.data,UUID()); +end| +delimiter ;| +insert into t11 values("try_560_"); + +# Test that INSERT DELAYED works in mixed mode (BUG#20649) +insert delayed into t2 values("delay_1_"); +insert delayed into t2 values(concat("delay_2_",UUID())); +insert delayed into t2 values("delay_6_"); + +# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not +# replicate fine in statement-based ; we test that in mixed mode it +# works). +insert delayed into t2 values(rand()); +set @a=2.345; +insert delayed into t2 values(@a); + +# With INSERT DELAYED, rows are written to the binlog after they are +# written to the table. Therefore, it is not enough to wait until the +# rows make it to t2 on the master (the rows may not be in the binlog +# at that time, and may still not be in the binlog when +# sync_slave_with_master is later called). Instead, we wait until the +# rows make it to t2 on the slave. We first call +# sync_slave_with_master, so that we are sure that t2 has been created +# on the slave. +sync_slave_with_master; +let $wait_condition= SELECT COUNT(*) = 19 FROM mysqltest1.t2; +--source include/wait_condition.inc +connection master; + +# If you want to do manual testing of the mixed mode regarding UDFs (not +# testable automatically as quite platform- and compiler-dependent), +# you just need to set the variable below to 1, and to +# "make udf_example.so" in sql/, and to copy sql/udf_example.so to +# MYSQL_TEST_DIR/lib/mysql. 
+let $you_want_to_test_UDF=0; +if ($you_want_to_test_UDF) +{ + CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so'; + prepare stmt1 from 'insert into t1 select metaphon(?)'; + set @string="emergency_133_"; + insert into t1 values("work_134_"); + execute stmt1 using @string; + deallocate prepare stmt1; + prepare stmt1 from 'insert into t1 select ?'; + insert into t1 values(metaphon("work_135_")); + execute stmt1 using @string; + deallocate prepare stmt1; + insert into t1 values(metaphon("for_136_")); + insert into t1 select "yesterday_137_"; + create table t6 select metaphon("for_138_"); + create table t7 select 1 union select metaphon("for_139_"); + create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3); + create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3); +} + +create table t20 select * from t1; # save for comparing later +create table t21 select * from t2; +create table t22 select * from t3; +drop table t1,t2,t3; + +# This tests the fix to +# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog +# We verify that under the mixed binlog mode, a stored function +# modifying at least two tables having an auto_increment column, +# is binlogged row-based. Indeed in statement-based binlogging, +# only the auto_increment value generated for the first table +# is recorded in the binlog, the value generated for the 2nd table +# lacking. + +create table t1 (a int primary key auto_increment, b varchar(100)); +create table t2 (a int primary key auto_increment, b varchar(100)); +create table t3 (b varchar(100)); +delimiter |; +create function f (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f("try_41_"); +# Two operations which compensate each other except that their net +# effect is that they advance the auto_increment counter of t2 on slave: +sync_slave_with_master; +use mysqltest1; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# this is the call which didn't replicate well +select f("try_42_"); +sync_slave_with_master; + +# now use prepared statement and test again, just to see that the RBB +# mode isn't set at PREPARE but at EXECUTE. 
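As an aside, a hedged way to see which format an individual statement actually ended up with is to inspect the binary log directly (the log file name here is illustrative):

    SHOW BINLOG EVENTS IN 'master-bin.000001';
    -- A call such as SELECT f('try_42_') is expected to appear as row events
    -- (a Table_map event followed by Write_rows), because f() inserts into two
    -- tables with auto_increment columns, while a plain deterministic INSERT
    -- is logged as a single statement-based Query event.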
+ +insert into t2 values(3,null),(4,null); +delete from t2 where a>=3; + +connection master; +prepare stmt1 from 'select f(?)'; +set @string="try_43_"; +insert into t1 values(null,"try_44_"); # should be SBB +execute stmt1 using @string; # should be RBB +deallocate prepare stmt1; +sync_slave_with_master; + +# verify that if only one table has auto_inc, it does not trigger RBB +# (we'll check in binlog further below) + +connection master; +create table t12 select * from t1; # save for comparing later +drop table t1; +create table t1 (a int, b varchar(100), key(a)); +select f("try_45_"); + +# restore table's key +create table t13 select * from t1; +drop table t1; +create table t1 (a int primary key auto_increment, b varchar(100)); + +# now test if it's two functions, each of them inserts in one table + +drop function f; +# we need a unique key to have sorting of rows by mysqldump +create table t14 (unique (a)) select * from t2; +truncate table t2; +delimiter |; +create function f1 (x varchar(100)) returns int deterministic +begin + insert into t1 values(null,x); + return 1; +end| +create function f2 (x varchar(100)) returns int deterministic +begin + insert into t2 values(null,x); + return 1; +end| +delimiter ;| +select f1("try_46_"),f2("try_47_"); + +sync_slave_with_master; +insert into t2 values(2,null),(3,null),(4,null); +delete from t2 where a>=2; + +connection master; +# Test with SELECT and INSERT +select f1("try_48_"),f2("try_49_"); +insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_"))); +sync_slave_with_master; + +# verify that if f2 does only read on an auto_inc table, this does not +# switch to RBB +connection master; +drop function f2; +delimiter |; +create function f2 (x varchar(100)) returns int deterministic +begin + declare y int; + insert into t1 values(null,x); + set y = (select count(*) from t2); + return y; +end| +delimiter ;| +select f1("try_53_"),f2("try_54_"); +sync_slave_with_master; + +# And now, a normal statement with a trigger (no stored functions) + +connection master; +drop function f2; +delimiter |; +create trigger t1_bi before insert on t1 for each row +begin + insert into t2 values(null,"try_55_"); +end| +delimiter ;| +insert into t1 values(null,"try_56_"); +# and now remove one auto_increment and verify SBB +alter table t1 modify a int, drop primary key; +insert into t1 values(null,"try_57_"); +sync_slave_with_master; + +# Test for BUG#20499 "mixed mode with temporary table breaks binlog" +# Slave used to have only 2 rows instead of 3. 
+connection master; +CREATE TEMPORARY TABLE t15 SELECT UUID(); +create table t16 like t15; +INSERT INTO t16 SELECT * FROM t15; +# we'll verify that this one is done RBB +insert into t16 values("try_65_"); +drop table t15; +# we'll verify that this one is done SBB +insert into t16 values("try_66_"); +sync_slave_with_master; + +# and now compare: + +connection master; + +# first check that data on master is sensible +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t11; +select count(*) from t20; +select count(*) from t21; +select count(*) from t22; +select count(*) from t12; +select count(*) from t13; +select count(*) from t14; +select count(*) from t16; +if ($you_want_to_test_UDF) +{ + select count(*) from t6; + select count(*) from t7; + select count(*) from t8; + select count(*) from t9; +} + +sync_slave_with_master; + +# +# Bug#20863 If binlog format is changed between update and unlock of +# tables, wrong binlog +# + +connection master; +DROP TABLE IF EXISTS t11; +SET SESSION BINLOG_FORMAT=STATEMENT; +CREATE TABLE t11 (song VARCHAR(255)); +LOCK TABLES t11 WRITE; +SET SESSION BINLOG_FORMAT=ROW; +INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict'); +SET SESSION BINLOG_FORMAT=STATEMENT; +INSERT INTO t11 VALUES('Careful With That Axe, Eugene'); +UNLOCK TABLES; + +--query_vertical SELECT * FROM t11 +sync_slave_with_master; +USE mysqltest1; +--query_vertical SELECT * FROM t11 + +connection master; +DROP TABLE IF EXISTS t12; +SET SESSION BINLOG_FORMAT=MIXED; +CREATE TABLE t12 (data LONG); +LOCK TABLES t12 WRITE; +INSERT INTO t12 VALUES(UUID()); +UNLOCK TABLES; +sync_slave_with_master; + +# +# BUG#28086: SBR of USER() becomes corrupted on slave +# + +connection master; + +# Just to get something that is non-trivial, albeit still simple, we +# stuff the result of USER() and CURRENT_USER() into a variable. +--delimiter $$ +CREATE FUNCTION my_user() + RETURNS CHAR(64) +BEGIN + DECLARE user CHAR(64); + SELECT USER() INTO user; + RETURN user; +END $$ +--delimiter ; + +--delimiter $$ +CREATE FUNCTION my_current_user() + RETURNS CHAR(64) +BEGIN + DECLARE user CHAR(64); + SELECT CURRENT_USER() INTO user; + RETURN user; +END $$ +--delimiter ; + +DROP TABLE IF EXISTS t13; +CREATE TABLE t13 (data CHAR(64)); +INSERT INTO t13 VALUES (USER()); +INSERT INTO t13 VALUES (my_user()); +INSERT INTO t13 VALUES (CURRENT_USER()); +INSERT INTO t13 VALUES (my_current_user()); + +sync_slave_with_master; + +# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID +--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql +--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql + +# Let's compare. Note: If they match test will pass, if they do not match +# the test will show that the diff statement failed and not reject file +# will be created. 
You will need to go to the mysql-test dir and diff +# the files your self to see what is not matching + +diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; + +connection master; + +# Now test that mysqlbinlog works fine on a binlog generated by the +# mixed mode + +# BUG#11312 "DELIMITER is not written to the binary log that causes +# syntax error" makes that mysqlbinlog will fail if we pass it the +# text of queries; this forces us to use --base64-output here. + +# BUG#20929 "BINLOG command causes invalid free plus assertion +# failure" makes mysqld segfault when receiving --base64-output + +# So I can't enable this piece of test +# SIGH + +if ($enable_when_11312_or_20929_fixed) +{ +--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +drop database mysqltest1; +--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql +--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql +# the old mysqldump output on slave is the same as what it was on +# master before restoring on master. +diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql; +} + +drop database mysqltest1; +sync_slave_with_master; + +connection master; +# Restore binlog format setting +set global binlog_format =@my_binlog_format; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sync.test b/mysql-test/suite/rpl/t/rpl_sync.test index bdb0d8ec4cc..1e2ec2ca83b 100644 --- a/mysql-test/suite/rpl/t/rpl_sync.test +++ b/mysql-test/suite/rpl/t/rpl_sync.test @@ -1,2 +1,159 @@ ---source include/rpl_sync_test.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +######################################################################################## +# This test verifies the options --sync-relay-log-info and --relay-log-recovery by +# crashing the slave in two different situations: +# (case-1) - Corrupt the relay log with changes which were not processed by +# the SQL Thread and crashes it. +# (case-2) - Corrupt the master.info with wrong coordinates and crashes it. +# +# Case 1: +# 1 - Stops the SQL Thread +# 2 - Inserts new records into the master. +# 3 - Corrupts the relay-log.bin* which most likely has such changes. +# 4 - Crashes the slave +# 5 - Verifies if the slave is sync with the master which means that the information +# loss was circumvented by the recovery process. +# +# Case 2: +# 1 - Stops the SQL/IO Threads +# 2 - Inserts new records into the master. +# 3 - Corrupts the master.info with wrong coordinates. +# 4 - Crashes the slave +# 5 - Verifies if the slave is sync with the master which means that the information +# loss was circumvented by the recovery process. 
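For context, the two slave-side options this scenario revolves around are ordinary global variables and can be inspected directly (a sketch only; the values actually used by the test come from its option files, which are not part of this hunk):

    -- relay_log_recovery : on restart, discard unprocessed relay logs and refetch
    --                      them from the master based on the SQL thread position
    -- sync_relay_log_info: how often the relay-log info file is synced to disk
    SELECT @@global.relay_log_recovery, @@global.sync_relay_log_info;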
+######################################################################################## + +######################################################################################## +# Configuring the environment +######################################################################################## +--echo =====Configuring the enviroment=======; +--source include/not_embedded.inc +--source include/not_valgrind.inc +--source include/have_debug.inc +--source include/have_innodb.inc +--source include/not_crashrep.inc +--source include/master-slave.inc + +call mtr.add_suppression('Attempting backtrace'); +call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001"); +# Use innodb so we do not get "table should be repaired" issues. +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +flush tables; +CREATE TABLE t1(a INT, PRIMARY KEY(a)) engine=innodb; + +insert into t1(a) values(1); +insert into t1(a) values(2); +insert into t1(a) values(3); + +######################################################################################## +# Case 1: Corrupt a relay-log.bin* +######################################################################################## +--echo =====Inserting data on the master but without the SQL Thread being running=======; +sync_slave_with_master; + +connection slave; +let $MYSQLD_SLAVE_DATADIR= `select @@datadir`; +--replace_result $MYSQLD_SLAVE_DATADIR MYSQLD_SLAVE_DATADIR +--copy_file $MYSQLD_SLAVE_DATADIR/master.info $MYSQLD_SLAVE_DATADIR/master.backup +--source include/stop_slave_sql.inc + +connection master; +insert into t1(a) values(4); +insert into t1(a) values(5); +insert into t1(a) values(6); + +--echo =====Removing relay log files and crashing/recoverying the slave=======; +connection slave; +--source include/stop_slave_io.inc + +let $file= query_get_value("SHOW SLAVE STATUS", Relay_Log_File, 1); + +--let FILE_TO_CORRUPT= $MYSQLD_SLAVE_DATADIR/$file +perl; +$file= $ENV{'FILE_TO_CORRUPT'}; +open(FILE, ">$file") || die "Unable to open $file."; +truncate(FILE,0); +print FILE "failure"; +close ($file); +EOF + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; +--error 2013 +FLUSH LOGS; + +--let $rpl_server_number= 2 +--source include/rpl_reconnect.inc + +--echo =====Dumping and comparing tables=======; +--source include/start_slave.inc + +connection master; +sync_slave_with_master; + +let $diff_tables=master:t1,slave:t1; +source include/diff_tables.inc; + +######################################################################################## +# Case 2: Corrupt a master.info +######################################################################################## +--echo =====Corrupting the master.info=======; +connection slave; +--source include/stop_slave.inc + +connection master; +FLUSH LOGS; + +insert into t1(a) values(7); +insert into t1(a) values(8); +insert into t1(a) values(9); + +connection slave; +let MYSQLD_SLAVE_DATADIR=`select @@datadir`; + +--perl +use strict; +use warnings; +my $src= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.backup"; +my $dst= "$ENV{'MYSQLD_SLAVE_DATADIR'}/master.info"; +open(FILE, "<", $src) or die; +my @content= <FILE>; +close FILE; +open(FILE, ">", $dst) or die; +binmode FILE; +print FILE @content; +close FILE; +EOF + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +SET SESSION debug_dbug="d,crash_before_rotate_relaylog"; +--error 2013 +FLUSH LOGS; + +--let $rpl_server_number= 2 +--source include/rpl_reconnect.inc + +--echo 
=====Dumping and comparing tables=======; +--source include/start_slave.inc + +connection master; +sync_slave_with_master; + +let $diff_tables=master:t1,slave:t1; +source include/diff_tables.inc; + +######################################################################################## +# Clean up +######################################################################################## +--echo =====Clean up=======; +connection master; +drop table t1; + +--remove_file $MYSQLD_SLAVE_DATADIR/master.backup +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test b/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test index d976ae3757b..6728ff55d6f 100644 --- a/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test +++ b/mysql-test/suite/rpl/t/rpl_temporal_format_default_to_default.test @@ -1 +1,82 @@ ---source include/rpl_temporal_format_default_to_default.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption). +# Please check all dependent tests after modifying it +# + +--source include/master-slave.inc + +if ($force_master_mysql56_temporal_format) +{ + connection master; + eval SET @@global.mysql56_temporal_format=$force_master_mysql56_temporal_format; +} + +if ($force_slave_mysql56_temporal_format) +{ + connection slave; + eval SET @@global.mysql56_temporal_format=$force_slave_mysql56_temporal_format; +} + +connection master; +SELECT @@global.mysql56_temporal_format AS on_master; +connection slave; +SELECT @@global.mysql56_temporal_format AS on_slave; +connection master; + +CREATE TABLE t1 +( + c0 TIME(0), + c1 TIME(1), + c2 TIME(2), + c3 TIME(3), + c4 TIME(4), + c5 TIME(5), + c6 TIME(6) +); +CREATE TABLE t2 +( + c0 TIMESTAMP(0), + c1 TIMESTAMP(1), + c2 TIMESTAMP(2), + c3 TIMESTAMP(3), + c4 TIMESTAMP(4), + c5 TIMESTAMP(5), + c6 TIMESTAMP(6) +); + +CREATE TABLE t3 +( + c0 DATETIME(0), + c1 DATETIME(1), + c2 DATETIME(2), + c3 DATETIME(3), + c4 DATETIME(4), + c5 DATETIME(5), + c6 DATETIME(6) +); +INSERT INTO t1 VALUES ('01:01:01','01:01:01.1','01:01:01.11','01:01:01.111','01:01:01.1111','01:01:01.11111','01:01:01.111111'); +INSERT INTO t2 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); +INSERT INTO t3 VALUES ('2001-01-01 01:01:01','2001-01-01 01:01:01.1','2001-01-01 01:01:01.11','2001-01-01 01:01:01.111','2001-01-01 01:01:01.1111','2001-01-01 01:01:01.11111','2001-01-01 01:01:01.111111'); +SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; +sync_slave_with_master; + +connection slave; +--query_vertical SELECT * FROM t1; +--query_vertical SELECT * FROM t2; +--query_vertical SELECT * FROM t3; +SELECT TABLE_NAME, TABLE_ROWS, AVG_ROW_LENGTH,DATA_LENGTH FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME RLIKE 't[1-3]' ORDER BY TABLE_NAME; + +connection master; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +connection slave; +SET @@global.mysql56_temporal_format=DEFAULT; +connection master; +SET @@global.mysql56_temporal_format=DEFAULT; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_typeconv.test b/mysql-test/suite/rpl/t/rpl_typeconv.test index c2517086258..9e566258882 100644 --- a/mysql-test/suite/rpl/t/rpl_typeconv.test +++ b/mysql-test/suite/rpl/t/rpl_typeconv.test @@ -1 +1,78 @@ ---source 
include/rpl_typeconv.inc +# +# This include file is used by more than one test suite +# (currently rpl and binlog_encryption suite). +# Please check all dependent tests after modifying it +# + +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +connection slave; +set @saved_slave_type_conversions = @@global.slave_type_conversions; +CREATE TABLE type_conversions ( + TestNo INT AUTO_INCREMENT PRIMARY KEY, + Source TEXT, + Target TEXT, + Flags TEXT, + On_Master LONGTEXT, + On_Slave LONGTEXT, + Expected LONGTEXT, + Compare INT, + Error TEXT); + +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; +SELECT @@global.slave_type_conversions; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; +SELECT @@global.slave_type_conversions; +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY,NONEXISTING_BIT'; +SELECT @@global.slave_type_conversions; + +# Checking strict interpretation of type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS=''; +source suite/rpl/include/type_conversions.test; + +# Checking lossy integer type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY'; +source suite/rpl/include/type_conversions.test; + +# Checking non-lossy integer type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY'; +source suite/rpl/include/type_conversions.test; + +# Checking all type conversions +connection slave; +SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_LOSSY,ALL_NON_LOSSY'; +source suite/rpl/include/type_conversions.test; + +connection slave; +--echo **** Result of conversions **** +disable_query_log; +SELECT RPAD(Source, 15, ' ') AS Source_Type, + RPAD(Target, 15, ' ') AS Target_Type, + RPAD(Flags, 25, ' ') AS All_Type_Conversion_Flags, + IF(Compare IS NULL AND Error IS NOT NULL, '<Correct error>', + IF(Compare, '<Correct value>', + CONCAT("'", On_Slave, "' != '", Expected, "'"))) + AS Value_On_Slave + FROM type_conversions; +enable_query_log; +DROP TABLE type_conversions; + +call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t1. 
cannot be converted from type.* error.* 1677"); + +connection master; +DROP TABLE t1; +sync_slave_with_master; + +set global slave_type_conversions = @saved_slave_type_conversions; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index e82c03a12f3..d29b16882d8 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -221,7 +221,7 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL -READ_ONLY NO +READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_INSTANCES SESSION_VALUE NULL diff --git a/mysql-test/suite/sys_vars/t/all_vars.test b/mysql-test/suite/sys_vars/t/all_vars.test index 06edc04d25c..41c89a4bed9 100644 --- a/mysql-test/suite/sys_vars/t/all_vars.test +++ b/mysql-test/suite/sys_vars/t/all_vars.test @@ -12,8 +12,6 @@ eval INSTALL PLUGIN federated SONAME "$HA_FEDERATEDX_SO"; eval INSTALL PLUGIN oqgraph SONAME "$HA_OQGRAPH_SO"; eval INSTALL PLUGIN sphinx SONAME "$HA_SPHINX_SO"; eval INSTALL PLUGIN innodb SONAME "$HA_INNODB_SO"; -eval INSTALL PLUGIN rpl_semi_sync_master SONAME "$SEMISYNC_MASTER_SO"; -eval INSTALL PLUGIN rpl_semi_sync_slave SONAME "$SEMISYNC_SLAVE_SO"; --enable_abort_on_error --enable_result_log --enable_query_log diff --git a/mysql-test/suite/versioning/r/delete_history.result b/mysql-test/suite/versioning/r/delete_history.result index cf68536d4a6..d07c71ee9f5 100644 --- a/mysql-test/suite/versioning/r/delete_history.result +++ b/mysql-test/suite/versioning/r/delete_history.result @@ -168,6 +168,8 @@ x explain extended delete history from t1 before system_time '2039-01-01 23:00'; id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where +Warnings: +Note 1003 delete from `test`.`t1` FOR SYSTEM_TIME BEFORE TIMESTAMP '2039-01-01 23:00' where `test`.`t1`.`row_end` < '2039-01-01 23:00' and is_history(`test`.`t1`.`row_end`) create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00'; call p; select * from t1; diff --git a/mysys/my_atomic_writes.c b/mysys/my_atomic_writes.c index cfd7dbb3cbf..bc756efab63 100644 --- a/mysys/my_atomic_writes.c +++ b/mysys/my_atomic_writes.c @@ -117,7 +117,7 @@ static my_bool test_if_shannon_card_exists() char path[32]; struct stat stat_buff; - sprintf(path, "/dev/df%c", dev_part); + snprintf(path, sizeof(path), "/dev/df%c", dev_part); #ifdef TEST_SHANNON if (stat(path, &stat_buff) < 0) { @@ -126,8 +126,10 @@ static my_bool test_if_shannon_card_exists() } #endif shannon_devices[shannon_found_devices].st_dev= stat_buff.st_rdev; - sprintf(shannon_devices[shannon_found_devices].dev_name, "/dev/sct%c", - dev_part); + snprintf(shannon_devices[shannon_found_devices].dev_name, + sizeof(shannon_devices[shannon_found_devices].dev_name), + "/dev/sct%c", + dev_part); #ifdef TEST_SHANNON printf("%s(): i=%d, stat_buff.st_dev=0x%lx, stat_buff.st_rdev=0x%lx, st_rdev=0x%lx, dev_name=%s\n", @@ -150,13 +152,15 @@ static my_bool test_if_shannon_card_exists() for (dev_no= 1 ; dev_no < 9 ; dev_no++) { - sprintf(path, "/dev/df%c%d", dev_part, dev_no); + snprintf(path, sizeof(path), "/dev/df%c%d", dev_part, dev_no); if (stat(path, &stat_buff) < 0) break; shannon_devices[shannon_found_devices].st_dev= stat_buff.st_rdev; - sprintf(shannon_devices[shannon_found_devices].dev_name, "/dev/sct%c%d", - dev_part, dev_no); + 
snprintf(shannon_devices[shannon_found_devices].dev_name, + sizeof(shannon_devices[shannon_found_devices].dev_name), + "/dev/sct%c%d", + dev_part, dev_no); #ifdef TEST_SHANNON printf("%s(): i=%d, st_dev=0x%lx, st_rdev=0x%lx, dev_name=%s\n", diff --git a/mysys/my_conio.c b/mysys/my_conio.c index 04750635dd3..ec30b9dc6c7 100644 --- a/mysys/my_conio.c +++ b/mysys/my_conio.c @@ -50,7 +50,7 @@ int my_pthread_auto_mutex_lock(HANDLE* ph, const char* name, int id, int time) DWORD res; char tname[FN_REFLEN]; - sprintf(tname, "%s-%08X", name, id); + snprintf(tname, sizeof(tname), "%s-%08X", name, id); *ph= CreateMutex(NULL, FALSE, tname); if (*ph == NULL) diff --git a/mysys/my_likely.c b/mysys/my_likely.c index 2ae587be18d..d52074f01e4 100644 --- a/mysys/my_likely.c +++ b/mysys/my_likely.c @@ -76,7 +76,7 @@ void end_my_likely(FILE *out) if (!(likely_file= out)) { char name[80]; - sprintf(name, "/tmp/unlikely-%lu.out", (ulong) getpid()); + snprintf(name, sizeof(name), "/tmp/unlikely-%lu.out", (ulong) getpid()); if ((likely_file= my_fopen(name, O_TRUNC | O_WRONLY, MYF(MY_WME)))) do_close= 1; else diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c index 24b4b3aff15..d9f0e41de8d 100644 --- a/mysys/my_thr_init.c +++ b/mysys/my_thr_init.c @@ -421,7 +421,7 @@ const char *my_thread_name(void) if (!tmp->name[0]) { my_thread_id id= my_thread_dbug_id(); - sprintf(name_buff,"T@%lu", (ulong) id); + snprintf(name_buff, sizeof(name_buff), "T@%lu", (ulong) id); strmake_buf(tmp->name, name_buff); } return tmp->name; diff --git a/plugin/auth_pam/testing/pam_mariadb_mtr.c b/plugin/auth_pam/testing/pam_mariadb_mtr.c index f61a8da7682..108aeb941ac 100644 --- a/plugin/auth_pam/testing/pam_mariadb_mtr.c +++ b/plugin/auth_pam/testing/pam_mariadb_mtr.c @@ -45,6 +45,7 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags __attribute__((unused)), else { free(resp); + resp= NULL; msg[0].msg_style = PAM_PROMPT_ECHO_ON; msg[0].msg = (char*)"PIN:"; pam_err = (*conv->conv)(1, msgp, &resp, conv->appdata_ptr); diff --git a/plugin/feedback/feedback.cc b/plugin/feedback/feedback.cc index 3b2e95f1e13..160545995d0 100644 --- a/plugin/feedback/feedback.cc +++ b/plugin/feedback/feedback.cc @@ -367,7 +367,7 @@ static MYSQL_SYSVAR_STR(user_info, user_info, NULL, NULL, ""); static MYSQL_SYSVAR_STR(url, url, PLUGIN_VAR_READONLY | PLUGIN_VAR_RQCMDARG, "Space separated URLs to send the feedback report to.", NULL, NULL, - DEFAULT_PROTO "mariadb.org/feedback_plugin/post"); + DEFAULT_PROTO "feedback.mariadb.org/rest/v1/post"); static MYSQL_SYSVAR_ULONG(send_timeout, send_timeout, PLUGIN_VAR_RQCMDARG, "Timeout (in seconds) for the sending the report.", NULL, NULL, 60, 1, 60*60*24, 1); diff --git a/plugin/type_inet/sql_type_inet.cc b/plugin/type_inet/sql_type_inet.cc index 1c6e0e02e73..789ae078fd5 100644 --- a/plugin/type_inet/sql_type_inet.cc +++ b/plugin/type_inet/sql_type_inet.cc @@ -1443,7 +1443,7 @@ void Type_handler_inet6::Item_param_setup_conversion(THD *thd, void Type_handler_inet6::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_str) const { DBUG_ASSERT(item->type_handler() == this); NativeBufferInet6 tmp; @@ -1466,7 +1466,7 @@ void Type_handler_inet6::make_sort_key_part(uchar *to, Item *item, uint Type_handler_inet6::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_str) const { DBUG_ASSERT(item->type_handler() == this); NativeBufferInet6 tmp; diff --git a/plugin/type_inet/sql_type_inet.h 
b/plugin/type_inet/sql_type_inet.h index 64d26953cff..80d8544e6c9 100644 --- a/plugin/type_inet/sql_type_inet.h +++ b/plugin/type_inet/sql_type_inet.h @@ -528,11 +528,11 @@ public: } void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index 161ab767f73..a16c086339f 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -239,7 +239,6 @@ SET GLOBAL general_log = 'OFF'; ALTER TABLE general_log MODIFY event_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, MODIFY user_host MEDIUMTEXT NOT NULL, - MODIFY thread_id INTEGER NOT NULL, MODIFY server_id INTEGER UNSIGNED NOT NULL, MODIFY command_type VARCHAR(64) NOT NULL, MODIFY argument MEDIUMTEXT NOT NULL, @@ -841,3 +840,8 @@ IF 1 = (SELECT count(*) FROM information_schema.VIEWS WHERE TABLE_CATALOG = 'def END IF// DELIMITER ; + +# MDEV-22683 - upgrade Host and Owner of servers +ALTER TABLE servers + MODIFY Host varchar(2048) NOT NULL DEFAULT '', + MODIFY Owner varchar(512) NOT NULL DEFAULT ''; diff --git a/sql-common/client.c b/sql-common/client.c index 0bac44d1db0..811bd4e8e58 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -4213,7 +4213,7 @@ int STDCALL mysql_set_character_set(MYSQL *mysql, const char *cs_name) /* Skip execution of "SET NAMES" for pre-4.1 servers */ if (mysql_get_server_version(mysql) < 40100) return 0; - sprintf(buff, "SET NAMES %s", cs_name); + snprintf(buff, sizeof(buff), "SET NAMES %s", cs_name); if (!mysql_real_query(mysql, buff, (uint) strlen(buff))) { mysql->charset= cs; diff --git a/sql/filesort.cc b/sql/filesort.cc index 4e5aeccb78e..262ffecb882 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -223,7 +223,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str); DEBUG_SYNC(thd, "filesort_start"); - if (!(sort= new SORT_INFO)) + if (!(sort= new SORT_INFO)) // Note that this is not automatically freed! 
return 0; if (subselect && subselect->filesort_buffer.is_allocated()) @@ -434,6 +434,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, error= 0; err: + param.tmp_buffer.free(); if (!subselect || !subselect->is_uncacheable()) { if (!param.using_addon_fields()) @@ -1105,7 +1106,7 @@ void store_length(uchar *to, uint length, uint pack_length) void Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null; @@ -1113,7 +1114,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - String *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp_buffer); if (!res) { if (maybe_null) @@ -1175,7 +1176,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { longlong value= item->val_int_result(); make_sort_key_longlong(to, item->maybe_null, item->null_value, @@ -1186,7 +1187,7 @@ Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. @@ -1208,7 +1209,7 @@ Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_timestamp_common::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -1301,7 +1302,7 @@ Type_handler::make_packed_sort_key_longlong(uchar *to, bool maybe_null, void Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null) @@ -1321,7 +1322,7 @@ Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_real_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { double value= item->val_result(); if (item->maybe_null) @@ -2566,7 +2567,7 @@ void Sort_param::try_to_pack_sortkeys() uint Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null; @@ -2574,7 +2575,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - Binary_string *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp); if (!res) { if (maybe_null) @@ -2605,7 +2606,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { longlong value= item->val_int_result(); return make_packed_sort_key_longlong(to, item->maybe_null, @@ -2617,7 +2618,7 @@ 
Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null) @@ -2639,7 +2640,7 @@ Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { double value= item->val_result(); if (item->maybe_null) @@ -2660,7 +2661,7 @@ Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. @@ -2682,7 +2683,7 @@ Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -3026,7 +3027,8 @@ static uint make_sortkey(Sort_param *param, uchar *to) { // Item sort_field->item->type_handler()->make_sort_key_part(to, sort_field->item, - sort_field, param); + sort_field, + ¶m->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null)) to++; } @@ -3079,7 +3081,7 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to) Item *item= sort_field->item; length= item->type_handler()->make_packed_sort_key_part(to, item, sort_field, - param); + ¶m->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null)) to++; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1faee3216c8..b4f6982101e 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -1303,7 +1303,18 @@ public: The following code is not safe if you are using different storage engines or different index types per partition. */ - return m_file[0]->index_flags(inx, part, all_parts); + ulong part_flags= m_file[0]->index_flags(inx, part, all_parts); + + /* + The underlying storage engine might support Rowid Filtering. But + ha_partition does not forward the needed SE API calls, so the feature + will not be used. + + Note: It's the same with IndexConditionPushdown, except for its variant + of IndexConditionPushdown+BatchedKeyAccess (that one works). Because of + that, we do not clear HA_DO_INDEX_COND_PUSHDOWN here. 
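+
+    For illustration: if m_file[0]->index_flags() reports
+    HA_DO_INDEX_COND_PUSHDOWN | HA_DO_RANGE_FILTER_PUSHDOWN, the value
+    returned below keeps HA_DO_INDEX_COND_PUSHDOWN and clears only the
+    range-filter bit; all other flag bits are passed through unchanged.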
+ */ + return part_flags & ~HA_DO_RANGE_FILTER_PUSHDOWN; } /** diff --git a/sql/handler.cc b/sql/handler.cc index 7c42b3bbb6f..4afd30021ee 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -4321,7 +4321,7 @@ int handler::check_collation_compatibility() { ulong mysql_version= table->s->mysql_version; - if (mysql_version < 50124) + if (mysql_version < Charset::latest_mariadb_version_with_collation_change()) { KEY *key= table->key_info; KEY *key_end= key + table->s->keys; @@ -4335,18 +4335,7 @@ int handler::check_collation_compatibility() continue; Field *field= table->field[key_part->fieldnr - 1]; uint cs_number= field->charset()->number; - if ((mysql_version < 50048 && - (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ - cs_number == 41 || /* latin7_general_ci - bug #29461 */ - cs_number == 42 || /* latin7_general_cs - bug #29461 */ - cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ - cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ - cs_number == 22 || /* koi8u_general_ci - bug #29461 */ - cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ - cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ - (mysql_version < 50124 && - (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ - cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + if (Charset::collation_changed_order(mysql_version, cs_number)) return HA_ADMIN_NEEDS_UPGRADE; } } @@ -7644,11 +7633,13 @@ static int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_key_length) { int res = 0; + uint to_delete_counter= 0; + INDEX_STATS *index_stats_to_delete[MAX_INDEXES]; DBUG_ENTER("del_global_index_stats_for_table"); mysql_mutex_lock(&LOCK_global_index_stats); - for (uint i= 0; i < global_index_stats.records;) + for (uint i= 0; i < global_index_stats.records; i++) { INDEX_STATS *index_stats = (INDEX_STATS*) my_hash_element(&global_index_stats, i); @@ -7658,19 +7649,13 @@ int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_ke index_stats->index_name_length >= cache_key_length && !memcmp(index_stats->index, cache_key, cache_key_length)) { - res= my_hash_delete(&global_index_stats, (uchar*)index_stats); - /* - In our HASH implementation on deletion one elements - is moved into a place where a deleted element was, - and the last element is moved into the empty space. - Thus we need to re-examine the current element, but - we don't have to restart the search from the beginning. - */ + index_stats_to_delete[to_delete_counter++]= index_stats; } - else - i++; } + for (uint i= 0; i < to_delete_counter; i++) + res= my_hash_delete(&global_index_stats, (uchar*)index_stats_to_delete[i]); + mysql_mutex_unlock(&LOCK_global_index_stats); DBUG_RETURN(res); } diff --git a/sql/item.cc b/sql/item.cc index 19c0b341982..85bb1f42632 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5717,7 +5717,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) max_arg_level for the function if it's needed. 
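
      An illustrative case (not from this patch): in
        SELECT (SELECT MAX(t1.a) FROM t2) FROM t1
      the MAX() argument is an outer reference, and the set_if_bigger()
      calls below update the enclosing set function's max_arg_level with
      the nest level of the SELECT where the field was resolved.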
*/ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { Item::Type ref_type= (*reference)->type(); @@ -5743,7 +5744,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) (Item_ident*) (*reference) : 0), false); if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -6081,7 +6083,6 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (!thd->lex->current_select->no_wrap_view_item && thd->lex->in_sum_func && - thd->lex == select->parent_lex && thd->lex->in_sum_func->nest_level == select->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8116,7 +8117,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8140,7 +8142,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8155,7 +8158,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) 1. outer reference (will be fixed later by the fix_inner_refs function); 2. an unnamed reference inside an aggregate function. 
*/ - if (!((*ref)->type() == REF_ITEM && + if (!set_properties_only && + !((*ref)->type() == REF_ITEM && ((Item_ref *)(*ref))->ref_type() == OUTER_REF) && (((*ref)->with_sum_func() && name.str && !(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE && diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 9472e184124..4aaf66ec695 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -376,6 +376,11 @@ class Item_func_json_length: public Item_long_func { bool check_arguments() const { + if (arg_count == 0 || arg_count > 2) + { + my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), func_name()); + return true; + } return args[0]->check_type_can_return_text(func_name()) || (arg_count > 1 && args[1]->check_type_general_purpose_string(func_name())); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 000d198eb0c..0bea808077e 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -3711,6 +3711,7 @@ String *Item_func_weight_string::val_str(String *str) flags); DBUG_ASSERT(frm_length <= tmp_length); + str->set_charset(&my_charset_bin); str->length(frm_length); null_value= 0; return str; @@ -3790,6 +3791,7 @@ String *Item_func_unhex::val_str(String *str) from= res->ptr(); null_value= 0; + str->set_charset(&my_charset_bin); str->length(length); to= (char*) str->ptr(); if (res->length() % 2) diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 9baf945644e..46942c0c785 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -409,7 +409,8 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->get_with_sum_func_cache()->set_with_sum_func(); } - thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); + if (aggr_sel) + thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); if ((thd->lex->describe & DESCRIBE_EXTENDED) && aggr_sel) { diff --git a/sql/mysqld.h b/sql/mysqld.h index d31f0159eb6..fe34a438405 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -885,6 +885,8 @@ enum enum_query_type // it evaluates to. Should be used for error messages, so that they // don't reveal values. QT_NO_DATA_EXPANSION= (1 << 9), + // Remove wrappers added for TVC when creating or showing view + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW= (1 << 12), }; diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 6332e02453d..3df6c2d9f66 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -261,6 +261,12 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, STRING_WITH_LEN("now WAIT_FOR proceed_by_1000")); } }); + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2001) { + DBUG_ASSERT(!rgi->worker_error || entry->stop_on_error_sub_id == sub_id); + debug_sync_set_action(thd, STRING_WITH_LEN("now SIGNAL cont_worker3")); + } + }); #endif if (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING) @@ -284,6 +290,11 @@ signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err) In case we get an error during commit, inform following transactions that we aborted our commit. 
*/ + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2002) { + debug_sync_set_action(thd, STRING_WITH_LEN("now WAIT_FOR cont_worker2")); + }}); + rgi->unmark_start_commit(); rgi->cleanup_context(thd, true); rgi->rli->abort_slave= true; @@ -790,7 +801,14 @@ do_retry: thd->reset_killed(); thd->clear_error(); rgi->killed_for_retry = rpl_group_info::RETRY_KILL_NONE; - +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2003) { + debug_sync_set_action(thd, + STRING_WITH_LEN("now WAIT_FOR cont_worker3")); + } + }); +#endif /* If we retry due to a deadlock kill that occurred during the commit step, we might have already updated (but not committed) an update of table @@ -808,15 +826,12 @@ do_retry: for (;;) { mysql_mutex_lock(&entry->LOCK_parallel_entry); - if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || + register_wait_for_prior_event_group_commit(rgi, entry); + if (!(entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || #ifndef DBUG_OFF - (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) || + (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) || #endif - rgi->gtid_sub_id < entry->stop_on_error_sub_id) - { - register_wait_for_prior_event_group_commit(rgi, entry); - } - else + rgi->gtid_sub_id < entry->stop_on_error_sub_id)) { /* A failure of a preceding "parent" transaction may not be @@ -1993,6 +2008,9 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, gco->prior_sub_id= prior_sub_id; gco->installed= false; gco->flags= 0; +#ifndef DBUG_OFF + gco->gc_done= false; +#endif return gco; } @@ -2000,6 +2018,10 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, void rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco) { +#ifndef DBUG_OFF + DBUG_ASSERT(!gco->gc_done); + gco->gc_done= true; +#endif if (!loc_gco_list) loc_gco_last_ptr_ptr= &gco->next_gco; else diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index b88e77d5427..9da0c70d12e 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -90,6 +90,9 @@ struct group_commit_orderer { FORCE_SWITCH= 2 }; uint8 flags; +#ifndef DBUG_OFF + bool gc_done; +#endif }; diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index dbb54c7d0f0..de5fcb83a7d 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -2425,8 +2425,13 @@ mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco, uint64 count= ++e->count_committing_event_groups; /* Signal any following GCO whose wait_count has been reached now. */ tmp= gco; + + DBUG_ASSERT(!tmp->gc_done); + while ((tmp= tmp->next_gco)) { + DBUG_ASSERT(!tmp->gc_done); + uint64 wait_count= tmp->wait_count; if (wait_count > count) break; diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index b57fc199826..17adeed86e7 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -317,8 +317,8 @@ void Active_tranx::clear_active_tranx_nodes(const char *log_file_name, /******************************************************************************* * - * <Repl_semi_sync_master> class: the basic code layer for syncsync master. - * <Repl_semi_sync_slave> class: the basic code layer for syncsync slave. + * <Repl_semi_sync_master> class: the basic code layer for semisync master. + * <Repl_semi_sync_slave> class: the basic code layer for semisync slave. 
* * The most important functions during semi-syn replication listed: * @@ -809,8 +809,6 @@ void Repl_semi_sync_master::dump_end(THD* thd) remove_slave(); ack_receiver.remove_slave(thd); - - return; } int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 44e31125d49..b739634d8e4 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -27,6 +27,7 @@ #ifdef __WIN__ #include <crtdbg.h> +#include <direct.h> #define SIGNAL_FMT "exception 0x%x" #else #define SIGNAL_FMT "signal %d" @@ -66,30 +67,30 @@ static inline void output_core_info() (int) len, buff); } #ifdef __FreeBSD__ - if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/curproc/rlimit", O_RDONLY, MYF(0))) >= 0) #else - if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/self/limits", O_RDONLY, MYF(0))) >= 0) #endif { my_safe_printf_stderr("Resource Limits:\n"); - while ((len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0))) > 0) + while ((len= read(fd, (uchar*)buff, sizeof(buff))) > 0) { my_write_stderr(buff, len); } - my_close(fd, MYF(0)); + close(fd); } #ifdef __linux__ - if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/sys/kernel/core_pattern", O_RDONLY, MYF(0))) >= 0) { - len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } - if ((fd= my_open("/proc/version", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/version", O_RDONLY)) >= 0) { - len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } #endif #elif defined(__APPLE__) || defined(__FreeBSD__) @@ -103,11 +104,14 @@ static inline void output_core_info() { my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); } -#else +#elif defined(HAVE_GETCWD) char buff[80]; - my_getwd(buff, sizeof(buff), 0); - my_safe_printf_stderr("Writing a core file at %s\n", buff); - fflush(stderr); + + if (getcwd(buff, sizeof(buff))) + { + my_safe_printf_stderr("Writing a core file at %.*s\n", (int) sizeof(buff), buff); + fflush(stderr); + } #endif } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ce9f13ca01d..3cfb68b69b4 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2565,6 +2565,8 @@ static bool acl_load(THD *thd, const Grant_tables& tables) "possible to remove this privilege using REVOKE.", host.host.hostname, host.db); } + else if (!host.db) + host.db= const_cast<char*>(host_not_specified.str); host.access= host_table.get_access(); host.access= fix_rights_for_db(host.access); host.sort= get_magic_sort("hd", host.host.hostname, host.db); @@ -2573,8 +2575,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables) { sql_print_warning("'host' entry '%s|%s' " "ignored in --skip-name-resolve mode.", - safe_str(host.host.hostname), - safe_str(host.db)); + host.host.hostname, host.db); continue; } #ifndef TO_BE_REMOVED @@ -3666,7 +3667,7 @@ privilege_t acl_get(const char *host, const char *ip, ACL_HOST *acl_host=dynamic_element(&acl_hosts,i,ACL_HOST*); if (compare_hostname(&acl_host->host,host,ip)) { - if (!acl_host->db || !wild_compare(db,acl_host->db,db_is_pattern)) + if (!wild_compare(db, acl_host->db, db_is_pattern)) { host_access=acl_host->access; // Fully specified. 
Take it break; @@ -6691,6 +6692,7 @@ static int update_role_columns(GRANT_TABLE *merged, } } +restart: for (uint i=0 ; i < mh->records ; i++) { GRANT_COLUMN *col = (GRANT_COLUMN *)my_hash_element(mh, i); @@ -6699,6 +6701,7 @@ static int update_role_columns(GRANT_TABLE *merged, { changed= 1; my_hash_delete(mh, (uchar*)col); + goto restart; } } DBUG_ASSERT(rights == merged->cols); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 6ccc9be9901..3364e8ad639 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -5960,7 +5960,10 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length, if (cached_field_index < table->s->fields && !my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name.str, name)) + { field= table->field[cached_field_index]; + DEBUG_SYNC(thd, "table_field_cached"); + } else { LEX_CSTRING fname= {name, length}; @@ -6411,6 +6414,13 @@ find_field_in_tables(THD *thd, Item_ident *item, if (last_table) last_table= last_table->next_name_resolution_table; + uint fake_index_for_duplicate_search= NO_CACHED_FIELD_INDEX; + /* + For the field search it will point to field cache, but for duplicate + search it will point to fake_index_for_duplicate_search (no cache + present). + */ + uint *current_cache= &(item->cached_field_index); for (; cur_table != last_table ; cur_table= cur_table->next_name_resolution_table) { @@ -6420,7 +6430,7 @@ find_field_in_tables(THD *thd, Item_ident *item, SQLCOM_SHOW_FIELDS) ? false : check_privileges, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6435,7 +6445,7 @@ find_field_in_tables(THD *thd, Item_ident *item, item->name.str, db, table_name, ref, false, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6452,8 +6462,19 @@ find_field_in_tables(THD *thd, Item_ident *item, Store the original table of the field, which may be different from cur_table in the case of NATURAL/USING join. */ - item->cached_table= (!actual_table->cacheable_table || found) ? - 0 : actual_table; + if (actual_table->cacheable_table /*(1)*/ && !found /*(2)*/) + { + /* + We have just found a field allowed to cache (1) and + it is not dublicate search (2). 
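+        (When the scan later continues to look for a duplicate match,
+        current_cache is switched to fake_index_for_duplicate_search further
+        below, so this cached_field_index is not overwritten by that second
+        pass.)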
+ */ + item->cached_table= actual_table; + } + else + { + item->cached_table= NULL; + item->cached_field_index= NO_CACHED_FIELD_INDEX; + } DBUG_ASSERT(thd->where); /* @@ -6472,6 +6493,7 @@ find_field_in_tables(THD *thd, Item_ident *item, return (Field*) 0; } found= cur_field; + current_cache= &fake_index_for_duplicate_search; } } @@ -7926,9 +7948,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, table_list; table_list= table_list->next_local) { - if (table_list->merge_underlying_list) + if (table_list->is_merged_derived() && table_list->merge_underlying_list) { - DBUG_ASSERT(table_list->is_merged_derived()); Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); bool res; diff --git a/sql/sql_class.h b/sql/sql_class.h index a5a5f3df44d..68a69762354 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1841,7 +1841,7 @@ show_system_thread(enum_thread_type thread) RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_BACKGROUND); RETURN_NAME_AS_STRING(SYSTEM_THREAD_SEMISYNC_MASTER_BACKGROUND); default: - sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread); + snprintf(buf, sizeof(buf), "<UNKNOWN SYSTEM THREAD: %d>", thread); return buf; } #undef RETURN_NAME_AS_STRING @@ -7429,7 +7429,7 @@ public: if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root, dst->length + 1)))) return true; - sprintf(tmp, "%.*s%.*s%.*s", + snprintf(tmp, dst->length + 1, "%.*s%.*s%.*s", (int) m_db.length, (m_db.length ? m_db.str : ""), dot, ".", (int) m_name.length, m_name.str); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e628ce60d2d..81c3141c252 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -1070,6 +1070,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds, DBUG_RETURN(TRUE); select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index fa060afde8d..30a464d06e9 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -351,24 +351,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_RETURN(FALSE); } - if (dt_select->uncacheable & UNCACHEABLE_RAND) - { - /* There is random function => fall back to materialization. */ - cause= "Random function in the select"; - if (unlikely(thd->trace_started())) - { - OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived, - derived->is_derived() ? "derived" : "view", - derived->alias.str ? derived->alias.str : "<NULL>", - derived->get_unit()->first_select()->select_number, - "materialized"); - trace_derived.add("cause", cause); - } - derived->change_refs_to_fields(); - derived->set_materialized_derived(); - DBUG_RETURN(FALSE); - } - if (derived->dt_handler) { derived->change_refs_to_fields(); diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1b59dce10b9..e8e8a55540b 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -161,7 +161,7 @@ void Explain_query::query_plan_ready() Send EXPLAIN output to the client. 
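
  With the 'extended' flag added below, the statement is additionally
  printed with QT_EXPLAIN_EXTENDED and pushed as a Note, which is what
  produces the extra warning shown after EXPLAIN EXTENDED (a summary of
  the new code in the function body, see below).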
*/ -int Explain_query::send_explain(THD *thd) +int Explain_query::send_explain(THD *thd, bool extended) { select_result *result; LEX *lex= thd->lex; @@ -174,8 +174,22 @@ int Explain_query::send_explain(THD *thd) if (thd->lex->explain_json) print_explain_json(result, thd->lex->analyze_stmt); else + { res= print_explain(result, lex->describe, thd->lex->analyze_stmt); - + if (extended) + { + char buff[1024]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + /* + The warnings system requires input in utf8, @see + mysqld_show_warnings(). + */ + lex->unit.print(&str, QT_EXPLAIN_EXTENDED); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_YES, str.c_ptr_safe()); + } + } if (res) result->abort_result_set(); else @@ -185,6 +199,7 @@ int Explain_query::send_explain(THD *thd) } + /* The main entry point to print EXPLAIN of the entire query */ diff --git a/sql/sql_explain.h b/sql/sql_explain.h index df0a165860d..780945acbdc 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -474,7 +474,7 @@ public: bool is_analyze); /* Send tabular EXPLAIN to the client */ - int send_explain(THD *thd); + int send_explain(THD *thd, bool extended); /* Return tabular EXPLAIN output as a text string */ bool print_explain_str(THD *thd, String *out_str, bool is_analyze); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 132934773f9..c17c93c5a9c 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -853,7 +853,8 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, save_insert_query_plan(thd, table_list); if (thd->lex->describe) { - retval= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + retval= thd->lex->explain->send_explain(thd, extended); goto abort; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index fad9aabdc90..91837ffb5c3 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1314,6 +1314,8 @@ void LEX::start(THD *thd_arg) frame_bottom_bound= NULL; win_spec= NULL; + upd_del_where= NULL; + vers_conditions.empty(); period_conditions.empty(); @@ -3024,6 +3026,7 @@ void st_select_lex::init_select() curr_tvc_name= 0; in_tvc= false; versioned_tables= 0; + is_tvc_wrapper= false; nest_flags= 0; } @@ -3922,40 +3925,45 @@ LEX::LEX() } +bool LEX::can_be_merged() +{ + return unit.can_be_merged(); +} + + /* - Check whether the merging algorithm can be used on this VIEW + Check whether the merging algorithm can be used for this unit SYNOPSIS - LEX::can_be_merged() + st_select_lex_unit::can_be_merged() DESCRIPTION - We can apply merge algorithm if it is single SELECT view with - subqueries only in WHERE clause (we do not count SELECTs of underlying - views, and second level subqueries) and we have not grpouping, ordering, - HAVING clause, aggregate functions, DISTINCT clause, LIMIT clause and - several underlying tables. + We can apply merge algorithm for a unit if it is single SELECT with + subqueries only in WHERE clauses or in ON conditions or in select list + (we do not count SELECTs of underlying views/derived tables/CTEs and + second level subqueries) and we have no grouping, ordering, HAVING + clause, aggregate functions, DISTINCT clause, LIMIT clause. 
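+
+    Example (illustrative, not from this patch):
+      CREATE VIEW v1 AS SELECT a, b FROM t1 WHERE a > 0;      -- mergeable
+      CREATE VIEW v2 AS SELECT DISTINCT a FROM t1 LIMIT 10;   -- not mergeable
+    v1 satisfies all the conditions above; v2 uses DISTINCT and LIMIT and so
+    only the temporary table algorithm can be used for it.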
RETURN FALSE - only temporary table algorithm can be used TRUE - merge algorithm can be used */ -bool LEX::can_be_merged() +bool st_select_lex_unit::can_be_merged() { // TODO: do not forget implement case when select_lex.table_list.elements==0 /* find non VIEW subqueries/unions */ - bool selects_allow_merge= (first_select_lex()->next_select() == 0 && - !(first_select_lex()->uncacheable & + bool selects_allow_merge= (first_select()->next_select() == 0 && + !(first_select()->uncacheable & UNCACHEABLE_RAND)); if (selects_allow_merge) { - for (SELECT_LEX_UNIT *tmp_unit= first_select_lex()->first_inner_unit(); + for (SELECT_LEX_UNIT *tmp_unit= first_select()->first_inner_unit(); tmp_unit; tmp_unit= tmp_unit->next_unit()) { - if (tmp_unit->first_select()->parent_lex == this && - (tmp_unit->item != 0 && + if ((tmp_unit->item != 0 && (tmp_unit->item->place() != IN_WHERE && tmp_unit->item->place() != IN_ON && tmp_unit->item->place() != SELECT_LIST))) @@ -3967,12 +3975,12 @@ bool LEX::can_be_merged() } return (selects_allow_merge && - first_select_lex()->group_list.elements == 0 && - first_select_lex()->having == 0 && - first_select_lex()->with_sum_func == 0 && - first_select_lex()->table_list.elements >= 1 && - !(first_select_lex()->options & SELECT_DISTINCT) && - first_select_lex()->select_limit == 0); + first_select()->group_list.elements == 0 && + first_select()->having == 0 && + first_select()->with_sum_func == 0 && + first_select()->table_list.elements >= 1 && + !(first_select()->options & SELECT_DISTINCT) && + first_select()->select_limit == 0); } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 5d3aed56740..f7db8c6942c 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1034,6 +1034,8 @@ public: bool set_lock_to_the_last_select(Lex_select_lock l); + bool can_be_merged(); + friend class st_select_lex; }; @@ -1163,7 +1165,8 @@ public: st_select_lex. 
*/ uint curr_tvc_name; - + /* true <=> select has been created a TVC wrapper */ + bool is_tvc_wrapper; /* Needed to correctly generate 'PRIMARY' or 'SIMPLE' for select_type column of EXPLAIN @@ -1443,6 +1446,10 @@ public: } bool setup_ref_array(THD *thd, uint order_group_num); void print(THD *thd, String *str, enum_query_type query_type); + void print_item_list(THD *thd, String *str, enum_query_type query_type); + void print_set_clause(THD *thd, String *str, enum_query_type query_type); + void print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type); static void print_order(String *str, ORDER *order, enum_query_type query_type); @@ -3541,6 +3548,8 @@ public: Window_frame_bound *frame_bottom_bound; Window_spec *win_spec; + Item *upd_del_where; + /* System Versioning */ vers_select_conds_t vers_conditions; vers_select_conds_t period_conditions; diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index dd19807dd6d..60e7abc3fa2 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -29,7 +29,7 @@ enum err_msgs_index { - en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, + en_US= 0, zh_CN, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK, es_ES, sv_SE, uk_UA, hi_IN } ERR_MSGS_INDEX; @@ -38,6 +38,7 @@ enum err_msgs_index MY_LOCALE_ERRMSGS global_errmsgs[]= { {"english", NULL}, + {"chinese", NULL}, {"czech", NULL}, {"danish", NULL}, {"dutch", NULL}, @@ -2095,7 +2096,7 @@ MY_LOCALE my_locale_zh_CN '.', /* decimal point zh_CN */ ',', /* thousands_sep zh_CN */ "\x03", /* grouping zh_CN */ - &global_errmsgs[en_US] + &global_errmsgs[zh_CN] ); /***** LOCALE END zh_CN *****/ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7176eebd3f3..62d33d2a007 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4635,7 +4635,10 @@ mysql_execute_command(THD *thd) thd->protocol= save_protocol; } if (!res && thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } delete sel_result; MYSQL_INSERT_DONE(res, (ulong) thd->get_row_count_func()); /* @@ -4815,7 +4818,10 @@ mysql_execute_command(THD *thd) thd->protocol= save_protocol; } if (!res && (explain || lex->analyze_stmt)) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } /* revert changes for SP */ MYSQL_INSERT_SELECT_DONE(res, (ulong) thd->get_row_count_func()); @@ -4882,7 +4888,10 @@ mysql_execute_command(THD *thd) if (thd->lex->analyze_stmt || thd->lex->describe) { if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } delete sel_result; @@ -4943,7 +4952,10 @@ mysql_execute_command(THD *thd) else { if (lex->describe || lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } multi_delete_error: delete result; @@ -6328,7 +6340,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) thd->protocol= save_protocol; } if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } } } @@ -9363,7 +9378,9 @@ static 
my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg) if (!(arg->thd->security_ctx->master_access & PRIV_KILL_OTHER_USER_PROCESS) && !arg->thd->security_ctx->user_matches(thd->security_ctx)) - return 1; + { + return MY_TEST(arg->thd->security_ctx->master_access & PROCESS_ACL); + } if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root)) { mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete @@ -9485,7 +9502,10 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) my_ok(thd, rows); break; case ER_KILL_DENIED_ERROR: - my_error(error, MYF(0), (long long) thd->thread_id); + char buf[DEFINER_LENGTH+1]; + strxnmov(buf, sizeof(buf), user->user.str, "@", user->host.str, NULL); + my_printf_error(ER_KILL_DENIED_ERROR, ER_THD(thd, ER_CANNOT_USER), MYF(0), + "KILL USER", buf); break; case ER_OUT_OF_RESOURCES: default: diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index decddba6c4f..36dc0853908 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -432,7 +432,7 @@ static int send_file(THD *thd) /** Internal to mysql_binlog_send() routine that recalculates checksum for - 1. FD event (asserted) that needs additional arranment prior sending to slave. + 1. FD event (asserted) that needs additional arrangement prior sending to slave. 2. Start_encryption_log_event whose Ignored flag is set TODO DBUG_ASSERT can be removed if this function is used for more general cases */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8441ec685dc..774898d8c26 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -244,10 +244,12 @@ static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, - Item *having); + SORT_FIELD *sortorder, ulong keylength, + Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, - uint field_count, Field **first_field, - ulong key_length,Item *having); + uint field_count, Field **first_field, + SORT_FIELD *sortorder, + ulong key_length,Item *having); static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref); static bool setup_new_fields(THD *thd, List<Item> &fields, List<Item> &all_fields, ORDER *new_order); @@ -303,6 +305,9 @@ static Item **get_sargable_cond(JOIN *join, TABLE *table); bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item); +void print_list_item(String *str, List_item *list, + enum_query_type query_type); + static bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond, table_map allowed); @@ -7659,7 +7664,6 @@ best_access_path(JOIN *join, rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables Json_writer_object trace_access_idx(thd); - double eq_ref_rows= 0.0, eq_ref_cost= 0.0; /* full text keys require special treatment */ @@ -7700,14 +7704,13 @@ best_access_path(JOIN *join, type= JT_EQ_REF; trace_access_idx.add("access_type", join_type_str[type]) .add("index", keyinfo->name); + if (!found_ref && table->opt_range_keys.is_set(key)) tmp= adjust_quick_cost(table->opt_range[key].cost, 1); else tmp= table->file->avg_io_cost(); - eq_ref_rows= prev_record_reads(join_positions, idx, + tmp*= prev_record_reads(join_positions, idx, found_ref); - tmp*= eq_ref_rows; - eq_ref_cost= tmp; records=1.0; } else @@ -8009,28 +8012,7 @@ best_access_path(JOIN *join, (table->file->index_flags(start_key->key,0,1) & HA_DO_RANGE_FILTER_PUSHDOWN)) { - double rows; - if (type == 
JT_EQ_REF) - { - /* - Treat EQ_REF access in a special way: - 1. We have no cost for index-only read. Assume its cost is 50% of - the cost of the full read. - - 2. A regular ref access will do #record_count lookups, but eq_ref - has "lookup cache" which reduces the number of lookups made. - The estimation code uses prev_record_reads() call to estimate: - - tmp = prev_record_reads(join_positions, idx, found_ref); - - Set the effective number of rows from "tmp" here. - */ - keyread_tmp= COST_ADD(eq_ref_cost / 2, s->startup_cost); - rows= eq_ref_rows; - } - else - rows= record_count * records; - + double rows= record_count * records; /* If we use filter F with selectivity s the the cost of fetching data by key using this filter will be @@ -8052,46 +8034,63 @@ best_access_path(JOIN *join, cost_of_fetching_1_row = tmp/rows cost_of_fetching_1_key_tuple = keyread_tmp/rows - access_cost_factor is the gain we expect for using rowid filter. - An access_cost_factor of 1.0 means that keyread_tmp is 0 - (using key read is infinitely fast) and the gain for each row when - using filter is great. - An access_cost_factor if 0.0 means that using keyread has the - same cost as reading rows, so there is no gain to get with - filter. - access_cost_factor should never be bigger than 1.0 (if all - calculations are correct) as the cost of keyread should always be - smaller than the cost of fetching the same number of keys + rows. - access_cost_factor should also never be smaller than 0.0. - The one exception is if number of records is 1 (eq_ref), then - because we are comparing rows to cost of keyread_tmp, keyread_tmp - is higher by 1.0. This is a big that will be fixed in a later - version. - - If we have limited the cost (=tmp) of reading rows with 'worst_seek' - we cannot use filters as the cost calculation below would cause - tmp to become negative. The future resultion is to not limit - cost with worst_seek. + Here's a more detailed explanation that uses the formulas behind + the function the call filter->get_adjusted_gain(). The function + takes as a parameter the number of probes/look-ups into the filter + that is equal to the number of fetched key entries that is equal to + the number of row fetches when no filter is used (assuming no + index condition pushdown is employed for the used key access). + Let this number be N. Then the total gain from using the filter is + N*a_adj - b where b is the cost of building the filter and + a_adj is calcilated as follows: + a - (1-access_cost_factor)*(1-s) = + (1+1_cond_eval_cost)*(1-s)-1_probe_cost - (1-access_cost_factor)*(1-s) + = (1-s)*(1_cond_eval_cost+access_cost_factor) - 1_probe_cost. + Here ((1-s)*(1_cond_eval_cost) * N is the gain from checking less + conditions pushed into the table, 1_probe_cost*N is the cost of the + probes and (1*s) * access_cost_factor * N must be the gain from + accessing less rows. + It does not matter how we calculate the cost of N full row fetches + cost_of_fetching_N_rows or + how we calculate the cost of fetching N key entries + cost_of_fetching_N_key_entries + the gain from less row fetches will be + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) * (1-s) + and this should be equal to (1*s) * access_cost_factor * N. + Thus access_cost_factor must be calculated as + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) / N. + + For safety we clip cost_of_fetching_N_key_entries by the value + of cost_of_fetching_N_row though formally it's not necessary. 
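+
+      A numeric illustration (made-up values): with N = 1000 lookups,
+      cost_of_fetching_N_rows = 200 and cost_of_fetching_N_key_entries = 50,
+      access_cost_factor = (200 - 50) / 1000 = 0.15.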
*/ - double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); + /* + For eq_ref access we assume that the cost of fetching N key entries + is equal to the half of fetching N rows + */ + double key_access_cost= + type == JT_EQ_REF ? 0.5 * tmp : MY_MIN(tmp, keyread_tmp); + double access_cost_factor= MY_MIN((tmp - key_access_cost) / rows, 1.0); + if (!(records < s->worst_seeks && records <= thd->variables.max_seeks_for_key)) + { + // Don't use rowid filter trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping"); - else if (access_cost_factor <= 0.0) - trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0"); + filter= NULL; + } else { filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); - DBUG_ASSERT(tmp >= 0); - trace_access_idx.add("rowid_filter_key", - table->key_info[filter->key_no].name); - } + } + if (filter) + { + tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); + DBUG_ASSERT(tmp >= 0); + trace_access_idx.add("rowid_filter_key", + table->key_info[filter->key_no].name); } } trace_access_idx.add("rows", records).add("cost", tmp); @@ -8240,27 +8239,23 @@ best_access_path(JOIN *join, if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE) { double rows= record_count * s->found_records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; /* See the comment concerning using rowid filter for with ref access */ - keyread_tmp= s->table->opt_range[key_no].index_only_cost * - record_count; - access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); - if (access_cost_factor > 0.0) + double row_access_cost= s->quick->read_time * record_count; + double key_access_cost= + MY_MIN(row_access_cost, + s->table->opt_range[key_no].index_only_cost * record_count); + double access_cost_factor= MY_MIN((row_access_cost - key_access_cost) / + rows, 1.0); + filter= + s->table->best_range_rowid_filter_for_partial_join(key_no, rows, + access_cost_factor); + if (filter) { - filter= - s->table-> - best_range_rowid_filter_for_partial_join(key_no, rows, - access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows); - DBUG_ASSERT(tmp >= 0); - } + tmp-= filter->get_adjusted_gain(rows); + DBUG_ASSERT(tmp >= 0); } - else - trace_access_scan.add("rowid_filter_skipped", "cost_factor <= 0"); type= JT_RANGE; } @@ -24522,39 +24517,71 @@ JOIN_TAB::remove_duplicates() { bool error; - ulong keylength= 0; - uint field_count; + ulong keylength= 0, sort_field_keylength= 0; + uint field_count, item_count; List<Item> *fields= (this-1)->fields; + Item *item; THD *thd= join->thd; - + SORT_FIELD *sortorder, *sorder; DBUG_ENTER("remove_duplicates"); DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE); THD_STAGE_INFO(join->thd, stage_removing_duplicates); - //join->explain->ops_tracker.report_duplicate_removal(); - - table->reginfo.lock_type=TL_WRITE; + if (!(sortorder= (SORT_FIELD*) my_malloc(PSI_INSTRUMENT_ME, + (fields->elements+1) * + sizeof(SORT_FIELD), + MYF(MY_WME)))) + DBUG_RETURN(TRUE); /* Calculate how many saved fields there is in list */ - field_count=0; + field_count= item_count= 0; + List_iterator<Item> it(*fields); - Item *item; - while ((item=it++)) + for (sorder= sortorder ; (item=it++) ;) { - if (item->get_tmp_table_field() && ! 
item->const_item()) - field_count++; + if (!item->const_item()) + { + if (item->get_tmp_table_field()) + { + /* Field is stored in temporary table, skipp */ + field_count++; + } + else + { + /* Item is not stored in temporary table, remember it */ + sorder->field= 0; // Safety, not used + sorder->item= item; + /* Calculate sorder->length */ + item->type_handler()->sort_length(thd, item, sorder); + sorder++; + item_count++; + } + } } + sorder->item= 0; // End marker - if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) - { // only const items with no OPTION_FOUND_ROWS + if ((field_count + item_count == 0) && ! having && + !(join->select_options & OPTION_FOUND_ROWS)) + { + // only const items with no OPTION_FOUND_ROWS join->unit->lim.set_single_row(); // Only send first row + my_free(sortorder); DBUG_RETURN(false); } + /* + The table contains first fields that will be in the output, then + temporary results pointed to by the fields list. + Example: SELECT DISTINCT sum(a), sum(d) > 2 FROM ... + In this case the temporary table contains sum(a), sum(d). + */ + Field **first_field=table->field+table->s->fields - field_count; for (Field **ptr=first_field; *ptr; ptr++) keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null(); + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + sort_field_keylength+= ptr->length + (ptr->item->maybe_null ? 1 : 0); /* Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely @@ -24565,30 +24592,80 @@ JOIN_TAB::remove_duplicates() thd->reset_killed(); table->file->info(HA_STATUS_VARIABLE); + table->reginfo.lock_type=TL_WRITE; + if (table->s->db_type() == heap_hton || (!table->s->blob_fields && ((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records < thd->variables.sortbuff_size))) - error=remove_dup_with_hash_index(join->thd, table, field_count, first_field, - keylength, having); + error= remove_dup_with_hash_index(join->thd, table, field_count, + first_field, sortorder, + keylength + sort_field_keylength, having); else - error=remove_dup_with_compare(join->thd, table, first_field, having); + error=remove_dup_with_compare(join->thd, table, first_field, sortorder, + sort_field_keylength, having); if (join->select_lex != join->select_lex->master_unit()->fake_select_lex) thd->lex->set_limit_rows_examined(); free_blobs(first_field); + my_free(sortorder); DBUG_RETURN(error); } +/* + Create a sort/compare key from items + + Key is of fixed length and binary comparable +*/ + +static uchar *make_sort_key(SORT_FIELD *sortorder, uchar *key_buffer, + String *tmp_value) +{ + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + { + ptr->item->type_handler()->make_sort_key_part(key_buffer, + ptr->item, + ptr, tmp_value); + key_buffer+= (ptr->item->maybe_null ? 1 : 0) + ptr->length; + } + return key_buffer; +} + + +/* + Remove duplicates by comparing all rows with all other rows + + @param thd THD + @param table Temporary table + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distsinct. Terminated with an + element N with sortorder[N]->item=NULL. 
+ @param keylength Length of key produced by sortorder + @param having Having expression (NULL if no having) +*/ + static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, + SORT_FIELD *sortorder, ulong keylength, Item *having) { handler *file=table->file; - uchar *record=table->record[0]; + uchar *record=table->record[0], *key_buffer, *key_buffer2; + char *tmp_buffer; int error; + String tmp_value; DBUG_ENTER("remove_dup_with_compare"); + if (unlikely(!my_multi_malloc(PSI_INSTRUMENT_ME, + MYF(MY_WME), + &key_buffer, keylength, + &key_buffer2, keylength, + &tmp_buffer, keylength+1, + NullS))) + DBUG_RETURN(1); + tmp_value.set(tmp_buffer, keylength, &my_charset_bin); + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); @@ -24597,8 +24674,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (unlikely(thd->check_killed())) { - error=0; - goto err; + error= 1; + goto end; } if (unlikely(error)) { @@ -24617,9 +24694,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY), MYF(ME_FATAL)); - error=0; - goto err; + error= 1; + goto end; } + make_sort_key(sortorder, key_buffer, &tmp_value); store_record(table,record[1]); /* Read through rest of file and mark duplicated rows deleted */ @@ -24632,7 +24710,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, break; goto err; } - if (compare_record(table, first_field) == 0) + make_sort_key(sortorder, key_buffer2, &tmp_value); + if (compare_record(table, first_field) == 0 && + (!keylength || + memcmp(key_buffer, key_buffer2, keylength) == 0)) { if (unlikely((error= file->ha_delete_row(record)))) goto err; @@ -24651,38 +24732,52 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, goto err; } + error= 0; +end: + my_free(key_buffer); file->extra(HA_EXTRA_NO_CACHE); (void) file->ha_rnd_end(); - DBUG_RETURN(0); + DBUG_RETURN(error); + err: - file->extra(HA_EXTRA_NO_CACHE); - (void) file->ha_rnd_end(); - if (error) - file->print_error(error,MYF(0)); - DBUG_RETURN(1); + DBUG_ASSERT(error); + file->print_error(error,MYF(0)); + goto end; } /** - Generate a hash index for each row to quickly find duplicate rows. + Generate a hash index for each row to quickly find duplicate rows. + + @param thd THD + @param table Temporary table + @param field_count Number of fields part of distinct + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distsinct. Terminated with an + element N with sortorder[N]->item=NULL. + @param keylength Length of hash key + @param having Having expression (NULL if no having) - @note - Note that this will not work on tables with blobs! + @note + Note that this will not work on tables with blobs! 
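As the remove_dup_with_compare() hunk above shows, a row is now treated as a duplicate only when both the stored fields and the fixed-length keys built by make_sort_key() match. A minimal sketch of that combined test, assuming the server types and assuming key_buffer and key_buffer2 have already been filled for the two candidate rows:

    // Sketch of the duplicate test used above; key_buffer and key_buffer2
    // are assumed to hold the binary-comparable keys produced by
    // make_sort_key() for the two rows being compared.
    static bool rows_are_duplicates(TABLE *table, Field **first_field,
                                    const uchar *key_buffer,
                                    const uchar *key_buffer2, ulong keylength)
    {
      return compare_record(table, first_field) == 0 &&
             (!keylength ||
              memcmp(key_buffer, key_buffer2, keylength) == 0);
    }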
*/ static int remove_dup_with_hash_index(THD *thd, TABLE *table, uint field_count, Field **first_field, + SORT_FIELD *sortorder, ulong key_length, Item *having) { uchar *key_buffer, *key_pos, *record=table->record[0]; + char *tmp_buffer; int error; handler *file= table->file; ulong extra_length= ALIGN_SIZE(key_length)-key_length; uint *field_lengths, *field_length; HASH hash; - Field **ptr; + String tmp_value; DBUG_ENTER("remove_dup_with_hash_index"); if (!my_multi_malloc(key_memory_hash_index_key_buffer, MYF(MY_WME), @@ -24691,10 +24786,13 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, (long) file->stats.records), &field_lengths, (uint) (field_count*sizeof(*field_lengths)), + &tmp_buffer, key_length+1, NullS)) DBUG_RETURN(1); - for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) + tmp_value.set(tmp_buffer, key_length, &my_charset_bin); + field_length= field_lengths; + for (Field **ptr= first_field ; *ptr ; ptr++) (*field_length++)= (*ptr)->sort_length(); if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin, @@ -24708,7 +24806,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, if (unlikely((error= file->ha_rnd_init(1)))) goto err; - key_pos=key_buffer; + key_pos= key_buffer; for (;;) { uchar *org_key_pos; @@ -24733,11 +24831,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, /* copy fields to key buffer */ org_key_pos= key_pos; field_length=field_lengths; - for (ptr= first_field ; *ptr ; ptr++) + for (Field **ptr= first_field ; *ptr ; ptr++) { (*ptr)->make_sort_key_part(key_pos, *field_length); key_pos+= (*ptr)->maybe_null() + *field_length++; } + /* Copy result fields not stored in table to key buffer */ + key_pos= make_sort_key(sortorder, key_pos, &tmp_value); + /* Check if it exists before */ if (my_hash_search(&hash, org_key_pos, key_length)) { @@ -28156,6 +28257,162 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, } } +enum explainable_cmd_type +{ + SELECT_CMD, INSERT_CMD, REPLACE_CMD, UPDATE_CMD, DELETE_CMD, NO_CMD +}; + +static +const char * const explainable_cmd_name []= +{ + "select ", + "insert ", + "replace ", + "update ", + "delete ", +}; + +static +char const *get_explainable_cmd_name(enum explainable_cmd_type cmd) +{ + return explainable_cmd_name[cmd]; +} + +static +enum explainable_cmd_type get_explainable_cmd_type(THD *thd) +{ + switch (thd->lex->sql_command) { + case SQLCOM_SELECT: + return SELECT_CMD; + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + return INSERT_CMD; + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + return REPLACE_CMD; + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + return UPDATE_CMD; + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + return DELETE_CMD; + default: + return SELECT_CMD; + } +} + + +void TABLE_LIST::print_leaf_tables(THD *thd, String *str, + enum_query_type query_type) +{ + if (merge_underlying_list) + { + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + tbl->print_leaf_tables(thd, str, query_type); + } + else + print(thd, 0, str, query_type); +} + + +void st_select_lex::print_item_list(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + bool top_level= (get_master()->get_master() == 0); + List_iterator_fast<Item> it(item_list); + Item *item; + while ((item= it++)) + { + if (first) + first= 0; + else + 
str->append(','); + + if ((is_subquery_function() && item->is_autogenerated_name()) || + !item->name.str) + { + /* + Do not print auto-generated aliases in subqueries. It has no purpose + in a view definition or other contexts where the query is printed. + */ + item->print(str, query_type); + } + else + { + /* + Do not print illegal names (if it is not top level SELECT). + Top level view checked (and correct name are assigned), + other cases of top level SELECT are not important, because + it is not "table field". + */ + if (top_level || + !item->is_autogenerated_name() || + !check_column_name(item->name.str)) + item->print_item_w_name(str, query_type); + else + item->print(str, query_type); + } + } +} + + +void st_select_lex::print_set_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + List_iterator_fast<Item> it(item_list); + List_iterator_fast<Item> vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" set ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} + + +void st_select_lex::print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + List_iterator_fast<Item> it(thd->lex->update_list); + List_iterator_fast<Item> vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" on duplicate key update ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) { @@ -28167,6 +28424,67 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) return; } + if (is_tvc_wrapper && (query_type & QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)) + { + first_inner_unit()->first_select()->print(thd, str, query_type); + return; + } + + bool top_level= (get_master()->get_master() == 0); + enum explainable_cmd_type sel_type= SELECT_CMD; + if (top_level) + sel_type= get_explainable_cmd_type(thd); + + if (sel_type == INSERT_CMD || sel_type == REPLACE_CMD) + { + str->append(get_explainable_cmd_name(sel_type)); + str->append(STRING_WITH_LEN("into ")); + TABLE_LIST *tbl= thd->lex->query_tables; + while (tbl->merge_underlying_list) + tbl= tbl->merge_underlying_list; + tbl->print(thd, 0, str, query_type); + if (thd->lex->field_list.elements) + { + str->append ('('); + List_iterator_fast<Item> it(thd->lex->field_list); + Item *item; + bool first= true; + while ((item= it++)) + { + if (first) + first= false; + else + str->append(','); + str->append(item->name); + } + str->append(')'); + } + + str->append(' '); + + if (thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->sql_command == SQLCOM_REPLACE) + { + str->append(STRING_WITH_LEN("values ")); + bool is_first_elem= true; + List_iterator_fast<List_item> li(thd->lex->many_values); + List_item *list; + + while ((list= li++)) + { + if (is_first_elem) + is_first_elem= false; + else + str->append(','); + + print_list_item(str, list, query_type); + } + if (thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + return; + } + } + if ((query_type & 
QT_SHOW_SELECT_NUMBER) && thd->lex->all_selects_list && thd->lex->all_selects_list->link_next && @@ -28190,7 +28508,10 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(" */ "); } - str->append(STRING_WITH_LEN("select ")); + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + str->append(STRING_WITH_LEN("select ")); if (join && join->cleaned) { @@ -28236,57 +28557,66 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) } //Item List - bool first= 1; + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + print_item_list(thd, str, query_type); /* - outer_select() can not be used here because it is for name resolution - and will return NULL at any end of name resolution chain (view/derived) + from clause + TODO: support USING/FORCE/IGNORE index */ - bool top_level= (get_master()->get_master() == 0); - List_iterator_fast<Item> it(item_list); - Item *item; - while ((item= it++)) + if (table_list.elements) { - if (first) - first= 0; - else - str->append(','); - - if ((is_subquery_function() && item->is_autogenerated_name()) || - !item->name.str) + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) { - /* - Do not print auto-generated aliases in subqueries. It has no purpose - in a view definition or other contexts where the query is printed. - */ - item->print(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + /* go through join tree */ + print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, + query_type); } - else + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + str->append(STRING_WITH_LEN(get_explainable_cmd_name(sel_type))); + if (sel_type == DELETE_CMD) { - /* - Do not print illegal names (if it is not top level SELECT). - Top level view checked (and correct name are assigned), - other cases of top level SELECT are not important, because - it is not "table field". - */ - if (top_level || - !item->is_autogenerated_name() || - !check_column_name(item->name.str)) - item->print_item_w_name(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + bool first= true; + for (TABLE_LIST *target_tbl= thd->lex->auxiliary_table_list.first; + target_tbl; + target_tbl= target_tbl->next_local) + { + if (first) + first= false; + else + str->append(','); + target_tbl->correspondent_table->print_leaf_tables(thd, str, + query_type); + } + + if (!first) + str->append(STRING_WITH_LEN(" using ")); + } + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + { + if (join) + print_join(thd, 0, str, &top_join_list, query_type); else - item->print(str, query_type); + { + bool first= true; + List_iterator_fast<TABLE_LIST> li(leaf_tables); + TABLE_LIST *tbl; + while ((tbl= li++)) + { + if (first) + first= false; + else + str->append(','); + tbl->print(thd, 0, str, query_type); + } + } } } - - /* - from clause - TODO: support USING/FORCE/IGNORE index - */ - if (table_list.elements) - { - str->append(STRING_WITH_LEN(" from ")); - /* go through join tree */ - print_join(thd, join? 
join->eliminated_tables: 0, str, &top_join_list, query_type); - } else if (where) { /* @@ -28296,10 +28626,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" from DUAL ")); } + if (sel_type == UPDATE_CMD) + print_set_clause(thd, str, query_type); + // Where Item *cur_where= where; if (join) cur_where= join->conds; + else if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + cur_where= thd->lex->upd_del_where; if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); @@ -28356,6 +28691,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) else if (lock_type == TL_WRITE) str->append(" for update"); + if ((sel_type == INSERT_CMD || sel_type == REPLACE_CMD) && + thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + + // returning clause + if (sel_type == DELETE_CMD && !item_list.elements) + { + print_item_list(thd, str, query_type); + } // PROCEDURE unsupported here } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 6405a919698..1f1e7d67a2a 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2629,7 +2629,8 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff) a different syntax, like when ANSI_QUOTES is defined. */ table->view->unit.print(buff, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); if (table->with_check != VIEW_CHECK_NONE) { diff --git a/sql/sql_sort.h b/sql/sql_sort.h index a474d7c25e9..7b9512404ff 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -20,6 +20,7 @@ #include "my_base.h" /* ha_rows */ #include <my_sys.h> /* qsort2_cmp */ #include "queues.h" +#include "sql_string.h" #include "sql_class.h" class Field; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index f4fa880eeb3..ed48591db4e 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -553,7 +553,7 @@ bool String::append(const char *s,size_t size) } /* - For an ASCII compatinble string we can just append. + For an ASCII compatible string we can just append. */ return Binary_string::append(s, arg_length); } diff --git a/sql/sql_string.h b/sql/sql_string.h index 32df8b668f2..6f8dd2773e1 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -197,6 +197,83 @@ public: { return m_charset != &my_charset_bin; } + + /* + The MariaDB version when the last collation change happened, + e.g. due to a bug fix. See functions below. + */ + static ulong latest_mariadb_version_with_collation_change() + { + return 110002; + } + + /* + Check if the collation with the given ID changed its order + since the given MariaDB version. 
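A quick, hypothetical example of the check documented above, using one of the version ranges listed in the function body that follows:

    // Hypothetical example: ucs2_general_mysql500_ci (collation id 159)
    // written by a server in the 10.5.0 through 10.5.19 range is reported
    // as having a changed sort order, so callers elsewhere in this patch
    // mark the table with HA_CREATE_USED_CHARSET.
    bool changed= Charset::collation_changed_order(100510, 159);
    // changed == true according to the ranges below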
+ */ + static bool collation_changed_order(ulong mysql_version, uint cs_number) + { + if ((mysql_version < 50048 && + (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ + cs_number == 41 || /* latin7_general_ci - bug #29461 */ + cs_number == 42 || /* latin7_general_cs - bug #29461 */ + cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ + cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ + cs_number == 22 || /* koi8u_general_ci - bug #29461 */ + cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ + cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ + (mysql_version < 50124 && + (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ + cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + return true; + + if (cs_number == 159 && /* ucs2_general_mysql500_ci - MDEV-30746 */ + ((mysql_version >= 100400 && mysql_version < 100429) || + (mysql_version >= 100500 && mysql_version < 100520) || + (mysql_version >= 100600 && mysql_version < 100613) || + (mysql_version >= 100700 && mysql_version < 100708) || + (mysql_version >= 100800 && mysql_version < 100808) || + (mysql_version >= 100900 && mysql_version < 100906) || + (mysql_version >= 101000 && mysql_version < 101004) || + (mysql_version >= 101100 && mysql_version < 101103) || + (mysql_version >= 110000 && mysql_version < 110002))) + return true; + return false; + } + + /** + Check if a collation has changed ID since the given version. + Return the new ID. + + @param mysql_version + @param cs_number - collation ID + + @retval the new collation ID (or cs_number, if no change) + */ + + static uint upgrade_collation_id(ulong mysql_version, uint cs_number) + { + if (mysql_version >= 50300 && mysql_version <= 50399) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + } + } + if ((mysql_version >= 50500 && mysql_version <= 50599) || + (mysql_version >= 100000 && mysql_version <= 100005)) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci + case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci + case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci + } + } + return cs_number; + } + }; diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index b9219515b48..63dc5749b1d 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -705,6 +705,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl, wrapper_sl->parent_lex= lex; /* Used in init_query. 
*/ wrapper_sl->init_query(); wrapper_sl->init_select(); + wrapper_sl->is_tvc_wrapper= true; wrapper_sl->nest_level= tvc_sl->nest_level; wrapper_sl->parsing_place= tvc_sl->parsing_place; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index d2939f5e6e9..5a411053a6d 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -9034,13 +9034,13 @@ Type_handler_timestamp_common::Item_val_native_with_conversion(THD *thd, Item *item, Native *to) const { - MYSQL_TIME ltime; if (item->type_handler()->type_handler_for_native_format() == &type_handler_timestamp2) return item->val_native(thd, to); + Datetime dt(thd, item, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)); return - item->get_date(thd, <ime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) || - TIME_to_native(thd, <ime, to, item->datetime_precision(thd)); + !dt.is_valid_datetime() || + TIME_to_native(thd, dt.get_mysql_time(), to, item->datetime_precision(thd)); } bool Type_handler_null::union_element_finalize(Item_type_holder *item) const diff --git a/sql/sql_type.h b/sql/sql_type.h index db4f59d4d9f..f3256ee01d3 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -4070,14 +4070,14 @@ public: */ virtual void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const= 0; + String *tmp) const= 0; /* create a compact size key part for a sort key */ virtual uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const=0; + String *tmp) const=0; virtual void sort_length(THD *thd, const Type_std_attributes *item, @@ -4484,12 +4484,12 @@ public: uint32 flags) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override + String *tmp) const override { MY_ASSERT_UNREACHABLE(); } uint make_packed_sort_key_part(uchar *, Item *, const SORT_FIELD_ATTR *, - Sort_param *) const override + String *) const override { MY_ASSERT_UNREACHABLE(); return 0; @@ -4830,10 +4830,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -4942,10 +4942,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; @@ -5198,10 +5198,10 @@ public: TABLE_SHARE *share) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; @@ -5309,10 +5309,10 @@ public: uchar *buff) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const 
override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -5410,10 +5410,10 @@ public: CHARSET_INFO *cs) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -6635,10 +6635,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 244bd319205..9405eda1f7b 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1373,7 +1373,8 @@ produce_explain_and_leave: goto err; emit_explain_and_leave: - int err2= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + int err2= thd->lex->explain->send_explain(thd, extended); delete select; free_underlaid_joins(thd, select_lex); @@ -1447,6 +1448,8 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } @@ -1974,7 +1977,10 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields, else { if (thd->lex->describe || thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } thd->abort_on_warning= 0; DBUG_RETURN(res); diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 0c1d0e13382..026ddd6ea0d 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -962,10 +962,12 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, Sql_mode_instant_remove sms(thd, MODE_ANSI_QUOTES); lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); lex->unit.print(&is_query, enum_query_type(QT_TO_SYSTEM_CHARSET | QT_WITHOUT_INTRODUCERS | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); } DBUG_PRINT("info", ("View: %.*s", view_query.length(), view_query.ptr())); diff --git a/sql/table.cc b/sql/table.cc index ff650eef257..1c13a244afd 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -938,39 +938,6 @@ static uint enum_value_with_check(THD *thd, TABLE_SHARE *share, } -/** - Check if a collation has changed number - - @param mysql_version - @param current collation number - - @retval new collation number (same as current collation number of no change) -*/ - -static uint upgrade_collation(ulong mysql_version, uint cs_number) -{ - if (mysql_version >= 50300 && mysql_version <= 50399) - { - switch (cs_number) { - case 149: return 
MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - } - } - if ((mysql_version >= 50500 && mysql_version <= 50599) || - (mysql_version >= 100000 && mysql_version <= 100005)) - { - switch (cs_number) { - case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci - case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci - case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci - } - } - return cs_number; -} - - void Column_definition_attributes::frm_pack_basic(uchar *buff) const { int2store(buff + 3, length); @@ -1030,7 +997,7 @@ bool Column_definition_attributes::frm_unpack_charset(TABLE_SHARE *share, const uchar *buff) { uint cs_org= buff[14] + (((uint) buff[11]) << 8); - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; if (cs_new && !(charset= get_charset(cs_new, MYF(0)))) @@ -1857,7 +1824,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (!frm_image[32]) // New frm file in 3.23 { uint cs_org= (((uint) frm_image[41]) << 8) + (uint) frm_image[38]; - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; @@ -2965,6 +2932,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, goto err; field= key_part->field= share->field[key_part->fieldnr-1]; + if (Charset::collation_changed_order(share->mysql_version, + field->charset()->number)) + share->incompatible_version|= HA_CREATE_USED_CHARSET; key_part->type= field->key_type(); if (field->invisible > INVISIBLE_USER && !field->vers_sys_field()) @@ -6655,6 +6625,9 @@ bool TABLE_LIST::prepare_security(THD *thd) #ifndef DBUG_OFF void TABLE_LIST::set_check_merged() { + if (is_view()) + return; + DBUG_ASSERT(derived); /* It is not simple to check all, but at least this should be checked: @@ -6982,9 +6955,8 @@ void Field_iterator_table_ref::set_field_iterator() table_ref->alias.str)); } /* This is a merge view, so use field_translation. */ - else if (table_ref->field_translation) + else if (table_ref->is_merged_derived() && table_ref->field_translation) { - DBUG_ASSERT(table_ref->is_merged_derived()); field_it= &view_field_it; DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_view", table_ref->alias.str)); @@ -9477,15 +9449,15 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) set_derived(); } - if (!is_view() && + if (is_view() || !derived_table_optimization_done(this)) { /* A subquery might be forced to be materialized due to a side-effect. 
*/ - if (!is_materialized_derived() && first_select->is_mergeable() && - optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && + if (!is_materialized_derived() && unit->can_be_merged() && + (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) || is_view()) && !thd->lex->can_not_use_merged() && - !(thd->lex->sql_command == SQLCOM_UPDATE_MULTI || - thd->lex->sql_command == SQLCOM_DELETE_MULTI) && + !((thd->lex->sql_command == SQLCOM_UPDATE_MULTI || + thd->lex->sql_command == SQLCOM_DELETE_MULTI) && !is_view()) && !is_recursive_with_table()) set_merged_derived(); else diff --git a/sql/table.h b/sql/table.h index 3ea8347de68..d8756deb43c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2690,6 +2690,8 @@ struct TABLE_LIST } void print(THD *thd, table_map eliminated_tables, String *str, enum_query_type query_type); + void print_leaf_tables(THD *thd, String *str, + enum_query_type query_type); bool check_single_table(TABLE_LIST **table, table_map map, TABLE_LIST *view); bool set_insert_values(MEM_ROOT *mem_root); @@ -2830,8 +2832,7 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : "<NULL>"), get_unit())); - derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) | - DTYPE_TABLE | DTYPE_MERGE); + derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) | DTYPE_MERGE); set_check_merged(); DBUG_VOID_RETURN; } @@ -2845,10 +2846,9 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : "<NULL>"), get_unit())); - derived= get_unit(); derived_type= static_cast<uint8>((derived_type & (derived ? DTYPE_MASK : DTYPE_VIEW)) | - DTYPE_TABLE | DTYPE_MATERIALIZE); + DTYPE_MATERIALIZE); set_check_materialized(); DBUG_VOID_RETURN; } diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 7d8296a75a1..d9988914c4d 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -502,7 +502,13 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ if (!WSREP_EMULATE_BINLOG(m_thd)) { wsrep_register_for_group_commit(m_thd); - ret = ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); + /* wait_for_prior_commit() ensures that all preceding transactions + have been committed and seqno has been synced into + storage engine. We don't release commit order here yet to + avoid following transactions to sync seqno before + wsrep_set_SE_checkpoint() below returns. This effectively pauses + group commit for the checkpoint operation, but is the only way to + ensure proper ordering. 
*/ m_thd->wait_for_prior_commit(); } @@ -512,10 +518,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ { wsrep_unregister_from_group_commit(m_thd); } - else - { - ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); - } + ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); cs.after_applying(); } DBUG_RETURN(ret); diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 3c5cff2b741..8f998244ee6 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -1,4 +1,4 @@ -/* Copyright 2016-2022 Codership Oy <http://www.codership.com> +/* Copyright 2016-2023 Codership Oy <http://www.codership.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -217,6 +217,19 @@ static inline bool wsrep_run_commit_hook(THD* thd, bool all) } mysql_mutex_unlock(&thd->LOCK_thd_data); } + + mysql_mutex_lock(&thd->LOCK_thd_data); + /* Transaction creating sequence is TOI or RSU, + CREATE [TEMPORARY] SEQUENCE = CREATE + INSERT (initial value) + and replicated using statement based replication, thus + the commit hooks will be skipped */ + if (ret && + (thd->wsrep_cs().mode() == wsrep::client_state::m_toi || + thd->wsrep_cs().mode() == wsrep::client_state::m_rsu) && + thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE) + ret= false; + mysql_mutex_unlock(&thd->LOCK_thd_data); + DBUG_PRINT("wsrep", ("return: %d", ret)); DBUG_RETURN(ret); } diff --git a/storage/connect/mysql-test/connect/r/mysql.result b/storage/connect/mysql-test/connect/r/mysql.result index d3c244b277a..1dcbca88a7b 100644 --- a/storage/connect/mysql-test/connect/r/mysql.result +++ b/storage/connect/mysql-test/connect/r/mysql.result @@ -364,5 +364,16 @@ hex(col) DROP TABLE t2; DROP TABLE t1; # +# MDEV-29782 CONNECT engine converted YEAR to DATETIME, causing INSERT to fail +# +CREATE TABLE t1 (id year); +CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL DBNAME='test' TABNAME='t1' OPTION_LIST='host=localhost,user=root,port=PORT'; +INSERT INTO t2 VALUES (1999); +SELECT * FROM t2; +id +1999 +DROP TABLE t2; +DROP TABLE t1; +# # End of 10.3 tests # diff --git a/storage/connect/mysql-test/connect/t/mysql.test b/storage/connect/mysql-test/connect/t/mysql.test index a50db4a6457..cd52f78fb30 100644 --- a/storage/connect/mysql-test/connect/t/mysql.test +++ b/storage/connect/mysql-test/connect/t/mysql.test @@ -534,5 +534,20 @@ DROP TABLE t1; --echo # +--echo # MDEV-29782 CONNECT engine converted YEAR to DATETIME, causing INSERT to fail +--echo # + +CREATE TABLE t1 (id year); + +--replace_result $PORT PORT +--eval CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=MYSQL DBNAME='test' TABNAME='t1' OPTION_LIST='host=localhost,user=root,port=$PORT' + +INSERT INTO t2 VALUES (1999); +SELECT * FROM t2; + +DROP TABLE t2; +DROP TABLE t1; + +--echo # --echo # End of 10.3 tests --echo # diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index c49db48bfb3..45b2c46e217 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -183,6 +183,7 @@ int MYSQLtoPLG(int mytype, char *var) switch (mytype) { case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_YEAR: type = TYPE_SHORT; break; case MYSQL_TYPE_LONG: @@ -209,7 +210,6 @@ int MYSQLtoPLG(int mytype, char *var) case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_DATE: case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_YEAR: case MYSQL_TYPE_TIME: type = TYPE_DATE; break; diff --git a/storage/innobase/buf/buf0flu.cc 
b/storage/innobase/buf/buf0flu.cc index 77b1886ea5f..36ea302a403 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2325,7 +2325,7 @@ unemployed: if (!recv_recovery_is_on() && !srv_startup_is_before_trx_rollback_phase && - srv_operation == SRV_OPERATION_NORMAL) + srv_operation <= SRV_OPERATION_EXPORT_RESTORED) log_checkpoint(); mysql_mutex_lock(&buf_pool.flush_list_mutex); @@ -2405,7 +2405,8 @@ do_checkpoint: DBUG_EXECUTE_IF("ib_log_checkpoint_avoid", goto next;); DBUG_EXECUTE_IF("ib_log_checkpoint_avoid_hard", goto next;); - if (!recv_recovery_is_on() && srv_operation == SRV_OPERATION_NORMAL) + if (!recv_recovery_is_on() + && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) log_checkpoint(); } } @@ -2472,7 +2473,7 @@ next: ATTRIBUTE_COLD void buf_flush_page_cleaner_init() { ut_ad(!buf_page_cleaner_is_active); - ut_ad(srv_operation == SRV_OPERATION_NORMAL || + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); buf_flush_async_lsn= 0; diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 6d83d448bd6..19a0b5a3eb5 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1119,17 +1119,8 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, break; case FIL_PAGE_TYPE_ZBLOB: case FIL_PAGE_TYPE_ZBLOB2: - break; case FIL_PAGE_INDEX: case FIL_PAGE_RTREE: -#if defined UNIV_ZIP_DEBUG && defined BTR_CUR_HASH_ADAPT - /* During recovery, we only update the - compressed page, not the uncompressed one. */ - ut_a(recv_recovery_is_on() - || page_zip_validate( - &bpage->zip, page, - ((buf_block_t*) bpage)->index)); -#endif /* UNIV_ZIP_DEBUG && BTR_CUR_HASH_ADAPT */ break; default: ib::error() << "The compressed page to be" diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index e6def2968ea..fd2404a009a 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2575,10 +2575,15 @@ corrupted: } } + const bool operation_not_for_export = + srv_operation != SRV_OPERATION_RESTORE_EXPORT + && srv_operation != SRV_OPERATION_EXPORT_RESTORED; + /* Always look for a file at the default location. But don't log an error if the tablespace is already open in remote or dict. */ ut_a(df_default.filepath()); - const bool strict = (tablespaces_found == 0); + const bool strict = operation_not_for_export + && (tablespaces_found == 0); if (df_default.open_read_only(strict) == DB_SUCCESS) { ut_ad(df_default.is_open()); ++tablespaces_found; @@ -2624,9 +2629,11 @@ corrupted: /* Make sense of these three possible locations. First, bail out if no tablespace files were found. */ if (valid_tablespaces_found == 0) { - os_file_get_last_error(true); - ib::error() << "Could not find a valid tablespace file for `" - << tablename << "`. " << TROUBLESHOOT_DATADICT_MSG; + os_file_get_last_error( + operation_not_for_export, !operation_not_for_export); + if (operation_not_for_export) + ib::error() << "Could not find a valid tablespace file for `" + << tablename << "`. 
" << TROUBLESHOOT_DATADICT_MSG; goto corrupted; } if (!validate) { @@ -2962,6 +2969,7 @@ fil_ibd_discover( case SRV_OPERATION_RESTORE: break; case SRV_OPERATION_NORMAL: + case SRV_OPERATION_EXPORT_RESTORED: df_rem_per.set_name(db); if (df_rem_per.open_link_file() != DB_SUCCESS) { break; diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index f631c548591..f131e4e90da 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -775,7 +775,7 @@ the double write buffer. bool Datafile::restore_from_doublewrite() { - if (srv_operation != SRV_OPERATION_NORMAL) { + if (srv_operation > SRV_OPERATION_EXPORT_RESTORED) { return true; } diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc index cb9cce26011..07a8295a94e 100644 --- a/storage/innobase/fsp/fsp0sysspace.cc +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -582,7 +582,7 @@ SysTablespace::read_lsn_and_check_flags(lsn_t* flushed_lsn) ut_a(it->order() == 0); - if (srv_operation == SRV_OPERATION_NORMAL) { + if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { buf_dblwr.init_or_load_pages(it->handle(), it->filepath()); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index e3dfcc8c321..f7dd18e0e36 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -17999,44 +17999,6 @@ exit: return; } -/** Validate SET GLOBAL innodb_buffer_pool_filename. -On Windows, file names with colon (:) are not allowed. -@param thd connection -@param save &srv_buf_dump_filename -@param value new value to be validated -@return 0 for valid name */ -static int innodb_srv_buf_dump_filename_validate(THD *thd, st_mysql_sys_var*, - void *save, - st_mysql_value *value) -{ - char buff[OS_FILE_MAX_PATH]; - int len= sizeof buff; - - if (const char *buf_name= value->val_str(value, buff, &len)) - { -#ifdef _WIN32 - if (!is_filename_allowed(buf_name, len, FALSE)) - { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WRONG_ARGUMENTS, - "InnoDB: innodb_buffer_pool_filename " - "cannot have colon (:) in the file name."); - return 1; - } -#endif /* _WIN32 */ - if (buf_name == buff) - { - ut_ad(static_cast<size_t>(len) < sizeof buff); - buf_name= thd_strmake(thd, buf_name, len); - } - - *static_cast<const char**>(save)= buf_name; - return 0; - } - - return 1; -} - #ifdef UNIV_DEBUG static char* srv_buffer_pool_evict; @@ -19327,9 +19289,9 @@ static MYSQL_SYSVAR_ULONG(buffer_pool_instances, innodb_deprecated_ignored, NULL, NULL, 0, 0, 64, 0); static MYSQL_SYSVAR_STR(buffer_pool_filename, srv_buf_dump_filename, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Filename to/from which to dump/load the InnoDB buffer pool", - innodb_srv_buf_dump_filename_validate, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT); + NULL, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT); static MYSQL_SYSVAR_BOOL(buffer_pool_dump_now, innodb_buffer_pool_dump_now, PLUGIN_VAR_RQCMDARG, diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index e5dffe54400..3487f4ffc69 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -5186,10 +5186,10 @@ static ST_FIELD_INFO innodb_sysindex_fields_info[]= Column("N_FIELDS", SLong(), NOT_NULL), #define SYS_INDEX_PAGE_NO 5 - Column("PAGE_NO", SLong(), NOT_NULL), + Column("PAGE_NO", SLong(), NULLABLE), #define SYS_INDEX_SPACE 6 - Column("SPACE", SLong(), NOT_NULL), + Column("SPACE", SLong(), NULLABLE), #define 
SYS_INDEX_MERGE_THRESHOLD 7 Column("MERGE_THRESHOLD", SLong(), NOT_NULL), @@ -5241,12 +5241,14 @@ i_s_dict_fill_sys_indexes( if (index->page == FIL_NULL) { fields[SYS_INDEX_PAGE_NO]->set_null(); } else { + fields[SYS_INDEX_PAGE_NO]->set_notnull(); OK(fields[SYS_INDEX_PAGE_NO]->store(index->page, true)); } - if (space_id == ULINT_UNDEFINED) { + if (space_id == FIL_NULL) { fields[SYS_INDEX_SPACE]->set_null(); } else { + fields[SYS_INDEX_SPACE]->set_notnull(); OK(fields[SYS_INDEX_SPACE]->store(space_id, true)); } diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 572ada33653..a22dc3562b5 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -968,13 +968,14 @@ os_file_flush_func( /** Retrieves the last error number if an error occurs in a file io function. The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, -the OS error number + 100 is returned. -@param[in] report true if we want an error message printed - for all errors -@return error number, or OS error number + 100 */ -ulint -os_file_get_last_error( - bool report); +the OS error number + OS_FILE_ERROR_MAX is returned. +@param[in] report_all_errors true if we want an error message + printed of all errors +@param[in] on_error_silent true then don't print any diagnostic + to the log +@return error number, or OS error number + OS_FILE_ERROR_MAX */ +ulint os_file_get_last_error(bool report_all_errors, + bool on_error_silent= false); /** NOTE! Use the corresponding macro os_file_read(), not directly this function! diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 077e68e2a16..75718a92a10 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -387,6 +387,9 @@ extern my_bool srv_immediate_scrub_data_uncompressed; enum srv_operation_mode { /** Normal mode (MariaDB Server) */ SRV_OPERATION_NORMAL, + /** Mariabackup is executing server to export already restored + tablespaces */ + SRV_OPERATION_EXPORT_RESTORED, /** Mariabackup taking a backup */ SRV_OPERATION_BACKUP, /** Mariabackup restoring a backup for subsequent --copy-back */ diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index c4217d7a1cb..86e7f43015c 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -791,7 +791,7 @@ fil_name_process(char* name, ulint len, ulint space_id, bool deleted) return; } - ut_ad(srv_operation == SRV_OPERATION_NORMAL + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -2636,7 +2636,7 @@ static void log_sort_flush_list() @param last_batch whether it is possible to write more redo log */ void recv_sys_t::apply(bool last_batch) { - ut_ad(srv_operation == SRV_OPERATION_NORMAL || + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -2796,7 +2796,7 @@ void recv_sys_t::apply(bool last_batch) mysql_mutex_lock(&log_sys.mutex); } #if 1 /* Mariabackup FIXME: Remove or adjust rename_table_in_prepare() */ - else if (srv_operation != SRV_OPERATION_NORMAL); + else if (srv_operation > SRV_OPERATION_EXPORT_RESTORED); #endif else { @@ -3421,7 +3421,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) byte* buf; dberr_t err = DB_SUCCESS; - ut_ad(srv_operation == 
SRV_OPERATION_NORMAL + ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); ut_d(mysql_mutex_lock(&buf_pool.flush_list_mutex)); @@ -3632,7 +3632,7 @@ completed: recv_sys.parse_start_lsn = checkpoint_lsn; - if (srv_operation == SRV_OPERATION_NORMAL) { + if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { buf_dblwr.recover(); } @@ -3696,7 +3696,8 @@ completed: log_sys.last_checkpoint_lsn = checkpoint_lsn; - if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL + if (!srv_read_only_mode + && srv_operation <= SRV_OPERATION_EXPORT_RESTORED && (~log_t::FORMAT_ENCRYPTED & log_sys.log.format) == log_t::FORMAT_10_5) { /* Write a FILE_CHECKPOINT marker as the first thing, diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index dd514a09bcd..30b2b31abb2 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -772,22 +772,16 @@ os_file_punch_hole_posix( return(DB_IO_NO_PUNCH_HOLE); } - - /** Retrieves the last error number if an error occurs in a file io function. The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, the OS error number + 100 is returned. @param[in] report_all_errors true if we want an error message - printed of all errors + printed of all errors @param[in] on_error_silent true then don't print any diagnostic to the log @return error number, or OS error number + 100 */ -static -ulint -os_file_get_last_error_low( - bool report_all_errors, - bool on_error_silent) +ulint os_file_get_last_error(bool report_all_errors, bool on_error_silent) { int err = errno; @@ -1793,16 +1787,13 @@ bool os_file_flush_func(os_file_t file) The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, then OS error number + OS_FILE_ERROR_MAX is returned. -@param[in] report_all_errors true if we want an error message printed - of all errors +@param[in] report_all_errors true if we want an error message +printed of all errors @param[in] on_error_silent true then don't print any diagnostic to the log @return error number, or OS error number + OS_FILE_ERROR_MAX */ -static -ulint -os_file_get_last_error_low( - bool report_all_errors, - bool on_error_silent) +ulint os_file_get_last_error(bool report_all_errors, bool on_error_silent) + { ulint err = (ulint) GetLastError(); @@ -3024,20 +3015,6 @@ os_file_read_page( return err; } -/** Retrieves the last error number if an error occurs in a file io function. -The number should be retrieved before any other OS calls (because they may -overwrite the error number). If the number is not known to this program, -the OS error number + 100 is returned. -@param[in] report_all_errors true if we want an error printed - for all errors -@return error number, or OS error number + 100 */ -ulint -os_file_get_last_error( - bool report_all_errors) -{ - return(os_file_get_last_error_low(report_all_errors, false)); -} - /** Handle errors for file operations. 
@param[in] name name of a file or NULL @param[in] operation operation @@ -3054,7 +3031,7 @@ os_file_handle_error_cond_exit( { ulint err; - err = os_file_get_last_error_low(false, on_error_silent); + err = os_file_get_last_error(false, on_error_silent); switch (err) { case OS_FILE_DISK_FULL: diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index 8ed4863b3fb..12e75605dc9 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -3377,7 +3377,7 @@ page_zip_validate_low( differed. Let us ignore it. */ page_zip_fail(("page_zip_validate:" " min_rec_flag" - " (%s" ULINTPF "," ULINTPF + " (%s" UINT32PF "," UINT32PF ",0x%02x)\n", sloppy ? "ignored, " : "", page_get_space_id(page), @@ -3422,7 +3422,8 @@ page_zip_validate_low( page + PAGE_NEW_INFIMUM, TRUE); trec = page_rec_get_next_low( temp_page + PAGE_NEW_INFIMUM, TRUE); - const ulint n_core = page_is_leaf(page) ? index->n_fields : 0; + const ulint n_core = (index && page_is_leaf(page)) + ? index->n_fields : 0; do { if (page_offset(rec) != page_offset(trec)) { diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index d54dc57655e..ae8c3c4513e 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -291,7 +291,9 @@ rec_init_offsets_comp_ordinary( != n_core) ? UT_BITS_IN_BYTES(unsigned(index->get_n_nullable(n_core))) : (redundant_temp - ? UT_BITS_IN_BYTES(index->n_nullable) + ? (index->is_instant() + ? UT_BITS_IN_BYTES(index->get_n_nullable(n_core)) + : UT_BITS_IN_BYTES(index->n_nullable)) : index->n_core_null_bytes); if (mblob) { @@ -448,7 +450,7 @@ start: continue; } - len = offs += len; + len = offs += static_cast<rec_offs>(len); } else { len = offs += field->fixed_len; } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 872e95c91a3..3c29461c19d 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3988,6 +3988,7 @@ page_corrupted: block->page.zip.data = src; frame_changed = true; } else if (!page_compressed + && type != FIL_PAGE_TYPE_XDES && !block->page.zip.data) { block->frame = src; frame_changed = true; diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index f354424cb36..4774bef49ea 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -833,6 +833,30 @@ row_vers_build_cur_vrow( return(cur_vrow); } +/** Find out whether data tuple has missing data type +for indexed virtual column. +@param tuple data tuple +@param index virtual index +@return true if tuple has missing column type */ +static bool dtuple_vcol_data_missing(const dtuple_t &tuple, + dict_index_t *index) +{ + for (ulint i= 0; i < index->n_uniq; i++) + { + dict_col_t *col= index->fields[i].col; + if (!col->is_virtual()) + continue; + dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col); + for (ulint j= 0; j < index->table->n_v_cols; j++) + { + if (vcol == &index->table->v_cols[j] + && tuple.v_fields[j].type.mtype == DATA_MISSING) + return true; + } + } + return false; +} + /** Finds out if a version of the record, where the version >= the current purge view, should have ientry as its secondary index entry. 
 We check if there is any not delete marked version of the record where the trx
@@ -1041,6 +1065,9 @@ unsafe_to_purge:

 	if (dict_index_has_virtual(index)) {
 		if (vrow) {
+			if (dtuple_vcol_data_missing(*vrow, index)) {
+				goto nochange_index;
+			}
 			/* Keep the virtual row info for the next
 			version, unless it is changed */
 			mem_heap_empty(v_heap);
@@ -1051,6 +1078,7 @@ unsafe_to_purge:

 		if (!cur_vrow) {
 			/* Nothing for this index has changed,
 			continue */
+nochange_index:
 			version = prev_version;
 			continue;
 		}
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 44fca2c81a5..a507b29ffa2 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -715,7 +715,7 @@ srv_undo_tablespaces_init(bool create_new_db)
   srv_undo_tablespaces_open= 0;

   ut_a(srv_undo_tablespaces <= TRX_SYS_N_RSEGS);
-  ut_a(!create_new_db || srv_operation == SRV_OPERATION_NORMAL);
+  ut_a(!create_new_db || srv_operation <= SRV_OPERATION_EXPORT_RESTORED);

   if (srv_undo_tablespaces == 1)
     srv_undo_tablespaces= 0;
@@ -1062,7 +1062,7 @@ dberr_t srv_start(bool create_new_db)
 	bool		srv_log_file_found = true;
 	mtr_t		mtr;

-	ut_ad(srv_operation == SRV_OPERATION_NORMAL
+	ut_ad(srv_operation <= SRV_OPERATION_RESTORE_EXPORT
 	      || srv_operation == SRV_OPERATION_RESTORE
 	      || srv_operation == SRV_OPERATION_RESTORE_EXPORT);

@@ -1472,7 +1472,8 @@ file_checked:
 	}

 	switch (srv_operation) {
-	case SRV_OPERATION_NORMAL:
+	case SRV_OPERATION_NORMAL:
+	case SRV_OPERATION_EXPORT_RESTORED:
 	case SRV_OPERATION_RESTORE_EXPORT:
 		/* Initialize the change buffer. */
 		err = dict_boot();
@@ -1884,7 +1885,8 @@ skip_monitors:
 		return(srv_init_abort(err));
 	}

-	if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) {
+	if (!srv_read_only_mode
+	    && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) {
 		/* Initialize the innodb_temporary tablespace
 		and keep it open until shutdown. */
 		err = srv_open_tmp_tablespace(create_new_db);
@@ -1900,7 +1902,8 @@ skip_monitors:
 		}
 	}

-	if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL
+	if (!srv_read_only_mode
+	    && srv_operation <= SRV_OPERATION_EXPORT_RESTORED
 	    && srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
 		srv_init_purge_tasks();
 		purge_sys.coordinator_startup();
@@ -1977,7 +1980,7 @@ void innodb_preshutdown()
   if (srv_read_only_mode)
     return;
-  if (!srv_fast_shutdown && srv_operation == SRV_OPERATION_NORMAL)
+  if (!srv_fast_shutdown && srv_operation <= SRV_OPERATION_EXPORT_RESTORED)
   {
     /* Because a slow shutdown must empty the change buffer, we had
     better prevent any further changes from being buffered. */
@@ -2016,6 +2019,7 @@ void innodb_shutdown()
     mysql_mutex_unlock(&buf_pool.flush_list_mutex);
     break;
   case SRV_OPERATION_NORMAL:
+  case SRV_OPERATION_EXPORT_RESTORED:
     /* Shut down the persistent files. */
     logs_empty_and_mark_files_at_shutdown();
   }
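The srv0start.cc and recovery hunks above replace equality checks against SRV_OPERATION_NORMAL with `srv_operation <= SRV_OPERATION_EXPORT_RESTORED`, which only works if the new enumerator sits right after SRV_OPERATION_NORMAL in the srv_operation_mode enum. A minimal sketch of that idiom; the enumerator list below is an assumption for illustration, not the real definition from the InnoDB headers:

// Hypothetical ordering; the real enum lives in the InnoDB source and may
// contain additional members.
enum srv_operation_mode
{
  SRV_OPERATION_NORMAL,          // regular server
  SRV_OPERATION_EXPORT_RESTORED, // assumed to directly follow NORMAL
  SRV_OPERATION_RESTORE,         // backup restore
  SRV_OPERATION_RESTORE_EXPORT   // backup restore for export
};

// Both "normal-like" modes are then selected by one ordered comparison
// instead of repeated equality checks.
static bool is_normal_like(srv_operation_mode op)
{
  return op <= SRV_OPERATION_EXPORT_RESTORED;
}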
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index 244b11a30fc..ec8225d9e01 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -3068,7 +3068,8 @@ pfs_start_table_io_wait_v1(PSI_table_locker_state *state,
     PFS_table_share *share= pfs_table->m_share;
     wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
-    wait->m_class= &global_table_io_class;
+    if (wait->m_class == NULL || wait->m_class->m_type != PFS_CLASS_TABLE_LOCK)
+      wait->m_class= &global_table_io_class;
     wait->m_timer_start= timer_start;
     wait->m_timer_end= 0;
     wait->m_object_instance_addr= pfs_table->m_identity;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc
index 81cd2200ae0..a87fe01b3a1 100644
--- a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc
+++ b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc
@@ -9,8 +9,8 @@ if ($rpl_inited)
 # Write file to make mysql-test-run.pl expect the "crash", but don't start
 # it until it's told to
---let $_server_id= `SELECT @@server_id`
---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect
 --exec echo "wait" > $_expect_file_name

 # Send shutdown to the connected server and give
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test
index e084b57fbda..88a02c469bb 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_to_start_after_corruption.test
@@ -16,6 +16,7 @@
 # restart server to change error log and ignore corruptopn on startup
 --let $_mysqld_option=--log-error=$LOG --rocksdb_allow_to_start_after_corruption=1
 --source include/restart_mysqld_with_option.inc
+--let $_server_id= `SELECT @@server_id`

 --echo #
 --echo # Test server crashes on corrupted data and restarts
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test
index 49e5e5c1172..da9d8602c01 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test
@@ -4,8 +4,8 @@ DROP TABLE IF EXISTS t1;
 --enable_warnings

---let $_server_id= `SELECT @@server_id`
---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect
 --let $_cache_file_name= $MYSQLTEST_VARDIR/tmp/persistent_cache
 --exec echo "wait" >$_expect_file_name

diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test
index e7ab37d2658..b2647b38e08 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test
@@ -17,8 +17,8 @@ CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITI

 # Write file to make mysql-test-run.pl expect the "crash", but don't restart the
 # server until it is told to
---let $_server_id= `SELECT @@server_id`
---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect
 --let LOG=$MYSQLTEST_VARDIR/tmp/validate_datadic.err
 --exec echo "wait" >$_expect_file_name

diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test
index 743f942af9c..1daa9898c1a 100644
--- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test
@@ -7,8 +7,8 @@ SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000;

 # Write file to make mysql-test-run.pl expect the "crash", but don't restart the
 # server until it is told to
---let $_server_id= `SELECT @@server_id`
---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--let $_expect_file_name= `select regexp_replace(@@tmpdir, '^.*/','')`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/$_expect_file_name.expect
 --exec echo "wait" >$_expect_file_name

 # Send shutdown to the connected server and give it 10 seconds to die before
diff --git a/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc b/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc
deleted file mode 100644
index a5446a6188d..00000000000
--- a/storage/spider/mysql-test/spider/bugfix/include/restart_spider.inc
+++ /dev/null
@@ -1,8 +0,0 @@
---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect
-
---exec echo "wait" > $_expect_file_name
---shutdown_server
---source include/wait_until_disconnected.inc
---exec echo "restart" > $_expect_file_name
---enable_reconnect
---source include/wait_until_connected_again.inc
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result
index 5715edf2bd6..975d3834d42 100644
--- a/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29352.result
@@ -9,4 +9,5 @@ CREATE FUNCTION spider_bg_direct_sql RETURNS INT SONAME 'ha_spider.so';
 ERROR HY000: Can't execute the query because you have a conflicting read lock
 SELECT * FROM t;
 c
+# restart
 DROP TABLE t;
diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result
new file mode 100644
index 00000000000..c89309a514d
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29904.result
@@ -0,0 +1,4 @@
+#
+# MDEV-29904 SPIDER plugin initialization fails upon startup
+#
+# restart: --plugin-load-add=ha_spider
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test
index 00d8ee73ebc..626364efb99 100644
--- a/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29352.test
@@ -6,6 +6,6 @@ FLUSH TABLES WITH READ LOCK;
 CREATE FUNCTION spider_bg_direct_sql RETURNS INT SONAME 'ha_spider.so';
 SELECT * FROM t;

---source include/restart_spider.inc
+--source include/restart_mysqld.inc

 DROP TABLE t;
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test
new file mode 100644
index 00000000000..d3dcb363890
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29904.test
@@ -0,0 +1,6 @@
+--echo #
+--echo # MDEV-29904 SPIDER plugin initialization fails upon startup
+--echo #
+
+--let $restart_parameters=--plugin-load-add=ha_spider
+--source include/restart_mysqld.inc
diff --git a/strings/conf_to_src.c b/strings/conf_to_src.c
index 4a3b035aa49..efb708db8f6 100644
--- a/strings/conf_to_src.c
+++ b/strings/conf_to_src.c
@@ -459,7 +459,7 @@ main(int argc, char **argv __attribute__((unused)))

   bzero((void*)&all_charsets,sizeof(all_charsets));
   bzero((void*) refids, sizeof(refids));
-  sprintf(filename,"%s/%s",argv[1],"Index.xml");
+  snprintf(filename,sizeof(filename),"%s/%s",argv[1],"Index.xml");
   my_read_charset_file(filename);

   for (cs= all_charsets;
@@ -470,7 +470,7 @@ main(int argc, char **argv __attribute__((unused)))
   {
     if ( (!simple_cs_is_full(cs)) && (cs->csname))
     {
-      sprintf(filename,"%s/%s.xml",argv[1],cs->csname);
+      snprintf(filename,sizeof(filename),"%s/%s.xml",argv[1],cs->csname);
       my_read_charset_file(filename);
     }
     cs->state|= MY_CS_LOADED;
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index fd79a98e59a..d1ca32a8e62 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -2989,6 +2989,14 @@ static inline int my_weight_mb2_ucs2_general_ci(uchar b0, uchar b1)
 }

+static inline int my_weight_mb2_ucs2_general_mysql500_ci(uchar b0, uchar b1)
+{
+  my_wc_t wc= UCS2_CODE(b0, b1);
+  MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8];
+  return (int) (page ? page[wc & 0xFF].sort : wc);
+}
+
+
 #define MY_FUNCTION_NAME(x)      my_ ## x ## _ucs2_general_ci
 #define DEFINE_STRNXFRM_UNICODE
 #define DEFINE_STRNXFRM_UNICODE_NOPAD
@@ -3002,6 +3010,18 @@ static inline int my_weight_mb2_ucs2_general_ci(uchar b0, uchar b1)
 #include "strcoll.inl"

+#define MY_FUNCTION_NAME(x)      my_ ## x ## _ucs2_general_mysql500_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define MY_MB_WC(cs, pwc, s, e)  my_mb_wc_ucs2_quick(pwc, s, e)
+#define OPTIMIZE_ASCII           0
+#define UNICASE_MAXCHAR          MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0            my_unicase_mysql500_page00
+#define UNICASE_PAGES            my_unicase_mysql500_pages
+#define WEIGHT_ILSEQ(x)          (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB2(b0,b1)        my_weight_mb2_ucs2_general_mysql500_ci(b0,b1)
+#include "strcoll.inl"
+
+
 #define MY_FUNCTION_NAME(x)      my_ ## x ## _ucs2_bin
 #define DEFINE_STRNXFRM_UNICODE_BIN2
 #define MY_MB_WC(cs, pwc, s, e)  my_mb_wc_ucs2_quick(pwc, s, e)
@@ -3287,6 +3307,23 @@ static MY_COLLATION_HANDLER my_collation_ucs2_general_ci_handler =
 };

+static MY_COLLATION_HANDLER my_collation_ucs2_general_mysql500_ci_handler =
+{
+  NULL, /* init */
+  my_strnncoll_ucs2_general_mysql500_ci,
+  my_strnncollsp_ucs2_general_mysql500_ci,
+  my_strnncollsp_nchars_ucs2_general_mysql500_ci,
+  my_strnxfrm_ucs2_general_mysql500_ci,
+  my_strnxfrmlen_unicode,
+  my_like_range_generic,
+  my_wildcmp_ucs2_ci,
+  my_strcasecmp_mb2_or_mb4,
+  my_instr_mb,
+  my_hash_sort_ucs2,
+  my_propagate_simple
+};
+
+
 static MY_COLLATION_HANDLER my_collation_ucs2_bin_handler =
 {
   NULL, /* init */
@@ -3434,7 +3471,7 @@ struct charset_info_st my_charset_ucs2_general_mysql500_ci=
   0,          /* escape_with_backslash_is_dangerous */
   1,          /* levels_for_order */
   &my_charset_ucs2_handler,
-  &my_collation_ucs2_general_ci_handler
+  &my_collation_ucs2_general_mysql500_ci_handler
 };
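The new my_weight_mb2_ucs2_general_mysql500_ci() above uses the usual two-level page lookup: the high byte of the code point selects a 256-entry page, the low byte selects the entry, and a missing page falls back to the code point itself as the weight. A simplified sketch with stand-in types (unicase_char and pages are placeholders, not the real MY_UNICASE_CHARACTER tables):

#include <cstdint>

struct unicase_char { uint16_t toupper, tolower, sort; };

// One optional 256-entry page per high byte of the 16-bit code point.
static const unicase_char *pages[256]= { nullptr };

static int weight_of(uint16_t wc)
{
  const unicase_char *page= pages[wc >> 8];   // pick the page
  return page ? page[wc & 0xFF].sort : wc;    // missing page: identity weight
}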
diff --git a/strings/ctype-unidata.h b/strings/ctype-unidata.h
index 6712f5e1d79..9900fd0cedd 100644
--- a/strings/ctype-unidata.h
+++ b/strings/ctype-unidata.h
@@ -21,6 +21,9 @@ extern MY_UNICASE_CHARACTER my_unicase_default_page00[256];
 extern MY_UNICASE_CHARACTER *my_unicase_default_pages[256];
+extern MY_UNICASE_CHARACTER my_unicase_mysql500_page00[256];
+extern MY_UNICASE_CHARACTER *my_unicase_mysql500_pages[256];
+
 size_t my_strxfrm_pad_nweights_unicode(uchar *str, uchar *strend,
                                        size_t nweights);
 size_t my_strxfrm_pad_unicode(uchar *str, uchar *strend);
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index b119826b4cc..1db4b6d9f30 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -248,7 +248,7 @@ MY_UNICASE_CHARACTER my_unicase_default_page00[]={
   Almost similar to my_unicase_default_page00, but maps
   sorting order for U+00DF to 0x00DF instead of 0x0053.
 */
-static MY_UNICASE_CHARACTER plane00_mysql500[]={
+MY_UNICASE_CHARACTER my_unicase_mysql500_page00[]={
   {0x0000,0x0000,0x0000}, {0x0001,0x0001,0x0001},
   {0x0002,0x0002,0x0002}, {0x0003,0x0003,0x0003},
   {0x0004,0x0004,0x0004}, {0x0005,0x0005,0x0005},
@@ -1739,8 +1739,8 @@ MY_UNICASE_INFO my_unicase_default=
 /*
   Reproduce old utf8mb3_general_ci behaviour before we fixed Bug#27877.
 */
-MY_UNICASE_CHARACTER *my_unicase_pages_mysql500[256]={
- plane00_mysql500,
+MY_UNICASE_CHARACTER *my_unicase_mysql500_pages[256]={
+ my_unicase_mysql500_page00,
  plane01, plane02, plane03, plane04, plane05,    NULL,    NULL,    NULL,
     NULL,    NULL,    NULL,    NULL,    NULL,    NULL,    NULL,    NULL,
     NULL,    NULL,    NULL,    NULL,    NULL,    NULL,    NULL,
@@ -1780,7 +1780,7 @@ MY_UNICASE_CHARACTER *my_unicase_pages_mysql500[256]={

 MY_UNICASE_INFO my_unicase_mysql500=
 {
   0xFFFF,
-  my_unicase_pages_mysql500
+  my_unicase_mysql500_pages
 };

@@ -5268,14 +5268,14 @@ static inline int my_weight_mb3_utf8mb3_general_ci(uchar b0, uchar b1, uchar b2)

 static inline int my_weight_mb1_utf8mb3_general_mysql500_ci(uchar b)
 {
-  return (int) plane00_mysql500[b & 0xFF].sort;
+  return (int) my_unicase_mysql500_page00[b & 0xFF].sort;
 }


 static inline int my_weight_mb2_utf8mb3_general_mysql500_ci(uchar b0, uchar b1)
 {
   my_wc_t wc= UTF8MB2_CODE(b0, b1);
-  MY_UNICASE_CHARACTER *page= my_unicase_pages_mysql500[wc >> 8];
+  MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8];
   return (int) (page ? page[wc & 0xFF].sort : wc);
 }

@@ -5284,7 +5284,7 @@ my_weight_mb3_utf8mb3_general_mysql500_ci(uchar b0, uchar b1, uchar b2)
 {
   my_wc_t wc= UTF8MB3_CODE(b0, b1, b2);
-  MY_UNICASE_CHARACTER *page= my_unicase_pages_mysql500[wc >> 8];
+  MY_UNICASE_CHARACTER *page= my_unicase_mysql500_pages[wc >> 8];
   return (int) (page ? page[wc & 0xFF].sort : wc);
 }
@@ -5294,8 +5294,8 @@ my_weight_mb3_utf8mb3_general_mysql500_ci(uchar b0, uchar b1, uchar b2)
 #define MY_MB_WC(cs, pwc, s, e)  my_mb_wc_utf8mb3_quick(pwc, s, e)
 #define OPTIMIZE_ASCII           1
 #define UNICASE_MAXCHAR          MY_UNICASE_INFO_DEFAULT_MAXCHAR
-#define UNICASE_PAGE0            plane00_mysql500
-#define UNICASE_PAGES            my_unicase_pages_mysql500
+#define UNICASE_PAGE0            my_unicase_mysql500_page00
+#define UNICASE_PAGES            my_unicase_mysql500_pages
 #define WEIGHT_ILSEQ(x)          (0xFF0000 + (uchar) (x))
 #define WEIGHT_MB1(x)            my_weight_mb1_utf8mb3_general_mysql500_ci(x)
 #define WEIGHT_MB2(x,y)          my_weight_mb2_utf8mb3_general_mysql500_ci(x,y)
diff --git a/strings/uctypedump.c b/strings/uctypedump.c
index 30ae33afee1..397b6e586f1 100644
--- a/strings/uctypedump.c
+++ b/strings/uctypedump.c
@@ -120,7 +120,7 @@ int main(int ac, char ** av)
     }
     else
     {
-      strcpy(tok,s);
+      safe_strcpy(tok, sizeof(tok), s);
     }

     end=tok+strlen(tok);
@@ -225,7 +225,7 @@ int main(int ac, char ** av)
   {
     char plane_name[128]="NULL";
     if(uctype[plane].ctype){
-      sprintf(plane_name,"uctype_page%02X",(uint) plane);
+      snprintf(plane_name,sizeof(plane_name),"uctype_page%02X",(uint) plane);
     }
     printf("\t{%d,%s}%s\n",uctype[plane].pctype,plane_name,plane<255?",":"");
   }
diff --git a/strings/xml.c b/strings/xml.c
index d16df34bf30..7260ecadc66 100644
--- a/strings/xml.c
+++ b/strings/xml.c
@@ -304,10 +304,10 @@ static int my_xml_leave(MY_XML_PARSER *p, const char *str, size_t slen)
   if (glen)
   {
     mstr(g, tag, sizeof(g)-1, glen);
-    sprintf(p->errstr,"'</%s>' unexpected ('</%s>' wanted)",s,g);
+    snprintf(p->errstr,sizeof(p->errstr),"'</%s>' unexpected ('</%s>' wanted)",s,g);
   }
   else
-    sprintf(p->errstr,"'</%s>' unexpected (END-OF-INPUT wanted)", s);
+    snprintf(p->errstr,sizeof(p->errstr),"'</%s>' unexpected (END-OF-INPUT wanted)", s);

   return MY_XML_ERROR;
 }
@@ -362,7 +362,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len)
       {
         if (MY_XML_IDENT != (lex=my_xml_scan(p,&a)))
         {
-          sprintf(p->errstr,"%s unexpected (ident wanted)",lex2str(lex));
+          snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident wanted)",lex2str(lex));
           return MY_XML_ERROR;
         }
         if (MY_XML_OK != my_xml_leave(p,a.beg,(size_t) (a.end-a.beg)))
@@ -390,7 +390,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len)
       }
       else
       {
-        sprintf(p->errstr,"%s unexpected (ident or '/' wanted)",
+        snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident or '/' wanted)",
                 lex2str(lex));
        return MY_XML_ERROR;
       }
@@ -412,7 +412,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len)
        }
        else
        {
-         sprintf(p->errstr,"%s unexpected (ident or string wanted)",
+         snprintf(p->errstr,sizeof(p->errstr),"%s unexpected (ident or string wanted)",
                  lex2str(lex));
          return MY_XML_ERROR;
        }
@@ -449,7 +449,7 @@ gt:
     {
       if (lex != MY_XML_QUESTION)
       {
-        sprintf(p->errstr,"%s unexpected ('?' wanted)",lex2str(lex));
+        snprintf(p->errstr,sizeof(p->errstr),"%s unexpected ('?' wanted)",lex2str(lex));
         return MY_XML_ERROR;
       }
       if (MY_XML_OK != my_xml_leave(p,NULL,0))
@@ -465,7 +465,7 @@ gt:

       if (lex != MY_XML_GT)
       {
-        sprintf(p->errstr,"%s unexpected ('>' wanted)",lex2str(lex));
+        snprintf(p->errstr,sizeof(p->errstr),"%s unexpected ('>' wanted)",lex2str(lex));
         return MY_XML_ERROR;
       }
     }
@@ -486,7 +486,7 @@ gt:

   if (p->attr.start[0])
   {
-    sprintf(p->errstr,"unexpected END-OF-INPUT");
+    snprintf(p->errstr,sizeof(p->errstr),"unexpected END-OF-INPUT");
     return MY_XML_ERROR;
   }
   return MY_XML_OK;
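The conf_to_src.c, uctypedump.c and xml.c hunks above all follow the same pattern: unbounded sprintf()/strcpy() writes into fixed buffers are replaced with size-bounded calls. A small self-contained sketch of the snprintf() half of that pattern (the directory string is an invented example):

#include <cstdio>

int main()
{
  char filename[32];
  const char *dir= "/some/charset/dir";   // example input only
  // The output is limited to sizeof(filename)-1 characters plus the
  // terminating NUL; longer results are truncated instead of overflowing.
  std::snprintf(filename, sizeof(filename), "%s/%s", dir, "Index.xml");
  std::puts(filename);
  return 0;
}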