author     Nikita Malyavin <nikitamalyavin@gmail.com>  2021-05-05 23:17:20 +0300
committer  Nikita Malyavin <nikitamalyavin@gmail.com>  2021-05-05 23:57:11 +0300
commit     3f55c569514679d98e09e71286ca28a8ac667a71 (patch)
tree       4347fbb2238d1a97e5e1166f9e7b7b7adba165ce
parent     ca1dc0789b7e724128d1369977e2f70fb9d69bb5 (diff)
parent     a4139f8d68bd31e80ff6202c093cd232c194ddfd (diff)
download   mariadb-git-3f55c569514679d98e09e71286ca28a8ac667a71.tar.gz

Merge branch bb-10.4-release into bb-10.5-release (tag: mariadb-10.5.10)
-rw-r--r--  CMakeLists.txt | 27
-rw-r--r--  cmake/libutils.cmake | 2
m---------  libmariadb | 0
-rw-r--r--  libmysqld/CMakeLists.txt | 4
-rw-r--r--  mysql-test/main/derived_split_innodb.result | 34
-rw-r--r--  mysql-test/main/derived_split_innodb.test | 26
-rw-r--r--  mysql-test/main/lowercase_table.result | 47
-rw-r--r--  mysql-test/main/lowercase_table.test | 69
-rw-r--r--  mysql-test/main/mdev19198.result | 15
-rw-r--r--  mysql-test/main/mdev19198.test | 15
-rw-r--r--  mysql-test/main/plugin_vars.result | 35
-rw-r--r--  mysql-test/main/plugin_vars.test | 35
-rw-r--r--  mysql-test/main/show.result | 32
-rw-r--r--  mysql-test/main/show.test | 34
-rw-r--r--  mysql-test/main/show_explain.opt | 1
-rw-r--r--  mysql-test/main/show_explain.test | 9
-rw-r--r--  mysql-test/main/sp-bugs.result | 27
-rw-r--r--  mysql-test/main/sp-bugs.test | 23
-rw-r--r--  mysql-test/main/sp.result | 15
-rw-r--r--  mysql-test/main/sp.test | 19
-rw-r--r--  mysql-test/main/udf.result | 9
-rw-r--r--  mysql-test/main/udf.test | 10
-rw-r--r--  mysql-test/suite/funcs_1/r/storedproc.result | 26
-rw-r--r--  mysql-test/suite/galera/r/galera_inject_bf_long_wait.result | 22
-rw-r--r--  mysql-test/suite/galera/r/galera_password.result | 24
-rw-r--r--  mysql-test/suite/galera/r/galera_wan_restart_sst.result | 24
-rw-r--r--  mysql-test/suite/galera/t/galera_inject_bf_long_wait.test | 25
-rw-r--r--  mysql-test/suite/galera/t/galera_password.test | 14
-rw-r--r--  mysql-test/suite/gcol/inc/gcol_keys.inc | 8
-rw-r--r--  mysql-test/suite/gcol/r/gcol_bugfixes.result | 74
-rw-r--r--  mysql-test/suite/gcol/r/gcol_keys_innodb.result | 5
-rw-r--r--  mysql-test/suite/gcol/r/gcol_keys_myisam.result | 5
-rw-r--r--  mysql-test/suite/gcol/t/gcol_bugfixes.test | 84
-rw-r--r--  mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test | 1
-rw-r--r--  mysql-test/suite/innodb/r/innodb-alter-tempfile.result | 44
-rw-r--r--  mysql-test/suite/innodb/r/log_file_name.result | 1
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-tempfile.test | 49
-rw-r--r--  mysql-test/suite/innodb/t/log_file_name.test | 3
-rw-r--r--  mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result | 10
-rw-r--r--  mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.opt | 1
-rw-r--r--  mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test | 10
-rw-r--r--  mysql-test/suite/rpl/r/rpl_incompatible_heartbeat.result | 17
-rw-r--r--  mysql-test/suite/rpl/t/rpl_incompatible_heartbeat.test | 44
-rw-r--r--  mysql-test/suite/vcol/r/binlog.result | 13
-rw-r--r--  mysql-test/suite/vcol/t/binlog.test | 14
-rw-r--r--  mysql-test/suite/versioning/r/delete_history.result | 33
-rw-r--r--  mysql-test/suite/versioning/r/trx_id.result | 8
-rw-r--r--  mysql-test/suite/versioning/t/delete_history.test | 22
-rw-r--r--  mysql-test/suite/versioning/t/trx_id.opt | 1
-rw-r--r--  mysql-test/suite/versioning/t/trx_id.test | 13
-rw-r--r--  mysys/my_delete.c | 33
-rw-r--r--  plugin/versioning/versioning.cc | 1
-rw-r--r--  scripts/mysql_install_db.sh | 6
-rw-r--r--  sql/item_create.cc | 15
-rw-r--r--  sql/item_create.h | 1
-rw-r--r--  sql/item_vers.cc | 16
-rw-r--r--  sql/item_vers.h | 30
-rw-r--r--  sql/log_event.h | 13
-rw-r--r--  sql/log_event_server.cc | 16
-rw-r--r--  sql/sp_head.h | 2
-rw-r--r--  sql/sql_acl.cc | 6
-rw-r--r--  sql/sql_base.cc | 3
-rw-r--r--  sql/sql_class.cc | 16
-rw-r--r--  sql/sql_class.h | 4
-rw-r--r--  sql/sql_derived.cc | 6
-rw-r--r--  sql/sql_insert.cc | 5
-rw-r--r--  sql/sql_lex.cc | 74
-rw-r--r--  sql/sql_lex.h | 4
-rw-r--r--  sql/sql_prepare.cc | 16
-rw-r--r--  sql/sql_repl.cc | 31
-rw-r--r--  sql/sql_select.cc | 11
-rw-r--r--  sql/sql_select.h | 6
-rw-r--r--  sql/sql_show.cc | 2
-rw-r--r--  sql/sql_table.cc | 10
-rw-r--r--  sql/sql_udf.cc | 6
-rw-r--r--  sql/sql_update.cc | 5
-rw-r--r--  storage/connect/CMakeLists.txt | 3
-rw-r--r--  storage/connect/bson.cpp | 18
-rw-r--r--  storage/connect/bsonudf.cpp | 46
-rw-r--r--  storage/connect/bsonudf.h | 3
-rw-r--r--  storage/connect/cmgoconn.cpp | 6
-rw-r--r--  storage/connect/ha_connect.cc | 62
-rw-r--r--  storage/connect/javaconn.cpp | 21
-rw-r--r--  storage/connect/jmgoconn.cpp | 9
-rw-r--r--  storage/connect/json.cpp | 19
-rw-r--r--  storage/connect/jsonudf.cpp | 206
-rw-r--r--  storage/connect/jsonudf.h | 6
-rw-r--r--  storage/connect/mysql-test/connect/r/json_udf.result | 10
-rw-r--r--  storage/connect/tabbson.cpp | 32
-rw-r--r--  storage/connect/tabbson.h | 3
-rw-r--r--  storage/connect/tabjson.cpp | 116
-rw-r--r--  storage/connect/tabjson.h | 10
-rw-r--r--  storage/connect/tabrest.cpp | 208
-rw-r--r--  storage/connect/tabxml.cpp | 19
-rw-r--r--  storage/connect/valblk.h | 2
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 6
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 15
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 14
-rw-r--r--  storage/innobase/handler/i_s.cc | 3
-rw-r--r--  storage/innobase/include/dict0mem.h | 5
-rw-r--r--  storage/innobase/include/trx0trx.h | 4
-rw-r--r--  storage/innobase/lock/lock0wait.cc | 33
-rw-r--r--  storage/innobase/row/row0vers.cc | 2
-rw-r--r--  storage/innobase/trx/trx0roll.cc | 3
-rw-r--r--  storage/innobase/trx/trx0trx.cc | 13
-rw-r--r--  storage/mroonga/CMakeLists.txt | 2
-rw-r--r--  storage/myisam/ha_myisam.cc | 3
-rw-r--r--  storage/perfschema/CMakeLists.txt | 3
-rw-r--r--  storage/perfschema/pfs_prepared_stmt.h | 4
109 files changed, 1799 insertions, 531 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c1a5de98904..cec9cd7eb70 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,7 +14,8 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12)
+PROJECT(MySQL)
# Remove the following comment if you don't want to have striped binaries
# in RPM's:
@@ -49,8 +50,16 @@ IF(NOT DEFINED MANUFACTURER)
MARK_AS_ADVANCED(MANUFACTURER)
ENDIF()
-SET(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
- "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel")
+IF(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
+ # Setting build type to RelWithDebInfo as none was specified.")
+ SET(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
+ "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel"
+ FORCE)
+ # Set the possible values of build type for cmake-gui
+ SET_PROPERTY(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
+ "None" "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
+ENDIF()
+
# MAX_INDEXES - Set the maximum number of indexes per table, default 64
SET(MAX_INDEXES 64 CACHE STRING "Max number of indexes")
@@ -76,18 +85,8 @@ IF(UNIX AND NOT APPLE)
MARK_AS_ADVANCED(WITH_PIC)
ENDIF()
-# Optionally set project name, e.g.
-# foo.xcodeproj (mac) or foo.sln (windows)
+# This is used by TokuDB only
SET(MYSQL_PROJECT_NAME_DOCSTRING "MySQL project name")
-IF(DEFINED MYSQL_PROJECT_NAME)
- SET(MYSQL_PROJECT_NAME ${MYSQL_PROJECT_NAME} CACHE STRING
- ${MYSQL_PROJECT_NAME_DOCSTRING} FORCE)
-ELSE()
- SET(MYSQL_PROJECT_NAME "MySQL" CACHE STRING
- ${MYSQL_PROJECT_NAME_DOCSTRING} FORCE)
- MARK_AS_ADVANCED(MYSQL_PROJECT_NAME)
-ENDIF()
-PROJECT(${MYSQL_PROJECT_NAME})
IF(CMAKE_VERSION VERSION_LESS "3.1")
IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
diff --git a/cmake/libutils.cmake b/cmake/libutils.cmake
index 37f9cf196af..4c8401971f6 100644
--- a/cmake/libutils.cmake
+++ b/cmake/libutils.cmake
@@ -177,7 +177,7 @@ MACRO(MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
LIST(REVERSE OSLIBS)
LIST(REMOVE_DUPLICATES OSLIBS)
LIST(REVERSE OSLIBS)
- TARGET_LINK_LIBRARIES(${TARGET} ${OSLIBS})
+ TARGET_LINK_LIBRARIES(${TARGET} LINK_PRIVATE ${OSLIBS})
ENDIF()
# Make the generated dummy source file depended on all static input
diff --git a/libmariadb b/libmariadb
-Subproject d19c7c69269fdf4e2af8943dd86c12e4e1664af
+Subproject 180c543704d627a50a52aaf60e24ca14e0ec468
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index e370ac80a8d..90b27a59f02 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -464,9 +464,9 @@ IF(NOT DISABLE_SHARED)
# libmysqld
SET_TARGET_PROPERTIES(libmysqld PROPERTIES CLEAN_DIRECT_OUTPUT 1)
SET_TARGET_PROPERTIES(mysqlserver PROPERTIES CLEAN_DIRECT_OUTPUT 1)
- TARGET_LINK_LIBRARIES(mysqlserver tpool)
+ TARGET_LINK_LIBRARIES(mysqlserver LINK_PRIVATE tpool ${CRC32_LIBRARY})
IF(LIBMYSQLD_SO_EXTRA_LIBS)
- TARGET_LINK_LIBRARIES(libmysqld ${LIBMYSQLD_SO_EXTRA_LIBS})
+ TARGET_LINK_LIBRARIES(libmysqld LINK_PRIVATE ${LIBMYSQLD_SO_EXTRA_LIBS})
ENDIF()
ENDIF()
ENDIF()
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 15b67b51f45..55ace91507e 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -142,3 +142,37 @@ id select_type table type possible_keys key key_len ref rows Extra
3 DERIVED t2 index NULL PRIMARY 4 NULL 3
drop view v1;
drop table t1,t2;
+#
+# MDEV-23723: Crash when test_if_skip_sort_order() is checked for derived table subject to split
+#
+CREATE TABLE t1 (a INT, b INT, KEY (a), KEY (a,b)) ENGINE=InnoDB;
+CREATE TABLE t2 (c INT, KEY (c)) ENGINE=InnoDB;
+SELECT * FROM t1 t1a JOIN t1 t1b;
+a b a b
+INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1,2),(3,4),(5,6),(7,8),(9,10),(11,12);
+set statement optimizer_switch='split_materialized=off' for EXPLAIN
+SELECT *
+FROM
+t1 JOIN
+(SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+WHERE
+t1.a = dt.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 index a,a_2 a_2 10 NULL 6 Using where; Using index
+1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2
+3 DERIVED t1 index NULL a_2 10 NULL 6 Using where; Using index
+3 DERIVED t2 ref c c 5 test.t1.b 1 Using index
+set statement optimizer_switch='split_materialized=on' for EXPLAIN
+SELECT *
+FROM
+t1 JOIN
+(SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+WHERE
+t1.a = dt.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 index a,a_2 a_2 10 NULL 6 Using where; Using index
+1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2
+3 LATERAL DERIVED t1 ref a,a_2 a 5 test.t1.a 1 Using where; Using temporary; Using filesort
+3 LATERAL DERIVED t2 ref c c 5 test.t1.b 1 Using index
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index d4d7fde1fcd..10fc3f93190 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -126,3 +126,29 @@ eval set statement optimizer_switch='split_materialized=off' for explain $q;
drop view v1;
drop table t1,t2;
+
+--echo #
+--echo # MDEV-23723: Crash when test_if_skip_sort_order() is checked for derived table subject to split
+--echo #
+CREATE TABLE t1 (a INT, b INT, KEY (a), KEY (a,b)) ENGINE=InnoDB;
+CREATE TABLE t2 (c INT, KEY (c)) ENGINE=InnoDB;
+
+SELECT * FROM t1 t1a JOIN t1 t1b;
+
+INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1,2),(3,4),(5,6),(7,8),(9,10),(11,12);
+
+let $query=
+EXPLAIN
+SELECT *
+FROM
+ t1 JOIN
+ (SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+WHERE
+ t1.a = dt.a;
+
+eval set statement optimizer_switch='split_materialized=off' for $query;
+eval set statement optimizer_switch='split_materialized=on' for $query;
+
+DROP TABLE t1, t2;
+
diff --git a/mysql-test/main/lowercase_table.result b/mysql-test/main/lowercase_table.result
index 823ffa7696f..3d840445bf2 100644
--- a/mysql-test/main/lowercase_table.result
+++ b/mysql-test/main/lowercase_table.result
@@ -1,7 +1,3 @@
-drop table if exists t1,t2,t3,t4;
-drop table if exists t0,t5,t6,t7,t8,t9;
-drop database if exists mysqltest;
-drop view if exists v0, v1, v2, v3, v4;
create table T1 (id int primary key, Word varchar(40) not null, Index(Word));
create table t4 (id int primary key, Word varchar(40) not null);
INSERT INTO T1 VALUES (1, 'a'), (2, 'b'), (3, 'c');
@@ -79,13 +75,21 @@ ERROR 42000: Not unique table/alias: 'C'
select C.a, c.a from t1 c, t2 C;
ERROR 42000: Not unique table/alias: 'C'
drop table t1, t2;
+#
+# Bug #9761: CREATE TABLE ... LIKE ... not handled correctly when lower_case_table_names is set
+#
create table t1 (a int);
create table t2 like T1;
drop table t1, t2;
show tables;
Tables_in_test
+#
+# End of 4.1 tests
+#
+#
+# Bug#20404: SHOW CREATE TABLE fails with Turkish I
+#
set names utf8;
-drop table if exists İ,İİ;
create table İ (s1 int);
show create table İ;
Table Create Table
@@ -107,7 +111,12 @@ Tables_in_test
ii
drop table İİ;
set names latin1;
-End of 5.0 tests
+#
+# End of 5.0 tests
+#
+#
+# Bug#21317: SHOW CREATE DATABASE does not obey to lower_case_table_names
+#
create database mysql_TEST character set latin2;
create table mysql_TEST.T1 (a int);
show create database mysql_TEST;
@@ -126,8 +135,32 @@ show databases like "mysql_TE%";
Database (mysql_TE%)
mysql_test
drop database mysql_TEST;
-End of 10.0 tests
+#
+# End of 10.0 tests
+#
+#
+# MDEV-17148 DROP DATABASE throw "Directory not empty" after changed lower_case_table_names.
+#
create database db1;
create table t1 (a int);
drop database db1;
drop table t1;
+#
+# End of 10.2 tests
+#
+#
+# MDEV-25109 Server crashes in sp_name::sp_name upon invalid data in mysql.proc
+#
+call mtr.add_suppression("Stored routine ''.'': invalid value in column");
+insert ignore into mysql.proc () values ();
+Warnings:
+Warning 1364 Field 'param_list' doesn't have a default value
+Warning 1364 Field 'returns' doesn't have a default value
+Warning 1364 Field 'body' doesn't have a default value
+Warning 1364 Field 'comment' doesn't have a default value
+show function status;
+ERROR 42000: Incorrect routine name ''
+delete from mysql.proc where name = '';
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/lowercase_table.test b/mysql-test/main/lowercase_table.test
index e0dcb6c36dd..4f92e43f5f7 100644
--- a/mysql-test/main/lowercase_table.test
+++ b/mysql-test/main/lowercase_table.test
@@ -2,14 +2,6 @@
# Test of --lower-case-table-names
#
---disable_warnings
-drop table if exists t1,t2,t3,t4;
-# Clear up from other tests (to ensure that SHOW TABLES below is right)
-drop table if exists t0,t5,t6,t7,t8,t9;
-drop database if exists mysqltest;
-drop view if exists v0, v1, v2, v3, v4;
---enable_warnings
-
create table T1 (id int primary key, Word varchar(40) not null, Index(Word));
create table t4 (id int primary key, Word varchar(40) not null);
INSERT INTO T1 VALUES (1, 'a'), (2, 'b'), (3, 'c');
@@ -68,32 +60,29 @@ drop table t1,t2;
#
create table t1 (a int);
create table t2 (a int);
--- error 1066
+--error ER_NONUNIQ_TABLE
select * from t1 c, t2 C;
--- error 1066
+--error ER_NONUNIQ_TABLE
select C.a, c.a from t1 c, t2 C;
drop table t1, t2;
-#
-# Bug #9761: CREATE TABLE ... LIKE ... not handled correctly when
-# lower_case_table_names is set
+--echo #
+--echo # Bug #9761: CREATE TABLE ... LIKE ... not handled correctly when lower_case_table_names is set
+--echo #
create table t1 (a int);
create table t2 like T1;
drop table t1, t2;
show tables;
+--echo #
+--echo # End of 4.1 tests
+--echo #
-# End of 4.1 tests
-
-
-#
-# Bug#20404: SHOW CREATE TABLE fails with Turkish I
-#
+--echo #
+--echo # Bug#20404: SHOW CREATE TABLE fails with Turkish I
+--echo #
set names utf8;
---disable_warnings
-drop table if exists İ,İİ;
---enable_warnings
create table İ (s1 int);
show create table İ;
show tables;
@@ -104,11 +93,13 @@ show tables;
drop table İİ;
set names latin1;
---echo End of 5.0 tests
+--echo #
+--echo # End of 5.0 tests
+--echo #
-#
-# Bug#21317: SHOW CREATE DATABASE does not obey to lower_case_table_names
-#
+--echo #
+--echo # Bug#21317: SHOW CREATE DATABASE does not obey to lower_case_table_names
+--echo #
create database mysql_TEST character set latin2;
create table mysql_TEST.T1 (a int);
show create database mysql_TEST;
@@ -117,11 +108,13 @@ show databases like "mysql%";
show databases like "mysql_TE%";
drop database mysql_TEST;
---echo End of 10.0 tests
+--echo #
+--echo # End of 10.0 tests
+--echo #
-#
-# MDEV-17148 DROP DATABASE throw "Directory not empty" after changed lower_case_table_names.
-#
+--echo #
+--echo # MDEV-17148 DROP DATABASE throw "Directory not empty" after changed lower_case_table_names.
+--echo #
let $datadir=`select @@datadir`;
create database db1;
@@ -130,3 +123,19 @@ copy_file $datadir/test/t1.frm $datadir/db1/T1.frm;
drop database db1;
drop table t1;
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # MDEV-25109 Server crashes in sp_name::sp_name upon invalid data in mysql.proc
+--echo #
+call mtr.add_suppression("Stored routine ''.'': invalid value in column");
+insert ignore into mysql.proc () values ();
+--error ER_SP_WRONG_NAME
+show function status;
+delete from mysql.proc where name = '';
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/mdev19198.result b/mysql-test/main/mdev19198.result
new file mode 100644
index 00000000000..77c08ca0fb7
--- /dev/null
+++ b/mysql-test/main/mdev19198.result
@@ -0,0 +1,15 @@
+CREATE TABLE t1 (c INT);
+CREATE TABLE t2 (c INT);
+LOCK TABLES t1 WRITE, t2 READ;
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+Warnings:
+Note 1050 Table 't1' already exists
+UNLOCK TABLES;
+LOCK TABLES t1 READ , t2 READ;
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
+UNLOCK TABLES;
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+Warnings:
+Note 1050 Table 't1' already exists
+DROP TABLES t1,t2;
diff --git a/mysql-test/main/mdev19198.test b/mysql-test/main/mdev19198.test
new file mode 100644
index 00000000000..19b45ed7510
--- /dev/null
+++ b/mysql-test/main/mdev19198.test
@@ -0,0 +1,15 @@
+CREATE TABLE t1 (c INT);
+CREATE TABLE t2 (c INT);
+
+LOCK TABLES t1 WRITE, t2 READ;
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+UNLOCK TABLES;
+
+LOCK TABLES t1 READ , t2 READ;
+--error ER_TABLE_NOT_LOCKED_FOR_WRITE
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+UNLOCK TABLES;
+
+CREATE TABLE IF NOT EXISTS t1 LIKE t2;
+
+DROP TABLES t1,t2;
diff --git a/mysql-test/main/plugin_vars.result b/mysql-test/main/plugin_vars.result
index 0e382427b1d..3fadd5e74fd 100644
--- a/mysql-test/main/plugin_vars.result
+++ b/mysql-test/main/plugin_vars.result
@@ -30,3 +30,38 @@ disconnect con2;
USE test;
DROP PROCEDURE p_install;
DROP PROCEDURE p_show_vars;
+#
+# Bug#29363867: LOST CONNECTION TO MYSQL SERVER DURING QUERY
+#
+## prepared SET with a plugin variable prevents uninstall
+install plugin query_response_time soname 'query_response_time';
+prepare s from 'set global query_response_time_range_base=16';
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+plugin_status
+ACTIVE
+uninstall plugin query_response_time;
+Warnings:
+Warning 1620 Plugin is busy and will be uninstalled on shutdown
+execute s;
+execute s;
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+plugin_status
+DELETED
+deallocate prepare s;
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+plugin_status
+## prepared SET mentioning a plugin otherwise does not prevent uninstall
+install plugin archive soname 'ha_archive';
+create table t1 (a int) engine=archive;
+insert t1 values (1),(2),(3);
+prepare s from 'set session auto_increment_increment=(select count(*) from t1)';
+flush tables;
+select plugin_status from information_schema.plugins where plugin_name='archive';
+plugin_status
+ACTIVE
+uninstall plugin archive;
+select plugin_status from information_schema.plugins where plugin_name='archive';
+plugin_status
+execute s;
+ERROR 42000: Unknown storage engine 'ARCHIVE'
+drop table t1;
diff --git a/mysql-test/main/plugin_vars.test b/mysql-test/main/plugin_vars.test
index 8ba8fe2ec0e..797dcbea727 100644
--- a/mysql-test/main/plugin_vars.test
+++ b/mysql-test/main/plugin_vars.test
@@ -1,3 +1,10 @@
+if (!$QUERY_RESPONSE_TIME_SO) {
+ skip Needs query_response_time loadable plugin;
+}
+if (!$HA_ARCHIVE_SO) {
+ skip Needs Archive loadable plugin;
+}
+
--echo #
--echo # MDEV-5345 - Deadlock between mysql_change_user(), SHOW VARIABLES and
--echo # INSTALL PLUGIN
@@ -54,3 +61,31 @@ disconnect con2;
USE test;
DROP PROCEDURE p_install;
DROP PROCEDURE p_show_vars;
+
+--echo #
+--echo # Bug#29363867: LOST CONNECTION TO MYSQL SERVER DURING QUERY
+--echo #
+
+--echo ## prepared SET with a plugin variable prevents uninstall
+install plugin query_response_time soname 'query_response_time';
+prepare s from 'set global query_response_time_range_base=16';
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+uninstall plugin query_response_time;
+execute s;
+execute s;
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+deallocate prepare s;
+select plugin_status from information_schema.plugins where plugin_name='query_response_time';
+
+--echo ## prepared SET mentioning a plugin otherwise does not prevent uninstall
+install plugin archive soname 'ha_archive';
+create table t1 (a int) engine=archive;
+insert t1 values (1),(2),(3);
+prepare s from 'set session auto_increment_increment=(select count(*) from t1)';
+flush tables;
+select plugin_status from information_schema.plugins where plugin_name='archive';
+uninstall plugin archive;
+select plugin_status from information_schema.plugins where plugin_name='archive';
+--error ER_UNKNOWN_STORAGE_ENGINE
+execute s;
+drop table t1;
diff --git a/mysql-test/main/show.result b/mysql-test/main/show.result
index 3dd7af5de05..d1b373d8969 100644
--- a/mysql-test/main/show.result
+++ b/mysql-test/main/show.result
@@ -1,3 +1,8 @@
+#
+# MDEV-9538 Server crashes in check_show_access on SHOW STATISTICS
+# MDEV-9539 Server crashes in make_columns_old_format on SHOW GEOMETRY_COLUMNS
+# MDEV-9540 SHOW SPATIAL_REF_SYS and SHOW SYSTEM_VARIABLES return empty results with numerous warnings
+#
show statistics;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'statistics' at line 1
show spatial_ref_sys
@@ -10,3 +15,30 @@ show geometry_columns;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'geometry_columns' at line 1
show nonexistent;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'nonexistent' at line 1
+#
+# MDEV-21603 Crashing SHOW TABLES with derived table in WHERE condition
+#
+create table t1 (nm varchar(32), a int);
+insert t1 values ('1',1),('2',2),('3',3);
+show tables
+where tables_in_test in (select *
+from (select nm from test.t1 group by nm) dt);
+Tables_in_test
+show fields from test.t1
+where field in (select * from (select nm from test.t1 group by nm) dt);
+Field Type Null Key Default Extra
+insert t1 values ('nm',0);
+show fields from test.t1
+where field in (select * from (select nm from test.t1 group by nm) dt);
+Field Type Null Key Default Extra
+nm varchar(32) YES NULL
+show fields from test.t1 where field in
+(select * from (select column_name from information_schema.columns
+where table_name='t1' group by column_name) dt);
+Field Type Null Key Default Extra
+nm varchar(32) YES NULL
+a int(11) YES NULL
+drop table t1;
+#
+# End of 10.2 tests
+#
diff --git a/mysql-test/main/show.test b/mysql-test/main/show.test
index 3101f443264..f2f6efc4e45 100644
--- a/mysql-test/main/show.test
+++ b/mysql-test/main/show.test
@@ -1,8 +1,8 @@
-#
-# MDEV-9538 Server crashes in check_show_access on SHOW STATISTICS
-# MDEV-9539 Server crashes in make_columns_old_format on SHOW GEOMETRY_COLUMNS
-# MDEV-9540 SHOW SPATIAL_REF_SYS and SHOW SYSTEM_VARIABLES return empty results with numerous warnings
-#
+--echo #
+--echo # MDEV-9538 Server crashes in check_show_access on SHOW STATISTICS
+--echo # MDEV-9539 Server crashes in make_columns_old_format on SHOW GEOMETRY_COLUMNS
+--echo # MDEV-9540 SHOW SPATIAL_REF_SYS and SHOW SYSTEM_VARIABLES return empty results with numerous warnings
+--echo #
--error ER_PARSE_ERROR
show statistics;
--error ER_PARSE_ERROR
@@ -13,3 +13,27 @@ show system_variables;
show geometry_columns;
--error ER_PARSE_ERROR
show nonexistent;
+
+--echo #
+--echo # MDEV-21603 Crashing SHOW TABLES with derived table in WHERE condition
+--echo #
+create table t1 (nm varchar(32), a int);
+insert t1 values ('1',1),('2',2),('3',3);
+
+show tables
+ where tables_in_test in (select *
+ from (select nm from test.t1 group by nm) dt);
+show fields from test.t1
+ where field in (select * from (select nm from test.t1 group by nm) dt);
+insert t1 values ('nm',0);
+show fields from test.t1
+ where field in (select * from (select nm from test.t1 group by nm) dt);
+
+show fields from test.t1 where field in
+ (select * from (select column_name from information_schema.columns
+ where table_name='t1' group by column_name) dt);
+drop table t1;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
diff --git a/mysql-test/main/show_explain.opt b/mysql-test/main/show_explain.opt
new file mode 100644
index 00000000000..3a3bab51225
--- /dev/null
+++ b/mysql-test/main/show_explain.opt
@@ -0,0 +1 @@
+--enable-plugin-innodb-lock-waits --enable-plugin-innodb-trx
diff --git a/mysql-test/main/show_explain.test b/mysql-test/main/show_explain.test
index 6f49b9cd301..515eb9efa47 100644
--- a/mysql-test/main/show_explain.test
+++ b/mysql-test/main/show_explain.test
@@ -863,7 +863,14 @@ select * from t1 where pk between 10 and 20 for update;
# run SHOW EXPLAIN on a frozen thread
connection default;
let $save_wait_condition= $wait_condition;
-let $wait_condition= select State='Sending data' from information_schema.processlist where id=$thr2;
+let $wait_condition=
+select 1
+from information_schema.INNODB_LOCK_WAITS
+where
+ requesting_trx_id=(select trx_id
+ from information_schema.INNODB_TRX
+ where trx_mysql_thread_id=$thr2);
+
let $thr_default=`select connection_id()`;
--source include/wait_condition.inc
--echo # do: send_eval show explain for thr2;
diff --git a/mysql-test/main/sp-bugs.result b/mysql-test/main/sp-bugs.result
index 0aa9033f477..665e787442d 100644
--- a/mysql-test/main/sp-bugs.result
+++ b/mysql-test/main/sp-bugs.result
@@ -116,7 +116,9 @@ Warnings:
Note 1050 Table 't2' already exists
DROP DATABASE testdb;
USE test;
-End of 5.1 tests
+#
+# End of 5.1 tests
+#
#
# BUG#13489996 valgrind:conditional jump or move depends on
# uninitialised values-field_blob
@@ -328,3 +330,26 @@ FOR i IN 1..10 DO
RETURN 1;
END FOR
DROP FUNCTION f1;
+#
+# End of 10.2 tests
+#
+#
+# MDEV-25501 routine_definition in information_schema.routines loses tablename if it starts with an _ and is not backticked
+#
+create table _t1 (a int);
+create procedure p1() select * from _t1;
+show create procedure p1;
+Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
+p1 CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`()
+select * from _t1 latin1 latin1_swedish_ci latin1_swedish_ci
+select routine_definition from information_schema.routines where routine_schema=database() and specific_name='p1';
+routine_definition
+select * from _t1
+select body, body_utf8 from mysql.proc where name='p1';
+body body_utf8
+select * from _t1 select * from _t1
+drop procedure p1;
+drop table _t1;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/sp-bugs.test b/mysql-test/main/sp-bugs.test
index f06e9eca690..9b81fd1af61 100644
--- a/mysql-test/main/sp-bugs.test
+++ b/mysql-test/main/sp-bugs.test
@@ -143,7 +143,9 @@ CALL p1();
DROP DATABASE testdb;
USE test;
---echo End of 5.1 tests
+--echo #
+--echo # End of 5.1 tests
+--echo #
--echo #
--echo # BUG#13489996 valgrind:conditional jump or move depends on
@@ -350,3 +352,22 @@ DELIMITER ;$$
SELECT f1();
SELECT body FROM mysql.proc WHERE db='test' AND specific_name='f1';
DROP FUNCTION f1;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # MDEV-25501 routine_definition in information_schema.routines loses tablename if it starts with an _ and is not backticked
+--echo #
+create table _t1 (a int);
+create procedure p1() select * from _t1;
+show create procedure p1;
+select routine_definition from information_schema.routines where routine_schema=database() and specific_name='p1';
+select body, body_utf8 from mysql.proc where name='p1';
+drop procedure p1;
+drop table _t1;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/sp.result b/mysql-test/main/sp.result
index 17b0af92a40..6c0dd6d4c8a 100644
--- a/mysql-test/main/sp.result
+++ b/mysql-test/main/sp.result
@@ -8466,6 +8466,21 @@ ERROR HY000: View 'test.v1' references invalid table(s) or column(s) or function
DROP PROCEDURE p1;
DROP VIEW v1;
DROP TABLE t1;
+#
+# BUG#30366310: USING A FUNCTION TO ASSIGN DEFAULT VALUES TO
+# 2 OR MORE VARIABLES CRASHES SERVER
+#
+create function f1() returns bigint return now()-1|
+create procedure p1()
+begin
+declare b, c bigint default f1();
+select b-c;
+end|
+call p1()|
+b-c
+0
+drop procedure p1|
+drop function f1|
#End of 10.2 tests
#
# MDEV-12007 Allow ROW variables as a cursor FETCH target
diff --git a/mysql-test/main/sp.test b/mysql-test/main/sp.test
index bf3a70b6283..c9528c1ccb9 100644
--- a/mysql-test/main/sp.test
+++ b/mysql-test/main/sp.test
@@ -10013,6 +10013,25 @@ DROP PROCEDURE p1;
DROP VIEW v1;
DROP TABLE t1;
+
+--echo #
+--echo # BUG#30366310: USING A FUNCTION TO ASSIGN DEFAULT VALUES TO
+--echo # 2 OR MORE VARIABLES CRASHES SERVER
+--echo #
+
+delimiter |;
+create function f1() returns bigint return now()-1|
+create procedure p1()
+begin
+ declare b, c bigint default f1();
+ select b-c;
+end|
+call p1()|
+drop procedure p1|
+drop function f1|
+delimiter ;|
+
+
--echo #End of 10.2 tests
--echo #
diff --git a/mysql-test/main/udf.result b/mysql-test/main/udf.result
index a6fabf7f137..27bd17e7e31 100644
--- a/mysql-test/main/udf.result
+++ b/mysql-test/main/udf.result
@@ -492,8 +492,17 @@ select * from mysql.plugin WHERE name='unexisting_udf';
name dl
DROP FUNCTION unexisting_udf;
ERROR 42000: FUNCTION test.unexisting_udf does not exist
+#
+# Bug #31674599: THE UDF_INIT() FUNCTION CAUSE SERVER CRASH
+#
+call mtr.add_suppression('Invalid row in mysql.func table');
+insert mysql.func () values ();
+# restart
+delete from mysql.func where name = '';
+#
# End of 10.2 tests
#
+#
# MDEV-15073: Generic UDAF parser code in server for window functions
#
CREATE AGGREGATE FUNCTION avgcost
diff --git a/mysql-test/main/udf.test b/mysql-test/main/udf.test
index 2e2272b2157..058f131273d 100644
--- a/mysql-test/main/udf.test
+++ b/mysql-test/main/udf.test
@@ -562,7 +562,17 @@ select * from mysql.plugin WHERE name='unexisting_udf';
--error ER_SP_DOES_NOT_EXIST
DROP FUNCTION unexisting_udf;
+--echo #
+--echo # Bug #31674599: THE UDF_INIT() FUNCTION CAUSE SERVER CRASH
+--echo #
+call mtr.add_suppression('Invalid row in mysql.func table');
+insert mysql.func () values ();
+source include/restart_mysqld.inc;
+delete from mysql.func where name = '';
+
+--echo #
--echo # End of 10.2 tests
+--echo #
--echo #
--echo # MDEV-15073: Generic UDAF parser code in server for window functions
diff --git a/mysql-test/suite/funcs_1/r/storedproc.result b/mysql-test/suite/funcs_1/r/storedproc.result
index 4005240aa49..30cbdb2c32a 100644
--- a/mysql-test/suite/funcs_1/r/storedproc.result
+++ b/mysql-test/suite/funcs_1/r/storedproc.result
@@ -7110,7 +7110,7 @@ CALL sp1();
x y z
000 000 000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7148,7 +7148,7 @@ CALL sp1();
x y z
00000 00000 00000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7186,7 +7186,7 @@ CALL sp1();
x y z
00000000 00000000 00000000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7224,7 +7224,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7262,7 +7262,7 @@ CALL sp1();
x y z
00000000000000000000 00000000000000000000 00000000000000000000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7282,7 +7282,7 @@ CALL sp1();
x y z
-9999999999 -9999999999 -9999999999
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7293,7 +7293,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7304,7 +7304,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
-Warning 1264 Out of range value for column 'z' at row 1
+Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7315,7 +7315,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7326,7 +7326,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7337,7 +7337,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7348,7 +7348,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@@ -7359,7 +7359,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
-Note 1265 Data truncated for column 'z' at row 1
+Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
diff --git a/mysql-test/suite/galera/r/galera_inject_bf_long_wait.result b/mysql-test/suite/galera/r/galera_inject_bf_long_wait.result
new file mode 100644
index 00000000000..e9eab5401c4
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_inject_bf_long_wait.result
@@ -0,0 +1,22 @@
+CREATE TABLE t1(id int not null primary key, b int) engine=InnoDB;
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3);
+BEGIN;
+UPDATE t1 set b = 100 where id between 1 and 2;;
+connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1b;
+SET @save_dbug = @@SESSION.debug_dbug;
+SET @@SESSION.innodb_lock_wait_timeout=2;
+SET @@SESSION.debug_dbug = '+d,wsrep_instrument_BF_lock_wait';
+UPDATE t1 set b = 200 WHERE id = 1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET @@SESSION.debug_dbug = @save_dbug;
+connection node_1;
+COMMIT;
+SELECT * FROM t1;
+id b
+0 0
+1 100
+2 100
+3 3
+disconnect node_1b;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_password.result b/mysql-test/suite/galera/r/galera_password.result
new file mode 100644
index 00000000000..00ffc1df8f9
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_password.result
@@ -0,0 +1,24 @@
+connection node_2;
+connection node_1;
+SHOW VARIABLES LIKE '%password%';
+Variable_name Value
+default_password_lifetime 0
+disconnect_on_expired_password OFF
+max_password_errors 4294967295
+old_passwords OFF
+report_password
+strict_password_validation ON
+CREATE USER 'user123456'@'localhost';
+GRANT SELECT, INSERT, UPDATE ON test.* TO 'user123456'@'localhost';
+SET PASSWORD FOR 'user123456'@'localhost' = PASSWORD('A$10abcdDCBA123456%7');
+SHOW GRANTS FOR 'user123456'@'localhost';
+Grants for user123456@localhost
+GRANT USAGE ON *.* TO `user123456`@`localhost` IDENTIFIED BY PASSWORD '*5846CF4D641598B360B3562E581586155C59F65A'
+GRANT SELECT, INSERT, UPDATE ON `test`.* TO `user123456`@`localhost`
+connection node_2;
+SHOW GRANTS FOR 'user123456'@'localhost';
+Grants for user123456@localhost
+GRANT USAGE ON *.* TO `user123456`@`localhost` IDENTIFIED BY PASSWORD '*5846CF4D641598B360B3562E581586155C59F65A'
+GRANT SELECT, INSERT, UPDATE ON `test`.* TO `user123456`@`localhost`
+connection node_1;
+DROP USER 'user123456'@'localhost';
diff --git a/mysql-test/suite/galera/r/galera_wan_restart_sst.result b/mysql-test/suite/galera/r/galera_wan_restart_sst.result
index 05390338160..988b63e314a 100644
--- a/mysql-test/suite/galera/r/galera_wan_restart_sst.result
+++ b/mysql-test/suite/galera/r/galera_wan_restart_sst.result
@@ -1,8 +1,8 @@
connection node_2;
connection node_1;
-SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
-VARIABLE_VALUE = 4
-1
+SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+EXPECT_4
+4
connection node_1;
CREATE TABLE t1 (f1 INTEGER);
INSERT INTO t1 VALUES (1);
@@ -52,23 +52,23 @@ SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_2;
-SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
-VARIABLE_VALUE = 4
-1
+SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+EXPECT_4
+4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_3;
-SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
-VARIABLE_VALUE = 4
-1
+SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+EXPECT_4
+4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_4;
-SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
-VARIABLE_VALUE = 4
-1
+SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+EXPECT_4
+4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
diff --git a/mysql-test/suite/galera/t/galera_inject_bf_long_wait.test b/mysql-test/suite/galera/t/galera_inject_bf_long_wait.test
new file mode 100644
index 00000000000..f4aac7fd795
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_inject_bf_long_wait.test
@@ -0,0 +1,25 @@
+--source include/galera_cluster.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1(id int not null primary key, b int) engine=InnoDB;
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3);
+
+BEGIN;
+--send UPDATE t1 set b = 100 where id between 1 and 2;
+
+--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1b
+SET @save_dbug = @@SESSION.debug_dbug;
+SET @@SESSION.innodb_lock_wait_timeout=2;
+SET @@SESSION.debug_dbug = '+d,wsrep_instrument_BF_lock_wait';
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 set b = 200 WHERE id = 1;
+SET @@SESSION.debug_dbug = @save_dbug;
+
+--connection node_1
+--reap
+COMMIT;
+SELECT * FROM t1;
+--disconnect node_1b
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_password.test b/mysql-test/suite/galera/t/galera_password.test
new file mode 100644
index 00000000000..7843097c67e
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_password.test
@@ -0,0 +1,14 @@
+--source include/galera_cluster.inc
+
+SHOW VARIABLES LIKE '%password%';
+
+CREATE USER 'user123456'@'localhost';
+GRANT SELECT, INSERT, UPDATE ON test.* TO 'user123456'@'localhost';
+SET PASSWORD FOR 'user123456'@'localhost' = PASSWORD('A$10abcdDCBA123456%7');
+SHOW GRANTS FOR 'user123456'@'localhost';
+
+--connection node_2
+SHOW GRANTS FOR 'user123456'@'localhost';
+
+--connection node_1
+DROP USER 'user123456'@'localhost';
diff --git a/mysql-test/suite/gcol/inc/gcol_keys.inc b/mysql-test/suite/gcol/inc/gcol_keys.inc
index 475ab96e56f..e5f7f976120 100644
--- a/mysql-test/suite/gcol/inc/gcol_keys.inc
+++ b/mysql-test/suite/gcol/inc/gcol_keys.inc
@@ -809,4 +809,12 @@ eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (p
--remove_file $MYSQLTEST_VARDIR/tmp/load.data
DROP TABLE t1;
+
+--echo # MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
+--echo # failed in ha_myisam::setup_vcols_for_repair
+CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
+ALTER TABLE t1 ADD KEY (a);
+
+DROP TABLE t1;
+
}
diff --git a/mysql-test/suite/gcol/r/gcol_bugfixes.result b/mysql-test/suite/gcol/r/gcol_bugfixes.result
index 71c8ab4b190..6d93c63fc2f 100644
--- a/mysql-test/suite/gcol/r/gcol_bugfixes.result
+++ b/mysql-test/suite/gcol/r/gcol_bugfixes.result
@@ -670,3 +670,77 @@ PRIMARY KEY (number)
REPLACE t2(number) VALUES('1');
REPLACE t2(number) VALUES('1');
DROP TABLE t2;
+# MDEV-24583 SELECT aborts after failed REPLACE into table with vcol
+CREATE TABLE t1 (pk INT, a VARCHAR(3), v VARCHAR(3) AS (CONCAT('x-',a)),
+PRIMARY KEY(pk)) ENGINE=MyISAM;
+CREATE VIEW v1 AS SELECT * FROM t1;
+INSERT INTO t1 (pk, a) VALUES (1,'foo');
+SET sql_mode=CONCAT(@@sql_mode,',STRICT_ALL_TABLES');
+REPLACE INTO t1 (pk,a) VALUES (1,'qux');
+SELECT * FROM v1;
+pk a v
+1 foo x-f
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT,
+a VARCHAR(1),
+v VARCHAR(1) AS (CONCAT('virt-',a)) VIRTUAL,
+PRIMARY KEY (pk)
+) ENGINE=InnoDB;
+INSERT INTO t1 (pk,a) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f');
+REPLACE INTO t1 (pk) VALUES (1);
+ERROR 22001: Data too long for column 'v' at row 1
+SELECT * FROM t1 ORDER BY a;
+pk a v
+1 a v
+2 b v
+3 c v
+4 d v
+5 e v
+6 f v
+SET SQL_MODE=DEFAULT;
+DROP TABLE t1;
+# (duplicate) MDEV-24656
+# [FATAL] InnoDB: Data field type 0, len 0, ASAN heap-buffer-overflow
+# upon LOAD DATA with virtual columns
+CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(2333),
+va VARCHAR(171) AS (a)) ENGINE=InnoDB;
+INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
+SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
+ERROR 22001: Data too long for column 'va' at row 1
+SELECT * FROM t1;
+id a va
+1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
+Warnings:
+Warning 1062 Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE t1;
+CREATE TABLE t1 (id BIGINT PRIMARY KEY, a VARCHAR(2333),
+va VARCHAR(171) AS (a)) ENGINE=InnoDB;
+INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
+SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
+ERROR 22001: Data too long for column 'va' at row 1
+SELECT * FROM t1;
+id a va
+1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
+Warnings:
+Warning 1062 Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE t1;
+# (duplicate) MDEV-24665
+# ASAN errors, assertion failures, corrupt values after failed
+# LOAD DATA into table with virtual/stored column
+CREATE TABLE t1 (id INT PRIMARY KEY,
+ts TIMESTAMP DEFAULT '1971-01-01 00:00:00',
+c VARBINARY(8) DEFAULT '', vc VARCHAR(3) AS (c) STORED);
+INSERT IGNORE INTO t1 (id,c) VALUES (1,'foobar');
+Warnings:
+Warning 1265 Data truncated for column 'vc' at row 1
+SELECT id, ts, vc INTO OUTFILE 'load_t1' FROM t1;
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id, ts, vc);
+INSERT IGNORE INTO t1 (id) VALUES (2);
+DROP TABLE t1;
diff --git a/mysql-test/suite/gcol/r/gcol_keys_innodb.result b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
index 4f7d654ac4e..0ee6654f3a7 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
@@ -883,6 +883,11 @@ Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
+# MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
+# failed in ha_myisam::setup_vcols_for_repair
+CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
+ALTER TABLE t1 ADD KEY (a);
+DROP TABLE t1;
#
# BUG#21365158 WL8149:ASSERTION `!TABLE || (!TABLE->WRITE_SET
#
diff --git a/mysql-test/suite/gcol/r/gcol_keys_myisam.result b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
index 3f00d344901..48e11cbe222 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
@@ -883,6 +883,11 @@ Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
+# MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
+# failed in ha_myisam::setup_vcols_for_repair
+CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
+ALTER TABLE t1 ADD KEY (a);
+DROP TABLE t1;
DROP VIEW IF EXISTS v1,v2;
DROP TABLE IF EXISTS t1,t2,t3;
DROP PROCEDURE IF EXISTS p1;
diff --git a/mysql-test/suite/gcol/t/gcol_bugfixes.test b/mysql-test/suite/gcol/t/gcol_bugfixes.test
index 033c430853d..a1f277199eb 100644
--- a/mysql-test/suite/gcol/t/gcol_bugfixes.test
+++ b/mysql-test/suite/gcol/t/gcol_bugfixes.test
@@ -634,3 +634,87 @@ REPLACE t2(number) VALUES('1');
REPLACE t2(number) VALUES('1');
DROP TABLE t2;
+
+--echo # MDEV-24583 SELECT aborts after failed REPLACE into table with vcol
+
+CREATE TABLE t1 (pk INT, a VARCHAR(3), v VARCHAR(3) AS (CONCAT('x-',a)),
+ PRIMARY KEY(pk)) ENGINE=MyISAM;
+CREATE VIEW v1 AS SELECT * FROM t1;
+INSERT INTO t1 (pk, a) VALUES (1,'foo');
+SET sql_mode=CONCAT(@@sql_mode,',STRICT_ALL_TABLES');
+--error 0,ER_DATA_TOO_LONG
+REPLACE INTO t1 (pk,a) VALUES (1,'qux');
+SELECT * FROM v1;
+
+# Cleanup
+DROP VIEW v1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ pk INT,
+ a VARCHAR(1),
+ v VARCHAR(1) AS (CONCAT('virt-',a)) VIRTUAL,
+ PRIMARY KEY (pk)
+) ENGINE=InnoDB;
+
+INSERT INTO t1 (pk,a) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f');
+
+ --error ER_DATA_TOO_LONG
+REPLACE INTO t1 (pk) VALUES (1);
+SELECT * FROM t1 ORDER BY a;
+
+SET SQL_MODE=DEFAULT;
+DROP TABLE t1;
+
+--echo # (duplicate) MDEV-24656
+--echo # [FATAL] InnoDB: Data field type 0, len 0, ASAN heap-buffer-overflow
+--echo # upon LOAD DATA with virtual columns
+
+CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(2333),
+ va VARCHAR(171) AS (a)) ENGINE=InnoDB;
+INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
+SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
+--error ER_DATA_TOO_LONG
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
+SELECT * FROM t1;
+LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
+
+DROP TABLE t1;
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load_t1
+
+CREATE TABLE t1 (id BIGINT PRIMARY KEY, a VARCHAR(2333),
+ va VARCHAR(171) AS (a)) ENGINE=InnoDB;
+INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
+SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
+--error ER_DATA_TOO_LONG
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
+SELECT * FROM t1;
+LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
+
+# Cleanup
+DROP TABLE t1;
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load_t1
+
+
+--echo # (duplicate) MDEV-24665
+--echo # ASAN errors, assertion failures, corrupt values after failed
+--echo # LOAD DATA into table with virtual/stored column
+
+CREATE TABLE t1 (id INT PRIMARY KEY,
+ ts TIMESTAMP DEFAULT '1971-01-01 00:00:00',
+ c VARBINARY(8) DEFAULT '', vc VARCHAR(3) AS (c) STORED);
+INSERT IGNORE INTO t1 (id,c) VALUES (1,'foobar');
+SELECT id, ts, vc INTO OUTFILE 'load_t1' FROM t1;
+--error 0,ER_DATA_TOO_LONG
+LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id, ts, vc);
+INSERT IGNORE INTO t1 (id) VALUES (2);
+
+# Cleanup
+DROP TABLE t1;
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load_t1
+
+
diff --git a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
index 50fd7e3ddb5..cdec8107095 100644
--- a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
+++ b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
@@ -261,4 +261,5 @@ DROP TABLE t1, t2;
--source include/wait_until_count_sessions.inc
set debug_sync=reset;
+
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
diff --git a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
index cfc99650db6..0716f3da23c 100644
--- a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
+++ b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
@@ -1,3 +1,7 @@
+call mtr.add_suppression("Cannot find index f2 in InnoDB index dictionary.");
+call mtr.add_suppression("InnoDB indexes are inconsistent with what defined in .frm for table .*");
+call mtr.add_suppression("Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB .*");
+call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict cache for table .*");
#
# Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS
#
@@ -25,3 +29,43 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f2`,`f1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t1;
+#
+# MDEV-22928 InnoDB fails to fetch index type
+# when index mismatch
+#
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
+index(f1), index(f2))ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1, 1), (2, 2);
+connect con1,localhost,root,,test;
+SET DEBUG_SYNC="alter_table_inplace_after_commit SIGNAL default_signal WAIT_FOR default_done";
+ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
+connection default;
+set DEBUG_SYNC="now WAIT_FOR default_signal";
+# restart
+disconnect con1;
+SHOW KEYS FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 1 f1 1 f1 A 2 NULL NULL BTREE
+t1 1 f2 1 f2 A NULL NULL NULL Corrupted
+Warnings:
+Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
+Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
+Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
+Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
+DROP TABLE t1;
+#
+# MDEV-25503 InnoDB hangs on startup during recovery
+#
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1;
+connect con1,localhost,root,,;
+BEGIN;
+DELETE FROM mysql.innodb_table_stats;
+connect con2,localhost,root,,;
+SET DEBUG_SYNC='inplace_after_index_build SIGNAL blocked WAIT_FOR ever';
+ALTER TABLE t1 FORCE;
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR blocked';
+# restart
+SELECT * FROM t1;
+a
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result
index f183cb44ebe..42b988ed3ca 100644
--- a/mysql-test/suite/innodb/r/log_file_name.result
+++ b/mysql-test/suite/innodb/r/log_file_name.result
@@ -23,7 +23,6 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-FOUND 1 /InnoDB: Ignoring data file '.*t1.ibd' with space ID/ in mysqld.1.err
FOUND 1 /InnoDB: Tablespace \d+ was not found at.*t3.ibd/ in mysqld.1.err
# Fault 3: Wrong space_id in a dirty file, and no missing file.
# restart
diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
index f7635e96d50..cbf8ff9e87f 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test
@@ -12,6 +12,14 @@
--source include/innodb_page_size.inc
+--source include/have_debug_sync.inc
+
+call mtr.add_suppression("Cannot find index f2 in InnoDB index dictionary.");
+call mtr.add_suppression("InnoDB indexes are inconsistent with what defined in .frm for table .*");
+call mtr.add_suppression("Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB .*");
+call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict cache for table .*");
+
+
--echo #
--echo # Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS
--echo #
@@ -41,3 +49,44 @@ show create table t1;
ALTER TABLE t1 ADD PRIMARY KEY (f2, f1);
show create table t1;
drop table t1;
+
+--echo #
+--echo # MDEV-22928 InnoDB fails to fetch index type
+--echo # when index mismatch
+--echo #
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
+ index(f1), index(f2))ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1, 1), (2, 2);
+
+connect (con1,localhost,root,,test);
+SET DEBUG_SYNC="alter_table_inplace_after_commit SIGNAL default_signal WAIT_FOR default_done";
+--send
+ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
+connection default;
+set DEBUG_SYNC="now WAIT_FOR default_signal";
+--let $shutdown_timeout=0
+--source include/restart_mysqld.inc
+disconnect con1;
+SHOW KEYS FROM t1;
+DROP TABLE t1;
+remove_files_wildcard $datadir/test #sql-*.frm;
+
+--echo #
+--echo # MDEV-25503 InnoDB hangs on startup during recovery
+--echo #
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1;
+connect (con1,localhost,root,,);
+BEGIN;
+DELETE FROM mysql.innodb_table_stats;
+
+connect (con2,localhost,root,,);
+SET DEBUG_SYNC='inplace_after_index_build SIGNAL blocked WAIT_FOR ever';
+send ALTER TABLE t1 FORCE;
+
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR blocked';
+--let $shutdown_timeout=0
+--source include/restart_mysqld.inc
+SELECT * FROM t1;
+DROP TABLE t1;
+remove_files_wildcard $datadir/test #sql-*.frm;
diff --git a/mysql-test/suite/innodb/t/log_file_name.test b/mysql-test/suite/innodb/t/log_file_name.test
index 2be3f8e7c50..3abd2d65a96 100644
--- a/mysql-test/suite/innodb/t/log_file_name.test
+++ b/mysql-test/suite/innodb/t/log_file_name.test
@@ -55,9 +55,6 @@ let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Ano
--source include/start_mysqld.inc
eval $check_no_innodb;
-let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID;
---source include/search_pattern_in_file.inc
-
let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd;
--source include/search_pattern_in_file.inc
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
index c192fced34e..2e22e2e5a2f 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
@@ -962,3 +962,13 @@ UPDATE t1 SET f6='cascade';
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_3` FOREIGN KEY (`f5`) REFERENCES `t1` (`f6`) ON UPDATE SET NULL)
DROP TABLE t1;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+#
+# MDEV-25536 sym_node->table != NULL in pars_retrieve_table_def
+#
+CREATE TABLE t1 (f1 TEXT,FULLTEXT (f1)) ENGINE=InnoDB;
+ALTER TABLE t1 DISCARD TABLESPACE;
+SET GLOBAL innodb_ft_aux_table='test/t1';
+SELECT * FROM information_schema.innodb_ft_deleted;
+DOC_ID
+DROP TABLE t1;
+SET GLOBAL innodb_ft_aux_table=DEFAULT;
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.opt b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.opt
new file mode 100644
index 00000000000..b38416a0349
--- /dev/null
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.opt
@@ -0,0 +1 @@
+--innodb-ft-deleted
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
index 46b65590298..adc10886d66 100644
--- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
@@ -932,3 +932,13 @@ UPDATE t1 SET f6='cascade';
DROP TABLE t1;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+
+--echo #
+--echo # MDEV-25536 sym_node->table != NULL in pars_retrieve_table_def
+--echo #
+CREATE TABLE t1 (f1 TEXT,FULLTEXT (f1)) ENGINE=InnoDB;
+ALTER TABLE t1 DISCARD TABLESPACE;
+SET GLOBAL innodb_ft_aux_table='test/t1';
+SELECT * FROM information_schema.innodb_ft_deleted;
+DROP TABLE t1;
+SET GLOBAL innodb_ft_aux_table=DEFAULT;
diff --git a/mysql-test/suite/rpl/r/rpl_incompatible_heartbeat.result b/mysql-test/suite/rpl/r/rpl_incompatible_heartbeat.result
new file mode 100644
index 00000000000..51da761c50d
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_incompatible_heartbeat.result
@@ -0,0 +1,17 @@
+include/master-slave.inc
+[connection master]
+connection master;
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@global.debug_dbug= 'd,simulate_pos_4G';
+connection slave;
+include/stop_slave.inc
+CHANGE MASTER TO MASTER_HEARTBEAT_PERIOD=0.001;
+include/start_slave.inc
+connection master;
+SET @@GLOBAL.debug_dbug = @saved_dbug;
+connection slave;
+connection master;
+CREATE TABLE t (f INT) ENGINE=INNODB;
+INSERT INTO t VALUES (10);
+DROP TABLE t;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_incompatible_heartbeat.test b/mysql-test/suite/rpl/t/rpl_incompatible_heartbeat.test
new file mode 100644
index 00000000000..104debe707f
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_incompatible_heartbeat.test
@@ -0,0 +1,44 @@
+# ==== Purpose ====
+#
+# This test verifies that the slave IO thread can process heartbeat events
+# with log_pos values higher than UINT32_MAX.
+#
+# ==== Implementation ====
+#
+# Steps:
+# 0 - Stop slave threads. Configure a small master_heartbeat_period.
+# 1 - Using debug points, simulate a huge binlog offset higher than
+# UINT32_MAX on master.
+#  2 - Start the slave and observe that the slave IO thread is able to
+#      process the offset received through the heartbeat event.
+#
+# ==== References ====
+#
+# MDEV-16146: MariaDB slave stops with incompatible heartbeat
+#
+--source include/have_debug.inc
+--source include/have_innodb.inc
+--source include/have_binlog_format_mixed.inc
+# The test simulates binary log offsets higher than UINT32_MAX
+--source include/have_64bit.inc
+--source include/master-slave.inc
+
+--connection master
+SET @saved_dbug = @@GLOBAL.debug_dbug;
+SET @@global.debug_dbug= 'd,simulate_pos_4G';
+
+--connection slave
+--source include/stop_slave.inc
+CHANGE MASTER TO MASTER_HEARTBEAT_PERIOD=0.001;
+--source include/start_slave.inc
+
+--connection master
+sleep 1;
+SET @@GLOBAL.debug_dbug = @saved_dbug;
+--sync_slave_with_master
+
+--connection master
+CREATE TABLE t (f INT) ENGINE=INNODB;
+INSERT INTO t VALUES (10);
+DROP TABLE t;
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/vcol/r/binlog.result b/mysql-test/suite/vcol/r/binlog.result
index 83382d47511..d4893b7ed3c 100644
--- a/mysql-test/suite/vcol/r/binlog.result
+++ b/mysql-test/suite/vcol/r/binlog.result
@@ -67,4 +67,17 @@ connection master;
DROP VIEW v1;
set @@binlog_row_image=default;
DROP TABLE t1;
+SET SQL_MODE=default;
+CREATE TABLE t1 (pk INT, a VARCHAR(3), b VARCHAR(1) AS (a) VIRTUAL, PRIMARY KEY (pk));
+INSERT IGNORE INTO t1 (pk, a) VALUES (1,'foo'),(2,'bar');
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b' at row 2
+REPLACE INTO t1 (pk) VALUES (2);
+ERROR 22001: Data too long for column 'b' at row 1
+UPDATE IGNORE t1 SET a = NULL;
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b' at row 2
+DROP TABLE t1;
include/rpl_end.inc
diff --git a/mysql-test/suite/vcol/t/binlog.test b/mysql-test/suite/vcol/t/binlog.test
index 95bb4df4cc5..aa939086f12 100644
--- a/mysql-test/suite/vcol/t/binlog.test
+++ b/mysql-test/suite/vcol/t/binlog.test
@@ -51,5 +51,19 @@ DROP VIEW v1;
set @@binlog_row_image=default;
DROP TABLE t1;
+SET SQL_MODE=default;
+
+# MDEV-24782
+# ASAN use-after-poison in Field::pack_int / THD::binlog_update_row
+
+CREATE TABLE t1 (pk INT, a VARCHAR(3), b VARCHAR(1) AS (a) VIRTUAL, PRIMARY KEY (pk));
+INSERT IGNORE INTO t1 (pk, a) VALUES (1,'foo'),(2,'bar');
+--error ER_DATA_TOO_LONG
+REPLACE INTO t1 (pk) VALUES (2);
+UPDATE IGNORE t1 SET a = NULL;
+
+# Cleanup
+DROP TABLE t1;
+
--source include/rpl_end.inc
diff --git a/mysql-test/suite/versioning/r/delete_history.result b/mysql-test/suite/versioning/r/delete_history.result
index cb865a835b3..a5a6de19bc6 100644
--- a/mysql-test/suite/versioning/r/delete_history.result
+++ b/mysql-test/suite/versioning/r/delete_history.result
@@ -154,3 +154,36 @@ select * from t1;
a
1
drop table t1;
+#
+# MDEV-25468 DELETE HISTORY may delete current data on system-versioned table
+#
+create or replace table t1 (x int) with system versioning;
+insert into t1 values (1);
+delete history from t1 before system_time '2039-01-01 23:00';
+select * from t1;
+x
+1
+explain extended delete history from t1 before system_time '2039-01-01 23:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where
+create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00';
+call p;
+select * from t1;
+x
+1
+call p;
+select * from t1;
+x
+1
+drop procedure p;
+prepare stmt from "delete history from t1 before system_time '2039-01-01 23:00'";
+execute stmt;
+select * from t1;
+x
+1
+execute stmt;
+select * from t1;
+x
+1
+drop prepare stmt;
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/trx_id.result b/mysql-test/suite/versioning/r/trx_id.result
index b2bb59a4150..f09284a61ae 100644
--- a/mysql-test/suite/versioning/r/trx_id.result
+++ b/mysql-test/suite/versioning/r/trx_id.result
@@ -498,4 +498,10 @@ add `row_end` bigint unsigned as row end,
add period for system_time(`row_start`,`row_end`),
modify x int after row_start,
with system versioning;
-create or replace database test;
+drop table t;
+#
+# MDEV-20842 Crash using versioning plugin functions after plugin was removed from server
+#
+uninstall plugin test_versioning;
+select trt_begin_ts(0);
+ERROR 42000: FUNCTION test.trt_begin_ts does not exist
diff --git a/mysql-test/suite/versioning/t/delete_history.test b/mysql-test/suite/versioning/t/delete_history.test
index fb5c8520bcb..e3b60011644 100644
--- a/mysql-test/suite/versioning/t/delete_history.test
+++ b/mysql-test/suite/versioning/t/delete_history.test
@@ -169,4 +169,26 @@ insert into t1 values (1);
select * from t1;
drop table t1;
+--echo #
+--echo # MDEV-25468 DELETE HISTORY may delete current data on system-versioned table
+--echo #
+create or replace table t1 (x int) with system versioning;
+insert into t1 values (1);
+delete history from t1 before system_time '2039-01-01 23:00';
+select * from t1;
+explain extended delete history from t1 before system_time '2039-01-01 23:00';
+create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00';
+call p;
+select * from t1;
+call p;
+select * from t1;
+drop procedure p;
+prepare stmt from "delete history from t1 before system_time '2039-01-01 23:00'";
+execute stmt;
+select * from t1;
+execute stmt;
+select * from t1;
+drop prepare stmt;
+drop table t1;
+
--source suite/versioning/common_finish.inc
diff --git a/mysql-test/suite/versioning/t/trx_id.opt b/mysql-test/suite/versioning/t/trx_id.opt
deleted file mode 100644
index b55a187cb13..00000000000
--- a/mysql-test/suite/versioning/t/trx_id.opt
+++ /dev/null
@@ -1 +0,0 @@
---plugin-load-add=$TEST_VERSIONING_SO
diff --git a/mysql-test/suite/versioning/t/trx_id.test b/mysql-test/suite/versioning/t/trx_id.test
index db691a7ec19..7c22aa2a8be 100644
--- a/mysql-test/suite/versioning/t/trx_id.test
+++ b/mysql-test/suite/versioning/t/trx_id.test
@@ -5,6 +5,10 @@ if (!$TEST_VERSIONING_SO)
--source include/have_innodb.inc
--source include/default_charset.inc
+--disable_query_log
+--eval install plugin test_versioning soname '$TEST_VERSIONING_SO'
+--enable_query_log
+
set default_storage_engine= innodb;
create or replace table t1 (
@@ -495,6 +499,11 @@ alter table t add `row_start` bigint unsigned as row start,
add period for system_time(`row_start`,`row_end`),
modify x int after row_start,
with system versioning;
+drop table t;
-
-create or replace database test;
+--echo #
+--echo # MDEV-20842 Crash using versioning plugin functions after plugin was removed from server
+--echo #
+uninstall plugin test_versioning;
+--error ER_SP_DOES_NOT_EXIST
+select trt_begin_ts(0);
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index 3d80f187e19..b9b0e112077 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -59,6 +59,7 @@ int my_delete(const char *name, myf MyFlags)
#if defined (_WIN32)
+
/*
Delete file.
@@ -66,15 +67,14 @@ int my_delete(const char *name, myf MyFlags)
  where another program (or thread in the current program) has the same file
open.
- We're using 2 tricks to prevent the errors.
+  We're using several tricks to prevent the errors:
+
+ - Windows 10 "posix semantics" delete
- 1. A usual Win32's DeleteFile() can with ERROR_SHARED_VIOLATION,
- because the file is opened in another application (often, antivirus or backup)
-
- We avoid the error by using CreateFile() with FILE_FLAG_DELETE_ON_CLOSE, instead
+  - Avoid the sharing-violation error by using CreateFile() with FILE_FLAG_DELETE_ON_CLOSE, instead
of DeleteFile()
- 2. If file which is deleted (delete on close) but has not entirely gone,
+  - If a file that is deleted (delete on close) has not entirely gone,
  because it is still opened by some app, an attempt to create a file with the
  same name would result in yet another error. The workaround here is renaming
  the file to a unique name.
@@ -117,6 +117,27 @@ static int my_win_unlink(const char *name)
DBUG_RETURN(0);
}
+ /*
+    Try the Windows 10 method: delete with "posix semantics" (the file is no longer
+    visible, and creating a file with the same name won't fail, even if the file is still open)
+ */
+ struct
+ {
+ DWORD _Flags;
+ } disp={0x3};
+ /* 0x3 = FILE_DISPOSITION_FLAG_DELETE | FILE_DISPOSITION_FLAG_POSIX_SEMANTICS */
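+  /* 21 == FileDispositionInfoEx in FILE_INFO_BY_HANDLE_CLASS (a numeric literal is
+     used here, presumably so the code also compiles with SDKs lacking the enum name) */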
+
+ handle= CreateFile(name, DELETE, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL, OPEN_EXISTING, 0, NULL);
+ if (handle != INVALID_HANDLE_VALUE)
+ {
+ BOOL ok= SetFileInformationByHandle(handle,
+ (FILE_INFO_BY_HANDLE_CLASS) 21, &disp, sizeof(disp));
+ CloseHandle(handle);
+ if (ok)
+ DBUG_RETURN(0);
+ }
+
handle= CreateFile(name, DELETE, 0, NULL, OPEN_EXISTING, FILE_FLAG_DELETE_ON_CLOSE, NULL);
if (handle != INVALID_HANDLE_VALUE)
{
diff --git a/plugin/versioning/versioning.cc b/plugin/versioning/versioning.cc
index a2f39352ab2..6275fadba44 100644
--- a/plugin/versioning/versioning.cc
+++ b/plugin/versioning/versioning.cc
@@ -176,6 +176,7 @@ static int versioning_plugin_init(void *p __attribute__ ((unused)))
static int versioning_plugin_deinit(void *p __attribute__ ((unused)))
{
DBUG_ENTER("versioning_plugin_deinit");
+ (void) item_create_remove(func_array);
DBUG_RETURN(0);
}
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index 5f183afe8fc..14ef6840d6d 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -611,12 +611,8 @@ then
echo
echo
echo "PLEASE REMEMBER TO SET A PASSWORD FOR THE MariaDB root USER !"
- echo "To do so, start the server, then issue the following commands:"
+ echo "To do so, start the server, then issue the following command:"
echo
- echo "'$bindir/mysqladmin' -u root password 'new-password'"
- echo "'$bindir/mysqladmin' -u root -h $hostname password 'new-password'"
- echo
- echo "Alternatively you can run:"
echo "'$bindir/mysql_secure_installation'"
echo
echo "which will also give you the option of removing the test"
diff --git a/sql/item_create.cc b/sql/item_create.cc
index d73da02b7f1..6ac08813be8 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -5664,6 +5664,21 @@ int item_create_append(Native_func_registry array[])
DBUG_RETURN(0);
}
+int item_create_remove(Native_func_registry array[])
+{
+ Native_func_registry *func;
+
+ DBUG_ENTER("item_create_remove");
+
+ for (func= array; func->builder != NULL; func++)
+ {
+ if (my_hash_delete(& native_functions_hash, (uchar*) func))
+ DBUG_RETURN(1);
+ }
+
+ DBUG_RETURN(0);
+}
+
/*
Empty the hash table for native functions.
Note: this code is not thread safe, and is intended to be used at server
diff --git a/sql/item_create.h b/sql/item_create.h
index c9bdb23dffe..c04adad469c 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -304,6 +304,7 @@ struct Native_func_registry
int item_create_init();
int item_create_append(Native_func_registry array[]);
+int item_create_remove(Native_func_registry array[]);
void item_create_cleanup();
Item *create_func_dyncol_create(THD *thd, List<DYNCALL_CREATE_DEF> &list);
diff --git a/sql/item_vers.cc b/sql/item_vers.cc
index c8f1c793895..792c434b8c3 100644
--- a/sql/item_vers.cc
+++ b/sql/item_vers.cc
@@ -26,6 +26,22 @@
#include "tztime.h"
#include "item.h"
+bool Item_func_history::val_bool()
+{
+ Item_field *f= static_cast<Item_field *>(args[0]);
+ DBUG_ASSERT(f->fixed);
+ DBUG_ASSERT(f->field->flags & VERS_SYS_END_FLAG);
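+  /* A current row keeps row_end at the maximum value; any other row_end means history */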
+ return !f->field->is_max();
+}
+
+void Item_func_history::print(String *str, enum_query_type query_type)
+{
+ str->append(func_name());
+ str->append('(');
+ args[0]->print(str, query_type);
+ str->append(')');
+}
+
Item_func_trt_ts::Item_func_trt_ts(THD *thd, Item* a, TR_table::field_id_t _trt_field) :
Item_datetimefunc(thd, a),
trt_field(_trt_field)
diff --git a/sql/item_vers.h b/sql/item_vers.h
index a42b5a033f2..0799d04a0bc 100644
--- a/sql/item_vers.h
+++ b/sql/item_vers.h
@@ -22,6 +22,36 @@
#pragma interface /* gcc class implementation */
#endif
+class Item_func_history: public Item_bool_func
+{
+public:
+ /*
+ @param a Item_field for row_end system field
+ */
+ Item_func_history(THD *thd, Item *a): Item_bool_func(thd, a)
+ {
+ DBUG_ASSERT(a->type() == Item::FIELD_ITEM);
+ }
+
+ virtual bool val_bool();
+ virtual longlong val_int()
+ {
+ return (val_bool() ? 1 : 0);
+ }
+ bool fix_length_and_dec()
+ {
+ maybe_null= 0;
+ null_value= 0;
+ decimals= 0;
+ max_length= 1;
+ return FALSE;
+ }
+ virtual const char* func_name() const { return "is_history"; }
+ virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_history>(thd, this); }
+};
+
class Item_func_trt_ts: public Item_datetimefunc
{
TR_table::field_id_t trt_field;
diff --git a/sql/log_event.h b/sql/log_event.h
index 4e193232f4b..3e08653a211 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -576,6 +576,14 @@ class String;
#define MARIA_SLAVE_CAPABILITY_MINE MARIA_SLAVE_CAPABILITY_GTID
+/*
+  When the value of 'log_pos' within a Heartbeat_log_event exceeds UINT32_MAX, it
+  cannot be accommodated in the common_header, where 'log_pos' is only 4 bytes.
+  In such cases an 8-byte sub_header holds the larger 'log_pos' value.
+*/
+#define HB_SUB_HEADER_LEN 8
+
+
/**
@enum Log_event_type
@@ -5718,12 +5726,13 @@ bool copy_cache_to_file_wrapped(IO_CACHE *body,
class Heartbeat_log_event: public Log_event
{
public:
- Heartbeat_log_event(const char* buf, uint event_len,
+ uint8 hb_flags;
+ Heartbeat_log_event(const char* buf, ulong event_len,
const Format_description_log_event* description_event);
Log_event_type get_type_code() { return HEARTBEAT_LOG_EVENT; }
bool is_valid() const
{
- return (log_ident != NULL &&
+ return (log_ident != NULL && ident_len <= FN_REFLEN-1 &&
log_pos >= BIN_LOG_HEADER_SIZE);
}
const char * get_log_ident() { return log_ident; }
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 607d5451134..08ac146960d 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -8493,14 +8493,22 @@ void Ignorable_log_event::pack_info(Protocol *protocol)
#if defined(HAVE_REPLICATION)
-Heartbeat_log_event::Heartbeat_log_event(const char* buf, uint event_len,
+Heartbeat_log_event::Heartbeat_log_event(const char* buf, ulong event_len,
const Format_description_log_event* description_event)
:Log_event(buf, description_event)
{
uint8 header_size= description_event->common_header_len;
- ident_len = event_len - header_size;
- set_if_smaller(ident_len,FN_REFLEN-1);
- log_ident= buf + header_size;
+ if (log_pos == 0)
+ {
+ log_pos= uint8korr(buf + header_size);
+ log_ident= buf + header_size + HB_SUB_HEADER_LEN;
+ ident_len= event_len - (header_size + HB_SUB_HEADER_LEN);
+ }
+ else
+ {
+ log_ident= buf + header_size;
+ ident_len = event_len - header_size;
+ }
}
#endif
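/*
  A minimal standalone sketch of the convention introduced above for binlog
  positions larger than UINT32_MAX: the 4-byte log_pos in the common header is
  set to 0, and the full 8-byte position travels in an extra
  HB_SUB_HEADER_LEN-byte sub-header, mirroring the encode path in sql_repl.cc
  and the decode path in log_event_server.cc.  store8()/load8() stand in for
  the server's int8store/uint8korr macros; this is illustrative only.
*/
#include <cstdint>

static const int HB_SUB_HEADER_LEN= 8;

static void store8(unsigned char *to, uint64_t v)
{
  for (int i= 0; i < 8; i++)
    to[i]= (unsigned char) (v >> (8 * i));          /* little-endian */
}

static uint64_t load8(const unsigned char *from)
{
  uint64_t v= 0;
  for (int i= 0; i < 8; i++)
    v|= (uint64_t) from[i] << (8 * i);
  return v;
}

/* Sender side: returns the number of sub-header bytes to append (0 or 8). */
static int encode_log_pos(uint64_t pos, uint32_t *hdr_pos, unsigned char *sub)
{
  if (pos <= UINT32_MAX)
  {
    *hdr_pos= (uint32_t) pos;
    return 0;
  }
  *hdr_pos= 0;                  /* 0 in the common header means "see sub-header" */
  store8(sub, pos);
  return HB_SUB_HEADER_LEN;
}

/* Receiver side: the same rule the new Heartbeat_log_event constructor applies. */
static uint64_t decode_log_pos(uint32_t hdr_pos, const unsigned char *sub)
{
  return hdr_pos ? hdr_pos : load8(sub);
}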
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 913be1aace7..34dd09fd88f 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -117,7 +117,7 @@ public:
bool use_explicit_name)
: Database_qualified_name(db, name), m_explicit_name(use_explicit_name)
{
- if (lower_case_table_names && m_db.str)
+ if (lower_case_table_names && m_db.length)
m_db.length= my_casedn_str(files_charset_info, (char*) m_db.str);
}
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index a7fbdefd073..5f5f5428dad 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2196,7 +2196,11 @@ static bool validate_password(THD *thd, const LEX_CSTRING &user,
else
{
if (!thd->slave_thread &&
- strict_password_validation && has_validation_plugins())
+ strict_password_validation && has_validation_plugins()
+#ifdef WITH_WSREP
+ && !thd->wsrep_applier
+#endif
+ )
{
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--strict-password-validation");
return true;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a345c4827e3..eb0c517ebc4 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -5272,7 +5272,6 @@ bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags,
uint counter;
MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint();
DBUG_ENTER("open_normal_and_derived_tables");
- DBUG_ASSERT(!thd->fill_derived_tables());
if (open_tables(thd, &tables, &counter, flags, &prelocking_strategy) ||
mysql_handle_derived(thd->lex, dt_phases))
goto end;
@@ -5330,7 +5329,7 @@ bool open_tables_only_view_structure(THD *thd, TABLE_LIST *table_list,
MYSQL_OPEN_GET_NEW_TABLE |
(can_deadlock ?
MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)),
- DT_INIT | DT_PREPARE | DT_CREATE));
+ DT_INIT | DT_PREPARE));
/*
Restore old value of sql_command back as it is being looked at in
process_table() function.
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 85fb0367dfb..ac42b50b4a1 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -335,17 +335,6 @@ THD *thd_get_current_thd()
return current_thd;
}
-/**
- Clear errors from the previous THD
-
- @param thd THD object
-*/
-void thd_clear_errors(THD *thd)
-{
- my_errno= 0;
- thd->mysys_var->abort= 0;
-}
-
extern "C" unsigned long long thd_query_id(const MYSQL_THD thd)
{
@@ -1438,7 +1427,10 @@ void THD::change_user(void)
cleanup();
cleanup_done= 0;
reset_killed();
- thd_clear_errors(this);
+ /* Clear errors from the previous THD */
+ my_errno= 0;
+ if (mysys_var)
+ mysys_var->abort= 0;
/* Clear warnings. */
if (!get_stmt_da()->is_warning_info_empty())
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 49a39593f6a..e265246b42b 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3800,10 +3800,6 @@ public:
give_protection_error();
return TRUE;
}
- inline bool fill_derived_tables()
- {
- return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
- }
inline bool fill_information_schema_tables()
{
return !stmt_arena->is_stmt_prepare();
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 796d285d80a..ed3743b029b 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -73,7 +73,6 @@ bool
mysql_handle_derived(LEX *lex, uint phases)
{
bool res= FALSE;
- THD *thd= lex->thd;
DBUG_ENTER("mysql_handle_derived");
DBUG_PRINT("enter", ("phases: 0x%x", phases));
if (!lex->derived_tables)
@@ -88,8 +87,6 @@ mysql_handle_derived(LEX *lex, uint phases)
break;
if (!(phases & phase_flag))
continue;
- if (phase_flag >= DT_CREATE && !thd->fill_derived_tables())
- break;
for (SELECT_LEX *sl= lex->all_selects_list;
sl && !res;
@@ -173,7 +170,6 @@ bool
mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
{
bool res= FALSE;
- THD *thd= lex->thd;
uint8 allowed_phases= (derived->is_merged_derived() ? DT_PHASES_MERGE :
DT_PHASES_MATERIALIZE);
DBUG_ENTER("mysql_handle_single_derived");
@@ -200,8 +196,6 @@ mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
if (phase_flag != DT_PREPARE &&
!(allowed_phases & phase_flag))
continue;
- if (phase_flag >= DT_CREATE && !thd->fill_derived_tables())
- break;
if ((res= (*processors[phase])(lex->thd, lex, derived)))
break;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 9379421e8cd..ab2c479af5c 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1868,9 +1868,10 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
in handler methods for the just read row in record[1].
*/
table->move_fields(table->field, table->record[1], table->record[0]);
- if (table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE))
- goto err;
+ int verr = table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE);
table->move_fields(table->field, table->record[0], table->record[1]);
+ if (verr)
+ goto err;
}
if (info->handle_duplicates == DUP_UPDATE)
{
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 4832260fd86..f16102d918b 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1322,15 +1322,15 @@ void lex_end(LEX *lex)
DBUG_ENTER("lex_end");
DBUG_PRINT("enter", ("lex: %p", lex));
- lex_end_stage1(lex);
- lex_end_stage2(lex);
+ lex_unlock_plugins(lex);
+ lex_end_nops(lex);
DBUG_VOID_RETURN;
}
-void lex_end_stage1(LEX *lex)
+void lex_unlock_plugins(LEX *lex)
{
- DBUG_ENTER("lex_end_stage1");
+ DBUG_ENTER("lex_unlock_plugins");
/* release used plugins */
if (lex->plugins.elements) /* No function call and no mutex if no plugins. */
@@ -1339,33 +1339,23 @@ void lex_end_stage1(LEX *lex)
lex->plugins.elements);
}
reset_dynamic(&lex->plugins);
-
- if (lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)
- {
- /*
- Don't delete lex->sphead, it'll be needed for EXECUTE.
- Note that of all statements that populate lex->sphead
- only SQLCOM_COMPOUND can be PREPAREd
- */
- DBUG_ASSERT(lex->sphead == 0 || lex->sql_command == SQLCOM_COMPOUND);
- }
- else
- {
- sp_head::destroy(lex->sphead);
- lex->sphead= NULL;
- }
-
DBUG_VOID_RETURN;
}
/*
+ Don't delete lex->sphead, it'll be needed for EXECUTE.
+ Note that of all statements that populate lex->sphead
+ only SQLCOM_COMPOUND can be PREPAREd
+
  MASTER INFO parameters (or state) are normally cleared towards the end
of a statement. But in case of PS, the state needs to be preserved during
its lifetime and should only be cleared on PS close or deallocation.
*/
-void lex_end_stage2(LEX *lex)
+void lex_end_nops(LEX *lex)
{
- DBUG_ENTER("lex_end_stage2");
+ DBUG_ENTER("lex_end_nops");
+ sp_head::destroy(lex->sphead);
+ lex->sphead= NULL;
/* Reset LEX_MASTER_INFO */
lex->mi.reset(lex->sql_command == SQLCOM_CHANGE_MASTER);
@@ -2771,6 +2761,11 @@ int Lex_input_stream::scan_ident_middle(THD *thd, Lex_ident_cli_st *str,
yySkip(); // next state does a unget
}
+ yyUnget(); // ptr points now after last token char
+ str->set_ident(m_tok_start, length, is_8bit);
+ m_cpp_text_start= m_cpp_tok_start;
+ m_cpp_text_end= m_cpp_text_start + length;
+
/*
Note: "SELECT _bla AS 'alias'"
    _bla should be considered as an IDENT if the charset hasn't been found.
@@ -2780,28 +2775,17 @@ int Lex_input_stream::scan_ident_middle(THD *thd, Lex_ident_cli_st *str,
DBUG_ASSERT(length > 0);
if (resolve_introducer && m_tok_start[0] == '_')
{
-
- yyUnget(); // ptr points now after last token char
- str->set_ident(m_tok_start, length, false);
-
- m_cpp_text_start= m_cpp_tok_start;
- m_cpp_text_end= m_cpp_text_start + length;
- body_utf8_append(m_cpp_text_start, m_cpp_tok_start + length);
ErrConvString csname(str->str + 1, str->length - 1, &my_charset_bin);
CHARSET_INFO *cs= get_charset_by_csname(csname.ptr(),
MY_CS_PRIMARY, MYF(0));
if (cs)
{
+ body_utf8_append(m_cpp_text_start, m_cpp_tok_start + length);
*introducer= cs;
return UNDERSCORE_CHARSET;
}
- return IDENT;
}
- yyUnget(); // ptr points now after last token char
- str->set_ident(m_tok_start, length, is_8bit);
- m_cpp_text_start= m_cpp_tok_start;
- m_cpp_text_end= m_cpp_text_start + length;
body_utf8_append(m_cpp_text_start);
body_utf8_append_ident(thd, str, m_cpp_text_end);
return is_8bit ? IDENT_QUOTED : IDENT;
@@ -6408,13 +6392,33 @@ void LEX::sp_variable_declarations_init(THD *thd, int nvars)
bool LEX::sp_variable_declarations_set_default(THD *thd, int nvars,
Item *dflt_value_item)
{
- if (!dflt_value_item &&
+ bool has_default_clause= dflt_value_item != NULL;
+ if (!has_default_clause &&
unlikely(!(dflt_value_item= new (thd->mem_root) Item_null(thd))))
return true;
+ sp_variable *first_spvar = NULL;
+
for (uint i= 0 ; i < (uint) nvars ; i++)
{
sp_variable *spvar= spcont->get_last_context_variable((uint) nvars - 1 - i);
+
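+    /*
+      With a shared DEFAULT clause (DECLARE a, b, c <type> DEFAULT <expr>) the
+      expression is evaluated only for the first variable; each remaining
+      variable gets an Item_splocal referring to that first variable, so the
+      default expression is computed exactly once.
+    */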
+ if (i == 0) {
+ first_spvar = spvar;
+ } else if (has_default_clause) {
+ Item_splocal *item =
+ new (thd->mem_root)
+ Item_splocal(thd, &sp_rcontext_handler_local,
+ &first_spvar->name, first_spvar->offset,
+ first_spvar->type_handler(), 0, 0);
+ if (item == NULL)
+ return true; // OOM
+#ifndef DBUG_OFF
+ item->m_sp = sphead;
+#endif
+ dflt_value_item = item;
+ }
+
bool last= i + 1 == (uint) nvars;
spvar->default_value= dflt_value_item;
/* The last instruction is responsible for freeing LEX. */
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 58681e0d267..45a3bf72fa4 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -5031,8 +5031,8 @@ extern void lex_init(void);
extern void lex_free(void);
extern void lex_start(THD *thd);
extern void lex_end(LEX *lex);
-extern void lex_end_stage1(LEX *lex);
-extern void lex_end_stage2(LEX *lex);
+extern void lex_end_nops(LEX *lex);
+extern void lex_unlock_plugins(LEX *lex);
void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex);
int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex);
extern int MYSQLlex(union YYSTYPE *yylval, THD *thd);
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 3f28f818acc..56ff533cfd0 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1552,7 +1552,7 @@ static int mysql_test_select(Prepared_statement *stmt,
}
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_INIT | DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE))
goto error;
thd->lex->used_tables= 0; // Updated by setup_fields
@@ -1614,7 +1614,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
DBUG_RETURN(TRUE);
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_INIT | DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE))
DBUG_RETURN(TRUE);
DBUG_RETURN(setup_fields(thd, Ref_ptr_array(),
*values, COLUMNS_READ, 0, NULL, 0));
@@ -1646,7 +1646,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_INIT | DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE))
goto error;
while ((var= it++))
@@ -1810,7 +1810,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_INIT | DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE))
DBUG_RETURN(TRUE);
select_lex->context.resolve_in_select_list= TRUE;
@@ -4329,8 +4329,10 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
thd->release_transactional_locks();
}
- /* Preserve CHANGE MASTER attributes */
- lex_end_stage1(lex);
+ /* Preserve locked plugins for SET */
+ if (lex->sql_command != SQLCOM_SET_OPTION)
+ lex_unlock_plugins(lex);
+
cleanup_stmt();
thd->restore_backup_statement(this, &stmt_backup);
thd->stmt_arena= old_stmt_arena;
@@ -5174,7 +5176,7 @@ void Prepared_statement::deallocate_immediate()
status_var_increment(thd->status_var.com_stmt_close);
/* It should now be safe to reset CHANGE MASTER parameters */
- lex_end_stage2(lex);
+ lex_end(lex);
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index ce270351dc3..ff2faca5ecf 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -32,6 +32,7 @@
#include "semisync_slave.h"
#include "mysys_err.h"
+
enum enum_gtid_until_state {
GTID_UNTIL_NOT_DONE,
GTID_UNTIL_STOP_AFTER_STANDALONE,
@@ -816,7 +817,7 @@ get_slave_until_gtid(THD *thd, String *out_str)
@param event_coordinates binlog file name and position of the last
real event master sent from binlog
- @note
+ @note
Among three essential pieces of heartbeat data Log_event::when
is computed locally.
The error to send is serious and should force terminating
@@ -830,6 +831,8 @@ static int send_heartbeat_event(binlog_send_info *info,
DBUG_ENTER("send_heartbeat_event");
ulong ev_offset;
+ char sub_header_buf[HB_SUB_HEADER_LEN];
+ bool sub_header_in_use=false;
if (reset_transmit_packet(info, info->flags, &ev_offset, &info->errmsg))
DBUG_RETURN(1);
@@ -850,18 +853,38 @@ static int send_heartbeat_event(binlog_send_info *info,
size_t event_len = ident_len + LOG_EVENT_HEADER_LEN +
(do_checksum ? BINLOG_CHECKSUM_LEN : 0);
int4store(header + SERVER_ID_OFFSET, global_system_variables.server_id);
+ DBUG_EXECUTE_IF("simulate_pos_4G",
+ {
+ const_cast<event_coordinates *>(coord)->pos= (UINT_MAX32 + (ulong)1);
+ DBUG_SET("-d, simulate_pos_4G");
+ };);
+ if (coord->pos <= UINT_MAX32)
+ {
+ int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
+ }
+ else
+ {
+ // Set common_header.log_pos=0 to indicate its overflow
+ int4store(header + LOG_POS_OFFSET, 0);
+ sub_header_in_use= true;
+ int8store(sub_header_buf, coord->pos);
+ event_len+= HB_SUB_HEADER_LEN;
+ }
+
int4store(header + EVENT_LEN_OFFSET, event_len);
int2store(header + FLAGS_OFFSET, 0);
- int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
-
packet->append(header, sizeof(header));
- packet->append(p, ident_len); // log_file_name
+ if (sub_header_in_use)
+ packet->append(sub_header_buf, sizeof(sub_header_buf));
+ packet->append(p, ident_len); // log_file_name
if (do_checksum)
{
char b[BINLOG_CHECKSUM_LEN];
ha_checksum crc= my_checksum(0, (uchar*) header, sizeof(header));
+ if (sub_header_in_use)
+ crc= my_checksum(crc, (uchar*) sub_header_buf, sizeof(sub_header_buf));
crc= my_checksum(crc, (uchar*) p, ident_len);
int4store(b, crc);
packet->append(b, sizeof(b));
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 7e1edf37971..28bb1c625ab 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -859,7 +859,8 @@ Item* period_get_condition(THD *thd, TABLE_LIST *table, SELECT_LEX *select,
cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
break;
case SYSTEM_TIME_BEFORE:
- cond1= newx Item_func_lt(thd, conds->field_end, conds->start.item);
+ cond1= newx Item_func_history(thd, conds->field_end);
+ cond2= newx Item_func_lt(thd, conds->field_end, conds->start.item);
break;
default:
DBUG_ASSERT(0);
@@ -912,7 +913,8 @@ Item* period_get_condition(THD *thd, TABLE_LIST *table, SELECT_LEX *select,
cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
break;
case SYSTEM_TIME_BEFORE:
- cond1= newx Item_func_trt_trx_sees(thd, trx_id0, conds->field_end);
+ cond1= newx Item_func_history(thd, conds->field_end);
+ cond2= newx Item_func_trt_trx_sees(thd, trx_id0, conds->field_end);
break;
default:
DBUG_ASSERT(0);
@@ -10841,6 +10843,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
j->ref.disable_cache= FALSE;
j->ref.null_ref_part= NO_REF_PART;
j->ref.const_ref_part_map= 0;
+ j->ref.uses_splitting= FALSE;
keyuse=org_keyuse;
store_key **ref_key= j->ref.key_copy;
@@ -10889,6 +10892,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
j->ref.null_rejecting|= (key_part_map)1 << i;
keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
+ j->ref.uses_splitting |= (keyuse->validity_ref != NULL);
/*
We don't want to compute heavy expressions in EXPLAIN, an example would
select * from t1 where t1.key=(select thats very heavy);
@@ -23557,7 +23561,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
todo: why does JT_REF_OR_NULL mean filesort? We could find another index
that satisfies the ordering. I would just set ref_key=MAX_KEY here...
*/
- if (tab->type == JT_REF_OR_NULL || tab->type == JT_FT)
+ if (tab->type == JT_REF_OR_NULL || tab->type == JT_FT ||
+ tab->ref.uses_splitting)
goto use_filesort;
}
else if (select && select->quick) // Range found by opt_range
diff --git a/sql/sql_select.h b/sql/sql_select.h
index dd364e441cb..29e42ff8ef8 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -178,6 +178,12 @@ typedef struct st_table_ref
*/
bool disable_cache;
+ /*
+ If true, this ref access was constructed from equalities generated by
+ LATERAL DERIVED (aka GROUP BY splitting) optimization
+ */
+ bool uses_splitting;
+
bool tmp_table_index_lookup_init(THD *thd, KEY *tmp_key, Item_iterator &it,
bool value, uint skip= 0);
bool is_access_triggered();
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d7050bcf2d1..ab62a56b166 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1458,7 +1458,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
if (open_normal_and_derived_tables(thd, table_list,
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL,
- DT_INIT | DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE))
DBUG_VOID_RETURN;
table= table_list->table;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 6330c09e2e8..8fd9265cf19 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6092,11 +6092,18 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
/*
Ensure that we have an exclusive lock on target table if we are creating
non-temporary table.
+    If we're creating a non-temporary table, then either
+    - there is an exclusive lock on the table, or
+    - this was CREATE TABLE ... IF NOT EXISTS, the table was not created
+      because it already existed, and it was locked beforehand.
*/
DBUG_ASSERT((create_info->tmp_table()) ||
thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db.str,
table->table_name.str,
- MDL_EXCLUSIVE));
+ MDL_EXCLUSIVE) ||
+ (thd->locked_tables_mode && pos_in_locked_tables &&
+ create_info->if_not_exists()));
}
DEBUG_SYNC(thd, "create_table_like_before_binlog");
@@ -8161,6 +8168,7 @@ static bool mysql_inplace_alter_table(THD *thd,
{
goto rollback;
}
+ DEBUG_SYNC(thd, "alter_table_inplace_after_commit");
}
/* Notify the engine that the table definition has changed */
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 07dd3b1f6ca..b55bbc7ffac 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -208,7 +208,7 @@ void udf_init()
DBUG_PRINT("info",("init udf record"));
LEX_CSTRING name;
name.str=get_field(&mem, table->field[0]);
- name.length = (uint) strlen(name.str);
+ name.length = (uint) safe_strlen(name.str);
char *dl_name= get_field(&mem, table->field[2]);
bool new_dl=0;
Item_udftype udftype=UDFTYPE_FUNCTION;
@@ -222,12 +222,12 @@ void udf_init()
On windows we must check both FN_LIBCHAR and '/'.
*/
- if (check_valid_path(dl_name, strlen(dl_name)) ||
+ if (!name.str || !dl_name || check_valid_path(dl_name, strlen(dl_name)) ||
check_string_char_length(&name, 0, NAME_CHAR_LEN,
system_charset_info, 1))
{
sql_print_error("Invalid row in mysql.func table for function '%.64s'",
- name.str);
+ safe_str(name.str));
continue;
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 8ea73439480..578a05d18ae 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1712,8 +1712,9 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd)
call in setup_tables()).
*/
- if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
- table_list, select_lex->leaf_tables, FALSE, TRUE))
+ if (setup_tables_and_check_access(thd, &select_lex->context,
+ &select_lex->top_join_list, table_list, select_lex->leaf_tables,
+ FALSE, UPDATE_ACL, SELECT_ACL, TRUE))
DBUG_RETURN(1);
List<Item> *fields= &lex->first_select_lex()->item_list;
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 0936eab8bfb..2ab7ad50004 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -270,9 +270,6 @@ IF(CONNECT_WITH_JDBC)
Mongo2Interface.java Mongo3Interface.java
JavaWrappers.jar)
add_definitions(-DJAVA_SUPPORT)
- IF(CONNECT_WITH_MONGO)
- add_definitions(-DMONGO_SUPPORT)
- ENDIF()
ELSE()
SET(JDBC_LIBRARY "")
ENDIF()
diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp
index 3c33551cb68..a0a421657bd 100644
--- a/storage/connect/bson.cpp
+++ b/storage/connect/bson.cpp
@@ -1138,6 +1138,9 @@ PBVAL BJSON::GetArrayValue(PBVAL bap, int n)
CheckType(bap, TYPE_JAR);
int i = 0;
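+  // A negative n indexes from the end of the array (-1 is the last value)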
+ if (n < 0)
+ n += GetArraySize(bap);
+
for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
if (i == n)
return bvp;
@@ -1348,12 +1351,17 @@ PBVAL BJSON::NewVal(PVAL valp)
/***********************************************************************/
/* Sub-allocate and initialize a BVAL from another BVAL. */
/***********************************************************************/
-PBVAL BJSON::DupVal(PBVAL bvlp) {
- PBVAL bvp = NewVal();
+PBVAL BJSON::DupVal(PBVAL bvlp)
+{
+ if (bvlp) {
+ PBVAL bvp = NewVal();
+
+ *bvp = *bvlp;
+ bvp->Next = 0;
+ return bvp;
+ } else
+ return NULL;
- *bvp = *bvlp;
- bvp->Next = 0;
- return bvp;
} // end of DupVal
/***********************************************************************/
diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp
index 5b4332d6e9e..39bcc18281b 100644
--- a/storage/connect/bsonudf.cpp
+++ b/storage/connect/bsonudf.cpp
@@ -117,7 +117,7 @@ BJNX::BJNX(PGLOBAL g) : BDOC(g)
Jp = NULL;
Nodes = NULL;
Value = NULL;
- MulVal = NULL;
+ //MulVal = NULL;
Jpath = NULL;
Buf_Type = TYPE_STRING;
Long = len;
@@ -148,7 +148,7 @@ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC
Jp = NULL;
Nodes = NULL;
Value = AllocateValue(g, type, len, prec);
- MulVal = NULL;
+ //MulVal = NULL;
Jpath = NULL;
Buf_Type = type;
Long = len;
@@ -273,40 +273,6 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
return true;
} // endif's
-#if 0
- // For calculated arrays, a local Value must be used
- switch (jnp->Op) {
- case OP_NUM:
- jnp->Valp = AllocateValue(g, TYPE_INT);
- break;
- case OP_ADD:
- case OP_MULT:
- case OP_SEP:
- if (!IsTypeChar(Buf_Type))
- jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
- else
- jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
-
- break;
- case OP_MIN:
- case OP_MAX:
- jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
- break;
- case OP_CNC:
- if (IsTypeChar(Buf_Type))
- jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
- else
- jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
-
- break;
- default:
- break;
- } // endswitch Op
-
- if (jnp->Valp)
- MulVal = AllocateValue(g, jnp->Valp);
-#endif // 0
-
return false;
} // end of SetArrayOptions
@@ -452,6 +418,8 @@ PBVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp, int n)
{
PBVAL vlp, jvp = bvp;
+ Jb = false;
+
if (n < Nod -1) {
if (bvp->Type == TYPE_JAR) {
int ars = GetArraySize(bvp);
@@ -3022,7 +2990,7 @@ void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PBVAL bop = (PBVAL)g->Activityp;
if (g->N-- > 0)
- bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0));
+ bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0));
} // end of bson_object_grp_add
@@ -3710,7 +3678,7 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
PUSH_WARNING("CheckMemory error");
goto fin;
} else {
- bnx.Reset();
+ bnx.Reset();
jvp = bnx.MakeValue(args, 0, true);
if (g->Mrr) { // First argument is a constant
@@ -4056,7 +4024,7 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args,
*is_null = 1;
return 0.0;
} else {
- bnx.Reset();
+ bnx.Reset();
jvp = bnx.MakeValue(args, 0);
if ((p = bnx.GetString(jvp))) {
diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h
index bbfd1ceed80..0fe3715617e 100644
--- a/storage/connect/bsonudf.h
+++ b/storage/connect/bsonudf.h
@@ -41,7 +41,6 @@ typedef struct _jnode {
PSZ Key; // The key used for object
OPVAL Op; // Operator used for this node
PVAL CncVal; // To cont value used for OP_CNC
- PVAL Valp; // The internal array VALUE
int Rank; // The rank in array
int Rx; // Read row number
int Nx; // Next to read row number
@@ -153,7 +152,7 @@ protected:
JOUTSTR *Jp;
JNODE *Nodes; // The intermediate objects
PVAL Value;
- PVAL MulVal; // To value used by multiple column
+ //PVAL MulVal; // To value used by multiple column
char *Jpath; // The json path
int Buf_Type;
int Long;
diff --git a/storage/connect/cmgoconn.cpp b/storage/connect/cmgoconn.cpp
index edee1874b97..474f940a8cf 100644
--- a/storage/connect/cmgoconn.cpp
+++ b/storage/connect/cmgoconn.cpp
@@ -150,6 +150,12 @@ void CMgoConn::mongo_init(bool init)
/***********************************************************************/
bool CMgoConn::Connect(PGLOBAL g)
{
+ if (!Pcg->Db_name || !Pcg->Coll_name) {
+ // This would crash in mongoc_client_get_collection
+ strcpy(g->Message, "Missing DB or collection name");
+ return true;
+ } // endif name
+
if (!IsInit)
#if defined(__WIN__)
__try {
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 95cbcd2b336..85781925e31 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -170,7 +170,7 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.07.0002 January 27, 2021";
+ char version[]= "Version 1.07.0002 March 22, 2021";
#if defined(__WIN__)
char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__;
static char slash= '\\';
@@ -275,6 +275,10 @@ static handler *connect_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
+static bool checkPrivileges(THD* thd, TABTYPE type, PTOS options,
+ const char* db, TABLE* table = NULL,
+ bool quick = false);
+
static int connect_assisted_discovery(handlerton *hton, THD* thd,
TABLE_SHARE *table_s,
HA_CREATE_INFO *info);
@@ -762,8 +766,8 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir)
For engines that have two file name extensions (separate meta/index file
and data file), the order of elements is relevant. First element of engine
- file name extensions array should be meta/index file extention. Second
- element - data file extention. This order is assumed by
+ file name extensions array should be meta/index file extension. Second
+ element - data file extension. This order is assumed by
prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
@see
@@ -1299,9 +1303,9 @@ PCSZ GetStringTableOption(PGLOBAL g, PTOS options, PCSZ opname, PCSZ sdef)
else if (!stricmp(opname, "Data_charset"))
opval= options->data_charset;
else if (!stricmp(opname, "Http") || !stricmp(opname, "URL"))
- opval = options->http;
+ opval= options->http;
else if (!stricmp(opname, "Uri"))
- opval = options->uri;
+ opval= options->uri;
if (!opval && options->oplist)
opval= GetListOption(g, opname, options->oplist);
@@ -1615,7 +1619,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Opt= (fop) ? (int)fop->opt : 0;
if (fp->field_length >= 0) {
- pcf->Length = fp->field_length;
+ pcf->Length= fp->field_length;
// length is bytes for Connect, not characters
if (!strnicmp(chset, "utf8", 4))
@@ -1630,7 +1634,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Offset= (int)fop->offset;
pcf->Freq= (int)fop->freq;
pcf->Datefmt= (char*)fop->dateformat;
- pcf->Fieldfmt = fop->fieldformat ? (char*)fop->fieldformat
+ pcf->Fieldfmt= fop->fieldformat ? (char*)fop->fieldformat
: fop->jsonpath ? (char*)fop->jsonpath : (char*)fop->xmlpath;
} else {
pcf->Offset= -1;
@@ -4511,11 +4515,9 @@ int ha_connect::delete_all_rows()
} // end of delete_all_rows
-bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool quick)
+static bool checkPrivileges(THD *thd, TABTYPE type, PTOS options,
+ const char *db, TABLE *table, bool quick)
{
- const char *db= (dbn && *dbn) ? dbn : NULL;
- TABTYPE type=GetRealType(options);
-
switch (type) {
case TAB_UNDEF:
// case TAB_CATLG:
@@ -4598,6 +4600,16 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0));
return true;
+} // end of checkPrivileges
+
+// Check whether the user has required (file) privileges
+bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn,
+ bool quick)
+{
+ const char *db= (dbn && *dbn) ? dbn : NULL;
+ TABTYPE type=GetRealType(options);
+
+ return checkPrivileges(thd, type, options, db, table, quick);
} // end of check_privileges
// Check that two indexes are equivalent
@@ -5406,12 +5418,7 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
int len, int dec, char* key, uint tm, const char* rem,
char* dft, char* xtra, char* fmt, int flag, bool dbf, char v)
{
-#if defined(DEVELOPMENT)
- // Some client programs regard CHAR(36) as GUID
- char var = (len > 255 || len == 36) ? 'V' : v;
-#else
char var = (len > 255) ? 'V' : v;
-#endif
bool q, error = false;
const char* type = PLGtoMYSQLtype(typ, dbf, var);
@@ -5745,6 +5752,29 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
#endif // REST_SUPPORT
} // endif ttp
+ if (fn && *fn)
+ switch (ttp) {
+ case TAB_FMT:
+ case TAB_DBF:
+ case TAB_XML:
+ case TAB_INI:
+ case TAB_VEC:
+ case TAB_REST:
+ case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ if (checkPrivileges(thd, ttp, topt, db)) {
+ strcpy(g->Message, "This operation requires the FILE privilege");
+ rc= HA_ERR_INTERNAL_ERROR;
+ goto err;
+ } // endif check_privileges
+
+ break;
+ default:
+ break;
+ } // endswitch ttp
+
if (!tab) {
if (ttp == TAB_TBL) {
// Make tab the first table of the list
diff --git a/storage/connect/javaconn.cpp b/storage/connect/javaconn.cpp
index 8dc4add9f49..eda5d31c80b 100644
--- a/storage/connect/javaconn.cpp
+++ b/storage/connect/javaconn.cpp
@@ -1,7 +1,7 @@
/************ Javaconn C++ Functions Source Code File (.CPP) ***********/
-/* Name: JAVAConn.CPP Version 1.0 */
+/* Name: JAVAConn.CPP Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2021 */
/* */
/* This file contains the JAVA connection classes functions. */
/***********************************************************************/
@@ -400,24 +400,35 @@ bool JAVAConn::Open(PGLOBAL g)
jpop->Append(ClassPath);
} // endif ClassPath
- // Java source will be compiled as a jar file installed in the plugin dir
+#if 0
+ // Java source will be compiled as a jar file installed in the plugin dir
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("JdbcInterface.jar");
+#endif // 0
// All wrappers are pre-compiled in JavaWrappers.jar in the plugin dir
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("JavaWrappers.jar");
+#if defined(MONGO_SUPPORT)
+ jpop->Append(sep);
+ jpop->Append(GetPluginDir());
+ jpop->Append("Mongo3.jar");
+ jpop->Append(sep);
+ jpop->Append(GetPluginDir());
+ jpop->Append("Mongo2.jar");
+#endif // MONGO_SUPPORT
+
if ((cp = getenv("CLASSPATH"))) {
jpop->Append(sep);
jpop->Append(cp);
} // endif cp
if (trace(1)) {
- htrc("ClassPath=%s\n", ClassPath);
- htrc("CLASSPATH=%s\n", cp);
+ htrc("ClassPath=%s\n", ClassPath ? ClassPath : "null");
+ htrc("CLASSPATH=%s\n", cp ? cp : "null");
htrc("%s\n", jpop->GetStr());
} // endif trace
diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp
index 8a12fffbd05..0af91bc78cd 100644
--- a/storage/connect/jmgoconn.cpp
+++ b/storage/connect/jmgoconn.cpp
@@ -121,20 +121,21 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper)
/***********************************************************************/
void JMgoConn::AddJars(PSTRG jpop, char sep)
{
-#if defined(BSON_SUPPORT)
+#if defined(DEVELOPMENT)
if (m_Version == 2) {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin");
- jpop->Append(sep);
+// jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-2.13.3.jar");
} else {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap3/bin");
+// jpop->Append(sep);
// jpop->Append("C:/Program Files/MariaDB 10.1/lib/plugin/JavaWrappers.jar");
- jpop->Append(sep);
+// jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar");
} // endif m_Version
-#endif // BSON_SUPPORT
+#endif // DEVELOPMENT
} // end of AddJars
/***********************************************************************/
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index bd9c4fac7a1..e9925ee959a 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -54,15 +54,24 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e);
#endif // SE_CATCH
char *GetJsonNull(void);
+int GetDefaultPrec(void);
/***********************************************************************/
/* IsNum: check whether this string is all digits. */
/***********************************************************************/
-bool IsNum(PSZ s) {
- for (char* p = s; *p; p++)
+bool IsNum(PSZ s)
+{
+ char* p = s;
+
+ if (*p == '-')
+ p++;
+
+ if (*p == ']')
+ return false;
+ else for (; *p; p++)
if (*p == ']')
break;
- else if (!isdigit(*p) || *p == '-')
+ else if (!isdigit(*p))
return false;
return true;
@@ -1257,6 +1266,8 @@ PJVAL JARRAY::GetArrayValue(int i)
{
if (Mvals && i >= 0 && i < Size)
return Mvals[i];
+ else if (Mvals && i < 0 && i >= -Size)
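+    // A negative i counts back from the end of the array (-1 is the last element)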
+ return Mvals[Size + i];
else
return NULL;
} // end of GetValue
@@ -1752,7 +1763,7 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll)
void JVALUE::SetFloat(PGLOBAL g, double f)
{
F = f;
- Nd = 6;
+ Nd = GetDefaultPrec();
DataType = TYPE_DBL;
} // end of SetFloat
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 79e1534c51c..cd62b2efdd1 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -72,7 +72,7 @@ JSNX::JSNX(PGLOBAL g, PJSON row, int type, int len, int prec, my_bool wr)
Jp = NULL;
Nodes = NULL;
Value = AllocateValue(g, type, len, prec);
- MulVal = NULL;
+ //MulVal = NULL;
Jpath = NULL;
Buf_Type = type;
Long = len;
@@ -198,38 +198,6 @@ my_bool JSNX::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm)
return true;
} // endif's
- // For calculated arrays, a local Value must be used
- switch (jnp->Op) {
- case OP_NUM:
- jnp->Valp = AllocateValue(g, TYPE_INT);
- break;
- case OP_ADD:
- case OP_MULT:
- case OP_SEP:
- if (!IsTypeChar(Buf_Type))
- jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
- else
- jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
-
- break;
- case OP_MIN:
- case OP_MAX:
- jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
- break;
- case OP_CNC:
- if (IsTypeChar(Buf_Type))
- jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
- else
- jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
-
- break;
- default:
- break;
- } // endswitch Op
-
- if (jnp->Valp)
- MulVal = AllocateValue(g, jnp->Valp);
-
return false;
} // end of SetArrayOptions
@@ -312,7 +280,7 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
} // endfor i, p
Nod = i;
- MulVal = AllocateValue(g, Value);
+ //MulVal = AllocateValue(g, Value);
if (trace(1))
for (i = 0; i < Nod; i++)
@@ -324,23 +292,6 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
} // end of ParseJpath
/*********************************************************************************/
-/* MakeJson: Serialize the json item and set value to it. */
-/*********************************************************************************/
-PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp)
-{
- if (Value->IsTypeNum()) {
- strcpy(g->Message, "Cannot make Json for a numeric value");
- Value->Reset();
- } else if (jsp->GetType() != TYPE_JAR && jsp->GetType() != TYPE_JOB) {
- strcpy(g->Message, "Target is not an array or object");
- Value->Reset();
- } else
- Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
-
- return Value;
-} // end of MakeJson
-
-/*********************************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/*********************************************************************************/
void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
@@ -350,6 +301,7 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
if (Jb) {
vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0));
+ Jb = false;
} else switch (val->GetValType()) {
case TYPE_DTM:
case TYPE_STRG:
@@ -396,6 +348,52 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
} // end of SetJsonValue
/*********************************************************************************/
+/* MakeJson: Serialize the json item and set value to it. */
+/*********************************************************************************/
+PJVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp, int n)
+{
+ Jb = false;
+
+ if (Value->IsTypeNum()) {
+ strcpy(g->Message, "Cannot make Json for a numeric value");
+ return NULL;
+ } else if (jsp->GetType() != TYPE_JAR && jsp->GetType() != TYPE_JOB) {
+ strcpy(g->Message, "Target is not an array or object");
+ return NULL;
+ } else if (n < Nod -1) {
+ if (jsp->GetType() == TYPE_JAR) {
+ int ars = jsp->GetSize(false);
+ PJNODE jnp = &Nodes[n];
+ PJAR jarp = new(g) JARRAY;
+
+ jnp->Op = OP_EQ;
+
+ for (jnp->Rank = 0; jnp->Rank < ars; jnp->Rank++)
+ jarp->AddArrayValue(g, GetRowValue(g, jsp, n));
+
+ jarp->InitArray(g);
+ jnp->Op = OP_XX;
+ jnp->Rank = 0;
+ jsp = jarp;
+ } else if(jsp->GetType() == TYPE_JOB) {
+ PJSON jp;
+ PJOB jobp = new(g) JOBJECT;
+
+ for (PJPR prp = ((PJOB)jsp)->GetFirst(); prp; prp = prp->Next) {
+ jp = (prp->Val->DataType == TYPE_JSON) ? prp->Val->Jsp : prp->Val;
+ jobp->SetKeyValue(g, GetRowValue(g, jp, n + 1), prp->Key);
+ } // endfor prp
+
+ jsp = jobp;
+ } // endif Type
+
+ } // endif
+
+ Jb = true;
+ return new(g) JVALUE(jsp);
+} // end of MakeJson
+
+/*********************************************************************************/
/* GetJson: */
/*********************************************************************************/
PJVAL JSNX::GetJson(PGLOBAL g)
@@ -437,8 +435,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
val = new(g) JVALUE(g, Value);
return val;
} else if (Nodes[i].Op == OP_XX) {
- Jb = b;
- return new(g)JVALUE(row);
+ return MakeJson(g, row, i);
} else switch (row->GetType()) {
case TYPE_JOB:
if (!Nodes[i].Key) {
@@ -505,6 +502,88 @@ PVAL JSNX::ExpandArray(PGLOBAL g, PJAR arp, int n)
} // end of ExpandArray
/*********************************************************************************/
+/* Get the value used for calculating the array. */
+/*********************************************************************************/
+PVAL JSNX::GetCalcValue(PGLOBAL g, PJAR jap, int n)
+{
+ // For calculated arrays, a local Value must be used
+ int lng = 0;
+ short type, prec = 0;
+ bool b = n < Nod - 1;
+ PVAL valp;
+ PJVAL vlp, vp;
+ OPVAL op = Nodes[n].Op;
+
+ switch (op) {
+ case OP_NUM:
+ type = TYPE_INT;
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ if (!IsTypeNum(Buf_Type)) {
+ type = TYPE_INT;
+ prec = 0;
+
+ for (vlp = jap->GetArrayValue(0); vlp; vlp = vlp->Next) {
+ vp = (b && vlp->GetJsp()) ? GetRowValue(g, vlp, n + 1) : vlp;
+
+ switch (vp->DataType) {
+ case TYPE_BINT:
+ if (type == TYPE_INT)
+ type = TYPE_BIGINT;
+
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ type = TYPE_DOUBLE;
+ prec = MY_MAX(prec, vp->Nd);
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ } // endfor vlp
+
+ } else {
+ type = Buf_Type;
+ prec = GetPrecision();
+ } // endif Buf_Type
+
+ break;
+ case OP_SEP:
+ if (IsTypeChar(Buf_Type)) {
+ type = TYPE_DOUBLE;
+ prec = 2;
+ } else {
+ type = Buf_Type;
+ prec = GetPrecision();
+ } // endif Buf_Type
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ type = Buf_Type;
+ lng = Long;
+ prec = GetPrecision();
+ break;
+ case OP_CNC:
+ type = TYPE_STRING;
+
+ if (IsTypeChar(Buf_Type)) {
+ lng = (Long) ? Long : 512;
+ prec = GetPrecision();
+ } else
+ lng = 512;
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ return valp = AllocateValue(g, type, lng, prec);
+} // end of GetCalcValue
+
+/*********************************************************************************/
/* CalculateArray: */
/*********************************************************************************/
PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
@@ -512,7 +591,8 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
int i, ars = arp->size(), nv = 0;
bool err;
OPVAL op = Nodes[n].Op;
- PVAL val[2], vp = Nodes[n].Valp;
+ PVAL val[2], vp = GetCalcValue(g, arp, n);
+ PVAL mulval = AllocateValue(g, vp);
PJVAL jvrp, jvp;
JVALUE jval;
@@ -545,9 +625,9 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
SetJsonValue(g, vp, jvp);
continue;
} else
- SetJsonValue(g, MulVal, jvp);
+ SetJsonValue(g, mulval, jvp);
- if (!MulVal->IsNull()) {
+ if (!mulval->IsNull()) {
switch (op) {
case OP_CNC:
if (Nodes[n].CncVal) {
@@ -555,18 +635,18 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
err = vp->Compute(g, val, 1, op);
} // endif CncVal
- val[0] = MulVal;
+ val[0] = mulval;
err = vp->Compute(g, val, 1, op);
break;
// case OP_NUM:
case OP_SEP:
- val[0] = Nodes[n].Valp;
- val[1] = MulVal;
+ val[0] = vp;
+ val[1] = mulval;
err = vp->Compute(g, val, 2, OP_ADD);
break;
default:
- val[0] = Nodes[n].Valp;
- val[1] = MulVal;
+ val[0] = vp;
+ val[1] = mulval;
err = vp->Compute(g, val, 2, op);
} // endswitch Op
@@ -588,9 +668,9 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
if (op == OP_SEP) {
// Calculate average
- MulVal->SetValue(nv);
+ mulval->SetValue(nv);
val[0] = vp;
- val[1] = MulVal;
+ val[1] = mulval;
if (vp->Compute(g, val, 2, OP_DIV))
vp->Reset();
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index 689a02ebbc5..ada0dbcd96b 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -44,7 +44,6 @@ typedef struct _jnode {
PSZ Key; // The key used for object
OPVAL Op; // Operator used for this node
PVAL CncVal; // To cont value used for OP_CNC
- PVAL Valp; // The internal array VALUE
int Rank; // The rank in array
int Rx; // Read row number
int Nx; // Next to read row number
@@ -334,8 +333,9 @@ protected:
my_bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm);
PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
+ PVAL GetCalcValue(PGLOBAL g, PJAR bap, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
- PVAL MakeJson(PGLOBAL g, PJSON jsp);
+ PJVAL MakeJson(PGLOBAL g, PJSON jsp, int i);
void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
my_bool CompareValues(PJVAL v1, PJVAL v2);
@@ -358,7 +358,7 @@ protected:
JOUTSTR *Jp;
JNODE *Nodes; // The intermediate objects
PVAL Value;
- PVAL MulVal; // To value used by multiple column
+ //PVAL MulVal; // To value used by multiple column
char *Jpath; // The json path
int Buf_Type;
int Long;
diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result
index 8315fc3f3bf..e3ee84d9084 100644
--- a/storage/connect/mysql-test/connect/r/json_udf.result
+++ b/storage/connect/mysql-test/connect/r/json_udf.result
@@ -322,7 +322,7 @@ JsonGet_String(Json_Make_Array(45,28,36,45,89),'3')
45
SELECT JsonGet_String(Json_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",JsonGet_String(Json_Make_Array(45,28,36,45,89),'[+]') "sum";
list egal sum
-45+28+36+45+89 = 243.00
+45+28+36+45+89 = 243
SELECT JsonGet_String(Json_Make_Array(Json_Make_Array(45,28),Json_Make_Array(36,45,89)),'1.0');
JsonGet_String(Json_Make_Array(Json_Make_Array(45,28),Json_Make_Array(36,45,89)),'1.0')
36
@@ -349,10 +349,10 @@ Warnings:
Warning 1105
SELECT department, JsonGet_String(Json_Make_Object(department, Json_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
department Sumsal
-0021 28500.00
-0318 72230.00
-0319 89800.95
-2452 45900.00
+0021 28500.000000
+0318 72230.000000
+0319 89800.950000
+2452 45900.000000
SELECT JsonGet_Int(@j1, '4');
JsonGet_Int(@j1, '4')
89
diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp
index db63b8e78db..8477d22d364 100644
--- a/storage/connect/tabbson.cpp
+++ b/storage/connect/tabbson.cpp
@@ -1,6 +1,6 @@
/************* tabbson C++ Program Source Code File (.CPP) *************/
-/* PROGRAM NAME: tabbson Version 1.0 */
-/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* PROGRAM NAME: tabbson Version 1.1 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
 /* This program contains the BSON class DB execution routines. */
/***********************************************************************/
@@ -158,8 +158,9 @@ BSONDISC::BSONDISC(PGLOBAL g, uint* lg)
bp = NULL;
row = NULL;
sep = NULL;
+ strfy = NULL;
i = n = bf = ncol = lvl = sz = limit = 0;
- all = strfy = false;
+ all = false;
} // end of BSONDISC constructor
int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
@@ -173,7 +174,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
sep = GetStringTableOption(g, topt, "Separator", ".");
sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
limit = GetIntegerTableOption(g, topt, "Limit", 10);
- strfy = GetBooleanTableOption(g, topt, "Stringify", false);
+ strfy = GetStringTableOption(g, topt, "Stringify", NULL);
/*********************************************************************/
/* Open the input file. */
@@ -186,6 +187,9 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif // ZIP_SUPPORT
tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
+ if (!tdp->Fn && topt->http)
+ tdp->Fn = GetStringTableOption(g, topt, "Subtype", NULL);
+
if (!(tdp->Database = SetPath(g, db)))
return 0;
@@ -199,7 +203,8 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (!tdp->Fn && !tdp->Uri) {
strcpy(g->Message, MSG(MISSING_FNAME));
return 0;
- } // endif Fn
+ } else
+ topt->subtype = NULL;
if (tdp->Fn) {
// We used the file name relative to recorded datapath
@@ -428,7 +433,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
jcol.Cbn = true;
- } else if (j < lvl) {
+ } else if (j < lvl && !(strfy && !stricmp(strfy, colname))) {
if (!fmt[bf])
strcat(fmt, colname);
@@ -499,7 +504,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
} // endswitch Type
} else if (lvl >= 0) {
- if (strfy) {
+ if (strfy && !stricmp(strfy, colname)) {
if (!fmt[bf])
strcat(fmt, colname);
@@ -731,7 +736,6 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
case TYPE_FLOAT:
switch (vp->GetType()) {
case TYPE_STRING:
- case TYPE_DATE:
case TYPE_DECIM:
vp->SetValue_psz(GetString(jvp));
break;
@@ -750,6 +754,16 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
vp->SetPrec(jvp->Nd);
break;
+ case TYPE_DATE:
+ if (jvp->Type == TYPE_STRG) {
+ if (!((DTVAL*)vp)->IsFormatted())
+ ((DTVAL*)vp)->SetFormat(g, "YYYY-MM-DDThh:mm:ssZ", 20, 0);
+
+ vp->SetValue_psz(GetString(jvp));
+ } else
+ vp->SetValue(GetInteger(jvp));
+
+ break;
default:
sprintf(G->Message, "Unsupported column type %d", vp->GetType());
throw 888;
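The new TYPE_DATE branch (here and in the matching tabjson.cpp hunk further down) accepts either an ISO-8601 string, for which the "YYYY-MM-DDThh:mm:ssZ" format is installed before SetValue_psz, or an integer timestamp. A rough illustration of the same branching with invented names, using std::get_time instead of CONNECT's DTVAL; note that timegm() is a widespread but non-standard extension:

#include <ctime>
#include <iomanip>
#include <sstream>
#include <string>

// Hypothetical stand-in for the JSON value: either a string or an integer.
struct JsonDate { bool is_string; std::string s; long long i; };

// Mirror of the new DATE branch: parse "YYYY-MM-DDThh:mm:ssZ" strings,
// otherwise treat the value as a Unix timestamp.
static std::time_t to_time(const JsonDate& jv)
{
  if (jv.is_string) {
    std::tm tm{};
    std::istringstream in(jv.s);
    in >> std::get_time(&tm, "%Y-%m-%dT%H:%M:%SZ");    // ISO-8601, UTC suffix
    return in.fail() ? std::time_t(-1) : timegm(&tm);  // timegm: UTC tm -> time_t (non-standard)
  }
  return static_cast<std::time_t>(jv.i);                // already numeric
}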
@@ -881,7 +895,7 @@ PBVAL BCUTIL::GetRowValue(PGLOBAL g, PBVAL row, int i)
} // endfor i
return bvp;
-} // end of GetColumnValue
+} // end of GetRowValue
/***********************************************************************/
/* GetColumnValue: */
diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h
index adb02dd28e4..e9c5cc6477f 100644
--- a/storage/connect/tabbson.h
+++ b/storage/connect/tabbson.h
@@ -44,10 +44,11 @@ public:
PBPR row;
PBTUT bp;
PCSZ sep;
+ PCSZ strfy;
char colname[65], fmt[129], buf[16];
uint *length;
int i, n, bf, ncol, lvl, sz, limit;
- bool all, strfy;
+ bool all;
}; // end of BSONDISC
/***********************************************************************/
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 402a0a1de37..07c54e8a0fb 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -1,6 +1,6 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: tabjson Version 1.8 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2021 */
 /* This program contains the JSON class DB execution routines. */
/***********************************************************************/
#undef BSON_SUPPORT
@@ -9,6 +9,8 @@
/* Include relevant sections of the MariaDB header file. */
/***********************************************************************/
#include <my_global.h>
+#include <mysqld.h>
+#include <sql_error.h>
/***********************************************************************/
/* Include application header files: */
@@ -160,22 +162,24 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
jsp = NULL;
row = NULL;
sep = NULL;
+ strfy = NULL;
i = n = bf = ncol = lvl = sz = limit = 0;
- all = strfy = false;
+ all = false;
} // end of JSONDISC constructor
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
char filename[_MAX_PATH];
+ size_t reclg = 0;
bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
PGLOBAL G = NULL;
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
sep = GetStringTableOption(g, topt, "Separator", ".");
- sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
+ strfy = GetStringTableOption(g, topt, "Stringify", NULL);
+ sz = GetIntegerTableOption(g, topt, "Jsize", 250);
limit = GetIntegerTableOption(g, topt, "Limit", 10);
- strfy = GetBooleanTableOption(g, topt, "Stringify", false);
/*********************************************************************/
/* Open the input file. */
@@ -187,6 +191,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif // ZIP_SUPPORT
tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
+ if (!tdp->Fn && topt->http)
+ tdp->Fn = GetStringTableOption(g, topt, "Subtype", NULL);
+
if (!(tdp->Database = SetPath(g, db)))
return 0;
@@ -200,7 +207,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (!tdp->Fn && !tdp->Uri) {
strcpy(g->Message, MSG(MISSING_FNAME));
return 0;
- } // endif Fn
+ } else
+ topt->subtype = NULL;
if (tdp->Fn) {
// We used the file name relative to recorded datapath
@@ -248,7 +256,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
} else {
if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))
{
- if (!mgo) {
+ if (!mgo && !tdp->Uri) {
sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty);
return 0;
} else
@@ -310,7 +318,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
-// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB
+ if (tdp->Pretty != 2)
+ reclg = strlen(tjnp->To_Line);
+
jsp = tjnp->Row;
} // endswitch ReadDB
@@ -361,7 +371,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
-// jsp = tjnp->FindRow(g);
+ if (tdp->Pretty != 2 && reclg < strlen(tjnp->To_Line))
+ reclg = strlen(tjnp->To_Line);
+
jsp = tjnp->Row;
} // endswitch ReadDB
@@ -373,8 +385,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
} // endfor i
- if (tdp->Pretty != 2)
+ if (tdp->Pretty != 2) {
+ if (!topt->lrecl)
+ topt->lrecl = reclg + 10;
+
tjnp->CloseDB(g);
+ } // endif Pretty
return n;
@@ -426,7 +442,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
jcol.Cbn = true;
- } else if (j < lvl) {
+ } else if (j < lvl && !(strfy && !stricmp(strfy, colname))) {
if (!fmt[bf])
strcat(fmt, colname);
@@ -480,9 +496,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(strncat(colname, "_", n), buf, n - 1);
} // endif all
- } else {
+ } else
strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
- }
if (Find(g, jar->GetArrayValue(k), "", j))
return true;
@@ -497,7 +512,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} // endswitch Type
} else if (lvl >= 0) {
- if (strfy) {
+ if (strfy && !stricmp(strfy, colname)) {
if (!fmt[bf])
strcat(fmt, colname);
@@ -1611,7 +1626,7 @@ PSZ JSONCOL::GetJpath(PGLOBAL g, bool proj)
/***********************************************************************/
/* MakeJson: Serialize the json item and set value to it. */
/***********************************************************************/
-PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
+PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n)
{
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
@@ -1622,6 +1637,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
} // endif Warned
Value->Reset();
+ return Value;
#if 0
} else if (Value->GetType() == TYPE_BIN) {
if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
@@ -1635,13 +1651,66 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
Value->SetValue_char(NULL, 0);
} // endif Clen
#endif // 0
- } else
- Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
+ } else if (n < Nod - 1) {
+ if (jsp->GetType() == TYPE_JAR) {
+ int ars = jsp->GetSize(false);
+ PJNODE jnp = &Nodes[n];
+ PJAR jvp = new(g) JARRAY;
+
+ for (jnp->Rank = 0; jnp->Rank < ars; jnp->Rank++)
+ jvp->AddArrayValue(g, GetRowValue(g, jsp, n));
+
+ jnp->Rank = 0;
+ jvp->InitArray(g);
+ jsp = jvp;
+ } else if (jsp->Type == TYPE_JOB) {
+ PJOB jvp = new(g) JOBJECT;
+
+ for (PJPR prp = ((PJOB)jsp)->GetFirst(); prp; prp = prp->Next)
+ jvp->SetKeyValue(g, GetRowValue(g, prp->Val, n + 1), prp->Key);
+ jsp = jvp;
+ } // endif Type
+
+ } // endif
+
+ Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
} // end of MakeJson
/***********************************************************************/
+/* GetRowValue: */
+/***********************************************************************/
+PJVAL JSONCOL::GetRowValue(PGLOBAL g, PJSON row, int i)
+{
+ int n = Nod - 1;
+ PJVAL val = NULL;
+
+ for (; i < Nod && row; i++) {
+ switch (row->GetType()) {
+ case TYPE_JOB:
+ val = (Nodes[i].Key) ? ((PJOB)row)->GetKeyValue(Nodes[i].Key) : NULL;
+ break;
+ case TYPE_JAR:
+ val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank);
+ break;
+ case TYPE_JVAL:
+ val = (PJVAL)row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->GetType());
+ val = NULL;
+ } // endswitch Type
+
+ if (i < Nod-1)
+ row = (val) ? val->GetJson() : NULL;
+
+ } // endfor i
+
+ return val;
+} // end of GetRowValue
+
+/***********************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/***********************************************************************/
void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
@@ -1657,7 +1726,6 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
case TYPE_DTM:
switch (vp->GetType()) {
case TYPE_STRING:
- case TYPE_DATE:
vp->SetValue_psz(jvp->GetString(g));
break;
case TYPE_INT:
@@ -1675,7 +1743,17 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
vp->SetPrec(jvp->Nd);
break;
- default:
+ case TYPE_DATE:
+ if (jvp->GetValType() == TYPE_STRG) {
+ if (!((DTVAL*)vp)->IsFormatted())
+ ((DTVAL*)vp)->SetFormat(g, "YYYY-MM-DDThh:mm:ssZ", 20, 0);
+
+ vp->SetValue_psz(jvp->GetString(g));
+ } else
+ vp->SetValue(jvp->GetInteger());
+
+ break;
+ default:
sprintf(g->Message, "Unsupported column type %d\n", vp->GetType());
throw 888;
} // endswitch Type
@@ -1741,7 +1819,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
return(Value);
} else if (Nodes[i].Op == OP_XX) {
- return MakeJson(G, row);
+ return MakeJson(G, row, i);
} else switch (row->GetType()) {
case TYPE_JOB:
if (!Nodes[i].Key) {
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index b47dc9b0665..1062928d410 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -1,7 +1,7 @@
/*************** tabjson H Declares Source Code File (.H) **************/
/* Name: tabjson.h Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2021 */
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
@@ -67,10 +67,11 @@ public:
PJSON jsp;
PJOB row;
PCSZ sep;
+ PCSZ strfy;
char colname[65], fmt[129], buf[16];
uint *length;
int i, n, bf, ncol, lvl, sz, limit;
- bool all, strfy;
+ bool all;
}; // end of JSONDISC
/***********************************************************************/
@@ -230,8 +231,9 @@ public:
PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
- PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
+ PVAL MakeJson(PGLOBAL g, PJSON jsp, int n);
+ PJVAL GetRowValue(PGLOBAL g, PJSON row, int i);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
// Default constructor not to be used
diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp
index 1efda6e3bca..4b6bb6a9e62 100644
--- a/storage/connect/tabrest.cpp
+++ b/storage/connect/tabrest.cpp
@@ -1,8 +1,7 @@
/************** tabrest C++ Program Source Code File (.CPP) ************/
-/* PROGRAM NAME: tabrest Version 1.8 */
-/* (C) Copyright to the author Olivier BERTRAND 2018 - 2020 */
+/* PROGRAM NAME: tabrest Version 2.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2018 - 2021 */
/* This program is the REST Web API support for MariaDB. */
-/* When compiled without MARIADB defined, it is the EOM module code. */
/* The way Connect handles NOSQL data returned by REST queries is */
/* just by retrieving it as a file and then leave the existing data */
/* type tables (JSON, XML or CSV) process it as usual. */
@@ -11,23 +10,13 @@
/***********************************************************************/
/* Definitions needed by the included files. */
/***********************************************************************/
-#if defined(MARIADB)
#include <my_global.h> // All MariaDB stuff
#include <mysqld.h>
#include <sql_error.h>
-#else // !MARIADB OEM module
-#include "mini-global.h"
-#define _MAX_PATH 260
-#if !defined(REST_SOURCE)
-#if defined(__WIN__) || defined(_WINDOWS)
-#include <windows.h>
-#else // !__WIN__
-#define __stdcall
-#include <dlfcn.h> // dlopen(), dlclose(), dlsym() ...
-#endif // !__WIN__
-#endif // !REST_SOURCE
-#define _OS_H_INCLUDED // Prevent os.h to be called
-#endif // !MARIADB
+#if !defined(__WIN__) && !defined(_WINDOWS)
+#include <sys/types.h>
+#include <sys/wait.h>
+#endif // !__WIN__ && !_WINDOWS
/***********************************************************************/
/* Include application header files: */
@@ -53,74 +42,98 @@
#define PUSH_WARNING(M) htrc(M)
#endif
-#if defined(__WIN__) || defined(_WINDOWS)
-#define popen _popen
-#define pclose _pclose
-#endif
-
static XGETREST getRestFnc = NULL;
static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename);
-#if !defined(MARIADB)
-/***********************************************************************/
-/* DB static variables. */
-/***********************************************************************/
-int TDB::Tnum;
-int DTVAL::Shift;
-int CSORT::Limit = 0;
-double CSORT::Lg2 = log(2.0);
-size_t CSORT::Cpn[1000] = { 0 };
-
-/***********************************************************************/
-/* These functions are exported from the REST library. */
-/***********************************************************************/
-extern "C" {
- PTABDEF __stdcall GetREST(PGLOBAL, void*);
- PQRYRES __stdcall ColREST(PGLOBAL, PTOS, char*, char*, bool);
-} // extern "C"
-
-/***********************************************************************/
-/* This function returns a table definition class. */
-/***********************************************************************/
-PTABDEF __stdcall GetREST(PGLOBAL g, void *memp)
-{
- return new(g, memp) RESTDEF;
-} // end of GetREST
-#endif // !MARIADB
-
/***********************************************************************/
/* Xcurl: retrieve the REST answer by executing cURL. */
/***********************************************************************/
int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
{
- char buf[1024];
- int rc;
- FILE *pipe;
+ char buf[512];
+ int rc = 0;
+
+ if (strchr(filename, '"')) {
+ strcpy(g->Message, "Invalid file name");
+ return 1;
+ } // endif filename
if (Uri) {
if (*Uri == '/' || Http[strlen(Http) - 1] == '/')
- sprintf(buf, "curl %s%s -o %s", Http, Uri, filename);
+ sprintf(buf, "%s%s", Http, Uri);
else
- sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename);
+ sprintf(buf, "%s/%s", Http, Uri);
} else
- sprintf(buf, "curl %s -o %s", Http, filename);
+ strcpy(buf, Http);
+
+#if defined(__WIN__)
+ char cmd[1024];
+ STARTUPINFO si;
+ PROCESS_INFORMATION pi;
+
+ sprintf(cmd, "curl \"%s\" -o \"%s\"", buf, filename);
+
+ ZeroMemory(&si, sizeof(si));
+ si.cb = sizeof(si);
+ ZeroMemory(&pi, sizeof(pi));
+
+ // Start the child process.
+ if (CreateProcess(NULL, cmd, NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) {
+ // Wait until child process exits.
+ WaitForSingleObject(pi.hProcess, INFINITE);
+
+ // Close process and thread handles.
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+ } else {
+ sprintf(g->Message, "CreateProcess curl failed (%d)", GetLastError());
+ rc = 1;
+ } // endif CreateProcess
+#else // !__WIN__
+ char fn[600];
+ pid_t pID;
- if ((pipe = popen(buf, "rt"))) {
- if (trace(515))
- while (fgets(buf, sizeof(buf), pipe)) {
- htrc("%s", buf);
- } // endwhile
+ // Check whether the curl package is available by running a subprocess
+ FILE *f = popen("command -v curl", "r");
- pclose(pipe);
- rc = 0;
+ if (!f) {
+ strcpy(g->Message, "Problem in allocating memory.");
+ return 1;
} else {
- sprintf(g->Message, "curl failed, errno =%d", errno);
+ char temp_buff[50];
+ size_t len = fread(temp_buff,1, 50, f);
+
+ if(!len) {
+ strcpy(g->Message, "Curl not installed.");
+ return 1;
+ } else
+ pclose(f);
+
+ } // endif f
+
+ pID = vfork();
+ sprintf(fn, "-o%s", filename);
+
+ if (pID == 0) {
+ // Code executed by child process
+ execlp("curl", "curl", buf, fn, (char*)NULL);
+
+ // If execlp() is successful, we should not reach this next line.
+ strcpy(g->Message, "Unsuccessful execlp from vfork()");
+ exit(1);
+ } else if (pID < 0) {
+ // failed to fork
+ strcpy(g->Message, "Failed to fork");
rc = 1;
- } // endif pipe
+ } else {
+ // Parent process
+ wait(NULL); // Wait for the child to terminate
+ } // endif pID
+#endif // !__WIN__
return rc;
-} // end od Xcurl
+} // end of Xcurl
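The rewritten Xcurl no longer interpolates the URL and output file into a popen() shell command; on Unix it forks and execlp()s curl directly so the arguments travel as argv entries, and on Windows it quotes them for CreateProcess. A condensed, hedged sketch of the Unix pattern with invented names (plain fork() here, where the patch uses vfork(), and error reporting trimmed):

#include <string>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

// Run "curl <url> -o<outfile>" without going through a shell, so the
// arguments cannot be reinterpreted as shell syntax.
static int fetch_with_curl(const std::string& url, const std::string& outfile)
{
  std::string opt = "-o" + outfile;        // curl's output-file option
  pid_t pid = fork();                      // the actual patch uses vfork()

  if (pid == 0) {                          // child: replace image with curl
    execlp("curl", "curl", url.c_str(), opt.c_str(), (char*)nullptr);
    _exit(1);                              // only reached if execlp failed
  } else if (pid < 0) {
    return 1;                              // fork failed
  }

  int status = 0;
  waitpid(pid, &status, 0);                // parent: wait for curl to finish
  return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : 1;
}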
/***********************************************************************/
/* GetREST: load the Rest lib and get the Rest function. */
@@ -130,7 +143,7 @@ XGETREST GetRestFunction(PGLOBAL g)
if (getRestFnc)
return getRestFnc;
-#if !defined(MARIADB) || !defined(REST_SOURCE)
+#if !defined(REST_SOURCE)
if (trace(515))
htrc("Looking for GetRest library\n");
@@ -183,9 +196,9 @@ XGETREST GetRestFunction(PGLOBAL g)
return NULL;
} // endif getdef
#endif // !__WIN__
-#else
+#else // REST_SOURCE
getRestFnc = restGetFile;
-#endif
+#endif // REST_SOURCE
return getRestFnc;
} // end of GetRestFunction
@@ -193,30 +206,21 @@ XGETREST GetRestFunction(PGLOBAL g)
/***********************************************************************/
/* Return the columns definition to MariaDB. */
/***********************************************************************/
-#if defined(MARIADB)
PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
-#else // !MARIADB
-PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
-#endif // !MARIADB
{
PQRYRES qrp= NULL;
char filename[_MAX_PATH + 1]; // MAX PATH ???
int rc;
- bool curl = false;
PCSZ http, uri, fn, ftype;
- XGETREST grf = GetRestFunction(g);
+ XGETREST grf = NULL;
+ bool curl = GetBooleanTableOption(g, tp, "Curl", false);
- if (!grf)
+ if (!curl && !(grf = GetRestFunction(g)))
curl = true;
http = GetStringTableOption(g, tp, "Http", NULL);
uri = GetStringTableOption(g, tp, "Uri", NULL);
-#if defined(MARIADB)
ftype = GetStringTableOption(g, tp, "Type", "JSON");
-#else // !MARIADB
- // OEM tables must specify the file type
- ftype = GetStringTableOption(g, tp, "Ftype", "JSON");
-#endif // !MARIADB
fn = GetStringTableOption(g, tp, "Filename", NULL);
if (!fn) {
@@ -230,28 +234,25 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
filename[n + i] = tolower(ftype[i]);
fn = filename;
- tp->filename = PlugDup(g, fn);
+ tp->subtype = PlugDup(g, fn);
sprintf(g->Message, "No file name. Table will use %s", fn);
PUSH_WARNING(g->Message);
} // endif fn
// We used the file name relative to recorded datapath
PlugSetPath(filename, fn, db);
- curl = GetBooleanTableOption(g, tp, "Curl", curl);
+ remove(filename);
// Retrieve the file from the web and copy it locally
if (curl)
rc = Xcurl(g, http, uri, filename);
- else if (grf)
+ else
rc = grf(g->Message, trace(515), http, uri, filename);
- else {
- strcpy(g->Message, "Cannot access to curl nor casablanca");
- rc = 1;
- } // endif !grf
- if (rc)
+ if (rc) {
+ strcpy(g->Message, "Cannot access to curl nor casablanca");
return NULL;
- else if (!stricmp(ftype, "JSON"))
+ } else if (!stricmp(ftype, "JSON"))
qrp = JSONColumns(g, db, NULL, tp, info);
else if (!stricmp(ftype, "CSV"))
qrp = CSVColumns(g, NULL, tp, info);
@@ -274,19 +275,15 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
char filename[_MAX_PATH + 1];
int rc = 0, n;
- bool curl = false, xt = trace(515);
+ bool xt = trace(515);
LPCSTR ftype;
- XGETREST grf = GetRestFunction(g);
+ XGETREST grf = NULL;
+ bool curl = GetBoolCatInfo("Curl", false);
- if (!grf)
+ if (!curl && !(grf = GetRestFunction(g)))
curl = true;
-#if defined(MARIADB)
ftype = GetStringCatInfo(g, "Type", "JSON");
-#else // !MARIADB
- // OEM tables must specify the file type
- ftype = GetStringCatInfo(g, "Ftype", "JSON");
-#endif // !MARIADB
if (xt)
htrc("ftype = %s am = %s\n", ftype, SVP(am));
@@ -309,24 +306,21 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
// We used the file name relative to recorded datapath
PlugSetPath(filename, Fn, GetPath());
-
- curl = GetBoolCatInfo("Curl", curl);
+ remove(filename);
// Retrieve the file from the web and copy it locally
if (curl) {
rc = Xcurl(g, Http, Uri, filename);
xtrc(515, "Return from Xcurl: rc=%d\n", rc);
- } else if (grf) {
+ } else {
rc = grf(g->Message, xt, Http, Uri, filename);
xtrc(515, "Return from restGetFile: rc=%d\n", rc);
- } else {
- strcpy(g->Message, "Cannot access to curl nor casablanca");
- rc = 1;
- } // endif !grf
+ } // endelse
- if (rc)
- return true;
- else switch (n) {
+ if (rc) {
+ // strcpy(g->Message, "Cannot access to curl nor casablanca");
+ return true;
+ } else switch (n) {
case 1: Tdp = new (g) JSONDEF; break;
#if defined(XML_SUPPORT)
case 2: Tdp = new (g) XMLDEF; break;
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index 81c19ff1a4f..dcebe18dd36 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -148,14 +148,21 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
/* Open the input file. */
/*********************************************************************/
if (!(fn = GetStringTableOption(g, topt, "Filename", NULL))) {
- strcpy(g->Message, MSG(MISSING_FNAME));
- return NULL;
- } else {
- lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
- lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
- lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
+ if (topt->http) // REST table can have default filename
+ fn = GetStringTableOption(g, topt, "Subtype", NULL);
+
+ if (!fn) {
+ strcpy(g->Message, MSG(MISSING_FNAME));
+ return NULL;
+ } else
+ topt->subtype = NULL;
+
} // endif fn
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
+ lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
+
if (trace(1))
htrc("File %s lvl=%d\n", topt->filename, lvl);
diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h
index ad970105868..568fc172c6a 100644
--- a/storage/connect/valblk.h
+++ b/storage/connect/valblk.h
@@ -69,7 +69,7 @@ class VALBLK : public BLOCK {
int GetPrec(void) {return Prec;}
void SetCheck(bool b) {Check = b;}
void MoveNull(int i, int j)
- {if (To_Nulls) To_Nulls[j] = To_Nulls[j];}
+ {if (To_Nulls) To_Nulls[j] = To_Nulls[i];}
virtual void SetNull(int n, bool b)
{if (To_Nulls) {To_Nulls[n] = (b) ? '*' : 0;}}
virtual bool IsNull(int n) {return To_Nulls && To_Nulls[n];}
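The valblk.h change above fixes a self-assignment: the old body copied To_Nulls[j] onto itself, so moving a value never carried its null flag along. A tiny self-contained illustration of the intended behaviour, with To_Nulls modelled as a plain array:

#include <cassert>

int main()
{
  char to_nulls[4] = {0, '*', 0, 0};       // slot 1 is NULL

  // Old code: to_nulls[3] = to_nulls[3];  (no effect, the flag was lost)
  // Fixed code: copy the flag from the source slot to the destination slot.
  int i = 1, j = 3;
  to_nulls[j] = to_nulls[i];

  assert(to_nulls[3] == '*');              // destination is now flagged NULL too
  return 0;
}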
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 06a38f49212..c97b08c063d 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -5274,3 +5274,9 @@ dict_tf_to_row_format_string(
ut_error;
return(0);
}
+
+bool dict_table_t::is_stats_table() const
+{
+ return !strcmp(name.m_name, TABLE_STATS_NAME) ||
+ !strcmp(name.m_name, INDEX_STATS_NAME);
+}
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 842dbaf1eb2..880fa9d9efa 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2021, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -2989,7 +2989,7 @@ fil_ibd_load(
space = fil_space_get_by_id(space_id);
mutex_exit(&fil_system.mutex);
- if (space != NULL) {
+ if (space) {
/* Compare the filename we are trying to open with the
filename from the first node of the tablespace we opened
previously. Fail if it is different. */
@@ -3001,8 +3001,8 @@ fil_ibd_load(
<< "' with space ID " << space->id
<< ". Another data file called " << node->name
<< " exists with the same space ID.";
- space = NULL;
- return(FIL_LOAD_ID_CHANGED);
+ space = NULL;
+ return(FIL_LOAD_ID_CHANGED);
}
return(FIL_LOAD_OK);
}
@@ -3039,13 +3039,6 @@ fil_ibd_load(
os_offset_t minimum_size;
case DB_SUCCESS:
if (file.space_id() != space_id) {
- ib::info()
- << "Ignoring data file '"
- << file.filepath()
- << "' with space ID " << file.space_id()
- << ", since the redo log references "
- << file.filepath() << " with space ID "
- << space_id << ".";
return(FIL_LOAD_ID_CHANGED);
}
/* Get and test the file size. */
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index b2908fc9819..df5d2e2ddaa 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -4856,13 +4856,19 @@ ha_innobase::index_type(
{
dict_index_t* index = innobase_get_index(keynr);
- if (index && index->type & DICT_FTS) {
+ if (!index) {
+ return "Corrupted";
+ }
+
+ if (index->type & DICT_FTS) {
return("FULLTEXT");
- } else if (dict_index_is_spatial(index)) {
+ }
+
+ if (dict_index_is_spatial(index)) {
return("SPATIAL");
- } else {
- return("BTREE");
}
+
+ return("BTREE");
}
/****************************************************************//**
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 2d659d9f9a3..420e7eac9e1 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -2425,7 +2425,8 @@ i_s_fts_deleted_generic_fill(
if (!user_table) {
rw_lock_s_unlock(&dict_sys.latch);
DBUG_RETURN(0);
- } else if (!dict_table_has_fts_index(user_table)) {
+ } else if (!dict_table_has_fts_index(user_table)
+ || !user_table->is_readable()) {
dict_table_close(user_table, FALSE, FALSE);
rw_lock_s_unlock(&dict_sys.latch);
DBUG_RETURN(0);
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index c96ea8806df..b43747268b3 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -2373,6 +2373,11 @@ public:
return true;
return false;
}
+
+ /** Check whether the table name is the same as mysql/innodb_table_stats
+ or mysql/innodb_index_stats.
+ @return true if the table is one of the persistent statistics tables */
+ bool is_stats_table() const;
};
inline void dict_index_t::set_modified(mtr_t& mtr) const
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index f1a13e6ea59..ac161687c87 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1076,6 +1076,10 @@ public:
ut_ad(old_n_ref > 0);
}
+ /** @return whether the transaction holds a lock on
+ mysql.innodb_table_stats or mysql.innodb_index_stats */
+ bool has_stats_table_lock() const;
+
/** Free the memory to trx_pools */
void free();
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index b606228b1fe..e5f71e0b151 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -193,28 +193,33 @@ wsrep_is_BF_lock_timeout(
const trx_t* trx,
bool locked = true)
{
- if (trx->error_state != DB_DEADLOCK && trx->is_wsrep() &&
- srv_monitor_timer && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
- ib::info() << "WSREP: BF lock wait long for trx:" << ib::hex(trx->id)
+ bool long_wait= (trx->error_state != DB_DEADLOCK &&
+ srv_monitor_timer && trx->is_wsrep() &&
+ wsrep_thd_is_BF(trx->mysql_thd, false));
+ bool was_wait= true;
+
+ DBUG_EXECUTE_IF("wsrep_instrument_BF_lock_wait",
+ was_wait=false; long_wait=true;);
+
+ if (long_wait) {
+ ib::info() << "WSREP: BF lock wait long for trx:" << trx->id
<< " query: " << wsrep_thd_query(trx->mysql_thd);
- if (!locked) {
+
+ if (!locked)
lock_mutex_enter();
- }
ut_ad(lock_mutex_own());
trx_print_latched(stderr, trx, 3000);
+ /* Note this will release lock_sys mutex */
+ lock_print_info_all_transactions(stderr);
- if (!locked) {
- lock_mutex_exit();
- }
+ if (locked)
+ lock_mutex_enter();
- srv_print_innodb_monitor = TRUE;
- srv_print_innodb_lock_monitor = TRUE;
- srv_monitor_timer_schedule_now();
- return true;
- }
- return false;
+ return was_wait;
+ } else
+ return false;
}
#endif /* WITH_WSREP */
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index f096149b0aa..b73786074f2 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -457,6 +457,8 @@ row_vers_build_clust_v_col(
ut_ad(dict_index_has_virtual(index));
ut_ad(index->table == clust_index->table);
+ DEBUG_SYNC(current_thd, "ib_clust_v_col_before_row_allocated");
+
ib_vcol_row vc(nullptr);
byte *record = vc.record(thd, index, &maria_table);
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index dc3dd51bb89..4df4f8fff05 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -777,7 +777,8 @@ void trx_rollback_recovered(bool all)
srv_fast_shutdown)
goto discard;
- if (all || trx_get_dict_operation(trx) != TRX_DICT_OP_NONE)
+ if (all || trx_get_dict_operation(trx) != TRX_DICT_OP_NONE
+ || trx->has_stats_table_lock())
{
trx_rollback_active(trx);
if (trx->error_state != DB_SUCCESS)
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index a48ab780960..d01ba992dc5 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -2333,3 +2333,16 @@ trx_set_rw_mode(
trx->read_view.set_creator_trx_id(trx->id);
}
}
+
+bool trx_t::has_stats_table_lock() const
+{
+ for (lock_list::const_iterator it= lock.table_locks.begin(),
+ end= lock.table_locks.end(); it != end; ++it)
+ {
+ const lock_t *lock= *it;
+ if (lock && lock->un_member.tab_lock.table->is_stats_table())
+ return true;
+ }
+
+ return false;
+}
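has_stats_table_lock() simply walks the transaction's table locks and asks each locked table whether it is one of the persistent statistics tables, so that trx_rollback_recovered() can roll such transactions back eagerly instead of leaving the stats tables locked after crash recovery. A minimal sketch of the same scan over hypothetical stand-in types (the literal names are assumed to match TABLE_STATS_NAME and INDEX_STATS_NAME):

#include <cstring>
#include <vector>

// Hypothetical stand-ins for dict_table_t and lock_t.
struct Table { const char* name; };
struct Lock  { const Table* table; };

static bool is_stats_table(const Table& t)
{
  return !std::strcmp(t.name, "mysql/innodb_table_stats") ||
         !std::strcmp(t.name, "mysql/innodb_index_stats");
}

// Equivalent of trx_t::has_stats_table_lock(): true if any held table
// lock is on one of the persistent statistics tables.
static bool has_stats_table_lock(const std::vector<Lock>& table_locks)
{
  for (const Lock& l : table_locks)
    if (l.table && is_stats_table(*l.table))
      return true;

  return false;
}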
diff --git a/storage/mroonga/CMakeLists.txt b/storage/mroonga/CMakeLists.txt
index 6ea264ce84c..555ab248751 100644
--- a/storage/mroonga/CMakeLists.txt
+++ b/storage/mroonga/CMakeLists.txt
@@ -17,7 +17,7 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-cmake_minimum_required(VERSION 2.6)
+cmake_minimum_required(VERSION 2.8.12)
project(mroonga)
if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_SOURCE_DIR}")
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index b3f84f5eefa..23a0adcaf2a 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -978,7 +978,8 @@ void ha_myisam::setup_vcols_for_repair(HA_CHECK *param)
return;
file->s->vreclength= new_vreclength;
}
- DBUG_ASSERT(file->s->base.reclength < file->s->vreclength);
+ DBUG_ASSERT(file->s->base.reclength < file->s->vreclength ||
+ !table->s->stored_fields);
param->fix_record= compute_vcols;
table->use_all_columns();
}
diff --git a/storage/perfschema/CMakeLists.txt b/storage/perfschema/CMakeLists.txt
index b2388099328..98d3e2a401d 100644
--- a/storage/perfschema/CMakeLists.txt
+++ b/storage/perfschema/CMakeLists.txt
@@ -20,8 +20,7 @@
# along with this program; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/include
+INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_BINARY_DIR}/sql
${CMAKE_CURRENT_BINARY_DIR}
diff --git a/storage/perfschema/pfs_prepared_stmt.h b/storage/perfschema/pfs_prepared_stmt.h
index c163514ccc2..1b017b508a6 100644
--- a/storage/perfschema/pfs_prepared_stmt.h
+++ b/storage/perfschema/pfs_prepared_stmt.h
@@ -29,8 +29,8 @@
*/
#include "pfs_stat.h"
-#include "include/mysql/psi/psi.h"
-#include "include/mysql/psi/mysql_ps.h"
+#include "mysql/psi/psi.h"
+#include "mysql/psi/mysql_ps.h"
#include "pfs_program.h"
#define PS_NAME_LENGTH NAME_LEN