| author    | Oleksandr Byelkin <sanja@mariadb.com> | 2020-08-03 14:44:06 +0200 |
|-----------|---------------------------------------|---------------------------|
| committer | Oleksandr Byelkin <sanja@mariadb.com> | 2020-08-03 14:44:06 +0200 |
| commit    | 57325e470615e79f674d82b2d5b09f609508fc6a (patch) | |
| tree      | 7a2e84a6753a5a5592f44f96194ad7a551c4669f | |
| parent    | 706a7101bfacd29f4f5728034be92240e82df583 (diff) | |
| parent    | c32f71af7e4b747de223bf6b44e691941f5997cf (diff) | |
| download  | mariadb-git-57325e470615e79f674d82b2d5b09f609508fc6a.tar.gz | |
Merge branch '10.3' into 10.4
134 files changed, 2306 insertions, 520 deletions
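
A recurring theme in the merged test changes is the schema-qualified data type syntax from MDEV-19632. The sketch below is a minimal illustration distilled from the compat/oracle type_date test included in this diff (table and column names are illustrative only, not part of the patch):

```sql
-- Minimal sketch (not part of the patch): schema-qualified types from MDEV-19632,
-- as exercised by mysql-test/suite/compat/oracle/t/type_date.test below.
SET sql_mode=DEFAULT;
CREATE TABLE t1 (
  mdb_date mariadb_schema.DATE,   -- always the native MariaDB DATE
  ora_date oracle_schema.DATE     -- Oracle-style DATE, stored as datetime
);
SHOW CREATE TABLE t1;             -- shows `mdb_date` date, `ora_date` datetime
DROP TABLE t1;
```
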
diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake index a03a7bbf7e2..149fdad231f 100644 --- a/cmake/os/WindowsCache.cmake +++ b/cmake/os/WindowsCache.cmake @@ -30,7 +30,6 @@ SET(HAVE_BACKTRACE_SYMBOLS CACHE INTERNAL "") SET(HAVE_BACKTRACE_SYMBOLS_FD CACHE INTERNAL "") SET(HAVE_BFILL CACHE INTERNAL "") SET(HAVE_BSD_SIGNALS CACHE INTERNAL "") -SET(HAVE_BSS_START CACHE INTERNAL "") SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "") SET(HAVE_COMPRESS CACHE INTERNAL "") SET(HAVE_CRYPT CACHE INTERNAL "") diff --git a/config.h.cmake b/config.h.cmake index 8ea8e863e6f..652ea683ce4 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -241,7 +241,6 @@ /* Symbols we may use */ #cmakedefine HAVE_SYS_ERRLIST 1 /* used by stacktrace functions */ -#cmakedefine HAVE_BSS_START 1 #cmakedefine HAVE_BACKTRACE 1 #cmakedefine HAVE_BACKTRACE_SYMBOLS 1 #cmakedefine HAVE_BACKTRACE_SYMBOLS_FD 1 diff --git a/configure.cmake b/configure.cmake index 5dd45a6b05d..b31c9e48388 100644 --- a/configure.cmake +++ b/configure.cmake @@ -798,14 +798,6 @@ CHECK_CXX_SOURCE_COMPILES(" ENDIF() CHECK_C_SOURCE_COMPILES(" - int main(int argc, char **argv) - { - extern char *__bss_start; - return __bss_start ? 1 : 0; - }" -HAVE_BSS_START) - -CHECK_C_SOURCE_COMPILES(" int main() { extern void __attribute__((weak)) foo(void); diff --git a/libmariadb b/libmariadb -Subproject ce74fd0c4009ed9f4bcbdb4a01e96c823e961dc +Subproject a746c3af449a8754e78ad7971e59e79af7957cd diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 529354389b5..1d7e07ff072 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -109,6 +109,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc ../sql/sql_explain.cc ../sql/sql_explain.h ../sql/sql_analyze_stmt.cc ../sql/sql_analyze_stmt.h ../sql/compat56.cc + ../sql/sql_schema.cc ../sql/sql_type.cc ../sql/sql_type.h ../sql/sql_mode.cc ../sql/sql_type_json.cc diff --git a/man/mysql_upgrade.1 b/man/mysql_upgrade.1 index 42307eefd4b..39e893b8863 100644 --- a/man/mysql_upgrade.1 +++ b/man/mysql_upgrade.1 @@ -1,6 +1,6 @@ '\" t .\" -.TH "\FBMYSQL_UPGRADE\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System" +.TH "\FBMYSQL_UPGRADE\FR" "1" "20 July 2020" "MariaDB 10\&.4" "MariaDB Database System" .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- @@ -165,6 +165,8 @@ in the data directory\&. This is used to quickly check whether all tables have b \fB\-\-force\fR option\&. .PP +For this reason, \fBmysql_upgrade\fR needs to be run as a user with write access to the data directory\&. +.PP If you install MariaDB from RPM packages on Linux, you must install the server and client RPMs\&. 
\fBmysql_upgrade\fR is included in the server RPM but requires the client RPM because the latter includes diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result index 9311a556191..57f5744373e 100644 --- a/mysql-test/main/func_group.result +++ b/mysql-test/main/func_group.result @@ -1186,13 +1186,13 @@ i count(*) std(e1/e2) 3 4 0.00000000 select std(s1/s2) from bug22555; std(s1/s2) -0.21325764 +0.21328517 select std(o1/o2) from bug22555; std(o1/o2) 0.2132576358664934 select std(e1/e2) from bug22555; std(e1/e2) -0.21325764 +0.21328517 set @saved_div_precision_increment=@@div_precision_increment; set div_precision_increment=19; select i, count(*), std(s1/s2) from bug22555 group by i order by i; diff --git a/mysql-test/main/grant5.result b/mysql-test/main/grant5.result index 2cc1c11f7d8..972131a24b7 100644 --- a/mysql-test/main/grant5.result +++ b/mysql-test/main/grant5.result @@ -25,6 +25,19 @@ ERROR HY000: Table 'procs_priv' was not locked with LOCK TABLES REVOKE PROCESS ON *.* FROM u; ERROR HY000: Table 'db' was not locked with LOCK TABLES DROP TABLE t1; +create database mysqltest1; +use mysqltest1; +create table t1(id int); +insert t1 values(2); +create user u1@localhost; +grant select on mysqltest1.t1 to u1@localhost; +grant update on mysqltest1.* to u1@localhost; +connect u1, localhost, u1; +update mysqltest1.t1 set id=1 where id=2; +connection default; +disconnect u1; +drop user u1@localhost; +drop database mysqltest1; # # MDEV-20076: SHOW GRANTS does not quote role names properly # diff --git a/mysql-test/main/grant5.test b/mysql-test/main/grant5.test index 054b16c0a6e..e133108516e 100644 --- a/mysql-test/main/grant5.test +++ b/mysql-test/main/grant5.test @@ -34,6 +34,27 @@ REVOKE EXECUTE ON PROCEDURE sp FROM u; REVOKE PROCESS ON *.* FROM u; DROP TABLE t1; +# +# MDEV-23010 UPDATE privilege at Database and Table level fail to update with SELECT command denied to user +# +create database mysqltest1; +use mysqltest1; +create table t1(id int); +insert t1 values(2); +create user u1@localhost; +grant select on mysqltest1.t1 to u1@localhost; +grant update on mysqltest1.* to u1@localhost; +connect u1, localhost, u1; +update mysqltest1.t1 set id=1 where id=2; +connection default; +disconnect u1; +drop user u1@localhost; +drop database mysqltest1; + +# +# End of 10.1 tests +# + --echo # --echo # MDEV-20076: SHOW GRANTS does not quote role names properly --echo # diff --git a/mysql-test/main/lowercase_fs_off.result b/mysql-test/main/lowercase_fs_off.result index 6ff8c1b7f93..3e9aa7cc239 100644 --- a/mysql-test/main/lowercase_fs_off.result +++ b/mysql-test/main/lowercase_fs_off.result @@ -158,3 +158,13 @@ show triggers like '%T1%'; Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation drop table t1; set GLOBAL sql_mode=default; +# +# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... 
SELECT in ORACLE mode +# +# Compatibility schema names respect the filesystem case sensitivity +CREATE TABLE t1 (a MARIADB_SCHEMA.date); +ERROR HY000: Unknown data type: 'MARIADB_SCHEMA.date' +CREATE TABLE t1 (a Mariadb_schema.date); +ERROR HY000: Unknown data type: 'Mariadb_schema.date' +CREATE TABLE t1 (a mariadb_schema.date); +DROP TABLE t1; diff --git a/mysql-test/main/lowercase_fs_off.test b/mysql-test/main/lowercase_fs_off.test index f828773ed06..e8e8dafe584 100644 --- a/mysql-test/main/lowercase_fs_off.test +++ b/mysql-test/main/lowercase_fs_off.test @@ -130,3 +130,18 @@ let $datadir= `select @@datadir`; remove_file $datadir/mysql_upgrade_info; set GLOBAL sql_mode=default; + + +--echo # +--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode +--echo # + +--echo # Compatibility schema names respect the filesystem case sensitivity + +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (a MARIADB_SCHEMA.date); +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (a Mariadb_schema.date); + +CREATE TABLE t1 (a mariadb_schema.date); +DROP TABLE t1; diff --git a/mysql-test/main/parser_precedence.result b/mysql-test/main/parser_precedence.result index 4330c8a2045..f23295bd61b 100644 --- a/mysql-test/main/parser_precedence.result +++ b/mysql-test/main/parser_precedence.result @@ -619,7 +619,7 @@ select 4 - 3 * 2, (4 - 3) * 2, 4 - (3 * 2); Testing that / is left associative select 15 / 5 / 3, (15 / 5) / 3, 15 / (5 / 3); 15 / 5 / 3 (15 / 5) / 3 15 / (5 / 3) -1.00000000 1.00000000 9.0000 +1.00000000 1.00000000 8.9998 Testing that / has precedence over | select 105 / 5 | 2, (105 / 5) | 2, 105 / (5 | 2); 105 / 5 | 2 (105 / 5) | 2 105 / (5 | 2) diff --git a/mysql-test/main/partition.result b/mysql-test/main/partition.result index ea12158862e..0fe60807308 100644 --- a/mysql-test/main/partition.result +++ b/mysql-test/main/partition.result @@ -2763,5 +2763,45 @@ SELECT 1 FROM t1 WHERE a XOR 'a'; 1 DROP TABLE t1; # +# Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE +# WITH A COMPOSITE PREFIX INDEX +# +create table t1(id int unsigned not null, +data varchar(2) default null, +key data_idx (data(1),id) +) default charset=utf8 +partition by range (id) ( +partition p10 values less than (10), +partition p20 values less than (20) +); +insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ; +select id from t1 where data = 'ab' order by id; +id +4 +5 +6 +14 +15 +16 +drop table t1; +create table t1(id int unsigned not null, +data text default null, +key data_idx (data(1),id) +) default charset=utf8 +partition by range (id) ( +partition p10 values less than (10), +partition p20 values less than (20) +); +insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ; +select id from t1 where data = 'ab' order by id; +id +4 +5 +6 +14 +15 +16 +drop table t1; +# # End of 10.1 tests # diff --git a/mysql-test/main/partition.test b/mysql-test/main/partition.test index 89db3e92162..0ac91611e43 100644 --- a/mysql-test/main/partition.test +++ b/mysql-test/main/partition.test @@ -2974,5 +2974,33 @@ SELECT 1 FROM t1 WHERE a XOR 'a'; DROP TABLE t1; --echo # +--echo # Bug #25207522: INCORRECT ORDER-BY BEHAVIOR ON A PARTITIONED TABLE +--echo # WITH A COMPOSITE PREFIX INDEX +--echo # +create table t1(id int unsigned not null, + data varchar(2) default null, + key data_idx (data(1),id) +) default charset=utf8 +partition by range (id) ( + partition p10 values less than (10), + 
partition p20 values less than (20) +); +insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ; +select id from t1 where data = 'ab' order by id; +drop table t1; + +create table t1(id int unsigned not null, + data text default null, + key data_idx (data(1),id) +) default charset=utf8 +partition by range (id) ( + partition p10 values less than (10), + partition p20 values less than (20) +); +insert t1 values (6, 'ab'), (4, 'ab'), (5, 'ab'), (16, 'ab'), (14, 'ab'), (15, 'ab'), (5, 'ac'), (15, 'aa') ; +select id from t1 where data = 'ab' order by id; +drop table t1; + +--echo # --echo # End of 10.1 tests --echo # diff --git a/mysql-test/main/stat_tables_partition.result b/mysql-test/main/stat_tables_partition.result index 12ae2570272..2619026b231 100644 --- a/mysql-test/main/stat_tables_partition.result +++ b/mysql-test/main/stat_tables_partition.result @@ -9,5 +9,38 @@ ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK -SET use_stat_tables = DEFAULT; DROP TABLE t1; +# +# MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... with EITS reads and locks all rows +# +CREATE TABLE t1 ( +id int(11) auto_increment primary key, +c1 int(11) DEFAULT NULL +) PARTITION BY RANGE (id) ( +PARTITION p0 VALUES LESS THAN (4), +PARTITION p1 VALUES LESS THAN MAXVALUE +); +insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1); +insert into t1(c1) select c1 from t1; +insert into t1(c1) select c1 from t1; +select count(*) from t1; +count(*) +32 +select count(*) from t1 where id <4; +count(*) +3 +flush status; +set session use_stat_tables='preferably'; +# Must NOT show "Engine-independent statistics collected": +alter table t1 analyze partition p0; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +# Should not have Handler_read_rnd_next=34 +show session status like 'Handler_read_rnd%'; +Variable_name Value +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 34 +drop table t1; +SET use_stat_tables = DEFAULT; diff --git a/mysql-test/main/stat_tables_partition.test b/mysql-test/main/stat_tables_partition.test index 1316e5cca11..11b74818d82 100644 --- a/mysql-test/main/stat_tables_partition.test +++ b/mysql-test/main/stat_tables_partition.test @@ -11,7 +11,33 @@ CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a) INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1); ANALYZE TABLE t1; +DROP TABLE t1; -SET use_stat_tables = DEFAULT; +--echo # +--echo # MDEV-21472: ALTER TABLE ... ANALYZE PARTITION ... 
with EITS reads and locks all rows +--echo # +CREATE TABLE t1 ( + id int(11) auto_increment primary key, + c1 int(11) DEFAULT NULL +) PARTITION BY RANGE (id) ( + PARTITION p0 VALUES LESS THAN (4), + PARTITION p1 VALUES LESS THAN MAXVALUE +); -DROP TABLE t1; +insert into t1(c1) values (1),(1),(1),(1), (1),(1),(1),(1); +insert into t1(c1) select c1 from t1; +insert into t1(c1) select c1 from t1; + +select count(*) from t1; +select count(*) from t1 where id <4; +flush status; +set session use_stat_tables='preferably'; + +--echo # Must NOT show "Engine-independent statistics collected": +alter table t1 analyze partition p0; + +--echo # Should not have Handler_read_rnd_next=34 +show session status like 'Handler_read_rnd%'; +drop table t1; + +SET use_stat_tables = DEFAULT; diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result index 34a484f3b18..8c8471512cf 100644 --- a/mysql-test/main/subselect4.result +++ b/mysql-test/main/subselect4.result @@ -2585,6 +2585,30 @@ e 2 o 6 DROP TABLE t1, t2; # +# MDEV-19232: Floating point precision / value comparison problem +# +CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0)); +INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797); +INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422); +SET @save_optimizer_switch=@@optimizer_switch; +SET optimizer_switch='subquery_cache=on'; +SELECT +population, area, population/area, +cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1; +population area population/area cast(population/area as DECIMAL(20,9)) +11797 91 129.6374 129.637400000 +SELECT * FROM t1 A +WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region); +region area population +Central America and the Caribbean 442 66422 +SET optimizer_switch='subquery_cache=off'; +SELECT * FROM t1 A +WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region); +region area population +Central America and the Caribbean 442 66422 +SET @@optimizer_switch= @save_optimizer_switch; +DROP TABLE t1; +# # MDEV-22852: SIGSEGV in sortlength (optimized builds) # SET @save_optimizer_switch=@@optimizer_switch; diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test index 36490c340e1..1ed79de4598 100644 --- a/mysql-test/main/subselect4.test +++ b/mysql-test/main/subselect4.test @@ -2116,6 +2116,32 @@ SELECT * FROM t1 where ( t1.l1 < ANY (SELECT MAX(t2.v1) FROM t2)); DROP TABLE t1, t2; --echo # +--echo # MDEV-19232: Floating point precision / value comparison problem +--echo # + +CREATE TABLE t1 (region varchar(60), area decimal(10,0), population decimal(11,0)); +INSERT INTO t1 VALUES ('Central America and the Caribbean',91,11797); +INSERT INTO t1 VALUES ('Central America and the Caribbean',442,66422); + +SET @save_optimizer_switch=@@optimizer_switch; +SET optimizer_switch='subquery_cache=on'; + +SELECT +population, area, population/area, +cast(population/area as DECIMAL(20,9)) FROM t1 LIMIT 1; + +SELECT * FROM t1 A +WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region); + +SET optimizer_switch='subquery_cache=off'; +SELECT * FROM t1 A +WHERE population/area = (SELECT MAX(population/area) from t1 B where A.region = B.region); + +SET @@optimizer_switch= @save_optimizer_switch; + +DROP TABLE t1; + +--echo # --echo # MDEV-22852: SIGSEGV in sortlength (optimized builds) --echo # diff --git a/mysql-test/main/subselect_exists2in.result b/mysql-test/main/subselect_exists2in.result 
index 576559c2e86..e8ef7081b09 100644 --- a/mysql-test/main/subselect_exists2in.result +++ b/mysql-test/main/subselect_exists2in.result @@ -975,4 +975,131 @@ id DROP PROCEDURE p1; DROP TABLE t1; # End of 10.0 tests +# +# MDEV-23221: A subquery causes crash +# +create table t1 ( +location_code varchar(10), +country_id varchar(10) +); +insert into t1 values ('HKG', 'HK'); +insert into t1 values ('NYC', 'US'); +insert into t1 values ('LAX', 'US'); +create table t2 ( +container_id varchar(10), +cntr_activity_type varchar(10), +cntr_dest varchar(10) +); +insert into t2 values ('AAAA1111', 'VSL', 'NYC'); +insert into t2 values ('AAAA1111', 'CUV', 'NYC'); +insert into t2 values ('BBBB2222', 'VSL', 'LAX'); +insert into t2 values ('BBBB2222', 'XYZ', 'LAX'); +# Must not crash or return an error: +select +(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry, +(select +max(container_id) +from t2 as cl2 +where +cl2.container_id = cl1.container_id and +cl2.cntr_activity_type = 'CUV' and +exists (select location_code +from t1 +where +location_code = cl2.cntr_dest and +country_id = dest_cntry) +) as CUV +from +t2 cl1; +dest_cntry CUV +US AAAA1111 +US AAAA1111 +US NULL +US NULL +prepare s from "select +(select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry, +(select +max(container_id) +from t2 as cl2 +where +cl2.container_id = cl1.container_id and +cl2.cntr_activity_type = 'CUV' and +exists (select location_code +from t1 +where +location_code = cl2.cntr_dest and +country_id = dest_cntry) +) as CUV +from +t2 cl1"; +execute s; +dest_cntry CUV +US AAAA1111 +US AAAA1111 +US NULL +US NULL +execute s; +dest_cntry CUV +US AAAA1111 +US AAAA1111 +US NULL +US NULL +drop table t1,t2; +# +# MDEV-20557: SQL query with duplicate table aliases consistently crashes server +# (Just a testcase) +# +create table t1 (id int, id2 int); +create table t2 (id int, id2 int, a int); +create table t3 (id int); +create table t4 (id int); +select (select 1 from t1 where (exists +(select 1 from t2 +where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt +from t3; +ERROR 42000: This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left expression of IN/ALL/ANY' +drop table t1,t2,t3,t4; +# +# MDEV-21649: Crash when using nested EXISTS +# (Just a testcase) +# +CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id)); +CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id)); +CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id)); +SELECT +W0.`id` +FROM +`t1` W0 +WHERE ( +EXISTS( +SELECT +V0.`id` + FROM +`t2` V0 +WHERE ( +EXISTS( +SELECT +U0.`id` + FROM +`t2` U0 +INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`) +WHERE ( +U0.`ip_id` = V0.`ip_id` + AND U4.`storage_method_id` = ( +SELECT +U5.`storage_method_id` + FROM +`t3` U5 +WHERE +U5.`storage_target_id` = V0.`id` + LIMIT +1 +) +) +) +) +) +); +id +drop table t1,t2,t3; set optimizer_switch=default; diff --git a/mysql-test/main/subselect_exists2in.test b/mysql-test/main/subselect_exists2in.test index 2a9947123d4..e27ce57038b 100644 --- a/mysql-test/main/subselect_exists2in.test +++ b/mysql-test/main/subselect_exists2in.test @@ -829,5 +829,117 @@ DROP TABLE t1; --echo # End of 10.0 tests +--echo # +--echo # MDEV-23221: A subquery causes crash +--echo # +create table t1 ( +location_code varchar(10), +country_id varchar(10) +); +insert into t1 values ('HKG', 'HK'); +insert into t1 values ('NYC', 'US'); +insert into t1 
values ('LAX', 'US'); + +create table t2 ( +container_id varchar(10), +cntr_activity_type varchar(10), +cntr_dest varchar(10) +); +insert into t2 values ('AAAA1111', 'VSL', 'NYC'); +insert into t2 values ('AAAA1111', 'CUV', 'NYC'); +insert into t2 values ('BBBB2222', 'VSL', 'LAX'); +insert into t2 values ('BBBB2222', 'XYZ', 'LAX'); + +let $query= +select + (select country_id from t1 where location_code = cl1.cntr_dest) as dest_cntry, + (select + max(container_id) + from t2 as cl2 + where + cl2.container_id = cl1.container_id and + cl2.cntr_activity_type = 'CUV' and + exists (select location_code + from t1 + where + location_code = cl2.cntr_dest and + country_id = dest_cntry) + ) as CUV +from + t2 cl1; + +--echo # Must not crash or return an error: +eval $query; + +eval prepare s from "$query"; +execute s; +execute s; + +drop table t1,t2; + +--echo # +--echo # MDEV-20557: SQL query with duplicate table aliases consistently crashes server +--echo # (Just a testcase) +--echo # + +create table t1 (id int, id2 int); +create table t2 (id int, id2 int, a int); +create table t3 (id int); +create table t4 (id int); + +--error ER_NOT_SUPPORTED_YET +select (select 1 from t1 where (exists + (select 1 from t2 + where t2.a = (select t4.id from t4 where t4.id = t3.id) and t2.id2 = t1.id2))) dt +from t3; + +drop table t1,t2,t3,t4; + + +--echo # +--echo # MDEV-21649: Crash when using nested EXISTS +--echo # (Just a testcase) +--echo # +CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY(id)); +CREATE TABLE t2 (id INT NOT NULL AUTO_INCREMENT, ip_id INT, PRIMARY KEY(id)); +CREATE TABLE t3 (id INT NOT NULL AUTO_INCREMENT, storage_method_id INT, storage_target_id INT, PRIMARY KEY(id)); + +SELECT + W0.`id` +FROM + `t1` W0 +WHERE ( + EXISTS( + SELECT + V0.`id` + FROM + `t2` V0 + WHERE ( + EXISTS( + SELECT + U0.`id` + FROM + `t2` U0 + INNER JOIN `t3` U4 ON (U0.`id` = U4.`storage_target_id`) + WHERE ( + U0.`ip_id` = V0.`ip_id` + AND U4.`storage_method_id` = ( + SELECT + U5.`storage_method_id` + FROM + `t3` U5 + WHERE + U5.`storage_target_id` = V0.`id` + LIMIT + 1 + ) + ) + ) + ) + ) +); + +drop table t1,t2,t3; + #restore defaults set optimizer_switch=default; diff --git a/mysql-test/main/type_float.result b/mysql-test/main/type_float.result index 167c167ad45..2588481ff75 100644 --- a/mysql-test/main/type_float.result +++ b/mysql-test/main/type_float.result @@ -676,6 +676,27 @@ Warnings: Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2010e0 DROP TABLE t1; # +# MDEV-23282 FLOAT(53,0) badly handles out-of-range values +# +CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL); +INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40); +Warnings: +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +SELECT c1, c2 FROM t1; +c1 c2 +3.40282e38 -3.40282e38 +DROP TABLE t1; +CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL); +INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40); +Warnings: +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 +SELECT c1, c2 FROM t1; +c1 c2 +340282346638528860000000000000000000000 -340282346638528860000000000000000000000 +DROP TABLE t1; +# # End of 10.1 tests # # diff --git a/mysql-test/main/type_float.test b/mysql-test/main/type_float.test index 4665c945a76..ea1829bfdaa 100644 --- a/mysql-test/main/type_float.test +++ b/mysql-test/main/type_float.test @@ -486,6 +486,20 @@ DROP TABLE t1; --echo # +--echo # 
MDEV-23282 FLOAT(53,0) badly handles out-of-range values +--echo # + +CREATE OR REPLACE TABLE t1 (c1 FLOAT NOT NULL, c2 FLOAT NOT NULL); +INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40); +SELECT c1, c2 FROM t1; +DROP TABLE t1; + +CREATE OR REPLACE TABLE t1 (c1 FLOAT(53,0) NOT NULL, c2 FLOAT(53,0) NOT NULL); +INSERT IGNORE INTO t1 VALUES (1e+40, -1e+40); +SELECT c1, c2 FROM t1; +DROP TABLE t1; + +--echo # --echo # End of 10.1 tests --echo # diff --git a/mysql-test/main/type_newdecimal.result b/mysql-test/main/type_newdecimal.result index d5fc6db107b..07ecef95e5d 100644 --- a/mysql-test/main/type_newdecimal.result +++ b/mysql-test/main/type_newdecimal.result @@ -1532,11 +1532,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 * 1.01500000 * 1.01500000 * 0.99500000) 0.81298807395367312459230693948000000000 create table t1 as select 5.05 / 0.014; -Warnings: -Note 1265 Data truncated for column '5.05 / 0.014' at row 1 show warnings; Level Code Message -Note 1265 Data truncated for column '5.05 / 0.014' at row 1 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1651,8 +1648,6 @@ my_col 0.12345678912345678912345678912345678912 DROP TABLE t1; CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col; -Warnings: -Note 1265 Data truncated for column 'my_col' at row 1 DESCRIBE t1; Field Type Null Key Default Extra my_col decimal(65,4) YES NULL diff --git a/mysql-test/main/type_ranges.result b/mysql-test/main/type_ranges.result index 784a394d8b5..199c8a45c97 100644 --- a/mysql-test/main/type_ranges.result +++ b/mysql-test/main/type_ranges.result @@ -91,8 +91,6 @@ DROP INDEX test ON t1; insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one'); insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one'); insert ignore into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3); -Warnings: -Warning 1265 Data truncated for column 'string' at row 1 insert ignore into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1); Warnings: Warning 1264 Out of range value for column 'utiny' at row 1 @@ -130,7 +128,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col 10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1 11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2 -12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3 +12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3 13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1 14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295 15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295 @@ -182,7 +180,7 @@ Warning 1265 Data truncated for column 
'new_field' at row 7 select * from t2; auto string mediumblob_col new_field 1 2 2 ne -2 0.33333333 ne +2 0.3333 ne 3 -1 -1 ne 4 -429496729 -4294967295 ne 5 4294967295 4294967295 ne diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 4cedfa61cce..d6abda9eb36 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -177,6 +177,7 @@ my @DEFAULT_SUITES= qw( csv- compat/oracle- compat/mssql- + compat/maxdb- encryption- federated- funcs_1- @@ -687,8 +688,7 @@ sub run_test_server ($$$) { My::CoreDump->show($core_file, $exe_mysqld, $opt_parallel); # Limit number of core files saved - if ($opt_max_save_core > 0 && - $num_saved_cores >= $opt_max_save_core) + if ($num_saved_cores >= $opt_max_save_core) { mtr_report(" - deleting it, already saved", "$opt_max_save_core"); @@ -704,8 +704,7 @@ sub run_test_server ($$$) { }, $worker_savedir); - if ($opt_max_save_datadir > 0 && - $num_saved_datadir >= $opt_max_save_datadir) + if ($num_saved_datadir >= $opt_max_save_datadir) { mtr_report(" - skipping '$worker_savedir/'"); rmtree($worker_savedir); @@ -714,9 +713,9 @@ sub run_test_server ($$$) { { mtr_report(" - saving '$worker_savedir/' to '$savedir/'"); rename($worker_savedir, $savedir); + $num_saved_datadir++; } resfile_print_test(); - $num_saved_datadir++; $num_failed_test++ unless ($result->{retries} || $result->{exp_fail}); @@ -1273,6 +1272,17 @@ sub command_line_setup { report_option('verbose', $opt_verbose); } + # Negative values aren't meaningful on integer options + foreach(grep(/=i$/, keys %options)) + { + if (defined ${$options{$_}} && + do { no warnings "numeric"; int ${$options{$_}} < 0}) + { + my $v= (split /=/)[0]; + die("$v doesn't accept a negative value:"); + } + } + # Find the absolute path to the test directory $glob_mysql_test_dir= cwd(); if ($glob_mysql_test_dir =~ / /) @@ -6421,12 +6431,12 @@ Options for debugging the product test(s) max-save-core Limit the number of core files saved (to avoid filling up disks for heavily crashing server). Defaults to - $opt_max_save_core, set to 0 for no limit. Set - it's default with MTR_MAX_SAVE_CORE + $opt_max_save_core. Set its default with + MTR_MAX_SAVE_CORE max-save-datadir Limit the number of datadir saved (to avoid filling up disks for heavily crashing server). Defaults to - $opt_max_save_datadir, set to 0 for no limit. Set - it's default with MTR_MAX_SAVE_DATADIR + $opt_max_save_datadir. Set its default with + MTR_MAX_SAVE_DATADIR max-test-fail Limit the number of test failures before aborting the current test run. Defaults to $opt_max_test_fail, set to 0 for no limit. Set diff --git a/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.result b/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.result new file mode 100644 index 00000000000..7c2012945c9 --- /dev/null +++ b/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.result @@ -0,0 +1,65 @@ +include/master-slave.inc +[connection master] +# +# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... 
SELECT in ORACLE mode +# +SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:00:00'); +SET sql_mode=DEFAULT; +CREATE TABLE t1 (a TIMESTAMP); +INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 VALUES ('2001-01-01 10:20:30'); +SET sql_mode=MAXDB; +CREATE TABLE t2 SELECT * FROM t1; +SET timestamp=DEFAULT; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a TIMESTAMP) +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (NULL) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES ('2001-01-01 10:20:30') +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE "t2" ( + "a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp() +) +master-bin.000001 # Annotate_rows # # CREATE TABLE t2 SELECT * FROM t1 +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +connection slave; +SELECT * FROM t1; +a +2001-01-01 10:00:00 +2001-01-01 10:20:30 +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp() +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` timestamp NOT NULL DEFAULT current_timestamp() +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET sql_mode=MAXDB; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp() +) +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE "t2" ( + "a" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp() +) +connection master; +DROP TABLE t1, t2; +include/rpl_end.inc diff --git a/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.test b/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.test new file mode 100644 index 00000000000..50663229937 --- /dev/null +++ b/mysql-test/suite/compat/maxdb/rpl_mariadb_timestamp.test @@ -0,0 +1,34 @@ +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +--echo # +--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... 
SELECT in ORACLE mode +--echo # + +SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:00:00'); +SET sql_mode=DEFAULT; +CREATE TABLE t1 (a TIMESTAMP); +INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 VALUES ('2001-01-01 10:20:30'); +SET sql_mode=MAXDB; +CREATE TABLE t2 SELECT * FROM t1; +SET timestamp=DEFAULT; + +--let $binlog_file = LAST +source include/show_binlog_events.inc; + + +--sync_slave_with_master +SELECT * FROM t1; +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +SET sql_mode=MAXDB; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +--connection master +DROP TABLE t1, t2; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/compat/maxdb/type_timestamp.result b/mysql-test/suite/compat/maxdb/type_timestamp.result new file mode 100644 index 00000000000..355a4e28414 --- /dev/null +++ b/mysql-test/suite/compat/maxdb/type_timestamp.result @@ -0,0 +1,53 @@ +# +# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode +# +SET sql_mode=DEFAULT; +CREATE TABLE t1 ( +def_timestamp TIMESTAMP, +mdb_timestamp mariadb_schema.TIMESTAMP, +ora_timestamp oracle_schema.TIMESTAMP, +max_timestamp maxdb_schema.TIMESTAMP +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `def_timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `mdb_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + `ora_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + `max_timestamp` datetime DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET sql_mode=MAXDB; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "def_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp(), + "mdb_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + "ora_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + "max_timestamp" datetime DEFAULT NULL +) +DROP TABLE t1; +SET sql_mode=MAXDB; +CREATE TABLE t1 ( +def_timestamp TIMESTAMP, +mdb_timestamp mariadb_schema.TIMESTAMP, +ora_timestamp oracle_schema.TIMESTAMP, +max_timestamp maxdb_schema.TIMESTAMP +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "def_timestamp" datetime DEFAULT NULL, + "mdb_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT current_timestamp(), + "ora_timestamp" mariadb_schema.timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + "max_timestamp" datetime DEFAULT NULL +) +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `def_timestamp` datetime DEFAULT NULL, + `mdb_timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `ora_timestamp` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + `max_timestamp` datetime DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/mysql-test/suite/compat/maxdb/type_timestamp.test b/mysql-test/suite/compat/maxdb/type_timestamp.test new file mode 100644 index 00000000000..cd60ffc0795 --- /dev/null +++ b/mysql-test/suite/compat/maxdb/type_timestamp.test @@ -0,0 +1,29 @@ +--echo # +--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... 
SELECT in ORACLE mode +--echo # + + +SET sql_mode=DEFAULT; +CREATE TABLE t1 ( + def_timestamp TIMESTAMP, + mdb_timestamp mariadb_schema.TIMESTAMP, + ora_timestamp oracle_schema.TIMESTAMP, + max_timestamp maxdb_schema.TIMESTAMP +); +SHOW CREATE TABLE t1; +SET sql_mode=MAXDB; +SHOW CREATE TABLE t1; +DROP TABLE t1; + + +SET sql_mode=MAXDB; +CREATE TABLE t1 ( + def_timestamp TIMESTAMP, + mdb_timestamp mariadb_schema.TIMESTAMP, + ora_timestamp oracle_schema.TIMESTAMP, + max_timestamp maxdb_schema.TIMESTAMP +); +SHOW CREATE TABLE t1; +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/mysql-test/suite/compat/oracle/r/rpl_mariadb_date.result b/mysql-test/suite/compat/oracle/r/rpl_mariadb_date.result new file mode 100644 index 00000000000..9aca6fa3542 --- /dev/null +++ b/mysql-test/suite/compat/oracle/r/rpl_mariadb_date.result @@ -0,0 +1,86 @@ +include/master-slave.inc +[connection master] +SET SQL_MODE=DEFAULT; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 VALUES ('2001-01-01'); +SET SQL_MODE= ORACLE; +CREATE TABLE t2 SELECT * FROM t1; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a DATE) +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (NULL) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES ('2001-01-01') +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE "t2" ( + "a" mariadb_schema.date DEFAULT NULL +) +master-bin.000001 # Annotate_rows # # CREATE TABLE t2 SELECT * FROM t1 +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +SET SQL_MODE= DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET SQL_MODE= ORACLE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" mariadb_schema.date DEFAULT NULL +) +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE "t2" ( + "a" mariadb_schema.date DEFAULT NULL +) +connection slave; +SELECT * FROM t1; +a +NULL +2001-01-01 +SELECT * FROM t2; +a +NULL +2001-01-01 +SET SQL_MODE= DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET SQL_MODE= ORACLE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" mariadb_schema.date DEFAULT NULL +) +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE "t2" ( + "a" mariadb_schema.date DEFAULT NULL +) +connection master; +DROP TABLE t1, t2; +include/rpl_end.inc diff --git a/mysql-test/suite/compat/oracle/r/type_date.result 
b/mysql-test/suite/compat/oracle/r/type_date.result index 0989fc593d2..40d2a834056 100644 --- a/mysql-test/suite/compat/oracle/r/type_date.result +++ b/mysql-test/suite/compat/oracle/r/type_date.result @@ -6,3 +6,153 @@ t1 CREATE TABLE "t1" ( "a" datetime DEFAULT NULL ) DROP TABLE t1; +# +# MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... SELECT in ORACLE mode +# +SET sql_mode=DEFAULT; +CREATE TABLE t1 (a unknown.DATE); +ERROR HY000: Unknown data type: 'unknown.date' +SET sql_mode=DEFAULT; +CREATE TABLE t1 ( +def_date DATE, +mdb_date mariadb_schema.DATE, +ora_date oracle_schema.DATE, +max_date maxdb_schema.DATE +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `def_date` date DEFAULT NULL, + `mdb_date` date DEFAULT NULL, + `ora_date` datetime DEFAULT NULL, + `max_date` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET sql_mode=ORACLE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "def_date" mariadb_schema.date DEFAULT NULL, + "mdb_date" mariadb_schema.date DEFAULT NULL, + "ora_date" datetime DEFAULT NULL, + "max_date" mariadb_schema.date DEFAULT NULL +) +DROP TABLE t1; +SET sql_mode=ORACLE; +CREATE TABLE t1 ( +def_date DATE, +mdb_date mariadb_schema.DATE, +ora_date oracle_schema.DATE, +max_date maxdb_schema.DATE +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "def_date" datetime DEFAULT NULL, + "mdb_date" mariadb_schema.date DEFAULT NULL, + "ora_date" datetime DEFAULT NULL, + "max_date" mariadb_schema.date DEFAULT NULL +) +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `def_date` datetime DEFAULT NULL, + `mdb_date` date DEFAULT NULL, + `ora_date` datetime DEFAULT NULL, + `max_date` date DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# +# ALTER..MODIFY and ALTER..CHANGE understand qualifiers +# +SET sql_mode=DEFAULT; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2001-01-01'); +SET sql_mode=ORACLE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" mariadb_schema.date DEFAULT NULL +) +SELECT * FROM t1; +a +2001-01-01 +ALTER TABLE t1 MODIFY a DATE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" datetime DEFAULT NULL +) +SELECT * FROM t1; +a +2001-01-01 00:00:00 +ALTER TABLE t1 MODIFY a mariadb_schema.DATE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" mariadb_schema.date DEFAULT NULL +) +SELECT * FROM t1; +a +2001-01-01 +ALTER TABLE t1 MODIFY a oracle_schema.DATE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" datetime DEFAULT NULL +) +SELECT * FROM t1; +a +2001-01-01 00:00:00 +ALTER TABLE t1 CHANGE a b mariadb_schema.DATE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "b" mariadb_schema.date DEFAULT NULL +) +SELECT * FROM t1; +b +2001-01-01 +ALTER TABLE t1 CHANGE b a oracle_schema.DATE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE "t1" ( + "a" datetime DEFAULT NULL +) +SELECT * FROM t1; +a +2001-01-01 00:00:00 +DROP TABLE t1; +# +# Qualified syntax is not supported yet in SP +# See MDEV-23353 Qualified data types in SP +# +SET sql_mode=ORACLE; +CREATE FUNCTION f1() RETURN mariadb_schema.DATE AS +BEGIN +RETURN CURRENT_DATE; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'mariadb_schema.DATE AS +BEGIN +RETURN CURRENT_DATE; +END' at line 1 +CREATE PROCEDURE p1(a 
mariadb_schema.DATE) AS +BEGIN +NULL; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') AS +BEGIN +NULL; +END' at line 1 +CREATE PROCEDURE p1() AS +a mariadb_schema.DATE; +BEGIN +NULL; +END; +$$ +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '; +BEGIN +NULL; +END' at line 2 diff --git a/mysql-test/suite/compat/oracle/t/rpl_mariadb_date.test b/mysql-test/suite/compat/oracle/t/rpl_mariadb_date.test new file mode 100644 index 00000000000..b2aff23333b --- /dev/null +++ b/mysql-test/suite/compat/oracle/t/rpl_mariadb_date.test @@ -0,0 +1,38 @@ +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +SET SQL_MODE=DEFAULT; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 VALUES ('2001-01-01'); + +SET SQL_MODE= ORACLE; +CREATE TABLE t2 SELECT * FROM t1; + +--let $binlog_file = LAST +source include/show_binlog_events.inc; + +SET SQL_MODE= DEFAULT; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +SET SQL_MODE= ORACLE; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +--sync_slave_with_master +SELECT * FROM t1; +SELECT * FROM t2; + +SET SQL_MODE= DEFAULT; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +SET SQL_MODE= ORACLE; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; + +# Cleanup +--connection master +DROP TABLE t1, t2; +--source include/rpl_end.inc diff --git a/mysql-test/suite/compat/oracle/t/type_date.test b/mysql-test/suite/compat/oracle/t/type_date.test index 61f7aa53944..36a5e99795f 100644 --- a/mysql-test/suite/compat/oracle/t/type_date.test +++ b/mysql-test/suite/compat/oracle/t/type_date.test @@ -2,3 +2,102 @@ SET sql_mode=ORACLE; CREATE TABLE t1 (a DATE); SHOW CREATE TABLE t1; DROP TABLE t1; + + +--echo # +--echo # MDEV-19632 Replication aborts with ER_SLAVE_CONVERSION_FAILED upon CREATE ... 
SELECT in ORACLE mode +--echo # + +SET sql_mode=DEFAULT; +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (a unknown.DATE); + + +SET sql_mode=DEFAULT; +CREATE TABLE t1 ( + def_date DATE, + mdb_date mariadb_schema.DATE, + ora_date oracle_schema.DATE, + max_date maxdb_schema.DATE +); +SHOW CREATE TABLE t1; +SET sql_mode=ORACLE; +SHOW CREATE TABLE t1; +DROP TABLE t1; + + +SET sql_mode=ORACLE; +CREATE TABLE t1 ( + def_date DATE, + mdb_date mariadb_schema.DATE, + ora_date oracle_schema.DATE, + max_date maxdb_schema.DATE +); +SHOW CREATE TABLE t1; +SET sql_mode=DEFAULT; +SHOW CREATE TABLE t1; +DROP TABLE t1; + + +--echo # +--echo # ALTER..MODIFY and ALTER..CHANGE understand qualifiers +--echo # + +SET sql_mode=DEFAULT; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2001-01-01'); +SET sql_mode=ORACLE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +ALTER TABLE t1 MODIFY a DATE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +ALTER TABLE t1 MODIFY a mariadb_schema.DATE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +ALTER TABLE t1 MODIFY a oracle_schema.DATE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +ALTER TABLE t1 CHANGE a b mariadb_schema.DATE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +ALTER TABLE t1 CHANGE b a oracle_schema.DATE; +SHOW CREATE TABLE t1; +SELECT * FROM t1; + +DROP TABLE t1; + +--echo # +--echo # Qualified syntax is not supported yet in SP +--echo # See MDEV-23353 Qualified data types in SP +--echo # + +SET sql_mode=ORACLE; +DELIMITER $$; +# Change to this when merging to 10.5: +#--error ER_UNKNOWN_DATA_TYPE +--error ER_PARSE_ERROR +CREATE FUNCTION f1() RETURN mariadb_schema.DATE AS +BEGIN + RETURN CURRENT_DATE; +END; +$$ +--error ER_PARSE_ERROR +CREATE PROCEDURE p1(a mariadb_schema.DATE) AS +BEGIN + NULL; +END; +$$ +--error ER_PARSE_ERROR +CREATE PROCEDURE p1() AS + a mariadb_schema.DATE; +BEGIN + NULL; +END; +$$ +DELIMITER ;$$ diff --git a/mysql-test/suite/engines/iuds/r/insert_decimal.result b/mysql-test/suite/engines/iuds/r/insert_decimal.result index 1e6b7a08558..f167712d048 100644 --- a/mysql-test/suite/engines/iuds/r/insert_decimal.result +++ b/mysql-test/suite/engines/iuds/r/insert_decimal.result @@ -1896,9 +1896,13 @@ Warnings: Warning 1264 Out of range value for column 'c1' at row 3 INSERT IGNORE INTO t5 VALUES('1e+52','-1e+52','1e+52',5),('1e-52','-1e-52','1e-52',6); Warnings: +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 Warning 1264 Out of range value for column 'c3' at row 1 INSERT IGNORE INTO t5 VALUES('1e+53','-1e+53','1e+53',7),('1e-53','-1e-53','1e-53',8); Warnings: +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1264 Out of range value for column 'c2' at row 1 Warning 1264 Out of range value for column 'c3' at row 1 SELECT * FROM t5; c1 c2 c3 c4 diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index cfab7ce3472..8550b2bd180 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -46,3 +46,4 @@ partition : MDEV-19958 Galera test failure on galera.partition query_cache: MDEV-15805 Test failure on galera.query_cache sql_log_bin : MDEV-21491 galera.sql_log_bin versioning_trx_id : MDEV-18590 galera.versioning_trx_id +MW-328A : MDEV-22666? 
diff --git a/mysql-test/suite/innodb/r/read_only_recover_committed.result b/mysql-test/suite/innodb/r/read_only_recover_committed.result index e4cfebd1ec0..e7895ce6d46 100644 --- a/mysql-test/suite/innodb/r/read_only_recover_committed.result +++ b/mysql-test/suite/innodb/r/read_only_recover_committed.result @@ -33,6 +33,7 @@ a 20 UPDATE t SET a=3 WHERE a=1; # restart: --innodb-read-only +SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output; # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED. # In earlier versions, this would return the last committed version # (only a=3; no record for a=20)! diff --git a/mysql-test/suite/innodb/t/read_only_recover_committed.test b/mysql-test/suite/innodb/t/read_only_recover_committed.test index 526d9fc02bf..9ad09bb9b3a 100644 --- a/mysql-test/suite/innodb/t/read_only_recover_committed.test +++ b/mysql-test/suite/innodb/t/read_only_recover_committed.test @@ -59,6 +59,7 @@ SELECT * FROM t; UPDATE t SET a=3 WHERE a=1; --let $restart_parameters= --innodb-read-only --source include/restart_mysqld.inc +SET GLOBAL innodb_status_output= @@GLOBAL.innodb_status_output; --echo # Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED. --echo # In earlier versions, this would return the last committed version --echo # (only a=3; no record for a=20)! diff --git a/mysql-test/suite/maria/encrypt-no-key.result b/mysql-test/suite/maria/encrypt-no-key.result new file mode 100644 index 00000000000..6745670dfac --- /dev/null +++ b/mysql-test/suite/maria/encrypt-no-key.result @@ -0,0 +1,15 @@ +call mtr.add_suppression('Unknown key id 1. Can''t continue'); +set global aria_encrypt_tables= 1; +create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1; +alter table t1 disable keys; +insert into t1 values (1,1); +alter table t1 enable keys; +ERROR HY000: Unknown key id 1. Can't continue! +repair table t1 use_frm; +Table Op Msg_type Msg_text +test.t1 repair warning Number of rows changed from 0 to 1 +test.t1 repair Error Unknown key id 1. Can't continue! +test.t1 repair Error Unknown key id 1. Can't continue! +test.t1 repair status OK +drop table t1; +set global aria_encrypt_tables= default; diff --git a/mysql-test/suite/maria/encrypt-no-key.test b/mysql-test/suite/maria/encrypt-no-key.test new file mode 100644 index 00000000000..2d586c50695 --- /dev/null +++ b/mysql-test/suite/maria/encrypt-no-key.test @@ -0,0 +1,14 @@ +# +# MDEV-18496 Crash when Aria encryption is enabled but plugin not available +# +call mtr.add_suppression('Unknown key id 1. 
Can''t continue'); + +set global aria_encrypt_tables= 1; +create table t1 (pk int primary key, a int, key(a)) engine=aria transactional=1; +alter table t1 disable keys; +insert into t1 values (1,1); +error 192; +alter table t1 enable keys; +repair table t1 use_frm; +drop table t1; +set global aria_encrypt_tables= default; diff --git a/mysql-test/suite/parts/r/partition_debug.result b/mysql-test/suite/parts/r/partition_debug.result index e438cae7a14..96ed9b010b7 100644 --- a/mysql-test/suite/parts/r/partition_debug.result +++ b/mysql-test/suite/parts/r/partition_debug.result @@ -1,3 +1,4 @@ +FLUSH TABLES; # # Bug#13737949: CRASH IN HA_PARTITION::INDEX_INIT # Bug#18694052: SERVER CRASH IN HA_PARTITION::INIT_RECORD_PRIORITY_QUEUE diff --git a/mysql-test/suite/parts/t/partition_debug.test b/mysql-test/suite/parts/t/partition_debug.test index 864452f8d8b..6d7cf3ae8b3 100644 --- a/mysql-test/suite/parts/t/partition_debug.test +++ b/mysql-test/suite/parts/t/partition_debug.test @@ -8,6 +8,10 @@ # Crash tests don't work with embedded --source include/not_embedded.inc +# Make sure system tables are not open, as the test will kill the server +# and it will cause corruption errors in the log +FLUSH TABLES; + # Partitioning test that require debug features --echo # diff --git a/mysql-test/suite/roles/drop_current_role.result b/mysql-test/suite/roles/drop_current_role.result new file mode 100644 index 00000000000..b6de0304d44 --- /dev/null +++ b/mysql-test/suite/roles/drop_current_role.result @@ -0,0 +1,5 @@ +create role r; +set role r; +drop role r; +revoke all on *.* from current_role; +ERROR OP000: Invalid role specification `r` diff --git a/mysql-test/suite/roles/drop_current_role.test b/mysql-test/suite/roles/drop_current_role.test new file mode 100644 index 00000000000..c8d6fc5d9df --- /dev/null +++ b/mysql-test/suite/roles/drop_current_role.test @@ -0,0 +1,9 @@ +--source include/not_embedded.inc +# +# MDEV-22521 Server crashes in traverse_role_graph_up or Assertion `user' fails in traverse_role_graph_impl +# +create role r; +set role r; +drop role r; +error ER_INVALID_ROLE; +revoke all on *.* from current_role; diff --git a/mysql-test/suite/sys_vars/r/div_precision_increment_func.result b/mysql-test/suite/sys_vars/r/div_precision_increment_func.result index c839dd6b289..e16ce76fed9 100644 --- a/mysql-test/suite/sys_vars/r/div_precision_increment_func.result +++ b/mysql-test/suite/sys_vars/r/div_precision_increment_func.result @@ -51,9 +51,9 @@ INSERT into t1(name, salary, income_tax) values('Record_2', 501, 501*2.5/1000); INSERT into t1(name, salary, income_tax) values('Record_3', 210, 210*2.5/1000); SELECT * from t1; id name salary income_tax -1 Record_1 100011 250.027 -2 Record_2 501 1.2525 -3 Record_3 210 0.525 +1 Record_1 100011 250.03 +2 Record_2 501 1.25 +3 Record_3 210 0.53 connect test_con2, localhost, root,,; connection test_con2; ## Verifying session & global value of variable ## @@ -69,11 +69,11 @@ INSERT into t1(name, salary, income_tax) values('Record_5', 501, 501*2.5/1000); INSERT into t1(name, salary, income_tax) values('Record_6', 210, 210*2.5/1000); SELECT * from t1; id name salary income_tax -1 Record_1 100011 250.027 -2 Record_2 501 1.2525 -3 Record_3 210 0.525 -4 Record_4 100011 250.027 -5 Record_5 501 1.2525 +1 Record_1 100011 250.03 +2 Record_2 501 1.25 +3 Record_3 210 0.53 +4 Record_4 100011 250.028 +5 Record_5 501 1.253 6 Record_6 210 0.525 ## Dropping table t1 ## drop table t1; diff --git a/mysql-test/suite/vcol/r/not_supported.result 
b/mysql-test/suite/vcol/r/not_supported.result index c804cf220d2..c524234e7e5 100644 --- a/mysql-test/suite/vcol/r/not_supported.result +++ b/mysql-test/suite/vcol/r/not_supported.result @@ -29,7 +29,7 @@ set time_zone='+1:00'; flush tables; select * from t1; a b v -1 2 0.3333333330000000000 +1 2 0.3333000000000000000 select * from t8; a b v 1234567890 2 2009-02-14 00:31:30 diff --git a/mysys/stacktrace.c b/mysys/stacktrace.c index 2274c505195..d9009d874d2 100644 --- a/mysys/stacktrace.c +++ b/mysys/stacktrace.c @@ -34,19 +34,20 @@ #include <execinfo.h> #endif +#ifdef __linux__ #define PTR_SANE(p) ((p) && (char*)(p) >= heap_start && (char*)(p) <= heap_end) - static char *heap_start; - -#if(defined HAVE_BSS_START) && !(defined __linux__) extern char *__bss_start; -#endif +#else +#define PTR_SANE(p) (p) +#endif /* __linux */ + void my_init_stacktrace() { -#if(defined HAVE_BSS_START) && !(defined __linux__) +#ifdef __linux__ heap_start = (char*) &__bss_start; -#endif +#endif /* __linux */ } #ifdef __linux__ @@ -149,15 +150,15 @@ static int safe_print_str(const char *addr, size_t max_len) int my_safe_print_str(const char* val, size_t max_len) { +#ifdef __linux__ char *heap_end; -#ifdef __linux__ // Try and make use of /proc filesystem to safely print memory contents. if (!safe_print_str(val, max_len)) return 0; -#endif heap_end= (char*) sbrk(0); +#endif if (!PTR_SANE(val)) { diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 5226c183d46..1feb17ee1b6 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -67,6 +67,7 @@ Usage: $0 [OPTIONS] --cross-bootstrap For internal use. Used when building the MariaDB system tables on a different host than the target. --datadir=path The path to the MariaDB data directory. + --no-defaults Don't read default options from any option file. --defaults-extra-file=name Read this file after the global files are read. --defaults-file=name Only read default options from the given file name. @@ -79,8 +80,6 @@ Usage: $0 [OPTIONS] --help Display this help and exit. --ldata=path The path to the MariaDB data directory. Same as --datadir. - --no-defaults Don't read default options from any option file. - --defaults-file=path Read only this configuration file. --rpm For internal use. This option is used by RPM files during the MariaDB installation process. --skip-name-resolve Use IP addresses rather than hostnames when creating diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index bab8e67fdac..1d3bf837237 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -132,6 +132,7 @@ SET (SQL_SOURCE rpl_gtid.cc rpl_parallel.cc semisync.cc semisync_master.cc semisync_slave.cc semisync_master_ack_receiver.cc + sql_schema.cc sql_type.cc sql_mode.cc sql_type_json.cc item_windowfunc.cc sql_window.cc sql_cte.cc diff --git a/sql/compat56.cc b/sql/compat56.cc index a500fcc46e1..3d8574419d3 100644 --- a/sql/compat56.cc +++ b/sql/compat56.cc @@ -305,7 +305,7 @@ uint my_datetime_binary_length(uint dec) /* On disk we store as unsigned number with DATETIMEF_INT_OFS offset, - for HA_KETYPE_BINARY compatibilty purposes. + for HA_KETYPE_BINARY compatibility purposes. */ #define DATETIMEF_INT_OFS 0x8000000000LL diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index bf721bddb85..39ceaadc671 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -33,7 +33,7 @@ /* Action to perform at a synchronization point. NOTE: This structure is moved around in memory by realloc(), qsort(), - and memmove(). 
Do not add objects with non-trivial constuctors + and memmove(). Do not add objects with non-trivial constructors or destructors, which might prevent moving of this structure with these functions. */ @@ -542,7 +542,7 @@ static void debug_sync_reset(THD *thd) @description Removing an action mainly means to decrement the ds_active counter. But if the action is between other active action in the array, then - the array needs to be shrinked. The active actions above the one to + the array needs to be shrunk. The active actions above the one to be removed have to be moved down by one slot. */ diff --git a/sql/derror.cc b/sql/derror.cc index 7a79833c26c..8b44d1bff9b 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -236,7 +236,7 @@ static File open_error_msg_file(const char *file_name, const char *language, MYF(0))) < 0) { /* - Trying pre-5.4 sematics of the --language parameter. + Trying pre-5.4 semantics of the --language parameter. It included the language-specific part, e.g.: --language=/path/to/english/ */ diff --git a/sql/encryption.cc b/sql/encryption.cc index 9c38713fdfa..13239b91910 100644 --- a/sql/encryption.cc +++ b/sql/encryption.cc @@ -78,8 +78,8 @@ int initialize_encryption_plugin(st_plugin_int *plugin) (struct st_mariadb_encryption*) plugin->plugin->info; /* - Copmiler on Spark doesn't like the '?' operator here as it - belives the (uint (*)...) implies the C++ call model. + Compiler on Spark doesn't like the '?' operator here as it + believes the (uint (*)...) implies the C++ call model. */ if (handle->crypt_ctx_size) encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size; diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index d9ed0b633e4..70faeee6039 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -161,7 +161,7 @@ Event_creation_ctx::load_from_db(THD *thd, /*************************************************************************/ /* - Initiliazes dbname and name of an Event_queue_element_for_exec + Initializes dbname and name of an Event_queue_element_for_exec object SYNOPSIS diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index dc47ed0b2e1..399a19b4112 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -672,7 +672,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length, parse_data->name.str)); - DBUG_PRINT("info", ("check existance of an event with the same name")); + DBUG_PRINT("info", ("check existence of an event with the same name")); if (!find_named_event(&parse_data->dbname, &parse_data->name, table)) { if (thd->lex->create_info.or_replace()) diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index e2f73cd2bb4..d2a168e538e 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -97,7 +97,7 @@ Event_parse_data::init_name(THD *thd, sp_name *spn) ENDS or AT is in the past, we are trying to create an event that will never be executed. If it has ON COMPLETION NOT PRESERVE (default), then it would normally be dropped already, so on CREATE - EVENT we give a warning, and do not create anyting. On ALTER EVENT + EVENT we give a warning, and do not create anything. On ALTER EVENT we give a error, and do not change the event. If the event has ON COMPLETION PRESERVE, then we see if the event is @@ -362,7 +362,7 @@ wrong_value: EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that the event will be executed every 5 minutes but this will start at the date shown above. 
Expressions are possible : - DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tommorow at + DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tomorrow at same time. RETURN VALUE @@ -417,7 +417,7 @@ wrong_value: EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that the event will be executed every 5 minutes but this will end at the date shown above. Expressions are possible : - DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tommorow at + DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tomorrow at same time. RETURN VALUE diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 6b3f5777df3..91c243b3f70 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -360,7 +360,7 @@ Event_queue::drop_matching_events(THD *thd, const LEX_CSTRING *pattern, We don't call mysql_cond_broadcast(&COND_queue_state); If we remove the top event: 1. The queue is empty. The scheduler will wake up at some time and - realize that the queue is empty. If create_event() comes inbetween + realize that the queue is empty. If create_event() comes in between it will signal the scheduler 2. The queue is not empty, but the next event after the previous top, won't be executed any time sooner than the element we removed. Hence, diff --git a/sql/events.cc b/sql/events.cc index 3e731cc23c4..195c0fa09e2 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -129,7 +129,7 @@ bool Events::check_if_system_tables_error() /** Reconstructs interval expression from interval type and expression - value that is in form of a value of the smalles entity: + value that is in form of a value of the smallest entity: For YEAR_MONTH - expression is in months DAY_MINUTE - expression is in minutes diff --git a/sql/field.cc b/sql/field.cc index 18c38d59297..49ee54883e4 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -42,7 +42,7 @@ #define MAX_EXPONENT 1024 /***************************************************************************** - Instansiate templates and static variables + Instantiate templates and static variables *****************************************************************************/ static const char *zero_timestamp="0000-00-00 00:00:00.000000"; @@ -88,7 +88,7 @@ inline bool Field::marked_for_write_or_computed() const /* Rules for merging different types of fields in UNION - NOTE: to avoid 256*256 table, gap in table types numeration is skiped + NOTE: to avoid 256*256 table, gap in table types numeration is skipped following #defines describe that gap and how to canculate number of fields and index of field in this array. */ @@ -1490,7 +1490,7 @@ Item *Field_num::get_equal_zerofill_const_item(THD *thd, const Context &ctx, /** - Contruct warning parameters using thd->no_errors +Construct warning parameters using thd->no_errors to determine whether to generate or suppress warnings. We can get here in a query like this: SELECT COUNT(@@basedir); @@ -1538,7 +1538,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd, if (filter.want_warning_edom()) { /* - We can use err.ptr() here as ErrConvString is guranteed to put an + We can use err.ptr() here as ErrConvString is guaranteed to put an end \0 here. */ THD *wthd= thd ? thd : current_thd; @@ -1570,7 +1570,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd, - found garbage at the end of the string. @param type Data type name (e.g. "decimal", "integer", "double") - @param edom Indicates that the string-to-number routine retuned + @param edom Indicates that the string-to-number routine returned an error code equivalent to EDOM (value out of domain), i.e. 
the string fully consisted of garbage and the conversion routine could not get any digits from it. @@ -1633,7 +1633,7 @@ int Field_num::check_edom_and_truncation(const char *type, bool edom, /* - Conver a string to an integer then check bounds. + Convert a string to an integer then check bounds. SYNOPSIS Field_num::get_int @@ -2730,7 +2730,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs) We only have to generate warnings if count_cuted_fields is set. This is to avoid extra checks of the number when they are not needed. Even if this flag is not set, it's OK to increment warnings, if - it makes the code easer to read. + it makes the code easier to read. */ if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION) @@ -2813,7 +2813,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs) } /* - Now write the formated number + Now write the formatted number First the digits of the int_% parts. Do we have enough room to write these digits ? @@ -3333,7 +3333,7 @@ int Field_new_decimal::store(const char *from, size_t length, If check_decimal() failed because of EDOM-alike error, (e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero. Note: if check_decimal() failed because of truncation, - decimal_value is alreay properly initialized. + decimal_value is already properly initialized. */ my_decimal_set_zero(&decimal_value); /* @@ -4749,11 +4749,12 @@ int truncate_double(double *nr, uint field_length, uint dec, { uint order= field_length - dec; uint step= array_elements(log_10) - 1; - max_value= 1.0; + double max_value_by_dec= 1.0; for (; order > step; order-= step) - max_value*= log_10[step]; - max_value*= log_10[order]; - max_value-= 1.0 / log_10[dec]; + max_value_by_dec*= log_10[step]; + max_value_by_dec*= log_10[order]; + max_value_by_dec-= 1.0 / log_10[dec]; + set_if_smaller(max_value, max_value_by_dec); /* Check for infinity so we don't get NaN in calculations */ if (!std::isinf(res)) @@ -5041,7 +5042,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg, { /* We mark the flag with TIMESTAMP_FLAG to indicate to the client that - this field will be automaticly updated on insert. + this field will be automatically updated on insert. */ flags|= TIMESTAMP_FLAG; if (unireg_check != TIMESTAMP_DN_FIELD) @@ -7543,7 +7544,7 @@ Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end, with the real type. Since all allowable types have 0xF as most significant bits of the metadata word, lengths <256 will not affect the real type at all, while all other values will result in a - non-existant type in the range 17-244. + non-existent type in the range 17-244. 
@see Field_string::unpack @@ -7729,8 +7730,7 @@ void Field_varstring::mark_unused_memory_as_defined() #endif -int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr, - uint max_len) +int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr) { uint a_length, b_length; int diff; @@ -7745,8 +7745,8 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr, a_length= uint2korr(a_ptr); b_length= uint2korr(b_ptr); } - set_if_smaller(a_length, max_len); - set_if_smaller(b_length, max_len); + set_if_smaller(a_length, field_length); + set_if_smaller(b_length, field_length); diff= field_charset->coll->strnncollsp(field_charset, a_ptr+ length_bytes, @@ -7758,6 +7758,43 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr, } +static int cmp_str_prefix(const uchar *ua, size_t alen, const uchar *ub, + size_t blen, size_t prefix, CHARSET_INFO *cs) +{ + const char *a= (char*)ua, *b= (char*)ub; + MY_STRCOPY_STATUS status; + prefix/= cs->mbmaxlen; + alen= cs->cset->well_formed_char_length(cs, a, a + alen, prefix, &status); + blen= cs->cset->well_formed_char_length(cs, b, b + blen, prefix, &status); + return cs->coll->strnncollsp(cs, ua, alen, ub, blen); +} + + + +int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, + size_t prefix_len) +{ + /* avoid expensive well_formed_char_length if possible */ + if (prefix_len == table->field[field_index]->field_length) + return Field_varstring::cmp(a_ptr, b_ptr); + + size_t a_length, b_length; + + if (length_bytes == 1) + { + a_length= *a_ptr; + b_length= *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } + return cmp_str_prefix(a_ptr+length_bytes, a_length, b_ptr+length_bytes, + b_length, prefix_len, field_charset); +} + + /** @note varstring and blob keys are ALWAYS stored with a 2 byte length prefix @@ -8262,8 +8299,7 @@ longlong Field_varstring_compressed::val_int(void) } -int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr, - uint max_len) +int Field_varstring_compressed::cmp(const uchar *a_ptr, const uchar *b_ptr) { String a, b; uint a_length, b_length; @@ -8282,11 +8318,6 @@ int Field_varstring_compressed::cmp_max(const uchar *a_ptr, const uchar *b_ptr, uncompress(&a, &a, a_ptr + length_bytes, a_length); uncompress(&b, &b, b_ptr + length_bytes, b_length); - if (a.length() > max_len) - a.length(max_len); - if (b.length() > max_len) - b.length(max_len); - return sortcmp(&a, &b, field_charset); } @@ -8521,16 +8552,24 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b, } -int Field_blob::cmp_max(const uchar *a_ptr, const uchar *b_ptr, - uint max_length) +int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr) +{ + uchar *blob1,*blob2; + memcpy(&blob1, a_ptr+packlength, sizeof(char*)); + memcpy(&blob2, b_ptr+packlength, sizeof(char*)); + size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr); + return cmp(blob1, (uint32)a_len, blob2, (uint32)b_len); +} + + +int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr, + size_t prefix_len) { uchar *blob1,*blob2; memcpy(&blob1, a_ptr+packlength, sizeof(char*)); memcpy(&blob2, b_ptr+packlength, sizeof(char*)); - uint a_len= get_length(a_ptr), b_len= get_length(b_ptr); - set_if_smaller(a_len, max_length); - set_if_smaller(b_len, max_length); - return Field_blob::cmp(blob1,a_len,blob2,b_len); + size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr); + return cmp_str_prefix(blob1, a_len, blob2, b_len, prefix_len, field_charset); } @@ -9997,7 +10036,7 @@ 
my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value) The a and b pointer must be pointers to the field in a record (not the table->record[0] necessarily) */ -int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len) +int Field_bit::cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) { my_ptrdiff_t a_diff= a - ptr; my_ptrdiff_t b_diff= b - ptr; @@ -10481,6 +10520,19 @@ void Column_definition::set_attributes(const Lex_field_type_st &type, set_handler(type.type_handler()); charset= cs; +#if MYSQL_VERSION_ID > 100500 +#error When merging to 10.5, please move the code below to +#error Type_handler_timestamp_common::Column_definition_set_attributes() +#else + /* + Unlike other types TIMESTAMP fields are NOT NULL by default. + Unless --explicit-defaults-for-timestamp is given. + */ + if (!opt_explicit_defaults_for_timestamp && + type.type_handler()->field_type() == MYSQL_TYPE_TIMESTAMP) + flags|= NOT_NULL_FLAG; +#endif + if (type.length()) { int err; diff --git a/sql/field.h b/sql/field.h index 25d7ad4cff0..58ff9ea2554 100644 --- a/sql/field.h +++ b/sql/field.h @@ -280,7 +280,7 @@ protected: }; - // String-to-number convertion methods for the old code compatibility + // String-to-number conversion methods for the old code compatibility longlong longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr, const char *end) const { @@ -361,7 +361,7 @@ public: /* Item context attributes. Comparison functions pass their attributes to propagate_equal_fields(). - For exmple, for string comparison, the collation of the comparison + For example, for string comparison, the collation of the comparison operation is important inside propagate_equal_fields(). */ class Context @@ -1089,9 +1089,13 @@ public: return type(); } inline int cmp(const uchar *str) { return cmp(ptr,str); } - virtual int cmp_max(const uchar *a, const uchar *b, uint max_len) - { return cmp(a, b); } virtual int cmp(const uchar *,const uchar *)=0; + /* + The following method is used for comparing prefix keys. + Currently it's only used in partitioning. 
+ */ + virtual int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) + { return cmp(a, b); } virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U) { return memcmp(a,b,pack_length()); } virtual int cmp_offset(my_ptrdiff_t row_offset) @@ -3702,11 +3706,8 @@ public: longlong val_int(void); String *val_str(String*,String *); my_decimal *val_decimal(my_decimal *); - int cmp_max(const uchar *, const uchar *, uint max_length); - int cmp(const uchar *a,const uchar *b) - { - return cmp_max(a, b, ~0U); - } + int cmp(const uchar *a,const uchar *b); + int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len); void sort_string(uchar *buff,uint length); uint get_key_image(uchar *buff,uint length, imagetype type); void set_key_image(const uchar *buff,uint length); @@ -3776,7 +3777,7 @@ private: { return (field_length - 1) / field_charset->mbmaxlen; } - int cmp_max(const uchar *a_ptr, const uchar *b_ptr, uint max_len); + int cmp(const uchar *a_ptr, const uchar *b_ptr); /* Compressed fields can't have keys as two rows may have different @@ -3944,9 +3945,8 @@ public: longlong val_int(void); String *val_str(String*,String *); my_decimal *val_decimal(my_decimal *); - int cmp_max(const uchar *, const uchar *, uint max_length); - int cmp(const uchar *a,const uchar *b) - { return cmp_max(a, b, ~0U); } + int cmp(const uchar *a,const uchar *b); + int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len); int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length); int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U); int key_cmp(const uchar *,const uchar*); @@ -4391,7 +4391,7 @@ private: This is the reason: - Field_bit::cmp_binary() is only implemented in the base class (Field::cmp_binary()). - - Field::cmp_binary() currenly use pack_length() to calculate how + - Field::cmp_binary() currently uses pack_length() to calculate how long the data is. - pack_length() includes size of the bits stored in the NULL bytes of the record. @@ -4450,7 +4450,7 @@ public: } int cmp_binary_offset(uint row_offset) { return cmp_offset(row_offset); } - int cmp_max(const uchar *a, const uchar *b, uint max_length); + int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len); int key_cmp(const uchar *a, const uchar *b) { return cmp_binary((uchar *) a, (uchar *) b); } int key_cmp(const uchar *str, uint length); diff --git a/sql/field_conv.cc b/sql/field_conv.cc index bcd4c5fbb38..7ec93e032e6 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -230,7 +230,7 @@ static void do_skip(Copy_field *copy __attribute__((unused))) note: if the record we're copying from is NULL-complemetned (i.e. from_field->table->null_row==1), it will also have all NULLable columns to be - set to NULLs, so we dont need to check table->null_row here. + set to NULLs, so we don't need to check table->null_row here. 
*/ static void do_copy_null(Copy_field *copy) diff --git a/sql/filesort.cc b/sql/filesort.cc index 6a04f6ebfdb..aa25474be1a 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -850,12 +850,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, } if (!quick_select) { - (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ + (void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */ if (!next_pos) file->ha_rnd_end(); } - /* Signal we should use orignal column read and write maps */ + /* Signal we should use original column read and write maps */ sort_form->column_bitmaps_set(save_read_set, save_write_set); if (unlikely(thd->is_error())) diff --git a/sql/gcalc_slicescan.cc b/sql/gcalc_slicescan.cc index 4919e5b959b..b3752420316 100644 --- a/sql/gcalc_slicescan.cc +++ b/sql/gcalc_slicescan.cc @@ -1877,7 +1877,7 @@ int Gcalc_scan_iterator::add_eq_node(Gcalc_heap::Info *node, point *sp) if (!en) GCALC_DBUG_RETURN(1); - /* eq_node iserted after teh equal point. */ + /* eq_node inserted after the equal point. */ en->next= node->get_next(); node->next= en; diff --git a/sql/gcalc_slicescan.h b/sql/gcalc_slicescan.h index 54b12962d2a..b5188f29dfd 100644 --- a/sql/gcalc_slicescan.h +++ b/sql/gcalc_slicescan.h @@ -362,9 +362,9 @@ enum Gcalc_scan_events /* - Gcalc_scan_iterator incapsulates the slisescan algorithm. - It takes filled Gcalc_heap as an datasource. Then can be - iterated trought the vertexes and intersection points with + Gcalc_scan_iterator incapsulates the slicescan algorithm. + It takes filled Gcalc_heap as a datasource. Then can be + iterated through the vertexes and intersection points with the step() method. After the 'step()' one usually observes the current 'slice' to do the necessary calculations, like looking for intersections, calculating the area, whatever. 
diff --git a/sql/gcalc_tools.cc b/sql/gcalc_tools.cc index 14a7c6331f3..307f063fb43 100644 --- a/sql/gcalc_tools.cc +++ b/sql/gcalc_tools.cc @@ -1184,14 +1184,14 @@ int Gcalc_operation_reducer::connect_threads( { rp0->outer_poly= prev_range->thread_start; tb->thread_start= prev_range->thread_start; - /* Chack if needed */ + /* Check if needed */ ta->thread_start= prev_range->thread_start; } else { rp0->outer_poly= 0; ta->thread_start= rp0; - /* Chack if needed */ + /* Check if needed */ tb->thread_start= rp0; } GCALC_DBUG_RETURN(0); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 413c14d017e..b767045fa26 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1480,7 +1480,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, /** - @brief Check and repair the table if neccesary + @brief Check and repair the table if necessary @param thd Thread object @@ -2969,7 +2969,7 @@ error: /** Read the .par file to get the partitions engines and names - @param name Name of table file (without extention) + @param name Name of table file (without extension) @return Operation status @retval true Failure @@ -3199,7 +3199,7 @@ static uchar *get_part_name(PART_NAME_DEF *part, size_t *length, @return Operation status @retval true Failure - @retval false Sucess + @retval false Success */ bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id, @@ -3325,7 +3325,7 @@ err: @return Operation status @retval true Failure - @retval false Sucess + @retval false Success */ bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg) @@ -4298,7 +4298,7 @@ int ha_partition::write_row(const uchar * buf) /* If we have failed to set the auto-increment value for this row, it is highly likely that we will not be able to insert it into - the correct partition. We must check and fail if neccessary. + the correct partition. We must check and fail if necessary. */ if (unlikely(error)) goto exit; @@ -4369,7 +4369,7 @@ exit: have the previous row record in it, while new_data will have the newest data in it. Keep in mind that the server can do updates based on ordering if an - ORDER BY clause was used. Consecutive ordering is not guarenteed. + ORDER BY clause was used. Consecutive ordering is not guaranteed. Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. new_data is always record[0] @@ -4502,7 +4502,7 @@ exit: (from either a previous rnd_xxx() or index_xxx() call). If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. - Keep in mind that the server does no guarentee consecutive deletions. + Keep in mind that the server does no guarantee consecutive deletions. ORDER BY clauses can be used. Called in sql_acl.cc and sql_udf.cc to manage internal table information. @@ -4921,7 +4921,7 @@ int ha_partition::end_bulk_insert() When scan is used we will scan one handler partition at a time. When preparing for rnd_pos we will init all handler partitions. - No extra cache handling is needed when scannning is not performed. + No extra cache handling is needed when scanning is not performed. Before initialising we will call rnd_end to ensure that we clean up from any previous incarnation of a table scan. @@ -8630,7 +8630,7 @@ static int end_keyread_cb(handler* h, void *unused) function after completing a query. 3) It is called when deleting the QUICK_RANGE_SELECT object if the QUICK_RANGE_SELECT object had its own handler object. 
It is called - immediatley before close of this local handler object. + immediately before close of this local handler object. HA_EXTRA_KEYREAD: HA_EXTRA_NO_KEYREAD: These parameters are used to provide an optimisation hint to the handler. @@ -8667,7 +8667,7 @@ static int end_keyread_cb(handler* h, void *unused) HA_EXTRA_IGNORE_DUP_KEY: HA_EXTRA_NO_IGNORE_DUP_KEY: Informs the handler to we will not stop the transaction if we get an - duplicate key errors during insert/upate. + duplicate key errors during insert/update. Always called in pair, triggered by INSERT IGNORE and other similar SQL constructs. Not used by MyISAM. @@ -10158,7 +10158,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table, /* Changing to similar partitioning, only update metadata. - Non allowed changes would be catched in prep_alter_part_table(). + Non allowed changes would be caought in prep_alter_part_table(). */ if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO) { @@ -10194,7 +10194,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table, /* Changing to similar partitioning, only update metadata. - Non allowed changes would be catched in prep_alter_part_table(). + Non allowed changes would be caught in prep_alter_part_table(). */ if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO) { @@ -10242,7 +10242,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table, /* Changing to similar partitioning, only update metadata. - Non allowed changes would be catched in prep_alter_part_table(). + Non allowed changes would be caught in prep_alter_part_table(). */ if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO) { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index ea9d3e0f610..6c9ce40d224 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -512,7 +512,7 @@ public: ------------------------------------------------------------------------- MODULE create/delete handler object ------------------------------------------------------------------------- - Object create/delete methode. The normal called when a table object + Object create/delete method. Normally called when a table object exists. There is also a method to create the handler object with only partition information. This is used from mysql_create_table when the table is to be created and the engine type is deduced to be the @@ -826,7 +826,7 @@ public: /** @breif - Positions an index cursor to the index specified in the hanlde. Fetches the + Positions an index cursor to the index specified in the handle. Fetches the row if available. If the key value is null, begin at first key of the index. */ @@ -1124,7 +1124,7 @@ public: HA_REC_NOT_IN_SEQ: This flag is set for handlers that cannot guarantee that the rows are - returned accroding to incremental positions (0, 1, 2, 3...). + returned according to incremental positions (0, 1, 2, 3...). This also means that rnd_next() should return HA_ERR_RECORD_DELETED if it finds a deleted row. (MyISAM (not fixed length row), HEAP, InnoDB) diff --git a/sql/handler.cc b/sql/handler.cc index 21922ec9d73..40dea349272 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -745,7 +745,7 @@ int ha_end() DBUG_ENTER("ha_end"); /* - This should be eventualy based on the graceful shutdown flag. + This should be eventually based on the graceful shutdown flag. So if flag is equal to HA_PANIC_CLOSE, the deallocate the errors. */ @@ -1406,8 +1406,8 @@ int ha_commit_trans(THD *thd, bool all) THD_TRANS *trans= all ? 
&thd->transaction.all : &thd->transaction.stmt; /* "real" is a nick name for a transaction for which a commit will - make persistent changes. E.g. a 'stmt' transaction inside a 'all' - transation is not 'real': even though it's possible to commit it, + make persistent changes. E.g. a 'stmt' transaction inside an 'all' + transaction is not 'real': even though it's possible to commit it, the changes are not durable as they might be rolled back if the enclosing 'all' transaction is rolled back. */ @@ -2672,7 +2672,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root) /* TODO: Implement a more efficient way to have more than one index open for - the same table instance. The ha_open call is not cachable for clone. + the same table instance. The ha_open call is not cacheable for clone. This is not critical as the engines already have the table open and should be able to use the original instance of the table. @@ -3529,7 +3529,7 @@ int handler::update_auto_increment() index_init() or rnd_init() and in any column_bitmaps_signal() call after this. - The handler is allowd to do changes to the bitmap after a index_init or + The handler is allowed to do changes to the bitmap after a index_init or rnd_init() call is made as after this, MySQL will not use the bitmap for any program logic checking. */ @@ -3592,7 +3592,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, { // Autoincrement at key-start error= ha_index_last(table->record[1]); /* - MySQL implicitely assumes such method does locking (as MySQL decides to + MySQL implicitly assumes such method does locking (as MySQL decides to use nr+increment without checking again with the handler, in handler::update_auto_increment()), so reserves to infinite. */ @@ -6615,7 +6615,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM); t_field= static_cast<Item_field *>(fnc->arguments()[0])->field; uint length= (uint)fnc->arguments()[1]->val_int(); - if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length)) + if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length)) is_same= false; } } diff --git a/sql/handler.h b/sql/handler.h index f4399ef0f5d..80d0f1b59d8 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -231,7 +231,7 @@ enum enum_alter_inplace_result { this flag must implement start_read_removal() and end_read_removal(). The handler may return "fake" rows constructed from the key of the row asked for. This is used to optimize UPDATE and DELETE by reducing the - numer of roundtrips between handler and storage engine. + number of roundtrips between handler and storage engine. Example: UPDATE a=1 WHERE pk IN (<keys>) @@ -560,7 +560,7 @@ enum enum_binlog_command { /* Bits in used_fields */ #define HA_CREATE_USED_AUTO (1UL << 0) -#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble +#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer available #define HA_CREATE_USED_UNION (1UL << 2) #define HA_CREATE_USED_INSERT_METHOD (1UL << 3) #define HA_CREATE_USED_MIN_ROWS (1UL << 4) @@ -1221,7 +1221,7 @@ struct handler_iterator { /* Pointer to buffer for the iterator to use. Should be allocated by function which created the iterator and - destroied by freed by above "destroy" call + destroyed by freed by above "destroy" call */ void *buffer; }; @@ -1439,7 +1439,7 @@ struct handlerton "cookie". 
The flush and call of commit_checkpoint_notify_ha() need not happen - immediately - it can be scheduled and performed asynchroneously (ie. as + immediately - it can be scheduled and performed asynchronously (ie. as part of next prepare(), or sync every second, or whatever), but should not be postponed indefinitely. It is however also permissible to do it immediately, before returning from commit_checkpoint_request(). @@ -1529,7 +1529,7 @@ struct handlerton file extention. This is implied by the open_table_error() and the default discovery implementation. - Second element - data file extention. This is implied + Second element - data file extension. This is implied assumed by REPAIR TABLE ... USE_FRM implementation. */ const char **tablefile_extensions; // by default - empty list @@ -2223,7 +2223,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st, CONVERT TO CHARACTER SET DEFAULT to CONVERT TO CHARACTER SET <character-set-of-the-current-database> - TODO: Should't we postpone resolution of DEFAULT until the + TODO: Shouldn't we postpone resolution of DEFAULT until the character set of the table owner database is loaded from its db.opt? */ DBUG_ASSERT(cs); @@ -3017,7 +3017,7 @@ public: ha_statistics stats; /** MultiRangeRead-related members: */ - range_seq_t mrr_iter; /* Interator to traverse the range sequence */ + range_seq_t mrr_iter; /* Iterator to traverse the range sequence */ RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */ HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */ uint ranges_in_seq; /* Total number of ranges in the traversed sequence */ @@ -4022,7 +4022,7 @@ public: This method offers the storage engine, the possibility to store a reference to a table name which is going to be used with query cache. The method is called each time a statement is written to the cache and can - be used to verify if a specific statement is cachable. It also offers + be used to verify if a specific statement is cacheable. It also offers the possibility to register a generic (but static) call back function which is called each time a statement is matched against the query cache. diff --git a/sql/item.cc b/sql/item.cc index 4c2c03cb3b2..61651323626 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5011,7 +5011,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, @note We have to mark all items between current_sel (including) and - last_select (excluding) as dependend (select before last_select should + last_select (excluding) as dependent (select before last_select should be marked with actual table mask used by resolved item, all other with OUTER_REF_TABLE_BIT) and also write dependence information to Item of resolved identifier. @@ -5387,7 +5387,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) bool upward_lookup= FALSE; TABLE_LIST *table_list; - /* Calulate the TABLE_LIST for the table */ + /* Calculate the TABLE_LIST for the table */ table_list= (cached_table ? cached_table : field_found && (*from_field) != view_ref_found ? (*from_field)->table->pos_in_table_list : 0); @@ -6123,7 +6123,7 @@ Item *Item_field::propagate_equal_fields(THD *thd, but failed to create a valid DATE literal from the given string literal. Do not do constant propagation in such cases and unlink - "this" from the found Item_equal (as this equality not usefull). + "this" from the found Item_equal (as this equality not useful). 
*/ item_equal= NULL; return this; @@ -7844,7 +7844,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) /* Due to cache, find_field_in_tables() can return field which doesn't belong to provided outer_context. In this case we have - to find proper field context in order to fix field correcly. + to find proper field context in order to fix field correctly. */ do { @@ -8029,9 +8029,9 @@ Item* Item_ref::transform(THD *thd, Item_transformer transformer, uchar *arg) callback functions. First the function applies the analyzer to the Item_ref object. Then - if the analizer succeeeds we first applies the compile method to the + if the analyzer succeeds we first apply the compile method to the object the Item_ref object is referencing. If this returns a new - item the old item is substituted for a new one. After this the + item the old item is substituted for a new one. After this the transformer is applied to the Item_ref object itself. The compile function is not called if the analyzer returns NULL in the parameter arg_p. diff --git a/sql/item.h b/sql/item.h index d779e2d6f16..2d5cedbee76 100644 --- a/sql/item.h +++ b/sql/item.h @@ -164,7 +164,7 @@ void dummy_error_processor(THD *thd, void *data); void view_error_processor(THD *thd, void *data); /* - Instances of Name_resolution_context store the information necesary for + Instances of Name_resolution_context store the information necessary for name resolution of Items and other context analysis of a query made in fix_fields(). @@ -344,7 +344,7 @@ public: Monotonicity is defined only for Item* trees that represent table partitioning expressions (i.e. have no subselects/user vars/PS parameters etc etc). An Item* tree is assumed to have the same monotonicity properties - as its correspoinding function F: + as its corresponding function F: [signed] longlong F(field1, field2, ...) { put values of field_i into table record buffer; @@ -1131,7 +1131,7 @@ public: /* real_type() is the type of base item. This is same as type() for most items, except Item_ref() and Item_cache_wrapper() where it - shows the type for the underlaying item. + shows the type for the underlying item. */ virtual enum Type real_type() const { return type(); } @@ -1277,7 +1277,7 @@ public: The caller can modify the returned String, if it's not marked "const" (with the String::mark_as_const() method). That means that if the item returns its own internal buffer (e.g. tmp_value), it - *must* be marked "const" [1]. So normally it's preferrable to + *must* be marked "const" [1]. So normally it's preferable to return the result value in the String, that was passed as an argument. But, for example, SUBSTR() returns a String that simply points into the buffer of SUBSTR()'s args[0]->val_str(). Such a @@ -1755,7 +1755,7 @@ public: @param cond_ptr[OUT] Store a replacement item here if the condition can be simplified, e.g.: WHERE part1 OR part2 OR part3 - with one of the partN evalutating to SEL_TREE::ALWAYS. + with one of the partN evaluating to SEL_TREE::ALWAYS. */ virtual SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); /* @@ -2292,8 +2292,9 @@ public: virtual bool is_outer_field() const { DBUG_ASSERT(is_fixed()); return FALSE; } /** - Checks if this item or any of its decendents contains a subquery. This is a - replacement of the former Item::has_subquery() and Item::with_subselect. + Checks if this item or any of its descendents contains a subquery. + This is a replacement of the former Item::has_subquery() and + Item::with_subselect. 
*/ virtual bool with_subquery() const { DBUG_ASSERT(is_fixed()); return false; } @@ -6055,7 +6056,7 @@ public: This is the method that updates the cached value. It must be explicitly called by the user of this class to store the value - of the orginal item in the cache. + of the original item in the cache. */ virtual void copy() = 0; diff --git a/sql/item_buff.cc b/sql/item_buff.cc index 81949bcdae0..9c96fdb1a9a 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -192,7 +192,7 @@ bool Cached_item_field::cmp(void) /* If value is not null and value changed (from null to not null or - becasue of value change), then copy the new value to buffer. + because of value change), then copy the new value to buffer. */ if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0)))) field->get_image(buff,length,field->charset()); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index b23aca2c5f3..5a3a418f432 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1396,7 +1396,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) @note Item_in_optimizer should work as pass-through for - subqueries that were processed by ALL/ANY->MIN/MAX rewrite - - subqueries taht were originally EXISTS subqueries (and were coverted by + - subqueries that were originally EXISTS subqueries (and were coinverted by the EXISTS->IN rewrite) When Item_in_optimizer is not not working as a pass-through, it @@ -1986,8 +1986,8 @@ longlong Item_func_interval::val_int() interval_range *range= intervals + mid; my_bool cmp_result; /* - The values in the range intervall may have different types, - Only do a decimal comparision of the first argument is a decimal + The values in the range interval may have different types, + Only do a decimal comparison if the first argument is a decimal and we are comparing against a decimal */ if (dec && range->type == DECIMAL_RESULT) @@ -2619,7 +2619,7 @@ Item_func_nullif::fix_length_and_dec() Some examples of what NULLIF can end up with after argument substitution (we don't mention args[1] in some cases for simplicity): - 1. l_expr is not an aggragate function: + 1. l_expr is not an aggregate function: a. No conversion happened. args[0] and args[2] were not replaced to something else @@ -2743,7 +2743,7 @@ Item_func_nullif::fix_length_and_dec() In this case we remember and reuse m_arg0 during EXECUTE time as args[2]. QQ: How to make sure that m_args0 does not point - to something temporary which will be destoyed between PREPARE and EXECUTE. + to something temporary which will be destroyed between PREPARE and EXECUTE. The condition below should probably be more strict and somehow check that: - change_item_tree() was called for the new args[0] - m_args0 is referenced from inside args[0], e.g. as a function argument, @@ -7154,7 +7154,7 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item) and not ot2.col. eliminate_item_equal() also has code that deals with equality substitution - in presense of SJM nests. + in presence of SJM nests. */ TABLE_LIST *emb_nest; diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 178ed8360dd..71b8dedeb6c 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -180,7 +180,7 @@ protected: /* Return the full select tree for "field_item" and "value": - a single SEL_TREE if the field is not in a multiple equality, or - - a conjuction of all SEL_TREEs for all fields from + - a conjunction of all SEL_TREEs for all fields from the same multiple equality with "field_item". 
*/ SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param, diff --git a/sql/item_func.cc b/sql/item_func.cc index 8000d7f6a1d..beb138215e7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -506,7 +506,7 @@ Item *Item_func::transform(THD *thd, Item_transformer transformer, uchar *argume callback functions. First the function applies the analyzer to the root node of - the Item_func object. Then if the analizer succeeeds (returns TRUE) + the Item_func object. Then if the analyzer succeeds (returns TRUE) the function recursively applies the compile method to each argument of the Item_func node. If the call of the method for an argument item returns a new item @@ -1482,13 +1482,14 @@ double Item_func_div::real_op() my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) { int err; + my_decimal tmp; VDec2_lazy val(args[0], args[1]); if ((null_value= val.has_null())) return 0; if ((err= check_decimal_overflow(my_decimal_div(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW & ~E_DEC_DIV_ZERO, - decimal_value, + &tmp, val.m_a.ptr(), val.m_b.ptr(), prec_increment))) > 3) { @@ -1497,6 +1498,7 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) null_value= 1; return 0; } + tmp.round_to(decimal_value, decimals, HALF_UP); return decimal_value; } @@ -1553,7 +1555,7 @@ bool Item_func_div::fix_length_and_dec() DBUG_ENTER("Item_func_div::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); prec_increment= current_thd->variables.div_precincrement; - maybe_null= 1; // devision by zero + maybe_null= 1; // division by zero const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_div; DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;); @@ -4685,7 +4687,7 @@ bool Item_func_set_user_var::register_field_in_bitmap(void *arg) @param type type of new value @param cs charset info for new value @param dv derivation for new value - @param unsigned_arg indiates if a value of type INT_RESULT is unsigned + @param unsigned_arg indicates if a value of type INT_RESULT is unsigned @note Sets error and fatal error if allocation fails. @@ -6585,7 +6587,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) /* Here we check privileges of the stored routine only during view creation, in order to validate the view. A runtime check is - perfomed in Item_func_sp::execute(), and this method is not + performed in Item_func_sp::execute(), and this method is not called during context analysis. Notice, that during view creation we do not infer into stored routine bodies and do not check privileges of its statements, which would probably be a diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc index 082584181a4..a2abbce6619 100644 --- a/sql/item_inetfunc.cc +++ b/sql/item_inetfunc.cc @@ -372,7 +372,7 @@ bool Inet6::make_from_item(Item *item) @return Completion status. @retval true - error, the given string does not represent an IPv4-address. - @retval false - ok, the string has been converted sucessfully. + @retval false - ok, the string has been converted successfully. @note The problem with inet_pton() is that it treats leading zeros in IPv4-part differently on different platforms. @@ -496,7 +496,7 @@ bool Inet4::ascii_to_ipv4(const char *str, size_t str_length) @return Completion status. @retval true - error, the given string does not represent an IPv6-address. - @retval false - ok, the string has been converted sucessfully. + @retval false - ok, the string has been converted successfully. 
@note The problem with inet_pton() is that it treats leading zeros in IPv4-part differently on different platforms. @@ -851,7 +851,7 @@ size_t Inet6::to_string(char *dst, size_t dstsize) const @return Completion status. @retval NULL Given string does not represent an IP-address. - @retval !NULL The string has been converted sucessfully. + @retval !NULL The string has been converted successfully. */ String *Item_func_inet6_aton::val_str(String *buffer) diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 0baa762d25f..a3ff8dd389a 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -989,7 +989,7 @@ String *Item_func_concat_ws::val_str(String *str) goto null; // Must be a blob } else if (res2 == &tmp_value) - { // This can happend only 1 time + { // This can happen only 1 time if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res)) goto null; res= &tmp_value; @@ -1139,7 +1139,7 @@ bool Item_func_reverse::fix_length_and_dec() } /** - Replace all occurences of string2 in string1 with string3. + Replace all occurrences of string2 in string1 with string3. Don't reallocate val_str() if not needed. @@ -3999,7 +3999,7 @@ bool Item_func_export_set::fix_length_and_dec() using in a SQL statement. Adds a \\ before all characters that needs to be escaped in a SQL string. - We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when + We also escape '^Z' (END-OF-FILE in windows) to avoid problems when running commands from a file in windows. This function is very useful when you want to generate SQL statements. diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 889f9902e12..578f0ded00e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1135,12 +1135,12 @@ void Item_singlerow_subselect::reset() /** @todo - - We cant change name of Item_field or Item_ref, because it will - prevent it's correct resolving, but we should save name of + - We can't change name of Item_field or Item_ref, because it will + prevent its correct resolving, but we should save name of removed item => we do not make optimization if top item of list is field or reference. - switch off this optimization for prepare statement, - because we do not rollback this changes. + because we do not rollback these changes. Make rollback for it, or special name resolving mode in 5.0. @param join Join object of the subquery (i.e. 'child' join). @@ -1163,8 +1163,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join) select_lex->item_list.elements == 1 && !select_lex->item_list.head()->with_sum_func() && /* - We cant change name of Item_field or Item_ref, because it will - prevent it's correct resolving, but we should save name of + We can't change name of Item_field or Item_ref, because it will + prevent its correct resolving, but we should save name of removed item => we do not make optimization if top item of list is field or reference. TODO: solve above problem @@ -1662,7 +1662,7 @@ longlong Item_exists_subselect::val_int() Return the result of EXISTS as a string value Converts the true/false result into a string value. - Note that currently this cannot be NULL, so if the query exection fails + Note that currently this cannot be NULL, so if the query execution fails it will return 0. @param decimal_value[out] buffer to hold the resulting string value @@ -1685,7 +1685,7 @@ String *Item_exists_subselect::val_str(String *str) Return the result of EXISTS as a decimal value Converts the true/false result into a decimal value. 
- Note that currently this cannot be NULL, so if the query exection fails + Note that currently this cannot be NULL, so if the query execution fails it will return 0. @param decimal_value[out] Buffer to hold the resulting decimal value @@ -2383,7 +2383,7 @@ Item_in_subselect::row_value_transformer(JOIN *join) is_not_null_test(v3)) where is_not_null_test registers NULLs values but reject rows. - in case when we do not need correct NULL, we have simplier construction: + in case when we do not need correct NULL, we have simpler construction: EXISTS (SELECT ... WHERE where and (l1 = v1) and (l2 = v2) and @@ -2786,6 +2786,8 @@ bool Item_exists_subselect::select_prepare_to_be_in() Check if 'func' is an equality in form "inner_table.column = outer_expr" @param func Expression to check + @param allow_subselect If true, the outer_expr part can have a subquery + If false, it cannot. @param local_field OUT Return "inner_table.column" here @param outer_expr OUT Return outer_expr here @@ -2793,6 +2795,7 @@ bool Item_exists_subselect::select_prepare_to_be_in() */ static bool check_equality_for_exist2in(Item_func *func, + bool allow_subselect, Item_ident **local_field, Item **outer_exp) { @@ -2803,7 +2806,8 @@ static bool check_equality_for_exist2in(Item_func *func, args= func->arguments(); if (args[0]->real_type() == Item::FIELD_ITEM && args[0]->all_used_tables() != OUTER_REF_TABLE_BIT && - args[1]->all_used_tables() == OUTER_REF_TABLE_BIT) + args[1]->all_used_tables() == OUTER_REF_TABLE_BIT && + (allow_subselect || !args[1]->with_subquery())) { /* It is Item_field or Item_direct_view_ref) */ DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM || @@ -2814,7 +2818,8 @@ static bool check_equality_for_exist2in(Item_func *func, } else if (args[1]->real_type() == Item::FIELD_ITEM && args[1]->all_used_tables() != OUTER_REF_TABLE_BIT && - args[0]->all_used_tables() == OUTER_REF_TABLE_BIT) + args[0]->all_used_tables() == OUTER_REF_TABLE_BIT && + (allow_subselect || !args[0]->with_subquery())) { /* It is Item_field or Item_direct_view_ref) */ DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM || @@ -2843,6 +2848,13 @@ typedef struct st_eq_field_outer outer1=inner_tbl1.col1 AND ... AND outer2=inner_tbl1.col2 AND remainder_cond + if there is just one outer_expr=inner_expr pair, then outer_expr can have a + subselect in it. If there are many such pairs, then none of outer_expr can + have a subselect in it. If we allow this, the query will fail with an error: + + This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left + expression of IN/ALL/ANY' + @param conds Condition to be checked @parm result Array to collect EQ_FIELD_OUTER elements describing inner-vs-outer equalities the function has found. 
@@ -2860,14 +2872,17 @@ static bool find_inner_outer_equalities(Item **conds, { List_iterator<Item> li(*((Item_cond*)*conds)->argument_list()); Item *item; + bool allow_subselect= true; while ((item= li++)) { if (item->type() == Item::FUNC_ITEM && check_equality_for_exist2in((Item_func *)item, + allow_subselect, &element.local_field, &element.outer_exp)) { found= TRUE; + allow_subselect= false; element.eq_ref= li.ref(); if (result.append(element)) goto alloc_err; @@ -2876,6 +2891,7 @@ static bool find_inner_outer_equalities(Item **conds, } else if ((*conds)->type() == Item::FUNC_ITEM && check_equality_for_exist2in((Item_func *)*conds, + true, &element.local_field, &element.outer_exp)) { @@ -3242,7 +3258,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join) /* In some optimisation cases we will not need this Item_in_optimizer object, but we can't know it here, but here we need address correct - reference on left expresion. + reference on left expression. note: we won't need Item_in_optimizer when handling degenerate cases like "... IN (SELECT 1)" @@ -3274,7 +3290,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join) and all that items do not make permanent changes in current item arena which allow to us call them with changed arena (if we do not know nature of Item, we have to call fix_fields() for it only with original arena to - avoid memory leack) + avoid memory leak) */ if (left_expr->cols() == 1) trans_res= single_value_transformer(join); @@ -3437,7 +3453,7 @@ bool Item_in_subselect::setup_mat_engine() /* The select_engine (that executes transformed IN=>EXISTS subselects) is - pre-created at parse time, and is stored in statment memory (preserved + pre-created at parse time, and is stored in statement memory (preserved across PS executions). */ DBUG_ASSERT(engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE); @@ -3906,7 +3922,7 @@ int subselect_single_select_engine::exec() For at least one of the pushed predicates the following is true: We should not apply optimizations based on the condition that was pushed down into the subquery. Those optimizations are ref[_or_null] - acceses. Change them to be full table scans. + accesses. Change them to be full table scans. */ JOIN_TAB *tab; for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); @@ -6135,7 +6151,7 @@ int subselect_partial_match_engine::exec() if (has_covering_null_row) { /* - If there is a NULL-only row that coveres all columns the result of IN + If there is a NULL-only row that covers all columns the result of IN is UNKNOWN. */ item_in->value= 0; @@ -6330,7 +6346,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, for (uint i= (non_null_key ? 1 : 0); i < merge_keys_count; i++) { /* - Check if the first and only indexed column contains NULL in the curent + Check if the first and only indexed column contains NULL in the current row, and add the row number to the corresponding key. */ if (merge_keys[i]->get_field(0)->is_null()) @@ -6542,7 +6558,7 @@ bool subselect_rowid_merge_engine::partial_match() } /* - If all nullable columns contain only NULLs, then there is a guranteed + If all nullable columns contain only NULLs, then there is a guaranteed partial match, and we don't need to search for a matching row. 
*/ if (has_covering_null_columns) diff --git a/sql/item_subselect.h b/sql/item_subselect.h index dc8417495c5..a3105bd99e0 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -568,7 +568,7 @@ public: bool jtbm_const_row_found; /* - TRUE<=>this is a flattenable semi-join, false overwise. + TRUE<=>this is a flattenable semi-join, false otherwise. */ bool is_flattenable_semijoin; @@ -993,7 +993,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine /* FALSE for 'ref', TRUE for 'ref-or-null'. */ bool check_null; /* - The "having" clause. This clause (further reffered to as "artificial + The "having" clause. This clause (further referred to as "artificial having") was inserted by subquery transformation code. It contains Item(s) that have a side-effect: they record whether the subquery has produced a row with NULL certain components. We need to use it for cases @@ -1014,7 +1014,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine However, subqueries like the above are currently not handled by index lookup-based subquery engines, the engine applicability check misses them: it doesn't switch the engine for case of artificial having and - [eq_]ref access (only for artifical having + ref_or_null or no having). + [eq_]ref access (only for artificial having + ref_or_null or no having). The above example subquery is handled as a full-blown SELECT with eq_ref access to one table. @@ -1085,7 +1085,7 @@ public: */ JOIN *materialize_join; /* - A conjunction of all the equality condtions between all pairs of expressions + A conjunction of all the equality conditions between all pairs of expressions that are arguments of an IN predicate. We need these to post-filter some IN results because index lookups sometimes match values that are actually not equal to the search key in SQL terms. diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 88110367b38..b980923d03d 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -704,7 +704,7 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2) C_MODE_START -/* Declarations for auxilary C-callbacks */ +/* Declarations for auxiliary C-callbacks */ int simple_raw_key_cmp(void* arg, const void* key1, const void* key2) { @@ -736,7 +736,7 @@ C_MODE_END @param thd Thread descriptor @return status @retval FALSE success - @retval TRUE faliure + @retval TRUE failure Prepares Aggregator_distinct to process the incoming stream. Creates the temporary table and the Unique class if needed. @@ -1940,7 +1940,7 @@ void Item_sum_count::cleanup() /* - Avgerage + Average */ void Item_sum_avg::fix_length_and_dec_decimal() @@ -2206,7 +2206,7 @@ bool Item_sum_variance::fix_length_and_dec() /* According to the SQL2003 standard (Part 2, Foundations; sec 10.9, aggregate function; paragraph 7h of Syntax Rules), "the declared - type of the result is an implementation-defined aproximate numeric + type of the result is an implementation-defined approximate numeric type. */ if (args[0]->type_handler()->Item_sum_variance_fix_length_and_dec(this)) @@ -2279,7 +2279,7 @@ double Item_sum_variance::val_real() is one or zero. If it's zero, i.e. a population variance, then we only set nullness when the count is zero. - Another way to read it is that 'sample' is the numerical threshhold, at and + Another way to read it is that 'sample' is the numerical threshold, at and below which a 'count' number of items is called NULL. 
*/ DBUG_ASSERT((sample == 0) || (sample == 1)); @@ -4181,7 +4181,7 @@ bool Item_func_group_concat::setup(THD *thd) { /* Force the create_tmp_table() to convert BIT columns to INT - as we cannot compare two table records containg BIT fields + as we cannot compare two table records containing BIT fields stored in the the tree used for distinct/order by. Moreover we don't even save in the tree record null bits where BIT fields store parts of their data. diff --git a/sql/item_sum.h b/sql/item_sum.h index 7a6fda708a4..f715c80ffaf 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -251,7 +251,7 @@ class Window_spec; The field 'aggr_level' is to contain the nest level of the subquery where the set function is aggregated. - The field 'max_arg_level' is for the maximun of the nest levels of the + The field 'max_arg_level' is for the maximum of the nest levels of the unbound column references occurred in the set function. A column reference is unbound within a set function if it is not bound by any subquery used as a subexpression in this function. A column reference is bound by diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index c740d1e227f..3aac5281566 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -475,7 +475,7 @@ err: /** - Create a formated date/time value in a string. + Create a formatted date/time value in a string. */ static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time, @@ -1052,7 +1052,7 @@ uint week_mode(uint mode) a date at start of january) In this case one can get 53 for the first week of next year. This flag ensures that the week is relevant for the given year. Note that this flag is only - releveant if WEEK_JANUARY is not set. + relevant if WEEK_JANUARY is not set. If set Week is in range 1-53. @@ -1354,7 +1354,7 @@ bool get_interval_value(THD *thd, Item *args, if (!(res= args->val_str_ascii(&str_value))) return (1); - /* record negative intervalls in interval->neg */ + /* record negative intervals in interval->neg */ str=res->ptr(); cs= res->charset(); const char *end=str+res->length(); @@ -1497,7 +1497,7 @@ bool Item_func_from_days::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzz /** - Converts current time in my_time_t to MYSQL_TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME representation for local time zone. Defines time zone (local) used for whole CURDATE function. */ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1508,7 +1508,7 @@ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) /** - Converts current time in my_time_t to MYSQL_TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME representation for UTC time zone. Defines time zone (UTC) used for whole UTC_DATE function. */ void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1586,7 +1586,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item) } /** - Converts current time in my_time_t to MYSQL_TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME representation for local time zone. Defines time zone (local) used for whole CURTIME function. 
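The *_local/*_utc store_now_in_TIME() pairs touched above differ only in which time zone interprets the same epoch value; the server routes this through its Time_zone objects. A stand-alone libc analogue of that split (not the server functions):

#include <ctime>

// Same instant, two renderings: the local variant depends on the session
// (here: process) time zone, the UTC variant never does.
static void store_now_local(time_t now, struct tm *out) { localtime_r(&now, out); }
static void store_now_utc(time_t now, struct tm *out)   { gmtime_r(&now, out); }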
*/ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1600,7 +1600,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) /** - Converts current time in my_time_t to MYSQL_TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME representation for UTC time zone. Defines time zone (UTC) used for whole UTC_TIME function. */ void Item_func_curtime_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1654,7 +1654,7 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions) /** - Converts current time in my_time_t to MYSQL_TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME representation for local time zone. Defines time zone (local) used for whole NOW function. */ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1666,7 +1666,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) /** - Converts current time in my_time_t to MYSQL_TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME representation for UTC time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function. */ void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1696,7 +1696,7 @@ bool Item_func_now::get_date(THD *thd, MYSQL_TIME *res, /** - Converts current time in my_time_t to MYSQL_TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME representation for local time zone. Defines time zone (local) used for whole SYSDATE function. */ void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) @@ -1982,7 +1982,7 @@ bool Item_func_convert_tz::get_date(THD *thd, MYSQL_TIME *ltime, uint not_used; my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used); ulong sec_part= ltime->second_part; - /* my_time_tmp is guranteed to be in the allowed range */ + /* my_time_tmp is guaranteed to be in the allowed range */ if (my_time_tmp) to_tz->gmt_sec_to_TIME(ltime, my_time_tmp); /* we rely on the fact that no timezone conversion can change sec_part */ @@ -2403,7 +2403,7 @@ void Item_char_typecast::fix_length_and_dec_internal(CHARSET_INFO *from_cs) uint32 char_length; /* We always force character set conversion if cast_cs - is a multi-byte character set. It garantees that the + is a multi-byte character set. It guarantees that the result of CAST is a well-formed string. For single-byte character sets we allow just to copy from the argument. A single-byte character sets string diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 3ed48e0a124..28bddb75df2 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -62,7 +62,7 @@ typedef struct my_xml_node_st } MY_XML_NODE; -/* Lexical analizer token */ +/* Lexical analyzer token */ typedef struct my_xpath_lex_st { int term; /* token type, see MY_XPATH_LEX_XXXXX below */ @@ -1121,7 +1121,7 @@ static Item* nametestfunc(MY_XPATH *xpath, /* - Tokens consisting of one character, for faster lexical analizer. + Tokens consisting of one character, for faster lexical analyzer. */ static char simpletok[128]= { @@ -1441,7 +1441,7 @@ my_xpath_function(const char *beg, const char *end) } -/* Initialize a lex analizer token */ +/* Initialize a lex analyzer token */ static void my_xpath_lex_init(MY_XPATH_LEX *lex, const char *str, const char *strend) @@ -1472,7 +1472,7 @@ my_xdigit(int c) SYNOPSYS Scan the next token from the input. lex->term is set to the scanned token type. 
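Related to the simpletok[128] table mentioned in the XPath lexer above: single-character tokens can be classified with one array lookup. A minimal sketch with made-up token codes:

#include <cstdint>

enum { TOK_NONE= 0, TOK_LPAR, TOK_RPAR, TOK_SLASH, TOK_AT };

static uint8_t simple_tok[128];              // zero-initialized: 0 = not simple

static void init_simple_tok()
{
  simple_tok[(unsigned char) '(']= TOK_LPAR;
  simple_tok[(unsigned char) ')']= TOK_RPAR;
  simple_tok[(unsigned char) '/']= TOK_SLASH;
  simple_tok[(unsigned char) '@']= TOK_AT;
}

static int scan_simple(unsigned char c)
{
  return c < 128 ? simple_tok[c] : TOK_NONE; // one lookup per character
}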
- lex->beg and lex->end are set to the beginnig + lex->beg and lex->end are set to the beginning and to the end of the token. RETURN N/A @@ -1498,7 +1498,7 @@ my_xpath_lex_scan(MY_XPATH *xpath, (const uchar*) end)) > 0 && ((ctype & (_MY_L | _MY_U)) || *beg == '_')) { - // scan untill the end of the idenfitier + // scan until the end of the identifier for (beg+= length; (length= xpath->cs->cset->ctype(xpath->cs, &ctype, (const uchar*) beg, @@ -1627,7 +1627,7 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath) ** Grammar rules, according to http://www.w3.org/TR/xpath ** Implemented using recursive descendant method. ** All the following grammar processing functions accept -** a signle "xpath" argument and return 1 on success and 0 on error. +** a single "xpath" argument and return 1 on success and 0 on error. ** They also modify "xpath" argument by creating new items. */ @@ -2523,7 +2523,7 @@ public: as it is in conflict with abbreviated step. 1 + .123 does not work, 1 + 0.123 does. - Perhaps it is better to move this code into lex analizer. + Perhaps it is better to move this code into lex analyzer. RETURN 1 - success @@ -2878,7 +2878,7 @@ append_node(String *str, MY_XML_NODE *node) SYNOPSYS A call-back function executed when XML parser - is entering a tag or an attribue. + is entering a tag or an attribute. Appends the new node into data->pxml. Increments data->level. @@ -2914,7 +2914,7 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len) SYNOPSYS A call-back function executed when XML parser - is entering into a tag or an attribue textual value. + is entering into a tag or an attribute textual value. The value is appended into data->pxml. RETURN @@ -2942,7 +2942,7 @@ int xml_value(MY_XML_PARSER *st,const char *attr, size_t len) SYNOPSYS A call-back function executed when XML parser - is leaving a tag or an attribue. + is leaving a tag or an attribute. Decrements data->level. RETURN diff --git a/sql/key.cc b/sql/key.cc index bf50094a9e4..adff6975631 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -228,7 +228,7 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, { /* This in fact never happens, as we have only partial BLOB - keys yet anyway, so it's difficult to find any sence to + keys yet anyway, so it's difficult to find any sense to restore the part of a record. Maybe this branch is to be removed, but now we have to ignore GCov compaining. @@ -612,8 +612,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec) max length. The exceptions are the BLOB and VARCHAR field types that take the max length into account. */ - if ((result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff, - key_part->length))) + if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff, + key_part->length))) DBUG_RETURN(result); next_loop: key_part++; diff --git a/sql/lock.cc b/sql/lock.cc index 8b8dd6fbed1..a3744d7f000 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1144,7 +1144,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) { DBUG_ENTER("make_global_read_lock_block_commit"); /* - If we didn't succeed lock_global_read_lock(), or if we already suceeded + If we didn't succeed lock_global_read_lock(), or if we already succeeded make_global_read_lock_block_commit(), do nothing. */ diff --git a/sql/log.cc b/sql/log.cc index f6dcbea1210..eaa49bcb58c 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -162,7 +162,7 @@ void setup_log_handling() /** purge logs, master and slave sides both, related error code - convertor. 
+ converter. Called from @c purge_error_message(), @c MYSQL_BIN_LOG::reset_logs() @param res an internal to purging routines error code @@ -381,7 +381,7 @@ public: never zero. This is done while calling the constructor binlog_cache_mngr. - We cannot set informaton in the constructor binlog_cache_data + We cannot set information in the constructor binlog_cache_data because the space for binlog_cache_mngr is allocated through a placement new. @@ -3031,7 +3031,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, mysql_mutex_lock(&LOCK_log); if (is_open()) - { // Safety agains reopen + { // Safety against reopen char buff[80], *end; char query_time_buff[22+7], lock_time_buff[22+7]; size_t buff_len; @@ -3330,7 +3330,7 @@ void MYSQL_BIN_LOG::cleanup() /* Free data for global binlog state. - We can't do that automaticly as we need to do this before + We can't do that automatically as we need to do this before safemalloc is shut down */ if (!is_relay_log) @@ -4105,7 +4105,7 @@ err: /** - Delete all logs refered to in the index file. + Delete all logs referred to in the index file. The new index file will only contain this file. @@ -5685,7 +5685,7 @@ binlog_cache_mngr *THD::binlog_setup_trx_data() - Start a statement transaction to allow us to truncate the cache. - - Save the currrent binlog position so that we can roll back the + - Save the current binlog position so that we can roll back the statement by truncating the cache. We only update the saved position if the old one was undefined, @@ -6870,7 +6870,7 @@ static const char* get_first_binlog(char* buf_arg) } if (normalize_binlog_name(buf_arg, fname, false)) { - errmsg= "cound not normalize the first file name in the binlog index"; + errmsg= "could not normalize the first file name in the binlog index"; goto end; } end: @@ -9863,7 +9863,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) than compare all found against each other to find the one pointing to the most recent binlog. - Note also that we need to first release LOCK_xid_list, then aquire + Note also that we need to first release LOCK_xid_list, then acquire LOCK_log, then re-aquire LOCK_xid_list. If we were to take LOCK_log while holding LOCK_xid_list, we might deadlock with other threads that take the locks in the opposite order. @@ -9948,7 +9948,7 @@ TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie) necessary stuff. In the future, this thread could also be used to do log rotation in the - background, which could elimiate all stalls around binlog rotations. + background, which could eliminate all stalls around binlog rotations. 
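The mark_xid_done() comment above spells out a lock-ordering discipline: never take LOCK_log while already holding LOCK_xid_list. A stand-alone sketch of the release/re-acquire pattern it implies, using std::mutex stand-ins rather than the server's mysql_mutex_t:

#include <mutex>

std::mutex LOCK_log_m, LOCK_xid_list_m;      // illustrative stand-ins

void checkpoint_with_rotation_check()
{
  std::unique_lock<std::mutex> xids(LOCK_xid_list_m);
  // ... discover that LOCK_log is also needed ...
  xids.unlock();                             // drop the inner lock first
  std::lock_guard<std::mutex> log(LOCK_log_m);
  xids.lock();                               // re-acquire in log -> xid_list order
  // ... both locks held, taken in the canonical order, so no deadlock ...
}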
*/ pthread_handler_t binlog_background_thread(void *arg __attribute__((unused))) diff --git a/sql/log_event.cc b/sql/log_event.cc index 58f0d77e80b..deca177fa43 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -4533,7 +4533,7 @@ get_str_len_and_pointer(const Log_event::Byte **src, const Log_event::Byte *end) { if (*src >= end) - return -1; // Will be UINT_MAX in two-complement arithmetics + return -1; // Will be UINT_MAX in two-complement arithmetic uint length= **src; if (length > 0) { @@ -4921,7 +4921,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, /* A 2nd variable part; this is common to all versions */ memcpy((char*) start, end, data_len); // Copy db and query - start[data_len]= '\0'; // End query with \0 (For safetly) + start[data_len]= '\0'; // End query with \0 (For safety) db= (char *)start; query= (char *)(start + db_len + 1); q_len= data_len - db_len -1; @@ -6624,7 +6624,7 @@ int Format_description_log_event::do_update_pos(rpl_group_info *rgi) If we do not skip stepping the group log position (and the server id was changed when restarting the server), it might well be that we start executing at a position that is invalid, e.g., - at a Rows_log_event or a Query_log_event preceeded by a + at a Rows_log_event or a Query_log_event preceded by a Intvar_log_event instead of starting at a Table_map_log_event or the Intvar_log_event respectively. */ @@ -6735,7 +6735,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp @return the version-safe checksum alg descriptor where zero designates no checksum, 255 - the orginator is - checksum-unaware (effectively no checksum) and the actuall + checksum-unaware (effectively no checksum) and the actual [1-254] range alg descriptor. */ enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len) @@ -7461,7 +7461,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi, /* When replication is running fine, if it was DUP_ERROR on the master then we could choose IGNORE here, because if DUP_ERROR - suceeded on master, and data is identical on the master and slave, + succeeded on master, and data is identical on the master and slave, then there should be no uniqueness errors on slave, so IGNORE is the same as DUP_ERROR. But in the unlikely case of uniqueness errors (because the data on the master and slave happen to be different @@ -8008,7 +8008,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg, /* Used to record GTID while sending binlog to slave, without having to - fully contruct every Gtid_log_event() needlessly. + fully construct every Gtid_log_event() needlessly. */ bool Gtid_log_event::peek(const char *event_start, size_t event_len, @@ -8574,7 +8574,7 @@ err: /* Used to record gtid_list event while sending binlog to slave, without having to - fully contruct the event object. + fully construct the event object. */ bool Gtid_list_log_event::peek(const char *event_start, size_t event_len, @@ -8654,7 +8654,7 @@ Intvar_log_event::Intvar_log_event(const char* buf, const Format_description_log_event* description_event) :Log_event(buf, description_event) { - /* The Post-Header is empty. The Varible Data part begins immediately. */ + /* The Post-Header is empty. The Variable Data part begins immediately. 
*/ buf+= description_event->common_header_len + description_event->post_header_len[INTVAR_EVENT-1]; type= buf[I_TYPE_OFFSET]; @@ -9944,7 +9944,7 @@ void Create_file_log_event::pack_info(Protocol *protocol) /** Create_file_log_event::do_apply_event() - Constructor for Create_file_log_event to intantiate an event + Constructor for Create_file_log_event to instantiate an event from the relay log on the slave. @retval @@ -11018,7 +11018,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, DBUG_VOID_RETURN; } - /* if my_bitmap_init fails, catched in is_valid() */ + /* if my_bitmap_init fails, caught in is_valid() */ if (likely(!my_bitmap_init(&m_cols, m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, m_width, @@ -11435,7 +11435,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) */ { - DBUG_PRINT("debug", ("Checking compability of tables to lock - tables_to_lock: %p", + DBUG_PRINT("debug", ("Checking compatibility of tables to lock - tables_to_lock: %p", rgi->tables_to_lock)); /** @@ -11490,7 +11490,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) ptr->table->s->table_name.str)); /* We should not honour --slave-skip-errors at this point as we are - having severe errors which should not be skiped. + having severe errors which should not be skipped. */ thd->is_slave_error= 1; /* remove trigger's tables */ @@ -11879,7 +11879,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd) /** The method either increments the relay log position or commits the current statement and increments the master group - possition if the event is STMT_END_F flagged and + position if the event is STMT_END_F flagged and the statement corresponds to the autocommit query (i.e replicated without wrapping in BEGIN/COMMIT) @@ -12091,7 +12091,7 @@ err: /** Print an event "body" cache to @c file possibly in two fragments. - Each fragement is optionally per @c do_wrap to produce an SQL statement. + Each fragment is optionally per @c do_wrap to produce an SQL statement. @param file a file to print to @param body the "body" IO_CACHE of event @@ -13904,7 +13904,7 @@ record_compare_exit: Find the best key to use when locating the row in @c find_row(). A primary key is preferred if it exists; otherwise a unique index is - preferred. Else we pick the index with the smalles rec_per_key value. + preferred. Else we pick the index with the smallest rec_per_key value. If a suitable key is found, set @c m_key, @c m_key_nr and @c m_key_info member fields appropriately. @@ -14038,7 +14038,7 @@ static int row_not_found_error(rpl_group_info *rgi) Locate the current row in event's table. The current row is pointed by @c m_curr_row. Member @c m_width tells - how many columns are there in the row (this can be differnet from + how many columns are there in the row (this can be different from the number of columns in the table). It is assumed that event's table is already open and pointed by @c m_table. @@ -14079,7 +14079,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi) rpl_row_tabledefs.test specifies that if the extra field on the slave does not have a default value and this is okay with Delete or Update events. 
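The key-selection rule quoted above for find_row() (primary key, else a unique index, else the smallest rec_per_key) can be written as a single scan. A simplified sketch with stand-in types, not the server's KEY/TABLE structures:

#include <cstddef>
#include <vector>

struct KeyInfo { bool is_primary; bool is_unique; double rec_per_key; };

// Returns keys.size() when no key is usable at all.
static std::size_t pick_best_key(const std::vector<KeyInfo> &keys)
{
  std::size_t best= keys.size();
  for (std::size_t i= 0; i < keys.size(); i++)
  {
    if (keys[i].is_primary)
      return i;                                   // primary key wins outright
    if (keys[i].is_unique)
    {
      if (best == keys.size() || !keys[best].is_unique)
        best= i;                                  // first unique index found
    }
    else if (best == keys.size() ||
             (!keys[best].is_unique &&
              keys[i].rec_per_key < keys[best].rec_per_key))
      best= i;                                    // most selective non-unique
  }
  return best;
}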
- Todo: fix wl3228 hld that requires defauls for all types of events + Todo: fix wl3228 hld that requires defaults for all types of events */ prepare_record(table, m_width, FALSE); @@ -14332,7 +14332,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi) while (record_compare(table)); /* - Note: above record_compare will take into accout all record fields + Note: above record_compare will take into account all record fields which might be incorrect in case a partial row was given in the event */ diff --git a/sql/log_event.h b/sql/log_event.h index dbd2f4ab348..0468dc40a1d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -455,7 +455,7 @@ class String; /** @def LOG_EVENT_ARTIFICIAL_F - Artificial events are created arbitarily and not written to binary + Artificial events are created arbitrarily and not written to binary log These events should not update the master log position when slave @@ -962,13 +962,13 @@ private: }; /** - the struct aggregates two paramenters that identify an event + the struct aggregates two parameters that identify an event uniquely in scope of communication of a particular master and slave couple. I.e there can not be 2 events from the same staying connected master which have the same coordinates. @note Such identifier is not yet unique generally as the event originating master - is resetable. Also the crashed master can be replaced with some other. + is resettable. Also the crashed master can be replaced with some other. */ typedef struct event_coordinates { @@ -2792,7 +2792,7 @@ public: uint8 number_of_event_types; /* The list of post-headers' lengths followed - by the checksum alg decription byte + by the checksum alg description byte */ uint8 *post_header_len; class master_version_split: public Version { @@ -3131,7 +3131,7 @@ public: */ bool is_deferred() { return deferred; } /* - In case of the deffered applying the variable instance is flagged + In case of the deferred applying the variable instance is flagged and the parsing time query id is stored to be used at applying time. */ void set_deferred(query_id_t qid) { deferred= true; query_id= qid; } @@ -5019,7 +5019,7 @@ private: /** @class Incident_log_event - Class representing an incident, an occurance out of the ordinary, + Class representing an incident, an occurence out of the ordinary, that happened on the master. The event is used to inform the slave that something out of the @@ -5063,7 +5063,7 @@ public: m_message.str= NULL; /* Just as a precaution */ m_message.length= 0; set_direct_logging(); - /* Replicate the incident irregardless of @@skip_replication. */ + /* Replicate the incident regardless of @@skip_replication. */ flags&= ~LOG_EVENT_SKIP_REPLICATION_F; DBUG_VOID_RETURN; } @@ -5084,7 +5084,7 @@ public: strmake(m_message.str, msg->str, msg->length); m_message.length= msg->length; set_direct_logging(); - /* Replicate the incident irregardless of @@skip_replication. */ + /* Replicate the incident regardless of @@skip_replication. */ flags&= ~LOG_EVENT_SKIP_REPLICATION_F; DBUG_VOID_RETURN; } diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index e01488abbb3..8ec823d3d64 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1227,7 +1227,7 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len, DBUG_VOID_RETURN; } - /* if my_bitmap_init fails, catched in is_valid() */ + /* if my_bitmap_init fails, caught in is_valid() */ if (likely(!my_bitmap_init(&m_cols, m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, m_width, diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 11238fd573d..9c811f3011f 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -4398,7 +4398,11 @@ static int replace_user_table(THD *thd, const User_table &user_table, { if (revoke_grant) { - my_error(ER_NONEXISTING_GRANT, MYF(0), combo->user.str, combo->host.str); + if (combo->host.length) + my_error(ER_NONEXISTING_GRANT, MYF(0), combo->user.str, + combo->host.str); + else + my_error(ER_INVALID_ROLE, MYF(0), combo->user.str); goto end; } /* @@ -5922,6 +5926,8 @@ static void propagate_role_grants(ACL_ROLE *role, enum PRIVS_TO_MERGE::what what, const char *db= 0, const char *name= 0) { + if (!role) + return; mysql_mutex_assert_owner(&acl_cache->lock); PRIVS_TO_MERGE data= { what, db, name }; @@ -8107,6 +8113,21 @@ err: } +static void check_grant_column_int(GRANT_TABLE *grant_table, const char *name, + uint length, ulong *want_access) +{ + if (grant_table) + { + *want_access&= ~grant_table->privs; + if (*want_access & grant_table->cols) + { + GRANT_COLUMN *grant_column= column_hash_search(grant_table, name, length); + if (grant_column) + *want_access&= ~grant_column->rights; + } + } +} + /* Check column rights in given security context @@ -8129,9 +8150,6 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant, const char *db_name, const char *table_name, const char *name, size_t length, Security_context *sctx) { - GRANT_TABLE *grant_table; - GRANT_TABLE *grant_table_role; - GRANT_COLUMN *grant_column; ulong want_access= grant->want_privilege & ~grant->privilege; DBUG_ENTER("check_grant_column"); DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access)); @@ -8156,45 +8174,20 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant, grant->version= grant_version; /* purecov: inspected */ } - grant_table= grant->grant_table_user; - grant_table_role= grant->grant_table_role; - - if (!grant_table && !grant_table_role) - goto err; + check_grant_column_int(grant->grant_table_user, name, (uint)length, + &want_access); + check_grant_column_int(grant->grant_table_role, name, (uint)length, + &want_access); - if (grant_table) - { - grant_column= column_hash_search(grant_table, name, length); - if (grant_column) - { - want_access&= ~grant_column->rights; - } - } - if (grant_table_role) - { - grant_column= column_hash_search(grant_table_role, name, length); - if (grant_column) - { - want_access&= ~grant_column->rights; - } - } + mysql_rwlock_unlock(&LOCK_grant); if (!want_access) - { - mysql_rwlock_unlock(&LOCK_grant); DBUG_RETURN(0); - } -err: - mysql_rwlock_unlock(&LOCK_grant); char command[128]; get_privilege_desc(command, sizeof(command), want_access); /* TODO perhaps error should print current rolename aswell */ - my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), - command, - sctx->priv_user, - sctx->host_or_ip, - name, - table_name); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user, + sctx->host_or_ip, name, table_name); DBUG_RETURN(1); } diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index d62c18c6769..6201411d4aa 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -768,8 +768,18 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, { compl_result_code= result_code= HA_ADMIN_INVALID; } + + /* + The check for ALTER_PARTITION_ADMIN implements this logic: + do not collect EITS STATS for this syntax: + ALTER TABLE ... ANALYZE PARTITION p + EITS statistics is global (not per-partition). 
Collecting global stats + is much more expensive processing just one partition, so the most + appropriate action is to just not collect EITS stats for this command. + */ collect_eis= (table->table->s->table_category == TABLE_CATEGORY_USER && + !(lex->alter_info.flags & ALTER_PARTITION_ADMIN) && (check_eits_collection_allowed(thd) || lex->with_persistent_for_clause)); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 10bc6ccab6c..75427f106a4 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1419,7 +1419,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table, FALSE); /* extra() call must come only after all instances above are closed */ if (function != HA_EXTRA_NOT_USED) - (void) table->file->extra(function); + DBUG_RETURN(table->file->extra(function)); DBUG_RETURN(FALSE); } @@ -7944,15 +7944,11 @@ bool setup_tables(THD *thd, Name_resolution_context *context, FALSE ok; In this case *map will include the chosen index TRUE error */ -bool setup_tables_and_check_access(THD *thd, - Name_resolution_context *context, +bool setup_tables_and_check_access(THD *thd, Name_resolution_context *context, List<TABLE_LIST> *from_clause, - TABLE_LIST *tables, - List<TABLE_LIST> &leaves, - bool select_insert, - ulong want_access_first, - ulong want_access, - bool full_table_list) + TABLE_LIST *tables, List<TABLE_LIST> &leaves, + bool select_insert, ulong want_access_first, + ulong want_access, bool full_table_list) { DBUG_ENTER("setup_tables_and_check_access"); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 19180c85a2a..7327f270c33 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1205,10 +1205,8 @@ extern "C" my_thread_id next_thread_id_noinline() #endif -const Type_handler *THD::type_handler_for_date() const +const Type_handler *THD::type_handler_for_datetime() const { - if (!(variables.sql_mode & MODE_ORACLE)) - return &type_handler_newdate; if (opt_mysql56_temporal_format) return &type_handler_datetime2; return &type_handler_datetime; diff --git a/sql/sql_class.h b/sql/sql_class.h index 22637872cfc..7cacfe25ce4 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3418,7 +3418,7 @@ public: { return !MY_TEST(variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES); } - const Type_handler *type_handler_for_date() const; + const Type_handler *type_handler_for_datetime() const; bool timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts, ulong sec_part, date_mode_t fuzzydate); inline my_time_t query_start() { return start_time; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index f8b93abd4ae..821755d94eb 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -240,6 +240,7 @@ void st_parsing_options::reset() { allows_variable= TRUE; + lookup_keywords_after_qualifier= false; } @@ -1612,7 +1613,10 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd) yylval->lex_str.str= (char*) get_ptr(); yylval->lex_str.length= 1; c= yyGet(); // should be '.' 
- next_state= MY_LEX_IDENT_START; // Next is ident (not keyword) + if (lex->parsing_options.lookup_keywords_after_qualifier) + next_state= MY_LEX_IDENT_OR_KEYWORD; + else + next_state= MY_LEX_IDENT_START; // Next is ident (not keyword) if (!ident_map[(uchar) yyPeek()]) // Probably ` or " next_state= MY_LEX_START; return((int) c); @@ -4197,7 +4201,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only) sl->options|= SELECT_DESCRIBE; inner_join->select_options|= SELECT_DESCRIBE; } - res= inner_join->optimize(); + if ((res= inner_join->optimize())) + return TRUE; if (!inner_join->cleaned) sl->update_used_tables(); sl->update_correlated_cache(); @@ -10418,3 +10423,31 @@ Spvar_definition *LEX::row_field_name(THD *thd, const Lex_ident_sys_st &name) init_last_field(res, &name, thd->variables.collation_database); return res; } + + +bool LEX::map_data_type(const Lex_ident_sys_st &schema_name, + Lex_field_type_st *type) const +{ + const Schema *schema= schema_name.str ? + Schema::find_by_name(schema_name) : + Schema::find_implied(thd); + if (!schema) + { + char buf[128]; + const Name type_name= type->type_handler()->name(); + my_snprintf(buf, sizeof(buf), "%.*s.%.*s", + (int) schema_name.length, schema_name.str, + (int) type_name.length(), type_name.ptr()); +#if MYSQL_VERSION_ID > 100500 +#error Please remove the old code + my_error(ER_UNKNOWN_DATA_TYPE, MYF(0), buf); +#else + my_printf_error(ER_UNKNOWN_ERROR, "Unknown data type: '%-.64s'", + MYF(0), buf); +#endif + return true; + } + const Type_handler *mapped= schema->map_data_type(thd, type->type_handler()); + type->set_handler(mapped); + return false; +} diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 8e8a62e7a4f..ce30a630388 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -32,6 +32,7 @@ #include "sp.h" // enum stored_procedure_type #include "sql_tvc.h" #include "item.h" +#include "sql_schema.h" /* Used for flags of nesting constructs */ #define SELECT_NESTING_MAP_SIZE 64 @@ -2301,6 +2302,7 @@ private: struct st_parsing_options { bool allows_variable; + bool lookup_keywords_after_qualifier; st_parsing_options() { reset(); } void reset(); @@ -4524,6 +4526,9 @@ public: const LEX_CSTRING &soname); Spvar_definition *row_field_name(THD *thd, const Lex_ident_sys_st &name); + bool map_data_type(const Lex_ident_sys_st &schema, + Lex_field_type_st *type) const; + void mark_first_table_as_inserting(); }; diff --git a/sql/sql_schema.cc b/sql/sql_schema.cc new file mode 100644 index 00000000000..0bf4a63c2f8 --- /dev/null +++ b/sql/sql_schema.cc @@ -0,0 +1,80 @@ +/* + Copyright (c) 2020, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +#include "mariadb.h" +#include "sql_type.h" +#include "sql_schema.h" +#include "sql_class.h" + +class Schema_oracle: public Schema +{ +public: + Schema_oracle(const LEX_CSTRING &name) + :Schema(name) + { } + const Type_handler *map_data_type(THD *thd, const Type_handler *src) + const + { + if (src == &type_handler_newdate) + return thd->type_handler_for_datetime(); + return src; + } +}; + + +class Schema_maxdb: public Schema +{ +public: + Schema_maxdb(const LEX_CSTRING &name) + :Schema(name) + { } + const Type_handler *map_data_type(THD *thd, const Type_handler *src) + const + { + if (src == &type_handler_timestamp || + src == &type_handler_timestamp2) + return thd->type_handler_for_datetime(); + return src; + } +}; + + +Schema mariadb_schema(Lex_cstring(STRING_WITH_LEN("mariadb_schema"))); +Schema_oracle oracle_schema(Lex_cstring(STRING_WITH_LEN("oracle_schema"))); +Schema_maxdb maxdb_schema(Lex_cstring(STRING_WITH_LEN("maxdb_schema"))); + + +Schema *Schema::find_by_name(const LEX_CSTRING &name) +{ + DBUG_ASSERT(name.str); + if (mariadb_schema.eq_name(name)) + return &mariadb_schema; + if (oracle_schema.eq_name(name)) + return &oracle_schema; + if (maxdb_schema.eq_name(name)) + return &maxdb_schema; + return NULL; +} + + +Schema *Schema::find_implied(THD *thd) +{ + if (thd->variables.sql_mode & MODE_ORACLE) + return &oracle_schema; + if (thd->variables.sql_mode & MODE_MAXDB) + return &maxdb_schema; + return &mariadb_schema; +} diff --git a/sql/sql_schema.h b/sql/sql_schema.h new file mode 100644 index 00000000000..7c8f284d526 --- /dev/null +++ b/sql/sql_schema.h @@ -0,0 +1,70 @@ +#ifndef SQL_SCHEMA_H_INCLUDED +#define SQL_SCHEMA_H_INCLUDED +/* + Copyright (c) 2020, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +#include "mysqld.h" +#include "lex_string.h" + +class Schema +{ + LEX_CSTRING m_name; +public: + Schema(const LEX_CSTRING &name) + :m_name(name) + { } + virtual ~Schema() { } + const LEX_CSTRING &name() const { return m_name; } + virtual const Type_handler *map_data_type(THD *thd, const Type_handler *src) + const + { + return src; + } + /* + For now we have *hard-coded* compatibility schemas: + schema_mariadb, schema_oracle, schema_maxdb. + But eventually we'll turn then into real databases on disk. + So the code below compares names according to the filesystem + case sensitivity, like it is done for regular databases. + + Note, this is different to information_schema, whose name + is always case insensitive. This is intentional! + The assymetry will be gone when we'll implement SQL standard + regular and delimited identifiers. 
+ */ + bool eq_name(const LEX_CSTRING &name) const + { +#if MYSQL_VERSION_ID > 100500 +#error Remove the old code + return !table_alias_charset->strnncoll(m_name.str, m_name.length, + name.str, name.length); +#else + // Please remove this when merging to 10.5 + return !table_alias_charset->coll->strnncoll(table_alias_charset, + (const uchar *) m_name.str, + m_name.length, + (const uchar *) name.str, + name.length, FALSE); +#endif + } + static Schema *find_by_name(const LEX_CSTRING &name); + static Schema *find_implied(THD *thd); +}; + + +extern Schema mariadb_schema; + +#endif // SQL_SCHEMA_H_INCLUDED diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d0bb0c816ec..5d255831055 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1547,7 +1547,6 @@ err: bool JOIN::build_explain() { DBUG_ENTER("JOIN::build_explain"); - create_explain_query_if_not_exists(thd->lex, thd->mem_root); have_query_plan= QEP_AVAILABLE; /* @@ -1589,6 +1588,7 @@ bool JOIN::build_explain() int JOIN::optimize() { int res= 0; + create_explain_query_if_not_exists(thd->lex, thd->mem_root); join_optimization_state init_state= optimization_state; if (select_lex->pushdown_select) { diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ba895384c1a..fd19b6064b5 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2232,6 +2232,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, append_identifier(thd, packet, &field->field_name); packet->append(' '); + const Type_handler *th= field->type_handler(); + const Schema *implied_schema= Schema::find_implied(thd); + if (th != implied_schema->map_data_type(thd, th)) + { + packet->append(th->schema()->name(), system_charset_info); + packet->append(STRING_WITH_LEN("."), system_charset_info); + } type.set(tmp, sizeof(tmp), system_charset_info); field->sql_type(type); packet->append(type.ptr(), type.length(), system_charset_info); diff --git a/sql/sql_string.h b/sql/sql_string.h index 3050a5ef464..807a3e63898 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -912,6 +912,10 @@ public: // Append with optional character set conversion from cs to charset() bool append(const char *s, size_t arg_length, CHARSET_INFO *cs); + bool append(const LEX_CSTRING &s, CHARSET_INFO *cs) + { + return append(s.str, s.length, cs); + } void strip_sp(); friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); diff --git a/sql/sql_type.cc b/sql/sql_type.cc index 0b333d44b77..2a0f90a918d 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -81,6 +81,12 @@ Type_handler_geometry type_handler_geometry; #endif +Schema *Type_handler::schema() const +{ + return &mariadb_schema; +} + + bool Type_handler_data::init() { #ifdef HAVE_SPATIAL diff --git a/sql/sql_type.h b/sql/sql_type.h index badd8d2f7f5..be0f79b88aa 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -1285,6 +1285,8 @@ public: } }; +class Schema; + /** Class Time is designed to store valid TIME values. 
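Pulling together the new sql_schema.{cc,h}, Type_handler::schema() and the show_create_table() hunk above: a schema object may substitute one type handler for another, and SHOW CREATE TABLE prints a schema qualifier exactly when the field's handler differs from what the session's implied schema would map it to. A condensed stand-alone restatement with simplified stand-in types (not the server classes):

#include <cstdio>

struct TypeHandler { const char *name; };
static TypeHandler th_date{"date"}, th_datetime{"datetime"};

struct Schema
{
  virtual ~Schema() {}
  virtual const TypeHandler *map(const TypeHandler *src) const { return src; }
};

struct SchemaOracle : Schema
{
  // In Oracle mode a plain DATE is mapped to a datetime handler, as
  // Schema_oracle::map_data_type() does above.
  const TypeHandler *map(const TypeHandler *src) const override
  { return src == &th_date ? &th_datetime : src; }
};

// Mirrors the show_create_table() change: qualify the type name only when the
// implied schema would have produced a different handler for it.
static void print_type(const Schema &implied, const TypeHandler *field_th)
{
  if (field_th != implied.map(field_th))
    std::printf("mariadb_schema.%s\n", field_th->name);
  else
    std::printf("%s\n", field_th->name);
}

So, for example, a column whose handler is the plain DATE one is printed as mariadb_schema.date when the implied schema is oracle_schema, which lets the definition round-trip between sql_mode settings.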
@@ -3287,6 +3289,7 @@ public: Type_handler *aggregate_for_num_op_traditional(const Type_handler *h1, const Type_handler *h2); + virtual Schema *schema() const; virtual const Name name() const= 0; virtual const Name version() const { return m_version_default; } virtual enum_field_types field_type() const= 0; diff --git a/sql/sql_view.cc b/sql/sql_view.cc index cfaa5141a3b..0a18a852832 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -449,9 +449,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (thd->open_temporary_tables(lex->query_tables) || open_and_lock_tables(thd, lex->query_tables, TRUE, 0)) { - view= lex->unlink_first_table(&link_to_local); res= TRUE; - goto err; + goto err_no_relink; } view= lex->unlink_first_table(&link_to_local); @@ -714,10 +713,12 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, #ifdef WITH_WSREP wsrep_error_label: res= true; + goto err_no_relink; #endif err: lex->link_first_table_back(view, link_to_local); +err_no_relink: unit->cleanup(); DBUG_RETURN(res || thd->is_error()); } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 88d3669e008..3adc2801270 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1810,6 +1810,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type <type_handler> int_type real_type %type <Lex_field_type> type_with_opt_collate field_type + qualified_field_type field_type_numeric field_type_string field_type_lob @@ -6629,10 +6630,12 @@ field_spec: lex->init_last_field(f, &$1, NULL); $<create_field>$= f; + lex->parsing_options.lookup_keywords_after_qualifier= true; } field_type_or_serial opt_check_constraint { LEX *lex=Lex; + lex->parsing_options.lookup_keywords_after_qualifier= false; $$= $<create_field>2; $$->check_constraint= $4; @@ -6651,7 +6654,7 @@ field_spec: ; field_type_or_serial: - field_type { Lex->last_field->set_attributes($1, Lex->charset); } + qualified_field_type { Lex->last_field->set_attributes($1, Lex->charset); } field_def | SERIAL_SYM { @@ -6824,6 +6827,18 @@ column_default_expr: } ; +qualified_field_type: + field_type + { + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); + } + | sp_decl_ident '.' field_type + { + if (Lex->map_data_type($1, &($$= $3))) + MYSQL_YYABORT; + } + ; + field_type: field_type_numeric | field_type_temporal @@ -6945,7 +6960,7 @@ field_type_temporal: } $$.set(&type_handler_year, $2); } - | DATE_SYM { $$.set(thd->type_handler_for_date()); } + | DATE_SYM { $$.set(&type_handler_newdate); } | TIME_SYM opt_field_length { $$.set(opt_mysql56_temporal_format ? @@ -6955,31 +6970,14 @@ field_type_temporal: } | TIMESTAMP opt_field_length { - if (thd->variables.sql_mode & MODE_MAXDB) - $$.set(opt_mysql56_temporal_format ? - static_cast<const Type_handler*>(&type_handler_datetime2) : - static_cast<const Type_handler*>(&type_handler_datetime), - $2); - else - { - /* - Unlike other types TIMESTAMP fields are NOT NULL by default. - Unless --explicit-defaults-for-timestamp is given. - */ - if (!opt_explicit_defaults_for_timestamp) - Lex->last_field->flags|= NOT_NULL_FLAG; - $$.set(opt_mysql56_temporal_format ? - static_cast<const Type_handler*>(&type_handler_timestamp2): - static_cast<const Type_handler*>(&type_handler_timestamp), - $2); - } + $$.set(opt_mysql56_temporal_format ? + static_cast<const Type_handler*>(&type_handler_timestamp2): + static_cast<const Type_handler*>(&type_handler_timestamp), + $2); } | DATETIME opt_field_length { - $$.set(opt_mysql56_temporal_format ? 
- static_cast<const Type_handler*>(&type_handler_datetime2) : - static_cast<const Type_handler*>(&type_handler_datetime), - $2); + $$.set(thd->type_handler_for_datetime(), $2); } ; @@ -7307,14 +7305,14 @@ with_or_without_system: type_with_opt_collate: field_type opt_collate { - $$= $1; + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); if ($2) { if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } - Lex->last_field->set_attributes($1, Lex->charset); + Lex->last_field->set_attributes($$, Lex->charset); } ; diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index 9d5e42355b9..fdb3e90a509 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -1290,6 +1290,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type <type_handler> int_type real_type %type <Lex_field_type> type_with_opt_collate field_type + qualified_field_type sp_param_type_with_opt_collate sp_param_field_type sp_param_field_type_string @@ -6636,10 +6637,12 @@ field_spec: lex->init_last_field(f, &$1, NULL); $<create_field>$= f; + lex->parsing_options.lookup_keywords_after_qualifier= true; } field_type_or_serial opt_check_constraint { LEX *lex=Lex; + lex->parsing_options.lookup_keywords_after_qualifier= false; $$= $<create_field>2; $$->check_constraint= $4; @@ -6658,7 +6661,7 @@ field_spec: ; field_type_or_serial: - field_type { Lex->last_field->set_attributes($1, Lex->charset); } + qualified_field_type { Lex->last_field->set_attributes($1, Lex->charset); } field_def | SERIAL_SYM { @@ -6831,6 +6834,18 @@ column_default_expr: } ; +qualified_field_type: + field_type + { + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); + } + | sp_decl_ident '.' field_type + { + if (Lex->map_data_type($1, &($$= $3))) + MYSQL_YYABORT; + } + ; + field_type: field_type_numeric | field_type_temporal @@ -7004,7 +7019,7 @@ field_type_temporal: } $$.set(&type_handler_year, $2); } - | DATE_SYM { $$.set(thd->type_handler_for_date()); } + | DATE_SYM { $$.set(&type_handler_newdate); } | TIME_SYM opt_field_length { $$.set(opt_mysql56_temporal_format ? @@ -7014,31 +7029,14 @@ field_type_temporal: } | TIMESTAMP opt_field_length { - if (thd->variables.sql_mode & MODE_MAXDB) - $$.set(opt_mysql56_temporal_format ? - static_cast<const Type_handler*>(&type_handler_datetime2) : - static_cast<const Type_handler*>(&type_handler_datetime), - $2); - else - { - /* - Unlike other types TIMESTAMP fields are NOT NULL by default. - Unless --explicit-defaults-for-timestamp is given. - */ - if (!opt_explicit_defaults_for_timestamp) - Lex->last_field->flags|= NOT_NULL_FLAG; - $$.set(opt_mysql56_temporal_format ? - static_cast<const Type_handler*>(&type_handler_timestamp2): - static_cast<const Type_handler*>(&type_handler_timestamp), - $2); - } + $$.set(opt_mysql56_temporal_format ? + static_cast<const Type_handler*>(&type_handler_timestamp2): + static_cast<const Type_handler*>(&type_handler_timestamp), + $2); } | DATETIME opt_field_length { - $$.set(opt_mysql56_temporal_format ? 
- static_cast<const Type_handler*>(&type_handler_datetime2) : - static_cast<const Type_handler*>(&type_handler_datetime), - $2); + $$.set(thd->type_handler_for_datetime(), $2); } ; @@ -7393,27 +7391,28 @@ with_or_without_system: type_with_opt_collate: field_type opt_collate { - $$= $1; + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); if ($2) { if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } - Lex->last_field->set_attributes($1, Lex->charset); + Lex->last_field->set_attributes($$, Lex->charset); } ; sp_param_type_with_opt_collate: sp_param_field_type opt_collate { - $$= $1; + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); + if ($2) { if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) MYSQL_YYABORT; } - Lex->last_field->set_attributes($1, Lex->charset); + Lex->last_field->set_attributes($$, Lex->charset); } ; diff --git a/sql/structs.h b/sql/structs.h index 0c00aeec33a..c47e4802452 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -626,6 +626,10 @@ public: { set(handler, 0, 0); } + void set_handler(const Type_handler *handler) + { + m_handler= handler; + } const Type_handler *type_handler() const { return m_handler; } }; diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 60c10527fe9..3b58e8b5a8f 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -355,7 +355,6 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, } // endif mode rcop = false; - } catch (int n) { if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 43b3e461cd1..1ac4161e478 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -49,6 +49,7 @@ #include "global.h" #include "plgdbsem.h" #include "filamdbf.h" +#include "filamzip.h" #include "tabdos.h" #include "valblk.h" #define NO_FUNC @@ -139,7 +140,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) if (fread(buf, HEADLEN, 1, file) != 1) { strcpy(g->Message, MSG(NO_READ_32)); return RC_NF; - } // endif fread + } // endif fread // Check first byte to be sure of .dbf type if ((buf->Version & 0x03) != DBFTYPE) { @@ -149,7 +150,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) if ((buf->Version & 0x30) == 0x30) { strcpy(g->Message, MSG(FOXPRO_FILE)); dbc = 264; // FoxPro database container - } // endif Version + } // endif Version } else strcpy(g->Message, MSG(DBASE_FILE)); @@ -158,12 +159,12 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) if (fseek(file, buf->Headlen() - dbc, SEEK_SET) != 0) { sprintf(g->Message, MSG(BAD_HEADER), fn); return RC_FX; - } // endif fseek + } // endif fseek if (fread(&endmark, 2, 1, file) != 1) { strcpy(g->Message, MSG(BAD_HEAD_END)); return RC_FX; - } // endif fread + } // endif fread // Some files have just 1D others have 1D00 following fields if (endmark[0] != EOH && endmark[1] != EOH) { @@ -172,7 +173,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) if (rc == RC_OK) return RC_FX; - } // endif endmark + } // endif endmark // Calculate here the number of fields while we have the dbc info buf->SetFields((buf->Headlen() - dbc - 1) / 32); @@ -180,13 +181,58 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) return rc; } // end of dbfhead +/****************************************************************************/ +/* dbfields: Analyze a DBF header and set the table fields number. 
*/ +/* Parameters: */ +/* PGLOBAL g -- pointer to the CONNECT Global structure */ +/* DBFHEADER *hdrp -- pointer to _dbfheader structure */ +/* Returns: */ +/* RC_OK, RC_INFO, or RC_FX if error. */ +/****************************************************************************/ +static int dbfields(PGLOBAL g, DBFHEADER* hdrp) +{ + char* endmark; + int dbc = 2, rc = RC_OK; + + *g->Message = '\0'; + + // Check first byte to be sure of .dbf type + if ((hdrp->Version & 0x03) != DBFTYPE) { + strcpy(g->Message, MSG(NOT_A_DBF_FILE)); + rc = RC_INFO; + + if ((hdrp->Version & 0x30) == 0x30) { + strcpy(g->Message, MSG(FOXPRO_FILE)); + dbc = 264; // FoxPro database container + } // endif Version + + } else + strcpy(g->Message, MSG(DBASE_FILE)); + + // Check last byte(s) of header + endmark = (char*)hdrp + hdrp->Headlen() - dbc; + + // Some headers just have 1D others have 1D00 following fields + if (endmark[0] != EOH && endmark[1] != EOH) { + sprintf(g->Message, MSG(NO_0DH_HEAD), dbc); + + if (rc == RC_OK) + return RC_FX; + + } // endif endmark + + // Calculate here the number of fields while we have the dbc info + hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32); + return rc; +} // end of dbfields + /* -------------------------- Function DBFColumns ------------------------- */ /****************************************************************************/ /* DBFColumns: constructs the result blocks containing the description */ /* of all the columns of a DBF file that will be retrieved by #GetData. */ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info) { int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -196,10 +242,12 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) char buf[2], filename[_MAX_PATH]; int ncol = sizeof(buftyp) / sizeof(int); int rc, type, len, field, fields; - bool bad; - DBFHEADER mainhead; - DESCRIPTOR thisfield; - FILE *infile = NULL; + bool bad, mul; + PCSZ target, pwd; + DBFHEADER mainhead, *hp; + DESCRIPTOR thisfield, *tfp; + FILE *infile = NULL; + UNZIPUTL *zutp = NULL; PQRYRES qrp; PCOLRES crp; @@ -217,21 +265,55 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) /************************************************************************/ PlugSetPath(filename, fn, dp); - if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb"))) - return NULL; - - /************************************************************************/ - /* Get the first 32 bytes of the header. */ - /************************************************************************/ - if ((rc = dbfhead(g, infile, filename, &mainhead)) == RC_FX) { - fclose(infile); - return NULL; - } // endif dbfhead - - /************************************************************************/ - /* Allocate the structures used to refer to the result set. */ - /************************************************************************/ - fields = mainhead.Fields(); + if (topt->zipped) { + target = GetStringTableOption(g, topt, "Entry", NULL); + mul = (target && *target) ? 
strchr(target, '*') || strchr(target, '?') + : false; + mul = GetBooleanTableOption(g, topt, "Mulentries", mul); + + if (mul) { + strcpy(g->Message, "Cannot find column definition for multiple entries"); + return NULL; + } // endif Multiple + + pwd = GetStringTableOption(g, topt, "Password", NULL); + zutp = new(g) UNZIPUTL(target, pwd, mul); + + if (!zutp->OpenTable(g, MODE_READ, filename)) + hp = (DBFHEADER*)zutp->memory; + else + return NULL; + + /**********************************************************************/ + /* Set the table fields number. */ + /**********************************************************************/ + if ((rc = dbfields(g, hp)) == RC_FX) { + zutp->close(); + return NULL; + } // endif dbfields + + tfp = (DESCRIPTOR*)hp; + } else { + if (!(infile = global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb"))) + return NULL; + else + hp = &mainhead; + + /**********************************************************************/ + /* Get the first 32 bytes of the header. */ + /**********************************************************************/ + if ((rc = dbfhead(g, infile, filename, hp)) == RC_FX) { + fclose(infile); + return NULL; + } // endif dbfhead + + tfp = &thisfield; + } // endif zipped + + /************************************************************************/ + /* Get the number of the table fields. */ + /************************************************************************/ + fields = hp->Fields(); } else fields = 0; @@ -241,19 +323,21 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) if (info || !qrp) { if (infile) fclose(infile); + else if (zutp) + zutp->close(); return qrp; - } // endif info + } // endif info if (trace(1)) { htrc("Structure of %s\n", filename); htrc("headlen=%hd reclen=%hd degree=%d\n", - mainhead.Headlen(), mainhead.Reclen(), fields); - htrc("flags(iem)=%d,%d,%d cp=%d\n", mainhead.Incompleteflag, - mainhead.Encryptflag, mainhead.Mdxflag, mainhead.Language); + hp->Headlen(), hp->Reclen(), fields); + htrc("flags(iem)=%d,%d,%d cp=%d\n", hp->Incompleteflag, + hp->Encryptflag, hp->Mdxflag, hp->Language); htrc("%hd records, last changed %02d/%02d/%d\n", - mainhead.Records(), mainhead.Filedate[1], mainhead.Filedate[2], - mainhead.Filedate[0] + (mainhead.Filedate[0] <= 30) ? 2000 : 1900); + hp->Records(), hp->Filedate[1], hp->Filedate[2], + hp->Filedate[0] + (hp->Filedate[0] <= 30) ? 2000 : 1900); htrc("Field Type Offset Len Dec Set Mdx\n"); } // endif trace @@ -265,21 +349,24 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) for (field = 0; field < fields; field++) { bad = FALSE; - if (fread(&thisfield, HEADLEN, 1, infile) != 1) { + if (topt->zipped) { + tfp = (DESCRIPTOR*)((char*)tfp + HEADLEN); + } else if (fread(tfp, HEADLEN, 1, infile) != 1) { sprintf(g->Message, MSG(ERR_READING_REC), field+1, fn); goto err; - } else - len = thisfield.Length; + } // endif fread + + len = tfp->Length; if (trace(1)) htrc("%-11s %c %6ld %3d %2d %3d %3d\n", - thisfield.Name, thisfield.Type, thisfield.Offset, len, - thisfield.Decimals, thisfield.Setfield, thisfield.Mdxfield); + tfp->Name, tfp->Type, tfp->Offset, len, + tfp->Decimals, tfp->Setfield, tfp->Mdxfield); /************************************************************************/ /* Now get the results into blocks. 
*/ /************************************************************************/ - switch (thisfield.Type) { + switch (tfp->Type) { case 'C': // Characters case 'L': // Logical 'T' or 'F' or space type = TYPE_STRING; @@ -294,7 +381,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) // type = TYPE_INT; // break; case 'N': - type = (thisfield.Decimals) ? TYPE_DOUBLE + type = (tfp->Decimals) ? TYPE_DOUBLE : (len > 10) ? TYPE_BIGINT : TYPE_INT; break; case 'F': // Float @@ -306,8 +393,8 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) break; default: if (!info) { - sprintf(g->Message, MSG(BAD_DBF_TYPE), thisfield.Type - , thisfield.Name); + sprintf(g->Message, MSG(BAD_DBF_TYPE), tfp->Type + , tfp->Name); goto err; } // endif info @@ -316,27 +403,31 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) } // endswitch Type crp = qrp->Colresp; // Column Name - crp->Kdata->SetValue(thisfield.Name, field); + crp->Kdata->SetValue(tfp->Name, field); crp = crp->Next; // Data Type crp->Kdata->SetValue((int)type, field); crp = crp->Next; // Type Name if (bad) { - buf[0] = thisfield.Type; + buf[0] = tfp->Type; crp->Kdata->SetValue(buf, field); } else crp->Kdata->SetValue(GetTypeName(type), field); crp = crp->Next; // Precision - crp->Kdata->SetValue((int)thisfield.Length, field); + crp->Kdata->SetValue((int)tfp->Length, field); crp = crp->Next; // Length - crp->Kdata->SetValue((int)thisfield.Length, field); + crp->Kdata->SetValue((int)tfp->Length, field); crp = crp->Next; // Scale (precision) - crp->Kdata->SetValue((int)thisfield.Decimals, field); + crp->Kdata->SetValue((int)tfp->Decimals, field); } // endfor field qrp->Nblin = field; - fclose(infile); + + if (infile) + fclose(infile); + else if (zutp) + zutp->close(); #if 0 if (info) { @@ -347,9 +438,9 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) sprintf(buf, "Ver=%02x ncol=%hu nlin=%u lrecl=%hu headlen=%hu date=%02d/%02d/%02d", - mainhead.Version, fields, mainhead.Records, mainhead.Reclen, - mainhead.Headlen, mainhead.Filedate[0], mainhead.Filedate[1], - mainhead.Filedate[2]); + hp->Version, fields, hp->Records, hp->Reclen, + hp->Headlen, hp->Filedate[0], hp->Filedate[1], + hp->Filedate[2]); strcat(g->Message, buf); } // endif info @@ -360,9 +451,13 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) /**************************************************************************/ return qrp; - err: - fclose(infile); - return NULL; +err: + if (infile) + fclose(infile); + else if (zutp) + zutp->close(); + + return NULL; } // end of DBFColumns /* ---------------------------- Class DBFBASE ----------------------------- */ diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index 640fc349b4c..dfe5cb5cfc4 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM; /****************************************************************************/ /* Functions used externally. */ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info); +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS tiop, bool info); /****************************************************************************/ /* This is the base class for dBASE file access methods. 
*/ diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index fd1cf0ceff9..eb14e846120 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,11 +1,11 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.3 */ +/* Version 1.4 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -45,6 +45,62 @@ #define WRITEBUFFERSIZE (16384) +/****************************************************************************/ +/* Definitions used for DBF tables. */ +/****************************************************************************/ +#define HEADLEN 32 /* sizeof ( mainhead or thisfield ) */ +//efine MEMOLEN 10 /* length of memo field in .dbf */ +#define DBFTYPE 3 /* value of bits 0 and 1 if .dbf */ +#define EOH 0x0D /* end-of-header marker in .dbf file */ + +/****************************************************************************/ +/* First 32 bytes of a DBF table. */ +/* Note: some reserved fields are used here to store info (Fields) */ +/****************************************************************************/ +typedef struct _dbfheader { + uchar Version; /* Version information flags */ + char Filedate[3]; /* date, YYMMDD, binary. YY=year-1900 */ +private: + /* The following four members are stored in little-endian format on disk */ + char m_RecordsBuf[4]; /* records in the file */ + char m_HeadlenBuf[2]; /* bytes in the header */ + char m_ReclenBuf[2]; /* bytes in a record */ + char m_FieldsBuf[2]; /* Reserved but used to store fields */ +public: + char Incompleteflag; /* 01 if incomplete, else 00 */ + char Encryptflag; /* 01 if encrypted, else 00 */ + char Reserved2[12]; /* for LAN use */ + char Mdxflag; /* 01 if production .mdx, else 00 */ + char Language; /* Codepage */ + char Reserved3[2]; + + uint Records(void) const { return uint4korr(m_RecordsBuf); } + ushort Headlen(void) const { return uint2korr(m_HeadlenBuf); } + ushort Reclen(void) const { return uint2korr(m_ReclenBuf); } + ushort Fields(void) const { return uint2korr(m_FieldsBuf); } + + void SetHeadlen(ushort num) { int2store(m_HeadlenBuf, num); } + void SetReclen(ushort num) { int2store(m_ReclenBuf, num); } + void SetFields(ushort num) { int2store(m_FieldsBuf, num); } +} DBFHEADER; + +/****************************************************************************/ +/* Column field descriptor of a .dbf file. */ +/****************************************************************************/ +typedef struct _descriptor { + char Name[11]; /* field name, in capitals, null filled*/ + char Type; /* field type, C, D, F, L, M or N */ + uint Offset; /* used in memvars, not in files. */ + uchar Length; /* field length */ + uchar Decimals; /* number of decimal places */ + short Reserved4; + char Workarea; /* ??? */ + char Reserved5[2]; + char Setfield; /* ??? 
*/ + char Reserved6[7]; + char Mdxfield; /* 01 if tag field in production .mdx */ +} DESCRIPTOR; + bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul); /***********************************************************************/ @@ -214,10 +270,21 @@ bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE); - if (mul) - err = ZipFiles(g, zutp, fn, buf); - else - err = ZipFile(g, zutp, fn, entry, buf); + if (!mul) { + PCSZ entp; + + if (!entry) { // entry defaults to the file name + char* p = strrchr((char*)fn, '/'); +#if defined(__WIN__) + if (!p) p = strrchr((char*)fn, '\\'); +#endif // __WIN__ + entp = (p) ? p + 1 : entry; + } else + entp = entry; + + err = ZipFile(g, zutp, fn, entp, buf); + } else + err = ZipFiles(g, zutp, fn, buf); zutp->close(); return err; @@ -232,6 +299,7 @@ ZIPUTIL::ZIPUTIL(PCSZ tgt) { zipfile = NULL; target = tgt; + pwd = NULL; fp = NULL; entryopen = false; } // end of ZIPUTIL standard constructor @@ -241,6 +309,7 @@ ZIPUTIL::ZIPUTIL(ZIPUTIL *zutp) { zipfile = zutp->zipfile; target = zutp->target; + pwd = zutp->pwd; fp = zutp->fp; entryopen = zutp->entryopen; } // end of UNZIPUTL copy constructor @@ -385,11 +454,11 @@ void ZIPUTIL::closeEntry() /***********************************************************************/ /* Constructors. */ /***********************************************************************/ -UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul) +UNZIPUTL::UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul) { zipfile = NULL; target = tgt; - pwd = NULL; + pwd = pw; fp = NULL; memory = NULL; size = 0; @@ -959,7 +1028,7 @@ int UZXFAM::Cardinality(PGLOBAL g) } // end of Cardinality /***********************************************************************/ -/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */ +/* OpenTableFile: Open a FIX/UNIX table file from a ZIP file. */ /***********************************************************************/ bool UZXFAM::OpenTableFile(PGLOBAL g) { @@ -1015,6 +1084,197 @@ int UZXFAM::GetNext(PGLOBAL g) return RC_OK; } // end of GetNext +/* -------------------------- class UZDFAM --------------------------- */ + +/***********************************************************************/ +/* Constructors. */ +/***********************************************************************/ +UZDFAM::UZDFAM(PDOSDEF tdp) : DBMFAM(tdp) +{ + zutp = NULL; + tdfp = tdp; + //target = tdp->GetEntry(); + //mul = tdp->GetMul(); + //Lrecl = tdp->GetLrecl(); +} // end of UZXFAM standard constructor + +UZDFAM::UZDFAM(PUZDFAM txfp) : DBMFAM(txfp) +{ + zutp = txfp->zutp; + tdfp = txfp->tdfp; + //target = txfp->target; + //mul = txfp->mul; + //Lrecl = txfp->Lrecl; +} // end of UZXFAM copy constructor + +#if 0 +/****************************************************************************/ +/* dbfhead: Routine to analyze a DBF header. */ +/* Parameters: */ +/* PGLOBAL g -- pointer to the CONNECT Global structure */ +/* DBFHEADER *hdrp -- pointer to _dbfheader structure */ +/* Returns: */ +/* RC_OK, RC_NF, RC_INFO, or RC_FX if error. */ +/* Side effects: */ +/* Set the fields number in the header. 
*/ +/****************************************************************************/ +int UZDFAM::dbfhead(PGLOBAL g, void* buf) +{ + char *endmark; + int dbc = 2, rc = RC_OK; + DBFHEADER* hdrp = (DBFHEADER*)buf; + + *g->Message = '\0'; + + // Check first byte to be sure of .dbf type + if ((hdrp->Version & 0x03) != DBFTYPE) { + strcpy(g->Message, MSG(NOT_A_DBF_FILE)); + rc = RC_INFO; + + if ((hdrp->Version & 0x30) == 0x30) { + strcpy(g->Message, MSG(FOXPRO_FILE)); + dbc = 264; // FoxPro database container + } // endif Version + + } else + strcpy(g->Message, MSG(DBASE_FILE)); + + // Check last byte(s) of header + endmark = (char*)hdrp + hdrp->Headlen() - dbc; + + // Some headers just have 1D others have 1D00 following fields + if (endmark[0] != EOH && endmark[1] != EOH) { + sprintf(g->Message, MSG(NO_0DH_HEAD), dbc); + + if (rc == RC_OK) + return RC_FX; + + } // endif endmark + + // Calculate here the number of fields while we have the dbc info + hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32); + return rc; +} // end of dbfhead + +/****************************************************************************/ +/* ScanHeader: scan the DBF file header for number of records, record size,*/ +/* and header length. Set Records, check that Reclen is equal to lrecl and */ +/* return the header length or 0 in case of error. */ +/****************************************************************************/ +int UZDFAM::ScanHeader(PGLOBAL g, int* rln) +{ + int rc; + DBFHEADER header; + + /************************************************************************/ + /* Get the first 32 bytes of the header. */ + /************************************************************************/ + rc = dbfhead(g, &header); + + if (rc == RC_FX) + return -1; + + *rln = (int)header.Reclen(); + Records = (int)header.Records(); + return (int)header.Headlen(); +} // end of ScanHeader +#endif // 0 + +/***********************************************************************/ +/* ZIP GetFileLength: returns file size in number of bytes. */ +/***********************************************************************/ +int UZDFAM::GetFileLength(PGLOBAL g) +{ + int len; + + if (!zutp && OpenTableFile(g)) + return 0; + + if (zutp->entryopen) + len = zutp->size; + else + len = 0; + + return len; +} // end of GetFileLength + +/***********************************************************************/ +/* ZIP Cardinality: return the number of rows if possible. */ +/***********************************************************************/ +int UZDFAM::Cardinality(PGLOBAL g) +{ + if (!g) + return 1; + + int card = -1; + int len = GetFileLength(g); + + card = Records; + + // Set number of blocks for later use + Block = (card > 0) ? (card + Nrec - 1) / Nrec : 0; + return card; +} // end of Cardinality + +/***********************************************************************/ +/* OpenTableFile: Open a DBF table file from a ZIP file. */ +/***********************************************************************/ +bool UZDFAM::OpenTableFile(PGLOBAL g) +{ + // May have been already opened in GetFileLength + if (!zutp || !zutp->zipfile) { + char filename[_MAX_PATH]; + MODE mode = Tdbp->GetMode(); + + /*********************************************************************/ + /* Allocate the ZIP utility class. 
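The dbfhead() helper shown above (kept under #if 0 in this class) recovers the field count from the stored header length: a 32-byte prologue, one 32-byte descriptor per field, and a short end-of-header trailer whose size dbc is 2 for plain dBASE and 264 for a FoxPro container, giving fields = (Headlen() - dbc - 1) / 32. The following self-contained example only illustrates that arithmetic and the little-endian decoding performed by the Headlen()/Records() accessors; the byte values are invented for the example and it is not CONNECT code.

#include <cstdint>
#include <cstdio>

static int le16(const unsigned char *p) { return p[0] | (p[1] << 8); }
static long le32(const unsigned char *p)
{ return p[0] | (p[1] << 8) | (p[2] << 16) | (long(p[3]) << 24); }

int main()
{
  unsigned char hdr[32] = {0};
  hdr[0] = 0x03;                 // version bits 0-1 == DBFTYPE, plain dBASE
  hdr[4] = 7;                    // 7 records, stored little-endian
  hdr[8] = 129; hdr[9] = 0;      // header length: 32 + 3*32 + 1 terminator = 129
  hdr[10] = 41; hdr[11] = 0;     // record length

  const int dbc = 2;             // dBASE trailer size; FoxPro containers use 264
  int headlen = le16(hdr + 8);
  long records = le32(hdr + 4);
  int fields = (headlen - dbc - 1) / 32;   // (129 - 2 - 1) / 32 = 3 descriptors

  std::printf("records=%ld headlen=%d fields=%d\n", records, headlen, fields);
  return 0;
}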
*/ + /*********************************************************************/ + if (!zutp) + zutp = new(g)UNZIPUTL(tdfp); + + // We used the file name relative to recorded datapath + PlugSetPath(filename, To_File, Tdbp->GetPath()); + + if (!zutp->OpenTable(g, mode, filename)) { + // The pseudo "buffer" is here the entire real buffer + Memory = zutp->memory; + Top = Memory + zutp->size; + To_Fb = zutp->fp; // Useful when closing + return AllocateBuffer(g); + } else + return true; + + } else + Reset(); + + return false; +} // end of OpenTableFile + +/***********************************************************************/ +/* GetNext: go to next entry. */ +/***********************************************************************/ +int UZDFAM::GetNext(PGLOBAL g) +{ + int rc = zutp->nextEntry(g); + + if (rc != RC_OK) + return rc; + + int len = zutp->size; + +#if 0 + if (len % Lrecl) { + sprintf(g->Message, MSG(NOT_FIXED_LEN), zutp->fn, len, Lrecl); + return RC_FX; + } // endif size +#endif // 0 + + Memory = zutp->memory; + Top = Memory + len; + Rewind(); + return RC_OK; +} // end of GetNext + /* -------------------------- class ZIPFAM --------------------------- */ /***********************************************************************/ @@ -1045,7 +1305,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g) strcpy(g->Message, "No insert into existing zip file"); return true; } else if (append && len > 0) { - UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false); if (!zutp->IsInsertOk(g, filename)) { strcpy(g->Message, "No insert into existing entry"); @@ -1129,7 +1389,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g) strcpy(g->Message, "No insert into existing zip file"); return true; } else if (append && len > 0) { - UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false); if (!zutp->IsInsertOk(g, filename)) { strcpy(g->Message, "No insert into existing entry"); diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index be17d954728..7ff1fb0a543 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -1,7 +1,7 @@ /************** filamzip H Declares Source Code File (.H) **************/ -/* Name: filamzip.h Version 1.2 */ +/* Name: filamzip.h Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */ /* */ /* This file contains the ZIP file access method classes declares. */ /***********************************************************************/ @@ -11,6 +11,7 @@ #include "block.h" #include "filamap.h" #include "filamfix.h" +#include "filamdbf.h" #include "zip.h" #include "unzip.h" @@ -18,6 +19,7 @@ typedef class UNZFAM *PUNZFAM; typedef class UZXFAM *PUZXFAM; +typedef class UZDFAM* PUZDFAM; typedef class ZIPFAM *PZIPFAM; typedef class ZPXFAM *PZPXFAM; @@ -53,7 +55,7 @@ class DllExport ZIPUTIL : public BLOCK { class DllExport UNZIPUTL : public BLOCK { public: // Constructor - UNZIPUTL(PCSZ tgt, bool mul); + UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul); UNZIPUTL(PDOSDEF tdp); // Implementation @@ -144,6 +146,36 @@ class DllExport UZXFAM : public MPXFAM { }; // end of UZXFAM /***********************************************************************/ +/* This is the fixed unzip file access method. 
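UNZIPUTL now carries an optional password as its second constructor argument, threaded in from the Password table option in DBFColumns(). This excerpt does not show how the password is applied when an entry is opened; with minizip, which filamzip builds on (zip.h/unzip.h, UNZ_OK), the usual call is unzOpenCurrentFilePassword(). The helper below is therefore only an illustrative sketch under that assumption, not the UNZIPUTL implementation.

#include <cstddef>
#include <cstdio>
#include "unzip.h"   // minizip, already included by filamzip.h

// Open the current zip entry, applying the password only when one was given.
// Returns true on failure, following the diff's true-on-error convention.
static bool open_current_entry(unzFile uf, const char *pwd, char *msg, size_t msglen)
{
  int rc = pwd ? unzOpenCurrentFilePassword(uf, pwd) : unzOpenCurrentFile(uf);

  if (rc != UNZ_OK) {
    std::snprintf(msg, msglen, "unzOpenCurrentFile failed, rc=%d", rc);
    return true;
  }

  return false;
}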
*/ +/***********************************************************************/ +class DllExport UZDFAM : public DBMFAM { + //friend class UNZFAM; +public: + // Constructors + UZDFAM(PDOSDEF tdp); + UZDFAM(PUZDFAM txfp); + + // Implementation + virtual AMT GetAmType(void) { return TYPE_AM_ZIP; } + virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UZDFAM(this); } + + // Methods + virtual int GetFileLength(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); + virtual bool OpenTableFile(PGLOBAL g); + virtual int GetNext(PGLOBAL g); + //virtual int ReadBuffer(PGLOBAL g); + +protected: + int dbfhead(PGLOBAL g, void* buf); + int ScanHeader(PGLOBAL g, int* rln); + + // Members + UNZIPUTL* zutp; + PDOSDEF tdfp; +}; // end of UZDFAM + +/***********************************************************************/ /* This is the zip file access method. */ /***********************************************************************/ class DllExport ZIPFAM : public DOSFAM { diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 8b9a1ae5c8d..a5c90e50293 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -5880,7 +5880,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else switch (ttp) { case TAB_DBF: - qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL); + qrp= DBFColumns(g, dpath, fn, topt, fnc == FNC_COL); break; #if defined(ODBC_SUPPORT) case TAB_ODBC: @@ -6731,11 +6731,6 @@ int ha_connect::create(const char *name, TABLE *table_arg, PCSZ m= GetListOption(g, "Mulentries", options->oplist, "NO"); bool mul= *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON"); - if (!entry && !mul) { - my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } // endif entry - strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/"); PlugSetPath(zbuf, options->filename, dbpath); PlugSetPath(buf, fn, dbpath); diff --git a/storage/connect/mongo.cpp b/storage/connect/mongo.cpp index 53e2bf377c4..bd3d3b893c1 100644 --- a/storage/connect/mongo.cpp +++ b/storage/connect/mongo.cpp @@ -380,7 +380,6 @@ MGODEF::MGODEF(void) Uri = NULL; Colist = NULL; Filter = NULL; - Level = 0; Base = 0; Version = 0; Pipe = false; diff --git a/storage/connect/mongo.h b/storage/connect/mongo.h index 97c391a217f..dcefac372c0 100644 --- a/storage/connect/mongo.h +++ b/storage/connect/mongo.h @@ -82,7 +82,6 @@ protected: PSZ Wrapname; /* Java wrapper name */ PCSZ Colist; /* Options list */ PCSZ Filter; /* Filtering query */ - int Level; /* Used for catalog table */ int Base; /* The array index base */ int Version; /* The Java driver version */ bool Pipe; /* True is Colist is a pipeline */ diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp index f3d3a010266..8c5cc261899 100644 --- a/storage/connect/plgxml.cpp +++ b/storage/connect/plgxml.cpp @@ -49,7 +49,7 @@ bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry) { #if defined(ZIP_SUPPORT) bool mul = (entry) ? 
strchr(entry, '*') || strchr(entry, '?') : false; - zip = new(g) UNZIPUTL(entry, mul); + zip = new(g) UNZIPUTL(entry, NULL, mul); return zip == NULL; #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); diff --git a/storage/connect/tabcmg.cpp b/storage/connect/tabcmg.cpp index b9b7f6e4b60..f2ff721627c 100644 --- a/storage/connect/tabcmg.cpp +++ b/storage/connect/tabcmg.cpp @@ -26,6 +26,8 @@ #include "tabmul.h" #include "filter.h" +PQRYRES MGOColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt, bool info); + /* -------------------------- Class CMGDISC -------------------------- */ /***********************************************************************/ diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 3d8e89f3f3c..3002f8906ed 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1,11 +1,11 @@ /************* TabDos C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABDOS */ /* ------------- */ -/* Version 4.9.4 */ +/* Version 4.9.5 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2019 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -359,7 +359,26 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) /* Allocate table and file processing class of the proper type. */ /* Column blocks will be allocated only when needed. */ /*********************************************************************/ - if (Zipped) { + if (Recfm == RECFM_DBF) { + if (Catfunc == FNC_NO) { + if (Zipped) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { + txfp = new(g) UZDFAM(this); + } else { + strcpy(g->Message, "Zipped DBF tables are read only"); + return NULL; + } // endif's mode + + } else if (map) + txfp = new(g) DBMFAM(this); + else + txfp = new(g) DBFFAM(this); + + tdbp = new(g) TDBFIX(this, txfp); + } else + tdbp = new(g) TDBDCL(this); // Catfunc should be 'C' + + } else if (Zipped) { #if defined(ZIP_SUPPORT) if (Recfm == RECFM_VAR) { if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { @@ -389,17 +408,6 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; #endif // !ZIP_SUPPORT - } else if (Recfm == RECFM_DBF) { - if (Catfunc == FNC_NO) { - if (map) - txfp = new(g) DBMFAM(this); - else - txfp = new(g) DBFFAM(this); - - tdbp = new(g) TDBFIX(this, txfp); - } else // Catfunc should be 'C' - tdbp = new(g) TDBDCL(this); - } else if (Recfm != RECFM_VAR && Compressed < 2) { if (Huge) txfp = new(g) BGXFAM(this); diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 207a1277fce..80dfe63845d 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -30,6 +30,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ friend class DBFBASE; friend class UNZIPUTL; friend class JSONCOL; + friend class TDBDCL; public: // Constructor DOSDEF(void); diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h index 53c0af1c422..5f859a2bffe 100644 --- a/storage/connect/tabfix.h +++ b/storage/connect/tabfix.h @@ -98,18 +98,20 @@ class DllExport BINCOL : public DOSCOL { /* This is the class declaration for the DBF columns catalog table. 
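In DOSDEF::GetTable() above, the RECFM_DBF case now takes precedence over the generic Zipped branch, so a zipped DBF table is served by the new UZDFAM access method (read-only) rather than the fixed-length UZXFAM, while mapped and plain files keep DBMFAM and DBFFAM. Below is a condensed restatement of that routing as a sketch; the enum and helper function are hypothetical, and only the class names follow the diff.

enum class DbfAm { Catalog, ZippedRead, ZippedRefused, Mapped, Plain };

// catalog_func == (Catfunc != FNC_NO); read_like == (mode is READ, ANY or ALTER)
static DbfAm pick_dbf_am(bool catalog_func, bool zipped, bool mapped, bool read_like)
{
  if (catalog_func)                         // catalog request, handled by TDBDCL
    return DbfAm::Catalog;
  if (zipped)                               // zipped DBF data: UZDFAM, read-only
    return read_like ? DbfAm::ZippedRead : DbfAm::ZippedRefused;
  return mapped ? DbfAm::Mapped             // DBMFAM
                : DbfAm::Plain;             // DBFFAM
}

TDBDCL itself now also keeps the table options (Topt), so its GetResult() can call the extended DBFColumns() shown in the tabfix.h hunk that follows.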
*/ /***********************************************************************/ class TDBDCL : public TDBCAT { - public: - // Constructor - TDBDCL(PDOSDEF tdp) : TDBCAT(tdp) {Fn = tdp->GetFn();} +public: + // Constructor + TDBDCL(PDOSDEF tdp) : TDBCAT(tdp) + {Fn = tdp->GetFn(); Topt = tdp->GetTopt();} - protected: +protected: // Specific routines - virtual PQRYRES GetResult(PGLOBAL g) - {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);} + virtual PQRYRES GetResult(PGLOBAL g) + {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, Topt, false);} - // Members + // Members PCSZ Fn; // The DBF file (path) name - }; // end of class TDBOCL + PTOS Topt; +}; // end of class TDBOCL #endif // __TABFIX__ diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index a8e96e2fe8d..692ca9d0258 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -741,6 +741,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) /***********************************************************************/ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) { + G = NULL; Top = NULL; Row = NULL; Val = NULL; diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 8721a2a5ab7..8c3f1013919 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -104,7 +104,6 @@ public: PCSZ Xcol; /* Name of expandable column */ int Limit; /* Limit of multiple values */ int Pretty; /* Depends on file structure */ - int Level; /* Used for catalog table */ int Base; /* The array index base */ bool Strict; /* Strict syntax checking */ char Sep; /* The Jpath separator */ diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp index c026744dba8..d9c13e2a58a 100644 --- a/storage/connect/tabzip.cpp +++ b/storage/connect/tabzip.cpp @@ -23,6 +23,7 @@ #include "filamzip.h" #include "resource.h" // for IDS_COLUMNS #include "tabdos.h" +#include "tabmul.h" #include "tabzip.h" /* -------------------------- Class ZIPDEF --------------------------- */ @@ -41,7 +42,14 @@ bool ZIPDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) /***********************************************************************/ PTDB ZIPDEF::GetTable(PGLOBAL g, MODE m) { - return new(g) TDBZIP(this); + PTDB tdbp = NULL; + + tdbp = new(g) TDBZIP(this); + + if (Multiple) + tdbp = new(g) TDBMUL(tdbp); + + return tdbp; } // end of GetTable /* ------------------------------------------------------------------- */ @@ -108,7 +116,7 @@ int TDBZIP::Cardinality(PGLOBAL g) Cardinal = (err == UNZ_OK) ? 
(int)ginfo.number_entry : 0; } else - Cardinal = 0; + Cardinal = 10; // Dummy for multiple tables } // endif Cardinal @@ -187,6 +195,7 @@ int TDBZIP::DeleteDB(PGLOBAL g, int irc) void TDBZIP::CloseDB(PGLOBAL g) { close(); + nexterr = UNZ_OK; // For multiple tables Use = USE_READY; // Just to be clean } // end of CloseDB diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h index 32b15281f81..d36e4dc01d0 100644 --- a/storage/connect/tabzip.h +++ b/storage/connect/tabzip.h @@ -48,6 +48,8 @@ public: // Implementation virtual AMT GetAmType(void) {return TYPE_AM_ZIP;} + virtual PCSZ GetFile(PGLOBAL) {return zfn;} + virtual void SetFile(PGLOBAL, PCSZ fn) {zfn = fn;} // Methods virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index c232a719227..c85f9331580 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -861,7 +861,7 @@ buf_LRU_check_size_of_non_data_objects( + UT_LIST_GET_LEN(buf_pool->LRU)) < buf_pool->curr_size / 3) { - if (!buf_lru_switched_on_innodb_mon) { + if (!buf_lru_switched_on_innodb_mon && srv_monitor_event) { /* Over 67 % of the buffer pool is occupied by lock heaps or the adaptive hash index. This may be a memory diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 199941e71f8..fafdda16429 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1105,9 +1105,6 @@ fts_cache_clear( index_cache->doc_stats = NULL; } - mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg)); - cache->sync_heap->arg = NULL; - fts_need_sync = false; cache->total_size = 0; @@ -1115,6 +1112,9 @@ fts_cache_clear( mutex_enter((ib_mutex_t*) &cache->deleted_lock); cache->deleted_doc_ids = NULL; mutex_exit((ib_mutex_t*) &cache->deleted_lock); + + mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg)); + cache->sync_heap->arg = NULL; } /*********************************************************************//** diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 67df52db9fc..3e4da9bb8b2 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4305,16 +4305,7 @@ innobase_commit_low( const bool is_wsrep = trx->is_wsrep(); THD* thd = trx->mysql_thd; if (is_wsrep) { -#ifdef WSREP_PROC_INFO - char info[64]; - info[sizeof(info) - 1] = '\0'; - snprintf(info, sizeof(info) - 1, - "innobase_commit_low():trx_commit_for_mysql(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); - tmp = thd_proc_info(thd, info); -#else tmp = thd_proc_info(thd, "innobase_commit_low()"); -#endif /* WSREP_PROC_INFO */ } #endif /* WITH_WSREP */ if (trx_is_started(trx)) { @@ -18497,11 +18488,14 @@ static void innodb_status_output_update(THD*,st_mysql_sys_var*,void*var,const void*save) { - *static_cast<my_bool*>(var) = *static_cast<const my_bool*>(save); - mysql_mutex_unlock(&LOCK_global_system_variables); - /* Wakeup server monitor thread. */ - os_event_set(srv_monitor_event); - mysql_mutex_lock(&LOCK_global_system_variables); + *static_cast<my_bool*>(var)= *static_cast<const my_bool*>(save); + if (srv_monitor_event) + { + mysql_mutex_unlock(&LOCK_global_system_variables); + /* Wakeup server monitor thread. */ + os_event_set(srv_monitor_event); + mysql_mutex_lock(&LOCK_global_system_variables); + } } /** Update the system variable innodb_encryption_threads. 
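The InnoDB changes above (buf0lru.cc and ha_innodb.cc) make use of srv_monitor_event conditional on the event actually having been created, and innodb_status_output_update keeps the existing pattern of releasing LOCK_global_system_variables around the os_event_set() call and reacquiring it afterwards. The following is a generic, self-contained sketch of that guarded wakeup using standard C++ primitives; none of it is InnoDB code, and the unlock/relock is only mirrored from the diff, not explained beyond it.

#include <atomic>
#include <condition_variable>
#include <mutex>

struct MonitorEvent { std::mutex m; std::condition_variable cv; bool posted = false; };

std::mutex settings_mutex;                        // stands in for LOCK_global_system_variables
std::atomic<MonitorEvent*> monitor_event{nullptr};// stays null until the monitor thread exists
bool status_output = false;

void update_status_output(bool value)
{
  std::unique_lock<std::mutex> guard(settings_mutex);
  status_output = value;

  if (MonitorEvent *ev = monitor_event.load()) {
    guard.unlock();                               // do not hold the settings mutex while signalling
    {
      std::lock_guard<std::mutex> lk(ev->m);
      ev->posted = true;
    }
    ev->cv.notify_one();
    guard.lock();                                 // caller expects the settings mutex held on return
  }
}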
diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc index 0c7d103d25c..8f8ba52c584 100644 --- a/storage/innobase/sync/sync0arr.cc +++ b/storage/innobase/sync/sync0arr.cc @@ -1075,7 +1075,8 @@ sync_array_print_long_waits( sync_array_exit(arr); } - if (noticed) { + if (noticed && srv_monitor_event) { + fprintf(stderr, "InnoDB: ###### Starts InnoDB Monitor" " for 30 secs to print diagnostic info:\n"); diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 0a271a77a36..37c2712b143 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -3205,6 +3205,7 @@ static int write_page(MARIA_SHARE *share, File file, args.page= buff; args.pageno= (pgcache_page_no_t) (pos / share->block_size); args.data= (uchar*) share; + args.crypt_buf= NULL; (* share->kfile.pre_write_hook)(&args); res= (int)my_pwrite(file, args.page, block_size, pos, myf_rw); (* share->kfile.post_write_hook)(res, &args); diff --git a/storage/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c index 43dfc18c710..2b3861b9f7f 100644 --- a/storage/myisammrg/myrg_extra.c +++ b/storage/myisammrg/myrg_extra.c @@ -31,7 +31,7 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function, DBUG_PRINT("info",("function: %lu", (ulong) function)); if (!info->children_attached) - DBUG_RETURN(1); + DBUG_RETURN(0); if (function == HA_EXTRA_CACHE) { info->cache_in_use=1; diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result index 23b7804638f..1feea5e47ee 100644 --- a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result +++ b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result @@ -82,7 +82,7 @@ select * from t1; ERROR HY000: Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a uint)' show warnings; Level Code Message -Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'uint)' at line 1 +Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a uint)' set @@test_sql_discovery_statement='t1:create table t1 (a int)'; select * from t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result index ad920deeda4..17229fa5956 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result @@ -1533,11 +1533,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 * 1.01500000 * 1.01500000 * 0.99500000) 0.81298807395367312459230693948000000000 create table t1 as select 5.05 / 0.014; -Warnings: -Note 1265 Data truncated for column '5.05 / 0.014' at row 1 show warnings; Level Code Message -Note 1265 Data truncated for column '5.05 / 0.014' at row 1 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1652,8 +1649,6 @@ my_col 0.12345678912345678912345678912345678912 DROP TABLE t1; CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col; -Warnings: -Note 1265 Data truncated for column 'my_col' at row 1 DESCRIBE t1; Field Type Null Key Default Extra my_col decimal(65,4) YES NULL diff --git 
a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result index 38252e870df..e915d56f21f 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result @@ -92,8 +92,6 @@ DROP INDEX test ON t1; insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one'); insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one'); insert into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3); -Warnings: -Warning 1265 Data truncated for column 'string' at row 1 insert into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1); Warnings: Warning 1264 Out of range value for column 'utiny' at row 1 @@ -131,7 +129,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col 10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1 11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2 -12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3 +12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3 13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1 14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295 15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295 @@ -183,7 +181,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7 select * from t2; auto string mediumblob_col new_field 1 2 2 ne -2 0.33333333 ne +2 0.3333 ne 3 -1 -1 ne 4 -429496729 -4294967295 ne 5 4294967295 4294967295 ne diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 04f62829003..3dc6f0fbc71 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -145,7 +145,7 @@ TimeoutStopSec=900 ## # Number of files limit. previously [mysqld_safe] open-files-limit -LimitNOFILE=16364 +LimitNOFILE=16384 # Maximium core size. previously [mysqld_safe] core-file-size # LimitCore= diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index f12f44311f5..f4f0cd8c7f2 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -268,7 +268,7 @@ Group=mysql ## # Number of files limit. previously [mysqld_safe] open-files-limit -LimitNOFILE=16364 +LimitNOFILE=16384 # Maximium core size. previously [mysqld_safe] core-file-size # LimitCore= |