author     Marko Mäkelä <marko.makela@mariadb.com>  2023-05-11 09:09:16 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>  2023-05-11 09:09:16 +0300
commit     d4dd63452935cb790bfaabfcbdb7fec80b64136a (patch)
tree       5baa9bb7488bc3e6c4039285621273b2b3a0949a
parent     0d8b0493ee2458eebd6110995c46141b7c7978c0 (diff)
parent     b735ca47738a1d2e995a429f40afd620eb7d8843 (diff)
download   mariadb-git-d4dd63452935cb790bfaabfcbdb7fec80b64136a.tar.gz
Merge mariadb-10.5.20 into 10.5
m---------  libmariadb  0
-rw-r--r--  mysql-test/main/derived_split_innodb.result  533
-rw-r--r--  mysql-test/main/derived_split_innodb.test  236
-rw-r--r--  mysql-test/main/distinct.result  14
-rw-r--r--  mysql-test/main/distinct.test  13
-rw-r--r--  mysql-test/main/explain_innodb.result  18
-rw-r--r--  mysql-test/main/explain_innodb.test  19
-rw-r--r--  mysql-test/main/insert_innodb.result  2
-rw-r--r--  mysql-test/main/insert_innodb.test  2
-rw-r--r--  mysql-test/main/insert_update.result  41
-rw-r--r--  mysql-test/main/insert_update.test  26
-rw-r--r--  mysql-test/main/long_unique_bugs.result  6
-rw-r--r--  mysql-test/main/long_unique_bugs.test  7
-rw-r--r--  mysql-test/main/opt_trace.result  114
-rw-r--r--  mysql-test/main/opt_trace.test  70
-rw-r--r--  mysql-test/main/selectivity.result  8
-rw-r--r--  mysql-test/main/selectivity.test  7
-rw-r--r--  mysql-test/main/selectivity_innodb.result  8
-rw-r--r--  mysql-test/main/selectivity_innodb_notembedded.result  104
-rw-r--r--  mysql-test/main/selectivity_innodb_notembedded.test  16
-rw-r--r--  mysql-test/main/selectivity_no_engine.result  6
-rw-r--r--  mysql-test/main/selectivity_notembedded.result  95
-rw-r--r--  mysql-test/main/selectivity_notembedded.test  121
-rw-r--r--  mysql-test/main/view.result  18
-rw-r--r--  mysql-test/main/view.test  19
-rw-r--r--  mysql-test/std_data/rpl/master-bin-seq_10.3.36.000001  bin 0 -> 1245 bytes
-rw-r--r--  mysql-test/suite/galera/r/galera_var_retry_autocommit.result  12
-rw-r--r--  mysql-test/suite/galera/t/galera_var_retry_autocommit.test  19
-rw-r--r--  mysql-test/suite/galera_sr/r/MDEV-30838.result  15
-rw-r--r--  mysql-test/suite/galera_sr/t/MDEV-30838.test  18
-rw-r--r--  mysql-test/suite/perfschema/r/digest_view.result  50
-rw-r--r--  mysql-test/suite/perfschema/r/start_server_low_digest_sql_length.result  4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_parallel_seq.result  85
-rw-r--r--  mysql-test/suite/rpl/t/rpl_binlog_index.test  8
-rw-r--r--  mysql-test/suite/rpl/t/rpl_gtid_stop_start.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mdev12179.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mdev382.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_parallel_seq.test  131
-rw-r--r--  mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test  4
-rw-r--r--  mysql-test/suite/rpl/t/rpl_semi_sync_event.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_ssl.test  2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test  4
-rw-r--r--  sql/ha_sequence.cc  2
-rw-r--r--  sql/item.cc  10
-rw-r--r--  sql/item.h  10
-rw-r--r--  sql/item_cmpfunc.cc  26
-rw-r--r--  sql/item_cmpfunc.h  6
-rw-r--r--  sql/item_subselect.cc  14
-rw-r--r--  sql/log_event_server.cc  31
-rw-r--r--  sql/opt_split.cc  222
-rw-r--r--  sql/rpl_parallel.cc  7
-rw-r--r--  sql/slave.cc  35
-rw-r--r--  sql/slave.h  3
-rw-r--r--  sql/sql_insert.cc  3
-rw-r--r--  sql/sql_lex.cc  31
-rw-r--r--  sql/sql_lex.h  45
-rw-r--r--  sql/sql_schema.cc  61
-rw-r--r--  sql/sql_schema.h  11
-rw-r--r--  sql/sql_select.cc  42
-rw-r--r--  sql/sql_select.h  31
-rw-r--r--  sql/sql_sequence.cc  17
-rw-r--r--  sql/sql_statistics.cc  44
-rw-r--r--  sql/sql_yacc.yy  59
-rw-r--r--  sql/structs.h  24
-rw-r--r--  sql/table.cc  21
-rw-r--r--  sql/wsrep_schema.cc  5
-rw-r--r--  sql/wsrep_thd.h  9
-rw-r--r--  storage/rocksdb/CMakeLists.txt  5
-rw-r--r--  storage/rocksdb/ut0counter.h  2
m---------  wsrep-lib  0
-rw-r--r--  zlib/ChangeLog  24
-rw-r--r--  zlib/LICENSE  22
-rw-r--r--  zlib/README  4
-rw-r--r--  zlib/compress.c  6
-rw-r--r--  zlib/crc32.c  33
-rw-r--r--  zlib/deflate.c  218
-rw-r--r--  zlib/deflate.h  4
-rw-r--r--  zlib/gzlib.c  2
-rw-r--r--  zlib/gzread.c  8
-rw-r--r--  zlib/gzwrite.c  2
-rw-r--r--  zlib/infback.c  17
-rw-r--r--  zlib/inflate.c  7
-rw-r--r--  zlib/inftrees.c  4
-rw-r--r--  zlib/inftrees.h  2
-rw-r--r--  zlib/make_vms.com  4
-rw-r--r--  zlib/qnx/package.qpg  10
-rw-r--r--  zlib/treebuild.xml  4
-rw-r--r--  zlib/trees.c  123
-rw-r--r--  zlib/uncompr.c  4
-rw-r--r--  zlib/win32/README-WIN32.txt  4
-rw-r--r--  zlib/win32/zlib1.rc  2
-rw-r--r--  zlib/zconf.h.cmakein  19
-rw-r--r--  zlib/zconf.h.in  19
-rw-r--r--  zlib/zlib.3  4
-rw-r--r--  zlib/zlib.h  18
-rwxr-xr-x  zlib/zlib2ansi  4
-rw-r--r--  zlib/zutil.c  16
-rw-r--r--  zlib/zutil.h  1
99 files changed, 2693 insertions, 533 deletions
diff --git a/libmariadb b/libmariadb
-Subproject 4e2408c1cc298ada91b30683501c0c94a662156
+Subproject a3bba4639f55148c59a28a506df8a2b88e5e83a
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 478c6e842bc..37f38a2244d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -285,3 +285,536 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
+#
+# MDEV-26301: Split optimization refills temporary table too many times
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+explain
+select * from
+(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
+1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+# The important part in the below output is:
+# "lateral": 1,
+# "query_block": {
+# "select_id": 2,
+# "r_loops": 5, <-- must be 5, not 30.
+analyze format=json select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 5,
+ "r_rows": 5,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 2,
+ "r_rows": 2,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "t3",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 10,
+ "rows": 3,
+ "r_rows": 3,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 30,
+ "rows": 10,
+ "r_rows": 1,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))",
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 5,
+ "r_total_time_ms": "REPLACED",
+ "outer_ref_condition": "t1.b is not null",
+ "table": {
+ "table_name": "t10",
+ "access_type": "ref",
+ "possible_keys": ["grp_id"],
+ "key": "grp_id",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 100,
+ "r_rows": 100,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t11",
+ "access_type": "ALL",
+ "r_loops": 5,
+ "rows": 10,
+ "r_rows": 10,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "1Kb",
+ "join_type": "BNL",
+ "attached_condition": "trigcond(t11.col1 = t10.col1)",
+ "r_filtered": 10
+ }
+ }
+ }
+ }
+ }
+}
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+explain
+select * from
+t21, t22,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1 and t22.pk=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+explain
+select * from
+t21,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+create table t5 (
+pk int primary key
+);
+insert into t5 select seq from seq_1_to_1000;
+explain
+select * from
+t21,
+(
+(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
+1 PRIMARY t2 ref a a 5 test.t1.b 2
+1 PRIMARY t3 ref a a 5 test.t1.b 3
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 1 1 100
+1 1 1 2 1 2 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 1 2 100
+2 2 2 2 2 2 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 1 3 100
+3 3 3 2 3 2 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 1 4 100
+4 4 4 2 4 2 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 1 5 100
+5 5 5 2 5 2 5 100
+5 5 5 2 5 3 5 100
+set join_cache_level=4;
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 hash_ALL NULL #hash#$hj 5 test.t10.col1 10 Using where; Using join buffer (flat, BNLH join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 1 1 100
+1 1 1 2 1 2 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 1 2 100
+2 2 2 2 2 2 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 1 3 100
+3 3 3 2 3 2 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 1 4 100
+4 4 4 2 4 2 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 1 5 100
+5 5 5 2 5 2 5 100
+5 5 5 2 5 3 5 100
+set join_cache_level=default;
+drop index a on t2;
+drop index a on t3;
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 1000 Using where
+2 DERIVED t10 ALL grp_id NULL NULL NULL 10000 Using temporary; Using filesort
+2 DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 2 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 2 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 2 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 2 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 2 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 2 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 2 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 2 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 2 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 2 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 3 5 100
+drop table t1,t2,t3;
+drop table t10, t11;
+#
+# MDEV-31194: Server crash or assertion failure with join_cache_level=4
+# (a followup to the above bug, MDEV-26301)
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (3),(4);
+CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=Aria;
+INSERT INTO t2 VALUES (1),(2);
+set @tmp1= @@optimizer_switch, @tmp2= @@join_cache_level;
+set
+optimizer_switch= 'derived_with_keys=off',
+join_cache_level= 4;
+SELECT t1.* FROM t1 JOIN (SELECT id, COUNT(*) FROM t2 GROUP BY id) sq ON sq.id= t1.a;
+a
+set optimizer_switch= @tmp1, join_cache_level= @tmp2;
+DROP TABLE t1, t2;
+# End of 10.4 tests
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 7fd8df35985..a0ed51d1dd4 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -229,3 +229,239 @@ where t3.b > 15;
drop table t3, t4;
--echo # End of 10.3 tests
+
+--source include/have_sequence.inc
+
+--echo #
+--echo # MDEV-26301: Split optimization refills temporary table too many times
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+explain
+select * from
+ (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+
+# Now, create tables for Groups.
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q1=
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+eval
+explain $q1;
+
+--echo # The important part in the below output is:
+--echo # "lateral": 1,
+--echo # "query_block": {
+--echo # "select_id": 2,
+--echo # "r_loops": 5, <-- must be 5, not 30.
+--source include/analyze-format.inc
+
+eval
+analyze format=json $q1;
+
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+
+# Same as above but throw in a couple of const tables.
+explain
+select * from
+ t21, t22,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1 and t22.pk=2;
+
+explain
+select * from
+ t21,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+# And also add a non-const table
+
+create table t5 (
+ pk int primary key
+ );
+insert into t5 select seq from seq_1_to_1000;
+
+explain
+select * from
+ t21,
+ (
+ (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q=
+select *
+from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join
+ t3
+ on t3.a=t1.b
+ )
+ left join
+ (
+ select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id
+ )dt
+ on dt.grp_id=t1.b;
+
+eval explain $q;
+eval $q;
+
+set join_cache_level=4;
+eval explain $q;
+eval $q;
+
+set join_cache_level=default;
+
+drop index a on t2;
+drop index a on t3;
+
+eval explain $q;
+eval $q;
+
+drop table t1,t2,t3;
+drop table t10, t11;
+
+
+--echo #
+--echo # MDEV-31194: Server crash or assertion failure with join_cache_level=4
+--echo # (a followup to the above bug, MDEV-26301)
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (3),(4);
+
+CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=Aria;
+INSERT INTO t2 VALUES (1),(2);
+
+set @tmp1= @@optimizer_switch, @tmp2= @@join_cache_level;
+set
+ optimizer_switch= 'derived_with_keys=off',
+ join_cache_level= 4;
+
+SELECT t1.* FROM t1 JOIN (SELECT id, COUNT(*) FROM t2 GROUP BY id) sq ON sq.id= t1.a;
+
+set optimizer_switch= @tmp1, join_cache_level= @tmp2;
+
+# Cleanup
+DROP TABLE t1, t2;
+
+--echo # End of 10.4 tests
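[Annotation] The derived_split_innodb changes above exercise the split optimization (shown as LATERAL DERIVED in EXPLAIN), which refills the materialized derived table once per distinct ref value rather than once per outer row. A minimal sketch of observing the effect outside this test, assuming a MariaDB 10.4+ server with the sequence engine loaded; the table names are illustrative, and the optimization is governed by the split_materialized optimizer_switch flag:

    -- Sketch: toggle the split optimization and compare plans.
    create table t_out (a int, b int);
    insert into t_out select seq, seq from seq_1_to_5;
    create table t_grp (grp_id int, col1 int, key(grp_id));
    insert into t_grp select A.seq, B.seq from seq_1_to_100 A, seq_1_to_100 B;
    analyze table t_out, t_grp persistent for all;
    explain select * from t_out left join
      (select grp_id, count(*) cnt from t_grp group by grp_id) dt
      on dt.grp_id = t_out.b;            -- expect LATERAL DERIVED on t_grp
    set optimizer_switch='split_materialized=off';
    explain select * from t_out left join
      (select grp_id, count(*) cnt from t_grp group by grp_id) dt
      on dt.grp_id = t_out.b;            -- expect plain DERIVED (single refill)
    set optimizer_switch=default;
    drop table t_out, t_grp;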
diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result
index 0c5eccad754..fa9f0259a0f 100644
--- a/mysql-test/main/distinct.result
+++ b/mysql-test/main/distinct.result
@@ -1093,6 +1093,7 @@ sum(distinct 1) sum(t1.d) > 5 c
1 1 0
1 0 5
1 1 6
+SET @sort_buffer_size_save= @@sort_buffer_size;
set @@sort_buffer_size=1024;
insert into t1 select -seq,-seq from seq_1_to_100;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ;
@@ -1106,6 +1107,19 @@ sum(distinct 1) sum(t1.d) > 2 length(group_concat(t1.d)) > 1000 c
1 1 0 5
1 1 0 6
drop table t1;
+set @@sort_buffer_size=@sort_buffer_size_save;
+#
+# MDEV-31113 Server crashes in store_length / Type_handler_string_result::make_sort_key
+# with DISTINCT and group function
+#
+CREATE TABLE t (f INT);
+INSERT INTO t VALUES (1),(2);
+SELECT DISTINCT CONVERT(STDDEV(f), CHAR(16)) AS f1, UUID() AS f2 FROM t GROUP BY f2 WITH ROLLUP;
+f1 f2
+0.0000 #
+0.0000 #
+0.5000 #
+DROP TABLE t;
# End of 10.4 tests
#
# MDEV-27382: OFFSET is ignored when it is combined with the DISTINCT, IN() and JOIN
diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test
index 9039edff20f..893e2dcc9a7 100644
--- a/mysql-test/main/distinct.test
+++ b/mysql-test/main/distinct.test
@@ -834,11 +834,24 @@ select distinct sum(distinct 1), sum(t1.d) > 5 from (t1 e join t1) group by t1.c
select distinct sum(distinct 1), sum(t1.d) > 5, t1.c from (t1 e join t1) group by t1.c;
# Force usage of remove_dup_with_compare() algorithm
+SET @sort_buffer_size_save= @@sort_buffer_size;
set @@sort_buffer_size=1024;
insert into t1 select -seq,-seq from seq_1_to_100;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000 from (t1 e join t1) group by t1.c having t1.c > -2 ;
select distinct sum(distinct 1), sum(t1.d) > 2, length(group_concat(t1.d)) > 1000,t1.c from (t1 e join t1) group by t1.c having t1.c > -2;
drop table t1;
+set @@sort_buffer_size=@sort_buffer_size_save;
+
+--echo #
+--echo # MDEV-31113 Server crashes in store_length / Type_handler_string_result::make_sort_key
+--echo # with DISTINCT and group function
+--echo #
+
+CREATE TABLE t (f INT);
+INSERT INTO t VALUES (1),(2);
+--replace_column 2 #
+SELECT DISTINCT CONVERT(STDDEV(f), CHAR(16)) AS f1, UUID() AS f2 FROM t GROUP BY f2 WITH ROLLUP;
+DROP TABLE t;
--echo # End of 10.4 tests
diff --git a/mysql-test/main/explain_innodb.result b/mysql-test/main/explain_innodb.result
index b46665c279c..c44d7baadea 100644
--- a/mysql-test/main/explain_innodb.result
+++ b/mysql-test/main/explain_innodb.result
@@ -18,3 +18,21 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t1 range NULL id 53 NULL 2 Using index for group-by
SET GLOBAL slow_query_log = @sql_tmp;
drop table t1;
+#
+# MDEV-31181: Server crash in subselect_uniquesubquery_engine::print
+# upon EXPLAIN EXTENDED DELETE
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+EXPLAIN EXTENDED DELETE FROM t1 WHERE a IN (SELECT pk FROM t2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using index
+Warnings:
+Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`))))
+drop table t1, t2;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/explain_innodb.test b/mysql-test/main/explain_innodb.test
index 2c29a6e26da..3dcad4c2d49 100644
--- a/mysql-test/main/explain_innodb.test
+++ b/mysql-test/main/explain_innodb.test
@@ -18,3 +18,22 @@ SELECT * FROM (SELECT id FROM t1 GROUP BY id) dt WHERE 1=0;
SET GLOBAL slow_query_log = @sql_tmp;
drop table t1;
+
+
+--echo #
+--echo # MDEV-31181: Server crash in subselect_uniquesubquery_engine::print
+--echo # upon EXPLAIN EXTENDED DELETE
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+
+EXPLAIN EXTENDED DELETE FROM t1 WHERE a IN (SELECT pk FROM t2);
+
+drop table t1, t2;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
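[Annotation] MDEV-31181 above involves printing the rewritten statement for a unique_subquery access: when an IN subquery compares against a unique key, the optimizer turns it into a primary-index lookup, and EXPLAIN EXTENDED must be able to print that engine. A minimal standalone sketch of the access method, assuming any current MariaDB (table names illustrative):

    create table t_in (a int);
    insert into t_in values (1),(2);
    create table t_pk (pk int primary key);
    insert into t_pk values (1),(2);
    explain extended select * from t_in where a in (select pk from t_pk);
    -- expect: DEPENDENT SUBQUERY t_pk unique_subquery PRIMARY ... Using index
    drop table t_in, t_pk;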
diff --git a/mysql-test/main/insert_innodb.result b/mysql-test/main/insert_innodb.result
index 314412bcfdd..b8f3979d2ba 100644
--- a/mysql-test/main/insert_innodb.result
+++ b/mysql-test/main/insert_innodb.result
@@ -51,7 +51,7 @@ CREATE TEMPORARY TABLE v0 ( v1 TEXT ( 15 ) CHAR SET BINARY NOT NULL NOT NULL UNI
ERROR HY000: Field 'v1' doesn't have a default value
CREATE TEMPORARY TABLE t1 (i TEXT(15) NOT NULL DEFAULT '' UNIQUE CHECK (i)) engine=innodb
REPLACE SELECT NULL AS a;
-ERROR HY000: Field 'DB_ROW_HASH_1' doesn't have a default value
+ERROR 23000: CONSTRAINT `t1.i` failed for `test`.`t1`
#
# End of 10.5 tests
#
diff --git a/mysql-test/main/insert_innodb.test b/mysql-test/main/insert_innodb.test
index b5a9fc72c07..56ddb2eafbf 100644
--- a/mysql-test/main/insert_innodb.test
+++ b/mysql-test/main/insert_innodb.test
@@ -71,7 +71,7 @@ DROP TABLE t2, t1;
--error ER_NO_DEFAULT_FOR_FIELD
CREATE TEMPORARY TABLE v0 ( v1 TEXT ( 15 ) CHAR SET BINARY NOT NULL NOT NULL UNIQUE CHECK ( v1 ) ) REPLACE SELECT NULL AS v3 , 74 AS v2 ;
---error ER_NO_DEFAULT_FOR_FIELD
+--error ER_CONSTRAINT_FAILED
CREATE TEMPORARY TABLE t1 (i TEXT(15) NOT NULL DEFAULT '' UNIQUE CHECK (i)) engine=innodb
REPLACE SELECT NULL AS a;
diff --git a/mysql-test/main/insert_update.result b/mysql-test/main/insert_update.result
index 68a1003ad85..83344971c59 100644
--- a/mysql-test/main/insert_update.result
+++ b/mysql-test/main/insert_update.result
@@ -412,3 +412,44 @@ select if( @stamp1 = @stamp2, "correct", "wrong");
if( @stamp1 = @stamp2, "correct", "wrong")
correct
drop table t1;
+#
+# MDEV-31164 default current_timestamp() not working when used INSERT ON DUPLICATE KEY in some cases
+#
+set timestamp=unix_timestamp('2000-10-20 0:0:0');
+create table t1 (pk integer primary key, val varchar(20) not null, ts timestamp);
+insert t1 (pk, val) values(1, 'val1');
+select * from t1;
+pk val ts
+1 val1 2000-10-20 00:00:00
+set timestamp=unix_timestamp('2000-10-20 1:0:0');
+insert t1 (pk, val) select 2, 'val3' union select 3, 'val4'
+ on duplicate key update ts=now();
+select * from t1;
+pk val ts
+1 val1 2000-10-20 00:00:00
+2 val3 2000-10-20 01:00:00
+3 val4 2000-10-20 01:00:00
+set timestamp=unix_timestamp('2000-10-20 2:0:0');
+insert t1 (pk, val) select 1, 'val1' union select 4, 'val2'
+ on duplicate key update ts=now();
+select * from t1;
+pk val ts
+1 val1 2000-10-20 02:00:00
+2 val3 2000-10-20 01:00:00
+3 val4 2000-10-20 01:00:00
+4 val2 2000-10-20 02:00:00
+set timestamp=unix_timestamp('2000-10-20 3:0:0');
+insert t1 (pk, val) select 5, 'val1' union select 1, 'val2'
+ on duplicate key update ts=now();
+select * from t1;
+pk val ts
+1 val1 2000-10-20 03:00:00
+2 val3 2000-10-20 01:00:00
+3 val4 2000-10-20 01:00:00
+4 val2 2000-10-20 02:00:00
+5 val1 2000-10-20 03:00:00
+drop table t1;
+set timestamp=default;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/insert_update.test b/mysql-test/main/insert_update.test
index 06e16be84d7..25953938ad1 100644
--- a/mysql-test/main/insert_update.test
+++ b/mysql-test/main/insert_update.test
@@ -311,3 +311,29 @@ insert into t1(f1) values(1) on duplicate key update f1=1;
select @stamp2:=f2 from t1;
select if( @stamp1 = @stamp2, "correct", "wrong");
drop table t1;
+
+--echo #
+--echo # MDEV-31164 default current_timestamp() not working when used INSERT ON DUPLICATE KEY in some cases
+--echo #
+set timestamp=unix_timestamp('2000-10-20 0:0:0');
+create table t1 (pk integer primary key, val varchar(20) not null, ts timestamp);
+insert t1 (pk, val) values(1, 'val1');
+select * from t1;
+set timestamp=unix_timestamp('2000-10-20 1:0:0');
+insert t1 (pk, val) select 2, 'val3' union select 3, 'val4'
+ on duplicate key update ts=now();
+select * from t1;
+set timestamp=unix_timestamp('2000-10-20 2:0:0');
+insert t1 (pk, val) select 1, 'val1' union select 4, 'val2'
+ on duplicate key update ts=now();
+select * from t1;
+set timestamp=unix_timestamp('2000-10-20 3:0:0');
+insert t1 (pk, val) select 5, 'val1' union select 1, 'val2'
+ on duplicate key update ts=now();
+select * from t1;
+drop table t1;
+set timestamp=default;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
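[Annotation] MDEV-31164 above hinges on the implicit defaults of a TIMESTAMP column: declared without an explicit default, the first such column gets DEFAULT current_timestamp() ON UPDATE current_timestamp(), which the insert path of INSERT ... SELECT ... ON DUPLICATE KEY UPDATE failed to honour. A quick sketch of the declaration the test relies on, assuming the MariaDB default explicit_defaults_for_timestamp=OFF:

    create table t_ts (pk int primary key, ts timestamp);
    show create table t_ts;
    -- expect ts reported as:
    --   `ts` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp()
    drop table t_ts;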
diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result
index 2cb54e0acff..6bddcee362d 100644
--- a/mysql-test/main/long_unique_bugs.result
+++ b/mysql-test/main/long_unique_bugs.result
@@ -449,6 +449,12 @@ a b
1 xxx
drop table t1;
#
+# MDEV-22756 SQL Error (1364): Field 'DB_ROW_HASH_1' doesn't have a default value
+#
+create table t1 (f text not null, unique (f));
+insert into t1 (f) select 'f';
+drop table t1;
+#
# End of 10.4 tests
#
#
diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test
index 86003a6881c..3c1c101ffa0 100644
--- a/mysql-test/main/long_unique_bugs.test
+++ b/mysql-test/main/long_unique_bugs.test
@@ -444,6 +444,13 @@ select * from t1;
drop table t1;
--echo #
+--echo # MDEV-22756 SQL Error (1364): Field 'DB_ROW_HASH_1' doesn't have a default value
+--echo #
+create table t1 (f text not null, unique (f));
+insert into t1 (f) select 'f';
+drop table t1;
+
+--echo #
--echo # End of 10.4 tests
--echo #
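[Annotation] MDEV-22756 above concerns the hidden DB_ROW_HASH_1 column that implements long UNIQUE constraints: a unique key over a TEXT/BLOB column is stored as a HASH index on that generated column, and INSERT ... SELECT must not demand a default for it. A minimal sketch, assuming MariaDB 10.4+ (table name illustrative):

    create table t_lu (f text not null, unique (f));
    show create table t_lu;            -- expect: UNIQUE KEY `f` (`f`) USING HASH
    insert into t_lu (f) select 'f';   -- must succeed, no DB_ROW_HASH_1 error
    drop table t_lu;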
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index dac5a7f7fcb..e5d49c6923a 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -450,6 +450,11 @@ select * from v2 {
]
},
{
+ "check_split_materialized": {
+ "not_applicable": "no candidate field can be accessed through ref"
+ }
+ },
+ {
"best_join_order": ["t1"]
},
{
@@ -773,6 +778,11 @@ explain select * from v1 {
]
},
{
+ "check_split_materialized": {
+ "not_applicable": "group list has no candidates"
+ }
+ },
+ {
"best_join_order": ["t1"]
},
{
@@ -8825,6 +8835,110 @@ SET optimizer_trace=DEFAULT;
DROP VIEW v;
DROP TABLE t;
#
+# MDEV-26301: Split optimization improvements: Optimizer Trace coverage
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+set optimizer_trace=1;
+explain
+select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
+from information_schema.optimizer_trace;
+JS
+[
+ {
+ "split_candidates":
+ ["t10.grp_id"]
+ }
+]
+select
+json_detailed(
+json_remove(
+json_extract(trace, '$**.choose_best_splitting')
+, '$[0].split_plan_search[0]'
+ )
+) as JS
+from information_schema.optimizer_trace;
+JS
+[
+ {
+ "considered_keys":
+ [
+ {
+ "table_name": "t10",
+ "index": "grp_id",
+ "rec_per_key": 100,
+ "param_tables": 1
+ }
+ ],
+ "refills": 5,
+ "spl_pd_boundary": 2,
+ "split_plan_search":
+ [],
+ "lead_table": "t10",
+ "index": "grp_id",
+ "parts": 1,
+ "split_sel": 0.001,
+ "cost": 2535.968504,
+ "records": 100,
+ "refills": 5,
+ "chosen": true
+ }
+]
+drop table t1,t2,t3,t10,t11;
+set optimizer_trace=DEFAULT;
+#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 0d07a0e864f..90f8ef38ead 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -697,6 +697,76 @@ DROP VIEW v;
DROP TABLE t;
--echo #
+--echo # MDEV-26301: Split optimization improvements: Optimizer Trace coverage
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+set optimizer_trace=1;
+explain
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
+from information_schema.optimizer_trace;
+
+select
+ json_detailed(
+ json_remove(
+ json_extract(trace, '$**.choose_best_splitting')
+ , '$[0].split_plan_search[0]'
+ )
+ ) as JS
+from information_schema.optimizer_trace;
+
+drop table t1,t2,t3,t10,t11;
+set optimizer_trace=DEFAULT;
+
+--echo #
--echo # End of 10.4 tests
--echo #
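[Annotation] The opt_trace additions above read the new check_split_materialized and choose_best_splitting nodes out of the session optimizer trace. The general pattern for inspecting any trace node, assuming MariaDB 10.4+ (the trace table holds one row for the last traced statement; the node path here is illustrative):

    set optimizer_trace='enabled=on';
    create table t_tr (a int, key(a));
    insert into t_tr values (1),(2),(3);
    explain select * from t_tr where a = 2;
    select json_detailed(json_extract(trace, '$**.considered_execution_plans')) as js
    from information_schema.optimizer_trace;
    set optimizer_trace='enabled=off';
    drop table t_tr;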
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 5220483f164..5a7ec7799a0 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -834,7 +834,7 @@ flush table t1;
set optimizer_use_condition_selectivity=4;
explain extended select * from t1 where a=0;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1025 0.39 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1025 0.78 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 0
drop table t1;
@@ -1649,7 +1649,7 @@ test.t1 analyze status Table is already up to date
# Check what info the optimizer has about selectivities
explain extended select * from t1 use index () where a in (17,51,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 3.90 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 3.91 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5)
explain extended select * from t1 use index () where b=2;
@@ -1941,9 +1941,9 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+DROP TABLE t1;
+# End of 10.2 tests
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
-DROP TABLE t1;
-# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index 4e4513d09d6..df3850d74b7 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -1321,16 +1321,17 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
-set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
-set histogram_size=@save_histogram_size;
-set use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
--echo # End of 10.2 tests
+
#
# Clean up
#
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
--source include/restore_charset.inc
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 70d4994a9f5..f9c623cd4b3 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -843,7 +843,7 @@ flush table t1;
set optimizer_use_condition_selectivity=4;
explain extended select * from t1 where a=0;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1025 0.39 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1025 0.78 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 0
drop table t1;
@@ -1659,7 +1659,7 @@ test.t1 analyze status OK
# Check what info the optimizer has about selectivities
explain extended select * from t1 use index () where a in (17,51,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 3.90 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 3.91 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5)
explain extended select * from t1 use index () where b=2;
@@ -1951,11 +1951,11 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+DROP TABLE t1;
+# End of 10.2 tests
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
-DROP TABLE t1;
-# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
set @tmp_ust= @@use_stat_tables;
diff --git a/mysql-test/main/selectivity_innodb_notembedded.result b/mysql-test/main/selectivity_innodb_notembedded.result
new file mode 100644
index 00000000000..8b06fe7556b
--- /dev/null
+++ b/mysql-test/main/selectivity_innodb_notembedded.result
@@ -0,0 +1,104 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+Warnings:
+Warning 1287 '@@storage_engine' is deprecated and will be removed in a future release. Please use '@@default_storage_engine' instead
+set @save_optimizer_switch_for_selectivity_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+drop table if exists t0,t1,t2,t3;
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+set optimizer_use_condition_selectivity=3;
+#
+# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+#
+create table t0(a int);
+insert into t0 select 1 from seq_1_to_78;
+create table t1(a int);
+insert into t1 select 1 from seq_1_to_26;
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+insert into t10 select 34785 from t0, seq_1_to_8;
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+insert into t10 select 65216 from t0, seq_1_to_52;
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+insert into t10 select 65219 from t0;
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+insert into t10 select 73915 from t0, seq_1_to_40;
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+insert into t10 select 91302 from t0, seq_1_to_6;
+insert into t10 select 91303 from t1;
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+insert into t10 select 91305 from t0, seq_1_to_8;
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status OK
+flush tables;
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
+# Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+sel
+[
+ [
+ {
+ "column_name": "a",
+ "ranges":
+ ["91303 <= a <= 91303"],
+ "selectivity_from_histogram": 0.035714283
+ }
+ ]
+]
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+#
+# End of 10.4 tests
+#
+#
+# Clean up
+#
+set @@global.histogram_size=@save_histogram_size;
+set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
+SET SESSION STORAGE_ENGINE=DEFAULT;
+Warnings:
+Warning 1287 '@@storage_engine' is deprecated and will be removed in a future release. Please use '@@default_storage_engine' instead
diff --git a/mysql-test/main/selectivity_innodb_notembedded.test b/mysql-test/main/selectivity_innodb_notembedded.test
new file mode 100644
index 00000000000..387f7dcb7de
--- /dev/null
+++ b/mysql-test/main/selectivity_innodb_notembedded.test
@@ -0,0 +1,16 @@
+--source include/have_innodb.inc
+# This test is slow on buildbot.
+--source include/big_test.inc
+--source include/default_optimizer_switch.inc
+--source include/not_embedded.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+set @save_optimizer_switch_for_selectivity_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+
+--source selectivity_notembedded.test
+
+set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/selectivity_no_engine.result b/mysql-test/main/selectivity_no_engine.result
index b6830e91f61..9ecf2eea23a 100644
--- a/mysql-test/main/selectivity_no_engine.result
+++ b/mysql-test/main/selectivity_no_engine.result
@@ -36,12 +36,12 @@ test.t2 analyze status OK
# The following two must have the same in 'Extra' column:
explain extended select * from t2 where col1 IN (20, 180);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1100 1.35 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1100 1.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where `test`.`t2`.`col1` in (20,180)
explain extended select * from t2 where col1 IN (180, 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1100 1.35 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1100 1.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where `test`.`t2`.`col1` in (180,20)
drop table t1, t2;
@@ -102,7 +102,7 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain extended select * from t1 where col1 in (1,2,3);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 3.37 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 2.97 Using where
Warnings:
Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`col1` in (1,2,3)
# Must not cause fp division by zero, or produce nonsense numbers:
diff --git a/mysql-test/main/selectivity_notembedded.result b/mysql-test/main/selectivity_notembedded.result
new file mode 100644
index 00000000000..d2e90a19a68
--- /dev/null
+++ b/mysql-test/main/selectivity_notembedded.result
@@ -0,0 +1,95 @@
+drop table if exists t0,t1,t2,t3;
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+set optimizer_use_condition_selectivity=3;
+#
+# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+#
+create table t0(a int);
+insert into t0 select 1 from seq_1_to_78;
+create table t1(a int);
+insert into t1 select 1 from seq_1_to_26;
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+insert into t10 select 34785 from t0, seq_1_to_8;
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+insert into t10 select 65216 from t0, seq_1_to_52;
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+insert into t10 select 65219 from t0;
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+insert into t10 select 73915 from t0, seq_1_to_40;
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+insert into t10 select 91302 from t0, seq_1_to_6;
+insert into t10 select 91303 from t1;
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+insert into t10 select 91305 from t0, seq_1_to_8;
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status OK
+flush tables;
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
+# Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+sel
+[
+ [
+ {
+ "column_name": "a",
+ "ranges":
+ ["91303 <= a <= 91303"],
+ "selectivity_from_histogram": 0.035714283
+ }
+ ]
+]
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+#
+# End of 10.4 tests
+#
+#
+# Clean up
+#
+set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity_notembedded.test b/mysql-test/main/selectivity_notembedded.test
new file mode 100644
index 00000000000..6752bd3c7e1
--- /dev/null
+++ b/mysql-test/main/selectivity_notembedded.test
@@ -0,0 +1,121 @@
+--source include/no_valgrind_without_big.inc
+--source include/have_stat_tables.inc
+--source include/have_sequence.inc
+--source include/default_charset.inc
+--source include/not_embedded.inc
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+
+select @@global.use_stat_tables;
+select @@session.use_stat_tables;
+
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+
+--source include/default_optimizer_switch.inc
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+
+# check that statistics on nulls is used
+
+set optimizer_use_condition_selectivity=3;
+
+--echo #
+--echo # MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+--echo #
+create table t0(a int); # This holds how many rows we hold in a bucket.
+insert into t0 select 1 from seq_1_to_78;
+
+create table t1(a int); # one-third of a bucket
+insert into t1 select 1 from seq_1_to_26;
+
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+
+
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+
+
+insert into t10 select 34785 from t0, seq_1_to_8;
+
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+
+insert into t10 select 65216 from t0, seq_1_to_52;
+
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+
+insert into t10 select 65219 from t0;
+
+
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+
+insert into t10 select 73915 from t0, seq_1_to_40;
+
+
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+
+insert into t10 select 91302 from t0, seq_1_to_6;
+
+insert into t10 select 91303 from t1; # Only 1/3rd of bucket matches the search tuple
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+
+insert into t10 select 91305 from t0, seq_1_to_8;
+
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+flush tables;
+
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+
+--echo # Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
+
+--echo #
+--echo # Clean up
+--echo #
+--source include/restore_charset.inc
+set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index c3696b8e52e..bc319d3c5b7 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -6951,4 +6951,22 @@ create algorithm=merge view v as
select * from t1 left join t2 on t1.a=t2.b and t1.a in (select d from t3);
ERROR 42S22: Unknown column 'd' in 'field list'
drop table t1,t2,t3;
+#
+# MDEV-31189: Server crash or assertion failure in upon 2nd
+# execution of PS with views and HAVING
+#
+CREATE TABLE t (f INT);
+INSERT INTO t VALUES (1),(2);
+CREATE VIEW v1 AS SELECT 1 AS a;
+CREATE VIEW v2 AS SELECT a FROM v1;
+PREPARE stmt FROM "SELECT * FROM v2 HAVING 1 IN (SELECT f FROM t)";
+EXECUTE stmt;
+a
+1
+EXECUTE stmt;
+a
+1
+DROP VIEW v1;
+DROP VIEW v2;
+DROP TABLE t;
# End of 10.4 tests
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index 408c8eb2830..0556d612975 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -6684,4 +6684,23 @@ create algorithm=merge view v as
drop table t1,t2,t3;
+--echo #
+--echo # MDEV-31189: Server crash or assertion failure in upon 2nd
+--echo # execution of PS with views and HAVING
+--echo #
+
+CREATE TABLE t (f INT);
+INSERT INTO t VALUES (1),(2); # Optional, fails either way
+CREATE VIEW v1 AS SELECT 1 AS a;
+CREATE VIEW v2 AS SELECT a FROM v1;
+
+PREPARE stmt FROM "SELECT * FROM v2 HAVING 1 IN (SELECT f FROM t)";
+EXECUTE stmt;
+EXECUTE stmt;
+
+# Cleanup
+DROP VIEW v1;
+DROP VIEW v2;
+DROP TABLE t;
+
--echo # End of 10.4 tests
diff --git a/mysql-test/std_data/rpl/master-bin-seq_10.3.36.000001 b/mysql-test/std_data/rpl/master-bin-seq_10.3.36.000001
new file mode 100644
index 00000000000..0fa163d0484
--- /dev/null
+++ b/mysql-test/std_data/rpl/master-bin-seq_10.3.36.000001
Binary files differ
diff --git a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
index 56c2c995402..50667b0a4fa 100644
--- a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
+++ b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
@@ -20,17 +20,25 @@ DROP TABLE t1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
+SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
INSERT INTO t1 (f1) VALUES (3);
connection node_1a;
SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
connection node_2;
TRUNCATE TABLE t1;
-connection node_1;
+connection node_1a;
+SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached';
SELECT COUNT(*) FROM t1;
COUNT(*)
0
+SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue';
+connection node_1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1
SET DEBUG_SYNC = 'RESET';
+SET GLOBAL debug_dbug = NULL;
DROP TABLE t1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
@@ -64,6 +72,8 @@ SET SESSION wsrep_retry_autocommit = 64;
SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue EXECUTE 64';
INSERT INTO t1 VALUES (5);
+connection node_2;
+connection node_1;
connection node_1;
SELECT COUNT(*) FROM t1;
COUNT(*)
diff --git a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
index bd10e448e06..c58eba1410e 100644
--- a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
+++ b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
@@ -25,6 +25,8 @@ SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continu
SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
+--let $wait_condition = SELECT count(*)=1 FROM information_schema.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+--source include/wait_condition.inc
TRUNCATE TABLE t1;
--connection node_1
@@ -44,6 +46,7 @@ DROP TABLE t1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
+SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
--send INSERT INTO t1 (f1) VALUES (3)
@@ -51,14 +54,21 @@ SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continu
SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
+--let $wait_condition = SELECT count(*)=1 FROM information_schema.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+--source include/wait_condition.inc
TRUNCATE TABLE t1;
+--connection node_1a
+SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached';
+SELECT COUNT(*) FROM t1;
+SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue';
+
--connection node_1
---error 0,ER_LOCK_DEADLOCK
--reap
SELECT COUNT(*) FROM t1;
SET DEBUG_SYNC = 'RESET';
+SET GLOBAL debug_dbug = NULL;
DROP TABLE t1;
@@ -79,6 +89,8 @@ SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continu
SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
+--let $wait_condition = SELECT count(*)=1 FROM information_schema.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+--source include/wait_condition.inc
TRUNCATE TABLE t1;
--connection node_1a
@@ -114,6 +126,11 @@ SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continu
--send INSERT INTO t1 VALUES (5)
+--connection node_2
+--let $wait_condition = SELECT count(*)=1 FROM information_schema.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+--source include/wait_condition.inc
+
+--connection node_1
--disable_query_log
--disable_result_log
--let $count = 64
diff --git a/mysql-test/suite/galera_sr/r/MDEV-30838.result b/mysql-test/suite/galera_sr/r/MDEV-30838.result
new file mode 100644
index 00000000000..6997b9c4d5d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/MDEV-30838.result
@@ -0,0 +1,15 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+SET debug_dbug='+d,ib_create_table_fail_too_many_trx';
+INSERT INTO t1 VALUES(2);
+ERROR HY000: Error while appending streaming replication fragment
+COMMIT;
+SELECT * FROM t1;
+f1
+SET debug_dbug='-d,ib_create_table_fail_too_many_trx';
+DROP TABLE t1;
+CALL mtr.add_suppression("Error writing into mysql.wsrep_streaming_log: 177");
diff --git a/mysql-test/suite/galera_sr/t/MDEV-30838.test b/mysql-test/suite/galera_sr/t/MDEV-30838.test
new file mode 100644
index 00000000000..39ca7d2a375
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/MDEV-30838.test
@@ -0,0 +1,18 @@
+#
+# MDEV-30838 - Assertion `m_thd == _current_thd()' failed in
+# virtual int Wsrep_client_service::bf_rollback()
+#
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
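+
+# The dbug injection below makes the write of the streaming-replication
+# fragment fail with error 177 (HA_ERR_TOO_MANY_CONCURRENT_TRXS), hence the
+# suppression of "Error writing into mysql.wsrep_streaming_log: 177" below.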
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+SET debug_dbug='+d,ib_create_table_fail_too_many_trx';
+--error ER_ERROR_DURING_COMMIT
+INSERT INTO t1 VALUES(2);
+COMMIT;
+SELECT * FROM t1;
+SET debug_dbug='-d,ib_create_table_fail_too_many_trx';
+DROP TABLE t1;
+CALL mtr.add_suppression("Error writing into mysql.wsrep_streaming_log: 177");
diff --git a/mysql-test/suite/perfschema/r/digest_view.result b/mysql-test/suite/perfschema/r/digest_view.result
index 659c17ac5dc..5ed23a07092 100644
--- a/mysql-test/suite/perfschema/r/digest_view.result
+++ b/mysql-test/suite/perfschema/r/digest_view.result
@@ -191,17 +191,17 @@ SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR
FROM performance_schema.events_statements_summary_by_digest
ORDER BY DIGEST_TEXT;
SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR
-test b09ba79901451a3ffe552333c381e88a EXPLAIN SELECT * FROM `test` . `v1` 1
-test dd6dc5d5113f799b5285767c07012224 EXPLAIN SELECT * FROM `test` . `v1` WHERE `a` = ? 1
-test 1f1038b822a0be14896a83f14f636d3c EXPLAIN SELECT * FROM `test` . `v1` WHERE `b` > ? 1
-test 3958e74f20c70e3ceabeaf97ba341a35 EXPLAIN SELECT `a` , `b` FROM `test` . `v1` 1
-test 5a885e25d8b50c97edaa741a81a24929 EXPLAIN SELECT `b` , `a` FROM `test` . `v1` 1
-test 6e05f1ed475ef896fa98bc8f1f44bbfa SELECT * FROM `test` . `v1` 1
-test 96f38ba3c6be9dd6f7170f62550e7d4f SELECT * FROM `test` . `v1` WHERE `a` = ? 1
-test 4027a714ef42d32c2c750b0cf4b51be8 SELECT * FROM `test` . `v1` WHERE `b` > ? 1
-test 8b0d66e44b6f489f5dd0422c725e3ee2 SELECT `a` , `b` FROM `test` . `v1` 1
-test 7bc3d60a4285971a627e5e3f52d09c2c SELECT `b` , `a` FROM `test` . `v1` 1
-test 85c6d6adac956e76a890209c3be41520 TRUNCATE TABLE `performance_schema` . `events_statements_summary_by_digest` 1
+test 37dea8f84aac346903b2a7458f5a205c EXPLAIN SELECT * FROM `test` . `v1` 1
+test a315952945621324a6bfcf1f6661bc1f EXPLAIN SELECT * FROM `test` . `v1` WHERE `a` = ? 1
+test 1330b191a5d215acdf8d0b735ec6f336 EXPLAIN SELECT * FROM `test` . `v1` WHERE `b` > ? 1
+test a337bc71da7c8fbc66b6de7512427352 EXPLAIN SELECT `a` , `b` FROM `test` . `v1` 1
+test 0b41d635fa372446c43c91e2aff21566 EXPLAIN SELECT `b` , `a` FROM `test` . `v1` 1
+test 2bc3eac48624171b6b2fb16df6a5bbea SELECT * FROM `test` . `v1` 1
+test 71eee1f484086701a377431beee4964f SELECT * FROM `test` . `v1` WHERE `a` = ? 1
+test f9fc1d70334997a0d84ae02de912fc3b SELECT * FROM `test` . `v1` WHERE `b` > ? 1
+test 51eb6f80b8fc7e42278d2ef05338f9c6 SELECT `a` , `b` FROM `test` . `v1` 1
+test ba48024b5bd0a5bf03ae8e2593f7a8b3 SELECT `b` , `a` FROM `test` . `v1` 1
+test ce479a0a1fd4bd47717f1c9082833700 TRUNCATE TABLE `performance_schema` . `events_statements_summary_by_digest` 1
DROP TABLE test.v1;
CREATE VIEW test.v1 AS SELECT * FROM test.t1;
EXPLAIN SELECT * from test.v1;
@@ -248,19 +248,19 @@ SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR
FROM performance_schema.events_statements_summary_by_digest
ORDER BY DIGEST_TEXT;
SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR
-test 2394efff76d5d6cd2fc36c009caedb6a CREATE VIEW `test` . `v1` AS SELECT * FROM `test` . `t1` 1
-test 8fd87aff2a1cba6e969f5f88f65e8def DROP TABLE `test` . `v1` 1
-test b09ba79901451a3ffe552333c381e88a EXPLAIN SELECT * FROM `test` . `v1` 2
-test dd6dc5d5113f799b5285767c07012224 EXPLAIN SELECT * FROM `test` . `v1` WHERE `a` = ? 2
-test 1f1038b822a0be14896a83f14f636d3c EXPLAIN SELECT * FROM `test` . `v1` WHERE `b` > ? 2
-test 3958e74f20c70e3ceabeaf97ba341a35 EXPLAIN SELECT `a` , `b` FROM `test` . `v1` 2
-test 5a885e25d8b50c97edaa741a81a24929 EXPLAIN SELECT `b` , `a` FROM `test` . `v1` 2
-test 6e05f1ed475ef896fa98bc8f1f44bbfa SELECT * FROM `test` . `v1` 2
-test 96f38ba3c6be9dd6f7170f62550e7d4f SELECT * FROM `test` . `v1` WHERE `a` = ? 2
-test 4027a714ef42d32c2c750b0cf4b51be8 SELECT * FROM `test` . `v1` WHERE `b` > ? 2
-test 6bf49b04e433e53f3ef755b00777810e SELECT SCHEMA_NAME , `DIGEST` , `DIGEST_TEXT` , `COUNT_STAR` FROM `performance_schema` . `events_statements_summary_by_digest` ORDER BY `DIGEST_TEXT` 1
-test 8b0d66e44b6f489f5dd0422c725e3ee2 SELECT `a` , `b` FROM `test` . `v1` 2
-test 7bc3d60a4285971a627e5e3f52d09c2c SELECT `b` , `a` FROM `test` . `v1` 2
-test 85c6d6adac956e76a890209c3be41520 TRUNCATE TABLE `performance_schema` . `events_statements_summary_by_digest` 1
+test 441b9375dc36ec4fe7044966d0bf1c1b CREATE VIEW `test` . `v1` AS SELECT * FROM `test` . `t1` 1
+test c14613cf8c7cb5f85c64f82e45ae95d6 DROP TABLE `test` . `v1` 1
+test 37dea8f84aac346903b2a7458f5a205c EXPLAIN SELECT * FROM `test` . `v1` 2
+test a315952945621324a6bfcf1f6661bc1f EXPLAIN SELECT * FROM `test` . `v1` WHERE `a` = ? 2
+test 1330b191a5d215acdf8d0b735ec6f336 EXPLAIN SELECT * FROM `test` . `v1` WHERE `b` > ? 2
+test a337bc71da7c8fbc66b6de7512427352 EXPLAIN SELECT `a` , `b` FROM `test` . `v1` 2
+test 0b41d635fa372446c43c91e2aff21566 EXPLAIN SELECT `b` , `a` FROM `test` . `v1` 2
+test 2bc3eac48624171b6b2fb16df6a5bbea SELECT * FROM `test` . `v1` 2
+test 71eee1f484086701a377431beee4964f SELECT * FROM `test` . `v1` WHERE `a` = ? 2
+test f9fc1d70334997a0d84ae02de912fc3b SELECT * FROM `test` . `v1` WHERE `b` > ? 2
+test 8dd5015809dfc2fef20ff7fb96288bf4 SELECT SCHEMA_NAME , `DIGEST` , `DIGEST_TEXT` , `COUNT_STAR` FROM `performance_schema` . `events_statements_summary_by_digest` ORDER BY `DIGEST_TEXT` 1
+test 51eb6f80b8fc7e42278d2ef05338f9c6 SELECT `a` , `b` FROM `test` . `v1` 2
+test ba48024b5bd0a5bf03ae8e2593f7a8b3 SELECT `b` , `a` FROM `test` . `v1` 2
+test ce479a0a1fd4bd47717f1c9082833700 TRUNCATE TABLE `performance_schema` . `events_statements_summary_by_digest` 1
DROP VIEW test.v1;
DROP TABLE test.t1;
diff --git a/mysql-test/suite/perfschema/r/start_server_low_digest_sql_length.result b/mysql-test/suite/perfschema/r/start_server_low_digest_sql_length.result
index c991efce659..ea505396a08 100644
--- a/mysql-test/suite/perfschema/r/start_server_low_digest_sql_length.result
+++ b/mysql-test/suite/perfschema/r/start_server_low_digest_sql_length.result
@@ -8,5 +8,5 @@ SELECT 1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1
####################################
SELECT event_name, digest, digest_text, sql_text FROM events_statements_history_long;
event_name digest digest_text sql_text
-statement/sql/select ade774bdfbc132a71810ede8ef469660 SELECT ? + ? + SELECT ...
-statement/sql/truncate 0f84807fb4a75d0f391f8a93e7c3c182 TRUNCATE TABLE truncat...
+statement/sql/select d2246116093fecdd0c2fa9a15a9aa724 SELECT ? + ? + SELECT ...
+statement/sql/truncate ac81cac1e4b9fd679e83b21b5e88dfa6 TRUNCATE TABLE truncat...
diff --git a/mysql-test/suite/rpl/r/rpl_parallel_seq.result b/mysql-test/suite/rpl/r/rpl_parallel_seq.result
new file mode 100644
index 00000000000..ae4041f470d
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_parallel_seq.result
@@ -0,0 +1,85 @@
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+# MDEV-29621: sequence engine events logged with binlog_row_image=FULL
+# MDL-deadlock on the parallel slave.
+connection master;
+CREATE SEQUENCE s1;
+SET @@session.binlog_row_image=FULL;
+SET @@session.debug_dbug="+d,binlog_force_commit_id";
+SET @commit_id=7;
+SET @@gtid_seq_no=100;
+SELECT NEXT VALUE FOR s1;
+NEXT VALUE FOR s1
+1
+INSERT INTO s1 VALUES(2, 1, 10, 1, 2, 1, 1, 0);
+SET @@session.debug_dbug="";
+connection slave;
+SET @@global.slave_parallel_threads=2;
+SET @@global.slave_parallel_mode=optimistic;
+SET @@global.debug_dbug="+d,hold_worker_on_schedule";
+include/start_slave.inc
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+connection master;
+DROP SEQUENCE s1;
+connection slave;
+include/stop_slave.inc
+# Simulate a buggy 10.3.36 master to prove that the parallel applier
+# no longer deadlocks when replaying the above master load.
+connection master;
+include/rpl_stop_server.inc [server_number=1]
+include/rpl_start_server.inc [server_number=1]
+connection slave;
+RESET MASTER;
+SET @@global.gtid_slave_pos="";
+CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_use_gtid=slave_pos;
+START SLAVE UNTIL MASTER_GTID_POS='0-1-102';
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+# Normal stop is expected
+include/wait_for_slave_to_stop.inc
+# MDEV-31077 ALTER SEQUENCE may end up in optimistic parallel slave binlog out-of-order
+# The test proves that ALTER SEQUENCE is binlogged before the following transaction.
+connection slave;
+include/stop_slave.inc
+Warnings:
+Note 1255 Slave already has been stopped
+RESET MASTER;
+SET @@global.gtid_slave_pos="";
+SET @@global.gtid_strict_mode=1;
+connection master;
+RESET MASTER;
+CREATE TABLE ti (a INT) ENGINE=innodb;
+CREATE SEQUENCE s2 ENGINE=innodb;
+SET @@gtid_seq_no=100;
+ALTER SEQUENCE s2 restart with 1;
+INSERT INTO ti SET a=1;
+include/save_master_gtid.inc
+SELECT @@global.gtid_binlog_state "Master gtid state";
+Master gtid state
+0-1-101
+connection slave;
+include/start_slave.inc
+SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "no 100,101 yet in both";
+@@global.gtid_binlog_state no 100,101 yet in both
+0-1-2 0-1-2
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+# Normal sync with master proves the fixes correct
+include/sync_with_master_gtid.inc
+SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "all through 101 have been committed";
+@@global.gtid_binlog_state all through 101 have been committed
+0-1-101 0-1-101
+connection slave;
+include/stop_slave.inc
+SET debug_sync = RESET;
+SET @@global.slave_parallel_threads= 0;
+SET @@global.slave_parallel_mode= optimistic;
+SET @@global.debug_dbug = "";
+SET @@global.gtid_strict_mode=0;
+include/start_slave.inc
+connection master;
+DROP SEQUENCE s2;
+DROP TABLE ti;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_binlog_index.test b/mysql-test/suite/rpl/t/rpl_binlog_index.test
index 95c49c3d574..6112affb3c4 100644
--- a/mysql-test/suite/rpl/t/rpl_binlog_index.test
+++ b/mysql-test/suite/rpl/t/rpl_binlog_index.test
@@ -17,10 +17,6 @@
# BUG#12133 master.index file keeps mysqld from starting if bin log has been moved
# BUG#42576 Relay logs in relay-log.info&localhost-relay-bin.index not processed after move
-source include/master-slave.inc;
-# There is no need to run this test case on all binlog format
-source include/have_binlog_format_row.inc;
-
# Since this test relies heavily on filesystem operations (like
# moving files around, backslashes and so forth) we avoid messing
# around with windows access violations for not cluttering the
@@ -28,6 +24,10 @@ source include/have_binlog_format_row.inc;
# it is not 100% compliant.
--source include/not_windows.inc
+source include/master-slave.inc;
+# There is no need to run this test case on all binlog format
+source include/have_binlog_format_row.inc;
+
connection master;
--let $master_datadir= `select @@datadir`
connection slave;
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
index 5d0d39cadce..25452346523 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
@@ -1,7 +1,7 @@
+--source include/no_valgrind_without_big.inc
--let $rpl_topology=1->2
--source include/rpl_init.inc
--source include/have_innodb.inc
---source include/no_valgrind_without_big.inc
--echo *** Test normal shutdown/restart of slave server configured as a GTID slave. ***
diff --git a/mysql-test/suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test b/mysql-test/suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test
index 9c804d8206a..a27a50d0fc4 100644
--- a/mysql-test/suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test
+++ b/mysql-test/suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test
@@ -15,8 +15,8 @@
#
# (iii) master and slave tables do not differ
#
--- source include/master-slave.inc
-- source include/not_windows.inc
+-- source include/master-slave.inc
SET SQL_LOG_BIN=0;
CREATE DATABASE B37656;
diff --git a/mysql-test/suite/rpl/t/rpl_mdev12179.test b/mysql-test/suite/rpl/t/rpl_mdev12179.test
index 962ce3f0135..6d2dda1044f 100644
--- a/mysql-test/suite/rpl/t/rpl_mdev12179.test
+++ b/mysql-test/suite/rpl/t/rpl_mdev12179.test
@@ -1,7 +1,7 @@
+--source include/no_valgrind_without_big.inc
--source include/have_innodb.inc
--let $rpl_topology=1->2
--source include/rpl_init.inc
---source include/no_valgrind_without_big.inc
--connection server_2
call mtr.add_suppression("The automatically created table.*name may not be entirely in lowercase");
diff --git a/mysql-test/suite/rpl/t/rpl_mdev382.test b/mysql-test/suite/rpl/t/rpl_mdev382.test
index 093b7b92413..84e3c84982d 100644
--- a/mysql-test/suite/rpl/t/rpl_mdev382.test
+++ b/mysql-test/suite/rpl/t/rpl_mdev382.test
@@ -1,7 +1,7 @@
+--source include/not_windows.inc #unix shell escaping used for mysqlbinlog
--source include/have_innodb.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
---source include/not_windows.inc #unix shell escaping used for mysqlbinlog
# MDEV-382: multiple SQL injections in replication code.
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_seq.test b/mysql-test/suite/rpl/t/rpl_parallel_seq.test
new file mode 100644
index 00000000000..2a4fd96ff34
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_parallel_seq.test
@@ -0,0 +1,131 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--connection slave
+--source include/stop_slave.inc
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+
+--echo # MDEV-29621: sequence engine events logged with binlog_row_image=FULL
+--echo # MDL-deadlock on the parallel slave.
+--connection master
+CREATE SEQUENCE s1;
+SET @@session.binlog_row_image=FULL;
+SET @@session.debug_dbug="+d,binlog_force_commit_id";
+SET @commit_id=7;
+SET @@gtid_seq_no=100;
+SELECT NEXT VALUE FOR s1;
+INSERT INTO s1 VALUES(2, 1, 10, 1, 2, 1, 1, 0);
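+# The eight values above map to the sequence table columns:
+# next_not_cached_value, minimum_value, maximum_value, start_value,
+# increment, cache_size, cycle_option, cycle_count.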
+SET @@session.debug_dbug="";
+
+--connection slave
+--let $slave_parallel_threads=`select @@global.slave_parallel_threads`
+--let $slave_parallel_mode=`select @@global.slave_parallel_mode`
+SET @@global.slave_parallel_threads=2;
+SET @@global.slave_parallel_mode=optimistic;
+SET @@global.debug_dbug="+d,hold_worker_on_schedule";
+--source include/start_slave.inc
+
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to start commit"
+--source include/wait_condition.inc
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+
+--connection master
+DROP SEQUENCE s1;
+--sync_slave_with_master
+--source include/stop_slave.inc
+
+--echo # Simulate a buggy 10.3.36 master to prove that the parallel applier
+--echo # no longer deadlocks when replaying the above master load.
+--connection master
+--let $datadir= `SELECT @@datadir`
+
+--let $rpl_server_number= 1
+--source include/rpl_stop_server.inc
+
+--remove_file $datadir/master-bin.000001
+--copy_file $MYSQL_TEST_DIR/std_data/rpl/master-bin-seq_10.3.36.000001 $datadir/master-bin.000001
+
+--let $rpl_server_number= 1
+--source include/rpl_start_server.inc
+
+--source include/wait_until_connected_again.inc
+--save_master_pos
+
+--connection slave
+RESET MASTER;
+SET @@global.gtid_slave_pos="";
+
+--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1
+eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_use_gtid=slave_pos;
+
+START SLAVE UNTIL MASTER_GTID_POS='0-1-102';
+
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit"
+--source include/wait_condition.inc
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+
+--echo # Normal stop is expected
+--source include/wait_for_slave_to_stop.inc
+
+--echo # MDEV-31077 ALTER SEQUENCE may end up in optimistic parallel slave binlog out-of-order
+--echo # The test proves that ALTER SEQUENCE is binlogged before the following transaction.
+
+--connection slave
+--source include/stop_slave.inc
+RESET MASTER;
+SET @@global.gtid_slave_pos="";
+--let $slave_gtid_strict_mode=`select @@global.gtid_strict_mode`
+SET @@global.gtid_strict_mode=1;
+--connection master
+RESET MASTER;
+
+# Load from master
+CREATE TABLE ti (a INT) ENGINE=innodb;
+CREATE SEQUENCE s2 ENGINE=innodb;
+
+SET @@gtid_seq_no=100;
+ALTER SEQUENCE s2 restart with 1;
+INSERT INTO ti SET a=1;
+--source include/save_master_gtid.inc
+SELECT @@global.gtid_binlog_state "Master gtid state";
+
+--connection slave
+--source include/start_slave.inc
+
+--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit"
+--source include/wait_condition.inc
+
+SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "no 100,101 yet in both";
+
+# The DEBUG_DBUG extension point of hold_worker_on_schedule is reused
+# (i.e. gets deployed) in Sql_cmd_alter_sequence::execute.
+SET DEBUG_SYNC = 'now SIGNAL continue_worker';
+
+--echo # Normal sync with master proves the fixes correct
+--source include/sync_with_master_gtid.inc
+
+SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "all through 101 have been committed";
+
+#
+# MDEV-29621/MDEV-31077 clean up.
+#
+--connection slave
+--source include/stop_slave.inc
+
+SET debug_sync = RESET;
+--eval SET @@global.slave_parallel_threads= $slave_parallel_threads
+--eval SET @@global.slave_parallel_mode= $slave_parallel_mode
+SET @@global.debug_dbug = "";
+--eval SET @@global.gtid_strict_mode=$slave_gtid_strict_mode
+--source include/start_slave.inc
+
+--connection master
+DROP SEQUENCE s2;
+DROP TABLE ti;
+
+--sync_slave_with_master
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test b/mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test
index 44c04dd62d3..7decd130a43 100644
--- a/mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test
+++ b/mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test
@@ -3,10 +3,10 @@
# For details look into extra/rpl_tests/rpl_lower_case_table_names.test
#
--- source include/master-slave.inc
--- source include/have_innodb.inc
-- source include/not_windows.inc
+-- source include/have_innodb.inc
-- source include/have_binlog_format_row.inc
+-- source include/master-slave.inc
-- let $engine=InnoDB
-- source include/rpl_lower_case_table_names.test
diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_event.test b/mysql-test/suite/rpl/t/rpl_semi_sync_event.test
index 7a7e1c1e074..d4df9b4041b 100644
--- a/mysql-test/suite/rpl/t/rpl_semi_sync_event.test
+++ b/mysql-test/suite/rpl/t/rpl_semi_sync_event.test
@@ -1,7 +1,7 @@
+source include/no_valgrind_without_big.inc;
source include/not_embedded.inc;
source include/have_innodb.inc;
source include/master-slave.inc;
-source include/no_valgrind_without_big.inc;
let $engine_type= InnoDB;
diff --git a/mysql-test/suite/rpl/t/rpl_ssl.test b/mysql-test/suite/rpl/t/rpl_ssl.test
index 59a2af9f137..0420a6c8c2d 100644
--- a/mysql-test/suite/rpl/t/rpl_ssl.test
+++ b/mysql-test/suite/rpl/t/rpl_ssl.test
@@ -4,9 +4,9 @@
# Please check all dependent tests after modifying it
#
+source include/no_valgrind_without_big.inc;
source include/have_ssl_communication.inc;
source include/master-slave.inc;
-source include/no_valgrind_without_big.inc;
# create a user for replication that requires ssl encryption
connection master;
diff --git a/mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test b/mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test
index 619b57994c2..3809cd89e4a 100644
--- a/mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test
+++ b/mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test
@@ -3,10 +3,10 @@
# For details look into extra/rpl_tests/rpl_lower_case_table_names.test
#
+-- source include/not_windows.inc
+-- source include/have_innodb.inc
-- source include/have_binlog_format_mixed_or_statement.inc
-- source include/master-slave.inc
--- source include/have_innodb.inc
--- source include/not_windows.inc
-- let $engine=InnoDB
-- source include/rpl_lower_case_table_names.test
diff --git a/sql/ha_sequence.cc b/sql/ha_sequence.cc
index b348e6e7025..bab0614706d 100644
--- a/sql/ha_sequence.cc
+++ b/sql/ha_sequence.cc
@@ -250,6 +250,8 @@ int ha_sequence::write_row(const uchar *buf)
on master and slaves
- Check that the new row is an accurate SEQUENCE object
*/
+  /* Mark this full-binlog-image insert as DDL to force serial (non-parallel) execution on the slave */
+ thd->transaction->stmt.mark_trans_did_ddl();
if (table->s->tmp_table == NO_TMP_TABLE &&
thd->mdl_context.upgrade_shared_lock(table->mdl_ticket,
MDL_EXCLUSIVE,
diff --git a/sql/item.cc b/sql/item.cc
index 85bb1f42632..f7a46744e9c 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -10494,7 +10494,8 @@ int Item_cache_str::save_in_field(Field *field, bool no_conversions)
bool Item_cache_row::allocate(THD *thd, uint num)
{
item_count= num;
- return (!(values=
+ return (!values &&
+ !(values=
(Item_cache **) thd->calloc(sizeof(Item_cache *)*item_count)));
}
@@ -10530,11 +10531,12 @@ bool Item_cache_row::setup(THD *thd, Item *item)
return 1;
for (uint i= 0; i < item_count; i++)
{
- Item_cache *tmp;
Item *el= item->element_index(i);
- if (!(tmp= values[i]= el->get_cache(thd)))
+
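+    /* Re-use the cache created on a previous execution of this PS, if any */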
+ if ((!values[i]) && !(values[i]= el->get_cache(thd)))
return 1;
- tmp->setup(thd, el);
+
+ values[i]->setup(thd, el);
}
return 0;
}
diff --git a/sql/item.h b/sql/item.h
index 6ac8ed9b509..e73d4f61b89 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -6902,6 +6902,9 @@ public:
}
virtual void keep_array() {}
+#ifndef DBUG_OFF
+ bool is_array_kept() { return TRUE; }
+#endif
void print(String *str, enum_query_type query_type) override;
bool eq_def(const Field *field)
{
@@ -7389,13 +7392,14 @@ public:
bool null_inside() override;
void bring_value() override;
void keep_array() override { save_array= 1; }
+#ifndef DBUG_OFF
+ bool is_array_kept() { return save_array; }
+#endif
void cleanup() override
{
DBUG_ENTER("Item_cache_row::cleanup");
Item_cache::cleanup();
- if (save_array)
- bzero(values, item_count*sizeof(Item**));
- else
+ if (!save_array)
values= 0;
DBUG_VOID_RETURN;
}
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 40181c3384c..9912646cbb4 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1306,9 +1306,22 @@ bool Item_in_optimizer::fix_left(THD *thd)
ref0= args[1]->get_IN_subquery()->left_exp_ptr();
args[0]= (*ref0);
}
- if ((*ref0)->fix_fields_if_needed(thd, ref0) ||
- (!cache && !(cache= (*ref0)->get_cache(thd))))
+ if ((*ref0)->fix_fields_if_needed(thd, ref0))
DBUG_RETURN(1);
+ if (!cache)
+ {
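+    /*
+      Create the cache on the statement arena so that it, and the internal
+      array kept via keep_array() below, survive re-execution of a prepared
+      statement (MDEV-31189).
+    */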
+ Query_arena *arena, backup;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ bool rc= !(cache= (*ref0)->get_cache(thd));
+
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+
+ if (rc)
+ DBUG_RETURN(1);
+ cache->keep_array();
+ }
/*
During fix_field() expression could be substituted.
So we copy changes before use
@@ -1669,19 +1682,10 @@ longlong Item_in_optimizer::val_int()
}
-void Item_in_optimizer::keep_top_level_cache()
-{
- cache->keep_array();
- save_cache= 1;
-}
-
-
void Item_in_optimizer::cleanup()
{
DBUG_ENTER("Item_in_optimizer::cleanup");
Item_bool_func::cleanup();
- if (!save_cache)
- cache= 0;
expr_cache= 0;
DBUG_VOID_RETURN;
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index da7337565ac..a64a69eb6c8 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -353,8 +353,7 @@ class Item_in_optimizer: public Item_bool_func
protected:
Item_cache *cache;
Item *expr_cache;
- bool save_cache;
- /*
+ /*
Stores the value of "NULL IN (SELECT ...)" for uncorrelated subqueries:
UNKNOWN - "NULL in (SELECT ...)" has not yet been evaluated
FALSE - result is FALSE
@@ -364,7 +363,7 @@ protected:
public:
Item_in_optimizer(THD *thd, Item *a, Item *b):
Item_bool_func(thd, a, b), cache(0), expr_cache(0),
- save_cache(0), result_for_null_param(UNKNOWN)
+ result_for_null_param(UNKNOWN)
{ m_with_subquery= true; }
bool fix_fields(THD *, Item **) override;
bool fix_left(THD *thd);
@@ -375,7 +374,6 @@ public:
enum Functype functype() const override { return IN_OPTIMIZER_FUNC; }
const char *func_name() const override { return "<in_optimizer>"; }
Item_cache **get_cache() { return &cache; }
- void keep_top_level_cache();
Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override;
Item *expr_cache_insert_transformer(THD *thd, uchar *unused) override;
bool is_expensive_processor(void *arg) override;
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index e94059f486d..8d44f9481b8 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -402,11 +402,11 @@ bool Item_subselect::mark_as_dependent(THD *thd, st_select_lex *select,
{
is_correlated= TRUE;
Ref_to_outside *upper;
- if (!(upper= new (thd->stmt_arena->mem_root) Ref_to_outside()))
+ if (!(upper= new (thd->mem_root) Ref_to_outside()))
return TRUE;
upper->select= select;
upper->item= item;
- if (upper_refs.push_back(upper, thd->stmt_arena->mem_root))
+ if (upper_refs.push_back(upper, thd->mem_root))
return TRUE;
}
return FALSE;
@@ -2030,7 +2030,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
thd->lex->current_select= current;
/* We will refer to upper level cache array => we have to save it for SP */
- optimizer->keep_top_level_cache();
+ DBUG_ASSERT(optimizer->get_cache()[0]->is_array_kept());
/*
As far as Item_in_optimizer does not substitute itself on fix_fields
@@ -2427,7 +2427,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
}
// we will refer to upper level cache array => we have to save it in PS
- optimizer->keep_top_level_cache();
+ DBUG_ASSERT(optimizer->get_cache()[0]->is_array_kept());
thd->lex->current_select= current;
/*
@@ -4575,6 +4575,12 @@ void subselect_uniquesubquery_engine::print(String *str,
{
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
tab->ref.items[0]->print(str, query_type);
+ if (!tab->table)
+ {
+    // the table has not been opened yet, so its name is not known here
+ str->append(')');
+ return;
+ }
str->append(STRING_WITH_LEN(" in "));
if (tab->table->s->table_category == TABLE_CATEGORY_TEMPORARY)
{
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index f19bebd9863..31075d9b64b 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -7493,8 +7493,18 @@ Rows_log_event::write_row(rpl_group_info *rgi,
int Rows_log_event::update_sequence()
{
TABLE *table= m_table; // pointer to event's table
+ bool old_master= false;
+ int err= 0;
- if (!bitmap_is_set(table->rpl_write_set, MIN_VALUE_FIELD_NO))
+ if (!bitmap_is_set(table->rpl_write_set, MIN_VALUE_FIELD_NO) ||
+ (
+#if defined(WITH_WSREP)
+ ! WSREP(thd) &&
+#endif
+ !(table->in_use->rgi_slave->gtid_ev_flags2 & Gtid_log_event::FL_DDL) &&
+ !(old_master=
+ rpl_master_has_bug(thd->rgi_slave->rli,
+ 29621, FALSE, FALSE, FALSE, TRUE))))
{
    /* This event comes from a setval function executed on the master.
Update the sequence next_number and round, like we do with setval()
@@ -7507,12 +7517,27 @@ int Rows_log_event::update_sequence()
return table->s->sequence->set_value(table, nextval, round, 0) > 0;
}
-
+ if (old_master && !WSREP(thd) && thd->rgi_slave->is_parallel_exec)
+ {
+ DBUG_ASSERT(thd->rgi_slave->parallel_entry);
+ /*
+      With parallel replication enabled, we can't execute alongside any other
+      transaction on which we may depend, so we force a retry to release
+      the server-layer table lock for transactions on the same table that
+      come earlier in binlog order.
+ */
+ if (thd->rgi_slave->parallel_entry->last_committed_sub_id <
+ thd->rgi_slave->wait_commit_sub_id)
+ {
+ err= ER_LOCK_DEADLOCK;
+ my_error(err, MYF(0));
+ }
+ }
/*
Update all fields in table and update the active sequence, like with
ALTER SEQUENCE
*/
- return table->file->ha_write_row(table->record[0]);
+ return err == 0 ? table->file->ha_write_row(table->record[0]) : err;
}
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index e148dd42d1e..7b466888716 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -65,7 +65,7 @@
If we have only one equi-join condition then we either push it as
for Q1R or we don't. In a general case we may have much more options.
Consider the query (Q3)
- SELECT
+ SELECT *
FROM t1,t2 (SELECT t3.a, t3.b, MIN(t3.c) as min
FROM t3 GROUP BY a,b) t
WHERE t.a = t1.a AND t.b = t2.b
@@ -102,6 +102,47 @@
If we just drop the index on t3(a,b) the chances that the splitting
   will be used become much lower, but they still exist provided that
the fanout of the partial join of t1 and t2 is small enough.
+
+  The lateral derived table LT formed as a result of SM optimization applied
+  to a materialized derived table DT must be joined after all parameters
+  of splitting have been evaluated, i.e. after all expressions used in the
+  equalities pushed into DT that make the employed splitting effective
+  can be evaluated. With the chosen join order all the parameters can be
+  evaluated after the last table LPT that contains any columns referenced in
+  the parameters has been joined, and the table APT following LPT in the
+  chosen join order is accessed.
+  Usually the formed lateral derived table LT is accessed right after the
+  table LPT. As in such cases LT must be refilled for each combination of the
+  splitting parameters, it must be populated before each access to LT, and
+  the estimate of the expected number of refills that can be suggested in
+  such cases is the number of rows in the partial join ending with table LPT.
+  However in other cases the chosen join order may contain tables between LPT
+  and LT.
+ Consider the query (Q4)
+ SELECT *
+ FROM t1 JOIN t2 ON t1.b = t2.b
+ LEFT JOIN (SELECT t3.a, t3.b, MIN(t3.c) as min
+ FROM t3 GROUP BY a,b) t
+ ON t.a = t1.a AND t.c > 0
+ [WHERE P(t1,t2)];
+ Let's assume that the join order t1,t2,t was chosen for this query and
+ SP optimization was applied to t with splitting over t3.a using the index
+ on column t3.a. Here the table t1 serves as LPT, t2 as APT while t with
+ pushed condition t.a = t1.a serves as LT. Note that here LT is accessed
+  after t2, not right after t1. Here the number of refills of the lateral
+  derived table is not more than the number of key values of t1.a, which
+  might be less than the cardinality of the partial join (t1,t2). That's
+  why it makes sense to signal that t3 has to be refilled just before t2
+  is accessed.
+  However if the cardinality of the partial join (t1,t2) happens to be less
+  than the cardinality of the partial join (t1) due to an additional selective
+  condition P(t1,t2), then the flag informing about the necessity of a new
+  refill can be set either when accessing t2 or right after it has been joined.
+  The current code sets such a flag right after generating a record of the
+  partial join with minimal cardinality, for all those partial joins that
+  end between APT and LT. This sometimes allows pushing extra conditions
+  into the lateral derived table without any increase in the number of refills.
+  However this flag can be set only after the last table between APT and LT
+  that uses a join buffer has been joined.
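+
+  To illustrate with made-up numbers: if in Q4 t1.a takes 10 distinct values
+  while the partial join (t1,t2) yields 100 rows, signalling the refill at
+  t1's boundary costs at most 10 refills of LT instead of up to 100 when LT
+  is refilled before every access.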
*/
/*
@@ -187,6 +228,7 @@
#include "mariadb.h"
#include "sql_select.h"
+#include "opt_trace.h"
/* Info on a splitting field */
struct SplM_field_info
@@ -248,6 +290,7 @@ public:
double unsplit_card;
/* Lastly evaluated execution plan for 'join' with pushed equalities */
SplM_plan_info *last_plan;
+ double last_refills;
SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
};
@@ -345,6 +388,9 @@ bool JOIN::check_for_splittable_materialized()
if (!partition_list)
return false;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_split(thd, "check_split_materialized");
+
ORDER *ord;
Dynamic_array<SplM_field_ext_info> candidates(PSI_INSTRUMENT_MEM);
@@ -390,7 +436,10 @@ bool JOIN::check_for_splittable_materialized()
}
}
if (candidates.elements() == 0) // no candidates satisfying (8.1) && (8.2)
+ {
+ trace_split.add("not_applicable", "group list has no candidates");
return false;
+ }
/*
For each table from this join find the keys that can be used for ref access
@@ -449,7 +498,11 @@ bool JOIN::check_for_splittable_materialized()
}
if (!spl_field_cnt) // No candidate field can be accessed by ref => !(9)
+ {
+ trace_split.add("not_applicable",
+ "no candidate field can be accessed through ref");
return false;
+ }
/*
Create a structure of the type SplM_opt_info and fill it with
@@ -467,16 +520,22 @@ bool JOIN::check_for_splittable_materialized()
spl_opt_info->tables_usable_for_splitting= 0;
spl_opt_info->spl_field_cnt= spl_field_cnt;
spl_opt_info->spl_fields= spl_field;
- for (cand= cand_start; cand < cand_end; cand++)
+
{
- if (!cand->is_usable_for_ref_access)
- continue;
- spl_field->producing_item= cand->producing_item;
- spl_field->underlying_field= cand->underlying_field;
- spl_field->mat_field= cand->mat_field;
- spl_opt_info->tables_usable_for_splitting|=
- cand->underlying_field->table->map;
- spl_field++;
+ Json_writer_array trace_range(thd, "split_candidates");
+ for (cand= cand_start; cand < cand_end; cand++)
+ {
+ if (!cand->is_usable_for_ref_access)
+ continue;
+ trace_range.add(cand->producing_item);
+
+ spl_field->producing_item= cand->producing_item;
+ spl_field->underlying_field= cand->underlying_field;
+ spl_field->mat_field= cand->mat_field;
+ spl_opt_info->tables_usable_for_splitting|=
+ cand->underlying_field->table->map;
+ spl_field++;
+ }
}
/* Attach this info to the table T */
@@ -731,7 +790,7 @@ void JOIN::add_keyuses_for_splitting()
bzero((char*) &keyuse_ext_end, sizeof(keyuse_ext_end));
if (ext_keyuses_for_splitting->push(keyuse_ext_end))
goto err;
-
+ // psergey-todo: trace anything here?
spl_opt_info->unsplit_card= join_record_count;
rec_len= table->s->rec_buff_length;
@@ -829,13 +888,13 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
static
void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
TABLE *table, uint key,
- table_map remaining_tables,
+ table_map excluded_tables,
bool validity_val)
{
KEYUSE_EXT *keyuse_ext= key_keyuse_ext_start;
do
{
- if (!(keyuse_ext->needed_in_prefix & remaining_tables))
+ if (!(keyuse_ext->needed_in_prefix & excluded_tables))
{
/*
The enabling/disabling flags are set just in KEYUSE_EXT structures.
@@ -855,8 +914,11 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
Choose the best splitting to extend the evaluated partial join
@param
- record_count estimated cardinality of the extended partial join
+ idx index for joined table T in current partial join P
remaining_tables tables not joined yet
+ spl_pd_boundary OUT bitmap of the table from P extended by T that
+ starts the sub-sequence of tables S from which
+ no conditions are allowed to be pushed into T.
@details
This function is called during the search for the best execution
@@ -871,17 +933,19 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
splitting the function set it as the true plan of materialization
of the table T.
The function caches the found plans for materialization of table T
- together if the info what key was used for splitting. Next time when
+   together with the info on which key was used for splitting. Next time when
the optimizer prefers to use the same key the plan is taken from
the cache of plans
@retval
Pointer to the info on the found plan that employs the pushed equalities
if the plan has been chosen, NULL - otherwise.
+    If the function returns NULL, the value of *spl_pd_boundary is set to 0.
*/
-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
- table_map remaining_tables)
+SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
DBUG_ASSERT(spl_opt_info != NULL);
@@ -896,7 +960,9 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
-
+ table_map best_param_tables;
+ Json_writer_object trace_obj(thd, "choose_best_splitting");
+ Json_writer_array trace_arr(thd, "considered_keys");
/*
Check whether there are keys that can be used to join T employing splitting
and if so, select the best out of such keys
@@ -914,6 +980,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
uint key= keyuse_ext->key;
KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
key_part_map found_parts= 0;
+ table_map needed_in_prefix= 0;
do
{
if (keyuse_ext->needed_in_prefix & remaining_tables)
@@ -939,6 +1006,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
KEY *key_info= table->key_info + key;
double rec_per_key=
key_info->actual_rec_per_key(keyuse_ext->keypart);
+ needed_in_prefix|= keyuse_ext->needed_in_prefix;
if (rec_per_key < best_rec_per_key)
{
best_table= keyuse_ext->table;
@@ -946,6 +1014,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
best_key_parts= keyuse_ext->keypart + 1;
best_rec_per_key= rec_per_key;
best_key_keyuse_ext_start= key_keyuse_ext_start;
+ best_param_tables= needed_in_prefix;
+ // trace table, key_name, parts, needed_tables.
+ Json_writer_object cur_index(thd);
+ cur_index.
+ add("table_name", best_table->alias.ptr()).
+ add("index", best_table->key_info[best_key].name).
+ add("rec_per_key", best_rec_per_key).
+ add("param_tables", best_param_tables);
}
keyuse_ext++;
}
@@ -953,9 +1029,37 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
}
while (keyuse_ext->table == table);
}
+ trace_arr.end();
+
spl_opt_info->last_plan= 0;
+ double refills= DBL_MAX;
+ table_map excluded_tables= remaining_tables | this->join->sjm_lookup_tables;
if (best_table)
{
+ *spl_pd_boundary= this->table->map;
+ if (!best_param_tables)
+ refills= 1;
+ else
+ {
+ table_map last_found= this->table->map;
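+      /*
+        Walk the chosen join prefix backwards to find the position with the
+        minimal partial-join cardinality at which a refill of the lateral
+        table can be signalled.
+      */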
+ for (POSITION *pos= &this->join->positions[idx - 1]; ; pos--)
+ {
+ if (pos->table->table->map & excluded_tables)
+ continue;
+ if (pos->partial_join_cardinality < refills)
+ {
+ *spl_pd_boundary= last_found;
+ refills= pos->partial_join_cardinality;
+ }
+ last_found= pos->table->table->map;
+ if ((last_found & best_param_tables) || pos->use_join_buffer)
+ break;
+ }
+ }
+
+ trace_obj.add("refills", refills).
+ add("spl_pd_boundary", *spl_pd_boundary);
+
/*
The key for splitting was chosen, look for the plan for this key
in the cache
@@ -967,11 +1071,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
The plan for the chosen key has not been found in the cache.
Build a new plan and save info on it in the cache
*/
+ Json_writer_array wrapper(thd, "split_plan_search");
table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, true);
+ best_key, excluded_tables, true);
choose_plan(join, all_table_map & ~join->const_table_map);
+ wrapper.end();
/*
Check that the chosen plan is really a splitting plan.
If not or if there is not enough memory to save the plan in the cache
@@ -988,7 +1094,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_opt_info->plan_cache.push_back(spl_plan))
{
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
+ trace_obj.add("split_plan_discarded", "constructed unapplicable query plan");
return 0;
}
@@ -1012,18 +1119,34 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
(char *) join->best_positions,
sizeof(POSITION) * join->table_count);
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
}
+ else
+ trace_obj.add("cached_plan_found", 1);
+
if (spl_plan)
{
- if(record_count * spl_plan->cost < spl_opt_info->unsplit_cost - 0.01)
+ trace_obj.
+ add("lead_table", spl_plan->table->alias.ptr()).
+ add("index", spl_plan->table->key_info[spl_plan->key].name).
+ add("parts", spl_plan->parts).
+ add("split_sel", spl_plan->split_sel).
+ add("cost", spl_plan->cost).
+ add("records", (ha_rows) (records * spl_plan->split_sel)).
+ add("refills", refills);
+
+ if (refills * spl_plan->cost < spl_opt_info->unsplit_cost - 0.01)
{
/*
The best plan that employs splitting is cheaper than
the plan without splitting
*/
spl_opt_info->last_plan= spl_plan;
+ spl_opt_info->last_refills= refills;
+ trace_obj.add("chosen", true);
}
+ else
+ trace_obj.add("chosen", false);
}
}
@@ -1032,11 +1155,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan= spl_opt_info->last_plan;
if (spl_plan)
{
- startup_cost= record_count * spl_plan->cost;
+ startup_cost= spl_opt_info->last_refills * spl_plan->cost;
records= (ha_rows) (records * spl_plan->split_sel);
}
else
+ {
startup_cost= spl_opt_info->unsplit_cost;
+ *spl_pd_boundary= 0;
+ }
return spl_plan;
}
@@ -1046,13 +1172,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
Inject equalities for splitting used by the materialization join
@param
- excluded_tables used to filter out the equalities that cannot
- be pushed.
+ excluded_tables used to filter out the equalities that are not
+ to be pushed.
@details
This function injects equalities pushed into a derived table T for which
the split optimization has been chosen by the optimizer. The function
- is called by JOIN::inject_splitting_cond_for_all_tables_with_split_op().
+ is called by JOIN::inject_splitting_cond_for_all_tables_with_split_opt().
All equalities usable for splitting T whose right parts do not depend on
any of the 'excluded_tables' can be pushed into the where clause of the
derived table T.
@@ -1140,7 +1266,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
@param
spl_plan info on the splitting plan chosen for the splittable table T
- remaining_tables the table T is joined just before these tables
+ excluded_tables tables that cannot be used in equalities pushed into T
is_const_table the table T is a constant table
@details
@@ -1155,7 +1281,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
*/
bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
- table_map remaining_tables,
+ table_map excluded_tables,
bool is_const_table)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
@@ -1163,6 +1289,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
JOIN *md_join= spl_opt_info->join;
if (spl_plan && !is_const_table)
{
+ is_split_derived= true;
memcpy((char *) md_join->best_positions,
(char *) spl_plan->best_positions,
sizeof(POSITION) * md_join->table_count);
@@ -1173,7 +1300,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
reset_validity_vars_for_keyuses(spl_plan->keyuse_ext_start,
spl_plan->table,
spl_plan->key,
- remaining_tables,
+ excluded_tables,
true);
}
else if (md_join->save_qep)
@@ -1209,8 +1336,21 @@ bool JOIN::fix_all_splittings_in_plan()
if (tab->table->is_splittable())
{
SplM_plan_info *spl_plan= cur_pos->spl_plan;
+ table_map excluded_tables= (all_tables & ~prev_tables) |
+ sjm_lookup_tables;
+ if (spl_plan)
+ {
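+        /*
+          Extend excluded_tables with every table from the split-pushdown
+          boundary onwards; equalities whose right parts refer to them must
+          not be pushed into the lateral derived table.
+        */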
+ POSITION *pos= cur_pos;
+ table_map spl_pd_boundary= pos->spl_pd_boundary;
+ do
+ {
+ excluded_tables|= pos->table->table->map;
+ }
+ while (!((pos--)->table->table->map & spl_pd_boundary));
+ }
if (tab->fix_splitting(spl_plan,
- all_tables & ~prev_tables,
+ excluded_tables,
tablenr < const_tables ))
return true;
}
@@ -1249,13 +1389,21 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
continue;
SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
JOIN *join= spl_opt_info->join;
- /*
- Currently the equalities referencing columns of SJM tables with
- look-up access cannot be pushed into materialized derived.
- */
- if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
- sjm_lookup_tables))
- return true;
+ table_map excluded_tables= (all_tables & ~prev_tables) | sjm_lookup_tables;
+ table_map spl_pd_boundary= cur_pos->spl_pd_boundary;
+ for (POSITION *pos= cur_pos; ; pos--)
+ {
+ excluded_tables|= pos->table->table->map;
+ pos->table->no_forced_join_cache= true;
+ if (pos->table->table->map & spl_pd_boundary)
+ {
+ pos->table->split_derived_to_update|= tab->table->map;
+ break;
+ }
+ }
+
+ if (join->inject_best_splitting_cond(excluded_tables))
+ return true;
}
return false;
}
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index a9b4f046988..fc3d6f35ee5 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -2859,7 +2859,12 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
if (mode <= SLAVE_PARALLEL_MINIMAL ||
!(gtid_flags & Gtid_log_event::FL_GROUP_COMMIT_ID) ||
- e->last_commit_id != gtid_ev->commit_id)
+ e->last_commit_id != gtid_ev->commit_id ||
+ /*
+        MULTI_BATCH is also set when the current gtid event, being a member
+        of a commit group, is flagged as DDL, which disallows parallel apply.
+ */
+ (gtid_flags & Gtid_log_event::FL_DDL))
flags|= group_commit_orderer::MULTI_BATCH;
/* Make sure we do not attempt to run DDL in parallel speculatively. */
if (gtid_flags & Gtid_log_event::FL_DDL)
diff --git a/sql/slave.cc b/sql/slave.cc
index 17379eda5a6..41b48eb0778 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -8109,14 +8109,15 @@ end:
@return TRUE if master has the bug, FALSE if it does not.
*/
bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
- bool (*pred)(const void *), const void *param)
+ bool (*pred)(const void *), const void *param,
+ bool maria_master)
{
struct st_version_range_for_one_bug {
uint bug_id;
Version introduced_in; // first version with bug
Version fixed_in; // first version with fix
};
- static struct st_version_range_for_one_bug versions_for_all_bugs[]=
+ static struct st_version_range_for_one_bug versions_for_their_bugs[]=
{
{24432, { 5, 0, 24 }, { 5, 0, 38 } },
{24432, { 5, 1, 12 }, { 5, 1, 17 } },
@@ -8124,11 +8125,27 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
{33029, { 5, 1, 0 }, { 5, 1, 12 } },
{37426, { 5, 1, 0 }, { 5, 1, 26 } },
};
+ static struct st_version_range_for_one_bug versions_for_our_bugs[]=
+ {
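+    /* MDEV-29621: first version with the bug, first version with the fix */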
+ {29621, { 10, 3, 36 }, { 10, 3, 39 } },
+ {29621, { 10, 4, 26 }, { 10, 4, 29 } },
+ {29621, { 10, 5, 17 }, { 10, 5, 20 } },
+ {29621, { 10, 6, 9 }, { 10, 6, 13 } },
+ {29621, { 10, 7, 5 }, { 10, 7, 9 } },
+ {29621, { 10, 8, 4 }, { 10, 8, 8 } },
+ {29621, { 10, 9, 2 }, { 10, 9, 6 } },
+ {29621, { 10, 10,1 }, { 10, 10,4 } },
+ {29621, { 10, 11,1 }, { 10, 11,3 } },
+ };
const Version &master_ver=
rli->relay_log.description_event_for_exec->server_version_split;
+ struct st_version_range_for_one_bug* versions_for_all_bugs= maria_master ?
+ versions_for_our_bugs : versions_for_their_bugs;
+ uint all_size= maria_master ?
+ sizeof(versions_for_our_bugs)/sizeof(*versions_for_our_bugs) :
+ sizeof(versions_for_their_bugs)/sizeof(*versions_for_their_bugs);
- for (uint i= 0;
- i < sizeof(versions_for_all_bugs)/sizeof(*versions_for_all_bugs);i++)
+ for (uint i= 0; i < all_size; i++)
{
const Version &introduced_in= versions_for_all_bugs[i].introduced_in;
const Version &fixed_in= versions_for_all_bugs[i].fixed_in;
@@ -8137,18 +8154,21 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
fixed_in > master_ver &&
(pred == NULL || (*pred)(param)))
{
+ const char *bug_source= maria_master ?
+ "https://jira.mariadb.org/browse/MDEV-" :
+ "http://bugs.mysql.com/bug.php?id=";
if (!report)
return TRUE;
// a short message for SHOW SLAVE STATUS (message length constraints)
my_printf_error(ER_UNKNOWN_ERROR, "master may suffer from"
- " http://bugs.mysql.com/bug.php?id=%u"
+ " %s%u"
" so slave stops; check error log on slave"
- " for more info", MYF(0), bug_id);
+ " for more info", MYF(0), bug_source, bug_id);
// a verbose message for the error log
rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, NULL,
"According to the master's version ('%s'),"
" it is probable that master suffers from this bug:"
- " http://bugs.mysql.com/bug.php?id=%u"
+ " %s%u"
" and thus replicating the current binary log event"
" may make the slave's data become different from the"
" master's data."
@@ -8162,6 +8182,7 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
" equal to '%d.%d.%d'. Then replication can be"
" restarted.",
rli->relay_log.description_event_for_exec->server_version,
+ bug_source,
bug_id,
fixed_in[0], fixed_in[1], fixed_in[2]);
return TRUE;
diff --git a/sql/slave.h b/sql/slave.h
index e2bd5cec1b9..02de9135c2a 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -231,7 +231,8 @@ bool show_all_master_info(THD* thd);
void show_binlog_info_get_fields(THD *thd, List<Item> *field_list);
bool show_binlog_info(THD* thd);
bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
- bool (*pred)(const void *), const void *param);
+ bool (*pred)(const void *), const void *param,
+ bool maria_master= false);
bool rpl_master_erroneous_autoinc(THD* thd);
const char *print_slave_db_safe(const char *db);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index c17c93c5a9c..4c4bf237e90 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -2211,7 +2211,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, TABLE_LIST *t
for (Field **field=entry->field ; *field ; field++)
{
if (!bitmap_is_set(write_set, (*field)->field_index) &&
- !(*field)->vers_sys_field() &&
+ !(*field)->vers_sys_field() && !(*field)->vcol_info &&
has_no_default_value(thd, *field, table_list) &&
((*field)->real_type() != MYSQL_TYPE_ENUM))
err=1;
@@ -4148,6 +4148,7 @@ bool select_insert::store_values(List<Item> &values)
DBUG_ENTER("select_insert::store_values");
bool error;
+ table->reset_default_fields();
if (fields->elements)
error= fill_record_n_invoke_before_triggers(thd, table, *fields, values,
true, TRG_EVENT_INSERT);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 91837ffb5c3..257acebe9e0 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -884,7 +884,7 @@ void Lex_input_stream::body_utf8_start(THD *thd, const char *begin_ptr)
}
-size_t Lex_input_stream::get_body_utf8_maximum_length(THD *thd)
+size_t Lex_input_stream::get_body_utf8_maximum_length(THD *thd) const
{
/*
String literals can grow during escaping:
@@ -1387,7 +1387,7 @@ Yacc_state::~Yacc_state()
}
int Lex_input_stream::find_keyword(Lex_ident_cli_st *kwd,
- uint len, bool function)
+ uint len, bool function) const
{
const char *tok= m_tok_start;
@@ -9310,33 +9310,6 @@ bool LEX::add_grant_command(THD *thd, const List<LEX_COLUMN> &columns)
}
-Item *LEX::make_item_func_substr(THD *thd, Item *a, Item *b, Item *c)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_substr_oracle(thd, a, b, c) :
- new (thd->mem_root) Item_func_substr(thd, a, b, c);
-}
-
-
-Item *LEX::make_item_func_substr(THD *thd, Item *a, Item *b)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_substr_oracle(thd, a, b) :
- new (thd->mem_root) Item_func_substr(thd, a, b);
-}
-
-
-Item *LEX::make_item_func_replace(THD *thd,
- Item *org,
- Item *find,
- Item *replace)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_replace_oracle(thd, org, find, replace) :
- new (thd->mem_root) Item_func_replace(thd, org, find, replace);
-}
-
-
bool SELECT_LEX::vers_push_field(THD *thd, TABLE_LIST *table,
const LEX_CSTRING field_name)
{
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index f7db8c6942c..2e3ce7d7503 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -2487,7 +2487,7 @@ private:
Get the last character accepted.
@return the last character accepted.
*/
- unsigned char yyGetLast()
+ unsigned char yyGetLast() const
{
return m_ptr[-1];
}
@@ -2495,7 +2495,7 @@ private:
/**
Look at the next character to parse, but do not accept it.
*/
- unsigned char yyPeek()
+ unsigned char yyPeek() const
{
return m_ptr[0];
}
@@ -2504,7 +2504,7 @@ private:
Look ahead at some character to parse.
@param n offset of the character to look up
*/
- unsigned char yyPeekn(int n)
+ unsigned char yyPeekn(int n) const
{
return m_ptr[n];
}
@@ -2565,7 +2565,7 @@ private:
@param n number of characters expected
@return true if there are less than n characters to parse
*/
- bool eof(int n)
+ bool eof(int n) const
{
return ((m_ptr + n) >= m_end_of_query);
}
@@ -2596,10 +2596,10 @@ private:
Get the maximum length of the utf8-body buffer.
The utf8 body can grow because of the character set conversion and escaping.
*/
- size_t get_body_utf8_maximum_length(THD *thd);
+ size_t get_body_utf8_maximum_length(THD *thd) const;
/** Get the length of the current token, in the raw buffer. */
- uint yyLength()
+ uint yyLength() const
{
/*
The assumption is that the lexical analyser is always 1 character ahead,
@@ -2624,31 +2624,31 @@ public:
End of file indicator for the query text to parse.
@return true if there are no more characters to parse
*/
- bool eof()
+ bool eof() const
{
return (m_ptr >= m_end_of_query);
}
/** Get the raw query buffer. */
- const char *get_buf()
+ const char *get_buf() const
{
return m_buf;
}
/** Get the pre-processed query buffer. */
- const char *get_cpp_buf()
+ const char *get_cpp_buf() const
{
return m_cpp_buf;
}
/** Get the end of the raw query buffer. */
- const char *get_end_of_query()
+ const char *get_end_of_query() const
{
return m_end_of_query;
}
/** Get the token start position, in the raw buffer. */
- const char *get_tok_start()
+ const char *get_tok_start() const
{
return has_lookahead() ? m_tok_start_prev : m_tok_start;
}
@@ -2659,25 +2659,25 @@ public:
}
/** Get the token end position, in the raw buffer. */
- const char *get_tok_end()
+ const char *get_tok_end() const
{
return m_tok_end;
}
/** Get the current stream pointer, in the raw buffer. */
- const char *get_ptr()
+ const char *get_ptr() const
{
return m_ptr;
}
/** Get the token start position, in the pre-processed buffer. */
- const char *get_cpp_tok_start()
+ const char *get_cpp_tok_start() const
{
return has_lookahead() ? m_cpp_tok_start_prev : m_cpp_tok_start;
}
/** Get the token end position, in the pre-processed buffer. */
- const char *get_cpp_tok_end()
+ const char *get_cpp_tok_end() const
{
return m_cpp_tok_end;
}
@@ -2686,7 +2686,7 @@ public:
Get the token end position in the pre-processed buffer,
with trailing spaces removed.
*/
- const char *get_cpp_tok_end_rtrim()
+ const char *get_cpp_tok_end_rtrim() const
{
const char *p;
for (p= m_cpp_tok_end;
@@ -2697,7 +2697,7 @@ public:
}
/** Get the current stream pointer, in the pre-processed buffer. */
- const char *get_cpp_ptr()
+ const char *get_cpp_ptr() const
{
return m_cpp_ptr;
}
@@ -2706,7 +2706,7 @@ public:
Get the current stream pointer, in the pre-processed buffer,
    with trailing spaces removed.
*/
- const char *get_cpp_ptr_rtrim()
+ const char *get_cpp_ptr_rtrim() const
{
const char *p;
for (p= m_cpp_ptr;
@@ -2716,13 +2716,13 @@ public:
return p;
}
/** Get the utf8-body string. */
- const char *get_body_utf8_str()
+ const char *get_body_utf8_str() const
{
return m_body_utf8;
}
/** Get the utf8-body length. */
- size_t get_body_utf8_length()
+ size_t get_body_utf8_length() const
{
return (size_t) (m_body_utf8_ptr - m_body_utf8);
}
@@ -2758,7 +2758,7 @@ private:
bool consume_comment(int remaining_recursions_permitted);
int lex_one_token(union YYSTYPE *yylval, THD *thd);
- int find_keyword(Lex_ident_cli_st *str, uint len, bool function);
+ int find_keyword(Lex_ident_cli_st *str, uint len, bool function) const;
LEX_CSTRING get_token(uint skip, uint length);
int scan_ident_sysvar(THD *thd, Lex_ident_cli_st *str);
int scan_ident_start(THD *thd, Lex_ident_cli_st *str);
@@ -4103,9 +4103,6 @@ public:
Item *create_item_query_expression(THD *thd, st_select_lex_unit *unit);
- Item *make_item_func_replace(THD *thd, Item *org, Item *find, Item *replace);
- Item *make_item_func_substr(THD *thd, Item *a, Item *b, Item *c);
- Item *make_item_func_substr(THD *thd, Item *a, Item *b);
Item *make_item_func_call_generic(THD *thd, Lex_ident_cli_st *db,
Lex_ident_cli_st *name, List<Item> *args);
Item *make_item_func_call_generic(THD *thd,
diff --git a/sql/sql_schema.cc b/sql/sql_schema.cc
index 0bf4a63c2f8..f08204d272d 100644
--- a/sql/sql_schema.cc
+++ b/sql/sql_schema.cc
@@ -32,6 +32,14 @@ public:
return thd->type_handler_for_datetime();
return src;
}
+
+ Item *make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const;
+ Item *make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const;
+ Item *make_item_func_trim(THD *thd, const Lex_trim_st &spec) const;
};
@@ -78,3 +86,56 @@ Schema *Schema::find_implied(THD *thd)
return &maxdb_schema;
return &mariadb_schema;
}
+
+
+Item *Schema::make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const
+{
+ return new (thd->mem_root) Item_func_replace(thd, subj, find, replace);
+}
+
+
+Item *Schema::make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const
+{
+ return spec.m_for ?
+ new (thd->mem_root) Item_func_substr(thd, spec.m_subject, spec.m_from,
+ spec.m_for) :
+ new (thd->mem_root) Item_func_substr(thd, spec.m_subject, spec.m_from);
+}
+
+
+Item *Schema::make_item_func_trim(THD *thd, const Lex_trim_st &spec) const
+{
+ return spec.make_item_func_trim_std(thd);
+}
+
+
+Item *Schema_oracle::make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const
+{
+ return new (thd->mem_root) Item_func_replace_oracle(thd, subj, find, replace);
+}
+
+
+Item *Schema_oracle::make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const
+{
+ return spec.m_for ?
+ new (thd->mem_root) Item_func_substr_oracle(thd, spec.m_subject,
+ spec.m_from,
+ spec.m_for) :
+ new (thd->mem_root) Item_func_substr_oracle(thd, spec.m_subject,
+ spec.m_from);
+}
+
+
+Item *Schema_oracle::make_item_func_trim(THD *thd,
+ const Lex_trim_st &spec) const
+{
+ return spec.make_item_func_trim_oracle(thd);
+}
diff --git a/sql/sql_schema.h b/sql/sql_schema.h
index 37f8ceb7250..0258ff2dc97 100644
--- a/sql/sql_schema.h
+++ b/sql/sql_schema.h
@@ -33,6 +33,17 @@ public:
{
return src;
}
+
+  // Builders for native SQL functions with a special syntax in sql_yacc.yy
+ virtual Item *make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const;
+ virtual Item *make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const;
+
+ virtual Item *make_item_func_trim(THD *thd, const Lex_trim_st &spec) const;
+
/*
For now we have *hard-coded* compatibility schemas:
schema_mariadb, schema_oracle, schema_maxdb.
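
To illustrate the dispatch these virtual builders enable, a hedged sketch follows. build_substr() is a hypothetical wrapper; Schema::find_implied(), Lex_substring_spec_st::init(), and make_item_func_substr() are the interfaces added by this patch:

    /* Instead of testing thd->variables.sql_mode & MODE_ORACLE at every
       call site, the parser now asks the implied schema for the Item. */
    static Item *build_substr(THD *thd, Item *subject, Item *from, Item *xfor)
    {
      Lex_substring_spec_st spec=
        Lex_substring_spec_st::init(subject, from, xfor);
      /* Returns Item_func_substr_oracle under sql_mode=ORACLE,
         Item_func_substr otherwise. */
      return Schema::find_implied(thd)->make_item_func_substr(thd, spec);
    }
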
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 18e3d214e66..b6fee1c6f85 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7344,6 +7344,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].records_read=1.0; /* This is a const table */
join->positions[idx].cond_selectivity= 1.0;
join->positions[idx].ref_depend_map= 0;
+ join->positions[idx].partial_join_cardinality= 1;
// join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
join->positions[idx].sj_strategy= SJ_OPT_NONE;
@@ -7361,6 +7362,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
}
join->best_ref[idx]=table;
join->positions[idx].spl_plan= 0;
+ join->positions[idx].spl_pd_boundary= 0;
}
@@ -7533,6 +7535,7 @@ best_access_path(JOIN *join,
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
SplM_plan_info *spl_plan= 0;
+ table_map spl_pd_boundary= 0;
Range_rowid_filter_cost_info *filter= 0;
const char* cause= NULL;
enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
@@ -7543,15 +7546,17 @@ best_access_path(JOIN *join,
DBUG_ENTER("best_access_path");
Json_writer_object trace_wrapper(thd, "best_access_path");
- Json_writer_array trace_paths(thd, "considered_access_paths");
bitmap_clear_all(eq_join_set);
loose_scan_opt.init(join, s, remaining_tables);
if (s->table->is_splittable())
- spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ spl_plan= s->choose_best_splitting(idx,
+ remaining_tables,
+ &spl_pd_boundary);
+ Json_writer_array trace_paths(thd, "considered_access_paths");
if (s->keyuse)
{ /* Use key if possible */
KEYUSE *keyuse;
@@ -8347,8 +8352,9 @@ best_access_path(JOIN *join,
best_filter= filter;
/* range/index_merge/ALL/index access method are "independent", so: */
best_ref_depends_map= 0;
- best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
- join->outer_join)));
+ best_uses_jbuf= MY_TEST(!disable_jbuf &&
+ (join->allowed_outer_join_with_cache ||
+ !(s->table->map & join->outer_join)));
spl_plan= 0;
best_type= type;
}
@@ -8370,6 +8376,7 @@ best_access_path(JOIN *join,
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
+ pos->spl_pd_boundary= !spl_plan ? 0 : spl_pd_boundary;
pos->range_rowid_filter_info= best_filter;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -8899,6 +8906,9 @@ optimize_straight_join(JOIN *join, table_map join_tables)
pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
join_tables);
position->cond_selectivity= pushdown_cond_selectivity;
+ double partial_join_cardinality= record_count *
+ pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
++idx;
}
@@ -9939,6 +9949,8 @@ best_extension_by_limited_search(JOIN *join,
double partial_join_cardinality= current_record_count *
pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
+
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
@@ -12975,6 +12987,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
join->return_tab= 0;
+ if (tab->no_forced_join_cache)
+ goto no_join_cache;
+
/*
Don't use join cache if @@join_cache_level==0 or this table is the first
    one in the join suborder (either at top level or inside a bush)
@@ -13936,7 +13951,8 @@ bool JOIN_TAB::preread_init()
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
- derived->is_nonrecursive_derived_with_rec_ref())
+ derived->is_nonrecursive_derived_with_rec_ref() ||
+ is_split_derived)
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -17404,6 +17420,9 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
reopt_remaining_tables &
~real_table_bit);
}
+ double partial_join_cardinality= rec_count *
+ pushdown_cond_selectivity;
+ join->positions[i].partial_join_cardinality= partial_join_cardinality;
(*outer_rec_count) *= pushdown_cond_selectivity;
if (!rs->emb_sj_nest)
*outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
@@ -21048,6 +21067,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
{
DBUG_ENTER("sub_select");
+ if (join_tab->split_derived_to_update && !end_of_records)
+ {
+ table_map tab_map= join_tab->split_derived_to_update;
+ for (uint i= 0; tab_map; i++, tab_map>>= 1)
+ {
+ if (tab_map & 1)
+ join->map2table[i]->preread_init_done= false;
+ }
+ }
+
if (join_tab->last_inner)
{
JOIN_TAB *last_inner_tab= join_tab->last_inner;
@@ -24531,7 +24560,7 @@ JOIN_TAB::remove_duplicates()
if (!(sortorder= (SORT_FIELD*) my_malloc(PSI_INSTRUMENT_ME,
(fields->elements+1) *
sizeof(SORT_FIELD),
- MYF(MY_WME))))
+ MYF(MY_WME | MY_ZEROFILL))))
DBUG_RETURN(TRUE);
/* Calculate how many saved fields there is in list */
@@ -24550,7 +24579,6 @@ JOIN_TAB::remove_duplicates()
else
{
/* Item is not stored in temporary table, remember it */
- sorder->field= 0; // Safety, not used
sorder->item= item;
/* Calculate sorder->length */
item->type_handler()->sort_length(thd, item, sorder);
diff --git a/sql/sql_select.h b/sql/sql_select.h
index a93bcdfc5cd..4eafffd385f 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -394,6 +394,8 @@ typedef struct st_join_table {
*/
bool idx_cond_fact_out;
bool use_join_cache;
+  /* TRUE <=> it is prohibited to join this table using the join buffer */
+ bool no_forced_join_cache;
uint used_join_cache_level;
ulong join_buffer_size_limit;
JOIN_CACHE *cache;
@@ -520,6 +522,16 @@ typedef struct st_join_table {
bool preread_init_done;
+ /* true <=> split optimization has been applied to this materialized table */
+ bool is_split_derived;
+
+ /*
+ Bitmap of split materialized derived tables that can be filled just before
+ this join table is to be joined. All parameters of the split derived tables
+ belong to tables preceding this join table.
+ */
+ table_map split_derived_to_update;
+
/*
Cost info to the range filter used when joining this join table
(Defined when the best join order has been already chosen)
@@ -682,9 +694,10 @@ typedef struct st_join_table {
void partial_cleanup();
void add_keyuses_for_splitting();
- SplM_plan_info *choose_best_splitting(double record_count,
- table_map remaining_tables);
- bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
+ SplM_plan_info *choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary);
+ bool fix_splitting(SplM_plan_info *spl_plan, table_map excluded_tables,
bool is_const_table);
} JOIN_TAB;
@@ -949,9 +962,21 @@ public:
*/
KEYUSE *key;
+ /* Cardinality of current partial join ending with this position */
+ double partial_join_cardinality;
+
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
+  /*
+    If spl_plan is NULL then spl_pd_boundary is 0. Otherwise
+    spl_pd_boundary contains the bit of the table, within the current
+    partial join ending at this position, that starts the sub-sequence
+    of tables S from which no conditions may be used in the plan
+    spl_plan for the split table joined at this position.
+  */
+ table_map spl_pd_boundary;
+
/* Cost info for the range filter used at this position */
Range_rowid_filter_cost_info *range_rowid_filter_info;
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index 60da595afd0..fdb9c647727 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -1011,10 +1011,19 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
else
table->file->print_error(error, MYF(0));
seq->write_unlock(table);
- if (trans_commit_stmt(thd))
- error= 1;
- if (trans_commit_implicit(thd))
- error= 1;
+ {
+ wait_for_commit* suspended_wfc= thd->suspend_subsequent_commits();
+ if (trans_commit_stmt(thd))
+ error= 1;
+ if (trans_commit_implicit(thd))
+ error= 1;
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("hold_worker_on_schedule",
+ {
+ /* delay binlogging of a parent trx in rpl_parallel_seq */
+ my_sleep(100000);
+ });
+ }
if (likely(!error))
error= write_bin_log(thd, 1, thd->query(), thd->query_length());
if (likely(!error))
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 56ac4de6297..fb9364e0002 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -3901,50 +3901,16 @@ double Histogram::point_selectivity(double pos, double avg_sel)
}
else
{
- /*
+ /*
The value 'pos' fits within one single histogram bucket.
- Histogram buckets have the same numbers of rows, but they cover
- different ranges of values.
-
- We assume that values are uniformly distributed across the [0..1] value
- range.
- */
-
- /*
- If all buckets covered value ranges of the same size, the width of
- value range would be:
+      We also have avg_sel, the per-table average selectivity of col=const.
+      If there are popular values, avg_sel may exceed the selectivity of a
+      single bucket, so cap the returned value at one bucket's selectivity.
*/
double avg_bucket_width= 1.0 / (get_width() + 1);
-
- /*
- Let's see what is the width of value range that our bucket is covering.
- (min==max currently. they are kept in the formula just in case we
- will want to extend it to handle multi-bucket case)
- */
- double inv_prec_factor= (double) 1.0 / prec_factor();
- double current_bucket_width=
- (max + 1 == get_width() ? 1.0 : (get_value(max) * inv_prec_factor)) -
- (min == 0 ? 0.0 : (get_value(min-1) * inv_prec_factor));
-
- DBUG_ASSERT(current_bucket_width); /* We shouldn't get a one zero-width bucket */
-
- /*
- So:
- - each bucket has the same #rows
- - values are unformly distributed across the [min_value,max_value] domain.
- If a bucket has value range that's N times bigger then average, than
- each value will have to have N times fewer rows than average.
- */
- sel= avg_sel * avg_bucket_width / current_bucket_width;
-
- /*
- (Q: if we just follow this proportion we may end up in a situation
- where number of different values we expect to find in this bucket
- exceeds the number of rows that this histogram has in a bucket. Are
- we ok with this or we would want to have certain caps?)
- */
+ sel= MY_MIN(avg_bucket_width, avg_sel);
}
return sel;
}
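
A hedged numeric illustration of the new cap (the values are made up):

    /* With a histogram where get_width() == 99:
         avg_bucket_width = 1.0 / (99 + 1) = 0.01
       If avg_sel == 0.05 (a popular value wider than one bucket), the old
       proportional formula could overshoot; the new code returns
         MY_MIN(0.01, 0.05) == 0.01,
       i.e. at most one bucket's share of the rows. */
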
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index a10119e2b23..a5a39228776 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -223,6 +223,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
Lex_for_loop_st for_loop;
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
+ Lex_substring_spec_st substring_spec;
vers_history_point_t vers_history_point;
struct
{
@@ -626,7 +627,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> RELEASE_SYM /* SQL-2003-R */
%token <kwd> RENAME
%token <kwd> REPEAT_SYM /* MYSQL-FUNC */
-%token <kwd> REPLACE /* MYSQL-FUNC */
%token <kwd> REQUIRE_SYM
%token <kwd> RESIGNAL_SYM /* SQL-2003-R */
%token <kwd> RESTRICT
@@ -663,7 +663,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> STDDEV_SAMP_SYM /* SQL-2003-N */
%token <kwd> STD_SYM
%token <kwd> STRAIGHT_JOIN
-%token <kwd> SUBSTRING /* SQL-2003-N */
%token <kwd> SUM_SYM /* SQL-2003-N */
%token <kwd> SYSDATE
%token <kwd> TABLE_REF_PRIORITY
@@ -676,7 +675,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> TO_SYM /* SQL-2003-R */
%token <kwd> TRAILING /* SQL-2003-R */
%token <kwd> TRIGGER_SYM /* SQL-2003-R */
-%token <kwd> TRIM /* SQL-2003-N */
%token <kwd> TRUE_SYM /* SQL-2003-R */
%token <kwd> UNDO_SYM /* FUTURE-USE */
%token <kwd> UNION_SYM /* SQL-2003-R */
@@ -721,6 +719,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> ROWTYPE_MARIADB_SYM // PLSQL-R
/*
+ SQL functions with a special syntax
+*/
+%token <kwd> REPLACE /* MYSQL-FUNC */
+%token <kwd> SUBSTRING /* SQL-2003-N */
+%token <kwd> TRIM /* SQL-2003-N */
+
+
+/*
Non-reserved keywords
*/
@@ -1737,6 +1743,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <for_loop> sp_for_loop_index_and_bounds
%type <for_loop_bounds> sp_for_loop_bounds
%type <trim> trim_operands
+%type <substring_spec> substring_operands
%type <num> opt_sp_for_loop_direction
%type <spvar_mode> sp_parameter_type
%type <index_hint> index_hint_type
@@ -10219,7 +10226,8 @@ function_call_keyword:
}
| TRIM '(' trim_operands ')'
{
- if (unlikely(!($$= $3.make_item_func_trim(thd))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_trim(thd, $3))))
MYSQL_YYABORT;
}
| USER_SYM '(' ')'
@@ -10238,6 +10246,26 @@ function_call_keyword:
}
;
+substring_operands:
+ expr ',' expr ',' expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3, $5);
+ }
+ | expr ',' expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3);
+ }
+ | expr FROM expr FOR_SYM expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3, $5);
+ }
+ | expr FROM expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3);
+ }
+ ;
+
+
/*
  Function calls using non-reserved keywords, with special syntactic forms.
Dedicated grammar rules are needed because of the syntax,
@@ -10352,24 +10380,10 @@ function_call_nonkeyword:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | SUBSTRING '(' expr ',' expr ',' expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr ',' expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr FROM expr FOR_SYM expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr FROM expr ')'
+ | SUBSTRING '(' substring_operands ')'
{
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_substr(thd, $3))))
MYSQL_YYABORT;
}
| SYSDATE opt_time_precision
@@ -10585,7 +10599,8 @@ function_call_conflict:
}
| REPLACE '(' expr ',' expr ',' expr ')'
{
- if (unlikely(!($$= Lex->make_item_func_replace(thd, $3, $5, $7))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_replace(thd, $3, $5, $7))))
MYSQL_YYABORT;
}
| REVERSE_SYM '(' expr ')'
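
The same schema dispatch covers the special TRIM syntax; a hedged sketch, where build_trim() is a hypothetical wrapper and the remaining names are interfaces from this patch:

    static Item *build_trim(THD *thd, const Lex_trim_st &spec)
    {
      /* TRIM(LEADING 'x' FROM col) parses into a Lex_trim_st; the schema
         then picks make_item_func_trim_std() or make_item_func_trim_oracle()
         internally. */
      return Schema::find_implied(thd)->make_item_func_trim(thd, spec);
    }
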
diff --git a/sql/structs.h b/sql/structs.h
index ecdf808f613..fc87f101174 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -782,6 +782,11 @@ public:
}
Item *make_item_func_trim_std(THD *thd) const;
Item *make_item_func_trim_oracle(THD *thd) const;
+ /*
+ This method is still used to handle LTRIM and RTRIM,
+ while the special syntax TRIM(... BOTH|LEADING|TRAILING)
+ is now handled by Schema::make_item_func_trim().
+ */
Item *make_item_func_trim(THD *thd) const;
};
@@ -793,6 +798,25 @@ public:
};
+class Lex_substring_spec_st
+{
+public:
+ Item *m_subject;
+ Item *m_from;
+ Item *m_for;
+ static Lex_substring_spec_st init(Item *subject,
+ Item *from,
+ Item *xfor= NULL)
+ {
+ Lex_substring_spec_st res;
+ res.m_subject= subject;
+ res.m_from= from;
+ res.m_for= xfor;
+ return res;
+ }
+};
+
+
class st_select_lex;
class Lex_select_lock
diff --git a/sql/table.cc b/sql/table.cc
index 0bd2148bdd5..a25f279db08 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -3371,7 +3371,6 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
char *sql_copy;
handler *file;
LEX *old_lex;
- Query_arena *arena, backup;
LEX tmp_lex;
KEY *unused1;
uint unused2;
@@ -3398,12 +3397,6 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
old_lex= thd->lex;
thd->lex= &tmp_lex;
- arena= thd->stmt_arena;
- if (arena->is_conventional())
- arena= 0;
- else
- thd->set_n_backup_active_arena(arena, &backup);
-
thd->reset_db(&db);
lex_start(thd);
@@ -3438,8 +3431,6 @@ ret:
lex_end(thd->lex);
thd->reset_db(&db_backup);
thd->lex= old_lex;
- if (arena)
- thd->restore_active_arena(arena, &backup);
reenable_binlog(thd);
thd->variables.character_set_client= old_cs;
if (unlikely(thd->is_error() || error))
@@ -9386,8 +9377,13 @@ void TABLE_LIST::wrap_into_nested_join(List<TABLE_LIST> &join_list)
static inline bool derived_table_optimization_done(TABLE_LIST *table)
{
- return table->derived &&
- (table->derived->is_excluded() ||
+ SELECT_LEX_UNIT *derived= (table->derived ?
+ table->derived :
+ (table->view ?
+ &table->view->unit:
+ NULL));
+ return derived &&
+ (derived->is_excluded() ||
table->is_materialized_derived());
}
@@ -9449,8 +9445,7 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
set_derived();
}
- if (is_view() ||
- !derived_table_optimization_done(this))
+ if (!derived_table_optimization_done(this))
{
/* A subquery might be forced to be materialized due to a side-effect. */
if (!is_materialized_derived() && unit->can_be_merged() &&
diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc
index dab60de49fd..07db2e73af1 100644
--- a/sql/wsrep_schema.cc
+++ b/sql/wsrep_schema.cc
@@ -1016,10 +1016,9 @@ int Wsrep_schema::append_fragment(THD* thd,
Wsrep_schema_impl::store(frag_table, 3, flags);
Wsrep_schema_impl::store(frag_table, 4, data.data(), data.size());
- int error;
- if ((error= Wsrep_schema_impl::insert(frag_table))) {
- WSREP_ERROR("Failed to write to frag table: %d", error);
+ if (Wsrep_schema_impl::insert(frag_table)) {
trans_rollback_stmt(thd);
+ close_thread_tables(thd);
thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h
index 3d1bf3733a8..0ce612d6097 100644
--- a/sql/wsrep_thd.h
+++ b/sql/wsrep_thd.h
@@ -228,7 +228,14 @@ static inline void wsrep_override_error(THD* thd,
break;
case wsrep::e_append_fragment_error:
/* TODO: Figure out better error number */
- wsrep_override_error(thd, ER_ERROR_DURING_COMMIT, 0, status);
+ if (status)
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT,
+ "Error while appending streaming replication fragment"
+ "(provider status: %s)",
+ wsrep::provider::to_string(status).c_str());
+ else
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT,
+ "Error while appending streaming replication fragment");
break;
case wsrep::e_not_supported_error:
wsrep_override_error(thd, ER_NOT_SUPPORTED_YET);
diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt
index 216df0d45e3..3570aadfc9f 100644
--- a/storage/rocksdb/CMakeLists.txt
+++ b/storage/rocksdb/CMakeLists.txt
@@ -30,6 +30,11 @@ IF(WITH_VALGRIND)
ADD_DEFINITIONS(-DROCKSDB_VALGRIND_RUN=1)
ENDIF()
+ADD_DEFINITIONS(-Duint64_t=u_int64_t)
+ADD_DEFINITIONS(-Duint32_t=u_int32_t)
+ADD_DEFINITIONS(-Duint16_t=u_int16_t)
+ADD_DEFINITIONS(-Duint8_t=u_int8_t)
+
# We've had our builders hang during the build process. This prevents MariaRocks
# to be built on 32 bit intel OS kernels.
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "i[36]86")
diff --git a/storage/rocksdb/ut0counter.h b/storage/rocksdb/ut0counter.h
index 3a7ee85d01c..99134ebd3f4 100644
--- a/storage/rocksdb/ut0counter.h
+++ b/storage/rocksdb/ut0counter.h
@@ -61,7 +61,7 @@ struct get_sched_indexer_t : public generic_indexer_t<Type, N> {
size_t cpu = sched_getcpu();
if (cpu == (size_t) -1) {
- cpu = get_curr_thread_id();
+ cpu = (size_t) get_curr_thread_id();
}
return(cpu);
diff --git a/wsrep-lib b/wsrep-lib
-Subproject 275a0af8c5b92f0ee33cfe9e23f3db5f59b56e9
+Subproject 4951c38357737d568b554402bc5b6abe88a38fe
diff --git a/zlib/ChangeLog b/zlib/ChangeLog
index f0b0e618092..457526bc6a5 100644
--- a/zlib/ChangeLog
+++ b/zlib/ChangeLog
@@ -1,6 +1,18 @@
ChangeLog file for zlib
+Changes in 1.2.13 (13 Oct 2022)
+- Fix configure issue that discarded provided CC definition
+- Correct incorrect inputs provided to the CRC functions
+- Repair prototypes and exporting of new CRC functions
+- Fix inflateBack to detect invalid input with distances too far
+- Have infback() deliver all of the available output up to any error
+- Fix a bug when getting a gzip header extra field with inflate()
+- Fix bug in block type selection when Z_FIXED used
+- Tighten deflateBound bounds
+- Remove deleted assembler code references
+- Various portability and appearance improvements
+
Changes in 1.2.12 (27 Mar 2022)
- Cygwin does not have _wopen(), so do not create gzopen_w() there
- Permit a deflateParams() parameter change as soon as possible
@@ -159,7 +171,7 @@ Changes in 1.2.7.1 (24 Mar 2013)
- Fix types in contrib/minizip to match result of get_crc_table()
- Simplify contrib/vstudio/vc10 with 'd' suffix
- Add TOP support to win32/Makefile.msc
-- Suport i686 and amd64 assembler builds in CMakeLists.txt
+- Support i686 and amd64 assembler builds in CMakeLists.txt
- Fix typos in the use of _LARGEFILE64_SOURCE in zconf.h
- Add vc11 and vc12 build files to contrib/vstudio
- Add gzvprintf() as an undocumented function in zlib
@@ -359,14 +371,14 @@ Changes in 1.2.5.1 (10 Sep 2011)
- Use u4 type for crc_table to avoid conversion warnings
- Apply casts in zlib.h to avoid conversion warnings
- Add OF to prototypes for adler32_combine_ and crc32_combine_ [Miller]
-- Improve inflateSync() documentation to note indeterminancy
+- Improve inflateSync() documentation to note indeterminacy
- Add deflatePending() function to return the amount of pending output
- Correct the spelling of "specification" in FAQ [Randers-Pehrson]
- Add a check in configure for stdarg.h, use for gzprintf()
- Check that pointers fit in ints when gzprint() compiled old style
- Add dummy name before $(SHAREDLIBV) in Makefile [Bar-Lev, Bowler]
- Delete line in configure that adds -L. libz.a to LDFLAGS [Weigelt]
-- Add debug records in assmebler code [Londer]
+- Add debug records in assembler code [Londer]
- Update RFC references to use http://tools.ietf.org/html/... [Li]
- Add --archs option, use of libtool to configure for Mac OS X [Borstel]
@@ -1033,7 +1045,7 @@ Changes in 1.2.0.1 (17 March 2003)
- Include additional header file on VMS for off_t typedef
- Try to use _vsnprintf where it supplants vsprintf [Vollant]
- Add some casts in inffast.c
-- Enchance comments in zlib.h on what happens if gzprintf() tries to
+- Enhance comments in zlib.h on what happens if gzprintf() tries to
write more than 4095 bytes before compression
- Remove unused state from inflateBackEnd()
- Remove exit(0) from minigzip.c, example.c
@@ -1211,7 +1223,7 @@ Changes in 1.0.9 (17 Feb 1998)
- Avoid gcc 2.8.0 comparison bug a little differently than zlib 1.0.8
- in inftrees.c, avoid cc -O bug on HP (Farshid Elahi)
- in zconf.h move the ZLIB_DLL stuff earlier to avoid problems with
- the declaration of FAR (Gilles VOllant)
+ the declaration of FAR (Gilles Vollant)
- install libz.so* with mode 755 (executable) instead of 644 (Marc Lehmann)
- read_buf buf parameter of type Bytef* instead of charf*
- zmemcpy parameters are of type Bytef*, not charf* (Joseph Strout)
@@ -1567,7 +1579,7 @@ Changes in 0.4:
- renamed deflateOptions as deflateInit2, call one or the other but not both
- added the method parameter for deflateInit2
- added inflateInit2
-- simplied considerably deflateInit and inflateInit by not supporting
+- simplified considerably deflateInit and inflateInit by not supporting
user-provided history buffer. This is supported only in deflateInit2
and inflateInit2
diff --git a/zlib/LICENSE b/zlib/LICENSE
new file mode 100644
index 00000000000..ab8ee6f7142
--- /dev/null
+++ b/zlib/LICENSE
@@ -0,0 +1,22 @@
+Copyright notice:
+
+ (C) 1995-2022 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
diff --git a/zlib/README b/zlib/README
index 024b79d3d8c..ba34d1894a9 100644
--- a/zlib/README
+++ b/zlib/README
@@ -1,6 +1,6 @@
ZLIB DATA COMPRESSION LIBRARY
-zlib 1.2.12 is a general purpose data compression library. All the code is
+zlib 1.2.13 is a general purpose data compression library. All the code is
thread safe. The data format used by the zlib library is described by RFCs
(Request for Comments) 1950 to 1952 in the files
http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and
@@ -31,7 +31,7 @@ Mark Nelson <markn@ieee.org> wrote an article about zlib for the Jan. 1997
issue of Dr. Dobb's Journal; a copy of the article is available at
http://marknelson.us/1997/01/01/zlib-engine/ .
-The changes made in version 1.2.12 are documented in the file ChangeLog.
+The changes made in version 1.2.13 are documented in the file ChangeLog.
Unsupported third party contributions are provided in directory contrib/ .
diff --git a/zlib/compress.c b/zlib/compress.c
index e2db404abf8..2ad5326c14e 100644
--- a/zlib/compress.c
+++ b/zlib/compress.c
@@ -19,7 +19,7 @@
memory, Z_BUF_ERROR if there was not enough room in the output buffer,
Z_STREAM_ERROR if the level parameter is invalid.
*/
-int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
+int ZEXPORT compress2(dest, destLen, source, sourceLen, level)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
@@ -65,7 +65,7 @@ int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
/* ===========================================================================
*/
-int ZEXPORT compress (dest, destLen, source, sourceLen)
+int ZEXPORT compress(dest, destLen, source, sourceLen)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
@@ -78,7 +78,7 @@ int ZEXPORT compress (dest, destLen, source, sourceLen)
If the default memLevel or windowBits for deflateInit() is changed, then
this function needs to be updated.
*/
-uLong ZEXPORT compressBound (sourceLen)
+uLong ZEXPORT compressBound(sourceLen)
uLong sourceLen;
{
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
diff --git a/zlib/crc32.c b/zlib/crc32.c
index a1bdce5c23c..f8357b083f7 100644
--- a/zlib/crc32.c
+++ b/zlib/crc32.c
@@ -98,13 +98,22 @@
# endif
#endif
+/* If available, use the ARM processor CRC32 instruction. */
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8
+# define ARMCRC32
+#endif
+
/* Local functions. */
local z_crc_t multmodp OF((z_crc_t a, z_crc_t b));
local z_crc_t x2nmodp OF((z_off64_t n, unsigned k));
-/* If available, use the ARM processor CRC32 instruction. */
-#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8
-# define ARMCRC32
+#if defined(W) && (!defined(ARMCRC32) || defined(DYNAMIC_CRC_TABLE))
+ local z_word_t byte_swap OF((z_word_t word));
+#endif
+
+#if defined(W) && !defined(ARMCRC32)
+ local z_crc_t crc_word OF((z_word_t data));
+ local z_word_t crc_word_big OF((z_word_t data));
#endif
#if defined(W) && (!defined(ARMCRC32) || defined(DYNAMIC_CRC_TABLE))
@@ -630,7 +639,7 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
#endif /* DYNAMIC_CRC_TABLE */
/* Pre-condition the CRC */
- crc ^= 0xffffffff;
+ crc = (~crc) & 0xffffffff;
/* Compute the CRC up to a word boundary. */
while (len && ((z_size_t)buf & 7) != 0) {
@@ -645,8 +654,8 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
len &= 7;
/* Do three interleaved CRCs to realize the throughput of one crc32x
- instruction per cycle. Each CRC is calcuated on Z_BATCH words. The three
- CRCs are combined into a single CRC after each set of batches. */
+ instruction per cycle. Each CRC is calculated on Z_BATCH words. The
+ three CRCs are combined into a single CRC after each set of batches. */
while (num >= 3 * Z_BATCH) {
crc1 = 0;
crc2 = 0;
@@ -749,7 +758,7 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
#endif /* DYNAMIC_CRC_TABLE */
/* Pre-condition the CRC */
- crc ^= 0xffffffff;
+ crc = (~crc) & 0xffffffff;
#ifdef W
@@ -1077,7 +1086,7 @@ uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
- return multmodp(x2nmodp(len2, 3), crc1) ^ crc2;
+ return multmodp(x2nmodp(len2, 3), crc1) ^ (crc2 & 0xffffffff);
}
/* ========================================================================= */
@@ -1086,7 +1095,7 @@ uLong ZEXPORT crc32_combine(crc1, crc2, len2)
uLong crc2;
z_off_t len2;
{
- return crc32_combine64(crc1, crc2, len2);
+ return crc32_combine64(crc1, crc2, (z_off64_t)len2);
}
/* ========================================================================= */
@@ -1103,14 +1112,14 @@ uLong ZEXPORT crc32_combine_gen64(len2)
uLong ZEXPORT crc32_combine_gen(len2)
z_off_t len2;
{
- return crc32_combine_gen64(len2);
+ return crc32_combine_gen64((z_off64_t)len2);
}
/* ========================================================================= */
-uLong crc32_combine_op(crc1, crc2, op)
+uLong ZEXPORT crc32_combine_op(crc1, crc2, op)
uLong crc1;
uLong crc2;
uLong op;
{
- return multmodp(op, crc1) ^ crc2;
+ return multmodp(op, crc1) ^ (crc2 & 0xffffffff);
}
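
A hedged note on the masking change in the CRC routines above:

    /* On LP64 platforms uLong is 64 bits. Given an out-of-range input
       such as crc = 0xffffffff00000000, the old "crc ^= 0xffffffff" left
       the high bits intact, while "crc = (~crc) & 0xffffffff" folds the
       value into a valid 32-bit CRC state; the combine routines enforce
       the same range via (crc2 & 0xffffffff). */
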
diff --git a/zlib/deflate.c b/zlib/deflate.c
index 799fb93cc04..4a689db3598 100644
--- a/zlib/deflate.c
+++ b/zlib/deflate.c
@@ -52,7 +52,7 @@
#include "deflate.h"
const char deflate_copyright[] =
- " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
+ " deflate 1.2.13 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@@ -87,13 +87,7 @@ local void lm_init OF((deflate_state *s));
local void putShortMSB OF((deflate_state *s, uInt b));
local void flush_pending OF((z_streamp strm));
local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
-#ifdef ASMV
-# pragma message("Assembler code may have bugs -- use at your own risk")
- void match_init OF((void)); /* asm code initialization */
- uInt longest_match OF((deflate_state *s, IPos cur_match));
-#else
local uInt longest_match OF((deflate_state *s, IPos cur_match));
-#endif
#ifdef ZLIB_DEBUG
local void check_match OF((deflate_state *s, IPos start, IPos match,
@@ -160,7 +154,7 @@ local const config configuration_table[10] = {
* characters, so that a running hash key can be computed from the previous
* key instead of complete recalculation each time.
*/
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+#define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask)
/* ===========================================================================
@@ -191,9 +185,9 @@ local const config configuration_table[10] = {
*/
#define CLEAR_HASH(s) \
do { \
- s->head[s->hash_size-1] = NIL; \
+ s->head[s->hash_size - 1] = NIL; \
zmemzero((Bytef *)s->head, \
- (unsigned)(s->hash_size-1)*sizeof(*s->head)); \
+ (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
} while (0)
/* ===========================================================================
@@ -285,6 +279,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
if (windowBits < 0) { /* suppress zlib wrapper */
wrap = 0;
+ if (windowBits < -15)
+ return Z_STREAM_ERROR;
windowBits = -windowBits;
}
#ifdef GZIP
@@ -314,7 +310,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
s->hash_bits = (uInt)memLevel + 7;
s->hash_size = 1 << s->hash_bits;
s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+ s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH);
s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
@@ -340,11 +336,11 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
* sym_buf value to read moves forward three bytes. From that symbol, up to
* 31 bits are written to pending_buf. The closest the written pending_buf
* bits gets to the next sym_buf symbol to read is just before the last
- * code is written. At that time, 31*(n-2) bits have been written, just
- * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
- * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
+ * code is written. At that time, 31*(n - 2) bits have been written, just
+ * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at
+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n - 1
* symbols are written.) The closest the writing gets to what is unread is
- * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
+ * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and
* can range from 128 to 32768.
*
* Therefore, at a minimum, there are 142 bits of space between what is
@@ -390,7 +386,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
/* =========================================================================
* Check for a valid deflate stream state. Return 0 if ok, 1 if not.
*/
-local int deflateStateCheck (strm)
+local int deflateStateCheck(strm)
z_streamp strm;
{
deflate_state *s;
@@ -413,7 +409,7 @@ local int deflateStateCheck (strm)
}
/* ========================================================================= */
-int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
+int ZEXPORT deflateSetDictionary(strm, dictionary, dictLength)
z_streamp strm;
const Bytef *dictionary;
uInt dictLength;
@@ -482,7 +478,7 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
}
/* ========================================================================= */
-int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
+int ZEXPORT deflateGetDictionary(strm, dictionary, dictLength)
z_streamp strm;
Bytef *dictionary;
uInt *dictLength;
@@ -504,7 +500,7 @@ int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
}
/* ========================================================================= */
-int ZEXPORT deflateResetKeep (strm)
+int ZEXPORT deflateResetKeep(strm)
z_streamp strm;
{
deflate_state *s;
@@ -542,7 +538,7 @@ int ZEXPORT deflateResetKeep (strm)
}
/* ========================================================================= */
-int ZEXPORT deflateReset (strm)
+int ZEXPORT deflateReset(strm)
z_streamp strm;
{
int ret;
@@ -554,7 +550,7 @@ int ZEXPORT deflateReset (strm)
}
/* ========================================================================= */
-int ZEXPORT deflateSetHeader (strm, head)
+int ZEXPORT deflateSetHeader(strm, head)
z_streamp strm;
gz_headerp head;
{
@@ -565,7 +561,7 @@ int ZEXPORT deflateSetHeader (strm, head)
}
/* ========================================================================= */
-int ZEXPORT deflatePending (strm, pending, bits)
+int ZEXPORT deflatePending(strm, pending, bits)
unsigned *pending;
int *bits;
z_streamp strm;
@@ -579,7 +575,7 @@ int ZEXPORT deflatePending (strm, pending, bits)
}
/* ========================================================================= */
-int ZEXPORT deflatePrime (strm, bits, value)
+int ZEXPORT deflatePrime(strm, bits, value)
z_streamp strm;
int bits;
int value;
@@ -674,36 +670,50 @@ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
}
/* =========================================================================
- * For the default windowBits of 15 and memLevel of 8, this function returns
- * a close to exact, as well as small, upper bound on the compressed size.
- * They are coded as constants here for a reason--if the #define's are
- * changed, then this function needs to be changed as well. The return
- * value for 15 and 8 only works for those exact settings.
+ * For the default windowBits of 15 and memLevel of 8, this function returns a
+ * close to exact, as well as small, upper bound on the compressed size. This
+ * is an expansion of ~0.03%, plus a small constant.
+ *
+ * For any setting other than those defaults for windowBits and memLevel, one
+ * of two worst case bounds is returned. This is at most an expansion of ~4% or
+ * ~13%, plus a small constant.
*
- * For any setting other than those defaults for windowBits and memLevel,
- * the value returned is a conservative worst case for the maximum expansion
- * resulting from using fixed blocks instead of stored blocks, which deflate
- * can emit on compressed data for some combinations of the parameters.
+ * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
+ * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
+ * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
+ * expansion results from five bytes of header for each stored block.
*
- * This function could be more sophisticated to provide closer upper bounds for
- * every combination of windowBits and memLevel. But even the conservative
- * upper bound of about 14% expansion does not seem onerous for output buffer
- * allocation.
+ * The larger expansion of 13% results from a window size less than or equal to
+ * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
+ * the data being compressed may have slid out of the sliding window, impeding
+ * a stored block from being emitted. Then the only choice is a fixed or
+ * dynamic block, where a fixed block limits the maximum expansion to 9 bits
+ * per 8-bit byte, plus 10 bits for every block. The smallest block size for
+ * which this can occur is 255 (memLevel == 2).
+ *
+ * Shifts are used to approximate divisions, for speed.
*/
uLong ZEXPORT deflateBound(strm, sourceLen)
z_streamp strm;
uLong sourceLen;
{
deflate_state *s;
- uLong complen, wraplen;
+ uLong fixedlen, storelen, wraplen;
+
+ /* upper bound for fixed blocks with 9-bit literals and length 255
+ (memLevel == 2, which is the lowest that may not use stored blocks) --
+ ~13% overhead plus a small constant */
+ fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
+ (sourceLen >> 9) + 4;
- /* conservative upper bound for compressed data */
- complen = sourceLen +
- ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
+ /* upper bound for stored blocks with length 127 (memLevel == 1) --
+ ~4% overhead plus a small constant */
+ storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
+ (sourceLen >> 11) + 7;
- /* if can't get parameters, return conservative bound plus zlib wrapper */
+ /* if can't get parameters, return larger bound plus a zlib wrapper */
if (deflateStateCheck(strm))
- return complen + 6;
+ return (fixedlen > storelen ? fixedlen : storelen) + 6;
/* compute wrapper length */
s = strm->state;
@@ -740,11 +750,12 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
wraplen = 6;
}
- /* if not default parameters, return conservative bound */
+ /* if not default parameters, return one of the conservative bounds */
if (s->w_bits != 15 || s->hash_bits != 8 + 7)
- return complen + wraplen;
+ return (s->w_bits <= s->hash_bits ? fixedlen : storelen) + wraplen;
- /* default settings: return tight bound for that case */
+ /* default settings: return tight bound for that case -- ~0.03% overhead
+ plus a small constant */
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13 - 6 + wraplen;
}
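
A hedged worked example of the tight default-settings bound (windowBits == 15, memLevel == 8, zlib wrapper, so wraplen == 6):

    /* For sourceLen = 1000000:
         1000000 + (1000000 >> 12) + (1000000 >> 14) + (1000000 >> 25)
                 + 13 - 6 + 6
       = 1000000 + 244 + 61 + 0 + 13
       = 1000318 bytes, i.e. about 0.03% overhead plus a small constant. */
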
@@ -754,7 +765,7 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
* IN assertion: the stream state is correct and there is enough room in
* pending_buf.
*/
-local void putShortMSB (s, b)
+local void putShortMSB(s, b)
deflate_state *s;
uInt b;
{
@@ -801,7 +812,7 @@ local void flush_pending(strm)
} while (0)
/* ========================================================================= */
-int ZEXPORT deflate (strm, flush)
+int ZEXPORT deflate(strm, flush)
z_streamp strm;
int flush;
{
@@ -856,7 +867,7 @@ int ZEXPORT deflate (strm, flush)
s->status = BUSY_STATE;
if (s->status == INIT_STATE) {
/* zlib header */
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8;
uInt level_flags;
if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
@@ -1116,7 +1127,7 @@ int ZEXPORT deflate (strm, flush)
}
/* ========================================================================= */
-int ZEXPORT deflateEnd (strm)
+int ZEXPORT deflateEnd(strm)
z_streamp strm;
{
int status;
@@ -1142,7 +1153,7 @@ int ZEXPORT deflateEnd (strm)
* To simplify the source, this is not supported for 16-bit MSDOS (which
* doesn't have enough memory anyway to duplicate compression states).
*/
-int ZEXPORT deflateCopy (dest, source)
+int ZEXPORT deflateCopy(dest, source)
z_streamp dest;
z_streamp source;
{
@@ -1231,7 +1242,7 @@ local unsigned read_buf(strm, buf, size)
/* ===========================================================================
* Initialize the "longest match" routines for a new zlib stream
*/
-local void lm_init (s)
+local void lm_init(s)
deflate_state *s;
{
s->window_size = (ulg)2L*s->w_size;
@@ -1252,11 +1263,6 @@ local void lm_init (s)
s->match_length = s->prev_length = MIN_MATCH-1;
s->match_available = 0;
s->ins_h = 0;
-#ifndef FASTEST
-#ifdef ASMV
- match_init(); /* initialize the asm code */
-#endif
-#endif
}
#ifndef FASTEST
@@ -1269,10 +1275,6 @@ local void lm_init (s)
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
* OUT assertion: the match length is not greater than s->lookahead.
*/
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
local uInt longest_match(s, cur_match)
deflate_state *s;
IPos cur_match; /* current match */
@@ -1297,10 +1299,10 @@ local uInt longest_match(s, cur_match)
*/
register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
register ush scan_start = *(ushf*)scan;
- register ush scan_end = *(ushf*)(scan+best_len-1);
+ register ush scan_end = *(ushf*)(scan + best_len - 1);
#else
register Bytef *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end1 = scan[best_len - 1];
register Byte scan_end = scan[best_len];
#endif
@@ -1318,7 +1320,8 @@ local uInt longest_match(s, cur_match)
*/
if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "need lookahead");
do {
Assert(cur_match < s->strstart, "no future");
@@ -1336,43 +1339,44 @@ local uInt longest_match(s, cur_match)
/* This code assumes sizeof(unsigned short) == 2. Do not use
* UNALIGNED_OK if your compiler uses a different size.
*/
- if (*(ushf*)(match+best_len-1) != scan_end ||
+ if (*(ushf*)(match + best_len - 1) != scan_end ||
*(ushf*)match != scan_start) continue;
/* It is not necessary to compare scan[2] and match[2] since they are
* always equal when the other bytes match, given that the hash keys
* are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * strstart + 3, + 5, up to strstart + 257. We check for insufficient
* lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
* necessary to put more guard bytes at the end of the window, or
* to check more often for insufficient lookahead.
*/
Assert(scan[2] == match[2], "scan[2]?");
scan++, match++;
do {
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
scan < strend);
/* The funny "do {}" generates better code on most compilers */
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ /* Here, scan <= window + strstart + 257 */
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+ "wild scan");
if (*scan == *match) scan++;
- len = (MAX_MATCH - 1) - (int)(strend-scan);
+ len = (MAX_MATCH - 1) - (int)(strend - scan);
scan = strend - (MAX_MATCH-1);
#else /* UNALIGNED_OK */
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
+ if (match[best_len] != scan_end ||
+ match[best_len - 1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
- /* The check at best_len-1 can be removed because it will be made
+ /* The check at best_len - 1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
@@ -1382,7 +1386,7 @@ local uInt longest_match(s, cur_match)
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
+ * the 256th check will be made at strstart + 258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
@@ -1391,7 +1395,8 @@ local uInt longest_match(s, cur_match)
*++scan == *++match && *++scan == *++match &&
scan < strend);
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+ "wild scan");
len = MAX_MATCH - (int)(strend - scan);
scan = strend - MAX_MATCH;
@@ -1403,9 +1408,9 @@ local uInt longest_match(s, cur_match)
best_len = len;
if (len >= nice_match) break;
#ifdef UNALIGNED_OK
- scan_end = *(ushf*)(scan+best_len-1);
+ scan_end = *(ushf*)(scan + best_len - 1);
#else
- scan_end1 = scan[best_len-1];
+ scan_end1 = scan[best_len - 1];
scan_end = scan[best_len];
#endif
}
@@ -1415,7 +1420,6 @@ local uInt longest_match(s, cur_match)
if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
return s->lookahead;
}
-#endif /* ASMV */
#else /* FASTEST */
@@ -1436,7 +1440,8 @@ local uInt longest_match(s, cur_match)
*/
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "need lookahead");
Assert(cur_match < s->strstart, "no future");
@@ -1446,7 +1451,7 @@ local uInt longest_match(s, cur_match)
*/
if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
- /* The check at best_len-1 can be removed because it will be made
+ /* The check at best_len - 1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
@@ -1456,7 +1461,7 @@ local uInt longest_match(s, cur_match)
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
+ * the 256th check will be made at strstart + 258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
@@ -1465,7 +1470,7 @@ local uInt longest_match(s, cur_match)
*++scan == *++match && *++scan == *++match &&
scan < strend);
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan");
len = MAX_MATCH - (int)(strend - scan);
@@ -1501,7 +1506,7 @@ local void check_match(s, start, match, length)
z_error("invalid match");
}
if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
+ fprintf(stderr,"\\[%d,%d]", start - match, length);
do { putc(s->window[start++], stderr); } while (--length != 0);
}
}
@@ -1547,9 +1552,9 @@ local void fill_window(s)
/* If the window is almost full and there is insufficient lookahead,
* move the upper half to the lower one to make room in the upper half.
*/
- if (s->strstart >= wsize+MAX_DIST(s)) {
+ if (s->strstart >= wsize + MAX_DIST(s)) {
- zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
+ zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more);
s->match_start -= wsize;
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
s->block_start -= (long) wsize;
@@ -1680,7 +1685,7 @@ local void fill_window(s)
*
* deflate_stored() is written to minimize the number of times an input byte is
* copied. It is most efficient with large input and output buffers, which
- * maximizes the opportunites to have a single copy from next_in to next_out.
+ * maximizes the opportunities to have a single copy from next_in to next_out.
*/
local block_state deflate_stored(s, flush)
deflate_state *s;
@@ -1890,7 +1895,7 @@ local block_state deflate_fast(s, flush)
if (s->lookahead == 0) break; /* flush the current block */
}
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
hash_head = NIL;
@@ -1938,7 +1943,7 @@ local block_state deflate_fast(s, flush)
s->strstart += s->match_length;
s->match_length = 0;
s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
@@ -1949,7 +1954,7 @@ local block_state deflate_fast(s, flush)
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
}
@@ -1993,7 +1998,7 @@ local block_state deflate_slow(s, flush)
if (s->lookahead == 0) break; /* flush the current block */
}
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
hash_head = NIL;
@@ -2035,17 +2040,17 @@ local block_state deflate_slow(s, flush)
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+ check_match(s, s->strstart - 1, s->prev_match, s->prev_length);
- _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+ _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
s->prev_length - MIN_MATCH, bflush);
/* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
+ * strstart - 1 and strstart are already inserted. If there is not
* enough lookahead, the last two strings are not inserted in
* the hash table.
*/
- s->lookahead -= s->prev_length-1;
+ s->lookahead -= s->prev_length - 1;
s->prev_length -= 2;
do {
if (++s->strstart <= max_insert) {
@@ -2063,8 +2068,8 @@ local block_state deflate_slow(s, flush)
* single literal. If there was a match but the current match
* is longer, truncate the previous match to a single literal.
*/
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+ _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
if (bflush) {
FLUSH_BLOCK_ONLY(s, 0);
}
@@ -2082,8 +2087,8 @@ local block_state deflate_slow(s, flush)
}
Assert (flush != Z_NO_FLUSH, "no flush?");
if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+ _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
s->match_available = 0;
}
s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
@@ -2140,7 +2145,8 @@ local block_state deflate_rle(s, flush)
if (s->match_length > s->lookahead)
s->match_length = s->lookahead;
}
- Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (uInt)(s->window_size - 1),
+ "wild scan");
}
/* Emit match if have run of MIN_MATCH or longer, else emit literal */
@@ -2155,7 +2161,7 @@ local block_state deflate_rle(s, flush)
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
}
@@ -2195,7 +2201,7 @@ local block_state deflate_huff(s, flush)
/* Output a literal byte */
s->match_length = 0;
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
if (bflush) FLUSH_BLOCK(s, 0);
diff --git a/zlib/deflate.h b/zlib/deflate.h
index 3acd0750751..1a06cd5f25d 100644
--- a/zlib/deflate.h
+++ b/zlib/deflate.h
@@ -329,8 +329,8 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
# define _tr_tally_dist(s, distance, length, flush) \
{ uch len = (uch)(length); \
ush dist = (ush)(distance); \
- s->sym_buf[s->sym_next++] = (uchf) dist; \
- s->sym_buf[s->sym_next++] = dist >> 8; \
+ s->sym_buf[s->sym_next++] = (uch)dist; \
+ s->sym_buf[s->sym_next++] = (uch)(dist >> 8); \
s->sym_buf[s->sym_next++] = len; \
dist--; \
s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
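
The _tr_tally_dist change stores the 16-bit distance into sym_buf one byte at a time, now with an explicit (uch) cast on both bytes so the truncation is clearly intentional. A tiny sketch of the same little-endian byte store, with a hypothetical put_u16le() helper:

    #include <stdint.h>

    /* Store a 16-bit value into a byte buffer, low byte first,
     * with explicit casts so the narrowing is deliberate and
     * warning-free -- the same shape as the macro change above. */
    static void put_u16le(uint8_t *buf, unsigned pos, unsigned dist)
    {
        buf[pos]     = (uint8_t)dist;          /* low byte  */
        buf[pos + 1] = (uint8_t)(dist >> 8);   /* high byte */
    }

The casts do not change behavior; they only make the byte-sized stores explicit to compilers that warn on implicit conversion.
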
diff --git a/zlib/gzlib.c b/zlib/gzlib.c
index dddaf268730..55da46a453f 100644
--- a/zlib/gzlib.c
+++ b/zlib/gzlib.c
@@ -30,7 +30,7 @@ local gzFile gz_open OF((const void *, int, const char *));
The gz_strwinerror function does not change the current setting of
GetLastError. */
-char ZLIB_INTERNAL *gz_strwinerror (error)
+char ZLIB_INTERNAL *gz_strwinerror(error)
DWORD error;
{
static char buf[1024];
diff --git a/zlib/gzread.c b/zlib/gzread.c
index a669b2d5814..a53d54a2208 100644
--- a/zlib/gzread.c
+++ b/zlib/gzread.c
@@ -157,11 +157,9 @@ local int gz_look(state)
the output buffer is larger than the input buffer, which also assures
space for gzungetc() */
state->x.next = state->out;
- if (strm->avail_in) {
- memcpy(state->x.next, strm->next_in, strm->avail_in);
- state->x.have = strm->avail_in;
- strm->avail_in = 0;
- }
+ memcpy(state->x.next, strm->next_in, strm->avail_in);
+ state->x.have = strm->avail_in;
+ strm->avail_in = 0;
state->how = COPY;
state->direct = 1;
return 0;
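
The gz_look() hunk drops the avail_in guard around the copy: memcpy() with a length of zero is well defined in C as long as both pointers are valid, so the special case bought nothing. A one-function illustration under that assumption (copy_pending() is a hypothetical name):

    #include <string.h>

    /* memcpy(dst, src, 0) is well defined when dst and src are
     * valid pointers, so an "if (n)" guard is redundant -- the
     * reasoning behind removing the avail_in test in gz_look(). */
    static void copy_pending(unsigned char *dst, const unsigned char *src,
                             unsigned n)
    {
        memcpy(dst, src, n);   /* fine even when n == 0 */
    }
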
diff --git a/zlib/gzwrite.c b/zlib/gzwrite.c
index a928257152a..b7236f70d14 100644
--- a/zlib/gzwrite.c
+++ b/zlib/gzwrite.c
@@ -474,7 +474,7 @@ int ZEXPORTVA gzprintf(gzFile file, const char *format, ...)
#else /* !STDC && !Z_HAVE_STDARG_H */
/* -- see zlib.h -- */
-int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+int ZEXPORTVA gzprintf(file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
a11, a12, a13, a14, a15, a16, a17, a18, a19, a20)
gzFile file;
const char *format;
diff --git a/zlib/infback.c b/zlib/infback.c
index a390c58e816..babeaf1806f 100644
--- a/zlib/infback.c
+++ b/zlib/infback.c
@@ -66,6 +66,7 @@ int stream_size;
state->window = window;
state->wnext = 0;
state->whave = 0;
+ state->sane = 1;
return Z_OK;
}
@@ -605,25 +606,27 @@ void FAR *out_desc;
break;
case DONE:
- /* inflate stream terminated properly -- write leftover output */
+ /* inflate stream terminated properly */
ret = Z_STREAM_END;
- if (left < state->wsize) {
- if (out(out_desc, state->window, state->wsize - left))
- ret = Z_BUF_ERROR;
- }
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
- default: /* can't happen, but makes compilers happy */
+ default:
+ /* can't happen, but makes compilers happy */
ret = Z_STREAM_ERROR;
goto inf_leave;
}
- /* Return unused input */
+ /* Write leftover output and return unused input */
inf_leave:
+ if (left < state->wsize) {
+ if (out(out_desc, state->window, state->wsize - left) &&
+ ret == Z_STREAM_END)
+ ret = Z_BUF_ERROR;
+ }
strm->next_in = next;
strm->avail_in = have;
return ret;
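
The inflateBack() hunks move the leftover-output flush from the DONE case to the shared inf_leave exit, so every return path writes out whatever remains in the window, and a failed flush only downgrades Z_STREAM_END to Z_BUF_ERROR instead of masking a real error code. A hypothetical reduction of that single-exit pattern (the names and status codes are invented for illustration):

    /* All outcomes funnel through one finish step that flushes
     * pending output; a flush failure only overrides success. */
    enum st { ST_OK = 1, ST_DATA_ERR = -3, ST_BUF_ERR = -5 };

    static int finish(int ret,
                      int (*out)(const unsigned char *, unsigned),
                      const unsigned char *window, unsigned pending)
    {
        /* write leftover output for every exit path */
        if (pending && out(window, pending) && ret == ST_OK)
            ret = ST_BUF_ERR;   /* don't mask a prior error */
        return ret;
    }
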
diff --git a/zlib/inflate.c b/zlib/inflate.c
index 7be8c63662a..8acbef44e99 100644
--- a/zlib/inflate.c
+++ b/zlib/inflate.c
@@ -168,6 +168,8 @@ int windowBits;
/* extract wrap request from windowBits parameter */
if (windowBits < 0) {
+ if (windowBits < -15)
+ return Z_STREAM_ERROR;
wrap = 0;
windowBits = -windowBits;
}
@@ -764,8 +766,9 @@ int flush;
if (copy > have) copy = have;
if (copy) {
if (state->head != Z_NULL &&
- state->head->extra != Z_NULL) {
- len = state->head->extra_len - state->length;
+ state->head->extra != Z_NULL &&
+ (len = state->head->extra_len - state->length) <
+ state->head->extra_max) {
zmemcpy(state->head->extra + len, next,
len + copy > state->head->extra_max ?
state->head->extra_max - len : copy);
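
Two hardening fixes land here: inflateInit2() now rejects windowBits below -15 outright instead of negating an out-of-range request, and the gzip extra-field copy only writes into head->extra while the offset is still inside extra_max, clamping the final chunk; the latter is the out-of-bounds write fixed in zlib 1.2.13 (the fix associated with CVE-2022-37434). A minimal sketch of the clamped copy, with hypothetical parameter names:

    #include <string.h>

    /* Copy a chunk of a gzip "extra" field into a user buffer of
     * capacity extra_max, where off is how much of the field has
     * already been consumed. Writes nothing once off >= extra_max
     * and clamps the tail chunk -- the shape of the inflate.c fix. */
    static void copy_extra(unsigned char *extra, unsigned extra_max,
                           unsigned off, const unsigned char *next,
                           unsigned copy)
    {
        if (extra != NULL && off < extra_max)
            memcpy(extra + off, next,
                   off + copy > extra_max ? extra_max - off : copy);
    }
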
diff --git a/zlib/inftrees.c b/zlib/inftrees.c
index 09462a740b1..57d2793bec9 100644
--- a/zlib/inftrees.c
+++ b/zlib/inftrees.c
@@ -9,7 +9,7 @@
#define MAXBITS 15
const char inflate_copyright[] =
- " inflate 1.2.12 Copyright 1995-2022 Mark Adler ";
+ " inflate 1.2.13 Copyright 1995-2022 Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@@ -62,7 +62,7 @@ unsigned short FAR *work;
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
- 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 199, 202};
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 194, 65};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
diff --git a/zlib/inftrees.h b/zlib/inftrees.h
index baa53a0b1a1..f53665311c1 100644
--- a/zlib/inftrees.h
+++ b/zlib/inftrees.h
@@ -38,7 +38,7 @@ typedef struct {
/* Maximum size of the dynamic table. The maximum number of code structures is
1444, which is the sum of 852 for literal/length codes and 592 for distance
codes. These values were found by exhaustive searches using the program
- examples/enough.c found in the zlib distribtution. The arguments to that
+ examples/enough.c found in the zlib distribution. The arguments to that
program are the number of symbols, the initial root table size, and the
maximum bit length of a code. "enough 286 9 15" for literal/length codes
returns 852, and "enough 30 6 15" for distance codes returns 592.
diff --git a/zlib/make_vms.com b/zlib/make_vms.com
index 65e9d0cbc8e..4dc8a891355 100644
--- a/zlib/make_vms.com
+++ b/zlib/make_vms.com
@@ -14,9 +14,9 @@ $! 0.02 20061008 Adapt to new Makefile.in
$! 0.03 20091224 Add support for large file check
$! 0.04 20100110 Add new gzclose, gzlib, gzread, gzwrite
$! 0.05 20100221 Exchange zlibdefs.h by zconf.h.in
-$! 0.06 20120111 Fix missing amiss_err, update zconf_h.in, fix new exmples
+$! 0.06 20120111 Fix missing amiss_err, update zconf_h.in, fix new examples
$! subdir path, update module search in makefile.in
-$! 0.07 20120115 Triggered by work done by Alexey Chupahin completly redesigned
+$! 0.07 20120115 Triggered by work done by Alexey Chupahin completely redesigned
$! shared image creation
$! 0.08 20120219 Make it work on VAX again, pre-load missing symbols to shared
$! image
diff --git a/zlib/qnx/package.qpg b/zlib/qnx/package.qpg
index badd1d5a0a7..ba2f1a2d6c3 100644
--- a/zlib/qnx/package.qpg
+++ b/zlib/qnx/package.qpg
@@ -25,10 +25,10 @@
<QPG:Files>
<QPG:Add file="../zconf.h" install="/opt/include/" user="root:sys" permission="644"/>
<QPG:Add file="../zlib.h" install="/opt/include/" user="root:sys" permission="644"/>
- <QPG:Add file="../libz.so.1.2.12" install="/opt/lib/" user="root:bin" permission="644"/>
- <QPG:Add file="libz.so" install="/opt/lib/" component="dev" filetype="symlink" linkto="libz.so.1.2.12"/>
- <QPG:Add file="libz.so.1" install="/opt/lib/" filetype="symlink" linkto="libz.so.1.2.12"/>
- <QPG:Add file="../libz.so.1.2.12" install="/opt/lib/" component="slib"/>
+ <QPG:Add file="../libz.so.1.2.13" install="/opt/lib/" user="root:bin" permission="644"/>
+ <QPG:Add file="libz.so" install="/opt/lib/" component="dev" filetype="symlink" linkto="libz.so.1.2.13"/>
+ <QPG:Add file="libz.so.1" install="/opt/lib/" filetype="symlink" linkto="libz.so.1.2.13"/>
+ <QPG:Add file="../libz.so.1.2.13" install="/opt/lib/" component="slib"/>
</QPG:Files>
<QPG:PackageFilter>
@@ -63,7 +63,7 @@
</QPM:ProductDescription>
<QPM:ReleaseDescription>
- <QPM:ReleaseVersion>1.2.12</QPM:ReleaseVersion>
+ <QPM:ReleaseVersion>1.2.13</QPM:ReleaseVersion>
<QPM:ReleaseUrgency>Medium</QPM:ReleaseUrgency>
<QPM:ReleaseStability>Stable</QPM:ReleaseStability>
<QPM:ReleaseNoteMinor></QPM:ReleaseNoteMinor>
diff --git a/zlib/treebuild.xml b/zlib/treebuild.xml
index 781b4c98cc2..0017a45d3c5 100644
--- a/zlib/treebuild.xml
+++ b/zlib/treebuild.xml
@@ -1,6 +1,6 @@
<?xml version="1.0" ?>
-<package name="zlib" version="1.2.12">
- <library name="zlib" dlversion="1.2.12" dlname="z">
+<package name="zlib" version="1.2.13">
+ <library name="zlib" dlversion="1.2.13" dlname="z">
<property name="description"> zip compression library </property>
<property name="include-target-dir" value="$(@PACKAGE/install-includedir)" />
diff --git a/zlib/trees.c b/zlib/trees.c
index f73fd99c37b..5f305c47221 100644
--- a/zlib/trees.c
+++ b/zlib/trees.c
@@ -193,7 +193,7 @@ local void send_bits(s, value, length)
s->bits_sent += (ulg)length;
/* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * (16 - bi_valid) bits from value, leaving (width - (16 - bi_valid))
* unused bits in value.
*/
if (s->bi_valid > (int)Buf_size - length) {
@@ -256,7 +256,7 @@ local void tr_static_init()
length = 0;
for (code = 0; code < LENGTH_CODES-1; code++) {
base_length[code] = length;
- for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ for (n = 0; n < (1 << extra_lbits[code]); n++) {
_length_code[length++] = (uch)code;
}
}
@@ -265,13 +265,13 @@ local void tr_static_init()
* in two different ways: code 284 + 5 bits or code 285, so we
* overwrite length_code[255] to use the best encoding:
*/
- _length_code[length-1] = (uch)code;
+ _length_code[length - 1] = (uch)code;
/* Initialize the mapping dist (0..32K) -> dist code (0..29) */
dist = 0;
for (code = 0 ; code < 16; code++) {
base_dist[code] = dist;
- for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ for (n = 0; n < (1 << extra_dbits[code]); n++) {
_dist_code[dist++] = (uch)code;
}
}
@@ -279,11 +279,11 @@ local void tr_static_init()
dist >>= 7; /* from now on, all distances are divided by 128 */
for ( ; code < D_CODES; code++) {
base_dist[code] = dist << 7;
- for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {
_dist_code[256 + dist++] = (uch)code;
}
}
- Assert (dist == 256, "tr_static_init: 256+dist != 512");
+ Assert (dist == 256, "tr_static_init: 256 + dist != 512");
/* Construct the codes of the static literal tree */
for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
@@ -312,7 +312,7 @@ local void tr_static_init()
}
/* ===========================================================================
- * Genererate the file trees.h describing the static trees.
+ * Generate the file trees.h describing the static trees.
*/
#ifdef GEN_TREES_H
# ifndef ZLIB_DEBUG
@@ -321,7 +321,7 @@ local void tr_static_init()
# define SEPARATOR(i, last, width) \
((i) == (last)? "\n};\n\n" : \
- ((i) % (width) == (width)-1 ? ",\n" : ", "))
+ ((i) % (width) == (width) - 1 ? ",\n" : ", "))
void gen_trees_header()
{
@@ -458,7 +458,7 @@ local void pqdownheap(s, tree, k)
while (j <= s->heap_len) {
/* Set j to the smallest of the two sons: */
if (j < s->heap_len &&
- smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ smaller(tree, s->heap[j + 1], s->heap[j], s->depth)) {
j++;
}
/* Exit if v is smaller than both sons */
@@ -507,7 +507,7 @@ local void gen_bitlen(s, desc)
*/
tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
- for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ for (h = s->heap_max + 1; h < HEAP_SIZE; h++) {
n = s->heap[h];
bits = tree[tree[n].Dad].Len + 1;
if (bits > max_length) bits = max_length, overflow++;
@@ -518,7 +518,7 @@ local void gen_bitlen(s, desc)
s->bl_count[bits]++;
xbits = 0;
- if (n >= base) xbits = extra[n-base];
+ if (n >= base) xbits = extra[n - base];
f = tree[n].Freq;
s->opt_len += (ulg)f * (unsigned)(bits + xbits);
if (stree) s->static_len += (ulg)f * (unsigned)(stree[n].Len + xbits);
@@ -530,10 +530,10 @@ local void gen_bitlen(s, desc)
/* Find the first bit length which could increase: */
do {
- bits = max_length-1;
+ bits = max_length - 1;
while (s->bl_count[bits] == 0) bits--;
- s->bl_count[bits]--; /* move one leaf down the tree */
- s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits + 1] += 2; /* move one overflow item as its brother */
s->bl_count[max_length]--;
/* The brother of the overflow item also moves one step up,
* but this does not affect bl_count[max_length]
@@ -569,7 +569,7 @@ local void gen_bitlen(s, desc)
* OUT assertion: the field code is set for all tree elements of non
* zero code length.
*/
-local void gen_codes (tree, max_code, bl_count)
+local void gen_codes(tree, max_code, bl_count)
ct_data *tree; /* the tree to decorate */
int max_code; /* largest code with non zero frequency */
ushf *bl_count; /* number of codes at each bit length */
@@ -583,13 +583,13 @@ local void gen_codes (tree, max_code, bl_count)
* without bit reversal.
*/
for (bits = 1; bits <= MAX_BITS; bits++) {
- code = (code + bl_count[bits-1]) << 1;
+ code = (code + bl_count[bits - 1]) << 1;
next_code[bits] = (ush)code;
}
/* Check that the bit counts in bl_count are consistent. The last code
* must be all ones.
*/
- Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ Assert (code + bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1,
"inconsistent bit counts");
Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
@@ -600,7 +600,7 @@ local void gen_codes (tree, max_code, bl_count)
tree[n].Code = (ush)bi_reverse(next_code[len]++, len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
- n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len] - 1));
}
}
@@ -624,7 +624,7 @@ local void build_tree(s, desc)
int node; /* new node being created */
/* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n + 1].
* heap[0] is not used.
*/
s->heap_len = 0, s->heap_max = HEAP_SIZE;
@@ -652,7 +652,7 @@ local void build_tree(s, desc)
}
desc->max_code = max_code;
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ /* The elements heap[heap_len/2 + 1 .. heap_len] are leaves of the tree,
* establish sub-heaps of increasing lengths:
*/
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
@@ -700,7 +700,7 @@ local void build_tree(s, desc)
* Scan a literal or distance tree to determine the frequencies of the codes
* in the bit length tree.
*/
-local void scan_tree (s, tree, max_code)
+local void scan_tree(s, tree, max_code)
deflate_state *s;
ct_data *tree; /* the tree to be scanned */
int max_code; /* and its largest code of non zero frequency */
@@ -714,10 +714,10 @@ local void scan_tree (s, tree, max_code)
int min_count = 4; /* min repeat count */
if (nextlen == 0) max_count = 138, min_count = 3;
- tree[max_code+1].Len = (ush)0xffff; /* guard */
+ tree[max_code + 1].Len = (ush)0xffff; /* guard */
for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
+ curlen = nextlen; nextlen = tree[n + 1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
@@ -745,7 +745,7 @@ local void scan_tree (s, tree, max_code)
* Send a literal or distance tree in compressed form, using the codes in
* bl_tree.
*/
-local void send_tree (s, tree, max_code)
+local void send_tree(s, tree, max_code)
deflate_state *s;
ct_data *tree; /* the tree to be scanned */
int max_code; /* and its largest code of non zero frequency */
@@ -758,11 +758,11 @@ local void send_tree (s, tree, max_code)
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
- /* tree[max_code+1].Len = -1; */ /* guard already set */
+ /* tree[max_code + 1].Len = -1; */ /* guard already set */
if (nextlen == 0) max_count = 138, min_count = 3;
for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
+ curlen = nextlen; nextlen = tree[n + 1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
@@ -773,13 +773,13 @@ local void send_tree (s, tree, max_code)
send_code(s, curlen, s->bl_tree); count--;
}
Assert(count >= 3 && count <= 6, " 3_6?");
- send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count - 3, 2);
} else if (count <= 10) {
- send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count - 3, 3);
} else {
- send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count - 11, 7);
}
count = 0; prevlen = curlen;
if (nextlen == 0) {
@@ -807,8 +807,8 @@ local int build_bl_tree(s)
/* Build the bit length tree: */
build_tree(s, (tree_desc *)(&(s->bl_desc)));
- /* opt_len now includes the length of the tree representations, except
- * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ /* opt_len now includes the length of the tree representations, except the
+ * lengths of the bit lengths codes and the 5 + 5 + 4 bits for the counts.
*/
/* Determine the number of bit length codes to send. The pkzip format
@@ -819,7 +819,7 @@ local int build_bl_tree(s)
if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
}
/* Update opt_len to include the bit length tree and counts */
- s->opt_len += 3*((ulg)max_blindex+1) + 5+5+4;
+ s->opt_len += 3*((ulg)max_blindex + 1) + 5 + 5 + 4;
Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
s->opt_len, s->static_len));
@@ -841,19 +841,19 @@ local void send_all_trees(s, lcodes, dcodes, blcodes)
Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
"too many codes");
Tracev((stderr, "\nbl counts: "));
- send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
- send_bits(s, dcodes-1, 5);
- send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes - 1, 5);
+ send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */
for (rank = 0; rank < blcodes; rank++) {
Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
}
Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
- send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes - 1); /* literal tree */
Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
- send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes - 1); /* distance tree */
Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}
@@ -866,7 +866,7 @@ void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last)
ulg stored_len; /* length of input block */
int last; /* one if this is the last block for a file */
{
- send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */
+ send_bits(s, (STORED_BLOCK<<1) + last, 3); /* send block type */
bi_windup(s); /* align on byte boundary */
put_short(s, (ush)stored_len);
put_short(s, (ush)~stored_len);
@@ -877,7 +877,7 @@ void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last)
s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
s->compressed_len += (stored_len + 4) << 3;
s->bits_sent += 2*16;
- s->bits_sent += stored_len<<3;
+ s->bits_sent += stored_len << 3;
#endif
}
@@ -943,14 +943,17 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
max_blindex = build_bl_tree(s);
/* Determine the best encoding. Compute the block lengths in bytes. */
- opt_lenb = (s->opt_len+3+7)>>3;
- static_lenb = (s->static_len+3+7)>>3;
+ opt_lenb = (s->opt_len + 3 + 7) >> 3;
+ static_lenb = (s->static_len + 3 + 7) >> 3;
Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
s->sym_next / 3));
- if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+#ifndef FORCE_STATIC
+ if (static_lenb <= opt_lenb || s->strategy == Z_FIXED)
+#endif
+ opt_lenb = static_lenb;
} else {
Assert(buf != (char*)0, "lost buf");
@@ -960,7 +963,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
#ifdef FORCE_STORED
if (buf != (char*)0) { /* force stored block */
#else
- if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ if (stored_len + 4 <= opt_lenb && buf != (char*)0) {
/* 4: two words for the lengths */
#endif
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
@@ -971,21 +974,17 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
*/
_tr_stored_block(s, buf, stored_len, last);
-#ifdef FORCE_STATIC
- } else if (static_lenb >= 0) { /* force static trees */
-#else
- } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) {
-#endif
- send_bits(s, (STATIC_TREES<<1)+last, 3);
+ } else if (static_lenb == opt_lenb) {
+ send_bits(s, (STATIC_TREES<<1) + last, 3);
compress_block(s, (const ct_data *)static_ltree,
(const ct_data *)static_dtree);
#ifdef ZLIB_DEBUG
s->compressed_len += 3 + s->static_len;
#endif
} else {
- send_bits(s, (DYN_TREES<<1)+last, 3);
- send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
- max_blindex+1);
+ send_bits(s, (DYN_TREES<<1) + last, 3);
+ send_all_trees(s, s->l_desc.max_code + 1, s->d_desc.max_code + 1,
+ max_blindex + 1);
compress_block(s, (const ct_data *)s->dyn_ltree,
(const ct_data *)s->dyn_dtree);
#ifdef ZLIB_DEBUG
@@ -1004,22 +1003,22 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
s->compressed_len += 7; /* align on byte boundary */
#endif
}
- Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
- s->compressed_len-7*last));
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len >> 3,
+ s->compressed_len - 7*last));
}
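
The restructured _tr_flush_block() logic chooses among the three block encodings by byte cost: static trees fold into opt_lenb when they are no worse (or when Z_FIXED forces them), a stored block wins when stored_len + 4 <= opt_lenb and the raw bytes are still available, and dynamic trees are the fallback. A hypothetical reduction of that decision (names invented for illustration):

    /* Costs are in bytes; "stored" needs the raw bytes still in
     * the window and pays 4 bytes of length header. */
    enum block_type { STORED, STATIC_TREES_T, DYN_TREES_T };

    static enum block_type pick_block(unsigned long stored_len,
                                      unsigned long opt_lenb,
                                      unsigned long static_lenb,
                                      int have_buf, int z_fixed)
    {
        if (static_lenb <= opt_lenb || z_fixed)
            opt_lenb = static_lenb;        /* static folded into opt */
        if (have_buf && stored_len + 4 <= opt_lenb)
            return STORED;
        return static_lenb == opt_lenb ? STATIC_TREES_T : DYN_TREES_T;
    }
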
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
* the current block must be flushed.
*/
-int ZLIB_INTERNAL _tr_tally (s, dist, lc)
+int ZLIB_INTERNAL _tr_tally(s, dist, lc)
deflate_state *s;
unsigned dist; /* distance of matched string */
- unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+ unsigned lc; /* match length - MIN_MATCH or unmatched char (dist==0) */
{
- s->sym_buf[s->sym_next++] = dist;
- s->sym_buf[s->sym_next++] = dist >> 8;
- s->sym_buf[s->sym_next++] = lc;
+ s->sym_buf[s->sym_next++] = (uch)dist;
+ s->sym_buf[s->sym_next++] = (uch)(dist >> 8);
+ s->sym_buf[s->sym_next++] = (uch)lc;
if (dist == 0) {
/* lc is the unmatched char */
s->dyn_ltree[lc].Freq++;
@@ -1031,7 +1030,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
(ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
(ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
- s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_ltree[_length_code[lc] + LITERALS + 1].Freq++;
s->dyn_dtree[d_code(dist)].Freq++;
}
return (s->sym_next == s->sym_end);
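
_tr_tally() now writes each symbol into sym_buf as three explicit bytes: distance low, distance high, then the length code or literal, with dist == 0 marking a literal; compress_block() later reads the records back the same way. A sketch of that 3-byte record layout with hypothetical pack/unpack helpers:

    /* The 3-byte symbol record used by _tr_tally()/compress_block():
     * [dist_lo][dist_hi][lc]; dist == 0 means lc is a literal byte. */
    static unsigned pack_sym(unsigned char *sym_buf, unsigned next,
                             unsigned dist, unsigned lc)
    {
        sym_buf[next++] = (unsigned char)dist;
        sym_buf[next++] = (unsigned char)(dist >> 8);
        sym_buf[next++] = (unsigned char)lc;
        return next;
    }

    static void unpack_sym(const unsigned char *sym_buf, unsigned at,
                           unsigned *dist, unsigned *lc)
    {
        *dist = sym_buf[at] + ((unsigned)sym_buf[at + 1] << 8);
        *lc = sym_buf[at + 2];
    }
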
@@ -1061,7 +1060,7 @@ local void compress_block(s, ltree, dtree)
} else {
/* Here, lc is the match length - MIN_MATCH */
code = _length_code[lc];
- send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ send_code(s, code + LITERALS + 1, ltree); /* send length code */
extra = extra_lbits[code];
if (extra != 0) {
lc -= base_length[code];
@@ -1177,6 +1176,6 @@ local void bi_windup(s)
s->bi_buf = 0;
s->bi_valid = 0;
#ifdef ZLIB_DEBUG
- s->bits_sent = (s->bits_sent+7) & ~7;
+ s->bits_sent = (s->bits_sent + 7) & ~7;
#endif
}
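
Several of the trees.c hunks reformat the LSB-first bit packing: send_bits() accumulates codes in the 16-bit bi_buf and flushes it a short at a time, while bi_windup() pads to a byte boundary (the debug counter rounds up with (bits_sent + 7) & ~7). A compact standalone bit writer in the same spirit; struct bitw and the function names are hypothetical, and lengths are assumed to be at most 16 bits:

    /* Minimal LSB-first bit writer modeled on send_bits()/bi_windup():
     * bits accumulate in a 16-bit buffer and are emitted low byte
     * first; windup() pads the tail to a byte boundary with zeros. */
    struct bitw {
        unsigned short bi_buf;  /* pending bits, LSB first */
        int bi_valid;           /* number of valid bits in bi_buf */
        unsigned char *out;
        unsigned pos;
    };

    static void put_bits(struct bitw *w, unsigned value, int length)
    {
        w->bi_buf |= (unsigned short)(value << w->bi_valid);
        if (w->bi_valid > 16 - length) {    /* buffer would overflow */
            w->out[w->pos++] = (unsigned char)w->bi_buf;
            w->out[w->pos++] = (unsigned char)(w->bi_buf >> 8);
            w->bi_buf = (unsigned short)(value >> (16 - w->bi_valid));
            w->bi_valid += length - 16;
        } else
            w->bi_valid += length;
    }

    static void windup(struct bitw *w)
    {
        while (w->bi_valid > 0) {           /* flush remaining bytes */
            w->out[w->pos++] = (unsigned char)w->bi_buf;
            w->bi_buf >>= 8;
            w->bi_valid -= 8;
        }
        w->bi_buf = 0;
        w->bi_valid = 0;                    /* now byte aligned */
    }
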
diff --git a/zlib/uncompr.c b/zlib/uncompr.c
index f03a1a865e3..f9532f46c1a 100644
--- a/zlib/uncompr.c
+++ b/zlib/uncompr.c
@@ -24,7 +24,7 @@
Z_DATA_ERROR if the input data was corrupted, including if the input data is
an incomplete zlib stream.
*/
-int ZEXPORT uncompress2 (dest, destLen, source, sourceLen)
+int ZEXPORT uncompress2(dest, destLen, source, sourceLen)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
@@ -83,7 +83,7 @@ int ZEXPORT uncompress2 (dest, destLen, source, sourceLen)
err;
}
-int ZEXPORT uncompress (dest, destLen, source, sourceLen)
+int ZEXPORT uncompress(dest, destLen, source, sourceLen)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
diff --git a/zlib/win32/README-WIN32.txt b/zlib/win32/README-WIN32.txt
index 536cfec6f67..050197d80f7 100644
--- a/zlib/win32/README-WIN32.txt
+++ b/zlib/win32/README-WIN32.txt
@@ -1,6 +1,6 @@
ZLIB DATA COMPRESSION LIBRARY
-zlib 1.2.12 is a general purpose data compression library. All the code is
+zlib 1.2.13 is a general purpose data compression library. All the code is
thread safe. The data format used by the zlib library is described by RFCs
(Request for Comments) 1950 to 1952 in the files
http://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format)
@@ -22,7 +22,7 @@ before asking for help.
Manifest:
-The package zlib-1.2.12-win32-x86.zip will contain the following files:
+The package zlib-1.2.13-win32-x86.zip will contain the following files:
README-WIN32.txt This document
ChangeLog Changes since previous zlib packages
diff --git a/zlib/win32/zlib1.rc b/zlib/win32/zlib1.rc
index 234e641c329..ceb4ee5c69e 100644
--- a/zlib/win32/zlib1.rc
+++ b/zlib/win32/zlib1.rc
@@ -26,7 +26,7 @@ BEGIN
VALUE "FileDescription", "zlib data compression library\0"
VALUE "FileVersion", ZLIB_VERSION "\0"
VALUE "InternalName", "zlib1.dll\0"
- VALUE "LegalCopyright", "(C) 1995-2017 Jean-loup Gailly & Mark Adler\0"
+ VALUE "LegalCopyright", "(C) 1995-2022 Jean-loup Gailly & Mark Adler\0"
VALUE "OriginalFilename", "zlib1.dll\0"
VALUE "ProductName", "zlib\0"
VALUE "ProductVersion", ZLIB_VERSION "\0"
diff --git a/zlib/zconf.h.cmakein b/zlib/zconf.h.cmakein
index a7f24cce60f..247ba2461dd 100644
--- a/zlib/zconf.h.cmakein
+++ b/zlib/zconf.h.cmakein
@@ -40,6 +40,9 @@
# define crc32 z_crc32
# define crc32_combine z_crc32_combine
# define crc32_combine64 z_crc32_combine64
+# define crc32_combine_gen z_crc32_combine_gen
+# define crc32_combine_gen64 z_crc32_combine_gen64
+# define crc32_combine_op z_crc32_combine_op
# define crc32_z z_crc32_z
# define deflate z_deflate
# define deflateBound z_deflateBound
@@ -351,6 +354,9 @@
# ifdef FAR
# undef FAR
# endif
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
# include <windows.h>
/* No need for _export, use ZLIB.DEF instead. */
/* For complete Windows compatibility, use WINAPI, not __stdcall. */
@@ -469,11 +475,18 @@ typedef uLong FAR uLongf;
# undef _LARGEFILE64_SOURCE
#endif
-#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
-# define Z_HAVE_UNISTD_H
+#ifndef Z_HAVE_UNISTD_H
+# ifdef __WATCOMC__
+# define Z_HAVE_UNISTD_H
+# endif
+#endif
+#ifndef Z_HAVE_UNISTD_H
+# if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32)
+# define Z_HAVE_UNISTD_H
+# endif
#endif
#ifndef Z_SOLO
-# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+# if defined(Z_HAVE_UNISTD_H)
# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
# ifdef VMS
# include <unixio.h> /* for off_t */
diff --git a/zlib/zconf.h.in b/zlib/zconf.h.in
index 5e1d68a004e..bf977d3e70a 100644
--- a/zlib/zconf.h.in
+++ b/zlib/zconf.h.in
@@ -38,6 +38,9 @@
# define crc32 z_crc32
# define crc32_combine z_crc32_combine
# define crc32_combine64 z_crc32_combine64
+# define crc32_combine_gen z_crc32_combine_gen
+# define crc32_combine_gen64 z_crc32_combine_gen64
+# define crc32_combine_op z_crc32_combine_op
# define crc32_z z_crc32_z
# define deflate z_deflate
# define deflateBound z_deflateBound
@@ -349,6 +352,9 @@
# ifdef FAR
# undef FAR
# endif
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
# include <windows.h>
/* No need for _export, use ZLIB.DEF instead. */
/* For complete Windows compatibility, use WINAPI, not __stdcall. */
@@ -467,11 +473,18 @@ typedef uLong FAR uLongf;
# undef _LARGEFILE64_SOURCE
#endif
-#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
-# define Z_HAVE_UNISTD_H
+#ifndef Z_HAVE_UNISTD_H
+# ifdef __WATCOMC__
+# define Z_HAVE_UNISTD_H
+# endif
+#endif
+#ifndef Z_HAVE_UNISTD_H
+# if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32)
+# define Z_HAVE_UNISTD_H
+# endif
#endif
#ifndef Z_SOLO
-# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+# if defined(Z_HAVE_UNISTD_H)
# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
# ifdef VMS
# include <unixio.h> /* for off_t */
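
Both zconf headers replace one compound #if with a chain of independent #ifndef blocks, so each source of the setting (Watcom, or _LARGEFILE64_SOURCE on non-Windows) can define Z_HAVE_UNISTD_H exactly once, and <unistd.h> is then included only when the macro is actually set. The same define-once/test-once pattern distilled, with a hypothetical HAVE_POSIX_IO macro:

    /* Each probe may set the macro; the include tests only the
     * macro, never the individual probes, so adding a new probe
     * never touches the include logic. */
    #ifndef HAVE_POSIX_IO
    #  ifdef __WATCOMC__
    #    define HAVE_POSIX_IO
    #  endif
    #endif
    #ifndef HAVE_POSIX_IO
    #  if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32)
    #    define HAVE_POSIX_IO
    #  endif
    #endif

    #ifdef HAVE_POSIX_IO
    #  include <unistd.h>   /* SEEK_*, off_t */
    #endif
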
diff --git a/zlib/zlib.3 b/zlib/zlib.3
index bcaebd9f02c..6f6e91404df 100644
--- a/zlib/zlib.3
+++ b/zlib/zlib.3
@@ -1,4 +1,4 @@
-.TH ZLIB 3 "27 Mar 2022"
+.TH ZLIB 3 "13 Oct 2022"
.SH NAME
zlib \- compression/decompression library
.SH SYNOPSIS
@@ -105,7 +105,7 @@ before asking for help.
Send questions and/or comments to zlib@gzip.org,
or (for the Windows DLL version) to Gilles Vollant (info@winimage.com).
.SH AUTHORS AND LICENSE
-Version 1.2.12
+Version 1.2.13
.LP
Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
.LP
diff --git a/zlib/zlib.h b/zlib/zlib.h
index a55e85d89cb..429dfbb7659 100644
--- a/zlib/zlib.h
+++ b/zlib/zlib.h
@@ -1,5 +1,5 @@
/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.2.12, March 11th, 2022
+ version 1.2.13, October 13th, 2022
Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
@@ -37,11 +37,11 @@
extern "C" {
#endif
-#define ZLIB_VERSION "1.2.12"
-#define ZLIB_VERNUM 0x12c0
+#define ZLIB_VERSION "1.2.13"
+#define ZLIB_VERNUM 0x12d0
#define ZLIB_VER_MAJOR 1
#define ZLIB_VER_MINOR 2
-#define ZLIB_VER_REVISION 12
+#define ZLIB_VER_REVISION 13
#define ZLIB_VER_SUBREVISION 0
/*
@@ -276,7 +276,7 @@ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush));
== 0), or after each call of deflate(). If deflate returns Z_OK and with
zero avail_out, it must be called again after making room in the output
buffer because there might be more output pending. See deflatePending(),
- which can be used if desired to determine whether or not there is more ouput
+ which can be used if desired to determine whether or not there is more output
in that case.
Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to
@@ -660,7 +660,7 @@ ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm,
to dictionary. dictionary must have enough space, where 32768 bytes is
always enough. If deflateGetDictionary() is called with dictionary equal to
Z_NULL, then only the dictionary length is returned, and nothing is copied.
- Similary, if dictLength is Z_NULL, then it is not set.
+ Similarly, if dictLength is Z_NULL, then it is not set.
deflateGetDictionary() may return a length less than the window size, even
when more than the window size in input has been provided. It may return up
@@ -915,7 +915,7 @@ ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm,
to dictionary. dictionary must have enough space, where 32768 bytes is
always enough. If inflateGetDictionary() is called with dictionary equal to
Z_NULL, then only the dictionary length is returned, and nothing is copied.
- Similary, if dictLength is Z_NULL, then it is not set.
+ Similarly, if dictLength is Z_NULL, then it is not set.
inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
stream state is inconsistent.
@@ -1437,7 +1437,7 @@ ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems,
In the event that the end of file is reached and only a partial item is
available at the end, i.e. the remaining uncompressed data length is not a
- multiple of size, then the final partial item is nevetheless read into buf
+ multiple of size, then the final partial item is nevertheless read into buf
and the end-of-file flag is set. The length of the partial item read is not
provided, but could be inferred from the result of gztell(). This behavior
is the same as the behavior of fread() implementations in common libraries,
@@ -1913,7 +1913,7 @@ ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp));
ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void));
ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int));
ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int));
-ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp));
+ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF((z_streamp));
ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp));
ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp));
#if defined(_WIN32) && !defined(Z_SOLO)
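
ZLIB_VERNUM packs the version one nibble per component, so 1.2.13.0 becomes 0x12d0 (major 1, minor 2, revision 0xd, subrevision 0). zlib.h's documented idiom for catching a header/library mismatch compares ZLIB_VERSION against the run-time zlibVersion() call; a small usage sketch built on those real APIs:

    #include <stdio.h>
    #include <zlib.h>

    /* Compile-time vs run-time version check: ZLIB_VERNUM 0x12d0
     * encodes 1.2.13.0 one nibble per component; zlibVersion()
     * reports what the linked library was built as. */
    int main(void)
    {
        printf("header %s (0x%04x), library %s\n",
               ZLIB_VERSION, ZLIB_VERNUM, zlibVersion());
        if (zlibVersion()[0] != ZLIB_VERSION[0]) {
            fprintf(stderr, "incompatible zlib major version\n");
            return 1;
        }
        return 0;
    }
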
diff --git a/zlib/zlib2ansi b/zlib/zlib2ansi
index 15e3e165f37..23b2a1d5a3e 100755
--- a/zlib/zlib2ansi
+++ b/zlib/zlib2ansi
@@ -8,7 +8,7 @@
# TODO
#
-# Asumes no function pointer parameters. unless they are typedefed.
+# Assumes no function pointer parameters. unless they are typedefed.
# Assumes no literal strings that look like function definitions
# Assumes functions start at the beginning of a line
@@ -104,7 +104,7 @@ sub StripComments
no warnings;
- # Strip C & C++ coments
+ # Strip C & C++ comments
# From the perlfaq
$_[0] =~
diff --git a/zlib/zutil.c b/zlib/zutil.c
index dcab28a0d51..9543ae825e3 100644
--- a/zlib/zutil.c
+++ b/zlib/zutil.c
@@ -61,9 +61,11 @@ uLong ZEXPORT zlibCompileFlags()
#ifdef ZLIB_DEBUG
flags += 1 << 8;
#endif
+ /*
#if defined(ASMV) || defined(ASMINF)
flags += 1 << 9;
#endif
+ */
#ifdef ZLIB_WINAPI
flags += 1 << 10;
#endif
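
With the ASMV/ASMINF block commented out, bit 9 of zlibCompileFlags() is now always clear. The function is a real zlib API whose bits are documented in zlib.h (bit 8 ZLIB_DEBUG, bit 9 assembler code, bit 10 ZLIB_WINAPI); a small probe:

    #include <stdio.h>
    #include <zlib.h>

    /* Inspect a few documented zlibCompileFlags() bits; after this
     * change bit 9 (assembler code) always reads 0. */
    int main(void)
    {
        uLong flags = zlibCompileFlags();
        printf("ZLIB_DEBUG: %lu\n", (flags >> 8) & 1);
        printf("ASM code:   %lu\n", (flags >> 9) & 1);
        printf("WINAPI:     %lu\n", (flags >> 10) & 1);
        return 0;
    }
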
@@ -119,7 +121,7 @@ uLong ZEXPORT zlibCompileFlags()
# endif
int ZLIB_INTERNAL z_verbose = verbose;
-void ZLIB_INTERNAL z_error (m)
+void ZLIB_INTERNAL z_error(m)
char *m;
{
fprintf(stderr, "%s\n", m);
@@ -214,7 +216,7 @@ local ptr_table table[MAX_PTR];
* a protected system like OS/2. Use Microsoft C instead.
*/
-voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size)
+voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size)
{
voidpf buf;
ulg bsize = (ulg)items*size;
@@ -240,7 +242,7 @@ voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size)
return buf;
}
-void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr)
{
int n;
@@ -277,13 +279,13 @@ void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
# define _hfree hfree
#endif
-voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size)
+voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, uInt items, uInt size)
{
(void)opaque;
return _halloc((long)items, size);
}
-void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr)
{
(void)opaque;
_hfree(ptr);
@@ -302,7 +304,7 @@ extern voidp calloc OF((uInt items, uInt size));
extern void free OF((voidpf ptr));
#endif
-voidpf ZLIB_INTERNAL zcalloc (opaque, items, size)
+voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
voidpf opaque;
unsigned items;
unsigned size;
@@ -312,7 +314,7 @@ voidpf ZLIB_INTERNAL zcalloc (opaque, items, size)
(voidpf)calloc(items, size);
}
-void ZLIB_INTERNAL zcfree (opaque, ptr)
+void ZLIB_INTERNAL zcfree(opaque, ptr)
voidpf opaque;
voidpf ptr;
{
diff --git a/zlib/zutil.h b/zlib/zutil.h
index d9a20ae1bf4..0bc7f4ecd1c 100644
--- a/zlib/zutil.h
+++ b/zlib/zutil.h
@@ -193,6 +193,7 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
(!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0)
ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine_gen64 OF((z_off_t));
#endif
/* common defaults */
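
The added declaration is the 64-bit-offset variant of crc32_combine(). The combine family merges the CRCs of two concatenated pieces without re-reading the data, which is useful for chunked or parallel checksumming; a usage sketch built only on the long-standing crc32() and crc32_combine() APIs:

    #include <stdio.h>
    #include <zlib.h>

    /* crc32_combine(crc1, crc2, len2) equals the CRC-32 of the
     * concatenation of the two buffers, computed without touching
     * the bytes again. */
    int main(void)
    {
        const unsigned char a[] = "hello ", b[] = "world";
        uLong ca = crc32(crc32(0L, Z_NULL, 0), a, 6);
        uLong cb = crc32(crc32(0L, Z_NULL, 0), b, 5);
        uLong whole = crc32(crc32(0L, Z_NULL, 0),
                            (const unsigned char *)"hello world", 11);
        uLong combined = crc32_combine(ca, cb, 5);
        printf("whole=%08lx combined=%08lx %s\n", whole, combined,
               whole == combined ? "match" : "MISMATCH");
        return 0;
    }
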