Diffstat (limited to 'mysql-test')
-rw-r--r--  mysql-test/main/derived_split_innodb.result           | 537
-rw-r--r--  mysql-test/main/derived_split_innodb.test             | 212
-rw-r--r--  mysql-test/main/explain_innodb.result                 |  18
-rw-r--r--  mysql-test/main/explain_innodb.test                   |  19
-rw-r--r--  mysql-test/main/opt_trace.result                      | 250
-rw-r--r--  mysql-test/main/opt_trace.test                        |  77
-rw-r--r--  mysql-test/main/selectivity.result                    |  69
-rw-r--r--  mysql-test/main/selectivity.test                      |  84
-rw-r--r--  mysql-test/main/selectivity_innodb.result             |  69
-rw-r--r--  mysql-test/main/selectivity_innodb_notembedded.result | 104
-rw-r--r--  mysql-test/main/selectivity_innodb_notembedded.test   |  16
-rw-r--r--  mysql-test/main/selectivity_notembedded.result        |  95
-rw-r--r--  mysql-test/main/selectivity_notembedded.test          | 121
13 files changed, 1379 insertions, 292 deletions
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 74876836a53..2ca0de3bd91 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -287,4 +287,541 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
+#
+# MDEV-26301: Split optimization refills temporary table too many times
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+explain
+select * from
+(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
+1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+# The important part in the below output is:
+# "lateral": 1,
+# "query_block": {
+# "select_id": 2,
+# "r_loops": 5, <-- must be 5, not 30.
+analyze format=json select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "const_condition": "1",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 5,
+ "r_rows": 5,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ }
+ },
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 2,
+ "r_rows": 2,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ }
+ },
+ {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 10,
+ "rows": 3,
+ "r_rows": 3,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ }
+ },
+ {
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 30,
+ "rows": 10,
+ "r_rows": 1,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))",
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 5,
+ "r_total_time_ms": "REPLACED",
+ "outer_ref_condition": "t1.b is not null",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t10",
+ "access_type": "ref",
+ "possible_keys": ["grp_id"],
+ "key": "grp_id",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 100,
+ "r_rows": 100,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ }
+ },
+ {
+ "block-nl-join": {
+ "table": {
+ "table_name": "t11",
+ "access_type": "ALL",
+ "r_loops": 5,
+ "rows": 10,
+ "r_rows": 10,
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "1Kb",
+ "join_type": "BNL",
+ "attached_condition": "trigcond(t11.col1 = t10.col1)",
+ "r_loops": 500,
+ "r_filtered": 10,
+ "r_unpack_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "r_effective_rows": 10
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+ }
+}
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+explain
+select * from
+t21, t22,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1 and t22.pk=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+explain
+select * from
+t21,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+create table t5 (
+pk int primary key
+);
+insert into t5 select seq from seq_1_to_1000;
+explain
+select * from
+t21,
+(
+(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
+1 PRIMARY t2 ref a a 5 test.t1.b 2
+1 PRIMARY t3 ref a a 5 test.t1.b 3
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 1 1 100
+1 1 1 2 1 2 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 1 2 100
+2 2 2 2 2 2 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 1 3 100
+3 3 3 2 3 2 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 1 4 100
+4 4 4 2 4 2 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 1 5 100
+5 5 5 2 5 2 5 100
+5 5 5 2 5 3 5 100
+set join_cache_level=4;
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 hash_ALL NULL #hash#$hj 5 test.t10.col1 10 Using where; Using join buffer (flat, BNLH join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 1 1 100
+1 1 1 2 1 2 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 1 2 100
+2 2 2 2 2 2 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 1 3 100
+3 3 3 2 3 2 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 1 4 100
+4 4 4 2 4 2 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 1 5 100
+5 5 5 2 5 2 5 100
+5 5 5 2 5 3 5 100
+set join_cache_level=default;
+drop index a on t2;
+drop index a on t3;
+explain select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 1000 Using where
+2 DERIVED t10 ALL grp_id NULL NULL NULL 10000 Using temporary; Using filesort
+2 DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select *
+from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join
+t3
+on t3.a=t1.b
+)
+left join
+(
+select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id
+)dt
+on dt.grp_id=t1.b;
+a b a b a b grp_id count(*)
+1 1 1 1 1 1 1 100
+1 1 1 2 1 1 1 100
+1 1 1 1 1 2 1 100
+1 1 1 2 1 2 1 100
+1 1 1 1 1 3 1 100
+1 1 1 2 1 3 1 100
+2 2 2 1 2 1 2 100
+2 2 2 2 2 1 2 100
+2 2 2 1 2 2 2 100
+2 2 2 2 2 2 2 100
+2 2 2 1 2 3 2 100
+2 2 2 2 2 3 2 100
+3 3 3 1 3 1 3 100
+3 3 3 2 3 1 3 100
+3 3 3 1 3 2 3 100
+3 3 3 2 3 2 3 100
+3 3 3 1 3 3 3 100
+3 3 3 2 3 3 3 100
+4 4 4 1 4 1 4 100
+4 4 4 2 4 1 4 100
+4 4 4 1 4 2 4 100
+4 4 4 2 4 2 4 100
+4 4 4 1 4 3 4 100
+4 4 4 2 4 3 4 100
+5 5 5 1 5 1 5 100
+5 5 5 2 5 1 5 100
+5 5 5 1 5 2 5 100
+5 5 5 2 5 2 5 100
+5 5 5 1 5 3 5 100
+5 5 5 2 5 3 5 100
+drop table t1,t2,t3;
+drop table t10, t11;
+# End of 10.4 tests
SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent;
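Note on the new ANALYZE output above: the derived table <derived2> is probed r_loops=30 times (5 rows from t1, each matching 2 rows in t2 and 3 rows in t3), while with the MDEV-26301 fix its lateral query block reports r_loops=5, because the temporary table is refilled only once per distinct value of t1.b. A minimal sanity check of that count, assuming the tables created above (not part of the committed test):

# 5 distinct values of t1.b => the lateral derived table should be refilled 5 times,
# even though it is read 5 * 2 * 3 = 30 times
select count(distinct b) as expected_refills from t1;
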
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 1ebe27cd12c..2dd7988f223 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -233,4 +233,216 @@ drop table t3, t4;
--echo # End of 10.3 tests
+
+--echo #
+--echo # MDEV-26301: Split optimization refills temporary table too many times
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+explain
+select * from
+ (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+
+# Now, create tables for Groups.
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q1=
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+eval
+explain $q1;
+
+--echo # The important part in the below output is:
+--echo # "lateral": 1,
+--echo # "query_block": {
+--echo # "select_id": 2,
+--echo # "r_loops": 5, <-- must be 5, not 30.
+--source include/analyze-format.inc
+
+eval
+analyze format=json $q1;
+
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+
+# Same as above but throw in a couple of const tables.
+explain
+select * from
+ t21, t22,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1 and t22.pk=2;
+
+explain
+select * from
+ t21,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+# And also add a non-const table
+
+create table t5 (
+ pk int primary key
+ );
+insert into t5 select seq from seq_1_to_1000;
+
+explain
+select * from
+ t21,
+ (
+ (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q=
+select *
+from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join
+ t3
+ on t3.a=t1.b
+ )
+ left join
+ (
+ select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id
+ )dt
+ on dt.grp_id=t1.b;
+
+eval explain $q;
+eval $q;
+
+set join_cache_level=4;
+eval explain $q;
+eval $q;
+
+set join_cache_level=default;
+
+drop index a on t2;
+drop index a on t3;
+
+eval explain $q;
+eval $q;
+
+drop table t1,t2,t3;
+drop table t10, t11;
+
+--echo # End of 10.4 tests
+
SET GLOBAL innodb_stats_persistent=@save_innodb_stats_persistent;
diff --git a/mysql-test/main/explain_innodb.result b/mysql-test/main/explain_innodb.result
index b46665c279c..c44d7baadea 100644
--- a/mysql-test/main/explain_innodb.result
+++ b/mysql-test/main/explain_innodb.result
@@ -18,3 +18,21 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t1 range NULL id 53 NULL 2 Using index for group-by
SET GLOBAL slow_query_log = @sql_tmp;
drop table t1;
+#
+# MDEV-31181: Server crash in subselect_uniquesubquery_engine::print
+# upon EXPLAIN EXTENDED DELETE
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+EXPLAIN EXTENDED DELETE FROM t1 WHERE a IN (SELECT pk FROM t2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using index
+Warnings:
+Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`))))
+drop table t1, t2;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/explain_innodb.test b/mysql-test/main/explain_innodb.test
index 2c29a6e26da..3dcad4c2d49 100644
--- a/mysql-test/main/explain_innodb.test
+++ b/mysql-test/main/explain_innodb.test
@@ -18,3 +18,22 @@ SELECT * FROM (SELECT id FROM t1 GROUP BY id) dt WHERE 1=0;
SET GLOBAL slow_query_log = @sql_tmp;
drop table t1;
+
+
+--echo #
+--echo # MDEV-31181: Server crash in subselect_uniquesubquery_engine::print
+--echo # upon EXPLAIN EXTENDED DELETE
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+
+EXPLAIN EXTENDED DELETE FROM t1 WHERE a IN (SELECT pk FROM t2);
+
+drop table t1, t2;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index 70038a5f605..77e6103f783 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -481,6 +481,11 @@ select * from v2 {
]
},
{
+ "check_split_materialized": {
+ "not_applicable": "no candidate field can be accessed through ref"
+ }
+ },
+ {
"best_join_order": ["t1"]
},
{
@@ -828,6 +833,11 @@ explain select * from v1 {
]
},
{
+ "check_split_materialized": {
+ "not_applicable": "group list has no candidates"
+ }
+ },
+ {
"best_join_order": ["t1"]
},
{
@@ -10343,6 +10353,110 @@ SET optimizer_trace=DEFAULT;
DROP VIEW v;
DROP TABLE t;
#
+# MDEV-26301: Split optimization improvements: Optimizer Trace coverage
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+set optimizer_trace=1;
+explain
+select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
+from information_schema.optimizer_trace;
+JS
+[
+ {
+ "split_candidates":
+ ["t10.grp_id"]
+ }
+]
+select
+json_detailed(
+json_remove(
+json_extract(trace, '$**.choose_best_splitting')
+, '$[0].split_plan_search[0]'
+ )
+) as JS
+from information_schema.optimizer_trace;
+JS
+[
+ {
+ "considered_keys":
+ [
+ {
+ "table_name": "t10",
+ "index": "grp_id",
+ "rec_per_key": 100,
+ "param_tables": 1
+ }
+ ],
+ "refills": 5,
+ "spl_pd_boundary": 2,
+ "split_plan_search":
+ [],
+ "lead_table": "t10",
+ "index": "grp_id",
+ "parts": 1,
+ "split_sel": 0.001,
+ "cost": 2535.968504,
+ "unsplit_cost": 253440.0075,
+ "records": 100,
+ "chosen": true
+ }
+]
+drop table t1,t2,t3,t10,t11;
+set optimizer_trace=DEFAULT;
+#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';
@@ -10763,79 +10877,83 @@ from
information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.choose_best_splitting'))
[
- [
- {
- "considered_execution_plans":
- [
- {
- "plan_prefix":
- [],
- "get_costs_for_tables":
- [
- {
- "best_access_path":
+ {
+ "considered_keys":
+ []
+ },
+ {
+ "considered_keys":
+ [
+ {
+ "table_name": "t2",
+ "index": "idx_a",
+ "rec_per_key": 1.8367,
+ "param_tables": 1
+ }
+ ],
+ "refills": 4,
+ "spl_pd_boundary": 2,
+ "split_plan_search":
+ [
+ {
+ "considered_execution_plans":
+ [
+ {
+ "plan_prefix":
+ [],
+ "get_costs_for_tables":
+ [
{
- "table": "t2",
- "considered_access_paths":
- [
+ "best_access_path":
+ {
+ "table": "t2",
+ "considered_access_paths":
+ [
+ {
+ "access_type": "ref",
+ "index": "idx_a",
+ "used_range_estimates": false,
+ "reason": "not available",
+ "rows": 1.8367,
+ "cost": 2.000585794,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "chosen_access_method":
{
- "access_type": "ref",
- "index": "idx_a",
- "used_range_estimates": false,
- "reason": "not available",
- "rows": 1.8367,
+ "type": "ref",
+ "records": 1.8367,
"cost": 2.000585794,
- "chosen": true
- },
- {
- "type": "scan",
- "chosen": false,
- "cause": "cost"
+ "uses_join_buffering": false
}
- ],
- "chosen_access_method":
- {
- "type": "ref",
- "records": 1.8367,
- "cost": 2.000585794,
- "uses_join_buffering": false
}
}
- }
- ]
- },
- {
- "plan_prefix":
- [],
- "table": "t2",
- "rows_for_plan": 1.8367,
- "cost_for_plan": 2.367925794,
- "cost_for_sorting": 1.8367
- }
- ]
- },
- {
- "best_splitting":
- {
- "table": "t2",
- "key": "idx_a",
- "record_count": 4,
- "cost": 2.488945919,
- "unsplit_cost": 25.72361682
+ ]
+ },
+ {
+ "plan_prefix":
+ [],
+ "table": "t2",
+ "rows_for_plan": 1.8367,
+ "cost_for_plan": 2.367925794,
+ "cost_for_sorting": 1.8367
+ }
+ ]
}
- }
- ]
-]
-select
-json_detailed(json_extract(trace, '$**.lateral_derived'))
-from
-information_schema.optimizer_trace;
-json_detailed(json_extract(trace, '$**.lateral_derived'))
-[
- {
- "startup_cost": 9.955783677,
- "splitting_cost": 2.488945919,
- "records": 1
+ ],
+ "lead_table": "t2",
+ "index": "idx_a",
+ "parts": 1,
+ "split_sel": 0.020407778,
+ "cost": 2.488945919,
+ "unsplit_cost": 25.72361682,
+ "records": 1,
+ "chosen": true
}
]
drop table t1,t2;
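The opt_trace changes above replace the old lateral_derived and best_splitting trace objects with a check_split_materialized node and an expanded choose_best_splitting object (considered_keys, refills, spl_pd_boundary, split_plan_search, lead_table, split_sel, unsplit_cost, chosen). A hedged one-off query, modelled on the statements already used in this test, that pulls both new nodes from the last traced statement:

# Assumes optimizer_trace was enabled for the preceding statement (ad-hoc check, not in the test)
select json_detailed(json_extract(trace, '$**.check_split_materialized')) as split_candidates,
       json_detailed(json_extract(trace, '$**.choose_best_splitting')) as splitting_decision
from information_schema.optimizer_trace;
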
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 0cacc4a60ea..3f1f1fd1204 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -702,6 +702,76 @@ DROP VIEW v;
DROP TABLE t;
--echo #
+--echo # MDEV-26301: Split optimization improvements: Optimizer Trace coverage
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+set optimizer_trace=1;
+explain
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
+from information_schema.optimizer_trace;
+
+select
+ json_detailed(
+ json_remove(
+ json_extract(trace, '$**.choose_best_splitting')
+ , '$[0].split_plan_search[0]'
+ )
+ ) as JS
+from information_schema.optimizer_trace;
+
+drop table t1,t2,t3,t10,t11;
+set optimizer_trace=DEFAULT;
+
+--echo #
--echo # End of 10.4 tests
--echo #
@@ -909,13 +979,6 @@ from
information_schema.optimizer_trace;
--enable_view_protocol
-# Same as above. just to show that splitting plan has some coverage in the
-# trace.
-select
- json_detailed(json_extract(trace, '$**.lateral_derived'))
-from
- information_schema.optimizer_trace;
-
drop table t1,t2;
--echo #
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 59d76da8816..62466de1113 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -1943,75 +1943,6 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
DROP TABLE t1;
# End of 10.2 tests
-#
-# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
-#
-create table t0(a int);
-insert into t0 select 1 from seq_1_to_78;
-create table t1(a int);
-insert into t1 select 1 from seq_1_to_26;
-create table t10 (a int);
-insert into t10 select 0 from t0, seq_1_to_4;
-insert into t10 select 8693 from t1;
-insert into t10 select 8694 from t1;
-insert into t10 select 8695 from t1;
-insert into t10 select 34783 from t1;
-insert into t10 select 34784 from t1;
-insert into t10 select 34785 from t1;
-insert into t10 select 34785 from t0, seq_1_to_8;
-insert into t10 select 65214 from t1;
-insert into t10 select 65215 from t1;
-insert into t10 select 65216 from t1;
-insert into t10 select 65216 from t0, seq_1_to_52;
-insert into t10 select 65217 from t1;
-insert into t10 select 65218 from t1;
-insert into t10 select 65219 from t1;
-insert into t10 select 65219 from t0;
-insert into t10 select 73913 from t1;
-insert into t10 select 73914 from t1;
-insert into t10 select 73915 from t1;
-insert into t10 select 73915 from t0, seq_1_to_40;
-insert into t10 select 78257 from t1;
-insert into t10 select 78258 from t1;
-insert into t10 select 78259 from t1;
-insert into t10 select 91300 from t1;
-insert into t10 select 91301 from t1;
-insert into t10 select 91302 from t1;
-insert into t10 select 91302 from t0, seq_1_to_6;
-insert into t10 select 91303 from t1;
-insert into t10 select 91304 from t1;
-insert into t10 select 91305 from t1;
-insert into t10 select 91305 from t0, seq_1_to_8;
-insert into t10 select 99998 from t1;
-insert into t10 select 99999 from t1;
-insert into t10 select 100000 from t1;
-set use_stat_tables=preferably;
-analyze table t10 persistent for all;
-Table Op Msg_type Msg_text
-test.t10 analyze status Engine-independent statistics collected
-test.t10 analyze status OK
-flush tables;
-set @tmp=@@optimizer_trace;
-set optimizer_trace=1;
-explain select * from t10 where a in (91303);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
-# Must have selectivity_from_histogram <= 1.0:
-select json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
-from information_schema.optimizer_trace;
-json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
-[
- [
- {
- "column_name": "a",
- "ranges":
- ["91303 <= a <= 91303"],
- "selectivity_from_histogram": 0.035714283
- }
- ]
-]
-set optimizer_trace=@tmp;
-drop table t0,t1,t10;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index 6f9d4d63eee..df3850d74b7 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -1326,90 +1326,12 @@ DROP TABLE t1;
--echo # End of 10.2 tests
---echo #
---echo # MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
---echo #
-create table t0(a int); # This holds how many rows we hold in a bucket.
-insert into t0 select 1 from seq_1_to_78;
-
-create table t1(a int); # one-third of a bucket
-insert into t1 select 1 from seq_1_to_26;
-
-create table t10 (a int);
-insert into t10 select 0 from t0, seq_1_to_4;
-
-insert into t10 select 8693 from t1;
-insert into t10 select 8694 from t1;
-insert into t10 select 8695 from t1;
-
-
-insert into t10 select 34783 from t1;
-insert into t10 select 34784 from t1;
-insert into t10 select 34785 from t1;
-
-
-insert into t10 select 34785 from t0, seq_1_to_8;
-
-insert into t10 select 65214 from t1;
-insert into t10 select 65215 from t1;
-insert into t10 select 65216 from t1;
-
-insert into t10 select 65216 from t0, seq_1_to_52;
-
-insert into t10 select 65217 from t1;
-insert into t10 select 65218 from t1;
-insert into t10 select 65219 from t1;
-
-insert into t10 select 65219 from t0;
-
-
-insert into t10 select 73913 from t1;
-insert into t10 select 73914 from t1;
-insert into t10 select 73915 from t1;
-
-insert into t10 select 73915 from t0, seq_1_to_40;
-
-insert into t10 select 78257 from t1;
-insert into t10 select 78258 from t1;
-insert into t10 select 78259 from t1;
-
-insert into t10 select 91300 from t1;
-insert into t10 select 91301 from t1;
-insert into t10 select 91302 from t1;
-
-insert into t10 select 91302 from t0, seq_1_to_6;
-
-insert into t10 select 91303 from t1; # Only 1/3rd of bucket matches the search tuple
-insert into t10 select 91304 from t1;
-insert into t10 select 91305 from t1;
-
-insert into t10 select 91305 from t0, seq_1_to_8;
-
-insert into t10 select 99998 from t1;
-insert into t10 select 99999 from t1;
-insert into t10 select 100000 from t1;
-
-set use_stat_tables=preferably;
-analyze table t10 persistent for all;
-flush tables;
-
-set @tmp=@@optimizer_trace;
-set optimizer_trace=1;
-explain select * from t10 where a in (91303);
-
---echo # Must have selectivity_from_histogram <= 1.0:
-select json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
-from information_schema.optimizer_trace;
-
-set optimizer_trace=@tmp;
-drop table t0,t1,t10;
-
-set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
-set histogram_size=@save_histogram_size;
-set use_stat_tables= @save_use_stat_tables;
#
# Clean up
#
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
--source include/restore_charset.inc
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index ddb3cf3795b..a4366214643 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -1955,75 +1955,6 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
DROP TABLE t1;
# End of 10.2 tests
-#
-# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
-#
-create table t0(a int);
-insert into t0 select 1 from seq_1_to_78;
-create table t1(a int);
-insert into t1 select 1 from seq_1_to_26;
-create table t10 (a int);
-insert into t10 select 0 from t0, seq_1_to_4;
-insert into t10 select 8693 from t1;
-insert into t10 select 8694 from t1;
-insert into t10 select 8695 from t1;
-insert into t10 select 34783 from t1;
-insert into t10 select 34784 from t1;
-insert into t10 select 34785 from t1;
-insert into t10 select 34785 from t0, seq_1_to_8;
-insert into t10 select 65214 from t1;
-insert into t10 select 65215 from t1;
-insert into t10 select 65216 from t1;
-insert into t10 select 65216 from t0, seq_1_to_52;
-insert into t10 select 65217 from t1;
-insert into t10 select 65218 from t1;
-insert into t10 select 65219 from t1;
-insert into t10 select 65219 from t0;
-insert into t10 select 73913 from t1;
-insert into t10 select 73914 from t1;
-insert into t10 select 73915 from t1;
-insert into t10 select 73915 from t0, seq_1_to_40;
-insert into t10 select 78257 from t1;
-insert into t10 select 78258 from t1;
-insert into t10 select 78259 from t1;
-insert into t10 select 91300 from t1;
-insert into t10 select 91301 from t1;
-insert into t10 select 91302 from t1;
-insert into t10 select 91302 from t0, seq_1_to_6;
-insert into t10 select 91303 from t1;
-insert into t10 select 91304 from t1;
-insert into t10 select 91305 from t1;
-insert into t10 select 91305 from t0, seq_1_to_8;
-insert into t10 select 99998 from t1;
-insert into t10 select 99999 from t1;
-insert into t10 select 100000 from t1;
-set use_stat_tables=preferably;
-analyze table t10 persistent for all;
-Table Op Msg_type Msg_text
-test.t10 analyze status Engine-independent statistics collected
-test.t10 analyze status OK
-flush tables;
-set @tmp=@@optimizer_trace;
-set optimizer_trace=1;
-explain select * from t10 where a in (91303);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
-# Must have selectivity_from_histogram <= 1.0:
-select json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
-from information_schema.optimizer_trace;
-json_detailed(json_extract(trace, '$**.selectivity_for_columns'))
-[
- [
- {
- "column_name": "a",
- "ranges":
- ["91303 <= a <= 91303"],
- "selectivity_from_histogram": 0.035714283
- }
- ]
-]
-set optimizer_trace=@tmp;
-drop table t0,t1,t10;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set histogram_size=@save_histogram_size;
set use_stat_tables= @save_use_stat_tables;
diff --git a/mysql-test/main/selectivity_innodb_notembedded.result b/mysql-test/main/selectivity_innodb_notembedded.result
new file mode 100644
index 00000000000..8b06fe7556b
--- /dev/null
+++ b/mysql-test/main/selectivity_innodb_notembedded.result
@@ -0,0 +1,104 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+Warnings:
+Warning 1287 '@@storage_engine' is deprecated and will be removed in a future release. Please use '@@default_storage_engine' instead
+set @save_optimizer_switch_for_selectivity_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+drop table if exists t0,t1,t2,t3;
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+set optimizer_use_condition_selectivity=3;
+#
+# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+#
+create table t0(a int);
+insert into t0 select 1 from seq_1_to_78;
+create table t1(a int);
+insert into t1 select 1 from seq_1_to_26;
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+insert into t10 select 34785 from t0, seq_1_to_8;
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+insert into t10 select 65216 from t0, seq_1_to_52;
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+insert into t10 select 65219 from t0;
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+insert into t10 select 73915 from t0, seq_1_to_40;
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+insert into t10 select 91302 from t0, seq_1_to_6;
+insert into t10 select 91303 from t1;
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+insert into t10 select 91305 from t0, seq_1_to_8;
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status OK
+flush tables;
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
+# Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+sel
+[
+ [
+ {
+ "column_name": "a",
+ "ranges":
+ ["91303 <= a <= 91303"],
+ "selectivity_from_histogram": 0.035714283
+ }
+ ]
+]
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+#
+# End of 10.4 tests
+#
+#
+# Clean up
+#
+set @@global.histogram_size=@save_histogram_size;
+set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
+SET SESSION STORAGE_ENGINE=DEFAULT;
+Warnings:
+Warning 1287 '@@storage_engine' is deprecated and will be removed in a future release. Please use '@@default_storage_engine' instead
diff --git a/mysql-test/main/selectivity_innodb_notembedded.test b/mysql-test/main/selectivity_innodb_notembedded.test
new file mode 100644
index 00000000000..387f7dcb7de
--- /dev/null
+++ b/mysql-test/main/selectivity_innodb_notembedded.test
@@ -0,0 +1,16 @@
+--source include/have_innodb.inc
+# This test is slow on buildbot.
+--source include/big_test.inc
+--source include/default_optimizer_switch.inc
+--source include/not_embedded.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+set @save_optimizer_switch_for_selectivity_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+
+--source selectivity_notembedded.test
+
+set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/selectivity_notembedded.result b/mysql-test/main/selectivity_notembedded.result
new file mode 100644
index 00000000000..d2e90a19a68
--- /dev/null
+++ b/mysql-test/main/selectivity_notembedded.result
@@ -0,0 +1,95 @@
+drop table if exists t0,t1,t2,t3;
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+set optimizer_use_condition_selectivity=3;
+#
+# MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+#
+create table t0(a int);
+insert into t0 select 1 from seq_1_to_78;
+create table t1(a int);
+insert into t1 select 1 from seq_1_to_26;
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+insert into t10 select 34785 from t0, seq_1_to_8;
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+insert into t10 select 65216 from t0, seq_1_to_52;
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+insert into t10 select 65219 from t0;
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+insert into t10 select 73915 from t0, seq_1_to_40;
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+insert into t10 select 91302 from t0, seq_1_to_6;
+insert into t10 select 91303 from t1;
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+insert into t10 select 91305 from t0, seq_1_to_8;
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status OK
+flush tables;
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t10 ALL NULL NULL NULL NULL 9984 Using where
+# Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+sel
+[
+ [
+ {
+ "column_name": "a",
+ "ranges":
+ ["91303 <= a <= 91303"],
+ "selectivity_from_histogram": 0.035714283
+ }
+ ]
+]
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+#
+# End of 10.4 tests
+#
+#
+# Clean up
+#
+set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity_notembedded.test b/mysql-test/main/selectivity_notembedded.test
new file mode 100644
index 00000000000..6752bd3c7e1
--- /dev/null
+++ b/mysql-test/main/selectivity_notembedded.test
@@ -0,0 +1,121 @@
+--source include/no_valgrind_without_big.inc
+--source include/have_stat_tables.inc
+--source include/have_sequence.inc
+--source include/default_charset.inc
+--source include/not_embedded.inc
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+
+select @@global.use_stat_tables;
+select @@session.use_stat_tables;
+
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+
+--source include/default_optimizer_switch.inc
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_histogram_size=@@histogram_size;
+set @save_histogram_type=@@histogram_type;
+set join_cache_level=2;
+set @@global.histogram_size=0,@@local.histogram_size=0;
+set histogram_type='single_prec_hb';
+
+# check that statistics on nulls is used
+
+set optimizer_use_condition_selectivity=3;
+
+--echo #
+--echo # MDEV-31067: selectivity_from_histogram >1.0 for a DOUBLE_PREC_HB histogram
+--echo #
+create table t0(a int); # This holds how many rows we hold in a bucket.
+insert into t0 select 1 from seq_1_to_78;
+
+create table t1(a int); # one-third of a bucket
+insert into t1 select 1 from seq_1_to_26;
+
+create table t10 (a int);
+insert into t10 select 0 from t0, seq_1_to_4;
+
+insert into t10 select 8693 from t1;
+insert into t10 select 8694 from t1;
+insert into t10 select 8695 from t1;
+
+
+insert into t10 select 34783 from t1;
+insert into t10 select 34784 from t1;
+insert into t10 select 34785 from t1;
+
+
+insert into t10 select 34785 from t0, seq_1_to_8;
+
+insert into t10 select 65214 from t1;
+insert into t10 select 65215 from t1;
+insert into t10 select 65216 from t1;
+
+insert into t10 select 65216 from t0, seq_1_to_52;
+
+insert into t10 select 65217 from t1;
+insert into t10 select 65218 from t1;
+insert into t10 select 65219 from t1;
+
+insert into t10 select 65219 from t0;
+
+
+insert into t10 select 73913 from t1;
+insert into t10 select 73914 from t1;
+insert into t10 select 73915 from t1;
+
+insert into t10 select 73915 from t0, seq_1_to_40;
+
+
+insert into t10 select 78257 from t1;
+insert into t10 select 78258 from t1;
+insert into t10 select 78259 from t1;
+
+insert into t10 select 91300 from t1;
+insert into t10 select 91301 from t1;
+insert into t10 select 91302 from t1;
+
+insert into t10 select 91302 from t0, seq_1_to_6;
+
+insert into t10 select 91303 from t1; # Only 1/3rd of bucket matches the search tuple
+insert into t10 select 91304 from t1;
+insert into t10 select 91305 from t1;
+
+insert into t10 select 91305 from t0, seq_1_to_8;
+
+insert into t10 select 99998 from t1;
+insert into t10 select 99999 from t1;
+insert into t10 select 100000 from t1;
+
+set use_stat_tables=preferably;
+analyze table t10 persistent for all;
+flush tables;
+
+set @tmp=@@optimizer_trace;
+set optimizer_trace=1;
+explain select * from t10 where a in (91303);
+
+--echo # Must have selectivity_from_histogram <= 1.0:
+select json_detailed(json_extract(trace, '$**.selectivity_for_columns')) as sel
+from information_schema.optimizer_trace;
+
+set optimizer_trace=@tmp;
+drop table t0,t1,t10;
+
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
+
+--echo #
+--echo # Clean up
+--echo #
+--source include/restore_charset.inc
+set @@global.histogram_size=@save_histogram_size;
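The MDEV-31067 coverage removed from selectivity.result/selectivity.test and selectivity_innodb.result above now lives in these new *_notembedded files (the test reads information_schema.optimizer_trace, hence, presumably, the not_embedded.inc guard). The intent of the "Must have selectivity_from_histogram <= 1.0" check can also be expressed directly; a hedged variant, not part of the committed test, that extracts the first matching value and compares it against 1.0:

# Hypothetical follow-up check: the histogram-based selectivity pulled from the trace must not
# exceed 1.0 (the inner extract returns an array of matches, the outer takes element 0)
select json_extract(json_extract(trace, '$**.selectivity_from_histogram'), '$[0]') <= 1.0
       as selectivity_within_bounds
from information_schema.optimizer_trace;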