summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergei Golubchik <serg@mariadb.org>2014-12-03 11:37:26 +0100
committerSergei Golubchik <serg@mariadb.org>2014-12-03 11:37:26 +0100
commitec4137c62b00a2bad91dd108f5782b206fe86a8b (patch)
treeb284d929d1a498df14aafffd0c31831f2b4e9fd5
parent1caee393076dc642a7d8e1283377b59ef8f52dbd (diff)
parentbafe529af76a915f43dbf6a3fb8dc610a4ea121b (diff)
downloadmariadb-git-ec4137c62b00a2bad91dd108f5782b206fe86a8b.tar.gz
Merge branch '10.1' into bb-10.1-merge
-rw-r--r--config.h.cmake2
-rw-r--r--configure.cmake2
-rw-r--r--libmysqld/CMakeLists.txt1
-rw-r--r--mysql-test/r/analyze_format_json.result172
-rw-r--r--mysql-test/r/explain_json.result668
-rw-r--r--mysql-test/r/func_time.result2
-rw-r--r--mysql-test/r/partition_bug18198.result3
-rw-r--r--mysql-test/r/set_statement.result64
-rw-r--r--mysql-test/r/union.result2
-rw-r--r--mysql-test/suite/innodb_fts/r/fulltext_misc.result2
-rw-r--r--mysql-test/t/analyze_format_json.test38
-rw-r--r--mysql-test/t/explain_json.test138
-rw-r--r--mysql-test/t/partition_bug18198.test2
-rw-r--r--mysql-test/t/set_statement.test37
-rw-r--r--sql/CMakeLists.txt1
-rw-r--r--sql/item.cc15
-rw-r--r--sql/item_strfunc.cc85
-rw-r--r--sql/item_strfunc.h42
-rw-r--r--sql/item_subselect.cc19
-rw-r--r--sql/item_subselect.h1
-rw-r--r--sql/lex.h1
-rw-r--r--sql/my_json_writer.cc345
-rw-r--r--sql/my_json_writer.h189
-rw-r--r--sql/mysqld.h6
-rw-r--r--sql/opt_range.cc4
-rw-r--r--sql/sql_class.cc13
-rw-r--r--sql/sql_class.h2
-rw-r--r--sql/sql_delete.cc26
-rw-r--r--sql/sql_explain.cc1081
-rw-r--r--sql/sql_explain.h239
-rw-r--r--sql/sql_lex.cc8
-rw-r--r--sql/sql_lex.h8
-rw-r--r--sql/sql_parse.cc37
-rw-r--r--sql/sql_select.cc400
-rw-r--r--sql/sql_select.h44
-rw-r--r--sql/sql_update.cc4
-rw-r--r--sql/sql_yacc.yy70
-rw-r--r--sql/sys_vars.cc4
-rw-r--r--sql/table.h6
-rw-r--r--storage/innobase/fil/fil0pagecompress.cc18
-rw-r--r--storage/innobase/include/fil0pagecompress.h7
-rw-r--r--storage/innobase/include/fsp0pagecompress.ic13
-rw-r--r--storage/innobase/os/os0file.cc121
-rw-r--r--storage/xtradb/fil/fil0pagecompress.cc18
-rw-r--r--storage/xtradb/include/fil0pagecompress.h7
-rw-r--r--storage/xtradb/include/fsp0pagecompress.ic13
-rw-r--r--storage/xtradb/os/os0file.cc124
47 files changed, 3412 insertions, 692 deletions
diff --git a/config.h.cmake b/config.h.cmake
index adedd64cdba..eadedb41c01 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -216,6 +216,8 @@
#cmakedefine HAVE_POLL 1
#cmakedefine HAVE_PORT_CREATE 1
#cmakedefine HAVE_POSIX_FALLOCATE 1
+#cmakedefine HAVE_LINUX_FALLOC_H 1
+#cmakedefine HAVE_FALLOCATE 1
#cmakedefine HAVE_PREAD 1
#cmakedefine HAVE_PAUSE_INSTRUCTION 1
#cmakedefine HAVE_FAKE_PAUSE_INSTRUCTION 1
diff --git a/configure.cmake b/configure.cmake
index d5076dada02..5fb86acad70 100644
--- a/configure.cmake
+++ b/configure.cmake
@@ -199,6 +199,7 @@ CHECK_INCLUDE_FILES (ieeefp.h HAVE_IEEEFP_H)
CHECK_INCLUDE_FILES (inttypes.h HAVE_INTTYPES_H)
CHECK_INCLUDE_FILES (langinfo.h HAVE_LANGINFO_H)
CHECK_INCLUDE_FILES (linux/unistd.h HAVE_LINUX_UNISTD_H)
+CHECK_INCLUDE_FILES (linux/falloc.h HAVE_LINUX_FALLOC_H)
CHECK_INCLUDE_FILES (limits.h HAVE_LIMITS_H)
CHECK_INCLUDE_FILES (locale.h HAVE_LOCALE_H)
CHECK_INCLUDE_FILES (malloc.h HAVE_MALLOC_H)
@@ -399,6 +400,7 @@ CHECK_FUNCTION_EXISTS (perror HAVE_PERROR)
CHECK_FUNCTION_EXISTS (poll HAVE_POLL)
CHECK_FUNCTION_EXISTS (port_create HAVE_PORT_CREATE)
CHECK_FUNCTION_EXISTS (posix_fallocate HAVE_POSIX_FALLOCATE)
+CHECK_FUNCTION_EXISTS (fallocate HAVE_FALLOCATE)
CHECK_FUNCTION_EXISTS (pread HAVE_PREAD)
CHECK_FUNCTION_EXISTS (pthread_attr_create HAVE_PTHREAD_ATTR_CREATE)
CHECK_FUNCTION_EXISTS (pthread_attr_getstacksize HAVE_PTHREAD_ATTR_GETSTACKSIZE)
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index f44c5daa17c..7dbc8cd70f5 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -100,6 +100,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/rpl_reporting.cc
../sql/sql_expression_cache.cc
../sql/my_apc.cc ../sql/my_apc.h
+ ../sql/my_json_writer.cc ../sql/my_json_writer.h
../sql/rpl_gtid.cc
../sql/sql_explain.cc ../sql/sql_explain.h
../sql/compat56.cc
diff --git a/mysql-test/r/analyze_format_json.result b/mysql-test/r/analyze_format_json.result
new file mode 100644
index 00000000000..b496fe5a68e
--- /dev/null
+++ b/mysql-test/r/analyze_format_json.result
@@ -0,0 +1,172 @@
+drop table if exists t0,t1,t2,t3;
+create table t0 (a int);
+INSERT INTO t0 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+# r_filtered=30%, because 3 rows match: 0,1,2
+analyze format=json select * from t0 where a<3;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 10,
+ "r_rows": 10,
+ "filtered": 100,
+ "r_filtered": 30,
+ "attached_condition": "(t0.a < 3)"
+ }
+ }
+}
+create table t1 (a int, b int, c int, key(a));
+insert into t1 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+analyze
+select * from t0, t1 where t1.a=t0.a and t0.a > 9;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 10 100.00 0.00 Using where
+1 SIMPLE t1 ref a a 5 test.t0.a 1 NULL 100.00 NULL
+analyze format=json
+select * from t0, t1 where t1.a=t0.a and t0.a > 9;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 10,
+ "r_rows": 10,
+ "filtered": 100,
+ "r_filtered": 0,
+ "attached_condition": "((t0.a > 9) and (t0.a is not null))"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t0.a"],
+ "r_loops": 0,
+ "rows": 1,
+ "r_rows": null,
+ "filtered": 100,
+ "r_filtered": null
+ }
+ }
+}
+analyze
+select * from t0, t1 where t1.a=t0.a and t1.b<4;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 10 100.00 100.00 Using where
+1 SIMPLE t1 ref a a 5 test.t0.a 1 1 100.00 40.00 Using where
+analyze format=json
+select * from t0, t1 where t1.a=t0.a and t1.b<4;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 10,
+ "r_rows": 10,
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "(t0.a is not null)"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t0.a"],
+ "r_loops": 10,
+ "rows": 1,
+ "r_rows": 1,
+ "filtered": 100,
+ "r_filtered": 40,
+ "attached_condition": "(t1.b < 4)"
+ }
+ }
+}
+analyze
+select * from t1 tbl1, t1 tbl2 where tbl1.b<2 and tbl2.b>5;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE tbl1 ALL NULL NULL NULL NULL 100 100 100.00 2.00 Using where
+1 SIMPLE tbl2 ALL NULL NULL NULL NULL 100 100 100.00 94.00 Using where; Using join buffer (flat, BNL join)
+analyze format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.b<20 and tbl2.b<60;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tbl1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 100,
+ "r_rows": 100,
+ "filtered": 100,
+ "r_filtered": 20,
+ "attached_condition": "(tbl1.b < 20)"
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "tbl2",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 100,
+ "r_rows": 100,
+ "filtered": 100,
+ "r_filtered": 60,
+ "attached_condition": "(tbl2.b < 60)"
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL",
+ "r_filtered": 100
+ }
+ }
+}
+analyze format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.b<20 and tbl2.b<60 and tbl1.c > tbl2.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tbl1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 100,
+ "r_rows": 100,
+ "filtered": 100,
+ "r_filtered": 20,
+ "attached_condition": "(tbl1.b < 20)"
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "tbl2",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 100,
+ "r_rows": 100,
+ "filtered": 100,
+ "r_filtered": 60,
+ "attached_condition": "(tbl2.b < 60)"
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL",
+ "attached_condition": "(tbl1.c > tbl2.c)",
+ "r_filtered": 15.833
+ }
+ }
+}
+drop table t1;
+drop table t0;
diff --git a/mysql-test/r/explain_json.result b/mysql-test/r/explain_json.result
new file mode 100644
index 00000000000..c159161a8bd
--- /dev/null
+++ b/mysql-test/r/explain_json.result
@@ -0,0 +1,668 @@
+drop table if exists t0,t1;
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+explain format=json select * from t0;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+}
+explain format=json select * from t0 where 1>2;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+explain format=json select * from t0 where a<3;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t0.a < 3)"
+ }
+ }
+}
+# Try a basic join
+create table t1 (a int, b int, filler char(32), key(a));
+insert into t1
+select
+a.a + b.a* 10 + c.a * 100,
+a.a + b.a* 10 + c.a * 100,
+'filler'
+from t0 a, t0 b, t0 c;
+explain format=json select * from t0,t1 where t1.a=t0.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t0.a is not null)"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t0.a"],
+ "rows": 1,
+ "filtered": 100
+ }
+ }
+}
+# Try range and index_merge
+create table t2 (a1 int, a2 int, b1 int, b2 int, key(a1,a2), key(b1,b2));
+insert into t2 select a,a,a,a from t1;
+explain format=json select * from t2 where a1<5;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "range",
+ "possible_keys": ["a1"],
+ "key": "a1",
+ "key_length": "5",
+ "used_key_parts": ["a1"],
+ "rows": 5,
+ "filtered": 100,
+ "index_condition": "(t2.a1 < 5)"
+ }
+ }
+}
+explain format=json select * from t2 where a1=1 or b1=2;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "index_merge",
+ "possible_keys": ["a1", "b1"],
+ "key_length": "5,5",
+ "index_merge": {
+ "sort_union": {
+ "range": {
+ "key": "a1",
+ "used_key_parts": ["a1"]
+ },
+ "range": {
+ "key": "b1",
+ "used_key_parts": ["b1"]
+ }
+ }
+ },
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "((t2.a1 = 1) or (t2.b1 = 2))"
+ }
+ }
+}
+explain format=json select * from t2 where a1=1 or (b1=2 and b2=3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "index_merge",
+ "possible_keys": ["a1", "b1"],
+ "key_length": "5,10",
+ "index_merge": {
+ "sort_union": {
+ "range": {
+ "key": "a1",
+ "used_key_parts": ["a1"]
+ },
+ "range": {
+ "key": "b1",
+ "used_key_parts": ["b1", "b2"]
+ }
+ }
+ },
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "((t2.a1 = 1) or ((t2.b1 = 2) and (t2.b2 = 3)))"
+ }
+ }
+}
+# Try ref access on two key components
+explain format=json select * from t0,t2 where t2.b1=t0.a and t2.b2=4;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t0.a is not null)"
+ },
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["b1"],
+ "key": "b1",
+ "key_length": "10",
+ "used_key_parts": ["b1", "b2"],
+ "ref": ["test.t0.a", "const"],
+ "rows": 1,
+ "filtered": 100
+ }
+ }
+}
+drop table t1,t2;
+#
+# Try a UNION
+#
+explain format=json select * from t0 A union select * from t0 B;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "A",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "B",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+explain format=json select * from t0 A union all select * from t0 B;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "A",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "B",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+#
+# Subqueries
+#
+create table t1 (a int, b int);
+insert into t1 select a,a from t0;
+explain format=json select a, a > (select max(b) from t1 where t1.b=t0.a) from t0;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t1.b = t0.a)"
+ }
+ }
+ }
+ ]
+ }
+}
+explain format=json
+select * from t0 where
+a > (select max(b) from t1 where t1.b=t0.a) or a < 3 ;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "((t0.a > (subquery#2)) or (t0.a < 3))"
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t1.b = t0.a)"
+ }
+ }
+ }
+ ]
+ }
+}
+drop table t1;
+#
+# Join buffering
+#
+create table t1 (a int, b int);
+insert into t1 select tbl1.a+10*tbl2.a, tbl1.a+10*tbl2.a from t0 tbl1, t0 tbl2;
+explain format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.a=tbl2.a and tbl1.b < 3 and tbl2.b < 5;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tbl1",
+ "access_type": "ALL",
+ "rows": 100,
+ "filtered": 100,
+ "attached_condition": "(tbl1.b < 3)"
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "tbl2",
+ "access_type": "ALL",
+ "rows": 100,
+ "filtered": 100,
+ "attached_condition": "(tbl2.b < 5)"
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL",
+ "attached_condition": "(tbl2.a = tbl1.a)"
+ }
+ }
+}
+drop table t1;
+#
+# Single-table UPDATE/DELETE, INSERT
+#
+explain format=json delete from t0;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Deleting all rows"
+ }
+ }
+}
+explain format=json delete from t0 where 1 > 2;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+explain format=json delete from t0 where a < 3;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "delete": 1,
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "attached_condition": "(t0.a < 3)"
+ }
+ }
+}
+explain format=json update t0 set a=3 where a in (2,3,4);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "update": 1,
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "attached_condition": "(t0.a in (2,3,4))"
+ }
+ }
+}
+explain format=json insert into t0 values (1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t0"
+ }
+ }
+}
+create table t1 like t0;
+explain format=json insert into t1 values ((select max(a) from t0));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1"
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+}
+drop table t1;
+#
+# A derived table
+#
+create table t1 (a int, b int);
+insert into t1 select a,a from t0;
+explain format=json
+select * from (select a, count(*) as cnt from t1 group by a) as tbl
+where cnt>0;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(tbl.cnt > 0)",
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+}
+explain format=json
+select * from (select a, count(*) as cnt from t1 group by a) as tbl1, t1 as
+tbl2 where cnt=tbl2.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tbl2",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(tbl2.a is not null)"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "8",
+ "used_key_parts": ["cnt"],
+ "ref": ["test.tbl2.a"],
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "(tbl1.cnt = tbl2.a)",
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+}
+#
+# Non-merged semi-join (aka JTBM)
+#
+explain format=json
+select * from t1 where a in (select max(a) from t1 group by b);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "attached_condition": "(t1.a is not null)"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
+ "used_key_parts": ["max(a)"],
+ "ref": ["test.t1.a"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+}
+#
+# Semi-join Materialization
+#
+create table t2 like t1;
+insert into t2 select * from t1;
+explain format=json
+select * from t1,t2 where t1.a in ( select a from t0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
+ "used_key_parts": ["a"],
+ "ref": ["func"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t0",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ }
+ }
+ }
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL"
+ }
+ }
+}
+#
+# First-Match
+#
+explain
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 10
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
+explain format=json
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100,
+ "first_match": "t2"
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL",
+ "attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
+ }
+ }
+}
+#
+# Duplicate Weedout
+#
+set @tmp= @@optimizer_switch;
+set optimizer_switch='firstmatch=off';
+explain
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 10
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+explain format=json
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "duplicates_removal": {
+ "block-nl-join": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 10,
+ "filtered": 100
+ },
+ "buffer_type": "flat",
+ "join_type": "BNL",
+ "attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
+ }
+ }
+ }
+}
+set optimizer_switch=@tmp;
+drop table t1,t2;
+drop table t0;
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index bf07595bc3a..0007847f22a 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -872,7 +872,7 @@ explain extended select period_add("9602",-12),period_diff(199505,"9404"),from_d
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select period_add('9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,'9404') AS `period_diff(199505,"9404")`,from_days(to_days('960101')) AS `from_days(to_days("960101"))`,dayofmonth('1997-01-02') AS `dayofmonth("1997-01-02")`,month('1997-01-02') AS `month("1997-01-02")`,monthname('1972-03-04') AS `monthname("1972-03-04")`,dayofyear('0000-00-00') AS `dayofyear("0000-00-00")`,hour('1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute('23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week('1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek('2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year('98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname('1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec('0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format('1997-01-02 03:04:05','%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp('1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,('1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,('1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,('1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from '1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,('1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
+Note 1003 select period_add('9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,'9404') AS `period_diff(199505,"9404")`,from_days(to_days('960101')) AS `from_days(to_days("960101"))`,dayofmonth('1997-01-02') AS `dayofmonth("1997-01-02")`,month('1997-01-02') AS `month("1997-01-02")`,monthname('1972-03-04') AS `monthname("1972-03-04")`,dayofyear('0000-00-00') AS `dayofyear("0000-00-00")`,hour('1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute('23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week('1998-03-03',@@default_week_format) AS `WEEK("1998-03-03")`,yearweek('2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year('98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname('1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec('0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format('1997-01-02 03:04:05','%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp('1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,('1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,('1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,('1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from '1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,('1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
SET @TMP='2007-08-01 12:22:49';
CREATE TABLE t1 (d DATETIME);
INSERT INTO t1 VALUES ('2007-08-01 12:22:59');
diff --git a/mysql-test/r/partition_bug18198.result b/mysql-test/r/partition_bug18198.result
index ee7bf514807..80f11edaaf6 100644
--- a/mysql-test/r/partition_bug18198.result
+++ b/mysql-test/r/partition_bug18198.result
@@ -130,7 +130,8 @@ ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitio
create table t1 (col1 datetime)
partition by range(week(col1))
(partition p0 values less than (10), partition p1 values less than (30));
-ERROR HY000: This partition function is not allowed
+ERROR 42000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed near ')
+(partition p0 values less than (10), partition p1 values less than (30))' at line 2
create table t1 (col1 varchar(25))
partition by range(cast(col1 as signed))
(partition p0 values less than (10), partition p1 values less than (30));
diff --git a/mysql-test/r/set_statement.result b/mysql-test/r/set_statement.result
index b7465499a35..51687caeb21 100644
--- a/mysql-test/r/set_statement.result
+++ b/mysql-test/r/set_statement.result
@@ -1070,11 +1070,71 @@ set statement character_set_filesystem=default for select 1;
ERROR 42000: The system variable character_set_filesystem cannot be set in SET STATEMENT.
set statement collation_connection=default for select 1;
ERROR 42000: The system variable collation_connection cannot be set in SET STATEMENT.
-set statement old_passwords=default for select 1;
-ERROR 42000: The system variable old_passwords cannot be set in SET STATEMENT.
set statement query_cache_type=default for select 1;
ERROR 42000: The system variable query_cache_type cannot be set in SET STATEMENT.
set statement wait_timeout=default for select 1;
ERROR 42000: The system variable wait_timeout cannot be set in SET STATEMENT.
set statement interactive_timeout=default for select 1;
ERROR 42000: The system variable interactive_timeout cannot be set in SET STATEMENT.
+set @save_week_format=@@default_week_format;
+set @@default_week_format=0;
+SET STATEMENT default_week_format = 2 FOR SELECT WEEK('2000-01-01');
+WEEK('2000-01-01')
+52
+create table t1 (a date);
+insert t1 values ('2000-01-01');
+explain extended select week(a) from t1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 system NULL NULL NULL NULL 1 100.00
+Warnings:
+Note 1003 select week('2000-01-01',@@default_week_format) AS `week(a)` from dual
+prepare stmt1 from "select week(a) from t1";
+execute stmt1;
+week(a)
+0
+set default_week_format = 2;
+execute stmt1;
+week(a)
+52
+alter table t1 engine=myisam;
+execute stmt1;
+week(a)
+52
+deallocate prepare stmt1;
+drop table t1;
+set @@default_week_format=@save_week_format;
+set @save_old_passwords=@@old_passwords;
+set @@old_passwords=0;
+set statement OLD_PASSWORDS = 0 for select password('test');
+password('test')
+*94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
+set statement OLD_PASSWORDS = 1 for select password('test');
+password('test')
+378b243e220ca493
+set statement OLD_PASSWORDS = 0 for explain extended select password('test');
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 select password('test') AS `password('test')`
+set statement OLD_PASSWORDS = 1 for explain extended select password('test');
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 select password('test') AS `password('test')`
+create table t1 (a char(10));
+insert t1 values ('qwertyuiop');
+prepare stmt1 from "select password(a) from t1";
+execute stmt1;
+password(a)
+*6063C78456BB048BAF36BE1104D12D547834DFEA
+set old_passwords=1;
+execute stmt1;
+password(a)
+2013610f6aac2950
+alter table t1 engine=myisam;
+execute stmt1;
+password(a)
+2013610f6aac2950
+deallocate prepare stmt1;
+drop table t1;
+set @@old_passwords=@save_old_passwords;
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index 6d086163aff..ef1749eda52 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -1617,7 +1617,7 @@ NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'a' of SELECT #3 was resolved in SELECT #-1
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` union select `test`.`t1`.`a` AS `a` from `test`.`t1` order by <expr_cache><>((select `a` from `test`.`t2` where (`test`.`t2`.`b` = 12)))
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` union select `test`.`t1`.`a` AS `a` from `test`.`t1` order by <expr_cache><`a`>((select `a` from `test`.`t2` where (`test`.`t2`.`b` = 12)))
# Should not crash
SELECT * FROM t1 UNION SELECT * FROM t1
ORDER BY (SELECT a FROM t2 WHERE b = 12);
diff --git a/mysql-test/suite/innodb_fts/r/fulltext_misc.result b/mysql-test/suite/innodb_fts/r/fulltext_misc.result
index c33a3f45482..ce7fe46f4e1 100644
--- a/mysql-test/suite/innodb_fts/r/fulltext_misc.result
+++ b/mysql-test/suite/innodb_fts/r/fulltext_misc.result
@@ -181,7 +181,7 @@ NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'a' of SELECT #3 was resolved in SELECT #-1
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` union select `test`.`t1`.`a` AS `a` from `test`.`t1` order by <expr_cache><>((select `a` from `test`.`t2` where (`test`.`t2`.`b` = 12)))
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` union select `test`.`t1`.`a` AS `a` from `test`.`t1` order by <expr_cache><`a`>((select `a` from `test`.`t2` where (`test`.`t2`.`b` = 12)))
# Should not crash
SELECT * FROM t1 UNION SELECT * FROM t1
ORDER BY (SELECT a FROM t2 WHERE b = 12);
diff --git a/mysql-test/t/analyze_format_json.test b/mysql-test/t/analyze_format_json.test
new file mode 100644
index 00000000000..7f7ea3d806e
--- /dev/null
+++ b/mysql-test/t/analyze_format_json.test
@@ -0,0 +1,38 @@
+#
+# Tests for "ANALYZE FORMAT=JSON $statement" syntax
+#
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+
+create table t0 (a int);
+INSERT INTO t0 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+--echo # r_filtered=30%, because 3 rows match: 0,1,2
+analyze format=json select * from t0 where a<3;
+
+create table t1 (a int, b int, c int, key(a));
+insert into t1 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+
+analyze
+select * from t0, t1 where t1.a=t0.a and t0.a > 9;
+analyze format=json
+select * from t0, t1 where t1.a=t0.a and t0.a > 9;
+
+analyze
+select * from t0, t1 where t1.a=t0.a and t1.b<4;
+
+analyze format=json
+select * from t0, t1 where t1.a=t0.a and t1.b<4;
+
+analyze
+select * from t1 tbl1, t1 tbl2 where tbl1.b<2 and tbl2.b>5;
+
+analyze format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.b<20 and tbl2.b<60;
+
+analyze format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.b<20 and tbl2.b<60 and tbl1.c > tbl2.c;
+
+drop table t1;
+drop table t0;
diff --git a/mysql-test/t/explain_json.test b/mysql-test/t/explain_json.test
new file mode 100644
index 00000000000..20c5359e646
--- /dev/null
+++ b/mysql-test/t/explain_json.test
@@ -0,0 +1,138 @@
+#
+# EXPLAIN FORMAT=JSON tests. These are tests developed for MariaDB.
+#
+--disable_warnings
+drop table if exists t0,t1;
+--enable_warnings
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+explain format=json select * from t0;
+
+explain format=json select * from t0 where 1>2;
+
+explain format=json select * from t0 where a<3;
+
+--echo # Try a basic join
+create table t1 (a int, b int, filler char(32), key(a));
+insert into t1
+select
+ a.a + b.a* 10 + c.a * 100,
+ a.a + b.a* 10 + c.a * 100,
+ 'filler'
+from t0 a, t0 b, t0 c;
+
+explain format=json select * from t0,t1 where t1.a=t0.a;
+
+--echo # Try range and index_merge
+create table t2 (a1 int, a2 int, b1 int, b2 int, key(a1,a2), key(b1,b2));
+insert into t2 select a,a,a,a from t1;
+
+explain format=json select * from t2 where a1<5;
+
+explain format=json select * from t2 where a1=1 or b1=2;
+explain format=json select * from t2 where a1=1 or (b1=2 and b2=3);
+
+--echo # Try ref access on two key components
+
+explain format=json select * from t0,t2 where t2.b1=t0.a and t2.b2=4;
+
+drop table t1,t2;
+
+--echo #
+--echo # Try a UNION
+--echo #
+explain format=json select * from t0 A union select * from t0 B;
+explain format=json select * from t0 A union all select * from t0 B;
+
+--echo #
+--echo # Subqueries
+--echo #
+create table t1 (a int, b int);
+insert into t1 select a,a from t0;
+explain format=json select a, a > (select max(b) from t1 where t1.b=t0.a) from t0;
+
+explain format=json
+select * from t0 where
+ a > (select max(b) from t1 where t1.b=t0.a) or a < 3 ;
+
+drop table t1;
+
+--echo #
+--echo # Join buffering
+--echo #
+create table t1 (a int, b int);
+insert into t1 select tbl1.a+10*tbl2.a, tbl1.a+10*tbl2.a from t0 tbl1, t0 tbl2;
+
+explain format=json
+select * from t1 tbl1, t1 tbl2 where tbl1.a=tbl2.a and tbl1.b < 3 and tbl2.b < 5;
+
+drop table t1;
+
+--echo #
+--echo # Single-table UPDATE/DELETE, INSERT
+--echo #
+explain format=json delete from t0;
+explain format=json delete from t0 where 1 > 2;
+
+explain format=json delete from t0 where a < 3;
+
+explain format=json update t0 set a=3 where a in (2,3,4);
+
+explain format=json insert into t0 values (1);
+
+create table t1 like t0;
+explain format=json insert into t1 values ((select max(a) from t0));
+
+drop table t1;
+
+--echo #
+--echo # A derived table
+--echo #
+create table t1 (a int, b int);
+insert into t1 select a,a from t0;
+explain format=json
+select * from (select a, count(*) as cnt from t1 group by a) as tbl
+where cnt>0;
+
+explain format=json
+select * from (select a, count(*) as cnt from t1 group by a) as tbl1, t1 as
+tbl2 where cnt=tbl2.a;
+
+--echo #
+--echo # Non-merged semi-join (aka JTBM)
+--echo #
+explain format=json
+select * from t1 where a in (select max(a) from t1 group by b);
+
+--echo #
+--echo # Semi-join Materialization
+--echo #
+create table t2 like t1;
+insert into t2 select * from t1;
+explain format=json
+select * from t1,t2 where t1.a in ( select a from t0);
+
+--echo #
+--echo # First-Match
+--echo #
+explain
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+explain format=json
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+
+--echo #
+--echo # Duplicate Weedout
+--echo #
+set @tmp= @@optimizer_switch;
+set optimizer_switch='firstmatch=off';
+explain
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+explain format=json
+select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+set optimizer_switch=@tmp;
+
+drop table t1,t2;
+drop table t0;
+
diff --git a/mysql-test/t/partition_bug18198.test b/mysql-test/t/partition_bug18198.test
index 720d483e8ed..75544f58ce8 100644
--- a/mysql-test/t/partition_bug18198.test
+++ b/mysql-test/t/partition_bug18198.test
@@ -163,7 +163,7 @@ create table t1 (col1 date)
partition by range(unix_timestamp(col1))
(partition p0 values less than (10), partition p1 values less than (30));
--- error ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
+-- error ER_PARSE_ERROR
create table t1 (col1 datetime)
partition by range(week(col1))
(partition p0 values less than (10), partition p1 values less than (30));
diff --git a/mysql-test/t/set_statement.test b/mysql-test/t/set_statement.test
index 8b8431315f4..0c1756f86ff 100644
--- a/mysql-test/t/set_statement.test
+++ b/mysql-test/t/set_statement.test
@@ -1025,11 +1025,44 @@ set statement character_set_filesystem=default for select 1;
--error ER_SET_STATEMENT_NOT_SUPPORTED
set statement collation_connection=default for select 1;
--error ER_SET_STATEMENT_NOT_SUPPORTED
-set statement old_passwords=default for select 1;
---error ER_SET_STATEMENT_NOT_SUPPORTED
set statement query_cache_type=default for select 1;
--error ER_SET_STATEMENT_NOT_SUPPORTED
set statement wait_timeout=default for select 1;
--error ER_SET_STATEMENT_NOT_SUPPORTED
set statement interactive_timeout=default for select 1;
+# MDEV-6996: SET STATEMENT default_week_format = .. has no effect
+set @save_week_format=@@default_week_format;
+set @@default_week_format=0;
+SET STATEMENT default_week_format = 2 FOR SELECT WEEK('2000-01-01');
+create table t1 (a date);
+insert t1 values ('2000-01-01');
+explain extended select week(a) from t1;
+prepare stmt1 from "select week(a) from t1";
+execute stmt1;
+set default_week_format = 2;
+execute stmt1;
+alter table t1 engine=myisam;
+execute stmt1;
+deallocate prepare stmt1;
+drop table t1;
+set @@default_week_format=@save_week_format;
+
+# MDEV-7015: SET STATEMENT old_passwords has no effect
+set @save_old_passwords=@@old_passwords;
+set @@old_passwords=0;
+set statement OLD_PASSWORDS = 0 for select password('test');
+set statement OLD_PASSWORDS = 1 for select password('test');
+set statement OLD_PASSWORDS = 0 for explain extended select password('test');
+set statement OLD_PASSWORDS = 1 for explain extended select password('test');
+create table t1 (a char(10));
+insert t1 values ('qwertyuiop');
+prepare stmt1 from "select password(a) from t1";
+execute stmt1;
+set old_passwords=1;
+execute stmt1;
+alter table t1 engine=myisam;
+execute stmt1;
+deallocate prepare stmt1;
+drop table t1;
+set @@old_passwords=@save_old_passwords;
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 1c41ff481aa..59fe2c32056 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -112,6 +112,7 @@ SET (SQL_SOURCE
threadpool_common.cc
../sql-common/mysql_async.c
my_apc.cc my_apc.h
+ my_json_writer.cc my_json_writer.h
rpl_gtid.cc rpl_parallel.cc
${WSREP_SOURCES}
table_cache.cc
diff --git a/sql/item.cc b/sql/item.cc
index 099fe1e7f07..d585ace5717 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2504,7 +2504,13 @@ void Item_ident::print(String *str, enum_query_type query_type)
}
if (db_name && db_name[0] && !alias_name_used)
{
- if (!(cached_table && cached_table->belong_to_view &&
+ /*
+ When printing EXPLAIN, don't print database name when it's the same as
+ current database.
+ */
+    bool skip_db= (query_type & QT_EXPLAIN) && thd->db && !strcmp(thd->db, db_name);
+ if (!skip_db &&
+ !(cached_table && cached_table->belong_to_view &&
cached_table->belong_to_view->compact_view_format))
{
append_identifier(thd, str, d_name, (uint)strlen(d_name));
@@ -7542,6 +7548,13 @@ void Item_cache_wrapper::init_on_demand()
void Item_cache_wrapper::print(String *str, enum_query_type query_type)
{
+ if (query_type == QT_EXPLAIN)
+ {
+ /* Don't print the cache in EXPLAIN EXTENDED */
+ orig_item->print(str, query_type);
+ return;
+ }
+
str->append(func_name());
if (expr_cache)
{
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index ec5740cf3e2..875d9470dac 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2171,57 +2171,68 @@ void Item_func_trim::print(String *str, enum_query_type query_type)
/* Item_func_password */
-String *Item_func_password::val_str_ascii(String *str)
+bool Item_func_password::fix_fields(THD *thd, Item **ref)
{
- DBUG_ASSERT(fixed == 1);
- String *res= args[0]->val_str(str);
- check_password_policy(res);
- if (args[0]->null_value || res->length() == 0)
- return make_empty_result();
- my_make_scrambled_password(tmp_value, res->ptr(), res->length());
- str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, &my_charset_latin1);
- return str;
-}
-
-char *Item_func_password::alloc(THD *thd, const char *password, size_t pass_len)
-{
- char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1);
- if (buff)
- {
- String *password_str= new (thd->mem_root)String(password, thd->variables.
- character_set_client);
- check_password_policy(password_str);
- my_make_scrambled_password(buff, password, pass_len);
- }
- return buff;
+ if (deflt)
+ alg= (thd->variables.old_passwords ? OLD : NEW);
+ return Item_str_ascii_func::fix_fields(thd, ref);
}
-
-/* Item_func_old_password */
-
-String *Item_func_old_password::val_str_ascii(String *str)
+String *Item_func_password::val_str_ascii(String *str)
{
DBUG_ASSERT(fixed == 1);
- String *res= args[0]->val_str(str);
- if ((null_value=args[0]->null_value))
- return 0;
- if (res->length() == 0)
- return make_empty_result();
- my_make_scrambled_password_323(tmp_value, res->ptr(), res->length());
- str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, &my_charset_latin1);
+ String *res= args[0]->val_str(str);
+ switch (alg){
+ case NEW:
+ check_password_policy(res);
+ if (args[0]->null_value || res->length() == 0)
+ return make_empty_result();
+ my_make_scrambled_password(tmp_value, res->ptr(), res->length());
+ str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, &my_charset_latin1);
+ break;
+ case OLD:
+ if ((null_value=args[0]->null_value))
+ return 0;
+ if (res->length() == 0)
+ return make_empty_result();
+ my_make_scrambled_password_323(tmp_value, res->ptr(), res->length());
+ str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, &my_charset_latin1);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
return str;
}
-char *Item_func_old_password::alloc(THD *thd, const char *password,
- size_t pass_len)
+char *Item_func_password::alloc(THD *thd, const char *password,
+ size_t pass_len, enum PW_Alg al)
{
- char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1);
- if (buff)
+ char *buff= (char *) thd->alloc((al==NEW)?
+ SCRAMBLED_PASSWORD_CHAR_LENGTH + 1:
+ SCRAMBLED_PASSWORD_CHAR_LENGTH_323 + 1);
+ if (!buff)
+ return NULL;
+
+ switch (al) {
+ case NEW:
+ {
+ String *password_str= new (thd->mem_root)String(password, thd->variables.
+ character_set_client);
+ check_password_policy(password_str);
+ my_make_scrambled_password(buff, password, pass_len);
+ break;
+ }
+ case OLD:
my_make_scrambled_password_323(buff, password, pass_len);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
return buff;
}
+
#define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.')
String *Item_func_encrypt::val_str(String *str)
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 8377a20e0a4..b79009c6778 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -407,40 +407,32 @@ public:
class Item_func_password :public Item_str_ascii_func
{
+public:
+ enum PW_Alg {OLD, NEW};
+private:
char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH+1];
+ enum PW_Alg alg;
+ bool deflt;
public:
- Item_func_password(Item *a) :Item_str_ascii_func(a) {}
+ Item_func_password(Item *a) :Item_str_ascii_func(a), alg(NEW), deflt(1) {}
+ Item_func_password(Item *a, PW_Alg al) :Item_str_ascii_func(a),
+ alg(al), deflt(0) {}
String *val_str_ascii(String *str);
+ bool fix_fields(THD *thd, Item **ref);
void fix_length_and_dec()
{
- fix_length_and_charset(SCRAMBLED_PASSWORD_CHAR_LENGTH, default_charset());
+    fix_length_and_charset((alg == NEW ?
+ SCRAMBLED_PASSWORD_CHAR_LENGTH :
+ SCRAMBLED_PASSWORD_CHAR_LENGTH_323),
+ default_charset());
}
- const char *func_name() const { return "password"; }
- static char *alloc(THD *thd, const char *password, size_t pass_len);
+  const char *func_name() const { return ((deflt || alg == NEW) ?
+ "password" : "old_password"); }
+ static char *alloc(THD *thd, const char *password, size_t pass_len,
+ enum PW_Alg al);
};
-/*
- Item_func_old_password -- PASSWORD() implementation used in MySQL 3.21 - 4.0
- compatibility mode. This item is created in sql_yacc.yy when
- 'old_passwords' session variable is set, and to handle OLD_PASSWORD()
- function.
-*/
-
-class Item_func_old_password :public Item_str_ascii_func
-{
- char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1];
-public:
- Item_func_old_password(Item *a) :Item_str_ascii_func(a) {}
- String *val_str_ascii(String *str);
- void fix_length_and_dec()
- {
- fix_length_and_charset(SCRAMBLED_PASSWORD_CHAR_LENGTH_323, default_charset());
- }
- const char *func_name() const { return "old_password"; }
- static char *alloc(THD *thd, const char *password, size_t pass_len);
-};
-
class Item_func_des_encrypt :public Item_str_func
{
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 65974fb6155..964be7eb061 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -895,6 +895,21 @@ void Item_subselect::update_used_tables()
void Item_subselect::print(String *str, enum_query_type query_type)
{
+ if (query_type == QT_EXPLAIN)
+ {
+ str->append("(subquery#");
+ if (engine)
+ {
+ char buf[64];
+ ll2str(engine->get_identifier(), buf, 10, 0);
+ str->append(buf);
+ }
+ else
+ str->append("NULL"); // TODO: what exactly does this mean?
+
+ str->append(")");
+ return;
+ }
if (engine)
{
str->append('(');
@@ -3717,6 +3732,10 @@ int subselect_union_engine::exec()
return res;
}
+int subselect_union_engine::get_identifier()
+{
+ return unit->first_select()->select_number;
+}
/*
Search for at least one row satisfying select condition
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 92b269d02f1..02e487c82d0 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -871,6 +871,7 @@ public:
bool is_executed() const;
bool no_rows();
virtual enum_engine_type engine_type() { return UNION_ENGINE; }
+ int get_identifier();
};
diff --git a/sql/lex.h b/sql/lex.h
index 51b69f00990..137790e6227 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -242,6 +242,7 @@ static SYMBOL symbols[] = {
{ "FOR", SYM(FOR_SYM)},
{ "FORCE", SYM(FORCE_SYM)},
{ "FOREIGN", SYM(FOREIGN)},
+ { "FORMAT", SYM(FORMAT_SYM)},
{ "FOUND", SYM(FOUND_SYM)},
{ "FROM", SYM(FROM)},
{ "FULL", SYM(FULL)},
diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc
new file mode 100644
index 00000000000..4f933583347
--- /dev/null
+++ b/sql/my_json_writer.cc
@@ -0,0 +1,345 @@
+/* Copyright (C) 2014 SkySQL Ab, MariaDB Corporation Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <my_global.h>
+#include "sql_priv.h"
+#include "sql_string.h"
+
+#include "my_json_writer.h"
+
+void Json_writer::append_indent()
+{
+ if (!document_start)
+ output.append('\n');
+ for (int i=0; i< indent_level; i++)
+ output.append(' ');
+}
+
+void Json_writer::start_object()
+{
+ fmt_helper.on_start_object();
+
+ if (!element_started)
+ start_element();
+
+ output.append("{");
+ indent_level+=INDENT_SIZE;
+ first_child=true;
+ element_started= false;
+ document_start= false;
+}
+
+void Json_writer::start_array()
+{
+ if (fmt_helper.on_start_array())
+ return;
+
+ if (!element_started)
+ start_element();
+
+ output.append("[");
+ indent_level+=INDENT_SIZE;
+ first_child=true;
+ element_started= false;
+ document_start= false;
+}
+
+
+void Json_writer::end_object()
+{
+ indent_level-=INDENT_SIZE;
+ if (!first_child)
+ append_indent();
+ output.append("}");
+}
+
+
+void Json_writer::end_array()
+{
+ if (fmt_helper.on_end_array())
+ return;
+ indent_level-=INDENT_SIZE;
+ if (!first_child)
+ append_indent();
+ output.append("]");
+}
+
+
+Json_writer& Json_writer::add_member(const char *name)
+{
+ if (fmt_helper.on_add_member(name))
+ return *this; // handled
+
+ // assert that we are in an object
+ DBUG_ASSERT(!element_started);
+ start_element();
+
+ output.append('"');
+ output.append(name);
+ output.append("\": ");
+ return *this;
+}
+
+
+/*
+ Used by formatting helper to print something that is formatted by the helper.
+ We should only separate it from the previous element.
+*/
+
+void Json_writer::start_sub_element()
+{
+ //element_started= true;
+ if (first_child)
+ first_child= false;
+ else
+ output.append(',');
+
+ append_indent();
+}
+
+
+void Json_writer::start_element()
+{
+ element_started= true;
+
+ if (first_child)
+ first_child= false;
+ else
+ output.append(',');
+
+ append_indent();
+}
+
+void Json_writer::add_ll(longlong val)
+{
+ char buf[64];
+  my_snprintf(buf, sizeof(buf), "%lld", val);
+ add_unquoted_str(buf);
+}
+
+
+void Json_writer::add_double(double val)
+{
+ char buf[64];
+ my_snprintf(buf, sizeof(buf), "%lg", val);
+ add_unquoted_str(buf);
+}
+
+
+void Json_writer::add_bool(bool val)
+{
+ add_unquoted_str(val? "true" : "false");
+}
+
+
+void Json_writer::add_null()
+{
+ add_unquoted_str("null");
+}
+
+
+void Json_writer::add_unquoted_str(const char* str)
+{
+ if (fmt_helper.on_add_str(str))
+ return;
+
+ if (!element_started)
+ start_element();
+
+ output.append(str);
+ element_started= false;
+}
+
+
+void Json_writer::add_str(const char *str)
+{
+ if (fmt_helper.on_add_str(str))
+ return;
+
+ if (!element_started)
+ start_element();
+
+ output.append('"');
+ output.append(str);
+ output.append('"');
+ element_started= false;
+}
+
+
+void Json_writer::add_str(const String &str)
+{
+ add_str(str.ptr());
+}
+
+
+bool Single_line_formatting_helper::on_add_member(const char *name)
+{
+ DBUG_ASSERT(state== INACTIVE || state == DISABLED);
+ if (state != DISABLED)
+ {
+ // remove everything from the array
+ buf_ptr= buffer;
+
+ //append member name to the array
+ size_t len= strlen(name);
+ if (len < MAX_LINE_LEN)
+ {
+ memcpy(buf_ptr, name, len);
+ buf_ptr+=len;
+ *(buf_ptr++)= 0;
+
+ line_len= owner->indent_level + len + 1;
+ state= ADD_MEMBER;
+ return true; // handled
+ }
+ }
+ return false; // not handled
+}
+
+
+bool Single_line_formatting_helper::on_start_array()
+{
+ if (state == ADD_MEMBER)
+ {
+ state= IN_ARRAY;
+ return true; // handled
+ }
+ else
+ {
+ state= INACTIVE;
+ // TODO: what if we have accumulated some stuff already? shouldn't we
+ // flush it?
+ return false; // not handled
+ }
+}
+
+
+bool Single_line_formatting_helper::on_end_array()
+{
+ if (state == IN_ARRAY)
+ {
+ flush_on_one_line();
+ state= INACTIVE;
+ return true; // handled
+ }
+ return false; // not handled
+}
+
+
+void Single_line_formatting_helper::on_start_object()
+{
+ // Nested objects will not be printed on one line
+ disable_and_flush();
+}
+
+
+bool Single_line_formatting_helper::on_add_str(const char *str)
+{
+ if (state == IN_ARRAY)
+ {
+ size_t len= strlen(str);
+
+ // New length will be:
+ // "$string",
+ // quote + quote + comma + space = 4
+ if (line_len + len + 4 > MAX_LINE_LEN)
+ {
+ disable_and_flush();
+ return false; // didn't handle the last element
+ }
+
+ //append string to array
+ memcpy(buf_ptr, str, len);
+ buf_ptr+=len;
+ *(buf_ptr++)= 0;
+ line_len += len + 4;
+ return true; // handled
+ }
+
+ disable_and_flush();
+ return false; // not handled
+}
+
+
+/*
+ Append everything accumulated to the output on one line
+*/
+
+void Single_line_formatting_helper::flush_on_one_line()
+{
+ owner->start_sub_element();
+ char *ptr= buffer;
+ int nr= 0;
+ while (ptr < buf_ptr)
+ {
+ char *str= ptr;
+
+ if (nr == 0)
+ {
+ owner->output.append('"');
+ owner->output.append(str);
+ owner->output.append("\": ");
+ owner->output.append('[');
+ }
+ else
+ {
+ if (nr != 1)
+ owner->output.append(", ");
+ owner->output.append('"');
+ owner->output.append(str);
+ owner->output.append('"');
+ }
+ nr++;
+
+ while (*ptr!=0)
+ ptr++;
+ ptr++;
+ }
+ owner->output.append(']');
+}
+
+
+void Single_line_formatting_helper::disable_and_flush()
+{
+ bool start_array= (state == IN_ARRAY);
+ state= DISABLED;
+ // deactivate ourselves and flush all accumulated calls.
+ char *ptr= buffer;
+ int nr= 0;
+ while (ptr < buf_ptr)
+ {
+ char *str= ptr;
+ if (nr == 0)
+ {
+ owner->add_member(str);
+ if (start_array)
+ owner->start_array();
+ }
+ else
+ {
+ //if (nr == 1)
+ // owner->start_array();
+ owner->add_str(str);
+ }
+
+ nr++;
+ while (*ptr!=0)
+ ptr++;
+ ptr++;
+ }
+ buf_ptr= buffer;
+ state= INACTIVE;
+}
+
diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h
new file mode 100644
index 00000000000..3a7defc3566
--- /dev/null
+++ b/sql/my_json_writer.h
@@ -0,0 +1,189 @@
+/* Copyright (C) 2014 SkySQL Ab, MariaDB Corporation Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+class Json_writer;
+
+/*
+ Single_line_formatting_helper is used by Json_writer to do better formatting
+ of JSON documents.
+
+ The idea is to catch arrays that can be printed on one line:
+
+ arrayName : [ "boo", 123, 456 ]
+
+ and actually print them on one line. Arrrays that occupy too much space on
+ the line, or have nested members cannot be printed on one line.
+
+ We hook into JSON printing functions and try to detect the pattern. While
+ detecting the pattern, we will accumulate "boo", 123, 456 as strings.
+
+ Then,
+ - either the pattern is broken, and we print the elements out,
+ - or the pattern lasts till the end of the array, and we print the
+ array on one line.
+*/
+
+class Single_line_formatting_helper
+{
+ enum enum_state
+ {
+ INACTIVE,
+ ADD_MEMBER,
+ IN_ARRAY,
+ DISABLED
+ };
+
+ /*
+ This works like a finite automaton.
+
+ state=DISABLED means the helper is disabled - all on_XXX functions will
+ return false (which means "not handled") and do nothing.
+
+ +->-+
+ | v
+ INACTIVE ---> ADD_MEMBER ---> IN_ARRAY--->-+
+ ^ |
+ +------------------<--------------------+
+
+ For other states:
+ INACTIVE - initial state, we have nothing.
+ ADD_MEMBER - add_member() was called, the buffer has "member_name\0".
+ IN_ARRAY - start_array() was called.
+
+
+ */
+ enum enum_state state;
+ enum { MAX_LINE_LEN= 80 };
+  char buffer[MAX_LINE_LEN];
+
+ /* The data in the buffer is located between buffer[0] and buf_ptr */
+ char *buf_ptr;
+ uint line_len;
+
+ Json_writer *owner;
+public:
+ Single_line_formatting_helper() : state(INACTIVE), buf_ptr(buffer) {}
+
+ void init(Json_writer *owner_arg) { owner= owner_arg; }
+
+ bool on_add_member(const char *name);
+
+ bool on_start_array();
+ bool on_end_array();
+ void on_start_object();
+ // on_end_object() is not needed.
+
+ bool on_add_str(const char *str);
+
+ void flush_on_one_line();
+ void disable_and_flush();
+};
+
+
+/*
+ A class to write well-formed JSON documents. The documents are also formatted
+ for human readability.
+*/
+
+class Json_writer
+{
+public:
+ /* Add a member. We must be in an object. */
+ Json_writer& add_member(const char *name);
+
+ /* Add atomic values */
+ void add_str(const char* val);
+ void add_str(const String &str);
+
+ void add_ll(longlong val);
+ void add_double(double val);
+ void add_bool(bool val);
+ void add_null();
+
+private:
+ void add_unquoted_str(const char* val);
+public:
+ /* Start a child object */
+ void start_object();
+ void start_array();
+
+ void end_object();
+ void end_array();
+
+ Json_writer() :
+ indent_level(0), document_start(true), element_started(false),
+ first_child(true)
+ {
+ fmt_helper.init(this);
+ }
+private:
+ // TODO: a stack of (name, bool is_object_or_array) elements.
+ int indent_level;
+ enum { INDENT_SIZE = 2 };
+
+ friend class Single_line_formatting_helper;
+ friend class Json_writer_nesting_guard;
+ bool document_start;
+ bool element_started;
+ bool first_child;
+
+ Single_line_formatting_helper fmt_helper;
+
+ void append_indent();
+ void start_element();
+ void start_sub_element();
+
+ //const char *new_member_name;
+public:
+ String output;
+};
+
+
+/*
+ RAII-based helper class to detect incorrect use of Json_writer.
+
+ The idea is that a function typically must leave Json_writer at the same
+ identation level as it was when it was invoked. Leaving it at a different
+ level typically means we forgot to close an object or an array
+
+ So, here is a way to guard
+ void foo(Json_writer *writer)
+ {
+ Json_writer_nesting_guard(writer);
+ .. do something with writer
+
+ // at the end of the function, ~Json_writer_nesting_guard() is called
+ // and it makes sure that the nesting is the same as when the function was
+ // entered.
+ }
+*/
+
+class Json_writer_nesting_guard
+{
+ Json_writer* writer;
+ int indent_level;
+public:
+ Json_writer_nesting_guard(Json_writer *writer_arg) :
+ writer(writer_arg),
+ indent_level(writer->indent_level)
+ {}
+
+ ~Json_writer_nesting_guard()
+ {
+ DBUG_ASSERT(indent_level == writer->indent_level);
+ }
+};
+
+
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 94077ec01b8..19616867ce9 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -613,9 +613,13 @@ enum enum_query_type
/// Without character set introducers.
QT_WITHOUT_INTRODUCERS= (1 << 1),
/// view internal representation (like QT_ORDINARY except ORDER BY clause)
- QT_VIEW_INTERNAL= (1 << 2)
+ QT_VIEW_INTERNAL= (1 << 2),
+ /// This value means focus on readability, not on ability to parse back, etc.
+ QT_EXPLAIN= (1 << 4)
};
+
+
/* query_id */
typedef int64 query_id_t;
extern query_id_t global_query_id;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 297a8b91f27..531721750cc 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -12269,7 +12269,7 @@ Explain_quick_select* QUICK_RANGE_SELECT::get_explain(MEM_ROOT *alloc)
{
Explain_quick_select *res;
if ((res= new (alloc) Explain_quick_select(QS_TYPE_RANGE)))
- res->range.set(alloc, head->key_info[index].name, max_used_key_length);
+ res->range.set(alloc, &head->key_info[index], max_used_key_length);
return res;
}
@@ -12278,7 +12278,7 @@ Explain_quick_select* QUICK_GROUP_MIN_MAX_SELECT::get_explain(MEM_ROOT *alloc)
{
Explain_quick_select *res;
if ((res= new (alloc) Explain_quick_select(QS_TYPE_GROUP_MIN_MAX)))
- res->range.set(alloc, head->key_info[index].name, max_used_key_length);
+ res->range.set(alloc, &head->key_info[index], max_used_key_length);
return res;
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index dbdf90066d0..f5d51cfeaee 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -2370,7 +2370,11 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
int THD::send_explain_fields(select_result *result, uint8 explain_flags, bool is_analyze)
{
List<Item> field_list;
- make_explain_field_list(field_list, explain_flags, is_analyze);
+ if (lex->explain_json)
+ make_explain_json_field_list(field_list);
+ else
+ make_explain_field_list(field_list, explain_flags, is_analyze);
+
result->prepare(field_list, NULL);
return (result->send_result_set_metadata(field_list,
Protocol::SEND_NUM_ROWS |
@@ -2378,6 +2382,13 @@ int THD::send_explain_fields(select_result *result, uint8 explain_flags, bool is
}
+void THD::make_explain_json_field_list(List<Item> &field_list)
+{
+ Item *item= new Item_empty_string("EXPLAIN", 78, system_charset_info);
+ field_list.push_back(item);
+}
+
+
/*
Populate the provided field_list with EXPLAIN output columns.
this->lex->describe has the EXPLAIN flags
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 236975a6e94..24eda567dcc 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3097,6 +3097,8 @@ public:
bool is_analyze);
void make_explain_field_list(List<Item> &field_list, uint8 explain_flags,
bool is_analyze);
+ void make_explain_json_field_list(List<Item> &field_list);
+
/**
Clear the current error, if any.
We do not clear is_fatal_error or is_fatal_sub_stmt_error since we
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index c85068e5d10..65c748cc590 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -52,7 +52,7 @@
invoked on a running DELETE statement.
*/
-void Delete_plan::save_explain_data(Explain_query *query)
+void Delete_plan::save_explain_data(MEM_ROOT *mem_root, Explain_query *query)
{
Explain_delete* explain= new Explain_delete;
@@ -65,22 +65,23 @@ void Delete_plan::save_explain_data(Explain_query *query)
else
{
explain->deleting_all_rows= false;
- Update_plan::save_explain_data_intern(query, explain);
+ Update_plan::save_explain_data_intern(mem_root, query, explain);
}
query->add_upd_del_plan(explain);
}
-void Update_plan::save_explain_data(Explain_query *query)
+void Update_plan::save_explain_data(MEM_ROOT *mem_root, Explain_query *query)
{
Explain_update* explain= new Explain_update;
- save_explain_data_intern(query, explain);
+ save_explain_data_intern(mem_root, query, explain);
query->add_upd_del_plan(explain);
}
-void Update_plan::save_explain_data_intern(Explain_query *query,
+void Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
+ Explain_query *query,
Explain_update *explain)
{
explain->select_type= "SIMPLE";
@@ -142,10 +143,12 @@ void Update_plan::save_explain_data_intern(Explain_query *query,
}
explain->using_where= MY_TEST(select && select->cond);
+ explain->where_cond= select? select->cond: NULL;
explain->using_filesort= using_filesort;
explain->using_io_buffer= using_io_buffer;
- make_possible_keys_line(table, possible_keys, &explain->possible_keys_line);
+ append_possible_keys(mem_root, explain->possible_keys, table,
+ possible_keys);
explain->quick_info= NULL;
@@ -158,11 +161,8 @@ void Update_plan::save_explain_data_intern(Explain_query *query,
{
if (index != MAX_KEY)
{
- explain->key_str.append(table->key_info[index].name);
- char buf[64];
- size_t length;
- length= longlong10_to_str(table->key_info[index].key_length, buf, 10) - buf;
- explain->key_len_str.append(buf, length);
+ explain->key.set(mem_root, &table->key_info[index],
+ table->key_info[index].key_length);
}
}
explain->rows= scanned_rows;
@@ -461,7 +461,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (thd->lex->describe)
goto produce_explain_and_leave;
- query_plan.save_explain_data(thd->lex->explain);
+ query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
DBUG_EXECUTE_IF("show_explain_probe_delete_exec_start",
dbug_serve_apcs(thd, 1););
@@ -699,7 +699,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
- query_plan.save_explain_data(thd->lex->explain);
+ query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
send_nothing_and_leave:
/*
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index f713e74259d..59f38f7f167 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -21,7 +21,11 @@
#include <my_global.h>
#include "sql_priv.h"
#include "sql_select.h"
+#include "my_json_writer.h"
+const char * STR_DELETING_ALL_ROWS= "Deleting all rows";
+const char * STR_IMPOSSIBLE_WHERE= "Impossible WHERE";
+const char * STR_NO_ROWS_AFTER_PRUNING= "No matching rows after partition pruning";
Explain_query::Explain_query(THD *thd_arg) :
upd_del_plan(NULL), insert_plan(NULL), thd(thd_arg), apc_enabled(false)
@@ -140,8 +144,13 @@ int Explain_query::send_explain(THD *thd)
thd->send_explain_fields(result, lex->describe, lex->analyze_stmt))
return 1;
- int res;
- if ((res= print_explain(result, lex->describe, lex->analyze_stmt)))
+ int res= 0;
+ if (thd->lex->explain_json)
+ print_explain_json(result, thd->lex->analyze_stmt);
+ else
+ res= print_explain(result, lex->describe, thd->lex->analyze_stmt);
+
+ if (res)
result->abort_result_set();
else
result->send_eof();
@@ -178,6 +187,34 @@ int Explain_query::print_explain(select_result_sink *output,
}
+void Explain_query::print_explain_json(select_result_sink *output, bool is_analyze)
+{
+ Json_writer writer;
+ writer.start_object();
+
+ if (upd_del_plan)
+ upd_del_plan->print_explain_json(this, &writer, is_analyze);
+ else if (insert_plan)
+ insert_plan->print_explain_json(this, &writer, is_analyze);
+ else
+ {
+ /* Start printing from node with id=1 */
+ Explain_node *node= get_node(1);
+ if (!node)
+ return; /* No query plan */
+ node->print_explain_json(this, &writer, is_analyze);
+ }
+
+ writer.end_object();
+
+ const CHARSET_INFO *cs= system_charset_info;
+ List<Item> item_list;
+ String *buf= &writer.output;
+ item_list.push_back(new Item_string(buf->ptr(), buf->length(), cs));
+ output->send_data(item_list);
+}
+
+
bool print_explain_for_slow_log(LEX *lex, THD *thd, String *str)
{
return lex->explain->print_explain_str(thd, str, /*is_analyze*/ true);
@@ -213,12 +250,173 @@ static void push_string(List<Item> *item_list, String *str)
item_list->push_back(new Item_string_sys(str->ptr(), str->length()));
}
+static void push_string_list(List<Item> *item_list, String_list &lines,
+ String *buf)
+{
+ List_iterator_fast<char> it(lines);
+ char *line;
+ bool first= true;
+ while ((line= it++))
+ {
+ if (first)
+ first= false;
+ else
+ buf->append(',');
+
+ buf->append(line);
+ }
+ push_string(item_list, buf);
+}
+
+
+/*
+ Print an EXPLAIN output row, based on information provided in the parameters
+
+ @note
+ Parameters that may have NULL value in EXPLAIN output, should be passed
+ (char*)NULL.
+
+ @return
+ 0 - OK
+ 1 - OOM Error
+*/
+
+static
+int print_explain_row(select_result_sink *result,
+ uint8 options, bool is_analyze,
+ uint select_number,
+ const char *select_type,
+ const char *table_name,
+ const char *partitions,
+ enum join_type jtype,
+ String_list *possible_keys,
+ const char *index,
+ const char *key_len,
+ const char *ref,
+ ha_rows *rows,
+ ha_rows *r_rows,
+ double r_filtered,
+ const char *extra)
+{
+ Item *item_null= new Item_null();
+ List<Item> item_list;
+ Item *item;
+
+ item_list.push_back(new Item_int((int32) select_number));
+ item_list.push_back(new Item_string_sys(select_type));
+ item_list.push_back(new Item_string_sys(table_name));
+ if (options & DESCRIBE_PARTITIONS)
+ {
+ if (partitions)
+ {
+ item_list.push_back(new Item_string_sys(partitions));
+ }
+ else
+ item_list.push_back(item_null);
+ }
+
+ const char *jtype_str= join_type_str[jtype];
+ item_list.push_back(new Item_string_sys(jtype_str));
+
+ /* 'possible_keys' */
+ if (possible_keys && !possible_keys->is_empty())
+ {
+ StringBuffer<64> possible_keys_buf;
+ push_string_list(&item_list, *possible_keys, &possible_keys_buf);
+ }
+ else
+ item_list.push_back(item_null);
+
+ /* 'index */
+ item= index ? new Item_string_sys(index) : item_null;
+ item_list.push_back(item);
+
+ /* 'key_len */
+ item= key_len ? new Item_string_sys(key_len) : item_null;
+ item_list.push_back(item);
+
+ /* 'ref' */
+ item= ref ? new Item_string_sys(ref) : item_null;
+ item_list.push_back(item);
+
+ /* 'rows' */
+ if (rows)
+ {
+ item_list.push_back(new Item_int(*rows,
+ MY_INT64_NUM_DECIMAL_DIGITS));
+ }
+ else
+ item_list.push_back(item_null);
+
+ /* 'r_rows' */
+ if (is_analyze)
+ {
+ if (r_rows)
+ {
+ item_list.push_back(new Item_int(*r_rows,
+ MY_INT64_NUM_DECIMAL_DIGITS));
+ }
+ else
+ item_list.push_back(item_null);
+ }
+
+ /* 'filtered' */
+ const double filtered=100.0;
+ if (options & DESCRIBE_EXTENDED || is_analyze)
+ item_list.push_back(new Item_float(filtered, 2));
+
+ /* 'r_filtered' */
+ if (is_analyze)
+ item_list.push_back(new Item_float(r_filtered, 2));
+
+ /* 'Extra' */
+ if (extra)
+ item_list.push_back(new Item_string_sys(extra));
+ else
+ item_list.push_back(item_null);
+
+ if (result->send_data(item_list))
+ return 1;
+ return 0;
+}
+
+
+
+
+uint Explain_union::make_union_table_name(char *buf)
+{
+ uint childno= 0;
+ uint len= 6, lastop= 0;
+ memcpy(buf, STRING_WITH_LEN("<union"));
+
+ for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN;
+ childno++)
+ {
+ len+= lastop;
+ lastop= my_snprintf(buf + len, NAME_LEN - len,
+ "%u,", union_members.at(childno));
+ }
+
+ if (childno < union_members.elements() || len + lastop >= NAME_LEN)
+ {
+ memcpy(buf + len, STRING_WITH_LEN("...>") + 1);
+ len+= 4;
+ }
+ else
+ {
+ len+= lastop;
+ buf[len - 1]= '>'; // change ',' to '>'
+ }
+ return len;
+}
+
int Explain_union::print_explain(Explain_query *query,
select_result_sink *output,
uint8 explain_flags,
bool is_analyze)
{
+ //const CHARSET_INFO *cs= system_charset_info;
char table_name_buffer[SAFE_NAME_LEN];
/* print all UNION children, in order */
@@ -242,31 +440,8 @@ int Explain_union::print_explain(Explain_query *query,
push_str(&item_list, fake_select_type);
/* `table` column: something like "<union1,2>" */
- {
- uint childno= 0;
- uint len= 6, lastop= 0;
- memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
-
- for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN;
- childno++)
- {
- len+= lastop;
- lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
- "%u,", union_members.at(childno));
- }
-
- if (childno < union_members.elements() || len + lastop >= NAME_LEN)
- {
- memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
- len+= 4;
- }
- else
- {
- len+= lastop;
- table_name_buffer[len - 1]= '>'; // change ',' to '>'
- }
- item_list.push_back(new Item_string_sys(table_name_buffer, len));
- }
+ uint len= make_union_table_name(table_name_buffer);
+ item_list.push_back(new Item_string_sys(table_name_buffer, len));
/* `partitions` column */
if (explain_flags & DESCRIBE_PARTITIONS)
@@ -326,14 +501,46 @@ int Explain_union::print_explain(Explain_query *query,
}
+void Explain_union::print_explain_json(Explain_query *query,
+ Json_writer *writer, bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+ char table_name_buffer[SAFE_NAME_LEN];
+
+ writer->add_member("query_block").start_object();
+ writer->add_member("union_result").start_object();
+ // using_temporary_table
+ make_union_table_name(table_name_buffer);
+ writer->add_member("table_name").add_str(table_name_buffer);
+ writer->add_member("access_type").add_str("ALL"); // not very useful
+ writer->add_member("query_specifications").start_array();
+
+ for (int i= 0; i < (int) union_members.elements(); i++)
+ {
+ writer->start_object();
+ //writer->add_member("dependent").add_str("TODO");
+ //writer->add_member("cacheable").add_str("TODO");
+ Explain_select *sel= query->get_select(union_members.at(i));
+ sel->print_explain_json(query, writer, is_analyze);
+ writer->end_object();
+ }
+ writer->end_array();
+
+ print_explain_json_for_children(query, writer, is_analyze);
+
+ writer->end_object(); // union_result
+ writer->end_object(); // query_block
+}
+
+
/*
Print EXPLAINs for all children nodes (i.e. for subqueries)
*/
int Explain_node::print_explain_for_children(Explain_query *query,
- select_result_sink *output,
- uint8 explain_flags,
- bool is_analyze)
+ select_result_sink *output,
+ uint8 explain_flags,
+ bool is_analyze)
{
for (int i= 0; i < (int) children.elements(); i++)
{
@@ -345,6 +552,50 @@ int Explain_node::print_explain_for_children(Explain_query *query,
}
+/*
+ This tells whether a child subquery should be printed in JSON output.
+
+ Derived tables and Non-merged semi-joins should not be printed, because they
+ are printed inline in Explain_table_access.
+*/
+bool is_connection_printable_in_json(enum Explain_node::explain_connection_type type)
+{
+ return (type != Explain_node::EXPLAIN_NODE_DERIVED &&
+ type != Explain_node::EXPLAIN_NODE_NON_MERGED_SJ);
+}
+
+
+void Explain_node::print_explain_json_for_children(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ bool started= false;
+ for (int i= 0; i < (int) children.elements(); i++)
+ {
+ Explain_node *node= query->get_node(children.at(i));
+ /* Derived tables are printed inside Explain_table_access objects */
+
+ if (!is_connection_printable_in_json(node->connection_type))
+ continue;
+
+ if (!started)
+ {
+ writer->add_member("subqueries").start_array();
+ started= true;
+ }
+
+ writer->start_object();
+ node->print_explain_json(query, writer, is_analyze);
+ writer->end_object();
+ }
+
+ if (started)
+ writer->end_array();
+}
+
+
void Explain_select::replace_table(uint idx, Explain_table_access *new_tab)
{
delete join_tabs[idx];
@@ -352,7 +603,7 @@ void Explain_select::replace_table(uint idx, Explain_table_access *new_tab)
}
-Explain_select::~Explain_select()
+Explain_basic_join::~Explain_basic_join()
{
if (join_tabs)
{
@@ -413,111 +664,163 @@ int Explain_select::print_explain(Explain_query *query,
using_fs= false;
}
}
+ for (uint i=0; i< n_join_tabs; i++)
+ {
+ Explain_basic_join* nest;
+ if ((nest= join_tabs[i]->sjm_nest))
+ nest->print_explain(query, output, explain_flags, is_analyze);
+ }
}
return print_explain_for_children(query, output, explain_flags, is_analyze);
}
-void Explain_table_access::push_extra(enum explain_extra_tag extra_tag)
+int Explain_basic_join::print_explain(Explain_query *query,
+ select_result_sink *output,
+ uint8 explain_flags, bool is_analyze)
{
- extra_tags.append(extra_tag);
+ for (uint i=0; i< n_join_tabs; i++)
+ {
+ if (join_tabs[i]->print_explain(output, explain_flags, is_analyze,
+ select_id,
+ "MATERIALIZED" /*select_type*/,
+ FALSE /*using temporary*/,
+ FALSE /*using filesort*/))
+ return 1;
+ }
+ return 0;
}
-int Explain_table_access::print_explain(select_result_sink *output, uint8 explain_flags,
- bool is_analyze,
- uint select_id, const char *select_type,
- bool using_temporary, bool using_filesort)
+void Explain_select::print_explain_json(Explain_query *query,
+ Json_writer *writer, bool is_analyze)
{
- const CHARSET_INFO *cs= system_charset_info;
- const char *hash_key_prefix= "#hash#";
- bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
- type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
+ Json_writer_nesting_guard guard(writer);
- List<Item> item_list;
- Item *item_null= new Item_null();
-
- if (sjm_nest_select_id)
- select_id= sjm_nest_select_id;
+ if (message)
+ {
+ writer->add_member("query_block").start_object();
+ writer->add_member("select_id").add_ll(select_id);
- /* `id` column */
- item_list.push_back(new Item_int((int32) select_id));
+ writer->add_member("table").start_object();
+ writer->add_member("message").add_str(message);
+ writer->end_object();
- /* `select_type` column */
- if (sjm_nest_select_id)
- push_str(&item_list, "MATERIALIZED");
+ print_explain_json_for_children(query, writer, is_analyze);
+ writer->end_object();
+ }
else
- push_str(&item_list, select_type);
+ {
+ /*
+ TODO: how does this approach allow to print ORDER BY members?
+ Explain_basic_join does not have ORDER/GROUP.
+ A: factor out join tab printing loop into a common func.
+ */
+ Explain_basic_join::print_explain_json(query, writer, is_analyze);
+ }
- /* `table` column */
- push_string(&item_list, &table_name);
-
- /* `partitions` column */
- if (explain_flags & DESCRIBE_PARTITIONS)
+}
+
+
+void Explain_basic_join::print_explain_json(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ writer->add_member("query_block").start_object();
+ writer->add_member("select_id").add_ll(select_id);
+ for (uint i=0; i< n_join_tabs; i++)
{
- if (used_partitions_set)
- {
- push_string(&item_list, &used_partitions);
- }
- else
- item_list.push_back(item_null);
+ if (join_tabs[i]->start_dups_weedout)
+ writer->add_member("duplicates_removal").start_object();
+
+ join_tabs[i]->print_explain_json(query, writer, is_analyze);
+
+ if (join_tabs[i]->end_dups_weedout)
+ writer->end_object();
}
+ print_explain_json_for_children(query, writer, is_analyze);
+ writer->end_object();
+}
- /* `type` column */
- push_str(&item_list, join_type_str[type]);
- /* `possible_keys` column */
- if (possible_keys_str.length() > 0)
- push_string(&item_list, &possible_keys_str);
- else
- item_list.push_back(item_null);
+void Explain_table_access::push_extra(enum explain_extra_tag extra_tag)
+{
+ extra_tags.append(extra_tag);
+}
+
+
+/*
+ Put the contents of 'key' field of EXPLAIN otuput into key_str.
+
+ It is surprisingly complex:
+ - hash join shows #hash#used_key
+ - quick selects that use single index will print index name
+*/
+
+void Explain_table_access::fill_key_str(String *key_str, bool is_json) const
+{
+ const CHARSET_INFO *cs= system_charset_info;
+ bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
+ type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
+ const char *hash_key_prefix= "#hash#";
- /* `key` */
- StringBuffer<64> key_str;
if (key.get_key_name())
{
if (is_hj)
- key_str.append(hash_key_prefix, strlen(hash_key_prefix), cs);
+ key_str->append(hash_key_prefix, strlen(hash_key_prefix), cs);
- key_str.append(key.get_key_name());
+ key_str->append(key.get_key_name());
if (is_hj && type != JT_HASH)
- key_str.append(':');
+ key_str->append(':');
}
if (quick_info)
{
StringBuffer<64> buf2;
- quick_info->print_key(&buf2);
- key_str.append(buf2);
+ if (is_json)
+ quick_info->print_extra_recursive(&buf2);
+ else
+ quick_info->print_key(&buf2);
+ key_str->append(buf2);
}
if (type == JT_HASH_NEXT)
- key_str.append(hash_next_key.get_key_name());
-
- if (key_str.length() > 0)
- push_string(&item_list, &key_str);
- else
- item_list.push_back(item_null);
+ key_str->append(hash_next_key.get_key_name());
+}
- /* `key_len` */
- StringBuffer<64> key_len_str;
+/*
+ Fill "key_length".
+ - this is just used key length for ref/range
+ - for index_merge, it is a comma-separated list of lengths.
+ - for hash join, it is key_len:pseudo_key_len
+
+ The column looks identical in tabular and json forms. In JSON, we consider
+ the column legacy, it is superceded by used_key_parts.
+*/
+
+void Explain_table_access::fill_key_len_str(String *key_len_str) const
+{
+ bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
+ type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
if (key.get_key_len() != (uint)-1)
{
char buf[64];
size_t length;
length= longlong10_to_str(key.get_key_len(), buf, 10) - buf;
- key_len_str.append(buf, length);
+ key_len_str->append(buf, length);
if (is_hj && type != JT_HASH)
- key_len_str.append(':');
+ key_len_str->append(':');
}
if (quick_info)
{
StringBuffer<64> buf2;
quick_info->print_key_len(&buf2);
- key_len_str.append(buf2);
+ key_len_str->append(buf2);
}
if (type == JT_HASH_NEXT)
@@ -525,19 +828,127 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
char buf[64];
size_t length;
length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf;
- key_len_str.append(buf, length);
+ key_len_str->append(buf, length);
+ }
+}
+
+
+void Explain_index_use::set(MEM_ROOT *mem_root, KEY *key, uint key_len_arg)
+{
+ set_pseudo_key(mem_root, key->name);
+ key_len= key_len_arg;
+ uint len= 0;
+ for (uint i= 0; i < key->usable_key_parts; i++)
+ {
+ key_parts_list.append_str(mem_root, key->key_part[i].field->field_name);
+ len += key->key_part[i].store_length;
+ if (len >= key_len_arg)
+ break;
+ }
+}
+
+
+void Explain_index_use::set_pseudo_key(MEM_ROOT *root, const char* key_name_arg)
+{
+ if (key_name_arg)
+ {
+ size_t name_len= strlen(key_name_arg);
+ if ((key_name= (char*)alloc_root(root, name_len+1)))
+ memcpy(key_name, key_name_arg, name_len+1);
+ }
+ else
+ key_name= NULL;
+ key_len= -1;
+}
+
+
+/*
+ Given r_filtered% from join buffer condition and join condition, produce a
+ combined r_filtered% number. This is needed for tabular EXPLAIN output which
+ has only one cell for r_filtered value.
+*/
+
+double Explain_table_access::get_r_filtered()
+{
+ double r_filtered= tracker.get_filtered_after_where();
+ if (bka_type.is_using_jbuf())
+ r_filtered *= jbuf_tracker.get_filtered_after_where();
+ return r_filtered;
+}
+
+
+int Explain_table_access::print_explain(select_result_sink *output, uint8 explain_flags,
+ bool is_analyze,
+ uint select_id, const char *select_type,
+ bool using_temporary, bool using_filesort)
+{
+ //const CHARSET_INFO *cs= system_charset_info;
+
+ List<Item> item_list;
+ Item *item_null= new Item_null();
+
+ /* `id` column */
+ item_list.push_back(new Item_int((int32) select_id));
+
+ /* `select_type` column */
+ push_str(&item_list, select_type);
+
+ /* `table` column */
+ push_string(&item_list, &table_name);
+
+ /* `partitions` column */
+ if (explain_flags & DESCRIBE_PARTITIONS)
+ {
+ if (used_partitions_set)
+ {
+ push_string(&item_list, &used_partitions);
+ }
+ else
+ item_list.push_back(item_null);
}
+ /* `type` column */
+ push_str(&item_list, join_type_str[type]);
+
+ /* `possible_keys` column */
+ StringBuffer<64> possible_keys_buf;
+ if (possible_keys.is_empty())
+ item_list.push_back(item_null);
+ else
+ push_string_list(&item_list, possible_keys, &possible_keys_buf);
+
+ /* `key` */
+ StringBuffer<64> key_str;
+ fill_key_str(&key_str, false);
+
+ if (key_str.length() > 0)
+ push_string(&item_list, &key_str);
+ else
+ item_list.push_back(item_null);
+
+ /* `key_len` */
+ StringBuffer<64> key_len_str;
+ fill_key_len_str(&key_len_str);
+
if (key_len_str.length() > 0)
push_string(&item_list, &key_len_str);
else
item_list.push_back(item_null);
/* `ref` */
- if (ref_set)
- push_string(&item_list, &ref);
+ StringBuffer<64> ref_list_buf;
+ if (ref_list.is_empty())
+ {
+ if (type == JT_FT)
+ {
+ /* Traditionally, EXPLAIN lines with type=fulltext have ref='' */
+ push_str(&item_list, "");
+ }
+ else
+ item_list.push_back(item_null);
+ }
else
- item_list.push_back(item_null);
+ push_string_list(&item_list, ref_list, &ref_list_buf);
/* `rows` */
if (rows_set)
@@ -629,6 +1040,276 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
}
+bool String_list::append_str(MEM_ROOT *mem_root, const char *str)
+{
+ size_t len= strlen(str);
+ char *cp;
+ if (!(cp = (char*)alloc_root(mem_root, len+1)))
+ return 1;
+ memcpy(cp, str, len+1);
+ push_back(cp);
+ return 0;
+}
+
+
+static void write_item(Json_writer *writer, Item *item)
+{
+ THD *thd= current_thd;
+ char item_buf[256];
+ String str(item_buf, sizeof(item_buf), &my_charset_bin);
+ str.length(0);
+
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+
+ item->print(&str, QT_EXPLAIN);
+
+ thd->variables.option_bits= save_option_bits;
+ writer->add_str(str.c_ptr_safe());
+}
+
+
+void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_tag tag)
+{
+ switch (tag)
+ {
+ case ET_OPEN_FULL_TABLE:
+ writer->add_member("open_full_table").add_bool(true);
+ break;
+ case ET_SCANNED_0_DATABASES:
+ writer->add_member("scanned_databases").add_ll(0);
+ break;
+ case ET_SCANNED_1_DATABASE:
+ writer->add_member("scanned_databases").add_ll(1);
+ break;
+ case ET_SCANNED_ALL_DATABASES:
+ writer->add_member("scanned_databases").add_str("all");
+ break;
+ case ET_SKIP_OPEN_TABLE:
+ writer->add_member("skip_open_table").add_bool(true);
+ break;
+ case ET_OPEN_FRM_ONLY:
+ writer->add_member("open_frm_only").add_bool(true);
+ break;
+ case ET_USING_INDEX_CONDITION:
+ writer->add_member("index_condition");
+ write_item(writer, pushed_index_cond);
+ break;
+ case ET_USING_WHERE:
+ {
+ /*
+ We are printing the condition that is checked when scanning this
+ table.
+ - when join buffer is used, it is cache_cond.
+ - in other cases, it is where_cond.
+ */
+ Item *item= bka_type.is_using_jbuf()? cache_cond: where_cond;
+ if (item)
+ {
+ writer->add_member("attached_condition");
+ write_item(writer, item);
+ }
+ }
+ break;
+ case ET_USING_INDEX:
+ writer->add_member("using_index").add_bool(true);
+ break;
+ case ET_USING:
+ // index merge: case ET_USING
+ break;
+ case ET_USING_JOIN_BUFFER:
+ /* Do nothing. Join buffer is handled differently */
+ case ET_START_TEMPORARY:
+ case ET_END_TEMPORARY:
+ /* Handled as "duplicates_removal: { ... } */
+ break;
+ case ET_FIRST_MATCH:
+ writer->add_member("first_match").add_str(firstmatch_table_name.c_ptr());
+ break;
+ case ET_LOOSESCAN:
+ writer->add_member("loose_scan").add_bool(true);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+}
+
+
+void Explain_table_access::print_explain_json(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ if (bka_type.is_using_jbuf())
+ {
+ writer->add_member("block-nl-join").start_object();
+ }
+
+ writer->add_member("table").start_object();
+
+ writer->add_member("table_name").add_str(table_name);
+ // partitions
+ writer->add_member("access_type").add_str(join_type_str[type]);
+ if (!possible_keys.is_empty())
+ {
+ List_iterator_fast<char> it(possible_keys);
+ const char *name;
+ writer->add_member("possible_keys").start_array();
+ while ((name= it++))
+ writer->add_str(name);
+ writer->end_array();
+ }
+
+ /* `key` */
+ /* For non-basic quick select, 'key' will not be present */
+ if (!quick_info || quick_info->is_basic())
+ {
+ StringBuffer<64> key_str;
+ fill_key_str(&key_str, true);
+ if (key_str.length())
+ writer->add_member("key").add_str(key_str);
+ }
+
+ /* `key_length` */
+ StringBuffer<64> key_len_str;
+ fill_key_len_str(&key_len_str);
+ if (key_len_str.length())
+ writer->add_member("key_length").add_str(key_len_str);
+
+ /* `used_key_parts` */
+ String_list *parts_list= NULL;
+ if (quick_info && quick_info->is_basic())
+ parts_list= &quick_info->range.key_parts_list;
+ else
+ parts_list= &key.key_parts_list;
+
+ if (parts_list && !parts_list->is_empty())
+ {
+ List_iterator_fast<char> it(*parts_list);
+ const char *name;
+ writer->add_member("used_key_parts").start_array();
+ while ((name= it++))
+ writer->add_str(name);
+ writer->end_array();
+ }
+
+ if (quick_info && !quick_info->is_basic())
+ {
+ writer->add_member("index_merge").start_object();
+ quick_info->print_json(writer);
+ writer->end_object();
+ }
+
+ /* `ref` */
+ if (!ref_list.is_empty())
+ {
+ List_iterator_fast<char> it(ref_list);
+ const char *str;
+ writer->add_member("ref").start_array();
+ while ((str= it++))
+ writer->add_str(str);
+ writer->end_array();
+ }
+
+ /* r_loops (not present in tabular output) */
+ if (is_analyze)
+ {
+ writer->add_member("r_loops").add_ll(tracker.get_loops());
+ }
+
+ /* `rows` */
+ if (rows_set)
+ writer->add_member("rows").add_ll(rows);
+
+ /* `r_rows` */
+ if (is_analyze)
+ {
+ writer->add_member("r_rows");
+ if (tracker.has_scans())
+ {
+ ha_rows avg_rows= tracker.get_avg_rows();
+ writer->add_ll(avg_rows);
+ }
+ else
+ writer->add_null();
+ }
+
+ /* `filtered` */
+ if (filtered_set)
+ writer->add_member("filtered").add_double(filtered);
+
+ /* `r_filtered` */
+ if (is_analyze)
+ {
+ writer->add_member("r_filtered");
+ if (tracker.has_scans())
+ writer->add_double(tracker.get_filtered_after_where()*100.0);
+ else
+ writer->add_null();
+ }
+
+ for (int i=0; i < (int)extra_tags.elements(); i++)
+ {
+ tag_to_json(writer, extra_tags.at(i));
+ }
+
+ if (bka_type.is_using_jbuf())
+ {
+ writer->end_object();
+ writer->add_member("buffer_type").add_str(bka_type.incremental?
+ "incremental":"flat");
+ writer->add_member("join_type").add_str(bka_type.join_alg);
+ if (bka_type.mrr_type.length())
+ writer->add_member("mrr_type").add_str(bka_type.mrr_type);
+ if (where_cond)
+ {
+ writer->add_member("attached_condition");
+ write_item(writer, where_cond);
+ }
+
+ if (is_analyze)
+ {
+ //writer->add_member("r_loops").add_ll(jbuf_tracker.get_loops());
+ writer->add_member("r_filtered");
+ if (jbuf_tracker.has_scans())
+ writer->add_double(jbuf_tracker.get_filtered_after_where()*100.0);
+ else
+ writer->add_null();
+ }
+ }
+
+ if (derived_select_number)
+ {
+ /* This is a derived table. Print its contents here */
+ writer->add_member("materialized").start_object();
+ Explain_node *node= query->get_node(derived_select_number);
+ node->print_explain_json(query, writer, is_analyze);
+ writer->end_object();
+ }
+ if (non_merged_sjm_number)
+ {
+ /* This is a non-merged semi-join table. Print its contents here */
+ writer->add_member("materialized").start_object();
+ writer->add_member("unique").add_ll(1);
+ Explain_node *node= query->get_node(non_merged_sjm_number);
+ node->connection_type= Explain_node::EXPLAIN_NODE_NON_MERGED_SJ;
+ node->print_explain_json(query, writer, is_analyze);
+ writer->end_object();
+ }
+ if (sjm_nest)
+ {
+ /* This is a non-merged semi-join table. Print its contents here */
+ writer->add_member("materialized").start_object();
+ writer->add_member("unique").add_ll(1);
+ sjm_nest->print_explain_json(query, writer, is_analyze);
+ writer->end_object();
+ }
+
+ writer->end_object();
+}
+
+
/*
Elements in this array match members of enum Extra_tag, defined in
sql_explain.h
@@ -758,6 +1439,35 @@ void Explain_quick_select::print_extra(String *str)
print_extra_recursive(str);
}
+void Explain_quick_select::print_json(Json_writer *writer)
+{
+ if (is_basic())
+ {
+ writer->add_member("range").start_object();
+
+ writer->add_member("key").add_str(range.get_key_name());
+
+ List_iterator_fast<char> it(range.key_parts_list);
+ const char *name;
+ writer->add_member("used_key_parts").start_array();
+ while ((name= it++))
+ writer->add_str(name);
+ writer->end_array();
+
+ writer->end_object();
+ }
+ else
+ {
+ writer->add_member(get_name_by_type()).start_object();
+
+ List_iterator_fast<Explain_quick_select> it (children);
+ Explain_quick_select* child;
+ while ((child = it++))
+ child->print_json(writer);
+
+ writer->end_object();
+ }
+}
void Explain_quick_select::print_extra_recursive(String *str)
{
@@ -867,7 +1577,7 @@ int Explain_delete::print_explain(Explain_query *query,
{
if (deleting_all_rows)
{
- const char *msg= "Deleting all rows";
+ const char *msg= STR_DELETING_ALL_ROWS;
int res= print_explain_message_line(output, explain_flags, is_analyze,
1 /*select number*/,
select_type, &rows, msg);
@@ -882,6 +1592,27 @@ int Explain_delete::print_explain(Explain_query *query,
}
+void Explain_delete::print_explain_json(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ if (deleting_all_rows)
+ {
+ writer->add_member("query_block").start_object();
+ writer->add_member("select_id").add_ll(1);
+ writer->add_member("table").start_object();
+ // just like mysql-5.6, we don't print table name. Is this ok?
+ writer->add_member("message").add_str(STR_DELETING_ALL_ROWS);
+ writer->end_object(); // table
+ writer->end_object(); // query_block
+ return;
+ }
+ Explain_update::print_explain_json(query, writer, is_analyze);
+}
+
+
int Explain_update::print_explain(Explain_query *query,
select_result_sink *output,
uint8 explain_flags,
@@ -893,8 +1624,8 @@ int Explain_update::print_explain(Explain_query *query,
if (impossible_where || no_partitions)
{
const char *msg= impossible_where ?
- "Impossible WHERE" :
- "No matching rows after partition pruning";
+ STR_IMPOSSIBLE_WHERE :
+ STR_NO_ROWS_AFTER_PRUNING;
int res= print_explain_message_line(output, explain_flags, is_analyze,
1 /*select number*/,
select_type,
@@ -903,7 +1634,6 @@ int Explain_update::print_explain(Explain_query *query,
return res;
}
-
if (quick_info)
{
quick_info->print_key(&key_buf);
@@ -917,10 +1647,13 @@ int Explain_update::print_explain(Explain_query *query,
extra_str.append(quick_buf);
}
}
- else
+ else if (key.get_key_name())
{
- key_buf.copy(key_str);
- key_len_buf.copy(key_len_str);
+ const char *name= key.get_key_name();
+ key_buf.set(name, strlen(name), &my_charset_bin);
+ char buf[64];
+ size_t length= longlong10_to_str(key.get_key_len(), buf, 10) - buf;
+ key_len_buf.copy(buf, length, &my_charset_bin);
}
if (using_where)
@@ -964,7 +1697,7 @@ int Explain_update::print_explain(Explain_query *query,
table_name.c_ptr(),
used_partitions_set? used_partitions.c_ptr() : NULL,
jtype,
- possible_keys_line.length()? possible_keys_line.c_ptr(): NULL,
+ &possible_keys,
key_buf.length()? key_buf.c_ptr() : NULL,
key_len_buf.length() ? key_len_buf.c_ptr() : NULL,
NULL, /* 'ref' is always NULL in single-table EXPLAIN DELETE */
@@ -977,6 +1710,140 @@ int Explain_update::print_explain(Explain_query *query,
}
+void Explain_update::print_explain_json(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ writer->add_member("query_block").start_object();
+ writer->add_member("select_id").add_ll(1);
+
+ if (impossible_where || no_partitions)
+ {
+ const char *msg= impossible_where ? STR_IMPOSSIBLE_WHERE :
+ STR_NO_ROWS_AFTER_PRUNING;
+ writer->add_member("table").start_object();
+ writer->add_member("message").add_str(msg);
+ writer->end_object(); // table
+ writer->end_object(); // query_block
+ return;
+ }
+
+ writer->add_member("table").start_object();
+
+ if (get_type() == EXPLAIN_UPDATE)
+ writer->add_member("update").add_ll(1);
+ else
+ writer->add_member("delete").add_ll(1);
+
+ writer->add_member("table_name").add_str(table_name);
+ writer->add_member("access_type").add_str(join_type_str[jtype]);
+
+ if (!possible_keys.is_empty())
+ {
+ List_iterator_fast<char> it(possible_keys);
+ const char *name;
+ writer->add_member("possible_keys").start_array();
+ while ((name= it++))
+ writer->add_str(name);
+ writer->end_array();
+ }
+
+ /* `key`, `key_length` */
+ if (quick_info && quick_info->is_basic())
+ {
+ StringBuffer<64> key_buf;
+ StringBuffer<64> key_len_buf;
+ quick_info->print_extra_recursive(&key_buf);
+ quick_info->print_key_len(&key_len_buf);
+
+ writer->add_member("key").add_str(key_buf);
+ writer->add_member("key_length").add_str(key_len_buf);
+ }
+ else if (key.get_key_name())
+ {
+ writer->add_member("key").add_str(key.get_key_name());
+ writer->add_member("key_length").add_str(key.get_key_len());
+ }
+
+ /* `used_key_parts` */
+ String_list *parts_list= NULL;
+ if (quick_info && quick_info->is_basic())
+ parts_list= &quick_info->range.key_parts_list;
+ else
+ parts_list= &key.key_parts_list;
+
+ if (parts_list && !parts_list->is_empty())
+ {
+ List_iterator_fast<char> it(*parts_list);
+ const char *name;
+ writer->add_member("used_key_parts").start_array();
+ while ((name= it++))
+ writer->add_str(name);
+ writer->end_array();
+ }
+
+ if (quick_info && !quick_info->is_basic())
+ {
+ writer->add_member("index_merge").start_object();
+ quick_info->print_json(writer);
+ writer->end_object();
+ }
+
+#if 0
+ /* `ref` */
+ if (!ref_list.is_empty())
+ {
+ List_iterator_fast<char> it(ref_list);
+ const char *str;
+ writer->add_member("ref").start_array();
+ while ((str= it++))
+ writer->add_str(str);
+ writer->end_array();
+ }
+#endif
+
+ /* `rows` */
+ writer->add_member("rows").add_ll(rows);
+
+ /* `r_rows` */
+ if (is_analyze && tracker.has_scans())
+ {
+ ha_rows avg_rows= tracker.get_avg_rows();
+ writer->add_member("r_rows").add_ll(avg_rows);
+ }
+
+ /* UPDATE/DELETE do not produce `filtered` estimate */
+
+ /* `r_filtered` */
+ if (is_analyze)
+ {
+ double r_filtered= tracker.get_filtered_after_where();
+ writer->add_member("r_filtered").add_double(r_filtered);
+ }
+
+ if (mrr_type.length() != 0)
+ writer->add_member("mrr_type").add_str(mrr_type.ptr());
+
+ if (using_filesort)
+ writer->add_member("using_filesort").add_ll(1);
+
+ if (using_io_buffer)
+ writer->add_member("using_io_buffer").add_ll(1);
+
+ if (where_cond)
+ {
+ writer->add_member("attached_condition");
+ write_item(writer, where_cond);
+ }
+
+ writer->end_object(); // table
+ print_explain_json_for_children(query, writer, is_analyze);
+ writer->end_object(); // query_block
+}
+
+
int Explain_insert::print_explain(Explain_query *query,
select_result_sink *output,
uint8 explain_flags,
@@ -1001,6 +1868,20 @@ int Explain_insert::print_explain(Explain_query *query,
return print_explain_for_children(query, output, explain_flags, is_analyze);
}
+void Explain_insert::print_explain_json(Explain_query *query,
+ Json_writer *writer, bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+
+ writer->add_member("query_block").start_object();
+ writer->add_member("select_id").add_ll(1);
+ writer->add_member("table").start_object();
+ writer->add_member("table_name").add_str(table_name.c_ptr());
+ writer->end_object(); // table
+ print_explain_json_for_children(query, writer, is_analyze);
+ writer->end_object(); // query_block
+}
+
void delete_explain_query(LEX *lex)
{
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index c4de08f6a4c..68ef59c732d 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -14,7 +14,21 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* Data structures for ANALYZE */
+
+class String_list: public List<char>
+{
+public:
+ bool append_str(MEM_ROOT *mem_root, const char *str);
+};
+
+
+/*
+ A class for collecting read statistics.
+
+ The idea is that we run several scans. Each scan gets rows, and then filters
+ some of them out. We count scans, rows, and rows left after filtering.
+*/
+
class Table_access_tracker
{
public:
@@ -29,6 +43,7 @@ public:
ha_rows r_rows_after_where; /* Rows after applying attached part of WHERE */
bool has_scans() { return (r_scans != 0); }
+ ha_rows get_loops() { return r_scans; }
ha_rows get_avg_rows()
{
return r_scans ? (ha_rows)rint((double) r_rows / r_scans): 0;
@@ -67,24 +82,43 @@ const int FAKE_SELECT_LEX_ID= (int)UINT_MAX;
class Explain_query;
+class Json_writer;
+
/*
A node can be either a SELECT, or a UNION.
*/
class Explain_node : public Sql_alloc
{
public:
+ /* A type specifying what kind of node this is */
enum explain_node_type
{
EXPLAIN_UNION,
- EXPLAIN_SELECT,
+ EXPLAIN_SELECT,
+ EXPLAIN_BASIC_JOIN,
EXPLAIN_UPDATE,
EXPLAIN_DELETE,
EXPLAIN_INSERT
};
+
+ /* How this node is connected */
+ enum explain_connection_type {
+ EXPLAIN_NODE_OTHER,
+ EXPLAIN_NODE_DERIVED, /* Materialized derived table */
+ EXPLAIN_NODE_NON_MERGED_SJ /* aka JTBM semi-join */
+ };
+
+ Explain_node() : connection_type(EXPLAIN_NODE_OTHER) {}
virtual enum explain_node_type get_type()= 0;
virtual int get_select_id()= 0;
+ /*
+ How this node is connected to its parent.
+ (NOTE: EXPLAIN_NODE_NON_MERGED_SJ is set very late currently)
+ */
+ enum explain_connection_type connection_type;
+
/*
A node may have children nodes. When a node's explain structure is
created, children nodes may not yet have QPFs. This is why we store ids.
@@ -97,9 +131,13 @@ public:
virtual int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze)=0;
-
+ virtual void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze)= 0;
+
int print_explain_for_children(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ void print_explain_json_for_children(Explain_query *query,
+ Json_writer *writer, bool is_analyze);
virtual ~Explain_node(){}
};
@@ -107,6 +145,49 @@ public:
class Explain_table_access;
+/*
+ A basic join. This is only used for SJ-Materialization nests.
+
+ Basic join doesn't have ORDER/GROUP/DISTINCT operations. It also cannot be
+ degenerate.
+
+ It has its own select_id.
+*/
+class Explain_basic_join : public Explain_node
+{
+public:
+ enum explain_node_type get_type() { return EXPLAIN_BASIC_JOIN; }
+
+ Explain_basic_join() : join_tabs(NULL) {}
+ ~Explain_basic_join();
+
+ bool add_table(Explain_table_access *tab)
+ {
+ if (!join_tabs)
+ {
+ join_tabs= (Explain_table_access**) my_malloc(sizeof(Explain_table_access*) *
+ MAX_TABLES, MYF(0));
+ n_join_tabs= 0;
+ }
+ join_tabs[n_join_tabs++]= tab;
+ return false;
+ }
+
+ int get_select_id() { return select_id; }
+
+ int select_id;
+
+ int print_explain(Explain_query *query, select_result_sink *output,
+ uint8 explain_flags, bool is_analyze);
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
+
+ /* A flat array of Explain structs for tables. */
+ Explain_table_access** join_tabs;
+ uint n_join_tabs;
+};
+
+
/*
EXPLAIN structure for a SELECT.
@@ -122,30 +203,16 @@ class Explain_table_access;
a way get node's children.
*/
-class Explain_select : public Explain_node
+class Explain_select : public Explain_basic_join
{
public:
enum explain_node_type get_type() { return EXPLAIN_SELECT; }
Explain_select() :
- message(NULL), join_tabs(NULL),
+ message(NULL),
using_temporary(false), using_filesort(false)
{}
-
- ~Explain_select();
- bool add_table(Explain_table_access *tab)
- {
- if (!join_tabs)
- {
- join_tabs= (Explain_table_access**) my_malloc(sizeof(Explain_table_access*) *
- MAX_TABLES, MYF(0));
- n_join_tabs= 0;
- }
- join_tabs[n_join_tabs++]= tab;
- return false;
- }
-
/*
This is used to save the results of "late" test_if_skip_sort_order() calls
that are made from JOIN::exec
@@ -153,30 +220,22 @@ public:
void replace_table(uint idx, Explain_table_access *new_tab);
public:
- int select_id;
const char *select_type;
- int get_select_id() { return select_id; }
-
/*
If message != NULL, this is a degenerate join plan, and all subsequent
members have no info
*/
const char *message;
- /*
- A flat array of Explain structs for tables. The order is "just like EXPLAIN
- would print them".
- */
- Explain_table_access** join_tabs;
- uint n_join_tabs;
-
/* Global join attributes. In tabular form, they are printed on the first row */
bool using_temporary;
bool using_filesort;
int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
Table_access_tracker *get_using_temporary_read_tracker()
{
@@ -222,6 +281,8 @@ public:
}
int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
const char *fake_select_type;
bool using_filesort;
@@ -236,6 +297,8 @@ public:
return &tmptable_read_tracker;
}
private:
+ uint make_union_table_name(char *buf);
+
Table_access_tracker fake_select_lex_tracker;
/* This one is for reading after ORDER BY */
Table_access_tracker tmptable_read_tracker;
@@ -311,6 +374,8 @@ public:
/* Return tabular EXPLAIN output as a text string */
bool print_explain_str(THD *thd, String *out_str, bool is_analyze);
+ void print_explain_json(select_result_sink *output, bool is_analyze);
+
/* If true, at least part of EXPLAIN can be printed */
bool have_query_plan() { return insert_plan || upd_del_plan|| get_node(1) != NULL; }
@@ -407,24 +472,24 @@ class Explain_index_use : public Sql_alloc
{
char *key_name;
uint key_len;
- /* will add #keyparts here if we implement EXPLAIN FORMAT=JSON */
public:
+ String_list key_parts_list;
+
+ Explain_index_use()
+ {
+ clear();
+ }
- void set(MEM_ROOT *root, const char *key_name_arg, uint key_len_arg)
+ void clear()
{
- if (key_name_arg)
- {
- size_t name_len= strlen(key_name_arg);
- if ((key_name= (char*)alloc_root(root, name_len+1)))
- memcpy(key_name, key_name_arg, name_len+1);
- }
- else
- key_name= NULL;
- key_len= key_len_arg;
+ key_name= NULL;
+ key_len= (uint)-1;
}
+ void set(MEM_ROOT *root, KEY *key_name, uint key_len_arg);
+ void set_pseudo_key(MEM_ROOT *root, const char *key_name);
- inline const char *get_key_name() { return key_name; }
- inline uint get_key_len() { return key_len; }
+ inline const char *get_key_name() const { return key_name; }
+ inline uint get_key_len() const { return key_len; }
};
@@ -438,6 +503,13 @@ public:
{}
const int quick_type;
+
+ bool is_basic()
+ {
+ return (quick_type == QUICK_SELECT_I::QS_TYPE_RANGE ||
+ quick_type == QUICK_SELECT_I::QS_TYPE_RANGE_DESC ||
+ quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX);
+ }
/* This is used when quick_type == QUICK_SELECT_I::QS_TYPE_RANGE */
Explain_index_use range;
@@ -448,8 +520,11 @@ public:
void print_extra(String *str);
void print_key(String *str);
void print_key_len(String *str);
-private:
+
+ void print_json(Json_writer *writer);
+
void print_extra_recursive(String *str);
+private:
const char *get_name_by_type();
};
@@ -461,26 +536,40 @@ private:
class Explain_table_access : public Sql_alloc
{
public:
+ Explain_table_access() :
+ derived_select_number(0),
+ non_merged_sjm_number(0),
+ start_dups_weedout(false),
+ end_dups_weedout(false),
+ where_cond(NULL),
+ cache_cond(NULL),
+ pushed_index_cond(NULL),
+ sjm_nest(NULL)
+ {}
+ ~Explain_table_access() { delete sjm_nest; }
+
void push_extra(enum explain_extra_tag extra_tag);
/* Internals */
public:
- /*
- 0 means this tab is not inside SJM nest and should use Explain_select's id
- other value means the tab is inside an SJM nest.
- */
- int sjm_nest_select_id;
-
/* id and 'select_type' are cared-of by the parent Explain_select */
StringBuffer<32> table_name;
+ /*
+ Non-zero number means this is a derived table. The number can be used to
+ find the query plan for the derived table
+ */
+ int derived_select_number;
+ /* TODO: join with the previous member. */
+ int non_merged_sjm_number;
+
enum join_type type;
StringBuffer<32> used_partitions;
bool used_partitions_set;
- /* Empty string means "NULL" will be printed */
- StringBuffer<32> possible_keys_str;
+ /* Empty means "NULL" will be printed */
+ String_list possible_keys;
/*
Index use: key name and length.
@@ -498,8 +587,7 @@ public:
*/
Explain_index_use hash_next_key;
- bool ref_set; /* not set means 'NULL' should be printed */
- StringBuffer<32> ref;
+ String_list ref_list;
bool rows_set; /* not set means 'NULL' should be printed */
ha_rows rows;
@@ -530,17 +618,40 @@ public:
StringBuffer<32> firstmatch_table_name;
+ bool start_dups_weedout;
+ bool end_dups_weedout;
+
+ /*
+ Note: lifespan of WHERE condition is less than lifespan of this object.
+ The below two are valid if tags include "ET_USING_WHERE".
+ (TODO: indexsubquery may put ET_USING_WHERE without setting where_cond?)
+ */
+ Item *where_cond;
+ Item *cache_cond;
+
+ Item *pushed_index_cond;
+
+ Explain_basic_join *sjm_nest;
+
int print_explain(select_result_sink *output, uint8 explain_flags,
bool is_analyze,
uint select_id, const char *select_type,
bool using_temporary, bool using_filesort);
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
+
+ /* ANALYZE members */
- /* ANALYZE members*/
+ /* Tracker for reading the table */
Table_access_tracker tracker;
Table_access_tracker jbuf_tracker;
private:
void append_tag_name(String *str, enum explain_extra_tag tag);
+ void fill_key_str(String *key_str, bool is_json) const;
+ void fill_key_len_str(String *key_len_str) const;
+ double get_r_filtered();
+ void tag_to_json(Json_writer *writer, enum explain_extra_tag tag);
};
@@ -567,14 +678,22 @@ public:
StringBuffer<64> table_name;
enum join_type jtype;
- StringBuffer<128> possible_keys_line;
- StringBuffer<128> key_str;
- StringBuffer<128> key_len_str;
+ String_list possible_keys;
+
+ /* Used key when doing a full index scan (possibly with limit) */
+ Explain_index_use key;
+
+ /*
+ MRR that's used with quick select. This should probably belong to the
+ quick select
+ */
StringBuffer<64> mrr_type;
Explain_quick_select *quick_info;
bool using_where;
+ Item *where_cond;
+
ha_rows rows;
bool using_filesort;
@@ -585,6 +704,8 @@ public:
virtual int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ virtual void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
};
@@ -605,6 +726,8 @@ public:
int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
};
@@ -626,6 +749,8 @@ public:
virtual int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
+ virtual void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
};
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 9ad9c15b3ff..3f0e5673736 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -485,6 +485,7 @@ void lex_start(THD *thd)
lex->select_lex.group_list_ptrs->clear();
lex->describe= 0;
lex->analyze_stmt= 0;
+ lex->explain_json= false;
lex->subqueries= FALSE;
lex->context_analysis_only= 0;
lex->derived_tables= 0;
@@ -4269,6 +4270,13 @@ int st_select_lex_unit::save_union_explain(Explain_query *output)
SELECT_LEX *first= first_select();
Explain_union *eu= new (output->mem_root) Explain_union;
+ if (derived)
+ eu->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
+ /*
+ Note: Non-merged semi-joins cannot be made out of UNIONs currently, so we
+ don't ever set EXPLAIN_NODE_NON_MERGED_SJ.
+ */
+
for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
eu->add_select(sl->select_number);
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index dbb5940611c..dec46c61ef4 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -2312,8 +2312,9 @@ public:
void set_impossible_where() { impossible_where= true; }
void set_no_partitions() { no_partitions= true; }
- void save_explain_data(Explain_query *query);
- void save_explain_data_intern(Explain_query *query, Explain_update *eu);
+ void save_explain_data(MEM_ROOT *mem_root, Explain_query *query);
+ void save_explain_data_intern(MEM_ROOT *mem_root, Explain_query *query,
+ Explain_update *eu);
virtual ~Update_plan() {}
@@ -2344,7 +2345,7 @@ public:
scanned_rows= rows_arg;
}
- void save_explain_data(Explain_query *query);
+ void save_explain_data(MEM_ROOT *mem_root, Explain_query *query);
};
@@ -2509,6 +2510,7 @@ struct LEX: public Query_tables_list
uint table_count;
uint8 describe;
bool analyze_stmt; /* TRUE<=> this is "ANALYZE $stmt" */
+ bool explain_json;
/*
A flag that indicates what kinds of derived tables are present in the
query (0 if no derived tables, otherwise a combination of flags
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 38cf5964c68..436f69aafd9 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -97,6 +97,8 @@
#include "log_slow.h"
#include "sql_bootstrap.h"
+#include "my_json_writer.h"
+
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
#ifdef WITH_ARIA_STORAGE_ENGINE
@@ -5722,19 +5724,27 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
top-level LIMIT
*/
result->reset_offset_limit();
- lex->explain->print_explain(result, lex->describe, lex->analyze_stmt);
- if (lex->describe & DESCRIBE_EXTENDED)
+ if (lex->explain_json)
{
- char buff[1024];
- String str(buff,(uint32) sizeof(buff), system_charset_info);
- str.length(0);
- /*
- The warnings system requires input in utf8, @see
- mysqld_show_warnings().
- */
- lex->unit.print(&str, QT_TO_SYSTEM_CHARSET);
- push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_YES, str.c_ptr_safe());
+ lex->explain->print_explain_json(result, lex->analyze_stmt);
+ }
+ else
+ {
+ lex->explain->print_explain(result, thd->lex->describe,
+ thd->lex->analyze_stmt);
+ if (lex->describe & DESCRIBE_EXTENDED)
+ {
+ char buff[1024];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ str.length(0);
+ /*
+ The warnings system requires input in utf8, @see
+ mysqld_show_warnings().
+ */
+ lex->unit.print(&str, QT_TO_SYSTEM_CHARSET);
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_YES, str.c_ptr_safe());
+ }
}
}
@@ -7749,6 +7759,9 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
fake_select_lex->context.resolve_in_select_list= TRUE;
fake_select_lex->context.select_lex= fake_select_lex;
+ fake_select_lex->nest_level_base= first_select()->nest_level_base;
+ fake_select_lex->nest_level=first_select()->nest_level;
+
if (!is_union())
{
/*
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 97692463310..531f968fd8c 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -8232,6 +8232,41 @@ JOIN_TAB *next_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind,
}
+/*
+ Enumerate JOIN_TABs in "EXPLAIN order". This order
+ - const tabs are included
+ - we enumerate "optimization tabs".
+ -
+*/
+
+JOIN_TAB *first_explain_order_tab(JOIN* join)
+{
+ JOIN_TAB* tab;
+ tab= join->table_access_tabs;
+ return (tab->bush_children) ? tab->bush_children->start : tab;
+}
+
+
+JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab)
+{
+ /* If we're inside SJM nest and have reached its end, get out */
+ if (tab->last_leaf_in_bush)
+ return tab->bush_root_tab;
+
+ /* Move to next tab in the array we're traversing */
+ tab++;
+
+ if (tab == join->table_access_tabs + join->top_join_tab_count)
+ return NULL; /* Outside SJM nest and reached EOF */
+
+ if (tab->bush_children)
+ return tab->bush_children->start;
+
+ return tab;
+}
+
+
+
JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls)
{
JOIN_TAB *tab= join->join_tab;
@@ -16426,6 +16461,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
((field_count-param->hidden_field_count)+
(share->uniques ? MY_TEST(null_pack_length) : 0));
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
+ keyinfo->usable_key_parts= keyinfo->user_defined_key_parts;
table->distinct= 1;
share->keys= 1;
if (!(key_part_info= (KEY_PART_INFO*)
@@ -20861,6 +20897,24 @@ static void free_blobs(Field **ptr)
}
+/*
+ @brief
+ Remove duplicates from a temporary table.
+
+ @detail
+ Remove duplicate rows from a temporary table. This is used for e.g. queries
+ like
+
+ select distinct count(*) as CNT from tbl group by col
+
+ Here, we get a group table with count(*) values. It is not possible to
+ prevent duplicates from appearing in the table (as we don't know the values
+ before we've done the grouping). Because of that, we have this function to
+ scan the temptable (maybe, multiple times) and remove the duplicate rows
+
+ Rows that do not satisfy 'having' condition are also removed.
+*/
+
static int
remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
{
@@ -23121,7 +23175,6 @@ void JOIN::clear()
/*
Print an EXPLAIN line with all NULLs and given message in the 'Extra' column
- TODO: is_analyze
*/
int print_explain_message_line(select_result_sink *result,
@@ -23181,208 +23234,6 @@ int print_explain_message_line(select_result_sink *result,
/*
- Make a comma-separated list of possible_keys names and add it into the string
-*/
-
-void make_possible_keys_line(TABLE *table, key_map possible_keys, String *line)
-{
- if (!possible_keys.is_clear_all())
- {
- uint j;
- for (j=0 ; j < table->s->keys ; j++)
- {
- if (possible_keys.is_set(j))
- {
- if (line->length())
- line->append(',');
- line->append(table->key_info[j].name,
- strlen(table->key_info[j].name),
- system_charset_info);
- }
- }
- }
-}
-
-/*
- Print an EXPLAIN output row, based on information provided in the parameters
-
- @note
- Parameters that may have NULL value in EXPLAIN output, should be passed
- (char*)NULL.
-
- @return
- 0 - OK
- 1 - OOM Error
-*/
-
-int print_explain_row(select_result_sink *result,
- uint8 options, bool is_analyze,
- uint select_number,
- const char *select_type,
- const char *table_name,
- const char *partitions,
- enum join_type jtype,
- const char *possible_keys,
- const char *index,
- const char *key_len,
- const char *ref,
- ha_rows *rows,
- ha_rows *r_rows,
- double r_filtered,
- const char *extra)
-{
- Item *item_null= new Item_null();
- List<Item> item_list;
- Item *item;
-
- item_list.push_back(new Item_int((int32) select_number));
- item_list.push_back(new Item_string_sys(select_type));
- item_list.push_back(new Item_string_sys(table_name));
- if (options & DESCRIBE_PARTITIONS)
- {
- if (partitions)
- {
- item_list.push_back(new Item_string_sys(partitions));
- }
- else
- item_list.push_back(item_null);
- }
-
- const char *jtype_str= join_type_str[jtype];
- item_list.push_back(new Item_string_sys(jtype_str));
-
- item= possible_keys? new Item_string_sys(possible_keys) : item_null;
- item_list.push_back(item);
-
- /* 'index */
- item= index ? new Item_string_sys(index) : item_null;
- item_list.push_back(item);
-
- /* 'key_len */
- item= key_len ? new Item_string_sys(key_len) : item_null;
- item_list.push_back(item);
-
- /* 'ref' */
- item= ref ? new Item_string_sys(ref) : item_null;
- item_list.push_back(item);
-
- /* 'rows' */
- if (rows)
- {
- item_list.push_back(new Item_int(*rows,
- MY_INT64_NUM_DECIMAL_DIGITS));
- }
- else
- item_list.push_back(item_null);
-
- /* 'r_rows' */
- if (is_analyze)
- {
- if (r_rows)
- {
- item_list.push_back(new Item_int(*r_rows,
- MY_INT64_NUM_DECIMAL_DIGITS));
- }
- else
- item_list.push_back(item_null);
- }
-
- /* 'filtered' */
- const double filtered=100.0;
- if (options & DESCRIBE_EXTENDED || is_analyze)
- item_list.push_back(new Item_float(filtered, 2));
-
- /* 'r_filtered' */
- if (is_analyze)
- item_list.push_back(new Item_float(r_filtered, 2));
-
- /* 'Extra' */
- if (extra)
- item_list.push_back(new Item_string_sys(extra));
- else
- item_list.push_back(item_null);
-
- if (result->send_data(item_list))
- return 1;
- return 0;
-}
-
-
-int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
- SELECT_LEX *select_lex, uint8 explain_flags)
-{
- Item *item_null= new Item_null();
- List<Item> item_list;
- if (on_the_fly)
- select_lex->set_explain_type(on_the_fly);
- /*
- here we assume that the query will return at least two rows, so we
- show "filesort" in EXPLAIN. Of course, sometimes we'll be wrong
- and no filesort will be actually done, but executing all selects in
- the UNION to provide precise EXPLAIN information will hardly be
- appreciated :)
- */
- char table_name_buffer[SAFE_NAME_LEN];
- item_list.empty();
- /* id */
- item_list.push_back(new Item_null);
- /* select_type */
- item_list.push_back(new Item_string_sys(select_lex->type));
- /* table */
- {
- SELECT_LEX *sl= select_lex->master_unit()->first_select();
- uint len= 6, lastop= 0;
- memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
- for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select())
- {
- len+= lastop;
- lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
- "%u,", sl->select_number);
- }
- if (sl || len + lastop >= NAME_LEN)
- {
- memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
- len+= 4;
- }
- else
- {
- len+= lastop;
- table_name_buffer[len - 1]= '>'; // change ',' to '>'
- }
- item_list.push_back(new Item_string_sys(table_name_buffer, len));
- }
- /* partitions */
- if (explain_flags & DESCRIBE_PARTITIONS)
- item_list.push_back(item_null);
- /* type */
- item_list.push_back(new Item_string_sys(join_type_str[JT_ALL]));
-
- /* possible_keys */
- item_list.push_back(item_null);
- /* key*/
- item_list.push_back(item_null);
- /* key_len */
- item_list.push_back(item_null);
- /* ref */
- item_list.push_back(item_null);
- /* in_rows */
- if (explain_flags & DESCRIBE_EXTENDED)
- item_list.push_back(item_null);
- /* rows */
- item_list.push_back(item_null);
- /* extra */
- if (select_lex->master_unit()->global_parameters()->order_list.first)
- item_list.push_back(new Item_string_sys("Using filesort", 14));
- else
- item_list.push_back(new Item_string_sys("", 0));
-
- if (result->send_data(item_list))
- return 1;
- return 0;
-}
-
-
-/*
Append MRR information from quick select to the given string
*/
@@ -23403,21 +23254,16 @@ void explain_append_mrr_info(QUICK_RANGE_SELECT *quick, String *res)
///////////////////////////////////////////////////////////////////////////////
-// TODO: join with make_possible_keys_line ?
-void append_possible_keys(String *str, TABLE *table, key_map possible_keys)
+int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
+ key_map possible_keys)
{
uint j;
for (j=0 ; j < table->s->keys ; j++)
{
if (possible_keys.is_set(j))
- {
- if (str->length())
- str->append(',');
- str->append(table->key_info[j].name,
- strlen(table->key_info[j].name),
- system_charset_info);
- }
+ list.append_str(alloc, table->key_info[j].name);
}
+ return 0;
}
// TODO: this function is only applicable for the first non-const optimization
@@ -23450,32 +23296,20 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
TABLE *table=tab->table;
TABLE_LIST *table_list= tab->table->pos_in_table_list;
- char buff4[512];
my_bool key_read;
char table_name_buffer[SAFE_NAME_LEN];
- String tmp4(buff4,sizeof(buff4),cs);
KEY *key_info= 0;
uint key_len= 0;
- tmp4.length(0);
quick_type= -1;
QUICK_SELECT_I *quick= NULL;
- eta->key.set(thd->mem_root, NULL, (uint)-1);
+ eta->key.clear();
eta->quick_info= NULL;
tab->tracker= &eta->tracker;
tab->jbuf_tracker= &eta->jbuf_tracker;
- /* id */
- if (tab->bush_root_tab)
- {
- JOIN_TAB *first_sibling= tab->bush_root_tab->bush_children->start;
- eta->sjm_nest_select_id= first_sibling->emb_sj_nest->sj_subq_pred->get_identifier();
- }
- else
- eta->sjm_nest_select_id= 0;
-
- /* select_type is kept in Explain_select */
+ /* id and select_type are kept in Explain_select */
/* table */
if (table->derived_select_number)
@@ -23558,7 +23392,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
eta->type= tab_type;
/* Build "possible_keys" value */
- append_possible_keys(&eta->possible_keys_str, table, tab->keys);
+ // psergey-todo: why does this use thd MEM_ROOT??? Doesn't this
+ // break ANALYZE ? thd->mem_root will be freed, and after that we will
+ // attempt to print the query plan?
+ append_possible_keys(thd->mem_root, eta->possible_keys, table, tab->keys);
+ // psergey-todo: ^ check for error return code
/* Build "key", "key_len", and "ref" */
if (tab_type == JT_NEXT)
@@ -23583,21 +23421,18 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
if (key_info) /* 'index' or 'ref' access */
{
- eta->key.set(thd->mem_root, key_info->name, key_len);
+ eta->key.set(thd->mem_root, key_info, key_len);
if (tab->ref.key_parts && tab_type != JT_FT)
{
store_key **ref=tab->ref.key_copy;
for (uint kp= 0; kp < tab->ref.key_parts; kp++)
{
- if (tmp4.length())
- tmp4.append(',');
-
if ((key_part_map(1) << kp) & tab->ref.const_ref_part_map)
- tmp4.append("const");
+ eta->ref_list.append_str(thd->mem_root, "const");
else
{
- tmp4.append((*ref)->name(), strlen((*ref)->name()), cs);
+ eta->ref_list.append_str(thd->mem_root, (*ref)->name());
ref++;
}
}
@@ -23607,21 +23442,13 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
if (tab_type == JT_HASH_NEXT) /* full index scan + hash join */
{
eta->hash_next_key.set(thd->mem_root,
- table->key_info[tab->index].name,
+ & table->key_info[tab->index],
table->key_info[tab->index].key_length);
+ // psergey-todo: ^ is the above correct? are we necessarily joining on all
+ // columns?
}
- if (key_info)
- {
- if (key_info && tab_type != JT_NEXT)
- {
- eta->ref.copy(tmp4);
- eta->ref_set= true;
- }
- else
- eta->ref_set= false;
- }
- else
+ if (!key_info)
{
if (table_list && /* SJM bushes don't have table_list */
table_list->schema_table &&
@@ -23652,9 +23479,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
}
if (key_name_buf.length())
- eta->key.set(thd->mem_root, key_name_buf.c_ptr_safe(), -1);
+ eta->key.set_pseudo_key(thd->mem_root, key_name_buf.c_ptr_safe());
}
- eta->ref_set= false;
}
/* "rows" */
@@ -23719,7 +23545,10 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno &&
table->file->pushed_idx_cond)
+ {
eta->push_extra(ET_USING_INDEX_CONDITION);
+ eta->pushed_index_cond= table->file->pushed_idx_cond;
+ }
else if (tab->cache_idx_cond)
eta->push_extra(ET_USING_INDEX_CONDITION_BKA);
@@ -23749,7 +23578,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
eta->push_extra(ET_USING_WHERE_WITH_PUSHED_CONDITION);
}
else
+ {
+ eta->where_cond= tab->select->cond;
+ eta->cache_cond= tab->cache_select? tab->cache_select->cond : NULL;
eta->push_extra(ET_USING_WHERE);
+ }
}
}
if (table_list /* SJM bushes don't have table_list */ &&
@@ -23804,9 +23637,16 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
}
if (tab->first_weedout_table)
+ {
+ eta->start_dups_weedout= true;
eta->push_extra(ET_START_TEMPORARY);
+ }
if (tab->check_weed_out_table)
+ {
eta->push_extra(ET_END_TEMPORARY);
+ eta->end_dups_weedout= true;
+ }
+
else if (tab->do_firstmatch)
{
if (tab->do_firstmatch == /*join->join_tab*/ first_top_tab - 1)
@@ -23844,8 +23684,18 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
tab->cache->save_explain_data(&eta->bka_type);
}
}
+
+ /*
+ In case this is a derived table, here we remember the number of
+ subselect that used to produce it.
+ */
+ eta->derived_select_number= table->derived_select_number;
+
+ /* The same for non-merged semi-joins */
+ eta->non_merged_sjm_number = get_non_merged_semijoin_select();
}
+
/*
Save Query Plan Footprint
@@ -23876,6 +23726,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
xpl_sel->message= message;
+ if (select_lex->master_unit()->derived)
+ xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
/* Setting xpl_sel->message means that all other members are invalid */
output->add_node(xpl_sel);
}
@@ -23893,13 +23745,23 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
join->select_lex->set_explain_type(true);
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
+ if (select_lex->master_unit()->derived)
+ xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
+
+ if (need_tmp_table)
+ xpl_sel->using_temporary= true;
+
+ if (need_order)
+ xpl_sel->using_filesort= true;
JOIN_TAB* const first_top_tab= first_breadth_first_tab(join, WALK_OPTIMIZATION_TABS);
+ JOIN_TAB* prev_bush_root_tab= NULL;
- for (JOIN_TAB *tab= first_breadth_first_tab(join, WALK_OPTIMIZATION_TABS); tab;
- tab= next_breadth_first_tab(join, WALK_OPTIMIZATION_TABS, tab))
+ Explain_basic_join *cur_parent= xpl_sel;
+
+ for (JOIN_TAB *tab= first_explain_order_tab(join); tab;
+ tab= next_explain_order_tab(join, tab))
{
-
JOIN_TAB *saved_join_tab= NULL;
TABLE *table=tab->table;
@@ -23910,6 +23772,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
continue;
}
+
if (join->table_access_tabs == join->join_tab &&
tab == (first_top_tab + join->const_tables) && pre_sort_join_tab)
{
@@ -23918,20 +23781,35 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
}
Explain_table_access *eta= new (output->mem_root) Explain_table_access;
- xpl_sel->add_table(eta);
- tab->save_explain_data(eta, used_tables, distinct, first_top_tab);
-
- if (need_tmp_table)
+ if (tab->bush_root_tab != prev_bush_root_tab)
{
- need_tmp_table=0;
- xpl_sel->using_temporary= true;
- }
- if (need_order)
- {
- need_order=0;
- xpl_sel->using_filesort= true;
+ if (tab->bush_root_tab)
+ {
+ /*
+ We've entered an SJ-Materialization nest. Create an object for it.
+ */
+ cur_parent= new Explain_basic_join;
+
+ JOIN_TAB *first_child= tab->bush_root_tab->bush_children->start;
+ cur_parent->select_id=
+ first_child->emb_sj_nest->sj_subq_pred->get_identifier();
+ }
+ else
+ {
+ /*
+ We've just left an SJ-Materialization nest. We are at the join tab
+ that 'embeds the nest'
+ */
+ DBUG_ASSERT(tab->bush_children);
+ eta->sjm_nest= cur_parent;
+ cur_parent= xpl_sel;
+ }
}
+ prev_bush_root_tab= tab->bush_root_tab;
+
+ cur_parent->add_table(eta);
+ tab->save_explain_data(eta, used_tables, distinct, first_top_tab);
if (saved_join_tab)
tab= saved_join_tab;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index ca2bf59d871..42b2e6b31c2 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -527,6 +527,21 @@ typedef struct st_join_table {
bool preread_init();
bool is_sjm_nest() { return MY_TEST(bush_children); }
+
+ /*
+ If this join_tab reads a non-merged semi-join (also called jtbm), return
+ the select's number. Otherwise, return 0.
+ */
+ int get_non_merged_semijoin_select() const
+ {
+ Item_in_subselect *subq;
+ if (table->pos_in_table_list &&
+ (subq= table->pos_in_table_list->jtbm_subselect))
+ {
+ return subq->unit->first_select()->select_number;
+ }
+ return 0; /* Not a non-merged semi-join */
+ }
bool access_from_tables_is_allowed(table_map used_tables,
table_map sjm_lookup_tables)
@@ -982,7 +997,13 @@ public:
*/
uint top_join_tab_count;
uint send_group_parts;
- bool group; /**< If query contains GROUP BY clause */
+ /*
+ True if the query has GROUP BY.
+ (that is, if group_by != NULL. When DISTINCT is converted into GROUP BY, it
+ will set this, too. It is not clear why we need a separate var from
+ group_list)
+ */
+ bool group;
bool need_distinct;
/**
@@ -1826,8 +1847,10 @@ inline bool optimizer_flag(THD *thd, uint flag)
return (thd->variables.optimizer_switch & flag);
}
+/*
int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
SELECT_LEX *select_lex, uint8 select_options);
+*/
uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
ha_rows limit, ha_rows *scanned_limit,
@@ -1855,22 +1878,8 @@ int print_explain_message_line(select_result_sink *result,
ha_rows *rows,
const char *message);
void explain_append_mrr_info(QUICK_RANGE_SELECT *quick, String *res);
-int print_explain_row(select_result_sink *result,
- uint8 options, bool is_analyze,
- uint select_number,
- const char *select_type,
- const char *table_name,
- const char *partitions,
- enum join_type jtype,
- const char *possible_keys,
- const char *index,
- const char *key_len,
- const char *ref,
- ha_rows *rows,
- ha_rows *r_rows,
- double r_filtered,
- const char *extra);
-void make_possible_keys_line(TABLE *table, key_map possible_keys, String *line);
+int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
+ key_map possible_keys);
/****************************************************************************
Temporary table support for SQL Runtime
@@ -1914,4 +1923,5 @@ ulong check_selectivity(THD *thd,
TABLE *table,
List<COND_STATISTIC> *conds);
+
#endif /* SQL_SELECT_INCLUDED */
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b81caea00bc..a3e2b1e23a5 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -517,7 +517,7 @@ int mysql_update(THD *thd,
*/
if (thd->lex->describe)
goto produce_explain_and_leave;
- query_plan.save_explain_data(thd->lex->explain);
+ query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
DBUG_EXECUTE_IF("show_explain_probe_update_exec_start",
dbug_serve_apcs(thd, 1););
@@ -1042,7 +1042,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
- query_plan.save_explain_data(thd->lex->explain);
+ query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
emit_explain_and_leave:
int err2= thd->lex->explain->send_explain(thd);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ce4034d62a5..c1c2e23364d 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1152,6 +1152,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token FORCE_SYM
%token FOREIGN /* SQL-2003-R */
%token FOR_SYM /* SQL-2003-R */
+%token FORMAT_SYM
%token FOUND_SYM /* SQL-2003-R */
%token FROM
%token FULL /* SQL-2003-R */
@@ -1827,6 +1828,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
subselect_end select_var_list select_var_list_init help
field_length opt_field_length
opt_extended_describe shutdown
+ opt_format_json
prepare prepare_src execute deallocate
statement sp_suid
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
@@ -9743,6 +9745,18 @@ function_call_conflict:
if ($$ == NULL)
MYSQL_YYABORT;
}
+ | FORMAT_SYM '(' expr ',' expr ')'
+ {
+ $$= new (thd->mem_root) Item_func_format($3, $5);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | FORMAT_SYM '(' expr ',' expr ',' expr ')'
+ {
+ $$= new (thd->mem_root) Item_func_format($3, $5, $7);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
| LAST_VALUE '(' expr_list ')'
{
$$= new (thd->mem_root) Item_func_last_value(* $3);
@@ -9763,17 +9777,15 @@ function_call_conflict:
}
| OLD_PASSWORD '(' expr ')'
{
- $$= new (thd->mem_root) Item_func_old_password($3);
+ $$= new (thd->mem_root)
+ Item_func_password($3, Item_func_password::OLD);
if ($$ == NULL)
MYSQL_YYABORT;
}
| PASSWORD '(' expr ')'
{
Item* i1;
- if (thd->variables.old_passwords)
- i1= new (thd->mem_root) Item_func_old_password($3);
- else
- i1= new (thd->mem_root) Item_func_password($3);
+ i1= new (thd->mem_root) Item_func_password($3);
if (i1 == NULL)
MYSQL_YYABORT;
$$= i1;
@@ -9818,11 +9830,14 @@ function_call_conflict:
}
| WEEK_SYM '(' expr ')'
{
- Item *i1= new (thd->mem_root) Item_int((char*) "0",
- thd->variables.default_week_format,
- 1);
- if (i1 == NULL)
+ Item *i1;
+ LEX_STRING name= {STRING_WITH_LEN("default_week_format")};
+ if (!(i1= get_system_var(thd, OPT_SESSION,
+ name, null_lex_str)))
MYSQL_YYABORT;
+ i1->set_name((const char *)
+ STRING_WITH_LEN("@@default_week_format"),
+ system_charset_info);
$$= new (thd->mem_root) Item_func_week($3, i1);
if ($$ == NULL)
MYSQL_YYABORT;
@@ -12771,16 +12786,34 @@ describe_command:
;
analyze_stmt_command:
- ANALYZE_SYM explainable_command
+ ANALYZE_SYM opt_format_json explainable_command
{
Lex->analyze_stmt= true;
}
;
opt_extended_describe:
- /* empty */ {}
- | EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+ EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
+ | opt_format_json {}
+ ;
+
+opt_format_json:
+ /* empty */ {}
+ | FORMAT_SYM EQ ident_or_text
+ {
+ if (!my_strcasecmp(system_charset_info, $3.str, "JSON"))
+ Lex->explain_json= true;
+ else if (!my_strcasecmp(system_charset_info, $3.str, "TRADITIONAL"))
+ {
+ DBUG_ASSERT(Lex->explain_json==false);
+ }
+ else
+ {
+ my_error(ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), $3.str);
+ MYSQL_YYABORT;
+ }
+ }
;
opt_describe_column:
@@ -14025,6 +14058,7 @@ keyword:
| EXAMINED_SYM {}
| EXECUTE_SYM {}
| FLUSH_SYM {}
+ | FORMAT_SYM {}
| GET_SYM {}
| HANDLER_SYM {}
| HELP_SYM {}
@@ -14868,17 +14902,19 @@ text_or_password:
TEXT_STRING { $$=$1.str;}
| PASSWORD '(' TEXT_STRING ')'
{
- $$= $3.length ? thd->variables.old_passwords ?
- Item_func_old_password::alloc(thd, $3.str, $3.length) :
- Item_func_password::alloc(thd, $3.str, $3.length) :
+ $$= $3.length ?
+ Item_func_password::alloc(thd, $3.str, $3.length,
+ thd->variables.old_passwords ?
+ Item_func_password::OLD :
+ Item_func_password::NEW) :
$3.str;
if ($$ == NULL)
MYSQL_YYABORT;
}
| OLD_PASSWORD '(' TEXT_STRING ')'
{
- $$= $3.length ? Item_func_old_password::
- alloc(thd, $3.str, $3.length) :
+ $$= $3.length ? Item_func_password::
+ alloc(thd, $3.str, $3.length, Item_func_password::OLD) :
$3.str;
if ($$ == NULL)
MYSQL_YYABORT;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 04e1d94d194..43a8c7269f2 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2130,7 +2130,7 @@ static bool check_old_passwords(sys_var *self, THD *thd, set_var *var)
static Sys_var_mybool Sys_old_passwords(
"old_passwords",
"Use old password encryption method (needed for 4.0 and older clients)",
- NO_SET_STMT SESSION_VAR(old_passwords), CMD_LINE(OPT_ARG),
+ SESSION_VAR(old_passwords), CMD_LINE(OPT_ARG),
DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(check_old_passwords));
export sys_var *Sys_old_passwords_ptr= &Sys_old_passwords; // for sql_acl.cc
@@ -3742,7 +3742,7 @@ static Sys_var_session_special Sys_warning_count(
static Sys_var_ulong Sys_default_week_format(
"default_week_format",
"The default week format used by WEEK() functions",
- NO_SET_STMT SESSION_VAR(default_week_format), CMD_LINE(REQUIRED_ARG),
+ SESSION_VAR(default_week_format), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 7), DEFAULT(0), BLOCK_SIZE(1));
static Sys_var_ulonglong Sys_group_concat_max_len(
diff --git a/sql/table.h b/sql/table.h
index ec50a433498..75c118f7374 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1072,6 +1072,12 @@ public:
TABLE_LIST *pos_in_table_list;/* Element referring to this table */
/* Position in thd->locked_table_list under LOCK TABLES */
TABLE_LIST *pos_in_locked_tables;
+
+ /*
+ Non-null for temporary tables only. A non-null value means this table is
+ used to compute GROUP BY; it has a unique index over the GROUP BY columns.
+ (set by create_tmp_table)
+ */
ORDER *group;
String alias; /* alias or table name */
uchar *null_flags;
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index 2b0196c9017..635e9c88fad 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -456,11 +456,14 @@ fil_compress_page(
/* Actual write needs to be alligned on block size */
if (write_size % block_size) {
-#ifdef UNIV_DEBUG
size_t tmp = write_size;
+#ifdef UNIV_DEBUG
ut_a(block_size > 0);
#endif
write_size = (size_t)ut_uint64_align_up((ib_uint64_t)write_size, block_size);
+ /* Initialize rest of the written data to avoid
+ uninitialized bytes */
+ memset(out_buf+tmp, 0, write_size-tmp);
#ifdef UNIV_DEBUG
ut_a(write_size > 0 && ((write_size % block_size) == 0));
ut_a(write_size >= tmp);
@@ -477,19 +480,10 @@ fil_compress_page(
srv_stats.page_compression_saved.add((len - write_size));
srv_stats.pages_page_compressed.inc();
-#if defined (__linux__) && (!defined(FALLOC_FL_PUNCH_HOLE) || !defined (FALLOC_FL_KEEP_SIZE))
- if (srv_use_trim) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: [Warning] System does not support FALLOC_FL_PUNCH_HOLE || FALLOC_FL_KEEP_SIZE.\n"
- " InnoDB: Disabling trim for now.\n");
- srv_use_trim = FALSE;
- }
-#endif
-
if (!srv_use_trim) {
/* If persistent trims are not used we always write full
- page */
+ page and the end of the page needs to be initialized. */
+ memset(out_buf+write_size, 0, len-write_size);
write_size = len;
}
diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h
index fb97af87460..c797c221efc 100644
--- a/storage/innobase/include/fil0pagecompress.h
+++ b/storage/innobase/include/fil0pagecompress.h
@@ -135,4 +135,11 @@ fil_page_is_compressed(
/*===================*/
byte *buf); /*!< in: page */
+/*******************************************************************//**
+Find out whether the page is page compressed with lzo method
+@return true if page is page compressed with lzo method*/
+ibool
+fil_page_is_lzo_compressed(
+/*=======================*/
+ byte *buf); /*!< in: page */
#endif
diff --git a/storage/innobase/include/fsp0pagecompress.ic b/storage/innobase/include/fsp0pagecompress.ic
index 1ba3b7835c9..3e59106b05d 100644
--- a/storage/innobase/include/fsp0pagecompress.ic
+++ b/storage/innobase/include/fsp0pagecompress.ic
@@ -182,3 +182,16 @@ fil_space_get_atomic_writes(
return((atomic_writes_t)0);
}
+
+/*******************************************************************//**
+Find out whether the page is page compressed with lzo method
+@return true if page is page compressed with lzo method, false if not */
+UNIV_INLINE
+ibool
+fil_page_is_lzo_compressed(
+/*=======================*/
+ byte *buf) /*!< in: page */
+{
+ return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED &&
+ mach_read_from_8(buf+FIL_PAGE_FILE_FLUSH_LSN) == PAGE_LZO_ALGORITHM);
+}
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 89c8bf373f7..d0025b829ec 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -48,6 +48,7 @@ Created 10/21/1995 Heikki Tuuri
#include "srv0mon.h"
#include "srv0srv.h"
#ifdef HAVE_POSIX_FALLOCATE
+#include "unistd.h"
#include "fcntl.h"
#endif
#ifndef UNIV_HOTBACKUP
@@ -77,6 +78,19 @@ Created 10/21/1995 Heikki Tuuri
#include <sys/statvfs.h>
#endif
+#if defined(UNIV_LINUX) && defined(HAVE_LINUX_FALLOC_H)
+#include <linux/falloc.h>
+#endif
+
+#if defined(HAVE_FALLOCATE)
+#ifndef FALLOC_FL_KEEP_SIZE
+#define FALLOC_FL_KEEP_SIZE 0x01
+#endif
+#ifndef FALLOC_FL_PUNCH_HOLE
+#define FALLOC_FL_PUNCH_HOLE 0x02
+#endif
+#endif
+
#ifdef HAVE_LZO
#include "lzo/lzo1x.h"
#endif
@@ -2920,7 +2934,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, len, NULL);
}
@@ -2940,7 +2954,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -3065,7 +3079,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -3085,7 +3099,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -4638,12 +4652,10 @@ found:
// We allocate memory for page compressed buffer if and only
// if it is not yet allocated.
- if (slot->page_buf == NULL) {
- os_slot_alloc_page_buf(slot);
- }
+ os_slot_alloc_page_buf(slot);
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
+ if (innodb_compression_algorithm == 3) {
os_slot_alloc_lzo_mem(slot);
}
#endif
@@ -5309,6 +5321,7 @@ os_aio_windows_handle(
case OS_FILE_WRITE:
if (slot->message1 &&
slot->page_compression &&
+ slot->page_compress_success &&
slot->page_buf) {
ret = WriteFile(slot->file, slot->page_buf,
(DWORD) slot->len, &len,
@@ -5349,26 +5362,24 @@ os_aio_windows_handle(
ret_val = ret && len == slot->len;
}
- if (slot->message1 && slot->page_compression) {
- // We allocate memory for page compressed buffer if and only
- // if it is not yet allocated.
- if (slot->page_buf == NULL) {
+ if (slot->type == OS_FILE_READ) {
+ if(fil_page_is_compressed(slot->buf)) {
os_slot_alloc_page_buf(slot);
- }
+
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
- os_slot_alloc_lzo_mem(slot);
- }
+ if (fil_page_is_lzo_compressed(slot->buf)) {
+ os_slot_alloc_lzo_mem(slot);
+ }
#endif
- if (slot->type == OS_FILE_READ) {
fil_decompress_page(slot->page_buf, slot->buf, slot->len, slot->write_size);
- } else {
- if (slot->page_compress_success && fil_page_is_compressed(slot->page_buf)) {
- if (srv_use_trim && os_fallocate_failed == FALSE) {
- // Deallocate unused blocks from file system
- os_file_trim(slot);
- }
+ }
+ } else {
+ /* OS_FILE_WRITE */
+ if (slot->page_compress_success && fil_page_is_compressed(slot->page_buf)) {
+ if (srv_use_trim && os_fallocate_failed == FALSE) {
+ // Deallocate unused blocks from file system
+ os_file_trim(slot);
}
}
}
@@ -5462,32 +5473,30 @@ retry:
/* We have not overstepped to next segment. */
ut_a(slot->pos < end_pos);
- /* If the table is page compressed and this is read,
- we decompress before we annouce the read is
- complete. For writes, we free the compressed page. */
- if (slot->message1 && slot->page_compression) {
- // We allocate memory for page compressed buffer if and only
- // if it is not yet allocated.
- if (slot->page_buf == NULL) {
+ if (slot->type == OS_FILE_READ) {
+ /* If the table is page compressed and this is read,
+ we decompress before we announce the read is
+ complete. For writes, we free the compressed page. */
+ if (fil_page_is_compressed(slot->buf)) {
+ // We allocate memory for page compressed buffer if and only
+ // if it is not yet allocated.
os_slot_alloc_page_buf(slot);
- }
-
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
- os_slot_alloc_lzo_mem(slot);
- }
+ if (fil_page_is_lzo_compressed(slot->buf)) {
+ os_slot_alloc_lzo_mem(slot);
+ }
#endif
- if (slot->type == OS_FILE_READ) {
fil_decompress_page(slot->page_buf, slot->buf, slot->len, slot->write_size);
- } else {
- if (slot->page_compress_success &&
- fil_page_is_compressed(slot->page_buf)) {
- ut_ad(slot->page_compression_page);
- if (srv_use_trim && os_fallocate_failed == FALSE) {
- // Deallocate unused blocks from file system
- os_file_trim(slot);
- }
+ }
+ } else {
+ /* OS_FILE_WRITE */
+ if (slot->page_compress_success &&
+ fil_page_is_compressed(slot->page_buf)) {
+ ut_ad(slot->page_compression_page);
+ if (srv_use_trim && os_fallocate_failed == FALSE) {
+ // Deallocate unused blocks from file system
+ os_file_trim(slot);
}
}
}
@@ -6401,7 +6410,7 @@ os_file_trim(
}
#ifdef __linux__
-#if defined(FALLOC_FL_PUNCH_HOLE) && defined (FALLOC_FL_KEEP_SIZE)
+#if defined(HAVE_FALLOCATE)
int ret = fallocate(slot->file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, trim_len);
if (ret) {
@@ -6522,12 +6531,15 @@ os_slot_alloc_page_buf(
byte* cbuf;
ut_a(slot != NULL);
- /* We allocate extra to avoid memory overwrite on compression */
- cbuf2 = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
- cbuf = static_cast<byte *>(ut_align(cbuf2, UNIV_PAGE_SIZE));
- slot->page_compression_page = static_cast<byte *>(cbuf2);
- slot->page_buf = static_cast<byte *>(cbuf);
- ut_a(slot->page_buf != NULL);
+ if (slot->page_compression_page == NULL) {
+ /* We allocate extra to avoid memory overwrite on compression */
+ cbuf2 = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
+ cbuf = static_cast<byte *>(ut_align(cbuf2, UNIV_PAGE_SIZE));
+ slot->page_compression_page = static_cast<byte *>(cbuf2);
+ slot->page_buf = static_cast<byte *>(cbuf);
+ memset(slot->page_compression_page, 0, UNIV_PAGE_SIZE*2);
+ ut_a(slot->page_buf != NULL);
+ }
}
#ifdef HAVE_LZO
@@ -6541,8 +6553,11 @@ os_slot_alloc_lzo_mem(
os_aio_slot_t* slot) /*!< in: slot structure */
{
ut_a(slot != NULL);
- slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
- ut_a(slot->lzo_mem != NULL);
+ if(slot->lzo_mem == NULL) {
+ slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
+ memset(slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ ut_a(slot->lzo_mem != NULL);
+ }
}
#endif
diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc
index 20b6b0b1b15..0b622bac8ba 100644
--- a/storage/xtradb/fil/fil0pagecompress.cc
+++ b/storage/xtradb/fil/fil0pagecompress.cc
@@ -453,11 +453,14 @@ fil_compress_page(
/* Actual write needs to be alligned on block size */
if (write_size % block_size) {
-#ifdef UNIV_DEBUG
size_t tmp = write_size;
+#ifdef UNIV_DEBUG
ut_a(block_size > 0);
#endif
write_size = (size_t)ut_uint64_align_up((ib_uint64_t)write_size, block_size);
+ /* Initialize rest of the written data to avoid
+ uninitialized bytes */
+ memset(out_buf+tmp, 0, write_size-tmp);
#ifdef UNIV_DEBUG
ut_a(write_size > 0 && ((write_size % block_size) == 0));
ut_a(write_size >= tmp);
@@ -474,19 +477,10 @@ fil_compress_page(
srv_stats.page_compression_saved.add((len - write_size));
srv_stats.pages_page_compressed.inc();
-#if defined (__linux__) && (!defined(FALLOC_FL_PUNCH_HOLE) || !defined (FALLOC_FL_KEEP_SIZE))
- if (srv_use_trim) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: [Warning] System does not support FALLOC_FL_PUNCH_HOLE || FALLOC_FL_KEEP_SIZE.\n"
- " InnoDB: Disabling trim for now.\n");
- srv_use_trim = FALSE;
- }
-#endif
-
if (!srv_use_trim) {
/* If persistent trims are not used we always write full
- page */
+ page and the end of the page needs to be initialized. */
+ memset(out_buf+write_size, 0, len-write_size);
write_size = len;
}
diff --git a/storage/xtradb/include/fil0pagecompress.h b/storage/xtradb/include/fil0pagecompress.h
index fb97af87460..c797c221efc 100644
--- a/storage/xtradb/include/fil0pagecompress.h
+++ b/storage/xtradb/include/fil0pagecompress.h
@@ -135,4 +135,11 @@ fil_page_is_compressed(
/*===================*/
byte *buf); /*!< in: page */
+/*******************************************************************//**
+Find out whether the page is page compressed with lzo method
+@return true if page is page compressed with lzo method*/
+ibool
+fil_page_is_lzo_compressed(
+/*=======================*/
+ byte *buf); /*!< in: page */
#endif
diff --git a/storage/xtradb/include/fsp0pagecompress.ic b/storage/xtradb/include/fsp0pagecompress.ic
index a2553eeb47b..4dde042e19e 100644
--- a/storage/xtradb/include/fsp0pagecompress.ic
+++ b/storage/xtradb/include/fsp0pagecompress.ic
@@ -182,3 +182,16 @@ fil_space_get_atomic_writes(
return((atomic_writes_t)0);
}
+
+/*******************************************************************//**
+Find out whether the page is page compressed with lzo method
+@return true if page is page compressed with lzo method, false if not */
+UNIV_INLINE
+ibool
+fil_page_is_lzo_compressed(
+/*=======================*/
+ byte *buf) /*!< in: page */
+{
+ return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED &&
+ mach_read_from_8(buf+FIL_PAGE_FILE_FLUSH_LSN) == PAGE_LZO_ALGORITHM);
+}
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index 0a7c8817b18..9bf1046822e 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -50,6 +50,7 @@ Created 10/21/1995 Heikki Tuuri
#include "srv0mon.h"
#include "srv0srv.h"
#ifdef HAVE_POSIX_FALLOCATE
+#include "unistd.h"
#include "fcntl.h"
#endif
#ifndef UNIV_HOTBACKUP
@@ -83,6 +84,19 @@ Created 10/21/1995 Heikki Tuuri
#include <sys/statvfs.h>
#endif
+#if defined(UNIV_LINUX) && defined(HAVE_LINUX_FALLOC_H)
+#include <linux/falloc.h>
+#endif
+
+#if defined(HAVE_FALLOCATE)
+#ifndef FALLOC_FL_KEEP_SIZE
+#define FALLOC_FL_KEEP_SIZE 0x01
+#endif
+#ifndef FALLOC_FL_PUNCH_HOLE
+#define FALLOC_FL_PUNCH_HOLE 0x02
+#endif
+#endif
+
#ifdef HAVE_LZO
#include "lzo/lzo1x.h"
#endif
@@ -3116,7 +3130,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, len, NULL);
}
@@ -3137,7 +3151,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -3235,7 +3249,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -3256,7 +3270,7 @@ try_again:
as file spaces and they do not have FIL_PAGE_TYPE
field, thus we must use here information is the actual
file space compressed. */
- if (compressed && fil_page_is_compressed((byte *)buf)) {
+ if (fil_page_is_compressed((byte *)buf)) {
fil_decompress_page(NULL, (byte *)buf, n, NULL);
}
@@ -4757,12 +4771,10 @@ found:
// We allocate memory for page compressed buffer if and only
// if it is not yet allocated.
- if (slot->page_buf == NULL) {
- os_slot_alloc_page_buf(slot);
- }
+ os_slot_alloc_page_buf(slot);
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
+ if (innodb_compression_algorithm == 3) {
os_slot_alloc_lzo_mem(slot);
}
#endif
@@ -4790,7 +4802,6 @@ found:
/* Take array mutex back */
os_mutex_enter(array->mutex);
-
}
#ifdef WIN_ASYNC_IO
@@ -5180,9 +5191,11 @@ try_again:
trx->io_reads++;
trx->io_read += n;
}
+
slot = os_aio_array_reserve_slot(type, array, message1, message2, file,
name, buf, offset, n, space_id,
page_compression, page_compression_level, write_size);
+
if (type == OS_FILE_READ) {
if (srv_use_native_aio) {
os_n_file_reads++;
@@ -5368,7 +5381,7 @@ os_aio_windows_handle(
switch (slot->type) {
case OS_FILE_WRITE:
- if (slot->message1 && slot->page_compression && slot->page_buf) {
+ if (slot->message1 && slot->page_compression && slot->page_compress_success && slot->page_buf) {
ret_val = os_file_write(slot->name, slot->file, slot->page_buf,
slot->offset, slot->len);
} else {
@@ -5404,26 +5417,23 @@ os_aio_windows_handle(
ret_val = ret && len == slot->len;
}
- if (slot->message1 && slot->page_compression) {
- // We allocate memory for page compressed buffer if and only
- // if it is not yet allocated.
- if (slot->page_buf == NULL) {
+ if (slot->type == OS_FILE_READ) {
+ if (fil_page_is_compressed(slot->buf)) {
os_slot_alloc_page_buf(slot);
- }
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
- os_slot_alloc_lzo_mem(slot);
- }
+ if (fil_page_is_lzo_compressed(slot->buf)) {
+ os_slot_alloc_lzo_mem(slot);
+ }
#endif
- if (slot->type == OS_FILE_READ) {
fil_decompress_page(slot->page_buf, slot->buf, slot->len, slot->write_size);
- } else {
- if (slot->page_compress_success && fil_page_is_compressed(slot->page_buf)) {
- if (srv_use_trim && os_fallocate_failed == FALSE) {
- // Deallocate unused blocks from file system
- os_file_trim(slot);
- }
+ }
+ } else {
+ /* OS_FILE_WRITE */
+ if (slot->page_compress_success && fil_page_is_compressed(slot->page_buf)) {
+ if (srv_use_trim && os_fallocate_failed == FALSE) {
+ // Deallocate unused blocks from file system
+ os_file_trim(slot);
}
}
}
@@ -5517,31 +5527,29 @@ retry:
/* We have not overstepped to next segment. */
ut_a(slot->pos < end_pos);
- /* If the table is page compressed and this is read,
- we decompress before we annouce the read is
- complete. For writes, we free the compressed page. */
- if (slot->message1 && slot->page_compression) {
- // We allocate memory for page compressed buffer if and only
- // if it is not yet allocated.
- if (slot->page_buf == NULL) {
+ if (slot->type == OS_FILE_READ) {
+ /* If the table is page compressed and this is read,
+ we decompress before we announce the read is
+ complete. For writes, we free the compressed page. */
+ if (fil_page_is_compressed(slot->buf)) {
+ // We allocate memory for page compressed buffer if and only
+ // if it is not yet allocated.
os_slot_alloc_page_buf(slot);
- }
-
#ifdef HAVE_LZO
- if (innodb_compression_algorithm == 3 && slot->lzo_mem == NULL) {
- os_slot_alloc_lzo_mem(slot);
- }
+ if (fil_page_is_lzo_compressed(slot->buf)) {
+ os_slot_alloc_lzo_mem(slot);
+ }
#endif
- if (slot->type == OS_FILE_READ) {
fil_decompress_page(slot->page_buf, slot->buf, slot->len, slot->write_size);
- } else {
- if (slot->page_compress_success &&
- fil_page_is_compressed(slot->page_buf)) {
- ut_ad(slot->page_compression_page);
- if (srv_use_trim && os_fallocate_failed == FALSE) {
- // Deallocate unused blocks from file system
- os_file_trim(slot);
- }
+ }
+ } else {
+ /* OS_FILE_WRITE */
+ if (slot->page_compress_success &&
+ fil_page_is_compressed(slot->page_buf)) {
+ ut_ad(slot->page_compression_page);
+ if (srv_use_trim && os_fallocate_failed == FALSE) {
+ // Deallocate unused blocks from file system
+ os_file_trim(slot);
}
}
}
@@ -6494,7 +6502,7 @@ os_file_trim(
}
#ifdef __linux__
-#if defined(FALLOC_FL_PUNCH_HOLE) && defined (FALLOC_FL_KEEP_SIZE)
+#if defined(HAVE_FALLOCATE)
int ret = fallocate(slot->file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, trim_len);
if (ret) {
@@ -6614,12 +6622,15 @@ os_slot_alloc_page_buf(
byte* cbuf;
ut_a(slot != NULL);
- /* We allocate extra to avoid memory overwrite on compression */
- cbuf2 = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
- cbuf = static_cast<byte *>(ut_align(cbuf2, UNIV_PAGE_SIZE));
- slot->page_compression_page = static_cast<byte *>(cbuf2);
- slot->page_buf = static_cast<byte *>(cbuf);
- ut_a(slot->page_buf != NULL);
+ if (slot->page_compression_page == NULL) {
+ /* We allocate extra to avoid memory overwrite on compression */
+ cbuf2 = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE*2));
+ cbuf = static_cast<byte *>(ut_align(cbuf2, UNIV_PAGE_SIZE));
+ slot->page_compression_page = static_cast<byte *>(cbuf2);
+ slot->page_buf = static_cast<byte *>(cbuf);
+ memset(slot->page_compression_page, 0, UNIV_PAGE_SIZE*2);
+ ut_a(slot->page_buf != NULL);
+ }
}
#ifdef HAVE_LZO
@@ -6633,8 +6644,11 @@ os_slot_alloc_lzo_mem(
os_aio_slot_t* slot) /*!< in: slot structure */
{
ut_a(slot != NULL);
- slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
- ut_a(slot->lzo_mem != NULL);
+ if(slot->lzo_mem == NULL) {
+ slot->lzo_mem = static_cast<byte *>(ut_malloc(LZO1X_1_15_MEM_COMPRESS));
+ memset(slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS);
+ ut_a(slot->lzo_mem != NULL);
+ }
}
#endif