author     Alexey Botchkov <holyfoot@askmonty.org>    2020-07-16 14:46:21 +0400
committer  Alexey Botchkov <holyfoot@askmonty.org>    2020-07-16 14:46:21 +0400
commit     d3a311a8e9508ef94dddb6b7f4d366337a9fd64a (patch)
tree       6fb726ce683ba1760df1f08be6f98cf0ef2dfbd2
parent     b1ab211dee599eabd9a5b886fafa3adea29ae041 (diff)
MDEV-17399 Add support for JSON_TABLE.
ha_json_table handler implemented. JSON_TABLE() added to SQL syntax.
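
As a quick illustration of the new syntax (a minimal sketch adapted from the queries in the new json_table.test suite below), JSON_TABLE() exposes a JSON document as a relational table: COLUMNS lists the projected fields, PATH expressions select the values, FOR ORDINALITY adds a row counter, and DEFAULT ... ON EMPTY / ON ERROR control the handling of missing or unconvertible values:

    select * from
      json_table('[{"color": "blue", "price": 50},
                   {"color": "red",  "price": 100}]',
                 '$[*]' columns(seq   for ordinality,
                                color varchar(100) path '$.color',
                                price int path '$.price' default '0' on empty)) as jt;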
-rw-r--r--  libmysqld/CMakeLists.txt                    |    1
-rw-r--r--  mysql-test/main/query_cache.result          |   16
-rw-r--r--  mysql-test/main/query_cache.test            |   11
-rw-r--r--  mysql-test/suite/json/r/json_table.result   |  220
-rw-r--r--  mysql-test/suite/json/t/json_table.test     |  167
-rw-r--r--  sql/CMakeLists.txt                          |    1
-rw-r--r--  sql/handler.cc                              |    6
-rw-r--r--  sql/handler.h                               |    2
-rw-r--r--  sql/item_geofunc.cc                         |    7
-rw-r--r--  sql/item_jsonfunc.cc                        |   45
-rw-r--r--  sql/item_jsonfunc.h                         |    8
-rw-r--r--  sql/lex.h                                   |    5
-rw-r--r--  sql/opt_subselect.cc                        |    8
-rw-r--r--  sql/share/errmsg-utf8.txt                   |    2
-rw-r--r--  sql/sql_acl.cc                              |   10
-rw-r--r--  sql/sql_base.cc                             |    8
-rw-r--r--  sql/sql_lex.h                               |    6
-rw-r--r--  sql/sql_parse.cc                            |   12
-rw-r--r--  sql/sql_select.cc                           |   22
-rw-r--r--  sql/sql_view.cc                             |    2
-rw-r--r--  sql/sql_yacc.yy                             |  181
-rw-r--r--  sql/table.cc                                |    8
-rw-r--r--  sql/table.h                                 |    4
-rw-r--r--  sql/table_function.cc                       | 1132
-rw-r--r--  sql/table_function.h                        |  224
25 files changed, 2074 insertions(+), 34 deletions(-)
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index ae54fb4ef5f..15f31d69eef 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -133,6 +133,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/item_vers.cc
../sql/opt_trace.cc
../sql/xa.cc
+ ../sql/table_function.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index 4ba2568c3d9..d68d5b7dc11 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -2195,6 +2195,22 @@ Variable_name Value
Qcache_queries_in_cache 0
DROP FUNCTION foo;
drop table t1;
+#
+# MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+#
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 0
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+a f
+{"a":"foo"} foo
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 1
+drop table t1;
restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/main/query_cache.test b/mysql-test/main/query_cache.test
index 3bf905d624c..475160a3900 100644
--- a/mysql-test/main/query_cache.test
+++ b/mysql-test/main/query_cache.test
@@ -1783,6 +1783,17 @@ show status like "Qcache_queries_in_cache";
DROP FUNCTION foo;
drop table t1;
+--echo #
+--echo # MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+--echo #
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+SHOW STATUS LIKE 'Qcache_inserts';
+drop table t1;
+
--echo restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/suite/json/r/json_table.result b/mysql-test/suite/json/r/json_table.result
new file mode 100644
index 00000000000..c5cd0998133
--- /dev/null
+++ b/mysql-test/suite/json/r/json_table.result
@@ -0,0 +1,220 @@
+select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+a
+1
+2
+select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+1 11
+1 111
+2 22
+2 222
+SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt;
+a b c
+1 11 NULL
+1 111 NULL
+1 NULL 11
+1 NULL 111
+2 22 NULL
+2 222 NULL
+2 NULL 22
+2 NULL 222
+create table t1 (id varchar(5), json varchar(1024));
+insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]');
+insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]');
+select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt;
+id json a
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 4
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 5
+select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+id json js_id a l_js_id b
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 1 11
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 2 111
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 1 22
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 1 11
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 2 111
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 2 222
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+ERROR 42S22: Unknown column 't1.no_field' in 'JSON_TABLE argument'
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt;
+ERROR 42S21: Duplicate column name 'a'
+DROP TABLE t1;
+create table t1 (item_name varchar(32), item_props varchar(1024));
+insert into t1 values ('Laptop', '{"color": "black", "price": 1000}');
+insert into t1 values ('Jeans', '{"color": "blue", "price": 50}');
+select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+item_name item_props color
+Laptop {"color": "black", "price": 1000} black
+Jeans {"color": "blue", "price": 50} blue
+select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+ERROR 42000: Cross dependency found in OUTER JOIN; examine your ON conditions
+DROP TABLE t1;
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+101 11
+101 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+202 11
+202 111
+2 22
+2 222
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 65
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'.
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo;
+a
+0.0
+connect con1,localhost,root,,;
+select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo;
+a
+1
+connection default;
+disconnect con1;
+create database db;
+use db;
+create table t (a text);
+insert into t values ('{"foo":"bar"}');
+create user u@localhost;
+grant select (a) on db.t to u@localhost;
+connect con1,localhost,u,,db;
+select a from t;
+a
+{"foo":"bar"}
+select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt;
+a f
+{"foo":"bar"} bar
+connection default;
+disconnect con1;
+drop user u@localhost;
+drop database db;
+use test;
+create table t1 (
+color varchar(32),
+price int
+);
+insert into t1 values ("red", 100), ("blue", 50);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+set optimizer_switch='firstmatch=off';
+select * from
+json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price'
+ )
+) as T
+where
+T.color in (select color from t1 where t1.price=T.price);
+color price
+blue 50
+red 100
+drop table t1;
+select * from
+json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]},
+ {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]',
+'$[*]' columns(
+color varchar(4) path '$.color',
+seq0 for ordinality,
+nested path '$.sizes[*]'
+ columns (seq1 for ordinality,
+size int path '$'),
+nested path '$.prices[*]'
+ columns (seq2 for ordinality,
+price int path '$')
+)
+) as T;
+color seq0 seq1 size seq2 price
+blue 1 1 1 NULL NULL
+blue 1 2 2 NULL NULL
+blue 1 3 3 NULL NULL
+blue 1 4 4 NULL NULL
+blue 1 NULL NULL 1 10
+blue 1 NULL NULL 2 20
+red 2 1 10 NULL NULL
+red 2 2 11 NULL NULL
+red 2 3 12 NULL NULL
+red 2 4 13 NULL NULL
+red 2 5 14 NULL NULL
+red 2 NULL NULL 1 100
+red 2 NULL NULL 2 200
+red 2 NULL NULL 3 300
+select * from json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100},
+ {"color": "rojo", "price": 10.0},
+ {"color": "blanco", "price": 11.0}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price', seq for ordinality)) as T order by color desc;
+color price seq
+rojo 10.0 3
+red 100 2
+blue 50 1
+blanco 11.0 4
+create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x;
+select * from v;
+a x
+- 123
+show create table v;
+View Create View character_set_client collation_connection
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `x`.`a` AS `a`,`x`.`x` AS `x` from JSON_TABLE('{"as":"b", "x":123}', "$" COLUMNS (`a` varchar(8) PATH "$.a" DEFAULT '-' ON EMPTY, `x` int(11) PATH "$.x")) x latin1 latin1_swedish_ci
+drop view v;
+select * from json_table('{"as":"b", "x":123}',
+"$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'null on empty, x int path '$.x')) x' at line 2
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*')) as jt;
+v
+NULL
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+-
+select * from json_table('{"b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+bar
+create table t1 (a varchar(100));
+insert into t1 values ('1');
+select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T;
+ERROR 42000: Not unique table/alias: 'T'
+drop table t1;
+prepare s from 'select * from
+json_table(?,
+ \'$[*]\' columns( color varchar(100) path \'$.color\',
+ price text path \'$.price\',
+ seq for ordinality)) as T
+order by color desc; ';
+execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]';
+color price seq
+red 1 1
+brown 2 2
+deallocate prepare s;
+create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T;
+select * from v2;
+color
+blue
+drop view v2;
+#
+# End of 10.5 tests
+#
diff --git a/mysql-test/suite/json/t/json_table.test b/mysql-test/suite/json/t/json_table.test
new file mode 100644
index 00000000000..b57127299fb
--- /dev/null
+++ b/mysql-test/suite/json/t/json_table.test
@@ -0,0 +1,167 @@
+select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+
+select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt;
+
+create table t1 (id varchar(5), json varchar(1024));
+insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]');
+insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]');
+select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt;
+select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+--error ER_BAD_FIELD_ERROR
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+
+--error ER_DUP_FIELDNAME
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt;
+
+DROP TABLE t1;
+
+create table t1 (item_name varchar(32), item_props varchar(1024));
+insert into t1 values ('Laptop', '{"color": "black", "price": 1000}');
+insert into t1 values ('Jeans', '{"color": "blue", "price": 50}');
+
+select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+
+--error ER_WRONG_OUTER_JOIN
+select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+
+DROP TABLE t1;
+
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+--error ER_JSON_SYNTAX
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+#
+# MDEV-22290 JSON_TABLE: Decimal type with M equal D causes Assertion
+# `scale <= precision' failure
+#
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo;
+
+
+#
+# MDEV-22291 JSON_TABLE: SELECT from json_table does not work without default database
+#
+connect (con1,localhost,root,,);
+select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo;
+connection default;
+disconnect con1;
+
+#
+# MDEV-22302 JSON_TABLE: Column privilege is insufficient for query with json_table
+#
+
+create database db;
+use db;
+create table t (a text);
+insert into t values ('{"foo":"bar"}');
+create user u@localhost;
+grant select (a) on db.t to u@localhost;
+
+--connect (con1,localhost,u,,db)
+select a from t;
+select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt;
+
+connection default;
+disconnect con1;
+
+drop user u@localhost;
+drop database db;
+
+use test;
+create table t1 (
+ color varchar(32),
+ price int
+ );
+insert into t1 values ("red", 100), ("blue", 50);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+
+set optimizer_switch='firstmatch=off';
+select * from
+ json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+ '$[*]' columns( color varchar(100) path '$.color',
+ price text path '$.price'
+ )
+ ) as T
+ where
+ T.color in (select color from t1 where t1.price=T.price);
+
+drop table t1;
+
+select * from
+json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]},
+ {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]',
+ '$[*]' columns(
+ color varchar(4) path '$.color',
+ seq0 for ordinality,
+ nested path '$.sizes[*]'
+ columns (seq1 for ordinality,
+ size int path '$'),
+ nested path '$.prices[*]'
+ columns (seq2 for ordinality,
+ price int path '$')
+ )
+ ) as T;
+
+
+select * from json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100},
+ {"color": "rojo", "price": 10.0},
+ {"color": "blanco", "price": 11.0}]',
+ '$[*]' columns( color varchar(100) path '$.color',
+ price text path '$.price', seq for ordinality)) as T order by color desc;
+
+create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x;
+select * from v;
+show create table v;
+drop view v;
+
+--error ER_PARSE_ERROR
+select * from json_table('{"as":"b", "x":123}',
+ "$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x;
+
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*')) as jt;
+
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+
+select * from json_table('{"b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+
+create table t1 (a varchar(100));
+insert into t1 values ('1');
+--error ER_NONUNIQ_TABLE
+select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T;
+
+drop table t1;
+
+prepare s from 'select * from
+json_table(?,
+ \'$[*]\' columns( color varchar(100) path \'$.color\',
+ price text path \'$.price\',
+ seq for ordinality)) as T
+order by color desc; ';
+
+execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]';
+deallocate prepare s;
+
+create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T;
+select * from v2;
+drop view v2;
+
+--echo #
+--echo # End of 10.5 tests
+--echo #
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 0dc3caab507..0894d2f6489 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -157,6 +157,7 @@ SET (SQL_SOURCE
rowid_filter.cc rowid_filter.h
opt_trace.cc
table_cache.cc encryption.cc temporary_tables.cc
+ table_function.cc
proxy_protocol.cc backup.cc xa.cc
${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
diff --git a/sql/handler.cc b/sql/handler.cc
index eac029fd5ac..6d8ff3666fe 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -560,10 +560,12 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
}
+const char *hton_no_exts[]= { 0 };
+
+
int ha_initialize_handlerton(st_plugin_int *plugin)
{
handlerton *hton;
- static const char *no_exts[]= { 0 };
DBUG_ENTER("ha_initialize_handlerton");
DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str));
@@ -576,7 +578,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
goto err_no_hton_memory;
}
- hton->tablefile_extensions= no_exts;
+ hton->tablefile_extensions= hton_no_exts;
hton->discover_table_names= hton_ext_based_table_discovery;
hton->slot= HA_SLOT_UNDEF;
diff --git a/sql/handler.h b/sql/handler.h
index 1fd9a1c6fe0..2bd47625aea 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1692,6 +1692,8 @@ struct handlerton
};
+extern const char *hton_no_exts[];
+
static inline LEX_CSTRING *hton_name(const handlerton *hton)
{
return &(hton2plugin[hton->slot]->name);
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index c107c93d584..df3dca2352d 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -115,10 +115,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
}
-void report_json_error_ex(String *js, json_engine_t *je,
- const char *fname, int n_param,
- Sql_condition::enum_warning_level lv);
-
String *Item_func_geometry_from_json::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -178,7 +174,8 @@ String *Item_func_geometry_from_json::val_str(String *str)
my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON");
break;
default:
- report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN);
+ report_json_error_ex(js->ptr(), &je, func_name(), 0,
+ Sql_condition::WARN_LEVEL_WARN);
return NULL;
}
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 442e9b5bdca..298a8d56cf0 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -247,15 +247,15 @@ error:
#define report_json_error(js, je, n_param) \
- report_json_error_ex(js, je, func_name(), n_param, \
+ report_json_error_ex(js->ptr(), je, func_name(), n_param, \
Sql_condition::WARN_LEVEL_WARN)
-void report_json_error_ex(String *js, json_engine_t *je,
+void report_json_error_ex(const char *js, json_engine_t *je,
const char *fname, int n_param,
Sql_condition::enum_warning_level lv)
{
THD *thd= current_thd;
- int position= (int)((const char *) je->s.c_str - js->ptr());
+ int position= (int)((const char *) je->s.c_str - js);
uint code;
n_param++;
@@ -285,16 +285,22 @@ void report_json_error_ex(String *js, json_engine_t *je,
case JE_DEPTH:
code= ER_JSON_DEPTH;
- push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT,
- n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT,
+ n_param, fname, position);
return;
default:
return;
}
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
}
@@ -304,15 +310,15 @@ void report_json_error_ex(String *js, json_engine_t *je,
#define TRIVIAL_PATH_NOT_ALLOWED 3
#define report_path_error(js, je, n_param) \
- report_path_error_ex(js, je, func_name(), n_param,\
+ report_path_error_ex(js->ptr(), je, func_name(), n_param,\
Sql_condition::WARN_LEVEL_WARN)
-static void report_path_error_ex(String *ps, json_path_t *p,
- const char *fname, int n_param,
- Sql_condition::enum_warning_level lv)
+void report_path_error_ex(const char *ps, json_path_t *p,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv)
{
THD *thd= current_thd;
- int position= (int)((const char *) p->s.c_str - ps->ptr() + 1);
+ int position= (int)((const char *) p->s.c_str - ps + 1);
uint code;
n_param++;
@@ -331,8 +337,11 @@ static void report_path_error_ex(String *ps, json_path_t *p,
case JE_DEPTH:
code= ER_JSON_PATH_DEPTH;
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- JSON_DEPTH_LIMIT, n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ JSON_DEPTH_LIMIT, n_param, fname, position);
return;
case NO_WILDCARD_ALLOWED:
@@ -347,12 +356,14 @@ static void report_path_error_ex(String *ps, json_path_t *p,
default:
return;
}
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
}
-
/*
Checks if the path has '.*' '[*]' or '**' constructions
and sets the NO_WILDCARD_ALLOWED error if the case.
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
index 0b02b8e4da2..f64e950b527 100644
--- a/sql/item_jsonfunc.h
+++ b/sql/item_jsonfunc.h
@@ -41,6 +41,14 @@ public:
};
+void report_path_error_ex(const char *ps, json_path_t *p,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv);
+void report_json_error_ex(const char *js, json_engine_t *je,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv);
+
+
class Json_engine_scan: public json_engine_t
{
public:
diff --git a/sql/lex.h b/sql/lex.h
index 542356c0e43..f17d8204798 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -211,6 +211,7 @@ static SYMBOL symbols[] = {
{ "ELSE", SYM(ELSE)},
{ "ELSEIF", SYM(ELSEIF_MARIADB_SYM)},
{ "ELSIF", SYM(ELSIF_MARIADB_SYM)},
+ { "EMPTY", SYM(EMPTY_SYM)},
{ "ENABLE", SYM(ENABLE_SYM)},
{ "ENCLOSED", SYM(ENCLOSED)},
{ "END", SYM(END)},
@@ -419,6 +420,7 @@ static SYMBOL symbols[] = {
{ "NATIONAL", SYM(NATIONAL_SYM)},
{ "NATURAL", SYM(NATURAL)},
{ "NCHAR", SYM(NCHAR_SYM)},
+ { "NESTED", SYM(NESTED_SYM)},
{ "NEVER", SYM(NEVER_SYM)},
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
@@ -453,6 +455,7 @@ static SYMBOL symbols[] = {
{ "OPTIONALLY", SYM(OPTIONALLY)},
{ "OR", SYM(OR_SYM)},
{ "ORDER", SYM(ORDER_SYM)},
+ { "ORDINALITY", SYM(ORDINALITY_SYM)},
{ "OTHERS", SYM(OTHERS_MARIADB_SYM)},
{ "OUT", SYM(OUT_SYM)},
{ "OUTER", SYM(OUTER)},
@@ -466,6 +469,7 @@ static SYMBOL symbols[] = {
{ "PAGE_CHECKSUM", SYM(PAGE_CHECKSUM_SYM)},
{ "PARSER", SYM(PARSER_SYM)},
{ "PARSE_VCOL_EXPR", SYM(PARSE_VCOL_EXPR_SYM)},
+ { "PATH", SYM(PATH_SYM)},
{ "PERIOD", SYM(PERIOD_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
{ "PARTITION", SYM(PARTITION_SYM)},
@@ -755,6 +759,7 @@ static SYMBOL sql_functions[] = {
{ "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)},
{ "JSON_ARRAYAGG", SYM(JSON_ARRAYAGG_SYM)},
{ "JSON_OBJECTAGG", SYM(JSON_OBJECTAGG_SYM)},
+ { "JSON_TABLE", SYM(JSON_TABLE_SYM)},
{ "LAG", SYM(LAG_SYM)},
{ "LEAD", SYM(LEAD_SYM)},
{ "MAX", SYM(MAX_SYM)},
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 1e0a5398f6a..cf0e229f757 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -1407,6 +1407,14 @@ void get_delayed_table_estimates(TABLE *table,
double *startup_cost)
{
Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect;
+ Table_function_json_table *table_function=
+ table->pos_in_table_list->table_function;
+
+ if (table_function)
+ {
+ table_function->get_estimates(out_rows, scan_time, startup_cost);
+ return;
+ }
DBUG_ASSERT(item->engine->engine_type() ==
subselect_engine::HASH_SJ_ENGINE);
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 49cabec9916..8593576f847 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7965,3 +7965,5 @@ ER_NOT_ALLOWED_IN_THIS_CONTEXT
eng "'%-.128s' is not allowed in this context"
ER_DATA_WAS_COMMITED_UNDER_ROLLBACK
eng "Engine %s does not support rollback. Changes where commited during rollback call"
+ER_JSON_TABLE_ERROR_ON_FIELD
+ eng "Field '%s' can't be set for JSON_TABLE '%s'."
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d8a05686166..64a2d457b54 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -8117,6 +8117,16 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
if (!want_access)
continue; // ok
+ if (t_ref->table_function)
+ {
+ /*
+ Table function doesn't need any privileges to check.
+ */
+ t_ref->grant.privilege|= TMP_TABLE_ACLS;
+ t_ref->grant.want_privilege= NO_ACL;
+ continue;
+ }
+
if (!(~t_ref->grant.privilege & want_access) ||
t_ref->is_anonymous_derived_table() || t_ref->schema_table)
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 39fdefbc189..a36416b89af 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3662,6 +3662,14 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
error= TRUE;
goto end;
}
+
+ if (tables->table_function)
+ {
+ if (!create_table_for_function(thd, tables))
+ error= TRUE;
+ goto end;
+ }
+
DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: %p",
tables->db.str, tables->table_name.str, tables));
(*counter)++;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 49edce508ca..55ee3ba4392 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -34,6 +34,7 @@
#include "sql_tvc.h"
#include "item.h"
#include "sql_limit.h" // Select_limit_counters
+#include "table_function.h" // Json_table_column
/* Used for flags of nesting constructs */
#define SELECT_NESTING_MAP_SIZE 64
@@ -453,6 +454,7 @@ enum enum_drop_mode
#define TL_OPTION_IGNORE_LEAVES 4
#define TL_OPTION_ALIAS 8
#define TL_OPTION_SEQUENCE 16
+#define TL_OPTION_TABLE_FUNCTION 32
typedef List<Item> List_item;
typedef Mem_root_array<ORDER*, true> Group_list_ptrs;
@@ -1388,7 +1390,8 @@ public:
enum_mdl_type mdl_type= MDL_SHARED_READ,
List<Index_hint> *hints= 0,
List<String> *partition_names= 0,
- LEX_STRING *option= 0);
+ LEX_STRING *option= 0,
+ Table_function_json_table *tfunc= 0);
TABLE_LIST* get_table_list();
bool init_nested_join(THD *thd);
TABLE_LIST *end_nested_join(THD *thd);
@@ -3290,6 +3293,7 @@ public:
SQL_I_List<ORDER> proc_list;
SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list;
Column_definition *last_field;
+ Table_function_json_table *json_table;
Item_sum *in_sum_func;
udf_func udf;
HA_CHECK_OPT check_opt; // check/repair options
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 86f81dd001a..c48255877d3 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7071,6 +7071,9 @@ check_table_access(THD *thd, privilege_t requirements, TABLE_LIST *tables,
if (table_ref->is_anonymous_derived_table())
continue;
+ if (table_ref->table_function)
+ continue;
+
if (table_ref->sequence)
{
/* We want to have either SELECT or INSERT rights to sequences depending
@@ -8134,7 +8137,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
enum_mdl_type mdl_type,
List<Index_hint> *index_hints_arg,
List<String> *partition_names,
- LEX_STRING *option)
+ LEX_STRING *option,
+ Table_function_json_table *tfunc)
{
TABLE_LIST *ptr;
TABLE_LIST *UNINIT_VAR(previous_table_ref); /* The table preceding the current one. */
@@ -8158,6 +8162,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
}
if (unlikely(table->is_derived_table() == FALSE && table->db.str &&
+ !(table_options & TL_OPTION_TABLE_FUNCTION) &&
check_db_name((LEX_STRING*) &table->db)))
{
my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
@@ -8201,6 +8206,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->table_name= table->table;
ptr->lock_type= lock_type;
+ ptr->table_function= tfunc;
ptr->updating= MY_TEST(table_options & TL_OPTION_UPDATING);
/* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
ptr->force_index= MY_TEST(table_options & TL_OPTION_FORCE_INDEX);
@@ -8245,7 +8251,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
{
if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str,
tables->alias.str) &&
- !cmp(&ptr->db, &tables->db) && ! tables->sequence))
+ (tables->table_function || ptr->table_function ||
+ !cmp(&ptr->db, &tables->db)) &&
+ !tables->sequence))
{
my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */
DBUG_RETURN(0); /* purecov: tested */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 2347162ff84..7c8290d2b9f 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1244,6 +1244,14 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
enum_parsing_place save_place=
thd->lex->current_select->context_analysis_place;
thd->lex->current_select->context_analysis_place= SELECT_LIST;
+
+ for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (tbl->table_function &&
+ tbl->table_function->setup(thd, tbl))
+ DBUG_RETURN(-1);
+ }
+
if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ,
&all_fields, &select_lex->pre_fix, 1))
DBUG_RETURN(-1);
@@ -12666,6 +12674,10 @@ uint check_join_cache_usage(JOIN_TAB *tab,
!join->allowed_outer_join_with_cache)
goto no_join_cache;
+ if (tab->table->pos_in_table_list->table_function &&
+ !tab->table->pos_in_table_list->table_function->join_cache_allowed())
+ goto no_join_cache;
+
/*
Non-linked join buffers can't guarantee one match
*/
@@ -16432,7 +16444,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
if (table->outer_join && !table->embedding && table->table)
table->table->maybe_null= FALSE;
table->outer_join= 0;
- if (!(straight_join || table->straight))
+ if (!(straight_join || table->straight || table->table_function))
{
table->dep_tables= 0;
TABLE_LIST *embedding= table->embedding;
@@ -27504,6 +27516,14 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
cmp_name= table_name.str;
}
}
+ else if (table_function)
+ {
+ /* A table function. */
+ (void) table_function->print(thd, this, str, query_type);
+ str->append(' ');
+ str->append(alias);
+ cmp_name= alias.str;
+ }
else
{
// A normal table
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 30fadcc63d2..a6a2580167d 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -521,7 +521,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
{
/* is this table temporary and is not view? */
if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view &&
- !tbl->schema_table)
+ !tbl->schema_table && !tbl->table_function)
{
my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias.str);
res= TRUE;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index da752c0a3cb..afb69c701f4 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -68,6 +68,7 @@
#include "sql_sequence.h"
#include "my_base.h"
#include "sql_type_json.h"
+#include "table_function.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
@@ -207,6 +208,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
Lex_for_loop_st for_loop;
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
+ Json_table_column::On_response json_on_response;
vers_history_point_t vers_history_point;
struct
{
@@ -226,6 +228,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
/* pointers */
Lex_ident_sys *ident_sys_ptr;
Create_field *create_field;
+ Json_table_column *json_table_column;
Spvar_definition *spvar_definition;
Row_definition_list *spvar_definition_list;
const Type_handler *type_handler;
@@ -494,6 +497,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> ELSEIF_MARIADB_SYM
%token <kwd> ELSE /* SQL-2003-R */
%token <kwd> ELSIF_ORACLE_SYM /* PLSQL-R */
+%token <kwd> EMPTY_SYM /* SQL-2016-R */
%token <kwd> ENCLOSED
%token <kwd> ESCAPED
%token <kwd> EXCEPT_SYM /* SQL-2003-R */
@@ -512,6 +516,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> GROUP_CONCAT_SYM
%token <rwd> JSON_ARRAYAGG_SYM
%token <rwd> JSON_OBJECTAGG_SYM
+%token <rwd> JSON_TABLE_SYM
%token <kwd> GROUP_SYM /* SQL-2003-R */
%token <kwd> HAVING /* SQL-2003-R */
%token <kwd> HOUR_MICROSECOND_SYM
@@ -569,6 +574,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> MOD_SYM /* SQL-2003-N */
%token <kwd> NATURAL /* SQL-2003-R */
%token <kwd> NEG
+%token <kwd> NESTED_SYM /* SQL-2003-N */
%token <kwd> NOT_SYM /* SQL-2003-R */
%token <kwd> NO_WRITE_TO_BINLOG
%token <kwd> NOW_SYM
@@ -580,6 +586,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> OPTIMIZE
%token <kwd> OPTIONALLY
%token <kwd> ORDER_SYM /* SQL-2003-R */
+%token <kwd> ORDINALITY_SYM /* SQL-2003-N */
%token <kwd> OR_SYM /* SQL-2003-R */
%token <kwd> OTHERS_ORACLE_SYM /* SQL-2011-N, PLSQL-R */
%token <kwd> OUTER
@@ -590,6 +597,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> PAGE_CHECKSUM_SYM
%token <kwd> PARSE_VCOL_EXPR_SYM
%token <kwd> PARTITION_SYM /* SQL-2003-R */
+%token <kwd> PATH_SYM /* SQL-2003-N */
%token <kwd> PERCENTILE_CONT_SYM
%token <kwd> PERCENTILE_DISC_SYM
%token <kwd> PERCENT_RANK_SYM
@@ -1352,12 +1360,15 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <sp_handler> sp_handler
+%type <json_on_response> json_on_response
+
%type <Lex_field_type> type_with_opt_collate field_type
field_type_numeric
field_type_string
field_type_lob
field_type_temporal
field_type_misc
+ json_table_field_type
%type <Lex_dyncol_type> opt_dyncol_type dyncol_type
numeric_dyncol_type temporal_dyncol_type string_dyncol_type
@@ -1521,7 +1532,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
table_primary_derived table_primary_derived_opt_parens
derived_table_list table_reference_list_parens
nested_table_reference_list join_table_parens
- update_table_list
+ update_table_list table_function
%type <date_time_type> date_time_type;
%type <interval> interval
@@ -1676,6 +1687,9 @@ End SQL_MODE_ORACLE_SPECIFIC */
opt_delete_gtid_domain
asrow_attribute
opt_constraint_no_id
+ json_table_columns_clause json_table_columns_list json_table_column
+ json_table_column_type json_opt_on_empty_or_error
+ json_on_error_response json_on_empty_response
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
%type <NONE> sp_if_then_statements sp_case_then_statements
@@ -11422,6 +11436,166 @@ join_table_list:
derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); }
;
+json_table_columns_clause:
+ COLUMNS '(' json_table_columns_list ')'
+ {}
+ ;
+
+json_table_columns_list:
+ json_table_column
+ | json_table_columns_list ',' json_table_column
+ {}
+ ;
+
+json_table_column:
+ ident
+ {
+ LEX *lex=Lex;
+ Create_field *f= new (thd->mem_root) Create_field();
+
+ if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN,
+ system_charset_info, 1)))
+ my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str));
+
+ lex->json_table->m_cur_json_table_column=
+ new (thd->mem_root) Json_table_column(f,
+ lex->json_table->m_sql_nest);
+
+ if (unlikely(!f ||
+ !lex->json_table->m_cur_json_table_column))
+ MYSQL_YYABORT;
+
+ lex->init_last_field(f, &$1, NULL);
+ }
+ json_table_column_type
+ {
+ LEX *lex=Lex;
+ if (unlikely(lex->json_table->
+ m_cur_json_table_column->m_field->check(thd)))
+ MYSQL_YYABORT;
+ lex->json_table->m_columns.push_back(
+ lex->json_table->m_cur_json_table_column, thd->mem_root);
+ }
+ | NESTED_SYM PATH_SYM TEXT_STRING_sys
+ {
+ LEX *lex=Lex;
+ Json_table_nested_path *np= new (thd->mem_root)
+ Json_table_nested_path(lex->json_table->m_sql_nest);
+ np->set_path(thd, $3);
+ lex->json_table->add_nested(np);
+ }
+ json_table_columns_clause
+ {
+ LEX *lex=Lex;
+ lex->json_table->leave_nested();
+ }
+ ;
+
+json_table_column_type:
+ FOR_SYM ORDINALITY_SYM
+ {
+ Lex_field_type_st type;
+ type.set_handler_length_flags(&type_handler_slong, 0, 0);
+ Lex->last_field->set_attributes(thd, type, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ Lex->json_table->m_cur_json_table_column->
+ set(Json_table_column::FOR_ORDINALITY);
+ }
+ | json_table_field_type PATH_SYM TEXT_STRING_sys
+ json_opt_on_empty_or_error
+ {
+ Lex->last_field->set_attributes(thd, $1, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ if (Lex->json_table->m_cur_json_table_column->
+ set(thd, Json_table_column::PATH, $3))
+ {
+ MYSQL_YYABORT;
+ }
+ }
+ | json_table_field_type EXISTS PATH_SYM TEXT_STRING_sys
+ {
+ Lex->last_field->set_attributes(thd, $1, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ Lex->json_table->m_cur_json_table_column->
+ set(thd, Json_table_column::EXISTS_PATH, $4);
+ }
+ ;
+
+json_table_field_type:
+ field_type_numeric
+ | field_type_temporal
+ | field_type_string
+ | field_type_lob
+ ;
+
+json_opt_on_empty_or_error:
+ /* none */
+ {}
+ | json_on_error_response
+ | json_on_error_response json_on_empty_response
+ | json_on_empty_response
+ | json_on_empty_response json_on_error_response
+ ;
+
+json_on_response:
+ ERROR_SYM
+ {
+ $$.m_response= Json_table_column::RESPONSE_ERROR;
+ }
+ | NULL_SYM
+ {
+ $$.m_response= Json_table_column::RESPONSE_NULL;
+ }
+ | DEFAULT TEXT_STRING_sys
+ {
+ $$.m_response= Json_table_column::RESPONSE_DEFAULT;
+ $$.m_default= $2;
+ Lex->json_table->m_cur_json_table_column->m_defaults_cs=
+ thd->variables.collation_connection;
+ }
+ ;
+
+json_on_error_response:
+ json_on_response ON ERROR_SYM
+ {
+ Lex->json_table->m_cur_json_table_column->m_on_error= $1;
+ }
+ ;
+
+json_on_empty_response:
+ json_on_response ON EMPTY_SYM
+ {
+ Lex->json_table->m_cur_json_table_column->m_on_empty= $1;
+ }
+ ;
+
+table_function:
+ JSON_TABLE_SYM '(' expr ',' TEXT_STRING_sys
+ {
+ Table_function_json_table *jt=
+ new (thd->mem_root) Table_function_json_table($3);
+ if (unlikely(!jt || jt->m_nested_path.set_path(thd, $5)))
+ MYSQL_YYABORT;
+ Lex->json_table= jt;
+ jt->m_sql_nest= &jt->m_nested_path;
+ }
+ json_table_columns_clause ')' opt_as ident_table_alias
+ {
+ SELECT_LEX *sel= Select;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd,
+ new (thd->mem_root) Table_ident(thd, &empty_clex_str,
+ &$10, TRUE),
+ NULL,
+ Select->get_table_join_options() |
+ TL_OPTION_TABLE_FUNCTION,
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ 0,0,0, Lex->json_table)))
+ MYSQL_YYABORT;
+ }
+ ;
+
/*
The ODBC escape syntax for Outer Join is: '{' OJ join_table '}'
The parser does not define OJ as a token, any ident is accepted
@@ -11629,6 +11803,7 @@ table_factor:
$$= $1;
}
| table_reference_list_parens { $$= $1; }
+ | table_function { $$= $1; }
;
table_primary_ident_opt_parens:
@@ -15560,6 +15735,7 @@ keyword_sp_var_and_label:
| DYNAMIC_SYM
| ELSEIF_ORACLE_SYM
| ELSIF_MARIADB_SYM
+ | EMPTY_SYM
| ENDS_SYM
| ENGINE_SYM
| ENGINES_SYM
@@ -15665,6 +15841,7 @@ keyword_sp_var_and_label:
| MYSQL_SYM
| MYSQL_ERRNO_SYM
| NAME_SYM
+ | NESTED_SYM
| NEVER_SYM
| NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2
| NEXTVAL_SYM
@@ -15684,6 +15861,7 @@ keyword_sp_var_and_label:
| ONE_SYM
| ONLINE_SYM
| ONLY_SYM
+ | ORDINALITY_SYM
| OVERLAPS_SYM
| PACKAGE_MARIADB_SYM
| PACK_KEYS_SYM
@@ -15691,6 +15869,7 @@ keyword_sp_var_and_label:
| PARTIAL
| PARTITIONING_SYM
| PARTITIONS_SYM
+ | PATH_SYM
| PERSISTENT_SYM
| PHASE_SYM
| PLUGIN_SYM
diff --git a/sql/table.cc b/sql/table.cc
index 41e75347877..6ee29cc8898 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -6848,7 +6848,7 @@ const char *Field_iterator_table_ref::get_table_name()
DBUG_ASSERT(!strcmp(table_ref->table_name.str,
table_ref->table->s->table_name.str) ||
- table_ref->schema_table);
+ table_ref->schema_table || table_ref->table_function);
return table_ref->table_name.str;
}
@@ -6867,7 +6867,8 @@ const char *Field_iterator_table_ref::get_db_name()
*/
DBUG_ASSERT(!cmp(&table_ref->db, &table_ref->table->s->db) ||
(table_ref->schema_table &&
- is_infoschema_db(&table_ref->table->s->db)));
+ is_infoschema_db(&table_ref->table->s->db)) ||
+ table_ref->table_function);
return table_ref->db.str;
}
@@ -8044,7 +8045,8 @@ bool TABLE::is_filled_at_execution()
*/
return MY_TEST(!pos_in_table_list ||
pos_in_table_list->jtbm_subselect ||
- pos_in_table_list->is_active_sjm());
+ pos_in_table_list->is_active_sjm() ||
+ pos_in_table_list->table_function);
}
diff --git a/sql/table.h b/sql/table.h
index f2fad6c19b2..057fc0907c8 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -63,6 +63,7 @@ class Range_rowid_filter_cost_info;
class derived_handler;
class Pushdown_derived;
struct Name_resolution_context;
+class Table_function_json_table;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -2194,6 +2195,7 @@ struct TABLE_LIST
const char *option; /* Used by cache index */
Item *on_expr; /* Used with outer join */
Name_resolution_context *on_context; /* For ON expressions */
+ Table_function_json_table *table_function; /* If it's the table function. */
Item *sj_on_expr;
/*
@@ -2581,7 +2583,7 @@ struct TABLE_LIST
void cleanup_items();
bool placeholder()
{
- return derived || view || schema_table || !table;
+ return derived || view || schema_table || !table || table_function;
}
void print(THD *thd, table_map eliminated_tables, String *str,
enum_query_type query_type);
diff --git a/sql/table_function.cc b/sql/table_function.cc
new file mode 100644
index 00000000000..d81448768f5
--- /dev/null
+++ b/sql/table_function.cc
@@ -0,0 +1,1132 @@
+/*
+ Copyright (c) 2020, MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
+*/
+
+#include "mariadb.h"
+#include "sql_priv.h"
+#include "sql_class.h" /* TMP_TABLE_PARAM */
+#include "table.h"
+#include "item_jsonfunc.h"
+#include "table_function.h"
+#include "sql_show.h"
+
+
+class table_function_handlerton
+{
+public:
+ handlerton m_hton;
+ table_function_handlerton()
+ {
+ bzero(&m_hton, sizeof(m_hton));
+ m_hton.tablefile_extensions= hton_no_exts;
+ m_hton.slot= HA_SLOT_UNDEF;
+ }
+};
+
+
+static table_function_handlerton table_function_hton;
+
+
+class ha_json_table: public handler
+{
+protected:
+ Table_function_json_table *m_jt;
+ String m_tmps;
+ String *m_js;
+ uchar *m_cur_pos;
+public:
+ ha_json_table(TABLE_SHARE *share_arg, Table_function_json_table *jt):
+ handler(&table_function_hton.m_hton, share_arg), m_jt(jt)
+ {
+ /*
+ set the mark_trx_read_write_done to avoid the
+ handler::mark_trx_read_write_internal() call.
+ It relies on &ha_thd()->ha_data[ht->slot].ha_info[0] to be set.
+ But we don't set the ha_data for the ha_json_table, and
+ that call makes no sense for ha_json_table.
+ */
+ mark_trx_read_write_done= 1;
+ ref_length= (jt->m_depth+1)*(4+4) + jt->m_depth * 1;
+ }
+ ~ha_json_table() {}
+ handler *clone(const char *name, MEM_ROOT *mem_root) { return NULL; }
+ const char *index_type(uint inx) { return "NONE"; }
+ /* Rows also use a fixed-size format */
+ enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
+ ulonglong table_flags() const
+ {
+ return (HA_FAST_KEY_READ | /*HA_NO_BLOBS |*/ HA_NULL_IN_KEY |
+ HA_CAN_SQL_HANDLER |
+ HA_REC_NOT_IN_SEQ | HA_NO_TRANSACTIONS |
+ HA_HAS_RECORDS | HA_CAN_HASH_KEYS);
+ }
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR;
+ }
+ uint max_supported_keys() const { return 1; }
+ uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void) { return 0; }
+ int rnd_init(bool scan);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ void position(const uchar *record);
+ int can_continue_handler_scan() { return 1; }
+ int info(uint);
+ int extra(enum ha_extra_function operation);
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+ { return NULL; }
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info)
+ { return 1; }
+private:
+ void update_key_stats();
+};
+
+
+/*
+ Helper class that creates the temporary table that
+ represents the table function in the query.
+*/
+
+class Create_json_table: public Data_type_statistics
+{
+ // The following members are initialized only in start()
+ Field **m_default_field;
+ uchar *m_bitmaps;
+ // The following members are initialized in ctor
+ uint m_temp_pool_slot;
+ uint m_null_count;
+public:
+ Create_json_table(const TMP_TABLE_PARAM *param,
+ bool save_sum_fields)
+ :m_temp_pool_slot(MY_BIT_NONE),
+ m_null_count(0)
+ { }
+
+ void add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols);
+
+ TABLE *start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt,
+ const LEX_CSTRING *table_alias);
+
+ bool add_json_table_fields(THD *thd, TABLE *table,
+ Table_function_json_table *jt);
+ bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt);
+};
+
+
+void Json_table_nested_path::scan_start(CHARSET_INFO *i_cs,
+ const uchar *str, const uchar *end)
+{
+ json_get_path_start(&m_engine, i_cs, str, end, &m_cur_path);
+ m_cur_nested= 0;
+ m_n_cur_nested= 0;
+ m_null= false;
+ m_ordinality_counter= 0;
+}
+
+
+int Json_table_nested_path::scan_next()
+{
+ if (m_cur_nested)
+ {
+ for (;;)
+ {
+ if (m_cur_nested->scan_next() == 0)
+ return 0;
+ m_n_cur_nested++;
+ if (!(m_cur_nested= m_cur_nested->m_next_nested))
+ break;
+handle_new_nested:
+ m_cur_nested->scan_start(m_engine.s.cs, m_engine.value_begin,
+ m_engine.s.str_end);
+ }
+ }
+
+ DBUG_ASSERT(!m_cur_nested);
+
+ while (!json_get_path_next(&m_engine, &m_cur_path))
+ {
+ if (json_path_compare(&m_path, &m_cur_path, m_engine.value_type))
+ continue;
+ /* path found. */
+ ++m_ordinality_counter;
+
+ if (!m_nested)
+ return 0;
+
+ m_cur_nested= m_nested;
+ m_n_cur_nested= 0;
+ goto handle_new_nested;
+ }
+
+ m_null= true;
+ return 1;
+}
+
+
+/*
+ Stores the current position in the form
+ [0..3] - position in the JSON string
+ [4..7] - ORDINALITY counter value
+ if there are nested paths
+ [8] - current NESTED PATH
+ [9...] - position in the nested path
+*/
+void Json_table_nested_path::get_current_position(
+ const uchar *j_start, uchar *pos) const
+{
+ long j_pos= (long) (m_engine.s.c_str - j_start);
+ int4store(pos, j_pos);
+ int4store(pos+4, m_ordinality_counter);
+ if (m_cur_nested)
+ {
+ pos[8]= (uchar) m_n_cur_nested;
+ m_cur_nested->get_current_position(m_engine.s.c_str, pos + 9);
+ }
+}
+
+
+/*
+ Function sets the object to the json parser to the specified position,
+ and restores the m_ordinality_counter.
+*/
+void Json_table_nested_path::set_position(const uchar *j_start,
+ const uchar *j_end, const uchar *pos)
+{
+ const uchar *s_pos= (const uchar *) j_start + sint4korr(pos);
+ m_null= FALSE;
+ scan_start(m_engine.s.cs, j_start, j_end);
+
+ while (m_engine.s.c_str < s_pos)
+ {
+ if (json_get_path_next(&m_engine, &m_cur_path))
+ {
+ DBUG_ASSERT(FALSE); /* should never get here. */
+ }
+ }
+ DBUG_ASSERT(m_engine.s.c_str == s_pos);
+
+ if (m_nested)
+ {
+ unsigned char n_cur_nest= pos[8];
+ m_n_cur_nested= n_cur_nest;
+ for (Json_table_nested_path *np= m_nested; np; np= np->m_next_nested)
+ {
+ np->m_null= TRUE;
+ if (n_cur_nest-- == 0)
+ m_cur_nested= np;
+ }
+
+ m_cur_nested->set_position(j_start, j_end, pos+9);
+ }
+ m_ordinality_counter= sint4korr(pos+4);
+}
+
+
+int ha_json_table::open(const char *name, int mode, uint test_if_locked)
+{
+ m_cur_pos= (uchar*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length));
+ return 0;
+}
+
+
+int ha_json_table::extra(enum ha_extra_function operation)
+{
+ return 0;
+}
+
+
+int ha_json_table::rnd_init(bool scan)
+{
+ Json_table_nested_path &p= m_jt->m_nested_path;
+ DBUG_ENTER("ha_json_table::rnd_init");
+
+ if ((m_js= m_jt->m_json->val_str(&m_tmps)))
+ {
+ p.scan_start(m_js->charset(),
+ (const uchar *) m_js->ptr(), (const uchar *) m_js->end());
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_json_table::rnd_next(uchar *buf)
+{
+ Field **f= table->field;
+ Json_table_column *jc;
+
+ if (!m_js)
+ return HA_ERR_END_OF_FILE;
+
+ m_jt->m_nested_path.get_current_position((uchar *) m_js->ptr(), m_cur_pos);
+ if (m_jt->m_nested_path.scan_next())
+ {
+ if (m_jt->m_nested_path.m_engine.s.error)
+ {
+ report_json_error_ex(m_js->ptr(), &m_jt->m_nested_path.m_engine,
+ "JSON_TABLE", 0, Sql_condition::WARN_LEVEL_ERROR);
+
+ /*
+ We already reported an error, so returning an
+ error code that just doesn't produce extra
+ messages.
+ */
+ return HA_ERR_TABLE_IN_FK_CHECK;
+ }
+ return HA_ERR_END_OF_FILE;
+ }
+
+ List_iterator_fast<Json_table_column> jc_i(m_jt->m_columns);
+ my_ptrdiff_t ptrdiff= buf - table->record[0];
+ while ((jc= jc_i++))
+ {
+ if (!bitmap_is_set(table->read_set, (*f)->field_index))
+ goto cont_loop;
+
+ if (ptrdiff)
+ (*f)->move_field_offset(ptrdiff);
+ switch (jc->m_column_type)
+ {
+ case Json_table_column::FOR_ORDINALITY:
+ if (jc->m_nest->m_null)
+ (*f)->set_null();
+ else
+ {
+ (*f)->set_notnull();
+ (*f)->store(jc->m_nest->m_ordinality_counter, TRUE);
+ }
+ break;
+ case Json_table_column::PATH:
+ case Json_table_column::EXISTS_PATH:
+ {
+ json_engine_t je;
+ json_engine_t &nest_je= jc->m_nest->m_engine;
+ json_path_step_t *cur_step;
+ uint array_counters[JSON_DEPTH_LIMIT];
+ int not_found;
+
+ if (jc->m_nest->m_null)
+ {
+ (*f)->set_null();
+ break;
+ }
+ json_scan_start(&je, nest_je.s.cs,
+ nest_je.value_begin, nest_je.s.str_end);
+
+ cur_step= jc->m_path.steps;
+ not_found= json_find_path(&je, &jc->m_path, &cur_step, array_counters) ||
+ json_read_value(&je);
+
+ if (jc->m_column_type == Json_table_column::EXISTS_PATH)
+ {
+ (*f)->set_notnull();
+ (*f)->store(!not_found);
+ }
+ else /*PATH*/
+ {
+ if (not_found)
+ jc->m_on_empty.respond(jc, *f);
+ else
+ {
+ (*f)->set_notnull();
+ if (!json_value_scalar(&je) ||
+ (*f)->store((const char *) je.value,
+ (uint32) je.value_len, je.s.cs))
+ jc->m_on_error.respond(jc, *f);
+ else
+ {
+ /*
+ If the path contains wildcards, check if there are
+ more matches for it in json and report an error if so.
+ */
+ if (jc->m_path.types_used &
+ (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD) &&
+ (json_scan_next(&je) ||
+ !json_find_path(&je, &jc->m_path, &cur_step, array_counters)))
+ jc->m_on_error.respond(jc, *f);
+ }
+
+ }
+ }
+ break;
+ }
+ };
+ if (ptrdiff)
+ (*f)->move_field_offset(-ptrdiff);
+cont_loop:
+ f++;
+ }
+ return 0;
+}
+
+
+int ha_json_table::rnd_pos(uchar * buf, uchar *pos)
+{
+ m_jt->m_nested_path.set_position((const uchar *) m_js->ptr(),
+ (const uchar *) m_js->end(), pos);
+ return rnd_next(buf);
+}
+
+
+void ha_json_table::position(const uchar *record)
+{
+ memcpy(ref, m_cur_pos, ref_length);
+}
+
+
+int ha_json_table::info(uint)
+{
+ /*
+ We don't want 0 or 1 in stats.records.
+ Though this value shouldn't matter as the optimizer
+ supposed to use Table_function_json_table::get_estimates
+ to obtain this data.
+ */
+ stats.records= 4;
+ return 0;
+}
+
+
+static void
+setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count)
+{
+ uint bitmap_size= bitmap_buffer_size(field_count);
+
+ DBUG_ASSERT(table->s->virtual_fields == 0);
+
+ my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ bitmaps+= bitmap_size;
+ my_bitmap_init(&table->tmp_set,
+ (my_bitmap_map*) bitmaps, field_count, FALSE);
+ bitmaps+= bitmap_size;
+ my_bitmap_init(&table->eq_join_set,
+ (my_bitmap_map*) bitmaps, field_count, FALSE);
+ bitmaps+= bitmap_size;
+ my_bitmap_init(&table->cond_set,
+ (my_bitmap_map*) bitmaps, field_count, FALSE);
+ bitmaps+= bitmap_size;
+ my_bitmap_init(&table->has_value_set,
+ (my_bitmap_map*) bitmaps, field_count, FALSE);
+ /* write_set and all_set are copies of read_set */
+ table->def_write_set= table->def_read_set;
+ table->s->all_set= table->def_read_set;
+ bitmap_set_all(&table->s->all_set);
+ table->default_column_bitmaps();
+}
+
+
+void Create_json_table::add_field(TABLE *table, Field *field,
+ uint fieldnr, bool force_not_null_cols)
+{
+ DBUG_ASSERT(!field->field_name.str ||
+ strlen(field->field_name.str) == field->field_name.length);
+
+ if (force_not_null_cols)
+ {
+ field->flags|= NOT_NULL_FLAG;
+ field->null_ptr= NULL;
+ }
+
+ if (!(field->flags & NOT_NULL_FLAG))
+ m_null_count++;
+
+ table->s->reclength+= field->pack_length();
+
+ // Assign it here, before update_data_type_statistics() changes m_blob_count
+ if (field->flags & BLOB_FLAG)
+ table->s->blob_field[m_blob_count]= fieldnr;
+
+ table->field[fieldnr]= field;
+ field->field_index= fieldnr;
+
+ field->update_data_type_statistics(this);
+}
+
+
+/**
+ Create a json table according to a field list.
+
+ @param thd thread handle
+ @param param a description used as input to create the table
+  @param jt json_table specification
+ @param table_alias alias
+*/
+
+TABLE *Create_json_table::start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt,
+ const LEX_CSTRING *table_alias)
+{
+ MEM_ROOT *mem_root_save, own_root;
+ TABLE *table;
+ TABLE_SHARE *share;
+ uint copy_func_count= param->func_count;
+ char *tmpname,path[FN_REFLEN];
+ Field **reg_field;
+ uint *blob_field;
+ DBUG_ENTER("Create_json_table::start");
+ DBUG_PRINT("enter",
+ ("table_alias: '%s' ", table_alias->str));
+
+ if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
+ m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
+
+ if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
+ sprintf(path, "%s-%lx-%i", tmp_file_prefix,
+ current_pid, m_temp_pool_slot);
+ else
+ {
+    /* if we run out of slots or we are not using the temp pool */
+ sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid,
+ (ulong) thd->thread_id, thd->tmp_table++);
+ }
+
+ /*
+ No need to change table name to lower case.
+ */
+ fn_format(path, path, mysql_tmpdir, "",
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+
+ const uint field_count= param->field_count;
+ DBUG_ASSERT(field_count);
+
+ init_sql_alloc(key_memory_TABLE, &own_root,
+ TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
+
+ if (!multi_alloc_root(&own_root,
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &reg_field, sizeof(Field*) * (field_count+1),
+ &m_default_field, sizeof(Field*) * (field_count),
+ &blob_field, sizeof(uint)*(field_count+1),
+ &param->items_to_copy,
+ sizeof(param->items_to_copy[0])*(copy_func_count+1),
+ &param->keyinfo, sizeof(*param->keyinfo),
+ &param->start_recinfo,
+ sizeof(*param->recinfo)*(field_count*2+4),
+ &tmpname, (uint) strlen(path)+1,
+ &m_bitmaps, bitmap_buffer_size(field_count)*6,
+ NullS))
+ {
+ DBUG_RETURN(NULL); /* purecov: inspected */
+ }
+ strmov(tmpname, path);
+ /* make table according to fields */
+
+ bzero((char*) table,sizeof(*table));
+ bzero((char*) reg_field, sizeof(Field*) * (field_count+1));
+ bzero((char*) m_default_field, sizeof(Field*) * (field_count));
+
+ table->mem_root= own_root;
+ mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ table->field=reg_field;
+ table->alias.set(table_alias->str, table_alias->length, table_alias_charset);
+
+ table->reginfo.lock_type=TL_WRITE; /* Will be updated */
+ table->map=1;
+ table->temp_pool_slot= m_temp_pool_slot;
+ table->copy_blobs= 1;
+ table->in_use= thd;
+ table->no_rows_with_nulls= param->force_not_null_cols;
+
+ table->s= share;
+ init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
+ share->blob_field= blob_field;
+ share->table_charset= param->table_charset;
+ share->primary_key= MAX_KEY; // Indicate no primary key
+ share->not_usable_by_query_cache= FALSE;
+ if (param->schema_table)
+ share->db= INFORMATION_SCHEMA_NAME;
+
+ param->using_outer_summary_function= 0;
+
+ share->db_plugin= NULL;
+ if (!(table->file= new (&table->mem_root) ha_json_table(share, jt)))
+ DBUG_RETURN(NULL);
+
+ table->file->init();
+
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(table);
+}
+
+
+bool Create_json_table::finalize(THD *thd, TABLE *table,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt)
+{
+ DBUG_ENTER("Create_json_table::finalize");
+ DBUG_ASSERT(table);
+
+ uint null_pack_length;
+ bool use_packed_rows= false;
+ uchar *pos;
+ uchar *null_flags;
+ TMP_ENGINE_COLUMNDEF *recinfo;
+ TABLE_SHARE *share= table->s;
+
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ DBUG_ASSERT(param->field_count >= share->fields);
+ DBUG_ASSERT(param->field_count >= share->blob_fields);
+
+ if (table->file->set_ha_share_ref(&share->ha_share))
+ {
+ delete table->file;
+ goto err;
+ }
+
+ if (share->blob_fields == 0)
+ m_null_count++;
+
+ null_pack_length= (m_null_count + m_uneven_bit_length + 7) / 8;
+ share->reclength+= null_pack_length;
+ if (!share->reclength)
+ share->reclength= 1; // Dummy select
+
+ {
+ uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1);
+ share->rec_buff_length= alloc_length;
+ if (!(table->record[0]= (uchar*)
+ alloc_root(&table->mem_root, alloc_length*3)))
+ goto err;
+ table->record[1]= table->record[0]+alloc_length;
+ share->default_values= table->record[1]+alloc_length;
+ }
+
+ setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields);
+
+ recinfo=param->start_recinfo;
+ null_flags=(uchar*) table->record[0];
+ pos=table->record[0]+ null_pack_length;
+ if (null_pack_length)
+ {
+ bzero((uchar*) recinfo,sizeof(*recinfo));
+ recinfo->type=FIELD_NORMAL;
+ recinfo->length=null_pack_length;
+ recinfo++;
+ bfill(null_flags,null_pack_length,255); // Set null fields
+
+ table->null_flags= (uchar*) table->record[0];
+ share->null_fields= m_null_count;
+ share->null_bytes= share->null_bytes_for_compare= null_pack_length;
+ }
+ m_null_count= (share->blob_fields == 0) ? 1 : 0;
+ for (uint i= 0; i < share->fields; i++, recinfo++)
+ {
+ Field *field= table->field[i];
+ uint length;
+ bzero((uchar*) recinfo,sizeof(*recinfo));
+
+ if (!(field->flags & NOT_NULL_FLAG))
+ {
+ recinfo->null_bit= (uint8)1 << (m_null_count & 7);
+ recinfo->null_pos= m_null_count/8;
+ field->move_field(pos, null_flags + m_null_count/8,
+ (uint8)1 << (m_null_count & 7));
+ m_null_count++;
+ }
+ else
+ field->move_field(pos,(uchar*) 0,0);
+ if (field->type() == MYSQL_TYPE_BIT)
+ {
+ /* We have to reserve place for extra bits among null bits */
+ ((Field_bit*) field)->set_bit_ptr(null_flags + m_null_count / 8,
+ m_null_count & 7);
+ m_null_count+= (field->field_length & 7);
+ }
+ field->reset();
+
+ /*
+ Test if there is a default field value. The test for ->ptr is to skip
+ 'offset' fields generated by initialize_tables
+ */
+ if (m_default_field[i] && m_default_field[i]->ptr)
+ {
+ /*
+ default_field[i] is set only in the cases when 'field' can
+ inherit the default value that is defined for the field referred
+ by the Item_field object from which 'field' has been created.
+ */
+ const Field *orig_field= m_default_field[i];
+ /* Get the value from default_values */
+ if (orig_field->is_null_in_record(orig_field->table->s->default_values))
+ field->set_null();
+ else
+ {
+ field->set_notnull();
+ memcpy(field->ptr,
+ orig_field->ptr_in_record(orig_field->table->s->default_values),
+ field->pack_length_in_rec());
+ }
+ }
+
+ length=field->pack_length();
+ pos+= length;
+
+ /* Make entry for create table */
+ recinfo->length=length;
+ recinfo->type= field->tmp_engine_column_type(use_packed_rows);
+
+ // fix table name in field entry
+ field->set_table_name(&table->alias);
+ }
+
+ param->recinfo= recinfo; // Pointer to after last field
+ store_record(table,s->default_values); // Make empty default record
+
+ share->max_rows= ~(ha_rows) 0;
+ param->end_write_records= HA_POS_ERROR;
+
+ share->db_record_offset= 1;
+
+ if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
+ HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
+ goto err;
+
+ table->db_stat= HA_OPEN_KEYFILE;
+ table->set_created();
+
+ thd->mem_root= mem_root_save;
+
+ DBUG_RETURN(false);
+
+err:
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(true);
+}
+
+
+bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
+ Table_function_json_table *jt)
+{
+ TABLE_SHARE *share= table->s;
+ Json_table_column *jc;
+ uint fieldnr= 0;
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ List_iterator_fast<Json_table_column> jc_i(jt->m_columns);
+
+  DBUG_ENTER("Create_json_table::add_json_table_fields");
+
+ thd->mem_root= &table->mem_root;
+
+ while ((jc= jc_i++))
+ {
+ Create_field *sql_f= jc->m_field;
+ List_iterator_fast<Json_table_column> it2(jt->m_columns);
+ Json_table_column *jc2;
+ if (!sql_f->charset)
+ sql_f->charset= thd->variables.collation_server;
+
+ if (sql_f->prepare_stage1(thd, thd->mem_root, table->file,
+ table->file->ha_table_flags()))
+ goto err_exit;
+
+ while ((jc2= it2++) != jc)
+ {
+ if (lex_string_cmp(system_charset_info,
+ &sql_f->field_name, &jc2->m_field->field_name) == 0)
+ {
+ my_error(ER_DUP_FIELDNAME, MYF(0), sql_f->field_name.str);
+ goto err_exit;
+ }
+ }
+ it2.rewind();
+ }
+
+ jc_i.rewind();
+
+ while ((jc= jc_i++))
+ {
+ Create_field *sql_f= jc->m_field;
+ Record_addr addr(!(sql_f->flags & NOT_NULL_FLAG));
+ Bit_addr bit(addr.null());
+
+ sql_f->prepare_stage2(table->file, table->file->ha_table_flags());
+
+ if (!sql_f->charset)
+ sql_f->charset= &my_charset_utf8mb4_bin;
+
+ Field *f= sql_f->type_handler()->make_table_field_from_def(share,
+ thd->mem_root, &sql_f->field_name, addr, bit, sql_f, sql_f->flags);
+ if (!f)
+ goto err_exit;
+ f->init(table);
+ add_field(table, f, fieldnr++, FALSE);
+ }
+
+ share->fields= fieldnr;
+ share->blob_fields= m_blob_count;
+ table->field[fieldnr]= 0; // End marker
+ share->blob_field[m_blob_count]= 0; // End marker
+ share->column_bitmap_size= bitmap_buffer_size(share->fields);
+
+ thd->mem_root= mem_root_save;
+
+ DBUG_RETURN(FALSE);
+err_exit:
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(TRUE);
+}
+
+
+TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table)
+{
+ TMP_TABLE_PARAM tp;
+ TABLE *table;
+ uint field_count= sql_table->table_function->m_columns.elements+1;
+
+ DBUG_ENTER("create_table_for_function");
+
+ tp.init();
+ tp.table_charset= system_charset_info;
+ tp.field_count= field_count;
+ {
+ Create_json_table maker(&tp, false);
+
+ if (!(table= maker.start(thd, &tp,
+ sql_table->table_function, &sql_table->alias)) ||
+ maker.add_json_table_fields(thd, table, sql_table->table_function) ||
+ maker.finalize(thd, table, &tp, sql_table->table_function))
+ {
+ if (table)
+ free_tmp_table(thd, table);
+ DBUG_RETURN(NULL);
+ }
+ }
+ sql_table->schema_table_name.length= 0;
+
+ my_bitmap_map* bitmaps=
+ (my_bitmap_map*) thd->alloc(bitmap_buffer_size(field_count));
+ my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ table->read_set= &table->def_read_set;
+ bitmap_clear_all(table->read_set);
+ table->alias_name_used= true;
+ table->next= thd->derived_tables;
+ thd->derived_tables= table;
+ table->s->tmp_table= INTERNAL_TMP_TABLE;
+ table->grant.privilege= SELECT_ACL;
+
+ sql_table->table= table;
+
+ DBUG_RETURN(table);
+}
+
+
+int Json_table_column::set(THD *thd, enum_type ctype, const LEX_CSTRING &path)
+{
+ set(ctype);
+ if (json_path_setup(&m_path, thd->variables.collation_connection,
+ (const uchar *) path.str, (const uchar *)(path.str + path.length)))
+ {
+ report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1,
+ Sql_condition::WARN_LEVEL_ERROR);
+ return 1;
+ }
+
+ /*
+ This is done so the ::print function can just print the path string.
+    Can be removed if we redo that function to print the path using its
+    actual content. Not sure though if we should.
+ */
+ m_path.s.c_str= (const uchar *) path.str;
+ return 0;
+}
+
+
+static int print_path(String *str, const json_path_t *p)
+{
+ return str->append('\"') ||
+ str->append_for_single_quote((const char *) p->s.c_str,
+ p->s.str_end - p->s.c_str) ||
+ str->append('\"');
+}
+
+
+/*
+ Print the string representation of the Json_table_column.
+
+ @param thd - the thread
+  @param f - the remaining array of Field-s from the table,
+             positioned at the Field of this Json_table_column
+ @param str - the string where to print
+*/
+int Json_table_column::print(THD *thd, Field **f, String *str)
+{
+ char column_type_buff[MAX_FIELD_WIDTH];
+ String column_type(column_type_buff, sizeof(column_type_buff),
+ str->charset());
+
+ if (append_identifier(thd, str, &m_field->field_name) ||
+ str->append(' '))
+ return 1;
+
+ switch (m_column_type)
+ {
+ case FOR_ORDINALITY:
+ if (str->append("FOR ORDINALITY"))
+ return 1;
+ break;
+ case EXISTS_PATH:
+ case PATH:
+ (*f)->sql_type(column_type);
+
+ if (str->append(column_type) ||
+ str->append(m_column_type == PATH ? " PATH " : " EXISTS ") ||
+ print_path(str, &m_path))
+ return 1;
+ break;
+  }
+
+ if (m_on_empty.print("EMPTY", str) ||
+ m_on_error.print("ERROR", str))
+ return 1;
+
+ return 0;
+}
+
+
+int Json_table_nested_path::set_path(THD *thd, const LEX_CSTRING &path)
+{
+ if (json_path_setup(&m_path, thd->variables.collation_connection,
+ (const uchar *) path.str, (const uchar *)(path.str + path.length)))
+ {
+ report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1,
+ Sql_condition::WARN_LEVEL_ERROR);
+ return 1;
+ }
+
+ /*
+ This is done so the ::print function can just print the path string.
+    Can be removed if we redo that function to print the path using its
+    actual content. Not sure though if we should.
+ */
+ m_path.s.c_str= (const uchar *) path.str;
+ return 0;
+}
+
+
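+/*
+  Illustrative sketch only (the authoritative syntax is in sql_yacc.yy):
+  a column defined as
+
+    a INT PATH '$.a' DEFAULT '0' ON EMPTY NULL ON ERROR
+
+  is expected to get m_on_empty set to RESPONSE_DEFAULT (m_default= "0")
+  and m_on_error set to RESPONSE_NULL; respond() below applies the chosen
+  response to the field.
+*/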
+void Json_table_column::On_response::respond(Json_table_column *jc, Field *f)
+{
+ switch (m_response)
+ {
+ case Json_table_column::RESPONSE_NOT_SPECIFIED:
+ case Json_table_column::RESPONSE_NULL:
+ f->set_null();
+ break;
+ case Json_table_column::RESPONSE_ERROR:
+ f->set_null();
+ my_error(ER_JSON_TABLE_ERROR_ON_FIELD, MYF(0),
+ f->field_name.str, f->table->alias.ptr());
+ break;
+ case Json_table_column::RESPONSE_DEFAULT:
+ f->set_notnull();
+ f->store(m_default.str,
+ m_default.length, jc->m_defaults_cs);
+ break;
+ }
+}
+
+
+int Json_table_column::On_response::print(const char *name, String *str) const
+{
+ const char *resp;
+ const LEX_CSTRING *ds= NULL;
+ if (m_response == Json_table_column::RESPONSE_NOT_SPECIFIED)
+ return 0;
+
+ switch (m_response)
+ {
+ case Json_table_column::RESPONSE_NULL:
+ resp= "NULL";
+ break;
+ case Json_table_column::RESPONSE_ERROR:
+ resp= "ERROR";
+ break;
+ case Json_table_column::RESPONSE_DEFAULT:
+ {
+ resp= "DEFAULT";
+ ds= &m_default;
+ break;
+ }
+  default:
+    DBUG_ASSERT(FALSE); /* should never happen. */
+    return 0;
+  }
+
+ return
+ (str->append(' ') || str->append(resp) ||
+ (ds && (str->append(" '") || str->append(*ds) || str->append('\''))) ||
+ str->append(" ON ") ||
+ str->append(name));
+}
+
+
+void Table_function_json_table::add_nested(Json_table_nested_path *np)
+{
+ *m_sql_nest->m_nested_hook= np;
+ m_sql_nest->m_nested_hook= &np->m_next_nested;
+ m_sql_nest= np;
+ if (++m_cur_depth > m_depth)
+ m_depth= m_cur_depth;
+}
+
+
+void Table_function_json_table::leave_nested()
+{
+ m_sql_nest= m_sql_nest->m_parent;
+ --m_cur_depth;
+}
+
+
+int Table_function_json_table::setup(THD *thd, TABLE_LIST *sql_table)
+{
+ thd->where= "JSON_TABLE argument";
+ if (m_json->fix_fields_if_needed(thd, &m_json))
+ return TRUE;
+
+ m_dep_tables= m_json->used_tables();
+
+ if (m_dep_tables)
+ {
+ sql_table->dep_tables|= m_dep_tables;
+ sql_table->table->no_cache= TRUE;
+ if (unlikely(sql_table->dep_tables & sql_table->get_map()))
+ {
+ /* Table itself is used in the argument. */
+ my_error(ER_WRONG_USAGE, MYF(0), "JSON_TABLE", "argument");
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+void Table_function_json_table::get_estimates(ha_rows *out_rows,
+ double *scan_time, double *startup_cost)
+{
+ *out_rows= 40;
+ *scan_time= 0.0;
+ *startup_cost= 0.0;
+}
+
+
+/*
+  Print the string representation of the Json_table_nested_path object,
+  which is the COLUMNS(...) part of the JSON_TABLE definition.
+
+ @param thd - the thread
+ @param f - the remaining part of the array of Field* objects
+ taken from the TABLE.
+ It's needed as Json_table_column objects
+ don't have links to the related Field-s.
+ @param str - the string where to print
+ @param it - the remaining part of the Json_table_column list
+ @param last_column - the last column taken from the list.
+*/
+
+int Json_table_nested_path::print(THD *thd, Field ***f, String *str,
+ List_iterator_fast<Json_table_column> &it,
+ Json_table_column **last_column)
+{
+ Json_table_nested_path *c_path= this;
+ Json_table_nested_path *c_nested= m_nested;
+ Json_table_column *jc= *last_column;
+ bool first_column= TRUE;
+
+ if (str->append("COLUMNS ("))
+ return 1;
+
+ do
+ {
+ if (first_column)
+ first_column= FALSE;
+ else if (str->append(", "))
+ return 1;
+
+ if (jc->m_nest == c_path)
+ {
+ if (jc->print(thd, *f, str))
+ return 1;
+ if (!(jc= it++))
+ goto exit_ok;
+ ++(*f);
+ }
+ else if (jc->m_nest == c_nested)
+ {
+ if (str->append("NESTED PATH ") ||
+ print_path(str, &jc->m_nest->m_path) ||
+ c_nested->print(thd, f, str, it, &jc))
+ return 1;
+ c_nested= c_nested->m_next_nested;
+ }
+ else
+ break;
+ } while(jc);
+
+exit_ok:
+ if (str->append(")"))
+ return 1;
+
+ *last_column= jc;
+ return 0;
+}
+
+
+/*
+ Print the SQL definition of the JSON_TABLE.
+ Used mostly as a part of the CREATE VIEW statement.
+
+ @param thd - the thread
+ @param sql_table - the corresponding TABLE_LIST object
+ @param str - the string where to print
+ @param query_type - the query type
+*/
+int Table_function_json_table::print(THD *thd, TABLE_LIST *sql_table,
+ String *str, enum_query_type query_type)
+{
+ List_iterator_fast<Json_table_column> jc_i(m_columns);
+ Json_table_column *jc= jc_i++;
+ Field **f_list= sql_table->table->field;
+
+ DBUG_ENTER("Table_function_json_table::print");
+
+ if (str->append("JSON_TABLE("))
+ DBUG_RETURN(TRUE);
+
+ m_json->print(str, query_type);
+
+ if (str->append(", ") ||
+ print_path(str, &m_nested_path.m_path) ||
+ str->append(' ') ||
+ m_nested_path.print(thd, &f_list, str, jc_i, &jc) ||
+ str->append(')'))
+ DBUG_RETURN(TRUE);
+
+ DBUG_RETURN(0);
+}
+
diff --git a/sql/table_function.h b/sql/table_function.h
new file mode 100644
index 00000000000..ddd21d71c85
--- /dev/null
+++ b/sql/table_function.h
@@ -0,0 +1,224 @@
+#ifndef TABLE_FUNCTION_INCLUDED
+#define TABLE_FUNCTION_INCLUDED
+
+/* Copyright (c) 2020, MariaDB Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+
+#include <json_lib.h>
+
+/*
+ The Json_table_nested_path represents the 'current nesting' level
+ for a set of JSON_TABLE columns.
+  Each column (Json_table_column instance) is linked with the corresponding
+  'nested path' object and gets its piece of JSON to parse during the
+  computation phase.
+  The root 'nested_path' is always present as a part of
+  Table_function_json_table; other 'nested_paths' are created and linked
+  into a tree structure when a new 'NESTED PATH' is met.  Nested
+  'nested_paths' are linked via 'm_nested', same-level 'nested_paths' via
+  'm_next_nested'.
+  So for instance
+    JSON_TABLE( '...', '$[*]'
+       COLUMNS( a INT PATH '$.a',
+          NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$',
+             NESTED PATH '$.c[*]' COLUMNS (x INT PATH '$')),
+          NESTED PATH '$.n[*]' COLUMNS (z INT PATH '$'))
+  results in 4 'nested_path' objects being created:
+                   root       nested_b   nested_c   nested_n
+    m_path         '$[*]'     '$.b[*]'   '$.c[*]'   '$.n[*]'
+    m_nested       &nested_b  &nested_c  NULL       NULL
+    m_next_nested  NULL       &nested_n  NULL       NULL
+
+  and 4 columns created:
+             a      b          x          z
+    m_nest   &root  &nested_b  &nested_c  &nested_n
+*/
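+
+/*
+  A small end-to-end illustration (expected behaviour, for orientation
+  only; the example is not taken from the test suite):
+
+    SELECT * FROM JSON_TABLE('[{"a":1,"b":[10,20]}]', '$[*]'
+             COLUMNS (a INT PATH '$.a',
+                      NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) AS jt;
+
+  is expected to produce the rows (a=1, b=10) and (a=1, b=20): the root
+  path matches one object, and every match of the nested path produces a
+  row repeating the parent columns.
+*/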
+
+
+class Json_table_column;
+
+class Json_table_nested_path : public Sql_alloc
+{
+public:
+ bool m_null;
+ json_path_t m_path;
+ json_engine_t m_engine;
+ json_path_t m_cur_path;
+
+  /* Counts the rows produced; the value is stored into FOR ORDINALITY columns. */
+ longlong m_ordinality_counter;
+
+  /* The Json_table_nested_path that nests this one. */
+ Json_table_nested_path *m_parent;
+
+ /* The head of the list of nested NESTED PATH statements. */
+ Json_table_nested_path *m_nested;
+
+  /* Items of the above list are linked through this pointer. */
+ Json_table_nested_path *m_next_nested;
+
+ /*
+    The pointer to the 'm_next_nested' member of the
+    last item of the above list, so a new item can be added
+    to the list by doing *m_nested_hook= new_item_ptr.
+ */
+ Json_table_nested_path **m_nested_hook;
+
+ /*
+ The NESTED PATH that is currently scanned in rnd_next.
+ */
+ Json_table_nested_path *m_cur_nested;
+ /*
+    The position of the above m_cur_nested in the list of NESTED PATHs.
+ Used only to build the reference in position()/rnd_pos().
+ */
+ int m_n_cur_nested;
+
+ Json_table_nested_path(Json_table_nested_path *parent_nest):
+ m_parent(parent_nest), m_nested(0), m_next_nested(0),
+ m_nested_hook(&m_nested) {}
+ int set_path(THD *thd, const LEX_CSTRING &path);
+ void scan_start(CHARSET_INFO *i_cs, const uchar *str, const uchar *end);
+ int scan_next();
+ int print(THD *thd, Field ***f, String *str,
+ List_iterator_fast<Json_table_column> &it,
+ Json_table_column **last_column);
+ void get_current_position(const uchar *j_start, uchar *pos) const;
+ void set_position(const uchar *j_start, const uchar *j_end, const uchar *pos);
+};
+
+
+class Json_table_column : public Sql_alloc
+{
+public:
+ enum enum_type
+ {
+ FOR_ORDINALITY,
+ PATH,
+ EXISTS_PATH
+ };
+
+ enum enum_on_type
+ {
+ ON_EMPTY,
+ ON_ERROR
+ };
+
+ enum enum_on_response
+ {
+ RESPONSE_NOT_SPECIFIED,
+ RESPONSE_ERROR,
+ RESPONSE_NULL,
+ RESPONSE_DEFAULT
+ };
+
+ struct On_response
+ {
+ public:
+ Json_table_column::enum_on_response m_response;
+ LEX_CSTRING m_default;
+ void respond(Json_table_column *jc, Field *f);
+ int print(const char *name, String *str) const;
+ bool specified() const { return m_response != RESPONSE_NOT_SPECIFIED; }
+ };
+
+ enum_type m_column_type;
+ json_path_t m_path;
+ On_response m_on_error;
+ On_response m_on_empty;
+ Create_field *m_field;
+ Json_table_nested_path *m_nest;
+ CHARSET_INFO *m_defaults_cs;
+
+ void set(enum_type ctype)
+ {
+ m_column_type= ctype;
+ }
+ int set(THD *thd, enum_type ctype, const LEX_CSTRING &path);
+ Json_table_column(Create_field *f, Json_table_nested_path *nest) :
+ m_field(f), m_nest(nest)
+ {
+ m_on_error.m_response= RESPONSE_NOT_SPECIFIED;
+ m_on_empty.m_response= RESPONSE_NOT_SPECIFIED;
+ }
+  int print(THD *thd, Field **f, String *str);
+};
+
+
+/*
+  The class represents a table function, i.e. a function that
+  returns a table as its result and is thus supposed to appear
+  in the FROM list of a SELECT statement.
+  At the moment there is only one such function, JSON_TABLE,
+  so the class is named after it, but it should be refactored
+  into a hierarchy root if more such functions are added.
+
+  When the parser finds a table function in the FROM list, it
+  creates an instance of Table_function_json_table and stores it
+  in TABLE_LIST::table_function.
+  The ha_json_table instance is then created from it in
+  create_table_for_function().
+*/
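+/*
+  Usage sketch (illustrative; the column name t1.js is made up for this
+  example): for a query such as
+
+    SELECT jt.* FROM t1,
+      JSON_TABLE(t1.js, '$[*]' COLUMNS (x INT PATH '$.x')) AS jt;
+
+  the parser creates one Table_function_json_table instance, stores it in
+  TABLE_LIST::table_function, and create_table_for_function() then builds
+  the ha_json_table-backed TABLE for it.
+*/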
+class Table_function_json_table : public Sql_alloc
+{
+public:
+ Item *m_json; /* The JSON value to be parsed. */
+
+ /* The COLUMNS(...) part representation. */
+ Json_table_nested_path m_nested_path;
+ /* The list of table column definitions. */
+ List<Json_table_column> m_columns;
+
+ /*
+    The JSON argument can refer to other tables.
+    These tables have to be marked as dependent; the
+    mask of the dependent tables is calculated in ::setup().
+ */
+ table_map m_dep_tables;
+
+ /*
+    The maximum nesting depth of NESTED PATH clauses.
+    Needed to calculate the reference length.
+    m_cur_depth is used by the parser.
+ */
+ uint m_depth, m_cur_depth;
+
+  /* Used by the parser. */
+ Json_table_column *m_cur_json_table_column;
+
+ Table_function_json_table(Item *json): m_json(json), m_nested_path(0),
+ m_depth(0), m_cur_depth(0) {}
+
+ /*
+ Used in sql_yacc.yy.
+ Represents the current NESTED PATH level being parsed.
+ */
+ Json_table_nested_path *m_sql_nest;
+ void add_nested(Json_table_nested_path *np);
+ void leave_nested();
+
+ int setup(THD *thd, TABLE_LIST *sql_table);
+ bool join_cache_allowed() const { return !m_dep_tables; }
+ void get_estimates(ha_rows *out_rows,
+ double *scan_time, double *startup_cost);
+ int print(THD *thd, TABLE_LIST *sql_table,
+ String *str, enum_query_type query_type);
+};
+
+
+TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table);
+
+#endif /* TABLE_FUNCTION_INCLUDED */
+