summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnel Husakovic <anel@mariadb.org>2019-05-13 04:47:57 -0700
committerAnel Husakovic <anel@mariadb.org>2019-07-08 08:53:28 -0700
commitb3b20907725ed3a00901e5947831d1653fa77d04 (patch)
treea15596ad5c64f0162a88c42bfabcea60e6949294
parent83f07970dd4973c5cf26628a501689bf764b6f67 (diff)
downloadmariadb-git-b3b20907725ed3a00901e5947831d1653fa77d04.tar.gz
Add test2
-rw-r--r--mysql-test/main/mysql_json_2.test9
-rw-r--r--mysql-test/std_data/frm/test2.MYDbin0 -> 740 bytes
-rw-r--r--mysql-test/std_data/frm/test2.MYIbin0 -> 1024 bytes
-rw-r--r--mysql-test/std_data/frm/test2.frmbin0 -> 8578 bytes
-rw-r--r--sql/field.cc15
-rw-r--r--sql/mysql_json.cc57
-rw-r--r--sql/mysql_json.h54
7 files changed, 124 insertions, 11 deletions
diff --git a/mysql-test/main/mysql_json_2.test b/mysql-test/main/mysql_json_2.test
new file mode 100644
index 00000000000..09b14455e94
--- /dev/null
+++ b/mysql-test/main/mysql_json_2.test
@@ -0,0 +1,9 @@
+let $datadir=`select @@datadir`;
+
+# Create other table to test other data types
+--copy_file std_data/frm/test2.frm $datadir/test/t1.frm
+--copy_file std_data/frm/test2.MYI $datadir/test/t1.MYI
+--copy_file std_data/frm/test2.MYD $datadir/test/t1.MYD
+
+select * from test.t1;
+drop table t1;
diff --git a/mysql-test/std_data/frm/test2.MYD b/mysql-test/std_data/frm/test2.MYD
new file mode 100644
index 00000000000..c0afcf58777
--- /dev/null
+++ b/mysql-test/std_data/frm/test2.MYD
Binary files differ
diff --git a/mysql-test/std_data/frm/test2.MYI b/mysql-test/std_data/frm/test2.MYI
new file mode 100644
index 00000000000..807af09d1c7
--- /dev/null
+++ b/mysql-test/std_data/frm/test2.MYI
Binary files differ
diff --git a/mysql-test/std_data/frm/test2.frm b/mysql-test/std_data/frm/test2.frm
new file mode 100644
index 00000000000..18ff1a49a0a
--- /dev/null
+++ b/mysql-test/std_data/frm/test2.frm
Binary files differ
diff --git a/sql/field.cc b/sql/field.cc
index efc1cd8ddc4..6bcc19d2d45 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -11222,10 +11222,17 @@ bool Field_mysql_json::parse_mysql(String *s, bool json_quoted,
const char* data1=data+1;
size_t len=length-1;
+ // Calculate number of bytes and elements
+ size_t element_count, bytes;
+ bool large;
switch (type)
{
case JSONB_TYPE_SMALL_OBJECT:
- return parse_array_or_object(Field_mysql_json::enum_type::OBJECT, data1, len, false);
+ {
+ large=false;
+ parse_array_or_object(Field_mysql_json::enum_type::OBJECT, data1, len, large, &element_count, &bytes);
+ break;
+ }
case JSONB_TYPE_LARGE_OBJECT:
return false; //this->parse_array_or_object(Field_mysql_json::OBJECT, data1, len, true);
case JSONB_TYPE_SMALL_ARRAY:
@@ -11235,7 +11242,11 @@ bool Field_mysql_json::parse_mysql(String *s, bool json_quoted,
default:
return false;//this->parse_scalar(type, data, len);
}
-
+ // Clear the buffer
+ s->length(0);
+ if(!get_mysql_string(s, type, data1, len, large, element_count, bytes, func_name, 0))
+ return false;
+ return true;
}
String *Field_mysql_json::val_str(String *buf1_tmp, String *buf2 __attribute__((unused)))
diff --git a/sql/mysql_json.cc b/sql/mysql_json.cc
index c74c0dc2765..25d8f5cf44c 100644
--- a/sql/mysql_json.cc
+++ b/sql/mysql_json.cc
@@ -3,6 +3,7 @@
#include "sql_class.h" // THD
+static bool check_json_depth(size_t depth);
/**
Read an offset or size field from a buffer. The offset could be either
a two byte unsigned integer or a four byte unsigned integer.
@@ -19,8 +20,8 @@ size_t read_offset_or_size(const char *data, bool large)
-bool parse_array_or_object(Field_mysql_json::enum_type t, const char *data, size_t len,
- bool large)
+bool parse_array_or_object(Field_mysql_json::enum_type t, const char *data, size_t len, bool large, size_t *const element_count,
+                           size_t *const bytes)
{
//DBUG_ASSERT(t == Field_mysql_json::ARRAY || t == Field_mysql_json::OBJECT);
/*
@@ -30,11 +31,13 @@ bool parse_array_or_object(Field_mysql_json::enum_type t, const char *data, size
const size_t offset_size= large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE;
if (len < 2 * offset_size)
return true;
- const size_t element_count= read_offset_or_size(data, large);
- const size_t bytes= read_offset_or_size(data + offset_size, large);
+
+ // Calculate values of interest
+ *element_count= read_offset_or_size(data, large);
+ *bytes= read_offset_or_size(data + offset_size, large);
// The value can't have more bytes than what's available in the data buffer.
- if (bytes > len)
+ if (*bytes > len)
return true;
/*
@@ -46,14 +49,52 @@ bool parse_array_or_object(Field_mysql_json::enum_type t, const char *data, size
*/
size_t header_size= 2 * offset_size;
if (t==Field_mysql_json::enum_type::OBJECT)
- header_size+= element_count *
+ header_size+= *element_count *
(large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL);
- header_size+= element_count *
+ header_size+= *element_count *
(large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL);
// The header should not be larger than the full size of the value.
- if (header_size > bytes)
+ if (header_size > *bytes)
return true; /* purecov: inspected */
//return Value(t, data, bytes, element_count, large);
return 1;
+}
+/**
+ Check if the depth of a JSON document exceeds the maximum supported
+ depth (JSON_DOCUMENT_MAX_DEPTH). Raise an error if the maximum depth
+ has been exceeded.
+
+ @param[in] depth the current depth of the document
+ @return true if the maximum depth is exceeded, false otherwise
+*/
+static bool check_json_depth(size_t depth)
+{
+ if (depth > JSON_DOCUMENT_MAX_DEPTH)
+ {
+ // @todo anel implement errors
+ //my_error(ER_JSON_DOCUMENT_TOO_DEEP, MYF(0));
+ return true;
+ }
+ return false;
+}
+
+bool get_mysql_string(String* buffer, size_t type, const char *data, size_t len,
+ bool large, size_t element_count, size_t bytes,
+ const char *func_name, size_t depth)
+{
+ if (check_json_depth(++depth))
+ return true;
+ switch(type)
+ {
+    case JSONB_TYPE_SMALL_OBJECT:
+ {
+ if (buffer->append('{'))
+ return true; /* purecov: inspected */
+ break;
+ // Implement iter.elt().first/second
+ }
+ }
+
+ return false;
} \ No newline at end of file
diff --git a/sql/mysql_json.h b/sql/mysql_json.h
index a1ee5da21a2..cd631c92c2c 100644
--- a/sql/mysql_json.h
+++ b/sql/mysql_json.h
@@ -52,6 +52,8 @@
#define VALUE_ENTRY_SIZE_SMALL (1 + SMALL_OFFSET_SIZE)
#define VALUE_ENTRY_SIZE_LARGE (1 + LARGE_OFFSET_SIZE)
+/// The maximum number of nesting levels allowed in a JSON document.
+#define JSON_DOCUMENT_MAX_DEPTH 100
/*
@todo anel enums used in json_dom
@@ -90,8 +92,58 @@
};
*/
+/**
+  Json values in MySQL comprise the standard set of JSON values plus a
+ MySQL specific set. A Json _number_ type is subdivided into _int_,
+ _uint_, _double_ and _decimal_.
+
+ MySQL also adds four built-in date/time values: _date_, _time_,
+ _datetime_ and _timestamp_. An additional _opaque_ value can
+ store any other MySQL type.
+
+ The enumeration is common to Json_dom and Json_wrapper.
+
+ The enumeration is also used by Json_wrapper::compare() to
+ determine the ordering when comparing values of different types,
+ so the order in which the values are defined in the enumeration,
+ is significant. The expected order is null < number < string <
+ object < array < boolean < date < time < datetime/timestamp <
+   opaque.
+ */
+enum enum_json_type {
+ J_NULL,
+ J_DECIMAL,
+ J_INT,
+ J_UINT,
+ J_DOUBLE,
+ J_STRING,
+ J_OBJECT,
+ J_ARRAY,
+ J_BOOLEAN,
+ J_DATE,
+ J_TIME,
+ J_DATETIME,
+ J_TIMESTAMP,
+ J_OPAQUE,
+ J_ERROR
+};
+
+ /**
+ Extended type ids so that JSON_TYPE() can give useful type
+ names to certain sub-types of J_OPAQUE.
+*/
+enum enum_json_opaque_type {
+ J_OPAQUE_BLOB,
+ J_OPAQUE_BIT,
+ J_OPAQUE_GEOMETRY
+};
+
// Prototypes
size_t read_offset_or_size(const char *, bool);
-bool parse_array_or_object(Field_mysql_json::enum_type, const char *,size_t ,bool);
+bool parse_array_or_object(Field_mysql_json::enum_type,
+ const char *,size_t , bool,
+ size_t *const, size_t *const);
bool parse_scalar();
+
+bool get_mysql_string(String*, size_t, const char*, size_t, bool, size_t, size_t, const char*, size_t);
#endif /* MYSQL_JSON_INCLUDED */ \ No newline at end of file