author | Anel Husakovic <anel@mariadb.org> | 2019-07-10 02:27:35 -0700
---|---|---
committer | Anel Husakovic <anel@mariadb.org> | 2019-07-11 00:52:27 -0700
commit | 81ad7b12d09f5c4b50fdfa5d7105fed82f9a463c (patch) |
tree | 9d732508b86d8c24fbca5dc4efdfe63275c268c4 /sql |
parent | 0a3aec0a75cfd929c3383d034270c15166db83ee (diff) |
download | mariadb-git-bb-anel-json-v2-10.3-recursion.tar.gz |
After review fixes (bb-anel-json-v2-10.3-recursion)
- Indentation fixed
- Fixed the empty-string handling logic
- Added read_variable_length() to the opaque data type handling (see the sketch after this list)
- Added logic and test case for MYSQL_TYPE_NEWDECIMAL
- Added new utf8 test
- Added encoding support for the other opaque data types found in the MySQL
  json test suite (MYSQL_TYPE_{LONG/MEDIUM/TINY}BLOB, MYSQL_TYPE_VARCHAR,
  MYSQL_TYPE_YEAR), with test cases
- Added big array test (--do-test=anel/mysql_j)
- ALTER ... FORCE handling: @todo
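
The read_variable_length() item refers to the length prefix that the MySQL JSONB format places in front of string and opaque values. The sketch below is an independent illustration of that encoding under the usual seven-bits-per-byte assumption, not the helper added by this commit; decode_jsonb_length is a hypothetical name.

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the read_variable_length() helper mentioned above:
// JSONB stores string/opaque lengths as up to five bytes, each contributing its
// low seven bits, with the high bit set when another length byte follows.
// Returns true on error, false on success (the same convention as the diff).
static bool decode_jsonb_length(const char *data, size_t data_length,
                                size_t *length, size_t *num_bytes)
{
  const size_t max_bytes= data_length < 5 ? data_length : 5;
  uint64_t len= 0;
  for (size_t i= 0; i < max_bytes; i++)
  {
    const unsigned char byte= static_cast<unsigned char>(data[i]);
    len|= static_cast<uint64_t>(byte & 0x7f) << (7 * i);
    if ((byte & 0x80) == 0)            // high bit clear: this was the last byte
    {
      if (len > 0xffffffffULL)         // length must fit in 32 bits
        return true;
      *length= static_cast<size_t>(len);
      *num_bytes= i + 1;               // bytes consumed by the length prefix
      return false;
    }
  }
  return true;                         // ran out of input before a terminator
}
```

For example, the byte pair 0x96 0x01 decodes to a length of 150 with two bytes consumed.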
Diffstat (limited to 'sql')
-rw-r--r-- | sql/field.cc | 40
-rw-r--r-- | sql/mysql_json.cc | 473
-rw-r--r-- | sql/mysql_json.h | 17
-rw-r--r-- | sql/table.cc | 4
4 files changed, 230 insertions, 304 deletions
diff --git a/sql/field.cc b/sql/field.cc index d5ab3153910..d79e9622d1e 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -11188,18 +11188,6 @@ uint32 Field_blob::max_display_length() const bool Field_mysql_json::parse_mysql(String *s, bool json_quoted, const char *func_name) const { - // This code is part of mysql code val_json(wrapper) - /* - The empty string is not a valid JSON binary representation, so we - should have returned an error. However, sometimes an empty - Field_json object is created in order to retrieve meta-data. - Return a dummy value instead of raising an error. Bug#21104470. - The field could also contain an empty string after forcing NULL or - DEFAULT into a not nullable JSON column using lax error checking - (such as INSERT IGNORE or non-strict SQL mode). The JSON null - literal is used to represent the empty value in this case. - Bug#21437989. - */ const char *data= s->ptr(); size_t length= s->length(); @@ -11207,27 +11195,25 @@ bool Field_mysql_json::parse_mysql(String *s, bool json_quoted, s->length(0); if (length == 0) { - //@todo anel will need to see how to handle this - //Json_wrapper w(new (std::nothrow) Json_null()); - //wr->steal(&w); + // There are no data. return false; } // Each document should start with a one-byte type specifier. if (length < 1) - return 1; //err(); /* purecov: inspected */ - - // anel Parse JSON value => part of parse_value() - + return true; + + // First byte is type, starting from second byte, raw data are considered for + // obtaining the header and key/value vectors. size_t type= data[0]; + const char* data1= data + 1; + size_t len= length - 1; - const char* data1=data+1; - size_t len=length-1; - + // The fifth argument represents `large` parameter and since it is validated + // according to the `type` in parse_value() false value is not important here. if(parse_value(s, type, data1, len, false, 0)) - { return true; - } + return false; } @@ -11235,10 +11221,8 @@ bool Field_mysql_json::parse_mysql(String *s, bool json_quoted, { ASSERT_COLUMN_MARKED_FOR_READ; String *buf1= Field_blob::val_str(buf1_tmp, buf2); - bool parsed= this->parse_mysql(buf1, true, field_name.str); - if(!parsed) - buf1->append("\nFinshed"); - else + + if (this->parse_mysql(buf1, true, field_name.str)) buf1->length(0); return buf1; } diff --git a/sql/mysql_json.cc b/sql/mysql_json.cc index 34ab6db41cc..f2302b42cf7 100644 --- a/sql/mysql_json.cc +++ b/sql/mysql_json.cc @@ -1,10 +1,9 @@ #include "mysql_json.h" #include "mysqld.h" // key_memory_JSON #include "sql_class.h" // THD +#include "field.h" // THD - -static bool check_json_depth(size_t depth); -/** +/* Read an offset or size field from a buffer. The offset could be either a two byte unsigned integer or a four byte unsigned integer. @@ -18,7 +17,7 @@ size_t read_offset_or_size(const char *data, bool large) return large ? uint4korr(data) : uint2korr(data); } -/** +/* Check if the depth of a JSON document exceeds the maximum supported depth (JSON_DOCUMENT_MAX_DEPTH). Raise an error if the maximum depth has been exceeded. @@ -30,33 +29,35 @@ static bool check_json_depth(size_t depth) { if (depth > JSON_DOCUMENT_MAX_DEPTH) { - // @todo anel implement errors - //my_error(ER_JSON_DOCUMENT_TOO_DEEP, MYF(0)); + // Json document too deep. 
return true; } return false; } -bool parse_value(String *buffer, size_t type, const char *data, size_t len, bool large, size_t depth) +bool parse_value(String *buffer, size_t type, const char *data, size_t len, + bool large, size_t depth) { switch (type) { case JSONB_TYPE_SMALL_OBJECT: - { - return parse_array_or_object(buffer, Field_mysql_json::enum_type::OBJECT, data, len, false); - } + return parse_array_or_object(buffer, Field_mysql_json::enum_type::OBJECT, + data, len, false); case JSONB_TYPE_LARGE_OBJECT: - return parse_array_or_object(buffer, Field_mysql_json::enum_type::OBJECT, data, len, true); + return parse_array_or_object(buffer, Field_mysql_json::enum_type::OBJECT, + data, len, true); case JSONB_TYPE_SMALL_ARRAY: - return parse_array_or_object(buffer, Field_mysql_json::enum_type::ARRAY, data, len, false); + return parse_array_or_object(buffer, Field_mysql_json::enum_type::ARRAY, + data, len, false); case JSONB_TYPE_LARGE_ARRAY: - return parse_array_or_object(buffer, Field_mysql_json::enum_type::ARRAY, data, len, true); + return parse_array_or_object(buffer, Field_mysql_json::enum_type::ARRAY, + data, len, true); default: - return parse_mysql_scalar(buffer, type, data, len, large, depth); // ovo ne radi + return parse_mysql_scalar(buffer, type, data, len, large, depth); } } -bool parse_array_or_object(String *buffer,Field_mysql_json::enum_type t, +bool parse_array_or_object(String *buffer, Field_mysql_json::enum_type t, const char *data, size_t len, bool large) { DBUG_ASSERT((t == Field_mysql_json::enum_type::ARRAY) || @@ -66,10 +67,11 @@ bool parse_array_or_object(String *buffer,Field_mysql_json::enum_type t, (both number of elements or members, and number of bytes). */ const size_t offset_size= large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE; + // The length has to be at least double offset size (header). if (len < 2 * offset_size) return true; - // Calculate number of elements and length of binary (number of bytes) + // Calculate number of elements and length of binary (number of bytes). size_t element_count, bytes; element_count= read_offset_or_size(data, large); @@ -79,192 +81,141 @@ bool parse_array_or_object(String *buffer,Field_mysql_json::enum_type t, if (bytes > len) return true; - /* - Calculate the size of the header. It consists of: - - two length fields - - if it is a JSON object, key entries with pointers to where the keys - are stored - - value entries with pointers to where the actual values are stored - */ - - //size_t header_size= 2 * offset_size; - size_t key_json_offset, key_json_start, key_json_len; - size_t type, value_type_offset, value_counter(0); - char *key_element; - bool is_last(false); - - if (element_count == 0) +// Handling start of object or arrays. + if (t==Field_mysql_json::enum_type::OBJECT) { - if (t==Field_mysql_json::enum_type::OBJECT) - { - if(buffer->append("{}")) - { - return true; - } - } - else - { - if(buffer->append("[]")) - { - return true; - } - } - return false; + if (buffer->append('{')) + return true; } + else + { + if (buffer->append('[')) + return true; + } + + // Variables used for an object - vector of keys. + size_t key_json_offset, key_json_start, key_json_len; + char *key_element; + // Variables used for an object and array - vector of values. + size_t type, value_type_offset; - for(uint8 i=0; i<element_count; i++) + for (size_t i=0; i < element_count; i++) { if (t==Field_mysql_json::enum_type::OBJECT) { - // header_size+= element_count * - // (large ? 
KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL); - if(i==0) - { - if(buffer->append('{')) - { - return true; - } - } - - key_json_offset= 2*offset_size+i*(large?KEY_ENTRY_SIZE_LARGE:KEY_ENTRY_SIZE_SMALL); - key_json_start= read_offset_or_size(data+key_json_offset,large); - //keys are always 2 bytes - key_json_len= read_offset_or_size(data+key_json_offset+offset_size, false); + /* + Calculate the size of the header. It consists of: + - two length fields, + - if it is a JSON object, key entries with pointers to where the keys + are stored (key_json_offset), + - value entries with pointers to where the actual values are stored + (value_type_offset). + */ + key_json_offset= 2 * offset_size + i * (large ? KEY_ENTRY_SIZE_LARGE : + KEY_ENTRY_SIZE_SMALL); + key_json_start= read_offset_or_size(data + key_json_offset, large); + // The length of keys is always on 2 bytes (large == false) + key_json_len= read_offset_or_size(data + key_json_offset + offset_size, + false); - key_element= new char[key_json_len+1]; - memmove(key_element, const_cast<char*>(&data[key_json_start]), key_json_len); + key_element= new char[key_json_len + 1]; + memmove(key_element, &data[key_json_start], key_json_len); key_element[key_json_len]= '\0'; - if(buffer->append('"')) + if (buffer->append('"')) { delete[] key_element; return true; } - if( buffer->append(String((const char *)key_element, &my_charset_bin)) ) + if (buffer->append(String((const char *)key_element, &my_charset_bin))) { delete[] key_element; return true; } delete[] key_element; - if(buffer->append('"')) - { + if (buffer->append('"')) return true; - } - if(buffer->append(":")) - { + + if (buffer->append(':')) return true; - } - value_type_offset= 2*offset_size+ - (large?KEY_ENTRY_SIZE_LARGE:KEY_ENTRY_SIZE_SMALL)*(element_count)+ - (large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL)*value_counter; - value_counter++; - - if(i==(element_count-1)) - { - is_last=true; - } + value_type_offset= 2 * offset_size + + (large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL) * (element_count) + + (large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL) * i; + // Get the type of the actual value. 
type= data[value_type_offset]; - //parse_value(buffer, type, data, len); // should be called which is - // calling parse_mysql_scalar(buffer, type, data, len, large, 0) - // Inlined values + // Inlined values are sort of optimization obtained from raw data, + // where actual value is obtained as a first next byte from value_type_offset if (type == JSONB_TYPE_INT16 || type == JSONB_TYPE_UINT16 || - type == JSONB_TYPE_LITERAL || - (large && (type == JSONB_TYPE_INT32 || type == JSONB_TYPE_UINT32))) + type == JSONB_TYPE_LITERAL || + (large && (type == JSONB_TYPE_INT32 || type == JSONB_TYPE_UINT32))) { - if(parse_mysql_scalar(buffer, type, data + value_type_offset+1, len, large, 0)) - { + if (parse_mysql_scalar(buffer, type, data + value_type_offset + 1, + len, large, 0)) return true; - } } - else // Non-inlined values + else // Non-inlined values - we need to get the lenght of data and use + // recursively parse_value() { - size_t val_len_ptr=read_offset_or_size(data+value_type_offset+1, large); - //if(parse_mysql_scalar(buffer, type, data+val_len_ptr, len, large, 0)) - if(parse_value(buffer, type, data+val_len_ptr, bytes-val_len_ptr, large, 0)) - { + size_t val_start_offset= read_offset_or_size(data + value_type_offset + 1, + large); + if (parse_value(buffer, type, data + val_start_offset, bytes - val_start_offset, + large, 0)) return true; - } - } - if(!is_last) + if (!(i == (element_count - 1))) { - buffer->append(","); + buffer->append(','); } - - if(i==(element_count-1)) - { - if(buffer->append('}')) - { - return true; - } - } - } // end object else // t==Field_mysql::enum_type::Array { - if(i==0) - { - if(buffer->append('[')) - { - return true; - } - } - - // Parse array - value_type_offset= 2*offset_size+ - (large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL)*value_counter; - value_counter++; - - if(i==(element_count-1)) - { - is_last=true; - } + value_type_offset= 2 * offset_size + + (large ? 
VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL) * i; type= data[value_type_offset]; - //parse_value(buffer, type, data, len); // should be called which is - // calling parse_mysql_scalar(buffer, type, data, len, large, 0) - // Inlined values + // Inlined values are sort of optimization obtained from raw data, + // where actual value is obtained as a first next byte from value_type_offset if (type == JSONB_TYPE_INT16 || type == JSONB_TYPE_UINT16 || - type == JSONB_TYPE_LITERAL || - (large && (type == JSONB_TYPE_INT32 || type == JSONB_TYPE_UINT32))) + type == JSONB_TYPE_LITERAL || + (large && (type == JSONB_TYPE_INT32 || type == JSONB_TYPE_UINT32))) { - if(parse_mysql_scalar(buffer, type, data + value_type_offset+1, len, large, 0)) - { + if (parse_mysql_scalar(buffer, type, data + value_type_offset + 1, + bytes, large, 0)) return true; - } } - else // Non-inlined values + else // Non-inlined values - we need to get the lenght of data and use + // recursively parse_value() { - size_t val_len_ptr=read_offset_or_size(data+value_type_offset+1, large); - //if(parse_mysql_scalar(buffer, type, data+val_len_ptr, len, large, 0)) - if(parse_value(buffer, type, data+val_len_ptr, bytes-val_len_ptr, large, 0)) - { + size_t val_len_ptr= read_offset_or_size(data + value_type_offset + 1, + large); + if (parse_value(buffer, type, data + val_len_ptr, bytes - val_len_ptr, + large, 0)) return true; - } - - } - - if(!is_last) - { - buffer->append(","); } - if(i==(element_count-1)) + if(!(i==(element_count-1))) { - if(buffer->append(']')) - { - return true; - } + buffer->append(','); } } // end array - is_last=false; - } // end for + +// Handling ending of objects and arrays. + if (t==Field_mysql_json::enum_type::OBJECT) + { + if (buffer->append('}')) + return true; + } + else + { + if (buffer->append(']')) + return true; + } return false; } @@ -312,13 +263,15 @@ static bool read_variable_length(const char *data, size_t data_length, bool parse_mysql_scalar(String* buffer, size_t value_json_type, const char *data, size_t len, bool large, size_t depth) { + // We keep function check_json_depth() since `mysql` has it. + // The current function is the last one which is called recursively, so it is ok + // to have depth argument only in this function. 
if (check_json_depth(++depth)) { return true; } - - switch(value_json_type) + switch (value_json_type) { /** FINISHED WORKS **/ case JSONB_TYPE_LITERAL: @@ -327,130 +280,90 @@ bool parse_mysql_scalar(String* buffer, size_t value_json_type, { case JSONB_NULL_LITERAL: { - if(buffer->append("null")) - { + if (buffer->append("null")) return true; - } break; } case JSONB_TRUE_LITERAL: { - if(buffer->append("true")) - { + if (buffer->append("true")) return true; - } break; } case JSONB_FALSE_LITERAL: { - if(buffer->append("false")) - { + if (buffer->append("false")) return true; - } break; } default: - { return true; - } - } break; } /** FINISHED WORKS **/ - case JSONB_TYPE_INT16 : + case JSONB_TYPE_INT16: { - if(buffer->append_longlong((longlong) (sint2korr(data)))) - { + if (buffer->append_longlong((longlong) (sint2korr(data)))) return true; - } break; } - /** FINISHED WORKS **/ case JSONB_TYPE_INT32: { - char *value_element; - uint num_bytes=MAX_BIGINT_WIDTH; - value_element= new char[num_bytes+1]; - memmove(value_element, const_cast<char*>(&data[0]), num_bytes); - value_element[num_bytes+1]= '\0'; - if( buffer->append_longlong(sint4korr(value_element))) - { - delete[] value_element; + const uint num_bytes= MAX_BIGINT_WIDTH + 1; + char value_element [num_bytes]; + memmove(value_element, &data[0], num_bytes); + value_element[num_bytes + 1]= '\0'; + + if (buffer->append_longlong(sint4korr(value_element))) return true; - } - delete[] value_element; break; } - /* FINISHED WORKS */ case JSONB_TYPE_INT64: { - char *value_element; - uint num_bytes=MAX_BIGINT_WIDTH; - value_element= new char[num_bytes+1]; - memmove(value_element, const_cast<char*>(&data[0]), num_bytes); - value_element[num_bytes+1]= '\0'; - if( buffer->append_longlong(sint8korr(value_element))) - { - delete[] value_element; + const uint num_bytes= MAX_BIGINT_WIDTH + 1; + char value_element [num_bytes]; + memmove(value_element, &data[0], num_bytes); + value_element[num_bytes + 1]= '\0'; + if (buffer->append_longlong(sint8korr(value_element))) return true; - } - delete[] value_element; break; } /** FINISHED WORKS **/ - case JSONB_TYPE_UINT16 : + case JSONB_TYPE_UINT16: { - if(buffer->append_longlong((longlong) (uint2korr(data)))) - { + if (buffer->append_longlong((longlong) (uint2korr(data)))) return true; - } break; } - /** FINISHED WORKS **/ - case JSONB_TYPE_UINT32 : + case JSONB_TYPE_UINT32: { - if(buffer->append_longlong((longlong) (uint4korr(data)))) - { + if (buffer->append_longlong((longlong) (uint4korr(data)))) return true; - } break; } - /** FINISHED WORKS **/ case JSONB_TYPE_UINT64: { - char *value_element; - uint num_bytes=MAX_BIGINT_WIDTH; - - value_element= new char[num_bytes+1]; - memmove(value_element, const_cast<char*>(&data[0]),num_bytes); - value_element[num_bytes+1]= '\0'; - if( buffer->append_ulonglong(uint8korr(value_element))) - { - delete[] value_element; + const uint num_bytes= MAX_BIGINT_WIDTH + 1; + char value_element [num_bytes]; + memmove(value_element, &data[0], num_bytes); + value_element[num_bytes + 1]= '\0'; + if (buffer->append_ulonglong(uint8korr(value_element))) return true; - } - delete[] value_element; break; } - + /** FINISHED WORKS **/ case JSONB_TYPE_DOUBLE: { - //char *end; - //double d=strtod(data, &end); - // double d; - // int error; - // d=my_strtod(data, &end, &error); double d; float8get(d, data); buffer->qs_append(&d); break; } - /** FINISHED WORKS **/ case JSONB_TYPE_STRING: { @@ -458,104 +371,138 @@ bool parse_mysql_scalar(String* buffer, size_t value_json_type, char *value_element; if 
(read_variable_length(data, len, &value_length, &n)) - return true; /* purecov: inspected */ + return true; if (len < n + value_length) return true; - //value_length= (uint) data[0]; - value_element= new char[value_length+1]; - memmove(value_element, const_cast<char*>(&data[n]), - value_length); + value_element= new char[value_length + 1]; + memmove(value_element, &data[n], value_length); value_element[value_length]= '\0'; - if(buffer->append('"')) + if (buffer->append('"')) { delete[] value_element; return true; } - if( buffer->append(String((const char *)value_element, &my_charset_bin))) + if (buffer->append(String((const char *)value_element, &my_charset_bin))) { delete[] value_element; return true; } delete[] value_element; - if(buffer->append('"')) - { + if (buffer->append('"')) return true; - } break; } - /** testing **/ + /** FINISHED WORKS ???? **/ case JSONB_TYPE_OPAQUE: { - // The type byte is encoded as a uint8 that maps to an enum_field_types + // The type_byte is encoded as a uint8 that maps to an enum_field_types uint8 type_byte= static_cast<uint8>(*data); - enum_field_types field_type= - static_cast<enum_field_types>(type_byte); + enum_field_types field_type= + static_cast<enum_field_types>(type_byte); + size_t value_length, n; char *value_element; - // For now we are assuming one byte length - // in general it should be calculated depending on 8th bit of ith byte - // see read_variable_length() @todo anel - size_t length; - length= data[1]; - value_element= new char[length+1]; - memmove(value_element, const_cast<char*>(&data[2]), - len-2); + + if (read_variable_length(data + 1, len, &value_length, &n)) + return true; + if (len < n + value_length) + return true; + + value_element= new char[value_length + 1]; + memmove(value_element, &data[n + 1], value_length); + value_element[value_length]= '\0'; + + MYSQL_TIME t; - switch(field_type) + switch (field_type) { - case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME: { TIME_from_longlong_time_packed(&t, sint8korr(value_element)); break; } case MYSQL_TYPE_DATE: { - //TIME_from_longlong_date_packed(ltime, packed_value); //not defined in sql/compat56.h + // The bellow line cannot work since it is not defined in sql/compat56.h + //TIME_from_longlong_date_packed(ltime, packed_value); TIME_from_longlong_datetime_packed(&t, sint8korr(value_element)); t.time_type= MYSQL_TIMESTAMP_DATE; break; } case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP: { TIME_from_longlong_datetime_packed(&t, sint8korr(value_element)); break; } case MYSQL_TYPE_NEWDECIMAL: { - my_decimal m; //@todo // need to add test case ! - return false; - } - case MYSQL_TYPE_BIT: - { - if(buffer->append("base64:type") || buffer->append(':')) - { - return true; - } - size_t pos= buffer->length(); - const size_t needed= - static_cast<size_t>(my_base64_needed_encoded_length(length)); - if(my_base64_encode(value_element, length, - const_cast<char*>(buffer->ptr() + pos))) + my_decimal m; //@todo anel // need to add test case ! + // Expect at least two bytes, which contain precision and scale. + bool error= (value_length < 2); + + if (!error) { - return true; + int precision= value_element[0]; + int scale= value_element[1]; + + // The decimal value is encoded after the two precision/scale bytes. 
+ size_t bin_size= my_decimal_get_binary_size(precision, scale); + error= + (bin_size != value_length - 2) || + (binary2my_decimal(E_DEC_ERROR, + ((const uchar*)value_element) + 2, + &m, precision, scale) != E_DEC_OK); + m.fix_buffer_pointer(); + // Convert my_decimal to decimal and append to string. + double d; + const decimal_t *mptr= &m; + my_decimal2double(E_DEC_FATAL_ERROR, mptr, &d); + buffer->qs_append(&d); } - buffer->length(pos+needed-1); - return false; + + return error; } default: + { + /* The same encoding is applied on MYSQL_TYPE_BIT, MYSQL_TYPE_VARCHAR, + MYSQL_TYPE_YEAR, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_BLOB. + */ + if (field_type == MYSQL_TYPE_BIT || field_type == MYSQL_TYPE_VARCHAR || + field_type == MYSQL_TYPE_YEAR || field_type == MYSQL_TYPE_LONG_BLOB || + field_type == MYSQL_TYPE_MEDIUM_BLOB || + field_type == MYSQL_TYPE_TINY_BLOB || field_type == MYSQL_TYPE_BLOB) + { + if (buffer->append('"')) + return true; + if (buffer->append("base64:type") || buffer->append(':')) + return true; + + size_t pos= buffer->length(); + const size_t needed= + static_cast<size_t>(my_base64_needed_encoded_length(value_length)); + buffer->reserve(needed); + if(my_base64_encode(value_element, value_length, + const_cast<char*>(buffer->ptr() + pos))) + return true; + buffer->length(pos + needed - 1); + if (buffer->append('"')) + return true; + return false; + } return false; + } } delete[] value_element; // This part is common to datetime/date/timestamp - char *ptr= const_cast<char *>(buffer->ptr())+buffer->length(); + char *ptr= const_cast<char *>(buffer->ptr()) + buffer->length(); const int size= my_TIME_to_str(&t, ptr, 6); buffer->length(buffer->length() + size); } // opaque } return false; } - diff --git a/sql/mysql_json.h b/sql/mysql_json.h index 31bed6ac1af..ed5c58651e7 100644 --- a/sql/mysql_json.h +++ b/sql/mysql_json.h @@ -8,7 +8,6 @@ #include "mysql_com.h" #include "mysqld_error.h" -// defines of mysql @todo anel put in single file #define JSONB_TYPE_SMALL_OBJECT 0x0 #define JSONB_TYPE_LARGE_OBJECT 0x1 #define JSONB_TYPE_SMALL_ARRAY 0x2 @@ -56,7 +55,6 @@ #define JSON_DOCUMENT_MAX_DEPTH 100 /* - @todo anel enums used in json_dom Json values in MySQL comprises the stand set of JSON values plus a MySQL specific set. A Json _number_ type is subdivided into _int_, _uint_, _double_ and _decimal_. @@ -128,7 +126,7 @@ enum enum_json_type { J_ERROR }; - /** +/* Extended type ids so that JSON_TYPE() can give useful type names to certain sub-types of J_OPAQUE. */ @@ -138,15 +136,14 @@ enum enum_json_opaque_type { J_OPAQUE_GEOMETRY }; -// Prototypes + size_t read_offset_or_size(const char *, bool); -bool get_mysql_string(String *buffer, size_t type, const char *data, size_t len, bool large); -bool parse_value(String *buffer, size_t type, const char *data, size_t len, bool large, size_t depth); +bool get_mysql_string(String *buffer, size_t type, const char *data, size_t len, + bool large); +bool parse_value(String *buffer, size_t type, const char *data, size_t len, + bool large, size_t depth); bool parse_array_or_object(String * buffer, Field_mysql_json::enum_type, - const char *,size_t , bool); + const char *, size_t, bool); bool parse_mysql_scalar(String* buffer, size_t type, const char *data, size_t len, bool large, size_t depth); - -// static bool read_variable_length(const char *data, size_t data_length, -// size_t *length, size_t *num) #endif /* MYSQL_JSON_INCLUDED */
\ No newline at end of file diff --git a/sql/table.cc b/sql/table.cc index e335abe4e8d..1124ef788f1 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1856,16 +1856,14 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } /* - if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL) if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL) Special handling to be able to read MySQL JSON types when - converting a MySQL table to MariaDB table. + converting a MySQL table (MyISAM) to MariaDB table. */ if (share->mysql_version >= 50700 && share->mysql_version < 100000 && strpos[13] == (uchar) MYSQL_TYPE_VIRTUAL) { field_type= (enum_field_types) MYSQL_TYPE_MYSQL_JSON; - // strpos[13]= (uchar) MYSQL_TYPE_MYSQL_JSON; // read only } else if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL) { |
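
As background for the mysql_json.cc changes above: a JSONB array or object starts with two length fields (element count and total size in bytes), stored as 2-byte offsets in the small format and 4-byte offsets in the large format. The sketch below restates that header read outside the server sources; read_offset and read_container_header are hypothetical names standing in for read_offset_or_size() and the start of parse_array_or_object().

```cpp
#include <cstddef>
#include <cstdint>

// Little-endian read of a 2-byte (small) or 4-byte (large) offset/size field,
// standing in for the uint2korr()/uint4korr() calls in read_offset_or_size().
static uint32_t read_offset(const unsigned char *p, bool large)
{
  uint32_t v= static_cast<uint32_t>(p[0]) | (static_cast<uint32_t>(p[1]) << 8);
  if (large)
    v|= (static_cast<uint32_t>(p[2]) << 16) | (static_cast<uint32_t>(p[3]) << 24);
  return v;
}

// Read the two header words of a JSONB array/object and run the same sanity
// checks as parse_array_or_object(): the buffer must hold at least the header,
// and the declared container size must not exceed the buffer.
// Returns true on error, false on success.
static bool read_container_header(const unsigned char *data, size_t len,
                                  bool large, uint32_t *element_count,
                                  uint32_t *bytes)
{
  const size_t offset_size= large ? 4 : 2;  // LARGE_OFFSET_SIZE / SMALL_OFFSET_SIZE
  if (len < 2 * offset_size)
    return true;
  *element_count= read_offset(data, large);          // number of elements/members
  *bytes= read_offset(data + offset_size, large);    // total size of the container
  return *bytes > len;
}
```

Key entries (for objects) and value entries follow this header; inlined scalars (INT16, UINT16, literals, and 32-bit integers in the large format) store their value directly in the entry, which is why the patch handles them separately from the non-inlined case that recurses through parse_value().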