path: root/sql/item_jsonfunc.cc
author     Marko Mäkelä <marko.makela@mariadb.com>  2022-07-28 10:33:26 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>  2022-07-28 10:33:26 +0300
commit     f79cebb4d02a7b5151ac617bc762c3e094436562 (patch)
tree       16008fa4f8d8d0b9b884670f6d850ae6a151520a /sql/item_jsonfunc.cc
parent     1630037959ab9516fc2a56d4901d2e5d794bc8e7 (diff)
parent     742e1c727fc2be50b758068c2ab92abb19f3ff56 (diff)
download   mariadb-git-f79cebb4d02a7b5151ac617bc762c3e094436562.tar.gz
Merge 10.7 into 10.8
Diffstat (limited to 'sql/item_jsonfunc.cc')
-rw-r--r--  sql/item_jsonfunc.cc | 143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index f4ac1605ab6..725fe4ab33e 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -18,7 +18,22 @@
#include "sql_priv.h"
#include "sql_class.h"
#include "item.h"
+#include "sql_parse.h" // For check_stack_overrun
+/*
+  Allocate memory on the stack and *also* use it (read from and write
+  to it), because in some builds the compiler would otherwise optimize
+  stack_used_up out. Since the alloca() here depends on stack_used_up,
+  the allocation would then not happen as intended, and the
+  json_debug_nonembedded test would fail (the expected
+  --error ER_STACK_OVERRUN_NEED_MORE would not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do                   \
+  {                                                    \
+    uchar *array= (uchar*)alloca(A);                   \
+    bzero(array, A);                                   \
+    my_checksum(0, array, A);                          \
+  } while(0)
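If the buffer returned by alloca() were never read or written, an optimizing compiler could drop the allocation entirely and no stack would actually be consumed. A rough standalone sketch of the same idea, outside MariaDB (consume_stack() and the 64 KiB figure are invented for illustration; the macro above uses bzero() and my_checksum() for the same purpose):

#include <alloca.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical helper: burn roughly `bytes` of stack and keep the
   compiler from discarding the allocation by writing to the buffer
   and feeding a value read back from it into the return value. */
static unsigned consume_stack(size_t bytes)
{
  unsigned char *buf= (unsigned char*) alloca(bytes);
  memset(buf, 0, bytes);                /* write: the buffer is used   */
  unsigned sum= 0;
  for (size_t i= 0; i < bytes; i+= 4096)
    sum+= buf[i];                       /* read: result is observable  */
  return sum;
}

int main()
{
  printf("%u\n", consume_stack(64 * 1024));   /* prints 0 */
  return 0;
}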
/*
  Compare ASCII string against the string with the specified
@@ -128,6 +143,113 @@ static int append_tab(String *js, int depth, int tab_size)
  return 0;
}
+int json_path_parts_compare(
+  const json_path_step_t *a, const json_path_step_t *a_end,
+  const json_path_step_t *b, const json_path_step_t *b_end,
+  enum json_value_types vt)
+{
+  int res, res2;
+
+  long arbitrary_var;
+  long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+  DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+                  {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+  if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+    return 1;
+
+  while (a <= a_end)
+  {
+    if (b > b_end)
+    {
+      while (vt != JSON_VALUE_ARRAY &&
+             (a->type & JSON_PATH_ARRAY_WILD) == JSON_PATH_ARRAY &&
+             a->n_item == 0)
+      {
+        if (++a > a_end)
+          return 0;
+      }
+      return -2;
+    }
+
+    DBUG_ASSERT((b->type & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD)) == 0);
+
+
+    if (a->type & JSON_PATH_ARRAY)
+    {
+      if (b->type & JSON_PATH_ARRAY)
+      {
+        if ((a->type & JSON_PATH_WILD) || a->n_item == b->n_item)
+          goto step_fits;
+        goto step_failed;
+      }
+      if ((a->type & JSON_PATH_WILD) == 0 && a->n_item == 0)
+        goto step_fits_autowrap;
+      goto step_failed;
+    }
+    else /* JSON_PATH_KEY */
+    {
+      if (!(b->type & JSON_PATH_KEY))
+        goto step_failed;
+
+      if (!(a->type & JSON_PATH_WILD) &&
+          (a->key_end - a->key != b->key_end - b->key ||
+           memcmp(a->key, b->key, a->key_end - a->key) != 0))
+        goto step_failed;
+
+      goto step_fits;
+    }
+step_failed:
+    if (!(a->type & JSON_PATH_DOUBLE_WILD))
+      return -1;
+    b++;
+    continue;
+
+step_fits:
+    b++;
+    if (!(a->type & JSON_PATH_DOUBLE_WILD))
+    {
+      a++;
+      continue;
+    }
+
+    /* Double wild handling needs recursions. */
+    res= json_path_parts_compare(a+1, a_end, b, b_end, vt);
+    if (res == 0)
+      return 0;
+
+    res2= json_path_parts_compare(a, a_end, b, b_end, vt);
+
+    return (res2 >= 0) ? res2 : res;
+
+step_fits_autowrap:
+    if (!(a->type & JSON_PATH_DOUBLE_WILD))
+    {
+      a++;
+      continue;
+    }
+
+    /* Double wild handling needs recursions. */
+    res= json_path_parts_compare(a+1, a_end, b+1, b_end, vt);
+    if (res == 0)
+      return 0;
+
+    res2= json_path_parts_compare(a, a_end, b+1, b_end, vt);
+
+    return (res2 >= 0) ? res2 : res;
+
+  }
+
+  return b <= b_end;
+}
+
+
+int json_path_compare(const json_path_t *a, const json_path_t *b,
+                      enum json_value_types vt)
+{
+  return json_path_parts_compare(a->steps+1, a->last_step,
+                                 b->steps+1, b->last_step, vt);
+}
+
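The heart of json_path_parts_compare() is the two-branch recursion for JSON_PATH_DOUBLE_WILD ('**') steps: res tries the interpretation in which the double-wild step is done, res2 the one in which it keeps consuming further steps of b. A much simplified, self-contained sketch of that pattern (parts_compare() and the string-array path representation are invented here; the real function additionally handles array steps, autowrapping, the vt argument and its own return-value conventions):

#include <cstring>
#include <cstdio>

/* Pattern `a` may contain "*" (any one step) and "**" (any number of
   steps); `b` is a concrete path.  Both are NULL-terminated arrays.
   Returns 0 on match, -1 otherwise. */
static int parts_compare(const char **a, const char **b)
{
  if (*a == NULL)
    return *b == NULL ? 0 : -1;         /* pattern exhausted           */
  if (strcmp(*a, "**") == 0)
  {
    if (parts_compare(a + 1, b) == 0)   /* "**" matches nothing more   */
      return 0;
    if (*b == NULL)
      return -1;
    return parts_compare(a, b + 1);     /* "**" absorbs one more step  */
  }
  if (*b == NULL)
    return -1;
  if (strcmp(*a, "*") != 0 && strcmp(*a, *b) != 0)
    return -1;                          /* key mismatch                */
  return parts_compare(a + 1, b + 1);
}

int main()
{
  const char *pat[]=  { "a", "**", "c", NULL };
  const char *path[]= { "a", "x", "y", "c", NULL };
  printf("%d\n", parts_compare(pat, path));   /* prints 0 (match)      */
  return 0;
}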
static int json_nice(json_engine_t *je, String *nice_js,
                     Item_func_json_format::formats mode, int tab_size=4)
@@ -1088,6 +1210,12 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
{
  json_engine_t loc_js;
  bool set_js;
+  long arbitrary_var;
+  long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+  DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+                  {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+  if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+    return 1;
  switch (js->value_type)
  {
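The same guard pattern recurs here and in do_merge() / do_merge_patch() below. Under the json_check_min_stack_requirement debug point, the injected allocation is sized so that only about STACK_MIN_SIZE bytes of the thread stack remain afterwards, which is exactly the margin the following check_stack_overrun() call demands, so ER_STACK_OVERRUN_NEED_MORE fires already at the first level of recursion. A rough restatement of that sizing (to_consume is an invented name; the expression is the one passed to ALLOCATE_MEM_ON_STACK above):

/* Illustrative only: the size handed to ALLOCATE_MEM_ON_STACK.
   After consuming this many bytes, roughly STACK_MIN_SIZE remain, so
   check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL) must fail. */
size_t to_consume= my_thread_stack_size   /* total stack for the thread */
                   - stack_used_up        /* used before entering here  */
                   - STACK_MIN_SIZE;      /* safety margin under test   */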
@@ -2051,6 +2179,14 @@ err_return:
static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
{
+
+  long arbitrary_var;
+  long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+  DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+                  {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+  if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+    return 1;
+
  if (json_read_value(je1) || json_read_value(je2))
    return 1;
@@ -2385,6 +2521,13 @@ static int copy_value_patch(String *str, json_engine_t *je)
static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
                          bool *empty_result)
{
+  long arbitrary_var;
+  long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+  DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+                  {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+  if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+    return 1;
+
  if (json_read_value(je1) || json_read_value(je2))
    return 1;