author     Sergei Golubchik <serg@mariadb.org>   2019-05-17 17:23:01 +0200
committer  Sergei Golubchik <serg@mariadb.org>   2019-05-17 17:23:01 +0200
commit     c1fd027115ce9a32bcbe42796f4df58309636705 (patch)
tree       49ba501be560c1aa17dbf4c066652c7d3b1b876b
parent     e506bef430c3648f88469d42631136080db9f332 (diff)
parent     fae6539ef727b56bb5a58d4bbe515512e85ba2f4 (diff)
download   mariadb-git-c1fd027115ce9a32bcbe42796f4df58309636705.tar.gz
Merge branch '10.2' into 10.3
-rw-r--r--  mysql-test/main/func_json.result                                            75
-rw-r--r--  mysql-test/main/func_json.test                                              43
-rw-r--r--  mysql-test/suite/json/r/json_no_table.result                                 4
-rw-r--r--  mysql-test/suite/plugins/r/feedback_plugin_load.result                       6
-rw-r--r--  mysql-test/suite/plugins/t/feedback_plugin_load.test                         2
-rw-r--r--  sql/item_create.cc                                                          39
-rw-r--r--  sql/item_jsonfunc.cc                                                       325
-rw-r--r--  sql/item_jsonfunc.h                                                         12
-rw-r--r--  sql/mysqld.cc                                                                1
-rw-r--r--  sql/sql_base.cc                                                              5
-rw-r--r--  storage/innobase/handler/ha_innodb.cc                                        2
-rw-r--r--  storage/rocksdb/CMakeLists.txt                                               5
-rw-r--r--  storage/rocksdb/ha_rocksdb.cc                                               67
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result                 675
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rc.result       30
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rr.result       30
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rc.result       30
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rr.result       30
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc.result         50
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc_lsr.result     37
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr.result         50
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr_lsr.result     37
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rc.result         25
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rr.result         23
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rc.result         23
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rr.result         23
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rc.result         29
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rr.result         28
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rc.result         29
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rr.result         28
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc.result         41
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc_lsr.result     45
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr.result         41
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr_lsr.result     45
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result                  12
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test                    66
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rc.test          4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rr.test          4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rc.test          4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rr.test          4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc.test            5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc_lsr.test        5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr.test            5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr_lsr.test        5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rc.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rr.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rc.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rr.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rc.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rr.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rc.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rr.test            4
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc.test            5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc_lsr.test        5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr.test            5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr_lsr.test        5
-rw-r--r--  storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test                    16
-rw-r--r--  storage/rocksdb/rdb_utils.h                                                  4
-rw-r--r--  tests/mysql_client_test.c                                                   21
59 files changed, 1322 insertions, 820 deletions
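
Note: the main functional change merged here is MDEV-13992 (JSON_MERGE_PATCH); the per-file diffs follow below. As a quick, illustrative orientation — statements and expected values taken from the mysql-test/main/func_json.test cases added in this merge — the function follows RFC 7396: object members are merged recursively, a null value in the patch removes the member, and a non-object patch replaces the target entirely.

SELECT JSON_MERGE_PATCH('{"a":"b"}', '{"a":"c"}');
-- {"a": "c"}          (member overwritten by the patch)
SELECT JSON_MERGE_PATCH('{"a":"b", "b":"c"}', '{"a":null}');
-- {"b": "c"}          (null in the patch removes the member)
SELECT JSON_MERGE_PATCH('{"a": {"b":"c"}}', '{"a": {"b":"d", "c":null}}');
-- {"a": {"b": "d"}}   (nested objects are merged recursively)
SELECT JSON_MERGE_PATCH('{"a":"b"}', '["c"]');
-- ["c"]               (a non-object patch replaces the target)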
diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result
index 6222d5f1feb..427be24d5ea 100644
--- a/mysql-test/main/func_json.result
+++ b/mysql-test/main/func_json.result
@@ -306,7 +306,7 @@ select json_merge('string', 123);
json_merge('string', 123)
NULL
Warnings:
-Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge' at position 1
+Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge_preserve' at position 1
select json_merge('"string"', 123);
json_merge('"string"', 123)
["string", 123]
@@ -326,7 +326,7 @@ select json_merge('a','b');
json_merge('a','b')
NULL
Warnings:
-Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge' at position 1
+Warning 4038 Syntax error in JSON text in argument 1 to function 'json_merge_preserve' at position 1
select json_merge('{"a":"b"}','{"c":"d"}');
json_merge('{"a":"b"}','{"c":"d"}')
{"a": "b", "c": "d"}
@@ -843,6 +843,77 @@ SELECT CHARSET(JSON_OBJECT());
CHARSET(JSON_OBJECT())
latin1
#
+# MDEV-13992 Implement JSON_MERGE_PATCH
+#
+CREATE TABLE merge_t(
+id INT PRIMARY KEY AUTO_INCREMENT,
+target VARCHAR(100), patch VARCHAR(100)
+);
+INSERT INTO merge_t(target, patch) VALUES
+('{"a":"b"}', '{"a":"c"}'),
+('{"a":"b"}', '{"b":"c"}'),
+('{"a":"b"}', '{"a":null}'),
+('{"a":"b", "b":"c"}', '{"a":null}'),
+('{"a":["b"]}', '{"a":"c"}'),
+('{"a":"c"}', '{"a":["b"]}'),
+('{"a": {"b":"c"}}', '{"a": {"b":"d", "c":null}}'),
+('{"a":[{"b":"c"}]}', '{"a": [1]}'),
+('["a","b"]', '["c","d"]'),
+('{"a":"b"}', '["c"]'),
+('{"a":"foo"}', 'null'),
+('{"a":"foo"}', '"bar"'),
+('{"e":null}', '{"a":1}'),
+('[1,2]', '{"a":"b", "c":null}'),
+('{}', '{"a":{"bb":{"ccc":null}}}'),
+(NULL, '{}'),
+('{}', NULL);
+SELECT id, target, patch,
+JSON_MERGE_PATCH(target, patch) AS merged,
+JSON_EXTRACT(JSON_MERGE_PATCH(target, patch), '$.a') AS a
+FROM merge_t ORDER BY id;
+id target patch merged a
+1 {"a":"b"} {"a":"c"} {"a": "c"} "c"
+2 {"a":"b"} {"b":"c"} {"a": "b", "b": "c"} "b"
+3 {"a":"b"} {"a":null} {} NULL
+4 {"a":"b", "b":"c"} {"a":null} {"b": "c"} NULL
+5 {"a":["b"]} {"a":"c"} {"a": "c"} "c"
+6 {"a":"c"} {"a":["b"]} {"a": ["b"]} ["b"]
+7 {"a": {"b":"c"}} {"a": {"b":"d", "c":null}} {"a": {"b": "d"}} {"b": "d"}
+8 {"a":[{"b":"c"}]} {"a": [1]} {"a": [1]} [1]
+9 ["a","b"] ["c","d"] ["c", "d"] NULL
+10 {"a":"b"} ["c"] ["c"] NULL
+11 {"a":"foo"} null null NULL
+12 {"a":"foo"} "bar" "bar" NULL
+13 {"e":null} {"a":1} {"e": null, "a": 1} 1
+14 [1,2] {"a":"b", "c":null} {"a": "b"} "b"
+15 {} {"a":{"bb":{"ccc":null}}} {"a": {"bb": {}}} {"bb": {}}
+16 NULL {} NULL NULL
+17 {} NULL NULL NULL
+DROP TABLE merge_t;
+SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}');
+JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}')
+NULL
+SELECT JSON_MERGE_PATCH(NULL, '[1,2,3]');
+JSON_MERGE_PATCH(NULL, '[1,2,3]')
+[1, 2, 3]
+SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}');
+JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}')
+{"d": "e"}
+SELECT JSON_MERGE_PATCH();
+ERROR 42000: Incorrect parameter count in the call to native function 'JSON_MERGE_PATCH'
+SELECT JSON_MERGE_PATCH('{}');
+ERROR 42000: Incorrect parameter count in the call to native function 'JSON_MERGE_PATCH'
+SELECT JSON_MERGE_PATCH('{', '[1,2,3]');
+JSON_MERGE_PATCH('{', '[1,2,3]')
+NULL
+Warnings:
+Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge_patch'
+SELECT JSON_MERGE_PATCH('{"a":"b"}', '[1,');
+JSON_MERGE_PATCH('{"a":"b"}', '[1,')
+NULL
+Warnings:
+Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge_patch'
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test
index bcf0fdfe3fc..e557be85308 100644
--- a/mysql-test/main/func_json.test
+++ b/mysql-test/main/func_json.test
@@ -492,6 +492,49 @@ SELECT CHARSET(JSON_ARRAY());
SELECT CHARSET(JSON_OBJECT());
--echo #
+--echo # MDEV-13992 Implement JSON_MERGE_PATCH
+--echo #
+
+CREATE TABLE merge_t(
+id INT PRIMARY KEY AUTO_INCREMENT,
+target VARCHAR(100), patch VARCHAR(100)
+);
+INSERT INTO merge_t(target, patch) VALUES
+('{"a":"b"}', '{"a":"c"}'),
+('{"a":"b"}', '{"b":"c"}'),
+('{"a":"b"}', '{"a":null}'),
+('{"a":"b", "b":"c"}', '{"a":null}'),
+('{"a":["b"]}', '{"a":"c"}'),
+('{"a":"c"}', '{"a":["b"]}'),
+('{"a": {"b":"c"}}', '{"a": {"b":"d", "c":null}}'),
+('{"a":[{"b":"c"}]}', '{"a": [1]}'),
+('["a","b"]', '["c","d"]'),
+('{"a":"b"}', '["c"]'),
+('{"a":"foo"}', 'null'),
+('{"a":"foo"}', '"bar"'),
+('{"e":null}', '{"a":1}'),
+('[1,2]', '{"a":"b", "c":null}'),
+('{}', '{"a":{"bb":{"ccc":null}}}'),
+(NULL, '{}'),
+('{}', NULL);
+SELECT id, target, patch,
+ JSON_MERGE_PATCH(target, patch) AS merged,
+ JSON_EXTRACT(JSON_MERGE_PATCH(target, patch), '$.a') AS a
+FROM merge_t ORDER BY id;
+DROP TABLE merge_t;
+
+SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}');
+SELECT JSON_MERGE_PATCH(NULL, '[1,2,3]');
+SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}');
+
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT JSON_MERGE_PATCH();
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT JSON_MERGE_PATCH('{}');
+SELECT JSON_MERGE_PATCH('{', '[1,2,3]');
+SELECT JSON_MERGE_PATCH('{"a":"b"}', '[1,');
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/suite/json/r/json_no_table.result b/mysql-test/suite/json/r/json_no_table.result
index 41150032e51..b8ac19bd09e 100644
--- a/mysql-test/suite/json/r/json_no_table.result
+++ b/mysql-test/suite/json/r/json_no_table.result
@@ -821,13 +821,13 @@ select json_merge( '[1, 2]', '[3, 4' );
json_merge( '[1, 2]', '[3, 4' )
NULL
Warnings:
-Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge'
+Warning 4037 Unexpected end of JSON text in argument 2 to function 'json_merge_preserve'
error ER_INVALID_JSON_TEXT_IN_PARAM
select json_merge( '[1, 2', '[3, 4]' );
json_merge( '[1, 2', '[3, 4]' )
NULL
Warnings:
-Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge'
+Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_merge_preserve'
select json_merge( '1', '2' );
json_merge( '1', '2' )
[1, 2]
diff --git a/mysql-test/suite/plugins/r/feedback_plugin_load.result b/mysql-test/suite/plugins/r/feedback_plugin_load.result
index f96b4d9b71f..843cd15ac94 100644
--- a/mysql-test/suite/plugins/r/feedback_plugin_load.result
+++ b/mysql-test/suite/plugins/r/feedback_plugin_load.result
@@ -2,9 +2,9 @@ select plugin_status from information_schema.plugins where plugin_name='feedback
plugin_status
ACTIVE
SELECT variable_value INTO @feedback_used FROM information_schema.feedback where variable_name = 'FEEDBACK used';
-SELECT variable_value = @feedback_used + 1 FROM information_schema.feedback where variable_name = 'FEEDBACK used';
-variable_value = @feedback_used + 1
-0
+SELECT variable_value = @feedback_used + 1 as 'MUST BE 1' FROM information_schema.feedback where variable_name = 'FEEDBACK used';
+MUST BE 1
+1
select * from information_schema.feedback where variable_name like 'feed%'
and variable_name not like '%_uid' and variable_name not like 'FEEDBACK used'
and variable_name not like '%debug%';
diff --git a/mysql-test/suite/plugins/t/feedback_plugin_load.test b/mysql-test/suite/plugins/t/feedback_plugin_load.test
index 8b4aee28362..11a16134135 100644
--- a/mysql-test/suite/plugins/t/feedback_plugin_load.test
+++ b/mysql-test/suite/plugins/t/feedback_plugin_load.test
@@ -17,7 +17,7 @@ select plugin_status from information_schema.plugins where plugin_name='feedback
SELECT variable_value INTO @feedback_used FROM information_schema.feedback where variable_name = 'FEEDBACK used';
# Now $feedback_used == X+1, and 'FEEDBACK used' is also X+1. And variable_value is increased again when we run the next SELECT
-SELECT variable_value = @feedback_used + 1 FROM information_schema.feedback where variable_name = 'FEEDBACK used';
+SELECT variable_value = @feedback_used + 1 as 'MUST BE 1' FROM information_schema.feedback where variable_name = 'FEEDBACK used';
# Now when we are happy with 'FEEDBACK used', we can check everything else
diff --git a/sql/item_create.cc b/sql/item_create.cc
index cdb9c4bf6bb..503165e69df 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -1982,6 +1982,19 @@ protected:
};
+class Create_func_json_merge_patch : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_CSTRING *name, List<Item> *item_list);
+
+ static Create_func_json_merge_patch s_singleton;
+
+protected:
+ Create_func_json_merge_patch() {}
+ virtual ~Create_func_json_merge_patch() {}
+};
+
+
class Create_func_json_quote : public Create_func_arg1
{
public:
@@ -5514,6 +5527,30 @@ Create_func_json_merge::create_native(THD *thd, LEX_CSTRING *name,
}
+Create_func_json_merge_patch Create_func_json_merge_patch::s_singleton;
+
+Item*
+Create_func_json_merge_patch::create_native(THD *thd, LEX_CSTRING *name,
+ List<Item> *item_list)
+{
+ Item *func;
+ int arg_count;
+
+ if (item_list == NULL ||
+ (arg_count= item_list->elements) < 2) // json, json
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
+ func= NULL;
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_merge_patch(thd, *item_list);
+ }
+
+ return func;
+}
+
+
Create_func_json_contains Create_func_json_contains::s_singleton;
Item*
@@ -7108,6 +7145,8 @@ static Native_func_registry func_array[] =
{ { STRING_WITH_LEN("JSON_LENGTH") }, BUILDER(Create_func_json_length)},
{ { STRING_WITH_LEN("JSON_LOOSE") }, BUILDER(Create_func_json_loose)},
{ { STRING_WITH_LEN("JSON_MERGE") }, BUILDER(Create_func_json_merge)},
+ { { STRING_WITH_LEN("JSON_MERGE_PATCH") }, BUILDER(Create_func_json_merge_patch)},
+ { { STRING_WITH_LEN("JSON_MERGE_PRESERVE") }, BUILDER(Create_func_json_merge)},
{ { STRING_WITH_LEN("JSON_QUERY") }, BUILDER(Create_func_json_query)},
{ { STRING_WITH_LEN("JSON_QUOTE") }, BUILDER(Create_func_json_quote)},
{ { STRING_WITH_LEN("JSON_OBJECT") }, BUILDER(Create_func_json_object)},
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 960a6e9ccc8..2f43f907c8b 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -2152,6 +2152,331 @@ null_return:
}
+static int copy_value_patch(String *str, json_engine_t *je)
+{
+ int first_key= 1;
+
+ if (je->value_type != JSON_VALUE_OBJECT)
+ {
+ const uchar *beg, *end;
+
+ beg= je->value_begin;
+
+ if (!json_value_scalar(je))
+ {
+ if (json_skip_level(je))
+ return 1;
+ end= je->s.c_str;
+ }
+ else
+ end= je->value_end;
+
+ if (append_simple(str, beg, end-beg))
+ return 1;
+
+ return 0;
+ }
+ /* JSON_VALUE_OBJECT */
+
+ if (str->append("{", 1))
+ return 1;
+ while (json_scan_next(je) == 0 && je->state != JST_OBJ_END)
+ {
+ const uchar *key_start;
+ /* Loop through the Json_1 keys and compare with the Json_2 keys. */
+ DBUG_ASSERT(je->state == JST_KEY);
+ key_start= je->s.c_str;
+
+ if (json_read_value(je))
+ return 1;
+
+ if (je->value_type == JSON_VALUE_NULL)
+ continue;
+
+ if (!first_key)
+ {
+ if (str->append(", ", 2))
+ return 3;
+ }
+ else
+ first_key= 0;
+
+ if (str->append("\"", 1) ||
+ append_simple(str, key_start, je->value_begin - key_start) ||
+ copy_value_patch(str, je))
+ return 1;
+ }
+ if (str->append("}", 1))
+ return 1;
+
+ return 0;
+}
+
+
+static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
+ bool *empty_result)
+{
+ if (json_read_value(je1) || json_read_value(je2))
+ return 1;
+
+ if (je1->value_type == JSON_VALUE_OBJECT &&
+ je2->value_type == JSON_VALUE_OBJECT)
+ {
+ json_engine_t sav_je1= *je1;
+ json_engine_t sav_je2= *je2;
+
+ int first_key= 1;
+ json_string_t key_name;
+ size_t sav_len;
+ bool mrg_empty;
+
+ *empty_result= FALSE;
+ json_string_set_cs(&key_name, je1->s.cs);
+
+ if (str->append("{", 1))
+ return 3;
+ while (json_scan_next(je1) == 0 &&
+ je1->state != JST_OBJ_END)
+ {
+ const uchar *key_start, *key_end;
+ /* Loop through the Json_1 keys and compare with the Json_2 keys. */
+ DBUG_ASSERT(je1->state == JST_KEY);
+ key_start= je1->s.c_str;
+ do
+ {
+ key_end= je1->s.c_str;
+ } while (json_read_keyname_chr(je1) == 0);
+
+ if (je1->s.error)
+ return 1;
+
+ sav_len= str->length();
+
+ if (!first_key)
+ {
+ if (str->append(", ", 2))
+ return 3;
+ *je2= sav_je2;
+ }
+
+ if (str->append("\"", 1) ||
+ append_simple(str, key_start, key_end - key_start) ||
+ str->append("\":", 2))
+ return 3;
+
+ while (json_scan_next(je2) == 0 &&
+ je2->state != JST_OBJ_END)
+ {
+ int ires;
+ DBUG_ASSERT(je2->state == JST_KEY);
+ json_string_set_str(&key_name, key_start, key_end);
+ if (!json_key_matches(je2, &key_name))
+ {
+ if (je2->s.error || json_skip_key(je2))
+ return 2;
+ continue;
+ }
+
+ /* Json_2 has same key as Json_1. Merge them. */
+ if ((ires= do_merge_patch(str, je1, je2, &mrg_empty)))
+ return ires;
+
+ if (mrg_empty)
+ str->length(sav_len);
+ else
+ first_key= 0;
+
+ goto merged_j1;
+ }
+
+ if (je2->s.error)
+ return 2;
+
+ key_start= je1->s.c_str;
+ /* Just append the Json_1 key value. */
+ if (json_skip_key(je1))
+ return 1;
+ if (append_simple(str, key_start, je1->s.c_str - key_start))
+ return 3;
+ first_key= 0;
+
+merged_j1:
+ continue;
+ }
+
+ *je2= sav_je2;
+ /*
+ Now loop through the Json_2 keys.
+ Skip if there is same key in Json_1
+ */
+ while (json_scan_next(je2) == 0 &&
+ je2->state != JST_OBJ_END)
+ {
+ const uchar *key_start, *key_end;
+ DBUG_ASSERT(je2->state == JST_KEY);
+ key_start= je2->s.c_str;
+ do
+ {
+ key_end= je2->s.c_str;
+ } while (json_read_keyname_chr(je2) == 0);
+
+ if (je2->s.error)
+ return 1;
+
+ *je1= sav_je1;
+ while (json_scan_next(je1) == 0 &&
+ je1->state != JST_OBJ_END)
+ {
+ DBUG_ASSERT(je1->state == JST_KEY);
+ json_string_set_str(&key_name, key_start, key_end);
+ if (!json_key_matches(je1, &key_name))
+ {
+ if (je1->s.error || json_skip_key(je1))
+ return 2;
+ continue;
+ }
+ if (json_skip_key(je2) ||
+ json_skip_level(je1))
+ return 1;
+ goto continue_j2;
+ }
+
+ if (je1->s.error)
+ return 2;
+
+
+ sav_len= str->length();
+
+ if (!first_key && str->append(", ", 2))
+ return 3;
+
+ if (str->append("\"", 1) ||
+ append_simple(str, key_start, key_end - key_start) ||
+ str->append("\":", 2))
+ return 3;
+
+ if (json_read_value(je2))
+ return 1;
+
+ if (je2->value_type == JSON_VALUE_NULL)
+ str->length(sav_len);
+ else
+ {
+ if (copy_value_patch(str, je2))
+ return 1;
+ first_key= 0;
+ }
+
+continue_j2:
+ continue;
+ }
+
+ if (str->append("}", 1))
+ return 3;
+ }
+ else
+ {
+ if (!json_value_scalar(je1) && json_skip_level(je1))
+ return 1;
+
+ *empty_result= je2->value_type == JSON_VALUE_NULL;
+ if (!(*empty_result) && copy_value_patch(str, je2))
+ return 1;
+ }
+
+ return 0;
+}
+
+
+String *Item_func_json_merge_patch::val_str(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+ json_engine_t je1, je2;
+ String *js1= args[0]->val_json(&tmp_js1), *js2=NULL;
+ uint n_arg;
+ bool empty_result, merge_to_null;
+
+ merge_to_null= args[0]->null_value;
+
+ for (n_arg=1; n_arg < arg_count; n_arg++)
+ {
+ js2= args[n_arg]->val_json(&tmp_js2);
+ if (args[n_arg]->null_value)
+ {
+ merge_to_null= true;
+ goto cont_point;
+ }
+
+ json_scan_start(&je2, js2->charset(),(const uchar *) js2->ptr(),
+ (const uchar *) js2->ptr() + js2->length());
+
+ if (merge_to_null)
+ {
+ if (json_read_value(&je2))
+ goto error_return;
+ if (je2.value_type == JSON_VALUE_OBJECT)
+ {
+ merge_to_null= true;
+ goto cont_point;
+ }
+ merge_to_null= false;
+ str->set(js2->ptr(), js2->length(), js2->charset());
+ goto cont_point;
+ }
+
+ str->set_charset(js1->charset());
+ str->length(0);
+
+
+ json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
+ (const uchar *) js1->ptr() + js1->length());
+
+ if (do_merge_patch(str, &je1, &je2, &empty_result))
+ goto error_return;
+
+ if (empty_result)
+ str->append("null");
+
+cont_point:
+ {
+ /* Swap str and js1. */
+ if (str == &tmp_js1)
+ {
+ str= js1;
+ js1= &tmp_js1;
+ }
+ else
+ {
+ js1= str;
+ str= &tmp_js1;
+ }
+ }
+ }
+
+ if (merge_to_null)
+ goto null_return;
+
+ json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
+ (const uchar *) js1->ptr() + js1->length());
+ str->length(0);
+ str->set_charset(js1->charset());
+ if (json_nice(&je1, str, Item_func_json_format::LOOSE))
+ goto error_return;
+
+ null_value= 0;
+ return str;
+
+error_return:
+ if (je1.s.error)
+ report_json_error(js1, &je1, 0);
+ if (je2.s.error)
+ report_json_error(js2, &je2, n_arg);
+null_return:
+ null_value= 1;
+ return NULL;
+}
+
+
bool Item_func_json_length::fix_length_and_dec()
{
if (arg_count > 1)
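
Note on the NULL handling in Item_func_json_merge_patch::val_str() above: an SQL NULL argument does not unconditionally make the result NULL. The merge_to_null flag stays set only while subsequent patches are JSON objects; a non-object patch clears it and replaces the accumulated value, matching RFC 7396 where a non-object patch discards the target. The tests added in this merge show exactly this, e.g.:

SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '{"c":"d"}');
-- NULL                (an object patch after NULL keeps the result NULL)
SELECT JSON_MERGE_PATCH(NULL, '[1,2,3]');
-- [1, 2, 3]           (a non-object patch replaces the NULL target)
SELECT JSON_MERGE_PATCH('{"a":"b"}', NULL, '[1,2,3]', '{"c":null,"d":"e"}');
-- {"d": "e"}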
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
index a4705f012f2..e9b77502e80 100644
--- a/sql/item_jsonfunc.h
+++ b/sql/item_jsonfunc.h
@@ -289,11 +289,21 @@ public:
Item_func_json_array(thd, list) {}
String *val_str(String *);
bool is_json_type() { return true; }
- const char *func_name() const { return "json_merge"; }
+ const char *func_name() const { return "json_merge_preserve"; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_json_merge>(thd, this); }
};
+class Item_func_json_merge_patch: public Item_func_json_merge
+{
+public:
+ Item_func_json_merge_patch(THD *thd, List<Item> &list):
+ Item_func_json_merge(thd, list) {}
+ const char *func_name() const { return "json_merge_patch"; }
+ String *val_str(String *);
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_json_merge_patch>(thd, this); }
+};
class Item_func_json_length: public Item_long_func
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 560662e2806..e8c605ce945 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -7831,7 +7831,6 @@ struct my_option my_long_options[]=
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-offset"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-limit"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-max-mem-size"), // OPTIMIZER_TRACE
- MYSQL_TO_BE_IMPLEMENTED_OPTION("eq-range-index-dive-limit"),
MYSQL_COMPATIBILITY_OPTION("server-id-bits"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-rows-search-algorithms"), // HAVE_REPLICATION
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-allow-batching"), // HAVE_REPLICATION
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index d61eb86890d..b1fa78b5ec6 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3502,8 +3502,7 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
Check whether the information schema contains a table
whose name is tables->schema_table_name
*/
- ST_SCHEMA_TABLE *schema_table;
- schema_table= find_schema_table(thd, &tables->schema_table_name);
+ ST_SCHEMA_TABLE *schema_table= tables->schema_table;
if (!schema_table ||
(schema_table->hidden &&
((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
@@ -3514,7 +3513,7 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
lex->sql_command == SQLCOM_SHOW_KEYS)))
{
my_error(ER_UNKNOWN_TABLE, MYF(0),
- tables->schema_table_name.str, INFORMATION_SCHEMA_NAME.str);
+ tables->table_name.str, INFORMATION_SCHEMA_NAME.str);
DBUG_RETURN(1);
}
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 16c922428db..27d867c2d75 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -1848,7 +1848,7 @@ thd_innodb_tmpdir(
@return reference to transaction pointer */
static trx_t* thd_to_trx(THD* thd)
{
- return *reinterpret_cast<trx_t**>(thd_ha_data(thd, innodb_hton_ptr));
+ return reinterpret_cast<trx_t*>(thd_get_ha_data(thd, innodb_hton_ptr));
}
#ifdef WITH_WSREP
diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt
index e4906638f6c..703504a6682 100644
--- a/storage/rocksdb/CMakeLists.txt
+++ b/storage/rocksdb/CMakeLists.txt
@@ -128,10 +128,15 @@ CHECK_CXX_SOURCE_COMPILES("
#endif
int main() {
static __thread int tls;
+ tls=0;
+ return tls;
}
" HAVE_THREAD_LOCAL)
if(HAVE_THREAD_LOCAL)
ADD_DEFINITIONS(-DROCKSDB_SUPPORT_THREAD_LOCAL)
+else()
+ MESSAGE(SEND_ERROR "The compiler failed the check for ROCKSDB_SUPPORT_THREAD_LOCAL. "
+ "MyRocks requires that feature.")
endif()
INCLUDE(build_rocksdb.cmake)
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index c985bd0d194..ae04a061bf2 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -3290,9 +3290,9 @@ void Rdb_snapshot_notifier::SnapshotCreated(
std::multiset<Rdb_transaction *> Rdb_transaction::s_tx_list;
mysql_mutex_t Rdb_transaction::s_tx_list_mutex;
-static Rdb_transaction *&get_tx_from_thd(THD *const thd) {
- return *reinterpret_cast<Rdb_transaction **>(
- my_core::thd_ha_data(thd, rocksdb_hton));
+static Rdb_transaction *get_tx_from_thd(THD *const thd) {
+ return reinterpret_cast<Rdb_transaction *>(
+ my_core::thd_get_ha_data(thd, rocksdb_hton));
}
namespace {
@@ -3339,7 +3339,7 @@ class Rdb_perf_context_guard {
*/
static Rdb_transaction *get_or_create_tx(THD *const thd) {
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
// TODO: this is called too many times.. O(#rows)
if (tx == nullptr) {
bool rpl_skip_tx_api= false; // MARIAROCKS_NOT_YET.
@@ -3354,6 +3354,7 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) {
}
tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks));
tx->start_tx();
+ my_core::thd_set_ha_data(thd, rocksdb_hton, tx);
} else {
tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks));
if (!tx->is_tx_started()) {
@@ -3365,7 +3366,7 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) {
}
static int rocksdb_close_connection(handlerton *const hton, THD *const thd) {
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
if (tx != nullptr) {
int rc = tx->finish_bulk_load(false);
if (rc != 0) {
@@ -3376,7 +3377,6 @@ static int rocksdb_close_connection(handlerton *const hton, THD *const thd) {
}
delete tx;
- tx = nullptr;
}
return HA_EXIT_SUCCESS;
}
@@ -3444,7 +3444,7 @@ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx)
{
bool async=false; // This is "ASYNC_COMMIT" feature which is only present in webscalesql
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
if (!tx->can_prepare()) {
return HA_EXIT_FAILURE;
}
@@ -3695,7 +3695,7 @@ static void rocksdb_commit_ordered(handlerton *hton, THD* thd, bool all)
// Same assert as InnoDB has
DBUG_ASSERT(all || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT |
OPTION_BEGIN)));
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
if (!tx->is_two_phase()) {
/*
ordered_commit is supposedly slower as it is done sequentially
@@ -3727,7 +3727,7 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx)
rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true);
/* note: h->external_lock(F_UNLCK) is called after this function is called) */
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
/* this will trigger saving of perf_context information */
Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));
@@ -3800,7 +3800,7 @@ static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx)
static int rocksdb_rollback(handlerton *const hton, THD *const thd,
bool rollback_tx) {
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));
if (tx != nullptr) {
@@ -4607,7 +4607,7 @@ static int rocksdb_savepoint(handlerton *const hton, THD *const thd,
static int rocksdb_rollback_to_savepoint(handlerton *const hton, THD *const thd,
void *const savepoint) {
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
return tx->rollback_to_savepoint(savepoint);
}
@@ -5347,49 +5347,6 @@ static int rocksdb_done_func(void *const p) {
}
/*
- MariaDB: When the plugin is unloaded with UNINSTALL SONAME command, some
- connections may still have Rdb_transaction objects.
-
- These objects are not genuine transactions (as SQL layer makes sure that
- a plugin that is being unloaded has no open tables), they are empty
- Rdb_transaction objects that were left there to save on object
- creation/deletion.
-
- Go through the list and delete them.
- */
- {
- class Rdb_trx_deleter: public Rdb_tx_list_walker {
- public:
- std::set<Rdb_transaction*> rdb_trxs;
-
- void process_tran(const Rdb_transaction *const tx) override {
- /*
- Check if the transaction is really empty. We only check
- non-WriteBatch-based transactions, because there is no easy way to
- check WriteBatch-based transactions.
- */
- if (!tx->is_writebatch_trx()) {
- const auto tx_impl = static_cast<const Rdb_transaction_impl *>(tx);
- DBUG_ASSERT(tx_impl);
- if (tx_impl->get_rdb_trx())
- DBUG_ASSERT(0);
- }
- rdb_trxs.insert((Rdb_transaction*)tx);
- };
- } deleter;
-
- Rdb_transaction::walk_tx_list(&deleter);
-
- for (std::set<Rdb_transaction*>::iterator it= deleter.rdb_trxs.begin();
- it != deleter.rdb_trxs.end();
- ++it)
- {
- // When a transaction is deleted, it removes itself from s_tx_list.
- delete *it;
- }
- }
-
- /*
destructors for static objects can be called at _exit(),
but we want to free the memory at dlclose()
*/
@@ -13833,7 +13790,7 @@ int rocksdb_check_bulk_load(
return 1;
}
- Rdb_transaction *&tx = get_tx_from_thd(thd);
+ Rdb_transaction *tx = get_tx_from_thd(thd);
if (tx != nullptr) {
const int rc = tx->finish_bulk_load();
if (rc != 0) {
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result
index d160b81f10e..84c93481c79 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result
@@ -1,674 +1 @@
-
------------------------------------------------------------------------
-- Locking issues case 1.1:
-- Locking rows that do not exist when using all primary key columns in
-- a WHERE clause
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
-INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
-id1 id2 value
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-INSERT INTO t0 VALUES (1,5,0);
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 1.1:
-- Locking rows that do not exist when using all primary key columns in
-- a WHERE clause
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
-INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
-id1 id2 value
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-INSERT INTO t0 VALUES (1,5,0);
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 1.2:
-- Locking rows that do not exist without using all primary key
-- columns in a WHERE clause
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
-INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
-id1 id2 value
-1 1 0
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
-id1 id2 value
-INSERT INTO t0 VALUES (1,5,0);
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 1.2:
-- Locking rows that do not exist without using all primary key
-- columns in a WHERE clause
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
-INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
-id1 id2 value
-1 1 0
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
-id1 id2 value
-INSERT INTO t0 VALUES (1,5,0);
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 2:
-- Rows that are scanned but do not match the WHERE are not locked
-- using REPEATABLE READ transaction isolation level unless
-- rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-CREATE TABLE t0(id INT PRIMARY KEY, value INT);
-INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con1;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-id value
-2 1
-5 1
-connection con2;
-UPDATE t0 SET VALUE=10 WHERE id=1;
-UPDATE t0 SET VALUE=10 WHERE id=5;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
-connection con2;
-SELECT * FROM t0 WHERE id=4 FOR UPDATE;
-id value
-4 0
-COMMIT;
-SELECT * FROM t0;
-id value
-1 10
-2 1
-3 0
-4 0
-5 1
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 2:
-- Rows that are scanned but do not match the WHERE are not locked
-- using READ COMMITTED transaction isolation level unless
-- rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-CREATE TABLE t0(id INT PRIMARY KEY, value INT);
-INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con1;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-id value
-2 1
-5 1
-connection con2;
-UPDATE t0 SET VALUE=10 WHERE id=1;
-UPDATE t0 SET VALUE=10 WHERE id=5;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
-connection con2;
-SELECT * FROM t0 WHERE id=4 FOR UPDATE;
-id value
-4 0
-COMMIT;
-SELECT * FROM t0;
-id value
-1 10
-2 1
-3 0
-4 0
-5 1
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 2:
-- Rows that are scanned but do not match the WHERE are not locked
-- using REPEATABLE READ transaction isolation level unless
-- rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-SET GLOBAL rocksdb_lock_scanned_rows=ON;
-CREATE TABLE t0(id INT PRIMARY KEY, value INT);
-INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con1;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-id value
-2 1
-5 1
-connection con2;
-UPDATE t0 SET VALUE=10 WHERE id=1;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-SET GLOBAL rocksdb_lock_scanned_rows=0;
-
------------------------------------------------------------------------
-- Locking issues case 2:
-- Rows that are scanned but do not match the WHERE are not locked
-- using READ COMMITTED transaction isolation level unless
-- rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-SET GLOBAL rocksdb_lock_scanned_rows=ON;
-CREATE TABLE t0(id INT PRIMARY KEY, value INT);
-INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con1;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-id value
-2 1
-5 1
-connection con2;
-UPDATE t0 SET VALUE=10 WHERE id=1;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-SET GLOBAL rocksdb_lock_scanned_rows=0;
-
------------------------------------------------------------------------
-- Locking issues case 3:
-- After creating a snapshot, other clients updating rows
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
-connection con1;
-ERROR: 1213
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 3:
-- After creating a snapshot, other clients updating rows
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
-connection con1;
-id value
-190000 1
-ERROR: 0
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 4:
-- Phantom rows
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-INSERT INTO t0 VALUES(200001,1), (-1,1);
-connection con1;
-id value
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 4:
-- Phantom rows
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-INSERT INTO t0 VALUES(200001,1), (-1,1);
-connection con1;
-id value
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 5:
-- Deleting primary key
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-UPDATE t0 SET value=100 WHERE id=190000;
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-DELETE FROM t0 WHERE id=190000;
-COMMIT;
-connection con1;
-ERROR: 1213
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 5:
-- Deleting primary key
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-UPDATE t0 SET value=100 WHERE id=190000;
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-DELETE FROM t0 WHERE id=190000;
-COMMIT;
-connection con1;
-id value
-ERROR: 0
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 6:
-- Changing primary key
-- using REPEATABLE READ transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-UPDATE t0 SET value=100 WHERE id=190000;
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-UPDATE t0 SET id=200001 WHERE id=190000;
-COMMIT;
-connection con1;
-ERROR: 1213
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 6:
-- Changing primary key
-- using READ COMMITTED transaction isolation level
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t0;
-CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
-Inserting 200,000 rows
-UPDATE t0 SET value=100 WHERE id=190000;
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-UPDATE t0 SET id=200001 WHERE id=190000;
-COMMIT;
-connection con1;
-id value
-ERROR: 0
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t0;
-
------------------------------------------------------------------------
-- Locking issues case 7:
-- Rows that are scanned as part of a query but not in the table being
-- updated should not be locked unless rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t1, t2;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-CREATE TABLE t1(id INT PRIMARY KEY, value INT);
-CREATE TABLE t2(id INT PRIMARY KEY, value INT);
-INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
-INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-lock_scanned_rows is 0
-connection con1;
-UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
-connection con2;
-UPDATE t2 SET value=value+100;
-SELECT * FROM t2;
-id value
-1 101
-2 102
-3 103
-4 104
-5 105
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t1;
-DROP TABLE t2;
-
------------------------------------------------------------------------
-- Locking issues case 7:
-- Rows that are scanned as part of a query but not in the table being
-- updated should not be locked unless rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t1, t2;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-CREATE TABLE t1(id INT PRIMARY KEY, value INT);
-CREATE TABLE t2(id INT PRIMARY KEY, value INT);
-INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
-INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-lock_scanned_rows is 0
-connection con1;
-UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
-connection con2;
-UPDATE t2 SET value=value+100;
-SELECT * FROM t2;
-id value
-1 101
-2 102
-3 103
-4 104
-5 105
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t1;
-DROP TABLE t2;
-
------------------------------------------------------------------------
-- Locking issues case 7:
-- Rows that are scanned as part of a query but not in the table being
-- updated should not be locked unless rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t1, t2;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-SET GLOBAL rocksdb_lock_scanned_rows=ON;
-CREATE TABLE t1(id INT PRIMARY KEY, value INT);
-CREATE TABLE t2(id INT PRIMARY KEY, value INT);
-INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
-INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-BEGIN;
-lock_scanned_rows is 1
-connection con1;
-UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
-connection con2;
-UPDATE t2 SET value=value+100 WHERE id=3;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
-SELECT * FROM t2;
-id value
-1 101
-2 102
-3 3
-4 104
-5 105
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t1;
-DROP TABLE t2;
-SET GLOBAL rocksdb_lock_scanned_rows=0;
-
------------------------------------------------------------------------
-- Locking issues case 7:
-- Rows that are scanned as part of a query but not in the table being
-- updated should not be locked unless rocksdb_lock_scanned_rows is on
------------------------------------------------------------------------
-DROP TABLE IF EXISTS t1, t2;
-SELECT @@global.rocksdb_lock_scanned_rows;
-@@global.rocksdb_lock_scanned_rows
-0
-SET GLOBAL rocksdb_lock_scanned_rows=ON;
-CREATE TABLE t1(id INT PRIMARY KEY, value INT);
-CREATE TABLE t2(id INT PRIMARY KEY, value INT);
-INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
-INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
-connect con1,localhost,root,,;
-connect con2,localhost,root,,;
-connection con1;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-connection con2;
-SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-BEGIN;
-lock_scanned_rows is 1
-connection con1;
-UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
-connection con2;
-UPDATE t2 SET value=value+100 WHERE id=3;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
-SELECT * FROM t2;
-id value
-1 101
-2 102
-3 3
-4 104
-5 105
-connection con1;
-COMMIT;
-connection default;
-disconnect con1;
-disconnect con2;
-DROP TABLE t1;
-DROP TABLE t2;
-SET GLOBAL rocksdb_lock_scanned_rows=0;
+tests moved to rocksdb.locking_issues_case*
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rc.result
new file mode 100644
index 00000000000..a47aa3c7d90
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rc.result
@@ -0,0 +1,30 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 1.1:
+- Locking rows that do not exist when using all primary key columns in
+- a WHERE clause
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+id1 id2 value
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+INSERT INTO t0 VALUES (1,5,0);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rr.result
new file mode 100644
index 00000000000..c923c34c98e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_1_rr.result
@@ -0,0 +1,30 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 1.1:
+- Locking rows that do not exist when using all primary key columns in
+- a WHERE clause
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+id1 id2 value
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+INSERT INTO t0 VALUES (1,5,0);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rc.result
new file mode 100644
index 00000000000..01c4e7e3b2f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rc.result
@@ -0,0 +1,30 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 1.2:
+- Locking rows that do not exist without using all primary key
+- columns in a WHERE clause
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
+id1 id2 value
+1 1 0
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
+id1 id2 value
+INSERT INTO t0 VALUES (1,5,0);
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rr.result
new file mode 100644
index 00000000000..798fd15b76e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case1_2_rr.result
@@ -0,0 +1,30 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 1.2:
+- Locking rows that do not exist without using all primary key
+- columns in a WHERE clause
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
+id1 id2 value
+1 1 0
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
+id1 id2 value
+INSERT INTO t0 VALUES (1,5,0);
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc.result
new file mode 100644
index 00000000000..16480da8e80
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc.result
@@ -0,0 +1,50 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using READ COMMITTED transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con1;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+connection con2;
+UPDATE t0 SET VALUE=10 WHERE id=1;
+UPDATE t0 SET VALUE=10 WHERE id=5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
+connection con2;
+SELECT * FROM t0 WHERE id=4 FOR UPDATE;
+id value
+4 0
+COMMIT;
+SELECT * FROM t0;
+id value
+1 10
+2 1
+3 0
+4 0
+5 1
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc_lsr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc_lsr.result
new file mode 100644
index 00000000000..330cd09d33e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rc_lsr.result
@@ -0,0 +1,37 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using READ COMMITTED transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con1;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+connection con2;
+UPDATE t0 SET VALUE=10 WHERE id=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr.result
new file mode 100644
index 00000000000..3e6b63afaa8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr.result
@@ -0,0 +1,50 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using REPEATABLE READ transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con1;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+connection con2;
+UPDATE t0 SET VALUE=10 WHERE id=1;
+UPDATE t0 SET VALUE=10 WHERE id=5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
+connection con2;
+SELECT * FROM t0 WHERE id=4 FOR UPDATE;
+id value
+4 0
+COMMIT;
+SELECT * FROM t0;
+id value
+1 10
+2 1
+3 0
+4 0
+5 1
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr_lsr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr_lsr.result
new file mode 100644
index 00000000000..088e975ebc5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case2_rr_lsr.result
@@ -0,0 +1,37 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using REPEATABLE READ transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con1;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+connection con2;
+UPDATE t0 SET VALUE=10 WHERE id=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rc.result
new file mode 100644
index 00000000000..9a6f02cd41d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rc.result
@@ -0,0 +1,25 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 3:
+- After creating a snapshot, other clients updating rows
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
+connection con1;
+id value
+190000 1
+ERROR: 0
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rr.result
new file mode 100644
index 00000000000..fdd4d8fcaca
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case3_rr.result
@@ -0,0 +1,23 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 3:
+- After creating a snapshot, other clients updating rows
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
+connection con1;
+ERROR: 1213
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rc.result
new file mode 100644
index 00000000000..1a35d276192
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rc.result
@@ -0,0 +1,23 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 4:
+- Phantom rows
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+INSERT INTO t0 VALUES(200001,1), (-1,1);
+connection con1;
+id value
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rr.result
new file mode 100644
index 00000000000..7ecb1ac5f7a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case4_rr.result
@@ -0,0 +1,23 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 4:
+- Phantom rows
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+INSERT INTO t0 VALUES(200001,1), (-1,1);
+connection con1;
+id value
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rc.result
new file mode 100644
index 00000000000..4d707e83eec
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rc.result
@@ -0,0 +1,29 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 5:
+- Deleting primary key
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+DELETE FROM t0 WHERE id=190000;
+COMMIT;
+connection con1;
+id value
+ERROR: 0
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rr.result
new file mode 100644
index 00000000000..0ebfe8e6079
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case5_rr.result
@@ -0,0 +1,28 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 5:
+- Deleting primary key
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+DELETE FROM t0 WHERE id=190000;
+COMMIT;
+connection con1;
+ERROR: 1213
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rc.result
new file mode 100644
index 00000000000..f16ffbeaa63
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rc.result
@@ -0,0 +1,29 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 6:
+- Changing primary key
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+UPDATE t0 SET id=200001 WHERE id=190000;
+COMMIT;
+connection con1;
+id value
+ERROR: 0
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rr.result
new file mode 100644
index 00000000000..c2323937d15
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case6_rr.result
@@ -0,0 +1,28 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 6:
+- Changing primary key
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+UPDATE t0 SET id=200001 WHERE id=190000;
+COMMIT;
+connection con1;
+ERROR: 1213
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc.result
new file mode 100644
index 00000000000..f30dbab045d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc.result
@@ -0,0 +1,41 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+lock_scanned_rows is 0
+connection con1;
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+connection con2;
+UPDATE t2 SET value=value+100;
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 103
+4 104
+5 105
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc_lsr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc_lsr.result
new file mode 100644
index 00000000000..d43e3efbfe0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rc_lsr.result
@@ -0,0 +1,45 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+lock_scanned_rows is 1
+connection con1;
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+connection con2;
+UPDATE t2 SET value=value+100 WHERE id=3;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 3
+4 104
+5 105
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr.result
new file mode 100644
index 00000000000..dbcb0a03aef
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr.result
@@ -0,0 +1,41 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+lock_scanned_rows is 0
+connection con1;
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+connection con2;
+UPDATE t2 SET value=value+100;
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 103
+4 104
+5 105
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr_lsr.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr_lsr.result
new file mode 100644
index 00000000000..d9f7e333d3c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues_case7_rr_lsr.result
@@ -0,0 +1,45 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+lock_scanned_rows is 1
+connection con1;
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+connection con2;
+UPDATE t2 SET value=value+100 WHERE id=3;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 3
+4 104
+5 105
+connection con1;
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result
index 6ab7ab003fd..6d6cb1db54e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result
@@ -2,14 +2,18 @@
# MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex
#
INSTALL SONAME 'ha_rocksdb';
+connect con1,localhost,root,,test;
CREATE TABLE t1 (i INT) ENGINE=RocksDB;
insert into t1 values (1);
-connect con1,localhost,root,,;
-connection con1;
-insert into test.t1 values (1);
-connection default;
DROP TABLE t1;
+connection default;
UNINSTALL SONAME 'ha_rocksdb';
+Warnings:
+Warning 1620 Plugin is busy and will be uninstalled on shutdown
+SELECT ENGINE, SUPPORT FROM INFORMATION_SCHEMA.ENGINES WHERE ENGINE='ROCKSDB';
+ENGINE SUPPORT
+ROCKSDB NO
+disconnect con1;
#
# MDEV-15686: Loading MyRocks plugin back after it has been unloaded causes a crash
#
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test
index 18a796573d1..41e2f69578a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test
@@ -1,67 +1,3 @@
--source include/have_rocksdb.inc
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case1_1.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case1_1.inc
-
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case1_2.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case1_2.inc
-
-let $lock_scanned_rows=0;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case2.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case2.inc
-
-# Rerun the case2 tests with rocksdb_lock_scanned_rows on
-let $lock_scanned_rows=1;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case2.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case2.inc
-
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case3.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case3.inc
-
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case4.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case4.inc
-
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case5.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case5.inc
-
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case6.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case6.inc
-
-let $lock_scanned_rows=0;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case7.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case7.inc
-
-# Rerun the case7 tests with rocksdb_lock_scanned_rows on
-let $lock_scanned_rows=1;
-let $isolation_level = REPEATABLE READ;
---source include/locking_issues_case7.inc
-
-let $isolation_level = READ COMMITTED;
---source include/locking_issues_case7.inc
+--echo tests moved to rocksdb.locking_issues_case*
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rc.test
new file mode 100644
index 00000000000..3fd183bf4dd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case1_1.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rr.test
new file mode 100644
index 00000000000..cd4a0bd3c9d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_1_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case1_1.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rc.test
new file mode 100644
index 00000000000..3fe052a4099
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case1_2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rr.test
new file mode 100644
index 00000000000..02263273ba5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case1_2_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case1_2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc.test
new file mode 100644
index 00000000000..d780b5247bc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc_lsr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc_lsr.test
new file mode 100644
index 00000000000..bd46f93a76c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rc_lsr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=1;
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr.test
new file mode 100644
index 00000000000..b820fddb979
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr_lsr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr_lsr.test
new file mode 100644
index 00000000000..33d3b752098
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case2_rr_lsr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=1;
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case2.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rc.test
new file mode 100644
index 00000000000..7dc7f8784ea
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case3.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rr.test
new file mode 100644
index 00000000000..7c81daccebc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case3_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case3.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rc.test
new file mode 100644
index 00000000000..edc1111b0a5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case4.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rr.test
new file mode 100644
index 00000000000..8c26c2d1e19
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case4_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case4.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rc.test
new file mode 100644
index 00000000000..10bedcf2cca
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case5.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rr.test
new file mode 100644
index 00000000000..6de3847cb66
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case5_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case5.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rc.test
new file mode 100644
index 00000000000..9409bde0c58
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rc.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case6.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rr.test
new file mode 100644
index 00000000000..0c7a8a116c3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case6_rr.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case6.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc.test
new file mode 100644
index 00000000000..f8e874a135c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case7.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc_lsr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc_lsr.test
new file mode 100644
index 00000000000..0f97ca2f00f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rc_lsr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=1;
+let $isolation_level = READ COMMITTED;
+--source include/locking_issues_case7.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr.test
new file mode 100644
index 00000000000..c8cec956893
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case7.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr_lsr.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr_lsr.test
new file mode 100644
index 00000000000..875e81a1e05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues_case7_rr_lsr.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $lock_scanned_rows=1;
+let $isolation_level = REPEATABLE READ;
+--source include/locking_issues_case7.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test
index 1a3d505f81a..0cf56c0cbd5 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test
@@ -1,5 +1,6 @@
--source include/have_log_bin.inc
--source include/have_binlog_format_row.inc
+--source include/not_windows.inc
--echo #
--echo # MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex
@@ -14,18 +15,21 @@
INSTALL SONAME 'ha_rocksdb';
--enable_warnings
+connect (con1,localhost,root,,test);
CREATE TABLE t1 (i INT) ENGINE=RocksDB;
insert into t1 values (1);
-
-connect (con1,localhost,root,,);
-connection con1;
-insert into test.t1 values (1);
+DROP TABLE t1;
connection default;
-
# Cleanup
-DROP TABLE t1;
UNINSTALL SONAME 'ha_rocksdb';
+SELECT ENGINE, SUPPORT FROM INFORMATION_SCHEMA.ENGINES WHERE ENGINE='ROCKSDB';
+disconnect con1;
+# Unfortunately this is the only more or less reliable way to wait until
+# the connection has finished ha_close_connections(). It doesn't work on
+# Windows due to different thread handling.
+let $wait_condition= SELECT VARIABLE_VALUE=1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME='Threads_cached';
+--source include/wait_condition.inc
--echo #
--echo # MDEV-15686: Loading MyRocks plugin back after it has been unloaded causes a crash
diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h
index cdc7651e3c4..44d90d78437 100644
--- a/storage/rocksdb/rdb_utils.h
+++ b/storage/rocksdb/rdb_utils.h
@@ -52,8 +52,8 @@ namespace myrocks {
Since we cannot or don't want to change the API in any way, we can use this
mechanism to define readability tokens that look like C++ namespaces, but are
not enforced in any way by the compiler, since the pre-compiler strips them
- out. However, on the calling side, code looks like my_core::thd_ha_data()
- rather than plain a thd_ha_data() call. This technique adds an immediate
+ out. However, on the calling side, code looks like my_core::thd_get_ha_data()
+  rather than a plain thd_get_ha_data() call. This technique adds an immediate
visible cue on what type of API we are calling into.
*/
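The comment above describes the my_core readability-token trick without showing its mechanics in this hunk. A minimal sketch of the general idea, assuming a hypothetical empty my_core macro and a simplified, stand-in thd_get_ha_data() declaration (the real macro and the real server API live elsewhere in the tree):

#include <cstdio>

// Hypothetical stand-in: the token expands to nothing, so a "namespaced"
// call below compiles as a plain global-scope call.
#define my_core /* stripped by the preprocessor */

// Simplified stand-in for a core-server API function at global scope.
void *thd_get_ha_data(const void *thd) {
  std::printf("thd_get_ha_data(%p)\n", thd);
  return nullptr;
}

namespace myrocks_example {
void *demo(const void *thd) {
  // Reads like a namespace-qualified call, but after preprocessing it is
  // just ::thd_get_ha_data(thd).
  return my_core::thd_get_ha_data(thd);
}
}  // namespace myrocks_example

int main() { return myrocks_example::demo(nullptr) == nullptr ? 0 : 1; }

After preprocessing, my_core::thd_get_ha_data(thd) is simply ::thd_get_ha_data(thd); the qualifier costs nothing at compile time, yet the call site visibly signals which kind of API is being invoked.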
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index 0e772d1bf51..88e28f79789 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -8397,6 +8397,26 @@ static void test_list_fields()
}
+/* Test mysql_list_fields() with information_schema */
+
+static void test_list_information_schema_fields()
+{
+ MYSQL_RES *result;
+ int rc;
+ myheader("test_list_information_schema_fields");
+
+ rc= mysql_select_db(mysql, "information_schema");
+ myquery(rc);
+ result= mysql_list_fields(mysql, "all_plugins", NULL);
+ mytest(result);
+ rc= my_process_result_set(result);
+ DIE_UNLESS(rc == 0);
+ mysql_free_result(result);
+ rc= mysql_select_db(mysql, current_db);
+ myquery(rc);
+}
+
+
static void test_list_fields_default()
{
int rc, i;
@@ -20865,6 +20885,7 @@ static struct my_tests_st my_tests[]= {
{ "test_fetch_column", test_fetch_column },
{ "test_mem_overun", test_mem_overun },
{ "test_list_fields", test_list_fields },
+ { "test_list_information_schema_fields", test_list_information_schema_fields },
{ "test_list_fields_default", test_list_fields_default },
{ "test_free_result", test_free_result },
{ "test_free_store_result", test_free_store_result },