author     Sergei Petrunia <psergey@askmonty.org>    2016-10-06 17:24:09 +0000
committer  Sergei Petrunia <psergey@askmonty.org>    2016-10-06 17:24:09 +0000
commit     ebfc4e6ad02b0cef34ec3f446007b98d85af9296 (patch)
tree       1811d0c75aaf4aa3f130d3f35a86c769dcfadce4 /storage/rocksdb
download   mariadb-git-ebfc4e6ad02b0cef34ec3f446007b98d85af9296.tar.gz
Initial commit,
copy of commit 86587affafe77ef555f7c3839839de44f0f203f3
  Author: Tian Xia <tianx@fb.com>
  Date:   Tue Oct 4 10:01:52 2016 -0700

  Allow filtering of show commands through admission control
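The diffstat below can be reproduced locally with plain git commands; this is a minimal sketch, assuming a clone of the MariaDB server repository (the github.com/MariaDB/server mirror is used here as an assumed remote; the tarball linked above is an alternative way to fetch the tree):

    # Sketch: display this commit and its diffstat, limited to storage/rocksdb
    git clone https://github.com/MariaDB/server.git mariadb-git
    cd mariadb-git
    git show --stat ebfc4e6ad02b0cef34ec3f446007b98d85af9296 -- storage/rocksdb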
Diffstat (limited to 'storage/rocksdb')
-rw-r--r--storage/rocksdb/CMakeLists.txt108
-rw-r--r--storage/rocksdb/README38
-rw-r--r--storage/rocksdb/event_listener.cc80
-rw-r--r--storage/rocksdb/event_listener.h40
-rwxr-xr-xstorage/rocksdb/get_rocksdb_files.sh27
-rw-r--r--storage/rocksdb/ha_rocksdb.cc10699
-rw-r--r--storage/rocksdb/ha_rocksdb.h1054
-rw-r--r--storage/rocksdb/ha_rocksdb_proto.h80
-rw-r--r--storage/rocksdb/logger.h73
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc69
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc51
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc48
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc97
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc69
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc89
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/my.cnf7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/1st.result22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result378
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result251
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result780
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/alter_table.result183
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result29
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result1235
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result122
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result30
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result1235
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result49
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/cardinality.result50
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/check_table.result68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result59
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result87
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result2612
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result2270
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result741
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result723
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/collation.result128
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result25
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result151
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result144
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result74
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/create_table.result165
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/deadlock.result37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/delete.result166
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result59
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/describe.result19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_database.result6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result154
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_table.result71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result362
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result25
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result504
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result115
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/hermitage.result648
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index.result42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result28
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result51
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_primary.result48
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/information_schema.result78
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result120
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/insert.result202
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result63
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue100.result23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue111.result32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue290.result28
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue314.result12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result111
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result116
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/loaddata.result239
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/lock.result108
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result40
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result490
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/misc.result84
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result143
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result131
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result63
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result81
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/partition.result30
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/perf_context.result160
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result210
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/repair_table.result37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/replace.result32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result2456
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result61
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result120
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result129
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result227
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result193
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result63
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result123
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result290
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result11
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result66
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result321
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result88
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result242
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result103
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result54
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result29
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result11
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select.result373
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result35
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result28
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/show_engine.result416
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/shutdown.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/singledelete.result66
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result10
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/statistics.result69
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/table_stats.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result26
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result46
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/transaction.result936
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result33
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_binary.result48
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result80
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_bit.result53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result58
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_blob.result57
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result188
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_bool.result73
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_char.result76
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result73
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result109
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result119
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result179
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_enum.result47
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result69
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result131
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result129
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_float.result306
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result189
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_int.result212
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result99
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_set.result49
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result80
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_text.result57
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result165
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result743
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result254
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/unique_check.result72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result185
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result162
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/update.result113
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result57
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/update_multi.result691
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/write_sync.result39
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/1st.test36
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test290
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test102
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test91
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test137
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test30
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/alter_table.test94
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test31
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test44
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test65
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc63
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test103
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test118
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test52
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc189
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl36
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test110
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cardinality.test41
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py31
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/check_table.inc54
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/check_table.test12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test107
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test76
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc55
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc61
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_null.inc34
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test224
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test216
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test74
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test67
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/collation.test181
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test87
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc43
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test34
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc136
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test80
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/create_table.test192
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/deadlock.test43
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/delete.test101
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test36
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test39
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/describe.test24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/disabled.def4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_database.test11
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test116
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table.test115
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test110
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc47
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc15
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test41
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test45
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test25
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc257
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/hermitage.test10
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index.inc121
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test51
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test70
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_primary.test64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/information_schema.test72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc35
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test35
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/insert.test99
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test41
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue100.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue111.test38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue290.test40
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/issue314.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc117
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/loaddata.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/lock.test202
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test110
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test67
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/misc.test45
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test65
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test43
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test26
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc65
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc78
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/partition.test42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/perf_context.test92
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test70
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test144
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc38
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/repair_table.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/replace.test54
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test1925
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt1
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test76
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test124
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test24
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py95
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test30
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc154
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test44
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test92
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test121
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test30
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test193
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test57
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test80
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test302
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc92
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test47
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test46
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test262
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test90
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test57
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg.inc43
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test10
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/select.test202
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test55
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test46
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test58
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/show_engine.test75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/shutdown.test36
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/singledelete.test89
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test34
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh52
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/statistics.test74
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/table_stats.test27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test29
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test37
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test28
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/transaction.test105
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc150
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test74
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc45
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_binary.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test99
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc53
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bit.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test113
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc49
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_blob.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test176
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_bool.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char.inc45
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char.test19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test107
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test142
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc45
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test157
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test163
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc50
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_enum.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc85
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test107
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_float.inc108
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_float.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test175
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_int.inc68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_int.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_set.inc49
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_set.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test104
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_text.inc49
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_text.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test171
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc77
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test137
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc84
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/unique_check.test145
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc198
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test33
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test25
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update.test72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test35
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update_multi.test15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test78
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test102
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/write_sync.test42
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf25
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc3
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh43
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc16
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc4
-rwxr-xr-xstorage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test47
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/combinations2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result27
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result135
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result361
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result140
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result34
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result31
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result44
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result222
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result28
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf51
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test81
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test71
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test41
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc153
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test12
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test39
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test373
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test26
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc56
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/my.cnf8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result21
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result21
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py1029
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test31
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result13
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result39
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result70
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result46
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result58
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result50
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result106
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result93
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result170
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result72
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result43
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result29
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result114
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result101
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result65
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.test68
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result64
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result163
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result65
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result75
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result36
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result85
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result46
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result114
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result100
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result114
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test39
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test8
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test29
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test44
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test23
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test21
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test50
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test32
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test20
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test63
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test21
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test15
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test19
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test35
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test22
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test17
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test18
-rw-r--r--storage/rocksdb/properties_collector.cc555
-rw-r--r--storage/rocksdb/properties_collector.h190
-rw-r--r--storage/rocksdb/rdb_buff.h452
-rw-r--r--storage/rocksdb/rdb_cf_manager.cc236
-rw-r--r--storage/rocksdb/rdb_cf_manager.h106
-rw-r--r--storage/rocksdb/rdb_cf_options.cc340
-rw-r--r--storage/rocksdb/rdb_cf_options.h92
-rw-r--r--storage/rocksdb/rdb_compact_filter.h115
-rw-r--r--storage/rocksdb/rdb_comparator.h93
-rw-r--r--storage/rocksdb/rdb_datadic.cc4584
-rw-r--r--storage/rocksdb/rdb_datadic.h1054
-rw-r--r--storage/rocksdb/rdb_i_s.cc1189
-rw-r--r--storage/rocksdb/rdb_i_s.h34
-rw-r--r--storage/rocksdb/rdb_index_merge.cc604
-rw-r--r--storage/rocksdb/rdb_index_merge.h229
-rw-r--r--storage/rocksdb/rdb_mutex_wrapper.cc234
-rw-r--r--storage/rocksdb/rdb_mutex_wrapper.h139
-rw-r--r--storage/rocksdb/rdb_perf_context.cc240
-rw-r--r--storage/rocksdb/rdb_perf_context.h134
-rw-r--r--storage/rocksdb/rdb_sst_info.cc417
-rw-r--r--storage/rocksdb/rdb_sst_info.h101
-rw-r--r--storage/rocksdb/rdb_threads.cc81
-rw-r--r--storage/rocksdb/rdb_threads.h116
-rw-r--r--storage/rocksdb/rdb_utils.cc311
-rw-r--r--storage/rocksdb/rdb_utils.h206
-rw-r--r--storage/rocksdb/rocksdb-range-access.txt353
-rw-r--r--storage/rocksdb/tools/mysql_ldb.cc17
-rw-r--r--storage/rocksdb/unittest/CMakeLists.txt22
-rw-r--r--storage/rocksdb/unittest/test_properties_collector.cc60
822 files changed, 81367 insertions, 0 deletions
diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt
new file mode 100644
index 00000000000..f2c52ce84c4
--- /dev/null
+++ b/storage/rocksdb/CMakeLists.txt
@@ -0,0 +1,108 @@
+# TODO: Copyrights
+
+IF (NOT EXISTS "${CMAKE_SOURCE_DIR}/rocksdb/Makefile")
+ MESSAGE(SEND_ERROR "Missing Makefile in rocksdb directory. Try \"git submodule update\".")
+ENDIF()
+
+# get a list of rocksdb library source files
+# run with env -i to avoid passing variables
+EXECUTE_PROCESS(
+ COMMAND env -i ${CMAKE_SOURCE_DIR}/storage/rocksdb/get_rocksdb_files.sh
+ OUTPUT_VARIABLE SCRIPT_OUTPUT
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+)
+# split the list into lines
+STRING(REGEX MATCHALL "[^\n]+" ROCKSDB_LIB_SOURCES ${SCRIPT_OUTPUT})
+
+INCLUDE_DIRECTORIES(
+ ${CMAKE_SOURCE_DIR}/rocksdb
+ ${CMAKE_SOURCE_DIR}/rocksdb/include
+ ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src
+)
+
+ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX
+ -DZLIB)
+
+SET(ROCKSDB_SOURCES
+ ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h
+ logger.h
+ rdb_comparator.h
+ rdb_datadic.cc rdb_datadic.h
+ rdb_cf_options.cc rdb_cf_options.h
+ rdb_cf_manager.cc rdb_cf_manager.h
+ properties_collector.cc properties_collector.h
+ event_listener.cc event_listener.h
+ rdb_i_s.cc rdb_i_s.h
+ rdb_index_merge.cc rdb_index_merge.h
+ rdb_perf_context.cc rdb_perf_context.h
+ rdb_mutex_wrapper.cc rdb_mutex_wrapper.h
+ rdb_sst_info.cc rdb_sst_info.h
+ rdb_utils.cc rdb_utils.h rdb_buff.h
+ rdb_threads.cc rdb_threads.h
+ ${ROCKSDB_LIB_SOURCES}
+)
+
+IF(WITH_FB_TSAN)
+ SET(PIC_EXT "_pic")
+ELSE()
+ SET(PIC_EXT "")
+ENDIF()
+
+SET(rocksdb_static_libs )
+IF (NOT "$ENV{WITH_SNAPPY}" STREQUAL "")
+ SET(rocksdb_static_libs ${rocksdb_static_libs}
+ $ENV{WITH_SNAPPY}/lib/libsnappy${PIC_EXT}.a)
+ ADD_DEFINITIONS(-DSNAPPY)
+ELSE()
+ SET(rocksdb_static_libs ${rocksdb_static_libs} snappy)
+ENDIF()
+
+IF (NOT "$ENV{WITH_LZ4}" STREQUAL "")
+ SET(rocksdb_static_libs ${rocksdb_static_libs}
+ $ENV{WITH_LZ4}/lib/liblz4${PIC_EXT}.a)
+ ADD_DEFINITIONS(-DLZ4)
+ELSE()
+ SET(rocksdb_static_libs ${rocksdb_static_libs} lz4)
+ENDIF()
+
+IF (NOT "$ENV{WITH_BZ2}" STREQUAL "")
+ SET(rocksdb_static_libs ${rocksdb_static_libs}
+ $ENV{WITH_BZ2}/lib/libbz2${PIC_EXT}.a)
+ ADD_DEFINITIONS(-DBZIP2)
+ELSE()
+ SET(rocksdb_static_libs ${rocksdb_static_libs} bz2)
+ENDIF()
+
+# link ZSTD only if instructed
+IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "")
+ SET(rocksdb_static_libs ${rocksdb_static_libs}
+ $ENV{WITH_ZSTD}/lib/libzstd${PIC_EXT}.a)
+ ADD_DEFINITIONS(-DZSTD)
+ENDIF()
+
+SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt")
+
+MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY
+ LINK_LIBRARIES ${rocksdb_static_libs}
+)
+
+IF(WITH_EMBEDDED_SERVER)
+ ADD_SUBDIRECTORY(unittest)
+ENDIF()
+
+IF (WITH_ROCKSDB_SE_STORAGE_ENGINE)
+ # TODO: read this file list from src.mk:TOOL_SOURCES
+ SET(ROCKSDB_TOOL_SOURCES
+ ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_tool.cc
+ ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb_cmd.cc
+ ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump_tool.cc
+ )
+ MYSQL_ADD_EXECUTABLE(sst_dump ${CMAKE_SOURCE_DIR}/rocksdb/tools/sst_dump.cc ${ROCKSDB_TOOL_SOURCES})
+ TARGET_LINK_LIBRARIES(sst_dump rocksdb_se)
+
+ MYSQL_ADD_EXECUTABLE(ldb ${CMAKE_SOURCE_DIR}/rocksdb/tools/ldb.cc ${ROCKSDB_TOOL_SOURCES})
+ TARGET_LINK_LIBRARIES(ldb rocksdb_se)
+
+ MYSQL_ADD_EXECUTABLE(mysql_ldb ${CMAKE_SOURCE_DIR}/storage/rocksdb/tools/mysql_ldb.cc ${ROCKSDB_TOOL_SOURCES})
+ TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_se)
+ENDIF()
diff --git a/storage/rocksdb/README b/storage/rocksdb/README
new file mode 100644
index 00000000000..472b7986f91
--- /dev/null
+++ b/storage/rocksdb/README
@@ -0,0 +1,38 @@
+== Summary ==
+This directory contains the RocksDB-based Storage Engine (RDBSE) for MySQL, also known as "MyRocks".
+
+== Resources ==
+See https://github.com/facebook/mysql-5.6/wiki/Getting-Started-with-MyRocks
+Facebook group: https://www.facebook.com/groups/mysqlonrocksdb/
+
+== Coding Conventions ==
+The baseline for MyRocks coding conventions is the MySQL set, available at
+http://dev.mysql.com/doc/internals/en/coding-guidelines.html.
+
+Several refinements:
+ 0. There is an umbrella C++ namespace named "myrocks" for all MyRocks code.
+ 1. We introduced "RDB" as the super-short abbreviation for "RocksDB". We will
+    use it as a name prefix, with different capitalization (see below), to
+    ease code navigation with ctags and grep.
+    N.B. For ease of matching, we'll keep the variables and functions dealing
+         with sysvars as close as possible to the externally visible names of
+         the sysvars, which start with the "rocksdb_" prefix (the outward
+         storage engine name).
+ 2. The names for classes, interfaces, and C++ structures (which act as
+    classes) start with the prefix "Rdb_".
+ NB: For historical reasons, we'll keep the "ha_<storage_engine_name>" class
+ name for ha_rocksdb class, which is an exception to the rule.
+ 3. The names for global objects and functions start with prefix "rdb_".
+ 4. The names for macros and constants start with prefix "RDB_".
+ 5. Regular class member names start with "m_".
+ 6. Static class member names start with "s_".
+ 7. Given the 80-character-per-line limit, we'll not always use full English
+    words in names when a well-known or easily recognizable abbreviation
+    exists (like "tx" for "transaction" or "param" for "parameter", etc).
+ 8. When we need to disambiguate, we use different suffixes for that, like
+    "_arg" for a function argument/parameter, "_arr" for a C-style array, and
+    "_vect" for a std::vector, etc. (see the short example below).
+
+== Running Tests ==
+To run tests from the rocksdb, rocksdb_rpl or rocksdb_sys_vars suites, use the following parameters:
+--mysqld=--default-storage-engine=rocksdb --mysqld=--skip-innodb --mysqld=--default-tmp-storage-engine=MyISAM --mysqld=--rocksdb \ No newline at end of file
diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc
new file mode 100644
index 00000000000..851e4782416
--- /dev/null
+++ b/storage/rocksdb/event_listener.cc
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* The C++ file's header */
+#include "./event_listener.h"
+
+/* C++ standard header files */
+#include <string>
+#include <vector>
+
+/* MySQL includes */
+#include <mysql/plugin.h>
+#include <my_global.h>
+
+/* MyRocks includes */
+#include "./ha_rocksdb.h"
+#include "./properties_collector.h"
+#include "./rdb_datadic.h"
+
+namespace myrocks {
+
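+// Collect Rdb_index_stats from the table properties of each of the given SST
+// files and return them combined in a single vector.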
+static std::vector<Rdb_index_stats>
+extract_index_stats(
+ const std::vector<std::string>& files,
+ const rocksdb::TablePropertiesCollection& props
+) {
+ std::vector<Rdb_index_stats> ret;
+ for (auto fn : files) {
+ auto it = props.find(fn);
+ DBUG_ASSERT(it != props.end());
+ std::vector<Rdb_index_stats> stats;
+ Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats);
+ ret.insert(ret.end(), stats.begin(), stats.end());
+ }
+ return ret;
+}
+
+void Rdb_event_listener::OnCompactionCompleted(
+ rocksdb::DB *db,
+ const rocksdb::CompactionJobInfo& ci
+) {
+ DBUG_ASSERT(db != nullptr);
+ DBUG_ASSERT(m_ddl_manager != nullptr);
+
+ if (ci.status.ok()) {
+ m_ddl_manager->adjust_stats(
+ extract_index_stats(ci.output_files, ci.table_properties),
+ extract_index_stats(ci.input_files, ci.table_properties));
+ }
+}
+
+void Rdb_event_listener::OnFlushCompleted(
+ rocksdb::DB* db,
+ const rocksdb::FlushJobInfo& flush_job_info
+) {
+ DBUG_ASSERT(db != nullptr);
+ DBUG_ASSERT(m_ddl_manager != nullptr);
+
+ auto tbl_props = std::make_shared<const rocksdb::TableProperties>(
+ flush_job_info.table_properties);
+
+ std::vector<Rdb_index_stats> stats;
+ Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats);
+ m_ddl_manager->adjust_stats(stats);
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h
new file mode 100644
index 00000000000..f92c6d0dea1
--- /dev/null
+++ b/storage/rocksdb/event_listener.h
@@ -0,0 +1,40 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+#include "rocksdb/listener.h"
+
+namespace myrocks {
+
+class Rdb_ddl_manager;
+
+class Rdb_event_listener : public rocksdb::EventListener
+{
+ public:
+ explicit Rdb_event_listener(Rdb_ddl_manager* ddl_manager) :
+ m_ddl_manager(ddl_manager) {
+ }
+
+ void OnCompactionCompleted(
+ rocksdb::DB *db, const rocksdb::CompactionJobInfo& ci) override;
+ void OnFlushCompleted(
+ rocksdb::DB* db, const rocksdb::FlushJobInfo& flush_job_info) override;
+
+ private:
+ Rdb_ddl_manager* m_ddl_manager;
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/get_rocksdb_files.sh b/storage/rocksdb/get_rocksdb_files.sh
new file mode 100755
index 00000000000..121d664a086
--- /dev/null
+++ b/storage/rocksdb/get_rocksdb_files.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+MKFILE=`mktemp`
+# create and run a simple makefile
+# include the rocksdb makefile relative to the path of this script
+echo "include rocksdb/src.mk
+all:
+ @echo \$(LIB_SOURCES)" > $MKFILE
+for f in `make --makefile $MKFILE`
+do
+ echo ../../rocksdb/$f
+done
+rm $MKFILE
+
+# create the build_version.cc file. Only create it if it doesn't exist or if it is different,
+# so that we don't rebuild mysqld every time
+bv=rocksdb/util/build_version.cc
+date=$(date +%F)
+git_sha=$(pushd rocksdb >/dev/null && git rev-parse HEAD 2>/dev/null && popd >/dev/null)
+if [ ! -f $bv ] || [ -z $git_sha ] || [ ! `grep $git_sha $bv` ]
+then
+echo "#include \"build_version.h\"
+const char* rocksdb_build_git_sha =
+\"rocksdb_build_git_sha:$git_sha\";
+const char* rocksdb_build_git_date =
+\"rocksdb_build_git_date:$date\";
+const char* rocksdb_build_compile_date = __DATE__;" > $bv
+fi
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
new file mode 100644
index 00000000000..4fbaf01f154
--- /dev/null
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -0,0 +1,10699 @@
+/*
+ Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+#define MYSQL_SERVER 1
+
+/* The C++ file's header */
+#include "./ha_rocksdb.h"
+
+/* C++ standard header files */
+#include <set>
+#include <queue>
+#include <string>
+#include <vector>
+
+/* MySQL includes */
+#include "./debug_sync.h"
+#include "./my_bit.h"
+#include "./my_stacktrace.h"
+#include "./sql_table.h"
+#include <mysys_err.h>
+#include <mysql/psi/mysql_table.h>
+#include <mysql/thread_pool_priv.h>
+
+/* RocksDB includes */
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/utilities/checkpoint.h"
+#include "rocksdb/utilities/convenience.h"
+#include "rocksdb/utilities/flashcache.h"
+#include "rocksdb/utilities/memory_util.h"
+
+/* MyRocks includes */
+#include "./event_listener.h"
+#include "./ha_rocksdb_proto.h"
+#include "./logger.h"
+#include "./rdb_cf_manager.h"
+#include "./rdb_cf_options.h"
+#include "./rdb_datadic.h"
+#include "./rdb_i_s.h"
+#include "./rdb_index_merge.h"
+#include "./rdb_mutex_wrapper.h"
+#include "./rdb_threads.h"
+
+#ifdef TARGET_OS_LINUX
+extern my_bool cachedev_enabled;
+#endif /* TARGET_OS_LINUX */
+
+// Internal MySQL APIs not exposed in any header.
+extern "C"
+{
+/**
+ Mark transaction to rollback and mark error as fatal to a sub-statement.
+ @param thd Thread handle
+ @param all TRUE <=> rollback main transaction.
+*/
+void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all);
+
+/**
+ * Get the user thread's binary logging format
+ * @param thd user thread
+ * @return Value to be used as index into the binlog_format_names array
+*/
+int thd_binlog_format(const MYSQL_THD thd);
+
+/**
+ * Check if binary logging is filtered for thread's current db.
+ * @param thd Thread handle
+ * @retval 1 the query is not filtered, 0 otherwise.
+*/
+bool thd_binlog_filter_ok(const MYSQL_THD thd);
+}
+
+namespace myrocks {
+
+static st_global_stats global_stats;
+static st_export_stats export_stats;
+
+/**
+ Updates row counters based on the table type and operation type.
+*/
+void ha_rocksdb::update_row_stats(operation_type type) {
+ DBUG_ASSERT(type < ROWS_MAX);
+  // Check whether we are modifying a MySQL system table.
+ if (table->s && m_tbl_def->m_is_mysql_system_table)
+ global_stats.system_rows[type].inc();
+ else
+ global_stats.rows[type].inc();
+}
+
+void dbug_dump_database(rocksdb::DB *db);
+static handler *rocksdb_create_handler(my_core::handlerton *hton,
+ my_core::TABLE_SHARE *table_arg,
+ my_core::MEM_ROOT *mem_root);
+
+bool can_use_bloom_filter(THD *thd,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice &eq_cond,
+ const bool use_all_keys,
+ bool is_ascending);
+
+///////////////////////////////////////////////////////////
+// Parameters and settings
+///////////////////////////////////////////////////////////
+static char * rocksdb_default_cf_options;
+static char * rocksdb_override_cf_options;
+Rdb_cf_options rocksdb_cf_options_map;
+
+///////////////////////////////////////////////////////////
+// Globals
+///////////////////////////////////////////////////////////
+handlerton *rocksdb_hton;
+
+rocksdb::TransactionDB *rdb= nullptr;
+
+static std::shared_ptr<rocksdb::Statistics> rocksdb_stats;
+static std::unique_ptr<rocksdb::Env> flashcache_aware_env;
+static std::shared_ptr<Rdb_tbl_prop_coll_factory>
+ properties_collector_factory;
+
+Rdb_dict_manager dict_manager;
+Rdb_cf_manager cf_manager;
+Rdb_ddl_manager ddl_manager;
+Rdb_binlog_manager binlog_manager;
+
+
+/**
+ MyRocks background thread control
+  N.B. This is in addition to RocksDB's own background threads
+ (@see rocksdb::CancelAllBackgroundWork())
+*/
+
+static Rdb_background_thread rdb_bg_thread;
+
+
+// List of table names (using regex) that are exceptions to the strict
+// collation check requirement.
+Regex_list_handler *rdb_collation_exceptions;
+
+static const char* const ERRSTR_ROLLBACK_ONLY
+ = "This transaction was rolled back and cannot be "
+ "committed. Only supported operation is to roll it back, "
+ "so all pending changes will be discarded. "
+ "Please restart another transaction.";
+
+
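+// Flush the active memtable of every column family known to the CF manager.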
+static void
+rocksdb_flush_all_memtables()
+{
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+ for (auto cf_handle : cf_manager.get_all_cf()) {
+ rdb->Flush(rocksdb::FlushOptions(), cf_handle);
+ }
+}
+
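+// No-op "update" handler for the rocksdb_compact_cf variable; the actual
+// compaction is triggered from the "check" handler below.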
+static void
+rocksdb_compact_column_family_stub(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save)
+{
+}
+
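+// "Check" handler for the rocksdb_compact_cf variable: if the supplied value
+// names an existing column family, run a manual CompactRange() over its whole
+// key range.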
+static int
+rocksdb_compact_column_family(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ struct st_mysql_value* value)
+{
+ char buff[STRING_BUFFER_USUAL_SIZE];
+ int len = sizeof(buff);
+
+ if (const char* cf = value->val_str(value, buff, &len)) {
+ bool is_automatic;
+ auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic);
+ if (cfh != nullptr && rdb != nullptr) {
+ sql_print_information("RocksDB: Manual compaction of column family: %s\n", cf);
+ rdb->CompactRange(rocksdb::CompactRangeOptions(), cfh, nullptr, nullptr);
+ }
+ }
+ return 0;
+}
+
+///////////////////////////////////////////////////////////
+// Hash map: table name => open table handler
+///////////////////////////////////////////////////////////
+
+namespace // anonymous namespace = not visible outside this source file
+{
+
+struct Rdb_open_tables_map
+{
+ /* Hash table used to track the handlers of open tables */
+ my_core::HASH m_hash;
+ /* The mutex used to protect the hash table */
+ mutable mysql_mutex_t m_mutex;
+
+ void init_hash(void)
+ {
+ (void) my_hash_init(&m_hash, my_core::system_charset_info, 32, 0, 0,
+ (my_hash_get_key) Rdb_open_tables_map::get_hash_key,
+ 0, 0);
+ }
+
+ void free_hash(void)
+ {
+ my_hash_free(&m_hash);
+ }
+
+ static uchar* get_hash_key(Rdb_table_handler *table_handler,
+ size_t *length,
+ my_bool not_used __attribute__((__unused__)));
+
+ Rdb_table_handler* get_table_handler(const char *table_name);
+ void release_table_handler(Rdb_table_handler *table_handler);
+
+ std::vector<std::string> get_table_names(void) const;
+};
+
+} // anonymous namespace
+
+static Rdb_open_tables_map rdb_open_tables;
+
+
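+// Strip any trailing '/' characters from a directory path.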
+static std::string rdb_normalize_dir(std::string dir)
+{
+ while (dir.size() > 0 && dir.back() == '/')
+ {
+ dir.resize(dir.size() - 1);
+ }
+ return dir;
+}
+
+
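+// "Check" handler for the rocksdb_create_checkpoint variable: create a RocksDB
+// checkpoint (a consistent snapshot of the database) in the given directory.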
+static int rocksdb_create_checkpoint(
+ THD* thd __attribute__((__unused__)),
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* save __attribute__((__unused__)),
+ struct st_mysql_value* value)
+{
+ char buf[512];
+ int len = sizeof(buf);
+ const char* checkpoint_dir_raw= value->val_str(value, buf, &len);
+ if (checkpoint_dir_raw) {
+ if (rdb != nullptr) {
+ std::string checkpoint_dir= rdb_normalize_dir(checkpoint_dir_raw);
+ // NO_LINT_DEBUG
+ sql_print_information("RocksDB: creating checkpoint in directory : %s\n",
+ checkpoint_dir.c_str());
+ rocksdb::Checkpoint* checkpoint;
+ auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint);
+ if (status.ok()) {
+ status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str());
+ if (status.ok()) {
+ sql_print_information(
+ "RocksDB: created checkpoint in directory : %s\n",
+ checkpoint_dir.c_str());
+ } else {
+ my_printf_error(
+ ER_UNKNOWN_ERROR,
+ "RocksDB: Failed to create checkpoint directory. status %d %s",
+ MYF(0), status.code(), status.ToString().c_str());
+ }
+ delete checkpoint;
+ } else {
+ std::string err_text(status.ToString());
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "RocksDB: failed to initialize checkpoint. status %d %s\n",
+ MYF(0), status.code(), err_text.c_str());
+ }
+ return status.code();
+ }
+ }
+ return HA_ERR_INTERNAL_ERROR;
+}
+
+/* This method is needed to indicate that the
+ ROCKSDB_CREATE_CHECKPOINT command is not read-only */
+static void
+rocksdb_create_checkpoint_stub(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save)
+{
+}
+
+static void
+rocksdb_force_flush_memtable_now_stub(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save)
+{
+}
+
+static int
+rocksdb_force_flush_memtable_now(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ struct st_mysql_value* value)
+{
+ sql_print_information("RocksDB: Manual memtable flush\n");
+ rocksdb_flush_all_memtables();
+ return 0;
+}
+
+static void rocksdb_drop_index_wakeup_thread(
+ my_core::THD* thd __attribute__((__unused__)),
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr __attribute__((__unused__)),
+ const void* save);
+
+static my_bool rocksdb_pause_background_work= 0;
+static mysql_mutex_t rdb_sysvars_mutex;
+
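+// Update handler for rocksdb_pause_background_work: pause or resume RocksDB's
+// background compaction/flush work, tracking the current state under
+// rdb_sysvars_mutex.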
+static void rocksdb_set_pause_background_work(
+ my_core::THD* thd __attribute__((__unused__)),
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr __attribute__((__unused__)),
+ const void* save)
+{
+ mysql_mutex_lock(&rdb_sysvars_mutex);
+ bool pause_requested= *static_cast<const bool*>(save);
+ if (rocksdb_pause_background_work != pause_requested) {
+ if (pause_requested) {
+ rdb->PauseBackgroundWork();
+ } else {
+ rdb->ContinueBackgroundWork();
+ }
+ rocksdb_pause_background_work= pause_requested;
+ }
+ mysql_mutex_unlock(&rdb_sysvars_mutex);
+}
+
+static void
+rocksdb_set_compaction_options(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save);
+
+static void
+rocksdb_set_table_stats_sampling_pct(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save);
+
+static void
+rocksdb_set_rate_limiter_bytes_per_sec(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save);
+
+static void rdb_set_collation_exception_list(const char *exception_list);
+static void
+rocksdb_set_collation_exception_list(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save);
+
+static void
+rocksdb_set_bulk_load(THD* thd,
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr,
+ const void* save);
+//////////////////////////////////////////////////////////////////////////////
+// Options definitions
+//////////////////////////////////////////////////////////////////////////////
+static long long rocksdb_block_cache_size;
+/* Use unsigned long long instead of uint64_t for MySQL compatibility */
+static unsigned long long // NOLINT(runtime/int)
+ rocksdb_rate_limiter_bytes_per_sec;
+static uint64_t rocksdb_info_log_level;
+static char * rocksdb_wal_dir;
+static uint64_t rocksdb_index_type;
+static char rocksdb_background_sync;
+static uint32_t rocksdb_debug_optimizer_n_rows;
+static my_bool rocksdb_debug_optimizer_no_zero_cardinality;
+static uint32_t rocksdb_wal_recovery_mode;
+static uint32_t rocksdb_access_hint_on_compaction_start;
+static char * rocksdb_compact_cf_name;
+static char * rocksdb_checkpoint_name;
+static my_bool rocksdb_signal_drop_index_thread;
+static my_bool rocksdb_strict_collation_check= 1;
+static my_bool rocksdb_disable_2pc= 0;
+static char * rocksdb_strict_collation_exceptions;
+static my_bool rocksdb_collect_sst_properties= 1;
+static my_bool rocksdb_force_flush_memtable_now_var= 0;
+static uint64_t rocksdb_number_stat_computes= 0;
+static uint32_t rocksdb_seconds_between_stat_computes= 3600;
+static long long rocksdb_compaction_sequential_deletes= 0l;
+static long long rocksdb_compaction_sequential_deletes_window= 0l;
+static long long rocksdb_compaction_sequential_deletes_file_size= 0l;
+static uint32_t rocksdb_validate_tables = 1;
+static char * rocksdb_datadir;
+static uint32_t rocksdb_table_stats_sampling_pct;
+static my_bool rocksdb_enable_bulk_load_api= 1;
+static my_bool rpl_skip_tx_api_var= 0;
+
+std::atomic<uint64_t> rocksdb_snapshot_conflict_errors(0);
+
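+// Build the initial DBOptions that serve as defaults for the rocksdb_* system
+// variables defined further below.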
+static rocksdb::DBOptions rdb_init_rocksdb_db_options(void)
+{
+ rocksdb::DBOptions o;
+
+ o.create_if_missing= true;
+ o.listeners.push_back(std::make_shared<Rdb_event_listener>(&ddl_manager));
+ o.info_log_level= rocksdb::InfoLogLevel::INFO_LEVEL;
+ o.max_subcompactions= DEFAULT_SUBCOMPACTIONS;
+
+ return o;
+}
+
+static rocksdb::DBOptions rocksdb_db_options= rdb_init_rocksdb_db_options();
+static rocksdb::BlockBasedTableOptions rocksdb_tbl_options;
+
+static std::shared_ptr<rocksdb::RateLimiter> rocksdb_rate_limiter;
+
+/* This enum needs to be kept up to date with rocksdb::InfoLogLevel */
+static const char* info_log_level_names[] = {
+ "debug_level",
+ "info_level",
+ "warn_level",
+ "error_level",
+ "fatal_level",
+ NullS
+};
+
+static TYPELIB info_log_level_typelib = {
+ array_elements(info_log_level_names) - 1,
+ "info_log_level_typelib",
+ info_log_level_names,
+ nullptr
+};
+
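+// Update handler for rocksdb_info_log_level: store the new value and apply it
+// to the RocksDB info log.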
+static void
+rocksdb_set_rocksdb_info_log_level(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save)
+{
+ mysql_mutex_lock(&rdb_sysvars_mutex);
+ rocksdb_info_log_level = *static_cast<const uint64_t*>(save);
+ rocksdb_db_options.info_log->SetInfoLogLevel(
+ static_cast<const rocksdb::InfoLogLevel>(rocksdb_info_log_level));
+ mysql_mutex_unlock(&rdb_sysvars_mutex);
+}
+
+static const char* index_type_names[] = {
+ "kBinarySearch",
+ "kHashSearch",
+ NullS
+};
+
+static TYPELIB index_type_typelib = {
+ array_elements(index_type_names) - 1,
+ "index_type_typelib",
+ index_type_names,
+ nullptr
+};
+
+//TODO: 0 means don't wait at all, and we don't support it yet?
+static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG,
+ "Number of seconds to wait for lock",
+ nullptr, nullptr, /*default*/ 1, /*min*/ 1, /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_BOOL(bulk_load, PLUGIN_VAR_RQCMDARG,
+ "Use bulk-load mode for inserts. This enables both "
+ "rocksdb_skip_unique_check and rocksdb_commit_in_the_middle.",
+ nullptr, rocksdb_set_bulk_load, FALSE);
+
+static MYSQL_SYSVAR_BOOL(enable_bulk_load_api,
+ rocksdb_enable_bulk_load_api,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Enables using SstFileWriter for bulk loading",
+ nullptr, nullptr, rocksdb_enable_bulk_load_api);
+
+static MYSQL_THDVAR_STR(skip_unique_check_tables,
+ PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC,
+ "Skip unique constraint checking for the specified tables", nullptr, nullptr,
+ ".*");
+
+static MYSQL_THDVAR_BOOL(skip_unique_check, PLUGIN_VAR_RQCMDARG,
+ "Skip unique constraint checking for all tables", nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_BOOL(commit_in_the_middle, PLUGIN_VAR_RQCMDARG,
+ "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, "
+ "update and delete",
+ nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_STR(read_free_rpl_tables,
+ PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC,
+ "List of tables that will use read-free replication on the slave "
+ "(i.e. not lookup a row during replication)", nullptr, nullptr, "");
+
+static MYSQL_SYSVAR_BOOL(
+ rpl_skip_tx_api,
+ rpl_skip_tx_api_var,
+ PLUGIN_VAR_RQCMDARG,
+ "Use write batches for replication thread instead of tx api", nullptr,
+ nullptr, FALSE);
+
+static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG,
+ "Skip using bloom filter for reads", nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG,
+ "Maximum number of locks a transaction can have",
+ nullptr, nullptr, /*default*/ 1024*1024*1024, /*min*/ 1,
+ /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_BOOL(lock_scanned_rows, PLUGIN_VAR_RQCMDARG,
+ "Take and hold locks on rows that are scanned but not updated",
+ nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG,
+ "Max #records in a batch for bulk-load mode",
+ nullptr, nullptr, /*default*/ 1000, /*min*/ 1, /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_ULONGLONG(merge_buf_size, PLUGIN_VAR_RQCMDARG,
+ "Size to allocate for merge sort buffers written out to disk "
+ "during inplace index creation.",
+ nullptr, nullptr,
+ /* default (64MB) */ (ulonglong) 67108864,
+ /* min (100B) */ 100,
+ /* max */ SIZE_T_MAX, 1);
+
+static MYSQL_THDVAR_ULONGLONG(merge_combine_read_size, PLUGIN_VAR_RQCMDARG,
+ "Size that we have to work with during combine (reading from disk) phase of "
+ "external sort during fast index creation.",
+ nullptr, nullptr,
+ /* default (1GB) */ (ulonglong) 1073741824,
+ /* min (100B) */ 100,
+ /* max */ SIZE_T_MAX, 1);
+
+static MYSQL_SYSVAR_BOOL(create_if_missing,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.create_if_missing),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::create_if_missing for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.create_if_missing);
+
+static MYSQL_SYSVAR_BOOL(create_missing_column_families,
+ *reinterpret_cast<my_bool*>(
+ &rocksdb_db_options.create_missing_column_families),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::create_missing_column_families for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.create_missing_column_families);
+
+static MYSQL_SYSVAR_BOOL(error_if_exists,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.error_if_exists),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::error_if_exists for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.error_if_exists);
+
+static MYSQL_SYSVAR_BOOL(paranoid_checks,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.paranoid_checks),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::paranoid_checks for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.paranoid_checks);
+
+static MYSQL_SYSVAR_ULONGLONG(rate_limiter_bytes_per_sec,
+ rocksdb_rate_limiter_bytes_per_sec,
+ PLUGIN_VAR_RQCMDARG,
+ "DBOptions::rate_limiter bytes_per_sec for RocksDB",
+ nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L,
+ /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0);
+
+static MYSQL_SYSVAR_ENUM(info_log_level,
+ rocksdb_info_log_level,
+ PLUGIN_VAR_RQCMDARG,
+ "Filter level for info logs to be written mysqld error log. "
+ "Valid values include 'debug_level', 'info_level', 'warn_level'"
+ "'error_level' and 'fatal_level'.",
+ nullptr, rocksdb_set_rocksdb_info_log_level,
+ rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib);
+
+static MYSQL_THDVAR_INT(perf_context_level,
+ PLUGIN_VAR_RQCMDARG,
+ "Perf Context Level for rocksdb internal timer stat collection",
+ nullptr, nullptr,
+ /* default */ rocksdb::PerfLevel::kUninitialized,
+ /* min */ rocksdb::PerfLevel::kUninitialized,
+ /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0);
+
+static MYSQL_SYSVAR_UINT(wal_recovery_mode,
+ rocksdb_wal_recovery_mode,
+ PLUGIN_VAR_RQCMDARG,
+ "DBOptions::wal_recovery_mode for RocksDB",
+ nullptr, nullptr, 2,
+ /* min */ 0L, /* max */ 3, 0);
+
+static MYSQL_SYSVAR_ULONG(compaction_readahead_size,
+ rocksdb_db_options.compaction_readahead_size,
+ PLUGIN_VAR_RQCMDARG,
+ "DBOptions::compaction_readahead_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.compaction_readahead_size,
+ /* min */ 0L, /* max */ ULONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(new_table_reader_for_compaction_inputs,
+ *reinterpret_cast<my_bool*>
+ (&rocksdb_db_options.new_table_reader_for_compaction_inputs),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::new_table_reader_for_compaction_inputs for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs);
+
+static MYSQL_SYSVAR_UINT(access_hint_on_compaction_start,
+ rocksdb_access_hint_on_compaction_start,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::access_hint_on_compaction_start for RocksDB",
+ nullptr, nullptr, 1,
+ /* min */ 0L, /* max */ 3, 0);
+
+static MYSQL_SYSVAR_BOOL(allow_concurrent_memtable_write,
+ *reinterpret_cast<my_bool*>(
+ &rocksdb_db_options.allow_concurrent_memtable_write),
+ PLUGIN_VAR_RQCMDARG,
+ "DBOptions::allow_concurrent_memtable_write for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.allow_concurrent_memtable_write);
+
+static MYSQL_SYSVAR_BOOL(enable_write_thread_adaptive_yield,
+ *reinterpret_cast<my_bool*>(
+ &rocksdb_db_options.enable_write_thread_adaptive_yield),
+ PLUGIN_VAR_RQCMDARG,
+ "DBOptions::enable_write_thread_adaptive_yield for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.enable_write_thread_adaptive_yield);
+
+static MYSQL_SYSVAR_INT(max_open_files,
+ rocksdb_db_options.max_open_files,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_open_files for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_open_files,
+ /* min */ -1, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(max_total_wal_size,
+ rocksdb_db_options.max_total_wal_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_total_wal_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_total_wal_size,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(disabledatasync,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.disableDataSync),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::disableDataSync for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.disableDataSync);
+
+static MYSQL_SYSVAR_BOOL(use_fsync,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.use_fsync),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::use_fsync for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.use_fsync);
+
+static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::wal_dir for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.wal_dir.c_str());
+
+static MYSQL_SYSVAR_ULONG(delete_obsolete_files_period_micros,
+ rocksdb_db_options.delete_obsolete_files_period_micros,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::delete_obsolete_files_period_micros for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.delete_obsolete_files_period_micros,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_INT(base_background_compactions,
+ rocksdb_db_options.base_background_compactions,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::base_background_compactions for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.base_background_compactions,
+ /* min */ -1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0);
+
+static MYSQL_SYSVAR_INT(max_background_compactions,
+ rocksdb_db_options.max_background_compactions,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_background_compactions for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_background_compactions,
+ /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0);
+
+static MYSQL_SYSVAR_INT(max_background_flushes,
+ rocksdb_db_options.max_background_flushes,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_background_flushes for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_background_flushes,
+ /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0);
+
+static MYSQL_SYSVAR_UINT(max_subcompactions,
+ rocksdb_db_options.max_subcompactions,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_subcompactions for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_subcompactions,
+ /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0);
+
+static MYSQL_SYSVAR_ULONG(max_log_file_size,
+ rocksdb_db_options.max_log_file_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_log_file_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_log_file_size,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(log_file_time_to_roll,
+ rocksdb_db_options.log_file_time_to_roll,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::log_file_time_to_roll for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.log_file_time_to_roll,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(keep_log_file_num,
+ rocksdb_db_options.keep_log_file_num,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::keep_log_file_num for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.keep_log_file_num,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(max_manifest_file_size,
+ rocksdb_db_options.max_manifest_file_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::max_manifest_file_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.max_manifest_file_size,
+ /* min */ 0L, /* max */ ULONG_MAX, 0);
+
+static MYSQL_SYSVAR_INT(table_cache_numshardbits,
+ rocksdb_db_options.table_cache_numshardbits,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::table_cache_numshardbits for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.table_cache_numshardbits,
+ /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(wal_ttl_seconds,
+ rocksdb_db_options.WAL_ttl_seconds,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::WAL_ttl_seconds for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.WAL_ttl_seconds,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(wal_size_limit_mb,
+ rocksdb_db_options.WAL_size_limit_MB,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::WAL_size_limit_MB for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.WAL_size_limit_MB,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(manifest_preallocation_size,
+ rocksdb_db_options.manifest_preallocation_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::manifest_preallocation_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.manifest_preallocation_size,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(allow_os_buffer,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.allow_os_buffer),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::allow_os_buffer for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.allow_os_buffer);
+
+static MYSQL_SYSVAR_BOOL(allow_mmap_reads,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.allow_mmap_reads),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::allow_mmap_reads for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.allow_mmap_reads);
+
+static MYSQL_SYSVAR_BOOL(allow_mmap_writes,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.allow_mmap_writes),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::allow_mmap_writes for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.allow_mmap_writes);
+
+static MYSQL_SYSVAR_BOOL(is_fd_close_on_exec,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.is_fd_close_on_exec),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::is_fd_close_on_exec for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.is_fd_close_on_exec);
+
+static MYSQL_SYSVAR_UINT(stats_dump_period_sec,
+ rocksdb_db_options.stats_dump_period_sec,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::stats_dump_period_sec for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.stats_dump_period_sec,
+ /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(advise_random_on_open,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.advise_random_on_open),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::advise_random_on_open for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.advise_random_on_open);
+
+static MYSQL_SYSVAR_ULONG(db_write_buffer_size,
+ rocksdb_db_options.db_write_buffer_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::db_write_buffer_size for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.db_write_buffer_size,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(use_adaptive_mutex,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.use_adaptive_mutex),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::use_adaptive_mutex for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.use_adaptive_mutex);
+
+static MYSQL_SYSVAR_ULONG(bytes_per_sync,
+ rocksdb_db_options.bytes_per_sync,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::bytes_per_sync for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.bytes_per_sync,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_ULONG(wal_bytes_per_sync,
+ rocksdb_db_options.wal_bytes_per_sync,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::wal_bytes_per_sync for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.wal_bytes_per_sync,
+ /* min */ 0L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(enable_thread_tracking,
+ *reinterpret_cast<my_bool*>(&rocksdb_db_options.enable_thread_tracking),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "DBOptions::enable_thread_tracking for RocksDB",
+ nullptr, nullptr, rocksdb_db_options.enable_thread_tracking);
+
+static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "block_cache size for RocksDB",
+ nullptr, nullptr, /* RocksDB's default is 8 MB: */ 8*1024*1024L,
+ /* min */ 1024L, /* max */ LONGLONG_MAX, /* Block size */1024L);
+
+static MYSQL_SYSVAR_BOOL(cache_index_and_filter_blocks,
+ *reinterpret_cast<my_bool*>(
+ &rocksdb_tbl_options.cache_index_and_filter_blocks),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB",
+ nullptr, nullptr, true);
+
+// When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will still use
+// the LRU cache, but it keeps the filter & index blocks' handles checked out
+// (i.e. it never calls ShardedLRUCache::Release on them), so the parsed-out
+// objects are never evicted from the LRU cache; in effect they are pinned.
+//
+// This fixes the mutex contention between ShardedLRUCache::Lookup and
+// ShardedLRUCache::Release which reduced the QPS ratio (QPS using secondary
+// index / QPS using PK).
+static MYSQL_SYSVAR_BOOL(pin_l0_filter_and_index_blocks_in_cache,
+ *reinterpret_cast<my_bool*>(
+ &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "pin_l0_filter_and_index_blocks_in_cache for RocksDB",
+ nullptr, nullptr, true);
+
+static MYSQL_SYSVAR_ENUM(index_type,
+ rocksdb_index_type,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::index_type for RocksDB",
+ nullptr, nullptr,
+ (uint64_t)rocksdb_tbl_options.index_type, &index_type_typelib);
+
+static MYSQL_SYSVAR_BOOL(hash_index_allow_collision,
+ *reinterpret_cast<my_bool*>(&rocksdb_tbl_options.hash_index_allow_collision),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::hash_index_allow_collision for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.hash_index_allow_collision);
+
+static MYSQL_SYSVAR_BOOL(no_block_cache,
+ *reinterpret_cast<my_bool*>(&rocksdb_tbl_options.no_block_cache),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::no_block_cache for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.no_block_cache);
+
+static MYSQL_SYSVAR_ULONG(block_size,
+ rocksdb_tbl_options.block_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::block_size for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.block_size,
+ /* min */ 1L, /* max */ LONG_MAX, 0);
+
+static MYSQL_SYSVAR_INT(block_size_deviation,
+ rocksdb_tbl_options.block_size_deviation,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::block_size_deviation for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.block_size_deviation,
+ /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_INT(block_restart_interval,
+ rocksdb_tbl_options.block_restart_interval,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::block_restart_interval for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.block_restart_interval,
+ /* min */ 1, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(whole_key_filtering,
+ *reinterpret_cast<my_bool*>(&rocksdb_tbl_options.whole_key_filtering),
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "BlockBasedTableOptions::whole_key_filtering for RocksDB",
+ nullptr, nullptr, rocksdb_tbl_options.whole_key_filtering);
+
+static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "default cf options for RocksDB",
+ nullptr, nullptr, "");
+
+static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "option overrides per cf for RocksDB",
+ nullptr, nullptr, "");
+
+static MYSQL_SYSVAR_BOOL(background_sync,
+ rocksdb_background_sync,
+ PLUGIN_VAR_RQCMDARG,
+ "turns on background syncs for RocksDB",
+ nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_BOOL(write_sync,
+ PLUGIN_VAR_RQCMDARG,
+ "WriteOptions::sync for RocksDB",
+ nullptr, nullptr, rocksdb::WriteOptions().sync);
+
+static MYSQL_THDVAR_BOOL(write_disable_wal,
+ PLUGIN_VAR_RQCMDARG,
+ "WriteOptions::disableWAL for RocksDB",
+ nullptr, nullptr, rocksdb::WriteOptions().disableWAL);
+
+static MYSQL_THDVAR_BOOL(write_ignore_missing_column_families,
+ PLUGIN_VAR_RQCMDARG,
+ "WriteOptions::ignore_missing_column_families for RocksDB",
+ nullptr, nullptr, rocksdb::WriteOptions().ignore_missing_column_families);
+
+static MYSQL_THDVAR_BOOL(skip_fill_cache,
+ PLUGIN_VAR_RQCMDARG,
+ "Skip filling block cache on read requests",
+ nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_BOOL(unsafe_for_binlog,
+ PLUGIN_VAR_RQCMDARG,
+ "Allowing statement based binary logging which may break consistency",
+ nullptr, nullptr, FALSE);
+
+static MYSQL_THDVAR_UINT(records_in_range,
+ PLUGIN_VAR_RQCMDARG,
+ "Used to override the result of records_in_range(). Set to a positive number to override",
+ nullptr, nullptr, 0,
+ /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_THDVAR_UINT(force_index_records_in_range,
+ PLUGIN_VAR_RQCMDARG,
+ "Used to override the result of records_in_range() when FORCE INDEX is used.",
+ nullptr, nullptr, 0,
+ /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_UINT(debug_optimizer_n_rows,
+ rocksdb_debug_optimizer_n_rows,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR,
+ "Test only to override rocksdb estimates of table size in a memtable",
+ nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(debug_optimizer_no_zero_cardinality,
+ rocksdb_debug_optimizer_no_zero_cardinality,
+ PLUGIN_VAR_RQCMDARG,
+ "In case if cardinality is zero, overrides it with some value",
+ nullptr, nullptr, TRUE);
+
+static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name,
+ PLUGIN_VAR_RQCMDARG,
+ "Compact column family",
+ rocksdb_compact_column_family, rocksdb_compact_column_family_stub, "");
+
+static MYSQL_SYSVAR_STR(create_checkpoint, rocksdb_checkpoint_name,
+ PLUGIN_VAR_RQCMDARG,
+ "Checkpoint directory",
+ rocksdb_create_checkpoint, rocksdb_create_checkpoint_stub, "");
+
+static MYSQL_SYSVAR_BOOL(signal_drop_index_thread,
+ rocksdb_signal_drop_index_thread,
+ PLUGIN_VAR_RQCMDARG,
+ "Wake up drop index thread",
+ nullptr, rocksdb_drop_index_wakeup_thread, FALSE);
+
+static MYSQL_SYSVAR_BOOL(pause_background_work,
+ rocksdb_pause_background_work,
+ PLUGIN_VAR_RQCMDARG,
+ "Disable all rocksdb background operations",
+ nullptr, rocksdb_set_pause_background_work, FALSE);
+
+static MYSQL_SYSVAR_BOOL(disable_2pc,
+ rocksdb_disable_2pc,
+ PLUGIN_VAR_RQCMDARG,
+ "Disable two phase commit for MyRocks",
+ nullptr, nullptr, TRUE);
+
+static MYSQL_SYSVAR_BOOL(strict_collation_check,
+ rocksdb_strict_collation_check,
+ PLUGIN_VAR_RQCMDARG,
+ "Enforce case sensitive collation for MyRocks indexes",
+ nullptr, nullptr, TRUE);
+
+static MYSQL_SYSVAR_STR(strict_collation_exceptions,
+ rocksdb_strict_collation_exceptions,
+ PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_MEMALLOC,
+ "List of tables (using regex) that are excluded "
+ "from the case sensitive collation enforcement",
+ nullptr, rocksdb_set_collation_exception_list, "");
+
+static MYSQL_SYSVAR_BOOL(collect_sst_properties,
+ rocksdb_collect_sst_properties,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Enables collecting SST file properties on each flush",
+ nullptr, nullptr, rocksdb_collect_sst_properties);
+
+static MYSQL_SYSVAR_BOOL(
+ force_flush_memtable_now,
+ rocksdb_force_flush_memtable_now_var,
+ PLUGIN_VAR_RQCMDARG,
+ "Forces memstore flush which may block all write requests so be careful",
+ rocksdb_force_flush_memtable_now,
+ rocksdb_force_flush_memtable_now_stub, FALSE);
+
+static MYSQL_THDVAR_BOOL(
+ flush_memtable_on_analyze,
+ PLUGIN_VAR_RQCMDARG,
+ "Forces memtable flush on ANALZYE table to get accurate cardinality",
+ nullptr, nullptr, true);
+
+static MYSQL_SYSVAR_UINT(seconds_between_stat_computes,
+ rocksdb_seconds_between_stat_computes,
+ PLUGIN_VAR_RQCMDARG,
+ "Sets a number of seconds to wait between optimizer stats recomputation. "
+ "Only changed indexes will be refreshed.",
+ nullptr, nullptr, rocksdb_seconds_between_stat_computes,
+ /* min */ 0L, /* max */ UINT_MAX, 0);
+
+static MYSQL_SYSVAR_LONGLONG(
+ compaction_sequential_deletes,
+ rocksdb_compaction_sequential_deletes,
+ PLUGIN_VAR_RQCMDARG,
+ "RocksDB will trigger compaction for the file if it has more than this number sequential deletes per window",
+ nullptr, rocksdb_set_compaction_options,
+ DEFAULT_COMPACTION_SEQUENTIAL_DELETES,
+ /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0);
+
+static MYSQL_SYSVAR_LONGLONG(
+ compaction_sequential_deletes_window,
+ rocksdb_compaction_sequential_deletes_window,
+ PLUGIN_VAR_RQCMDARG,
+ "Size of the window for counting rocksdb_compaction_sequential_deletes",
+ nullptr, rocksdb_set_compaction_options,
+ DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW,
+ /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0);
+
+static MYSQL_SYSVAR_LONGLONG(
+ compaction_sequential_deletes_file_size,
+ rocksdb_compaction_sequential_deletes_file_size,
+ PLUGIN_VAR_RQCMDARG,
+ "Minimum file size required for compaction_sequential_deletes",
+ nullptr, rocksdb_set_compaction_options, 0L,
+ /* min */ -1L, /* max */ LONGLONG_MAX, 0);
+
+static MYSQL_SYSVAR_BOOL(compaction_sequential_deletes_count_sd,
+ rocksdb_compaction_sequential_deletes_count_sd,
+ PLUGIN_VAR_RQCMDARG,
+ "Counting SingleDelete as rocksdb_compaction_sequential_deletes",
+ nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd);
+
+static MYSQL_THDVAR_INT(checksums_pct,
+ PLUGIN_VAR_RQCMDARG,
+ "How many percentages of rows to be checksummed",
+ nullptr, nullptr, 100,
+ /* min */ 0, /* max */ 100, 0);
+
+static MYSQL_THDVAR_BOOL(store_checksums,
+ PLUGIN_VAR_RQCMDARG,
+ "Include checksums when writing index/table records",
+ nullptr, nullptr, false /* default value */);
+
+static MYSQL_THDVAR_BOOL(verify_checksums,
+ PLUGIN_VAR_RQCMDARG,
+ "Verify checksums when reading index/table records",
+ nullptr, nullptr, false /* default value */);
+
+static MYSQL_SYSVAR_UINT(validate_tables,
+ rocksdb_validate_tables,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Verify all .frm files match all RocksDB tables (0 means no verification, "
+ "1 means verify and fail on error, and 2 means verify but continue",
+ nullptr, nullptr, 1 /* default value */, 0 /* min value */,
+ 2 /* max value */, 0);
+
+static MYSQL_SYSVAR_STR(datadir,
+ rocksdb_datadir,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
+ "RocksDB data directory",
+ nullptr, nullptr, "./.rocksdb");
+
+static MYSQL_SYSVAR_UINT(
+ table_stats_sampling_pct,
+ rocksdb_table_stats_sampling_pct,
+ PLUGIN_VAR_RQCMDARG,
+ "Percentage of entries to sample when collecting statistics about table "
+ "properties. Specify either 0 to sample everything or percentage ["
+ STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".."
+ STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. " "By default "
+ STRINGIFY_ARG(RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% of entries are "
+ "sampled.",
+ nullptr, rocksdb_set_table_stats_sampling_pct, /* default */
+ RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0,
+ /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0);
+
+static const longlong ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT= 4194304;
+static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE= 100;
+
+static struct st_mysql_sys_var* rocksdb_system_variables[]= {
+ MYSQL_SYSVAR(lock_wait_timeout),
+ MYSQL_SYSVAR(max_row_locks),
+ MYSQL_SYSVAR(lock_scanned_rows),
+ MYSQL_SYSVAR(bulk_load),
+ MYSQL_SYSVAR(skip_unique_check_tables),
+ MYSQL_SYSVAR(skip_unique_check),
+ MYSQL_SYSVAR(commit_in_the_middle),
+ MYSQL_SYSVAR(read_free_rpl_tables),
+ MYSQL_SYSVAR(rpl_skip_tx_api),
+ MYSQL_SYSVAR(bulk_load_size),
+ MYSQL_SYSVAR(merge_buf_size),
+ MYSQL_SYSVAR(enable_bulk_load_api),
+ MYSQL_SYSVAR(merge_combine_read_size),
+ MYSQL_SYSVAR(skip_bloom_filter_on_read),
+
+ MYSQL_SYSVAR(create_if_missing),
+ MYSQL_SYSVAR(create_missing_column_families),
+ MYSQL_SYSVAR(error_if_exists),
+ MYSQL_SYSVAR(paranoid_checks),
+ MYSQL_SYSVAR(rate_limiter_bytes_per_sec),
+ MYSQL_SYSVAR(info_log_level),
+ MYSQL_SYSVAR(max_open_files),
+ MYSQL_SYSVAR(max_total_wal_size),
+ MYSQL_SYSVAR(disabledatasync),
+ MYSQL_SYSVAR(use_fsync),
+ MYSQL_SYSVAR(wal_dir),
+ MYSQL_SYSVAR(delete_obsolete_files_period_micros),
+ MYSQL_SYSVAR(base_background_compactions),
+ MYSQL_SYSVAR(max_background_compactions),
+ MYSQL_SYSVAR(max_background_flushes),
+ MYSQL_SYSVAR(max_log_file_size),
+ MYSQL_SYSVAR(max_subcompactions),
+ MYSQL_SYSVAR(log_file_time_to_roll),
+ MYSQL_SYSVAR(keep_log_file_num),
+ MYSQL_SYSVAR(max_manifest_file_size),
+ MYSQL_SYSVAR(table_cache_numshardbits),
+ MYSQL_SYSVAR(wal_ttl_seconds),
+ MYSQL_SYSVAR(wal_size_limit_mb),
+ MYSQL_SYSVAR(manifest_preallocation_size),
+ MYSQL_SYSVAR(allow_os_buffer),
+ MYSQL_SYSVAR(allow_mmap_reads),
+ MYSQL_SYSVAR(allow_mmap_writes),
+ MYSQL_SYSVAR(is_fd_close_on_exec),
+ MYSQL_SYSVAR(stats_dump_period_sec),
+ MYSQL_SYSVAR(advise_random_on_open),
+ MYSQL_SYSVAR(db_write_buffer_size),
+ MYSQL_SYSVAR(use_adaptive_mutex),
+ MYSQL_SYSVAR(bytes_per_sync),
+ MYSQL_SYSVAR(wal_bytes_per_sync),
+ MYSQL_SYSVAR(enable_thread_tracking),
+ MYSQL_SYSVAR(perf_context_level),
+ MYSQL_SYSVAR(wal_recovery_mode),
+ MYSQL_SYSVAR(access_hint_on_compaction_start),
+ MYSQL_SYSVAR(new_table_reader_for_compaction_inputs),
+ MYSQL_SYSVAR(compaction_readahead_size),
+ MYSQL_SYSVAR(allow_concurrent_memtable_write),
+ MYSQL_SYSVAR(enable_write_thread_adaptive_yield),
+
+ MYSQL_SYSVAR(block_cache_size),
+ MYSQL_SYSVAR(cache_index_and_filter_blocks),
+ MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache),
+ MYSQL_SYSVAR(index_type),
+ MYSQL_SYSVAR(hash_index_allow_collision),
+ MYSQL_SYSVAR(no_block_cache),
+ MYSQL_SYSVAR(block_size),
+ MYSQL_SYSVAR(block_size_deviation),
+ MYSQL_SYSVAR(block_restart_interval),
+ MYSQL_SYSVAR(whole_key_filtering),
+
+ MYSQL_SYSVAR(default_cf_options),
+ MYSQL_SYSVAR(override_cf_options),
+
+ MYSQL_SYSVAR(background_sync),
+
+ MYSQL_SYSVAR(write_sync),
+ MYSQL_SYSVAR(write_disable_wal),
+ MYSQL_SYSVAR(write_ignore_missing_column_families),
+
+ MYSQL_SYSVAR(skip_fill_cache),
+ MYSQL_SYSVAR(unsafe_for_binlog),
+
+ MYSQL_SYSVAR(records_in_range),
+ MYSQL_SYSVAR(force_index_records_in_range),
+ MYSQL_SYSVAR(debug_optimizer_n_rows),
+ MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality),
+
+ MYSQL_SYSVAR(compact_cf),
+ MYSQL_SYSVAR(signal_drop_index_thread),
+ MYSQL_SYSVAR(pause_background_work),
+ MYSQL_SYSVAR(disable_2pc),
+ MYSQL_SYSVAR(strict_collation_check),
+ MYSQL_SYSVAR(strict_collation_exceptions),
+ MYSQL_SYSVAR(collect_sst_properties),
+ MYSQL_SYSVAR(force_flush_memtable_now),
+ MYSQL_SYSVAR(flush_memtable_on_analyze),
+ MYSQL_SYSVAR(seconds_between_stat_computes),
+
+ MYSQL_SYSVAR(compaction_sequential_deletes),
+ MYSQL_SYSVAR(compaction_sequential_deletes_window),
+ MYSQL_SYSVAR(compaction_sequential_deletes_file_size),
+ MYSQL_SYSVAR(compaction_sequential_deletes_count_sd),
+
+ MYSQL_SYSVAR(datadir),
+ MYSQL_SYSVAR(create_checkpoint),
+
+ MYSQL_SYSVAR(checksums_pct),
+ MYSQL_SYSVAR(store_checksums),
+ MYSQL_SYSVAR(verify_checksums),
+
+ MYSQL_SYSVAR(validate_tables),
+ MYSQL_SYSVAR(table_stats_sampling_pct),
+ nullptr
+};
+
+
+static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD* thd)
+{
+ rocksdb::WriteOptions opt;
+
+ opt.sync= THDVAR(thd, write_sync);
+ opt.disableWAL= THDVAR(thd, write_disable_wal);
+ opt.ignore_missing_column_families=
+ THDVAR(thd, write_ignore_missing_column_families);
+
+ return opt;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ @brief
+ Function we use in the creation of our hash to get key.
+*/
+
+uchar* Rdb_open_tables_map::get_hash_key(
+ Rdb_table_handler *table_handler, size_t *length,
+ my_bool not_used __attribute__((__unused__)))
+{
+ *length= table_handler->m_table_name_length;
+ return reinterpret_cast<uchar*>(table_handler->m_table_name);
+}
+
+
+/*
+ The following is needed as an argument for mysql_stage_register,
+ irrespective of whether we're compiling with P_S or not.
+*/
+PSI_stage_info stage_waiting_on_row_lock= { 0, "Waiting for row lock", 0};
+
+#ifdef HAVE_PSI_INTERFACE
+static PSI_thread_key rdb_background_psi_thread_key;
+static PSI_thread_key rdb_drop_idx_psi_thread_key;
+
+static PSI_stage_info *all_rocksdb_stages[]=
+{
+ & stage_waiting_on_row_lock
+};
+
+
+static my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key,
+ rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key,
+ rdb_collation_data_mutex_key,
+ rdb_mem_cmp_space_mutex_key,
+ key_mutex_tx_list, rdb_sysvars_psi_mutex_key;
+
+static PSI_mutex_info all_rocksdb_mutexes[]=
+{
+ { &rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL},
+ { &rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL},
+ { &rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL},
+ { &rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL},
+ { &rdb_mem_cmp_space_mutex_key, "collation space char data init",
+ PSI_FLAG_GLOBAL},
+ { &key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL},
+ { &rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL},
+};
+
+static PSI_rwlock_key key_rwlock_collation_exception_list;
+static PSI_rwlock_key key_rwlock_read_free_rpl_tables;
+static PSI_rwlock_key key_rwlock_skip_unique_check_tables;
+
+static PSI_rwlock_info all_rocksdb_rwlocks[]=
+{
+ { &key_rwlock_collation_exception_list, "collation_exception_list",
+ PSI_FLAG_GLOBAL},
+ { &key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL},
+ { &key_rwlock_skip_unique_check_tables, "skip_unique_check_tables",
+ PSI_FLAG_GLOBAL},
+};
+
+PSI_cond_key rdb_signal_bg_psi_cond_key, rdb_signal_drop_idx_psi_cond_key;
+
+static PSI_cond_info all_rocksdb_conds[]=
+{
+ { &rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL},
+ { &rdb_signal_drop_idx_psi_cond_key, "cond signal drop index",
+ PSI_FLAG_GLOBAL},
+};
+
+static PSI_thread_info all_rocksdb_threads[]=
+{
+ { &rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL},
+ { &rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL},
+};
+
+static void init_rocksdb_psi_keys()
+{
+ const char* category= "rocksdb";
+ int count;
+
+ if (PSI_server == nullptr)
+ return;
+
+ count= array_elements(all_rocksdb_mutexes);
+ PSI_server->register_mutex(category, all_rocksdb_mutexes, count);
+
+ count= array_elements(all_rocksdb_rwlocks);
+ PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count);
+
+ count= array_elements(all_rocksdb_conds);
+ // TODO Disabling PFS for conditions due to the bug https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92
+ // PSI_server->register_cond(category, all_rocksdb_conds, count);
+
+ count= array_elements(all_rocksdb_stages);
+ mysql_stage_register(category, all_rocksdb_stages, count);
+
+ count= array_elements(all_rocksdb_threads);
+ mysql_thread_register(category, all_rocksdb_threads, count);
+}
+#endif
+
+
+/*
+ Drop index thread's control
+*/
+
+static Rdb_drop_index_thread rdb_drop_idx_thread;
+
+static void rocksdb_drop_index_wakeup_thread(
+ my_core::THD* thd __attribute__((__unused__)),
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr __attribute__((__unused__)),
+ const void* save)
+{
+ if (*static_cast<const bool*>(save)) {
+ rdb_drop_idx_thread.signal();
+ }
+}
+
+static inline uint32_t rocksdb_perf_context_level(THD* thd)
+{
+ DBUG_ASSERT(thd != nullptr);
+
+ int session_perf_context_level= THDVAR(thd, perf_context_level);
+ if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) {
+ return session_perf_context_level;
+ }
+
+ /*
+ Fall back to the global thdvar if the session-specific one was not set to a
+ valid value.
+ */
+
+ int global_perf_context_level= THDVAR(nullptr, perf_context_level);
+ if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) {
+ return global_perf_context_level;
+ }
+
+ return rocksdb::PerfLevel::kDisable;
+}
+
+/*
+ Very short (functor-like) interface to be passed to
+ Rdb_transaction::walk_tx_list()
+*/
+
+interface Rdb_tx_list_walker
+{
+ virtual ~Rdb_tx_list_walker() {}
+ virtual void process_tran(const Rdb_transaction*) = 0;
+};
+
+/*
+ This is a helper class that is passed to RocksDB to get notifications when
+ a snapshot gets created.
+*/
+
+class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier
+{
+ Rdb_transaction* m_owning_tx;
+
+ void SnapshotCreated(const rocksdb::Snapshot *snapshot) override;
+
+ public:
+ explicit Rdb_snapshot_notifier(Rdb_transaction* owning_tx)
+ : m_owning_tx(owning_tx) {}
+
+ // If the owning Rdb_transaction gets destroyed we must stop referencing
+ // it.
+ void detach()
+ {
+ m_owning_tx = nullptr;
+ }
+};
+
+/* This is the base class for transactions when interacting with rocksdb.
+*/
+class Rdb_transaction
+{
+ protected:
+ ulonglong m_write_count= 0;
+ ulonglong m_lock_count= 0;
+
+ bool m_is_delayed_snapshot= false;
+ bool m_is_two_phase= false;
+
+ THD* m_thd= nullptr;
+
+ rocksdb::ReadOptions m_read_opts;
+
+ static std::multiset<Rdb_transaction*> s_tx_list;
+ static mysql_mutex_t s_tx_list_mutex;
+
+ Rdb_io_perf* m_tbl_io_perf;
+
+ bool m_tx_read_only= false;
+
+ int m_timeout_sec; /* Cached value of @@rocksdb_lock_wait_timeout */
+
+ /* Maximum number of locks the transaction can have */
+ ulonglong m_max_row_locks;
+
+ bool m_is_tx_failed= false;
+ bool m_rollback_only= false;
+
+ std::shared_ptr<Rdb_snapshot_notifier> m_notifier;
+
+ // This should be used only when updating binlog information.
+ virtual rocksdb::WriteBatchBase* get_write_batch()= 0;
+ virtual bool commit_no_binlog()= 0;
+ virtual rocksdb::Iterator *get_iterator(
+ const rocksdb::ReadOptions &options,
+ rocksdb::ColumnFamilyHandle* column_family)= 0;
+
+
+public:
+ const char* m_mysql_log_file_name;
+ my_off_t m_mysql_log_offset;
+ const char* m_mysql_gtid;
+ String m_detailed_error;
+ int64_t m_snapshot_timestamp= 0;
+ bool m_ddl_transaction;
+
+ static void init_mutex()
+ {
+ mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST);
+ }
+
+ static void term_mutex()
+ {
+ DBUG_ASSERT(s_tx_list.size() == 0);
+ mysql_mutex_destroy(&s_tx_list_mutex);
+ }
+
+ static void walk_tx_list(Rdb_tx_list_walker* walker)
+ {
+ mysql_mutex_lock(&s_tx_list_mutex);
+ for (auto it : s_tx_list)
+ walker->process_tran(it);
+ mysql_mutex_unlock(&s_tx_list_mutex);
+ }
+
+ int set_status_error(THD *thd, const rocksdb::Status &s,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ Rdb_tbl_def *tbl_def)
+ {
+ DBUG_ASSERT(!s.ok());
+
+ if (s.IsTimedOut())
+ {
+ /*
+ SQL layer has weird expectations. If we return an error when
+ doing a read in DELETE IGNORE, it will ignore the error ("because it's
+ an IGNORE command!") but then will fail an assert, because "error code
+ was returned, but no error happened". Do what InnoDB's
+ convert_error_code_to_mysql() does: force a statement
+ rollback before returning HA_ERR_LOCK_WAIT_TIMEOUT:
+ */
+ my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/);
+ m_detailed_error.copy(timeout_message("index",
+ tbl_def->full_tablename().c_str(),
+ kd->get_name().c_str()));
+
+ return HA_ERR_LOCK_WAIT_TIMEOUT;
+ }
+ if (s.IsBusy())
+ {
+ rocksdb_snapshot_conflict_errors++;
+ return HA_ERR_LOCK_DEADLOCK;
+ }
+ /* TODO: who returns HA_ERR_ROCKSDB_TOO_MANY_LOCKS now?? */
+
+ my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str());
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ THD* get_thd() const { return m_thd; }
+
+ /* Used for tracking io_perf counters */
+ void io_perf_start(Rdb_io_perf *io_perf)
+ {
+ /*
+ Since perf_context is tracked per thread, it is difficult and expensive
+ to maintain perf_context on a per table basis. Therefore, roll all
+ perf_context data into the first table used in a query. This works well
+ for single table queries and is probably good enough for queries that hit
+ multiple tables.
+
+ perf_context stats gathering is started when the table lock is acquired
+ or when ha_rocksdb::start_stmt is called in case of LOCK TABLES. They
+ are recorded when the table lock is released, or when commit/rollback
+ is called on the transaction, whichever comes first. Table lock release
+ and commit/rollback can happen in different orders. In the case where
+ the lock is released before commit/rollback is called, an extra step to
+ gather stats during commit/rollback is needed.
+ */
+ if (m_tbl_io_perf == nullptr &&
+ io_perf->start(rocksdb_perf_context_level(m_thd)))
+ {
+ m_tbl_io_perf= io_perf;
+ }
+ }
+
+ void io_perf_end_and_record(void)
+ {
+ if (m_tbl_io_perf != nullptr)
+ {
+ m_tbl_io_perf->end_and_record(rocksdb_perf_context_level(m_thd));
+ m_tbl_io_perf= nullptr;
+ }
+ }
+
+ void io_perf_end_and_record(Rdb_io_perf *io_perf)
+ {
+ if (m_tbl_io_perf == io_perf)
+ {
+ io_perf_end_and_record();
+ }
+ }
+
+ void set_params(int timeout_sec_arg, int max_row_locks_arg)
+ {
+ m_timeout_sec= timeout_sec_arg;
+ m_max_row_locks= max_row_locks_arg;
+ set_lock_timeout(timeout_sec_arg);
+ }
+
+ virtual void set_lock_timeout(int timeout_sec_arg)= 0;
+
+ ulonglong get_write_count() const { return m_write_count; }
+
+ ulonglong get_lock_count() const { return m_lock_count; }
+
+ virtual void set_sync(bool sync)= 0;
+
+ virtual void release_lock(rocksdb::ColumnFamilyHandle* column_family,
+ const std::string& rowkey)= 0;
+
+ virtual bool prepare(const rocksdb::TransactionName& name)= 0;
+
+ bool commit_or_rollback()
+ {
+ bool res;
+ if (m_is_tx_failed)
+ {
+ rollback();
+ res= false;
+ }
+ else
+ res= commit();
+ return res;
+ }
+
+ bool commit()
+ {
+ if (get_write_count() == 0)
+ {
+ rollback();
+ return false;
+ }
+ else if (m_rollback_only)
+ {
+ /*
+ Transactions marked as rollback_only are expected to be rolled back at
+ prepare(). But there are some exceptions, listed below, where prepare() is
+ never called and commit() is called instead:
+ 1. Binlog is disabled
+ 2. No modification exists in binlog cache for the transaction (#195)
+ In both cases, rolling back the transaction is safe. Nothing is written to
+ the binlog.
+ */
+ my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0));
+ rollback();
+ return true;
+ }
+ else
+ {
+ my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name,
+ &m_mysql_log_offset, &m_mysql_gtid);
+ binlog_manager.update(m_mysql_log_file_name,
+ m_mysql_log_offset,
+ m_mysql_gtid, get_write_batch());
+ return commit_no_binlog();
+ }
+ }
+
+ virtual void rollback()= 0;
+
+ void snapshot_created(const rocksdb::Snapshot *snapshot)
+ {
+ m_read_opts.snapshot = snapshot;
+ rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp);
+ m_is_delayed_snapshot = false;
+ }
+
+ virtual void acquire_snapshot(bool acquire_now)= 0;
+ virtual void release_snapshot()= 0;
+
+ bool has_snapshot() const
+ {
+ return m_read_opts.snapshot != nullptr;
+ }
+
+ private:
+ // The tables we are currently loading. In a partitioned table this can
+ // have more than one entry
+ std::vector<ha_rocksdb*> m_curr_bulk_load;
+
+ public:
+ int finish_bulk_load()
+ {
+ int rc= 0;
+
+ std::vector<ha_rocksdb*>::iterator it;
+ while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end())
+ {
+ int rc2= (*it)->finalize_bulk_load();
+ if (rc2 != 0 && rc == 0)
+ {
+ rc= rc2;
+ }
+ }
+
+ DBUG_ASSERT(m_curr_bulk_load.size() == 0);
+
+ return rc;
+ }
+
+ void start_bulk_load(ha_rocksdb* bulk_load)
+ {
+ /*
+ If we already have an open bulk load of a table and the name doesn't
+ match the current one, close out the currently running one. This allows
+ multiple bulk loads to occur on a partitioned table, but then closes
+ them all out when we switch to another table.
+ */
+ if (!m_curr_bulk_load.empty() &&
+ !bulk_load->same_table(*m_curr_bulk_load[0]))
+ {
+ auto res= finish_bulk_load();
+ SHIP_ASSERT(res == 0);
+ }
+
+ m_curr_bulk_load.push_back(bulk_load);
+ }
+
+ void end_bulk_load(ha_rocksdb* bulk_load)
+ {
+ for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end();
+ it++)
+ {
+ if (*it == bulk_load)
+ {
+ m_curr_bulk_load.erase(it);
+ return;
+ }
+ }
+
+ // Should not reach here
+ SHIP_ASSERT(0);
+ }
+
+ /*
+ Flush the data accumulated so far. This assumes we're doing a bulk insert.
+
+ @detail
+ This should work like transaction commit, except that we don't
+ synchronize with the binlog (there is no API that would let us have the
+ binlog flush the changes accumulated so far and return its current
+ position).
+
+ @todo
+ Add test coverage for what happens when somebody attempts to do bulk
+ inserts while inside a multi-statement transaction.
+ */
+ bool flush_batch()
+ {
+ if (get_write_count() == 0)
+ return false;
+
+ /* Commit the current transaction */
+ if (commit_no_binlog())
+ return true;
+
+ /* Start another one */
+ start_tx();
+ return false;
+ }
+
+ virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value)= 0;
+ virtual rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key)= 0;
+ virtual rocksdb::Status single_delete(
+ rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key)= 0;
+
+ virtual bool has_modifications() const= 0;
+
+ virtual rocksdb::WriteBatchBase* get_indexed_write_batch()= 0;
+ /*
+ Return a WriteBatch that one can write to. The writes will skip any
+ transaction locking. The writes will NOT be visible to the transaction.
+ */
+ rocksdb::WriteBatchBase* get_blind_write_batch()
+ {
+ return get_indexed_write_batch()->GetWriteBatch();
+ }
+
+ virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ std::string* value) const= 0;
+ virtual rocksdb::Status get_for_update(
+ rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key, std::string* value)= 0;
+
+ rocksdb::Iterator *get_iterator(rocksdb::ColumnFamilyHandle* column_family,
+ bool skip_bloom_filter,
+ bool fill_cache,
+ bool read_current= false,
+ bool create_snapshot= true)
+ {
+ // Make sure we are not doing both read_current (which implies we don't
+ // want a snapshot) and create_snapshot (which forces creating one).
+ DBUG_ASSERT(!read_current || !create_snapshot);
+
+ if (create_snapshot)
+ acquire_snapshot(true);
+
+ rocksdb::ReadOptions options= m_read_opts;
+
+ if (skip_bloom_filter)
+ {
+ options.total_order_seek= true;
+ }
+ else
+ {
+ // With this option, Iterator::Valid() returns false if key
+ // is outside of the prefix bloom filter range set at Seek().
+ // Must not be set to true if not using bloom filter.
+ options.prefix_same_as_start= true;
+ }
+ options.fill_cache= fill_cache;
+ if (read_current)
+ {
+ options.snapshot= nullptr;
+ }
+ return get_iterator(options, column_family);
+ }
+
+ virtual bool is_tx_started() const= 0;
+ virtual void start_tx()= 0;
+ virtual void start_stmt()= 0;
+ virtual void rollback_stmt()= 0;
+
+ void set_tx_failed(bool failed_arg) { m_is_tx_failed= failed_arg; }
+
+ bool can_prepare() const
+ {
+ if (m_rollback_only)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0));
+ return false;
+ }
+ return true;
+ }
+
+ int rollback_to_savepoint(void *savepoint)
+ {
+ if (has_modifications())
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "MyRocks currently does not support ROLLBACK TO "
+ "SAVEPOINT if modifying rows.",
+ MYF(0));
+ m_rollback_only= true;
+ return 1;
+ }
+ return 0;
+ }
+
+ /*
+ This is used by transactions started with "START TRANSACTION WITH
+ CONSISTENT [ROCKSDB] SNAPSHOT". When tx_read_only is turned on, the
+ snapshot has to be created via DB::GetSnapshot(), not via the Transaction
+ API.
+ */
+ bool is_tx_read_only() const
+ {
+ return m_tx_read_only;
+ }
+
+ bool is_two_phase() const
+ {
+ return m_is_two_phase;
+ }
+
+ void set_tx_read_only(bool val)
+ {
+ m_tx_read_only= val;
+ }
+
+ explicit Rdb_transaction(THD *thd): m_thd(thd), m_tbl_io_perf(nullptr)
+ {
+ mysql_mutex_lock(&s_tx_list_mutex);
+ s_tx_list.insert(this);
+ mysql_mutex_unlock(&s_tx_list_mutex);
+ }
+
+ virtual ~Rdb_transaction() {
+ mysql_mutex_lock(&s_tx_list_mutex);
+ s_tx_list.erase(this);
+ mysql_mutex_unlock(&s_tx_list_mutex);
+ }
+};
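+
+/*
+ Rough usage sketch of Rdb_transaction (inferred from the code in this file;
+ illustrative only):
+
+ Rdb_transaction *tx= get_or_create_tx(thd); // created lazily, starts the tx
+ tx->put(cf_handle, key, value); // buffered write, takes a row lock
+ tx->commit(); // or tx->rollback()
+*/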
+
+/*
+ This is a rocksdb transaction. Its members represent the current transaction,
+ which consists of:
+ - the snapshot
+ - the changes we've made, which are not committed to the database yet.
+
+ The changes are made to individual tables, which store them here; this
+ object then commits them when the transaction commits.
+*/
+class Rdb_transaction_impl : public Rdb_transaction
+{
+ rocksdb::Transaction *m_rocksdb_tx= nullptr;
+ rocksdb::Transaction *m_rocksdb_reuse_tx= nullptr;
+
+ public:
+ void set_lock_timeout(int timeout_sec_arg) override
+ {
+ if (m_rocksdb_tx)
+ m_rocksdb_tx->SetLockTimeout(m_timeout_sec * 1000);
+ }
+
+ void set_sync(bool sync) override
+ {
+ m_rocksdb_tx->GetWriteOptions()->sync= sync;
+ }
+
+ void release_lock(rocksdb::ColumnFamilyHandle* column_family,
+ const std::string &rowkey) override
+ {
+ if (!THDVAR(m_thd, lock_scanned_rows))
+ {
+ m_rocksdb_tx->UndoGetForUpdate(column_family, rocksdb::Slice(rowkey));
+ }
+ }
+
+ private:
+ void release_tx(void)
+ {
+ // We are done with the current active transaction object. Preserve it
+ // for later reuse.
+ DBUG_ASSERT(m_rocksdb_reuse_tx == nullptr);
+ m_rocksdb_reuse_tx= m_rocksdb_tx;
+ m_rocksdb_tx= nullptr;
+ }
+
+ bool prepare(const rocksdb::TransactionName& name) override
+ {
+ rocksdb::Status s;
+ s= m_rocksdb_tx->SetName(name);
+ if (!s.ok())
+ {
+ rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT);
+ return false;
+ }
+
+ s= m_rocksdb_tx->Prepare();
+ if (!s.ok())
+ {
+ rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT);
+ return false;
+ }
+ return true;
+ }
+
+ bool commit_no_binlog() override
+ {
+ bool res= false;
+ release_snapshot();
+ rocksdb::Status s= m_rocksdb_tx->Commit();
+ if (!s.ok())
+ {
+ rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT);
+ res= true;
+ }
+
+ /* Save the transaction object to be reused */
+ release_tx();
+
+ m_write_count= 0;
+ m_lock_count= 0;
+ set_tx_read_only(false);
+ m_rollback_only= false;
+ return res;
+ }
+
+ public:
+ void rollback() override
+ {
+ m_write_count= 0;
+ m_lock_count= 0;
+ m_ddl_transaction= false;
+ if (m_rocksdb_tx)
+ {
+ release_snapshot();
+ /* This will also release all of the locks: */
+ m_rocksdb_tx->Rollback();
+
+ /* Save the transaction object to be reused */
+ release_tx();
+
+ set_tx_read_only(false);
+ m_rollback_only= false;
+ }
+ }
+
+ void acquire_snapshot(bool acquire_now) override
+ {
+ if (m_read_opts.snapshot == nullptr) {
+ if (is_tx_read_only()) {
+ snapshot_created(rdb->GetSnapshot());
+ }
+ else if (acquire_now) {
+ m_rocksdb_tx->SetSnapshot();
+ snapshot_created(m_rocksdb_tx->GetSnapshot());
+ }
+ else if (!m_is_delayed_snapshot) {
+ m_rocksdb_tx->SetSnapshotOnNextOperation(m_notifier);
+ m_is_delayed_snapshot = true;
+ }
+ }
+ }
+
+ void release_snapshot() override
+ {
+ bool need_clear = m_is_delayed_snapshot;
+
+ if (m_read_opts.snapshot != nullptr)
+ {
+ m_snapshot_timestamp = 0;
+ if (is_tx_read_only())
+ {
+ rdb->ReleaseSnapshot(m_read_opts.snapshot);
+ need_clear = false;
+ }
+ else
+ {
+ need_clear = true;
+ }
+ m_read_opts.snapshot = nullptr;
+ }
+
+ if (need_clear && m_rocksdb_tx != nullptr)
+ m_rocksdb_tx->ClearSnapshot();
+ }
+
+ bool has_snapshot()
+ {
+ return m_read_opts.snapshot != nullptr;
+ }
+
+ const char *err_too_many_locks=
+ "Number of locks held by the transaction exceeded @@rocksdb_max_row_locks";
+
+ rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value) override
+ {
+ ++m_write_count;
+ ++m_lock_count;
+ if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
+ return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks));
+ return m_rocksdb_tx->Put(column_family, key, value);
+ }
+
+ rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key) override
+ {
+ ++m_write_count;
+ ++m_lock_count;
+ if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
+ return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks));
+ return m_rocksdb_tx->Delete(column_family, key);
+ }
+
+ rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key) override
+ {
+ ++m_write_count;
+ ++m_lock_count;
+ if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
+ return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks));
+ return m_rocksdb_tx->SingleDelete(column_family, key);
+ }
+
+ bool has_modifications() const override
+ {
+ return m_rocksdb_tx->GetWriteBatch() &&
+ m_rocksdb_tx->GetWriteBatch()->GetWriteBatch() &&
+ m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()->Count() > 0;
+ }
+
+ rocksdb::WriteBatchBase* get_write_batch() override
+ {
+ if (is_two_phase())
+ {
+ return m_rocksdb_tx->GetCommitTimeWriteBatch();
+ }
+ return m_rocksdb_tx->GetWriteBatch()->GetWriteBatch();
+ }
+
+ /*
+ Return a WriteBatch that one can write to. The writes will skip any
+ transaction locking. The writes WILL be visible to the transaction.
+ */
+ rocksdb::WriteBatchBase* get_indexed_write_batch() override
+ {
+ ++m_write_count;
+ return m_rocksdb_tx->GetWriteBatch();
+ }
+
+ rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ std::string* value) const override
+ {
+ return m_rocksdb_tx->Get(m_read_opts, column_family, key, value);
+ }
+
+ rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ std::string* value) override
+ {
+ if (++m_lock_count > m_max_row_locks)
+ return rocksdb::Status::Aborted(rocksdb::Slice(err_too_many_locks));
+ return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value);
+ }
+
+ rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options,
+ rocksdb::ColumnFamilyHandle* column_family)
+ override
+ {
+ return m_rocksdb_tx->GetIterator(options, column_family);
+ }
+
+ bool is_tx_started() const override
+ {
+ return (m_rocksdb_tx != nullptr);
+ }
+
+ void start_tx() override
+ {
+ rocksdb::TransactionOptions tx_opts;
+ rocksdb::WriteOptions write_opts;
+ tx_opts.set_snapshot= false;
+ tx_opts.lock_timeout= m_timeout_sec * 1000;
+
+ write_opts.sync= THDVAR(m_thd, write_sync);
+ write_opts.disableWAL= THDVAR(m_thd, write_disable_wal);
+ write_opts.ignore_missing_column_families=
+ THDVAR(m_thd, write_ignore_missing_column_families);
+ m_is_two_phase= !rocksdb_disable_2pc;
+
+ /*
+ If m_rocksdb_reuse_tx is null this will create a new transaction object.
+ Otherwise it will reuse the existing one.
+ */
+ m_rocksdb_tx= rdb->BeginTransaction(write_opts, tx_opts,
+ m_rocksdb_reuse_tx);
+ m_rocksdb_reuse_tx= nullptr;
+
+ m_read_opts= rocksdb::ReadOptions();
+
+ m_ddl_transaction= false;
+ }
+
+ /*
+ Start a statement inside a multi-statement transaction.
+
+ @todo: are we sure this is called once (and not several times) per
+ statement start?
+
+ For hooking to start of statement that is its own transaction, see
+ ha_rocksdb::external_lock().
+ */
+ void start_stmt() override
+ {
+ // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation)
+ acquire_snapshot(false);
+ m_rocksdb_tx->SetSavePoint();
+ }
+
+ /*
+ This must be called when the last statement is rolled back but the
+ transaction continues.
+ */
+ void rollback_stmt() override
+ {
+ /* TODO: here we must release the locks taken since the start_stmt() call */
+ if (m_rocksdb_tx)
+ {
+ const rocksdb::Snapshot *org_snapshot = m_rocksdb_tx->GetSnapshot();
+ m_rocksdb_tx->RollbackToSavePoint();
+
+ const rocksdb::Snapshot *cur_snapshot = m_rocksdb_tx->GetSnapshot();
+ if (org_snapshot != cur_snapshot)
+ {
+ if (org_snapshot != nullptr)
+ m_snapshot_timestamp = 0;
+
+ m_read_opts.snapshot = cur_snapshot;
+ if (cur_snapshot != nullptr)
+ rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp);
+ else
+ m_is_delayed_snapshot = true;
+ }
+ }
+ }
+
+ explicit Rdb_transaction_impl(THD *thd) :
+ Rdb_transaction(thd), m_rocksdb_tx(nullptr)
+ {
+ // Create a notifier that can be called when a snapshot gets generated.
+ m_notifier = std::make_shared<Rdb_snapshot_notifier>(this);
+ }
+
+ virtual ~Rdb_transaction_impl()
+ {
+ rollback();
+
+ // Theoretically the notifier could outlive the Rdb_transaction_impl
+ // (because of the shared_ptr), so let it know it can't reference
+ // the transaction anymore.
+ m_notifier->detach();
+
+ // Free any transaction memory that is still hanging around.
+ delete m_rocksdb_reuse_tx;
+ DBUG_ASSERT(m_rocksdb_tx == nullptr);
+ }
+};
+
+/* This is a rocksdb write batch. This class doesn't hold or wait on any
+ transaction locks (it skips the rocksdb transaction API), thus giving better
+ performance. The commit is done through rdb->GetBaseDB()->Write().
+
+ Currently this is only used for replication threads, which are guaranteed
+ to be non-conflicting. Any further usage of this class should be thought
+ through thoroughly.
+*/
+class Rdb_writebatch_impl : public Rdb_transaction
+{
+ rocksdb::WriteBatchWithIndex* m_batch;
+ rocksdb::WriteOptions write_opts;
+ // Called after commit/rollback.
+ void reset()
+ {
+ m_batch->Clear();
+ m_read_opts = rocksdb::ReadOptions();
+ m_ddl_transaction= false;
+ }
+ private:
+ bool prepare(const rocksdb::TransactionName& name) override
+ {
+ return true;
+ }
+
+ bool commit_no_binlog() override
+ {
+ bool res= false;
+ release_snapshot();
+ rocksdb::Status s= rdb->GetBaseDB()->Write(write_opts,
+ m_batch->GetWriteBatch());
+ if (!s.ok())
+ {
+ rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT);
+ res= true;
+ }
+ reset();
+
+ m_write_count= 0;
+ set_tx_read_only(false);
+ m_rollback_only= false;
+ return res;
+ }
+ public:
+ void set_lock_timeout(int timeout_sec_arg) override
+ {
+ // Nothing to do here.
+ }
+
+ void set_sync(bool sync) override
+ {
+ write_opts.sync= sync;
+ }
+
+ void release_lock(rocksdb::ColumnFamilyHandle* column_family,
+ const std::string &rowkey) override
+ {
+ // Nothing to do here since we don't hold any row locks.
+ }
+
+ void rollback() override
+ {
+ m_write_count= 0;
+ m_lock_count= 0;
+ release_snapshot();
+
+ reset();
+ set_tx_read_only(false);
+ m_rollback_only= false;
+ }
+
+ void acquire_snapshot(bool acquire_now) override
+ {
+ if (m_read_opts.snapshot == nullptr)
+ snapshot_created(rdb->GetSnapshot());
+ }
+
+ void release_snapshot() override
+ {
+ if (m_read_opts.snapshot != nullptr)
+ {
+ rdb->ReleaseSnapshot(m_read_opts.snapshot);
+ m_read_opts.snapshot = nullptr;
+ }
+ }
+
+ rocksdb::Status put(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value) override
+ {
+ ++m_write_count;
+ m_batch->Put(column_family, key, value);
+ // Note Put/Delete in write batch doesn't return any error code. We simply
+ // return OK here.
+ return rocksdb::Status::OK();
+ }
+
+ rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key) override
+ {
+ ++m_write_count;
+ m_batch->Delete(column_family, key);
+ return rocksdb::Status::OK();
+ }
+
+ rocksdb::Status single_delete(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key) override
+ {
+ ++m_write_count;
+ m_batch->SingleDelete(column_family, key);
+ return rocksdb::Status::OK();
+ }
+
+ bool has_modifications() const override
+ {
+ return m_batch->GetWriteBatch()->Count() > 0;
+ }
+
+ rocksdb::WriteBatchBase* get_write_batch() override
+ {
+ return m_batch;
+ }
+
+ rocksdb::WriteBatchBase* get_indexed_write_batch() override
+ {
+ ++m_write_count;
+ return m_batch;
+ }
+
+ rocksdb::Status get(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ std::string* value) const override
+ {
+ return m_batch->GetFromBatchAndDB(
+ rdb, m_read_opts, column_family, key, value);
+ }
+
+ rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key,
+ std::string* value) override
+ {
+ return get(column_family, key, value);
+ }
+
+ rocksdb::Iterator *get_iterator(const rocksdb::ReadOptions &options,
+ rocksdb::ColumnFamilyHandle* column_family)
+ override
+ {
+ auto it = rdb->NewIterator(options);
+ return m_batch->NewIteratorWithBase(it);
+ }
+
+ bool is_tx_started() const override
+ {
+ return (m_batch != nullptr);
+ }
+
+ void start_tx() override
+ {
+ reset();
+ write_opts.sync= THDVAR(m_thd, write_sync);
+ write_opts.disableWAL= THDVAR(m_thd, write_disable_wal);
+ write_opts.ignore_missing_column_families=
+ THDVAR(m_thd, write_ignore_missing_column_families);
+ }
+
+ void start_stmt() override
+ {
+ m_batch->SetSavePoint();
+ }
+
+ void rollback_stmt() override
+ {
+ if (m_batch)
+ m_batch->RollbackToSavePoint();
+ }
+
+ explicit Rdb_writebatch_impl(THD *thd) :
+ Rdb_transaction(thd), m_batch(nullptr)
+ {
+ m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(),
+ 0 , true);
+ }
+
+ virtual ~Rdb_writebatch_impl()
+ {
+ rollback();
+ delete m_batch;
+ }
+};
+
+void Rdb_snapshot_notifier::SnapshotCreated(const rocksdb::Snapshot *snapshot)
+{
+ if (m_owning_tx != nullptr)
+ {
+ m_owning_tx->snapshot_created(snapshot);
+ }
+}
+
+std::multiset<Rdb_transaction*> Rdb_transaction::s_tx_list;
+mysql_mutex_t Rdb_transaction::s_tx_list_mutex;
+
+static Rdb_transaction* &get_tx_from_thd(THD *thd)
+{
+ return *reinterpret_cast<Rdb_transaction**>(
+ my_core::thd_ha_data(thd, rocksdb_hton));
+}
+
+namespace {
+
+class Rdb_perf_context_guard
+{
+ Rdb_io_perf m_io_perf;
+ THD *m_thd;
+
+ public:
+ explicit Rdb_perf_context_guard(THD *thd) : m_thd(thd)
+ {
+ Rdb_transaction*& tx= get_tx_from_thd(m_thd);
+ /*
+ if perf_context information is already being recorded, this becomes a
+ no-op
+ */
+ if (tx != nullptr)
+ {
+ tx->io_perf_start(&m_io_perf);
+ }
+ }
+
+ ~Rdb_perf_context_guard()
+ {
+ Rdb_transaction*& tx= get_tx_from_thd(m_thd);
+ if (tx != nullptr)
+ {
+ tx->io_perf_end_and_record();
+ }
+ }
+};
+
+} // anonymous namespace
+
+/*
+ TODO: maybe, call this in external_lock() and store in ha_rocksdb..
+*/
+
+static Rdb_transaction *get_or_create_tx(THD *thd)
+{
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+ // TODO: this is called too many times.. O(#rows)
+ if (tx == nullptr)
+ {
+ if (rpl_skip_tx_api_var && thd->rli_slave)
+ tx= new Rdb_writebatch_impl(thd);
+ else
+ tx= new Rdb_transaction_impl(thd);
+ tx->set_params(THDVAR(thd, lock_wait_timeout),
+ THDVAR(thd, max_row_locks));
+ tx->start_tx();
+ }
+ else
+ {
+ tx->set_params(THDVAR(thd, lock_wait_timeout),
+ THDVAR(thd, max_row_locks));
+ if (!tx->is_tx_started())
+ {
+ tx->start_tx();
+ }
+ }
+
+ return tx;
+}
+
+
+static int rocksdb_close_connection(handlerton* hton, THD* thd)
+{
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+ if (tx != nullptr)
+ {
+ int rc= tx->finish_bulk_load();
+ if (rc != 0)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Error %d finalizing last SST file while "
+ "disconnecting", rc);
+ abort_with_stack_traces();
+ }
+
+ delete tx;
+ tx= nullptr;
+ }
+ return 0;
+}
+
+/*
+ * Serializes an xid to a string so that it can
+ * be used as a rocksdb transaction name
+ */
+static std::string rdb_xid_to_string(const XID& src)
+{
+ DBUG_ASSERT(src.gtrid_length >= 0 && src.gtrid_length <= MAXGTRIDSIZE);
+ DBUG_ASSERT(src.bqual_length >= 0 && src.bqual_length <= MAXBQUALSIZE);
+
+ std::string buf;
+ buf.reserve(RDB_XIDHDR_LEN + src.gtrid_length + src.bqual_length);
+
+ /*
+ * Expand formatID to fill 8 bytes if it doesn't already, then reinterpret
+ * the bit pattern as unsigned and store it in network byte order.
+ */
+ uchar fidbuf[RDB_FORMATID_SZ];
+ int64 signed_fid8= src.formatID;
+ uint64 raw_fid8= *reinterpret_cast<uint64*>(&signed_fid8);
+ rdb_netbuf_store_uint64(fidbuf, raw_fid8);
+ buf.append(reinterpret_cast<const char*>(fidbuf), RDB_FORMATID_SZ);
+
+ buf.push_back(src.gtrid_length);
+ buf.push_back(src.bqual_length);
+ buf.append(src.data, (src.gtrid_length) + (src.bqual_length));
+ return buf;
+}
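+
+/*
+ Layout sketch of the serialized XID produced above (sizes follow the
+ RDB_*_SZ constants used here; illustrative only):
+
+ [ formatID: 8 bytes, network byte order ]
+ [ gtrid_length: 1 byte ] [ bqual_length: 1 byte ]
+ [ data: gtrid_length + bqual_length bytes ]
+
+ rdb_xid_from_string() below parses the same layout back into an XID.
+*/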
+
+
+/**
+ Called by hton->flush_logs after MySQL group commit prepares a set of
+ transactions.
+*/
+static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__)),
+ ulonglong target_lsn __attribute__((__unused__)))
+{
+ DBUG_ASSERT(rdb != nullptr);
+ rocksdb::Status s= rdb->SyncWAL();
+ if (!s.ok()) {
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ For a slave, prepare() updates the slave_gtid_info table which tracks the
+ replication progress.
+*/
+static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx,
+ bool async)
+{
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+ if (!tx->can_prepare())
+ {
+ return 1;
+ }
+ if (prepare_tx ||
+ (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
+ /* We were instructed to prepare the whole transaction, or
+ this is an SQL statement end and autocommit is on */
+ std::vector<st_slave_gtid_info> slave_gtid_info;
+ my_core::thd_slave_gtid_info(thd, &slave_gtid_info);
+ for (auto it : slave_gtid_info) {
+ rocksdb::WriteBatchBase* write_batch = tx->get_blind_write_batch();
+ binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch);
+ }
+
+ if (tx->is_two_phase()) {
+ if (thd->durability_property == HA_IGNORE_DURABILITY || async) {
+ tx->set_sync(false);
+ }
+ XID xid;
+ thd_get_xid(thd, reinterpret_cast<MYSQL_XID*>(&xid));
+ if (!tx->prepare(rdb_xid_to_string(xid))) {
+ return 1;
+ }
+ if (thd->durability_property == HA_IGNORE_DURABILITY) {
+ /**
+ We set the log sequence number to 1 just to trigger hton->flush_logs.
+ */
+ thd_store_lsn(thd, 1, DB_TYPE_ROCKSDB);
+ }
+ }
+
+ DEBUG_SYNC(thd, "rocksdb.prepared");
+ }
+
+ return 0;
+}
+
+/**
+ Commit a transaction that was prepared under the given XID. This (and the
+ rollback counterpart below) is needed for two-phase commit / XA recovery.
+*/
+static int rocksdb_commit_by_xid(handlerton* hton, XID* xid)
+{
+ auto name= rdb_xid_to_string(*xid);
+ rocksdb::Transaction *trx= rdb->GetTransactionByName(name);
+ if (trx == nullptr) {
+ return 1;
+ }
+ rocksdb::Status s= trx->Commit();
+ if (!s.ok()) {
+ return 1;
+ }
+ delete trx;
+ return 0;
+}
+
+static int rocksdb_rollback_by_xid(handlerton* hton __attribute__((__unused__)),
+ XID* xid)
+{
+ auto name= rdb_xid_to_string(*xid);
+ rocksdb::Transaction *trx= rdb->GetTransactionByName(name);
+ if (trx == nullptr) {
+ return 1;
+ }
+ rocksdb::Status s= trx->Rollback();
+ if (!s.ok()) {
+ return 1;
+ }
+ delete trx;
+ return 0;
+}
+
+/**
+ Rebuilds an XID from a serialized version stored in a string.
+*/
+static void rdb_xid_from_string(const std::string& src, XID *dst)
+{
+ DBUG_ASSERT(dst != nullptr);
+ uint offset= 0;
+ uint64 raw_fid8=
+ rdb_netbuf_to_uint64(reinterpret_cast<const uchar*>(src.data()));
+ int64 signed_fid8= *reinterpret_cast<int64*>(&raw_fid8);
+ dst->formatID= signed_fid8;
+ offset += RDB_FORMATID_SZ;
+ dst->gtrid_length= src.at(offset);
+ offset += RDB_GTRID_SZ;
+ dst->bqual_length= src.at(offset);
+ offset += RDB_BQUAL_SZ;
+
+ DBUG_ASSERT(dst->gtrid_length >= 0 && dst->gtrid_length <= MAXGTRIDSIZE);
+ DBUG_ASSERT(dst->bqual_length >= 0 && dst->bqual_length <= MAXBQUALSIZE);
+
+ src.copy(dst->data, (dst->gtrid_length)+(dst->bqual_length), RDB_XIDHDR_LEN);
+}
+
+/**
+ Reads the last committed binary log info from the RocksDB system row.
+ The info is needed for crash-safe slave/master to work.
+*/
+static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len,
+ char* binlog_file, my_off_t* binlog_pos)
+{
+ if (binlog_file && binlog_pos)
+ {
+ char file_buf[FN_REFLEN+1]= {0};
+ my_off_t pos;
+ char gtid_buf[FN_REFLEN+1]= {0};
+ if (binlog_manager.read(file_buf, &pos, gtid_buf))
+ {
+ if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos))
+ {
+ memcpy(binlog_file, file_buf, FN_REFLEN + 1);
+ *binlog_pos= pos;
+ fprintf(stderr, "RocksDB: Last binlog file position %llu,"
+ " file name %s\n", pos, file_buf);
+ if (*gtid_buf)
+ {
+ fprintf(stderr, "RocksDB: Last MySQL Gtid %s\n", gtid_buf);
+ }
+ }
+ }
+ }
+
+ if (len == 0 || xid_list == nullptr)
+ {
+ return 0;
+ }
+
+ std::vector<rocksdb::Transaction*> trans_list;
+ rdb->GetAllPreparedTransactions(&trans_list);
+
+ uint count= 0;
+ for (auto& trans : trans_list)
+ {
+ if (count >= len)
+ {
+ break;
+ }
+ auto name= trans->GetName();
+ rdb_xid_from_string(name, &xid_list[count]);
+ count++;
+ }
+ return count;
+}
+
+static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx, bool)
+{
+ DBUG_ENTER("rocksdb_commit");
+
+ /* this will trigger saving of perf_context information */
+ Rdb_perf_context_guard guard(thd);
+
+ /* note: h->external_lock(F_UNLCK) is called after this function is called */
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+
+ if (tx != nullptr)
+ {
+ if (commit_tx ||
+ (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+ {
+ /*
+ We get here
+ - For a COMMIT statement that finishes a multi-statement transaction
+ - For a statement that has its own transaction
+ */
+ if (tx->commit())
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ else
+ {
+ /*
+ We get here when committing a statement within a transaction.
+
+ We don't need to do anything here. tx->start_stmt() will notify
+ Rdb_transaction_impl that another statement has started.
+ */
+ tx->set_tx_failed(false);
+ }
+
+ if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED)
+ {
+ // For READ_COMMITTED, we release any existing snapshot so that we will
+ // see any changes that occurred since the last statement.
+ tx->release_snapshot();
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+static int rocksdb_rollback(handlerton* hton, THD* thd, bool rollback_tx)
+{
+ Rdb_perf_context_guard guard(thd);
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+
+ if (tx != nullptr)
+ {
+ if (rollback_tx)
+ {
+ /*
+ We get here when
+ - a ROLLBACK statement is issued.
+
+ Discard the changes made by the transaction
+ */
+ tx->rollback();
+ }
+ else
+ {
+ /*
+ We get here when
+ - a statement with AUTOCOMMIT=1 is being rolled back (because of some
+ error)
+ - a statement inside a transaction is rolled back
+ */
+
+ tx->rollback_stmt();
+ tx->set_tx_failed(true);
+ }
+
+ if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED)
+ {
+ // For READ_COMMITTED, we release any existing snapshot so that we will
+ // see any changes that occurred since the last statement.
+ tx->release_snapshot();
+ }
+ }
+ return 0;
+}
+
+static bool print_stats(THD* thd,
+ std::string const& type,
+ std::string const& name,
+ std::string const& status,
+ stat_print_fn *stat_print)
+{
+ return stat_print(thd, type.c_str(), type.size(), name.c_str(), name.size(),
+ status.c_str(), status.size());
+}
+
+static std::string format_string(
+ const char *format,
+ ...)
+{
+ std::string res;
+ va_list args;
+ va_list args_copy;
+
+ va_start(args, format);
+ va_copy(args_copy, args);
+
+ size_t len = vsnprintf(nullptr, 0, format, args) + 1;
+ va_end(args);
+
+ if (len == 0) {
+ res = std::string("");
+ }
+ else {
+ char buff[len];
+ (void) vsnprintf(buff, len, format, args_copy);
+
+ res = std::string(buff);
+ }
+
+ va_end(args_copy);
+
+ return res;
+}
+
+class Rdb_snapshot_status : public Rdb_tx_list_walker
+{
+ private:
+ std::string m_data;
+
+ static std::string current_timestamp(void)
+ {
+ static const char *const format = "%d-%02d-%02d %02d:%02d:%02d";
+ time_t currtime;
+ struct tm currtm;
+
+ time(&currtime);
+
+ localtime_r(&currtime, &currtm);
+
+ return format_string(format, currtm.tm_year + 1900, currtm.tm_mon + 1,
+ currtm.tm_mday, currtm.tm_hour, currtm.tm_min,
+ currtm.tm_sec);
+ }
+
+ static std::string get_header(void)
+ {
+ return
+ "\n============================================================\n" +
+ current_timestamp() +
+ " ROCKSDB TRANSACTION MONITOR OUTPUT\n"
+ "============================================================\n"
+ "---------\n"
+ "SNAPSHOTS\n"
+ "---------\n"
+ "LIST OF SNAPSHOTS FOR EACH SESSION:\n";
+ }
+
+ static std::string get_footer(void)
+ {
+ return
+ "-----------------------------------------\n"
+ "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n"
+ "=========================================\n";
+ }
+
+ public:
+ Rdb_snapshot_status() : m_data(get_header()) {}
+
+ std::string getResult() { return m_data + get_footer(); }
+
+ /* Implement Rdb_transaction interface */
+ /* Create one row in the snapshot status table */
+ void process_tran(const Rdb_transaction *tx) override
+ {
+ /* Calculate the duration the snapshot has existed */
+ int64_t snapshot_timestamp = tx->m_snapshot_timestamp;
+ if (snapshot_timestamp != 0)
+ {
+ int64_t curr_time;
+ rdb->GetEnv()->GetCurrentTime(&curr_time);
+
+ THD* thd = tx->get_thd();
+
+ m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n"
+ "MySQL thread id %lu, OS thread handle %p\n"
+ "lock count %llu, write count %llu\n",
+ curr_time - snapshot_timestamp,
+ my_core::thd_get_thread_id(thd), thd,
+ tx->get_lock_count(), tx->get_write_count());
+ }
+ }
+};
+
+/* Generate the snapshot status table */
+static bool rocksdb_show_snapshot_status(handlerton* hton,
+ THD* thd,
+ stat_print_fn* stat_print)
+{
+ Rdb_snapshot_status showStatus;
+
+ Rdb_transaction::walk_tx_list(&showStatus);
+
+ // Send the result data back to MySQL
+ return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(),
+ stat_print);
+}
+
+/*
+ This is called for SHOW ENGINE ROCKSDB STATUS|LOGS|etc.
+
+ For HA_ENGINE_STATUS, produce per-DB stats, per-column-family stats and
+ memory statistics; for HA_ENGINE_TRX, produce the snapshot monitor output.
+*/
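+/*
+ Illustrative usage:
+ SHOW ENGINE ROCKSDB STATUS; -- DB, column family and memory statistics
+ SHOW ENGINE ROCKSDB TRANSACTION STATUS; -- snapshot monitor output
+*/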
+
+static bool rocksdb_show_status(handlerton* hton,
+ THD* thd,
+ stat_print_fn* stat_print,
+ enum ha_stat_type stat_type)
+{
+ bool res= false;
+ if (stat_type == HA_ENGINE_STATUS)
+ {
+ std::string str;
+
+ /* Per DB stats */
+ if (rdb->GetProperty("rocksdb.dbstats", &str)) {
+ res |= print_stats(thd, "DBSTATS", "rocksdb", str, stat_print);
+ }
+
+ /* Per column family stats */
+ for (auto cf_name : cf_manager.get_cf_names())
+ {
+ rocksdb::ColumnFamilyHandle* cfh;
+ bool is_automatic;
+
+ /*
+ Only the cf name is important. Whether it was generated automatically
+ does not matter, so is_automatic is ignored.
+ */
+ cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic);
+ if (cfh == nullptr)
+ continue;
+
+ if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str))
+ continue;
+
+ res |= print_stats(thd, "CF_COMPACTION", cf_name, str, stat_print);
+ }
+
+ /* Memory Statistics */
+ std::vector<rocksdb::DB*> dbs;
+ std::unordered_set<const rocksdb::Cache*> cache_set;
+ size_t internal_cache_count = 0;
+ size_t kDefaultInternalCacheSize = 8 * 1024 * 1024;
+ char buf[100];
+
+ dbs.push_back(rdb);
+ cache_set.insert(rocksdb_tbl_options.block_cache.get());
+ for (const auto& cf_handle : cf_manager.get_all_cf())
+ {
+ rocksdb::ColumnFamilyDescriptor cf_desc;
+ cf_handle->GetDescriptor(&cf_desc);
+ auto* table_factory = cf_desc.options.table_factory.get();
+ if (table_factory != nullptr)
+ {
+ std::string tf_name = table_factory->Name();
+ if (tf_name.find("BlockBasedTable") != std::string::npos)
+ {
+ const rocksdb::BlockBasedTableOptions* bbt_opt =
+ reinterpret_cast<rocksdb::BlockBasedTableOptions*>(
+ table_factory->GetOptions());
+ if (bbt_opt != nullptr)
+ {
+ if (bbt_opt->block_cache.get() != nullptr)
+ {
+ cache_set.insert(bbt_opt->block_cache.get());
+ }
+ else
+ {
+ internal_cache_count++;
+ }
+ cache_set.insert(bbt_opt->block_cache_compressed.get());
+ }
+ }
+ }
+ }
+
+ std::map<rocksdb::MemoryUtil::UsageType, uint64_t> temp_usage_by_type;
+ str.clear();
+ rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(
+ dbs, cache_set, &temp_usage_by_type);
+ snprintf(buf, sizeof(buf), "\nMemTable Total: %lu",
+ temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]);
+ str.append(buf);
+ snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %lu",
+ temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]);
+ str.append(buf);
+ snprintf(buf, sizeof(buf), "\nTable Readers Total: %lu",
+ temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]);
+ str.append(buf);
+ snprintf(buf, sizeof(buf), "\nCache Total: %lu",
+ temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]);
+ str.append(buf);
+ snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %lu",
+ internal_cache_count * kDefaultInternalCacheSize);
+ str.append(buf);
+ res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print);
+ }
+ else if (stat_type == HA_ENGINE_TRX)
+ {
+ /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */
+ res |= rocksdb_show_snapshot_status(hton, thd, stat_print);
+ }
+
+ return res;
+}
+
+static inline void rocksdb_register_tx(handlerton *hton, THD *thd,
+ Rdb_transaction *tx)
+{
+ trans_register_ha(thd, FALSE, rocksdb_hton);
+ if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ {
+ tx->start_stmt();
+ trans_register_ha(thd, TRUE, rocksdb_hton);
+ }
+}
+
+/*
+ Supporting START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT
+
+ Features:
+ 1. Supporting START TRANSACTION WITH CONSISTENT SNAPSHOT
+ 2. Getting current binlog position in addition to #1.
+
+ The second feature is done by START TRANSACTION WITH
+ CONSISTENT ROCKSDB SNAPSHOT. This is Facebook's extension, and
+ it works like existing START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT.
+
+ - When no engine is specified, START TRANSACTION WITH CONSISTENT SNAPSHOT
+ takes both InnoDB and RocksDB snapshots, and both InnoDB and RocksDB
+ participate in the transaction. When executing COMMIT, both InnoDB and
+ RocksDB modifications are committed. Remember that XA is not supported yet,
+ so mixing engines is not recommended anyway.
+
+ - When an engine is specified, START TRANSACTION WITH CONSISTENT ... takes
+ a snapshot for the specified engine only, but it still starts both
+ InnoDB and RocksDB transactions.
+*/
+static int rocksdb_start_tx_and_assign_read_view(
+ handlerton* hton, /*!< in: RocksDB handlerton */
+ THD* thd, /*!< in: MySQL thread handle of the
+ user for whom the transaction should
+ be committed */
+ char* binlog_file, /* out: binlog file for last commit */
+ ulonglong* binlog_pos, /* out: binlog pos for last commit */
+ char** gtid_executed, /* out: Gtids logged until last commit */
+ int* gtid_executed_length) /*out: Length of gtid_executed string */
+{
+ Rdb_perf_context_guard guard(thd);
+
+ ulong const tx_isolation = my_core::thd_tx_isolation(thd);
+
+ if (tx_isolation != ISO_REPEATABLE_READ)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Only REPEATABLE READ isolation level is supported "
+ "for START TRANSACTION WITH CONSISTENT SNAPSHOT "
+ "in RocksDB Storage Engine.", MYF(0));
+ return 1;
+ }
+
+ if (binlog_file)
+ {
+ if (binlog_pos && mysql_bin_log_is_open())
+ mysql_bin_log_lock_commits();
+ else
+ return 1;
+ }
+
+ Rdb_transaction* tx= get_or_create_tx(thd);
+ DBUG_ASSERT(!tx->has_snapshot());
+ tx->set_tx_read_only(true);
+ rocksdb_register_tx(hton, thd, tx);
+ tx->acquire_snapshot(true);
+
+ if (binlog_file)
+ mysql_bin_log_unlock_commits(binlog_file, binlog_pos, gtid_executed,
+ gtid_executed_length);
+
+ return 0;
+}
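+
+/*
+ Illustrative usage of the feature described above (the binlog coordinates
+ are returned through the out parameters of this function):
+
+ START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+*/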
+
+/* Dummy SAVEPOINT support. This is needed for long running transactions
+ * like mysqldump (https://bugs.mysql.com/bug.php?id=71017).
+ * Current SAVEPOINT does not correctly handle ROLLBACK and does not return
+ * errors. This needs to be addressed in future versions (Issue#96).
+ */
+static int rocksdb_savepoint(handlerton *hton, THD *thd, void *savepoint)
+{
+ return 0;
+}
+
+static int rocksdb_rollback_to_savepoint(handlerton *hton, THD *thd,
+ void *savepoint)
+{
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+ return tx->rollback_to_savepoint(savepoint);
+}
+
+static bool rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *hton,
+ THD *thd)
+{
+ return true;
+}
+
+/*
+ This is called for INFORMATION_SCHEMA
+*/
+static void rocksdb_update_table_stats(
+ /* per-table stats callback */
+ void (*cb)(const char* db, const char* tbl, bool is_partition,
+ my_io_perf_t* r, my_io_perf_t* w, my_io_perf_t* r_blob,
+ my_io_perf_t* r_primary, my_io_perf_t* r_secondary,
+ page_stats_t *page_stats, comp_stats_t *comp_stats,
+ int n_lock_wait, int n_lock_wait_timeout,
+ const char* engine))
+{
+ my_io_perf_t io_perf_read;
+ my_io_perf_t io_perf;
+ page_stats_t page_stats;
+ comp_stats_t comp_stats;
+ std::vector<std::string> tablenames;
+
+ /*
+ Most of these are for InnoDB, so we set them to 0.
+ TODO: possibly separate out primary vs. secondary index reads
+ */
+ memset(&io_perf, 0, sizeof(io_perf));
+ memset(&page_stats, 0, sizeof(page_stats));
+ memset(&comp_stats, 0, sizeof(comp_stats));
+
+ tablenames= rdb_open_tables.get_table_names();
+
+ for (const auto& it : tablenames)
+ {
+ Rdb_table_handler *table_handler;
+ std::string str, dbname, tablename, partname;
+ char dbname_sys[NAME_LEN + 1];
+ char tablename_sys[NAME_LEN + 1];
+ bool is_partition;
+
+ if (rdb_normalize_tablename(it, &str)) {
+ /* The function needs to return void because of the interface, and we've
+ * detected an error that shouldn't happen. There's no way to let the
+ * caller know that something failed.
+ */
+ SHIP_ASSERT(false);
+ return;
+ }
+
+ if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname))
+ {
+ continue;
+ }
+
+ is_partition= (partname.size() != 0);
+
+ table_handler= rdb_open_tables.get_table_handler(it.c_str());
+ if (table_handler == nullptr)
+ {
+ continue;
+ }
+
+ io_perf_read.bytes= table_handler->m_io_perf_read.bytes.load();
+ io_perf_read.requests= table_handler->m_io_perf_read.requests.load();
+
+ /*
+ Convert from rocksdb timer to mysql timer. RocksDB values are
+ in nanoseconds, but table statistics expect the value to be
+ in my_timer format.
+ */
+ io_perf_read.svc_time= my_core::microseconds_to_my_timer(
+ table_handler->m_io_perf_read.svc_time.load() / 1000);
+ io_perf_read.svc_time_max= my_core::microseconds_to_my_timer(
+ table_handler->m_io_perf_read.svc_time_max.load() / 1000);
+ io_perf_read.wait_time= my_core::microseconds_to_my_timer(
+ table_handler->m_io_perf_read.wait_time.load() / 1000);
+ io_perf_read.wait_time_max= my_core::microseconds_to_my_timer(
+ table_handler->m_io_perf_read.wait_time_max.load() / 1000);
+ io_perf_read.slow_ios= table_handler->m_io_perf_read.slow_ios.load();
+ rdb_open_tables.release_table_handler(table_handler);
+
+ /*
+      The table stats callback expects the database and table names in system
+      encoding, not in filename encoding. Convert them before invoking the
+      callback.
+ */
+ my_core::filename_to_tablename(dbname.c_str(), dbname_sys,
+ sizeof(dbname_sys));
+ my_core::filename_to_tablename(tablename.c_str(), tablename_sys,
+ sizeof(tablename_sys));
+ (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read, &io_perf,
+ &io_perf, &io_perf, &io_perf, &page_stats, &comp_stats, 0, 0,
+ rocksdb_hton_name);
+ }
+}
+
+
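+/*
+  Compare the options we are about to open the database with against the
+  options RocksDB persisted during the previous run (the latest OPTIONS-*
+  file in dbpath, loaded via LoadLatestOptions). Pointer-typed options such
+  as comparators and compaction filters are copied from the current options
+  first, since they cannot be fully reconstructed from the persisted file.
+*/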
+static rocksdb::Status check_rocksdb_options_compatibility(
+ const char *dbpath,
+ const rocksdb::Options& main_opts,
+ const std::vector<rocksdb::ColumnFamilyDescriptor>& cf_descr)
+{
+ DBUG_ASSERT(rocksdb_datadir != nullptr);
+
+ rocksdb::DBOptions loaded_db_opt;
+ std::vector<rocksdb::ColumnFamilyDescriptor> loaded_cf_descs;
+ rocksdb::Status status = LoadLatestOptions(dbpath,
+ rocksdb::Env::Default(), &loaded_db_opt,
+ &loaded_cf_descs);
+
+  // If we're starting from scratch and no options have been saved yet, this is
+  // a valid case; there is simply nothing to compare the current set of
+  // options against.
+ if (status.IsNotFound()) {
+ return rocksdb::Status::OK();
+ }
+
+ if (!status.ok()) {
+ return status;
+ }
+
+ if (loaded_cf_descs.size() != cf_descr.size()) {
+ return rocksdb::Status::NotSupported("Mismatched size of column family " \
+ "descriptors.");
+ }
+
+ // Please see RocksDB documentation for more context about why we need to set
+ // user-defined functions and pointer-typed options manually.
+ for (size_t i = 0; i < loaded_cf_descs.size(); i++) {
+ loaded_cf_descs[i].options.compaction_filter =
+ cf_descr[i].options.compaction_filter;
+ loaded_cf_descs[i].options.compaction_filter_factory =
+ cf_descr[i].options.compaction_filter_factory;
+ loaded_cf_descs[i].options.comparator = cf_descr[i].options.comparator;
+ loaded_cf_descs[i].options.memtable_factory =
+ cf_descr[i].options.memtable_factory;
+ loaded_cf_descs[i].options.merge_operator =
+ cf_descr[i].options.merge_operator;
+ loaded_cf_descs[i].options.prefix_extractor =
+ cf_descr[i].options.prefix_extractor;
+ loaded_cf_descs[i].options.table_factory =
+ cf_descr[i].options.table_factory;
+ }
+
+ // This is the essence of the function - determine if it's safe to open the
+ // database or not.
+ status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(),
+ main_opts, loaded_cf_descs);
+
+ return status;
+}
+
+/*
+ Storage Engine initialization function, invoked when plugin is loaded.
+*/
+
+static int rocksdb_init_func(void *p)
+{
+ DBUG_ENTER("rocksdb_init_func");
+
+ // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN.
+ static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes.");
+
+#ifdef HAVE_PSI_INTERFACE
+ init_rocksdb_psi_keys();
+#endif
+
+ rocksdb_hton= (handlerton *)p;
+ mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex,
+ MY_MUTEX_INIT_FAST);
+#ifdef HAVE_PSI_INTERFACE
+ rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key,
+ rdb_signal_bg_psi_cond_key);
+ rdb_drop_idx_thread.init(rdb_signal_drop_idx_psi_mutex_key,
+ rdb_signal_drop_idx_psi_cond_key);
+#else
+ rdb_bg_thread.init();
+ rdb_drop_idx_thread.init();
+#endif
+ mysql_mutex_init(rdb_collation_data_mutex_key, &rdb_collation_data_mutex,
+ MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex,
+ MY_MUTEX_INIT_FAST);
+
+#if defined(HAVE_PSI_INTERFACE)
+ rdb_collation_exceptions = new Regex_list_handler(
+ key_rwlock_collation_exception_list);
+#else
+ rdb_collation_exceptions = new Regex_list_handler();
+#endif
+
+ mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex,
+ MY_MUTEX_INIT_FAST);
+ rdb_open_tables.init_hash();
+ Rdb_transaction::init_mutex();
+
+ rocksdb_hton->state= SHOW_OPTION_YES;
+ rocksdb_hton->create= rocksdb_create_handler;
+ rocksdb_hton->close_connection= rocksdb_close_connection;
+ rocksdb_hton->prepare= rocksdb_prepare;
+ rocksdb_hton->commit_by_xid= rocksdb_commit_by_xid;
+ rocksdb_hton->rollback_by_xid= rocksdb_rollback_by_xid;
+ rocksdb_hton->recover= rocksdb_recover;
+ rocksdb_hton->commit= rocksdb_commit;
+ rocksdb_hton->rollback= rocksdb_rollback;
+ rocksdb_hton->db_type= DB_TYPE_ROCKSDB;
+ rocksdb_hton->show_status= rocksdb_show_status;
+ rocksdb_hton->start_consistent_snapshot=
+ rocksdb_start_tx_and_assign_read_view;
+ rocksdb_hton->savepoint_set= rocksdb_savepoint;
+ rocksdb_hton->savepoint_rollback= rocksdb_rollback_to_savepoint;
+ rocksdb_hton->savepoint_rollback_can_release_mdl=
+ rocksdb_rollback_to_savepoint_can_release_mdl;
+ rocksdb_hton->update_table_stats = rocksdb_update_table_stats;
+ rocksdb_hton->flush_logs= rocksdb_flush_wal;
+
+ rocksdb_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED |
+ HTON_SUPPORTS_EXTENDED_KEYS |
+ HTON_CAN_RECREATE;
+
+ DBUG_ASSERT(!mysqld_embedded);
+
+ rocksdb_stats= rocksdb::CreateDBStatistics();
+ rocksdb_db_options.statistics= rocksdb_stats;
+
+ if (rocksdb_rate_limiter_bytes_per_sec != 0) {
+ rocksdb_rate_limiter.reset(rocksdb::NewGenericRateLimiter(
+ rocksdb_rate_limiter_bytes_per_sec));
+ rocksdb_db_options.rate_limiter= rocksdb_rate_limiter;
+ }
+
+ std::shared_ptr<Rdb_logger> myrocks_logger= std::make_shared<Rdb_logger>();
+ rocksdb::Status s= rocksdb::CreateLoggerFromOptions(
+ rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log);
+ if (s.ok()) {
+ myrocks_logger->SetRocksDBLogger(rocksdb_db_options.info_log);
+ }
+
+ rocksdb_db_options.info_log= myrocks_logger;
+ myrocks_logger->SetInfoLogLevel(
+ static_cast<rocksdb::InfoLogLevel>(rocksdb_info_log_level));
+ rocksdb_db_options.wal_dir= rocksdb_wal_dir;
+
+ rocksdb_db_options.wal_recovery_mode=
+ static_cast<rocksdb::WALRecoveryMode>(rocksdb_wal_recovery_mode);
+
+ rocksdb_db_options.access_hint_on_compaction_start=
+ static_cast<rocksdb::Options::AccessHint>
+ (rocksdb_access_hint_on_compaction_start);
+
+ if (rocksdb_db_options.allow_mmap_reads &&
+ !rocksdb_db_options.allow_os_buffer)
+ {
+    // allow_mmap_reads requires allow_os_buffer: RocksDB will refuse to open
+    // if allow_mmap_reads is on and allow_os_buffer is off. (NO_LINT_DEBUG)
+ sql_print_error("RocksDB: Can't disable allow_os_buffer "
+ "if allow_mmap_reads is enabled\n");
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ std::vector<std::string> cf_names;
+ rocksdb::Status status;
+ status= rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir,
+ &cf_names);
+ if (!status.ok())
+ {
+ /*
+ When we start on an empty datadir, ListColumnFamilies returns IOError,
+ and RocksDB doesn't provide any way to check what kind of error it was.
+ Checking system errno happens to work right now.
+ */
+ if (status.IsIOError() && errno == ENOENT)
+ {
+ sql_print_information("RocksDB: Got ENOENT when listing column families");
+ sql_print_information("RocksDB: assuming that we're creating a new database");
+ }
+ else
+ {
+ std::string err_text= status.ToString();
+ sql_print_error("RocksDB: Error listing column families: %s", err_text.c_str());
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ sql_print_information("RocksDB: %ld column families found", cf_names.size());
+
+ std::vector<rocksdb::ColumnFamilyDescriptor> cf_descr;
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+
+ rocksdb_tbl_options.index_type=
+ (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type;
+
+ if (!rocksdb_tbl_options.no_block_cache) {
+ rocksdb_tbl_options.block_cache=
+ rocksdb::NewLRUCache(rocksdb_block_cache_size);
+ }
+ // Using newer BlockBasedTable format version for better compression
+ // and better memory allocation.
+ // See: https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd
+ rocksdb_tbl_options.format_version= 2;
+
+ if (rocksdb_collect_sst_properties) {
+ properties_collector_factory = std::make_shared
+ <Rdb_tbl_prop_coll_factory>(
+ &ddl_manager
+ );
+
+ rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr);
+
+ mysql_mutex_lock(&rdb_sysvars_mutex);
+
+ DBUG_ASSERT(rocksdb_table_stats_sampling_pct
+ <= RDB_TBL_STATS_SAMPLE_PCT_MAX);
+ properties_collector_factory->SetTableStatsSamplingPct(
+ rocksdb_table_stats_sampling_pct);
+
+ mysql_mutex_unlock(&rdb_sysvars_mutex);
+ }
+
+ if (!rocksdb_cf_options_map.init(ROCKSDB_WRITE_BUFFER_SIZE_DEFAULT,
+ rocksdb_tbl_options,
+ properties_collector_factory,
+ rocksdb_default_cf_options,
+ rocksdb_override_cf_options))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Failed to initialize CF options map.");
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ /*
+    If there are no column families, we're creating a new database.
+ Create one column family named "default".
+ */
+ if (cf_names.size() == 0)
+ cf_names.push_back(DEFAULT_CF_NAME);
+
+ std::vector<int> compaction_enabled_cf_indices;
+ sql_print_information("RocksDB: Column Families at start:");
+ for (size_t i = 0; i < cf_names.size(); ++i)
+ {
+ rocksdb::ColumnFamilyOptions opts;
+ rocksdb_cf_options_map.get_cf_options(cf_names[i], &opts);
+
+ sql_print_information(" cf=%s", cf_names[i].c_str());
+ sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size);
+ sql_print_information(" target_file_size_base=%" PRIu64,
+ opts.target_file_size_base);
+
+ /*
+ Temporarily disable compactions to prevent a race condition where
+ compaction starts before compaction filter is ready.
+ */
+ if (!opts.disable_auto_compactions)
+ {
+ compaction_enabled_cf_indices.push_back(i);
+ opts.disable_auto_compactions = true;
+ }
+ cf_descr.push_back(rocksdb::ColumnFamilyDescriptor(cf_names[i], opts));
+ }
+
+ rocksdb::Options main_opts(rocksdb_db_options,
+ rocksdb_cf_options_map.get_defaults());
+
+ /*
+ Flashcache configuration:
+    When running on Flashcache, mysqld opens the Flashcache device before
+    initializing storage engines and stores its file descriptor in the
+    cachedev_fd global variable.
+    RocksDB has a Flashcache-aware configuration. When it is enabled,
+    RocksDB adds its background threads to the Flashcache blacklist, which
+    is the desired behavior for Flashcache use cases.
+ */
+ if (cachedev_enabled)
+ {
+ flashcache_aware_env=
+ rocksdb::NewFlashcacheAwareEnv(rocksdb::Env::Default(),
+ cachedev_fd);
+ if (flashcache_aware_env.get() == nullptr)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Failed to open flashcache device at fd %d",
+ cachedev_fd);
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+ sql_print_information("RocksDB: Disabling flashcache on background "
+ "writer threads, fd %d", cachedev_fd);
+ main_opts.env= flashcache_aware_env.get();
+ }
+
+ main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes,
+ rocksdb::Env::Priority::HIGH);
+ main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions,
+ rocksdb::Env::Priority::LOW);
+ rocksdb::TransactionDBOptions tx_db_options;
+ tx_db_options.transaction_lock_timeout= 2; // 2 seconds
+ tx_db_options.custom_mutex_factory= std::make_shared<Rdb_mutex_factory>();
+
+ status= check_rocksdb_options_compatibility(rocksdb_datadir, main_opts,
+ cf_descr);
+
+  // Refuse to start if we determine that there is a chance of data corruption
+  // because of incompatible options.
+ if (!status.ok()) {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: compatibility check against existing database " \
+ "options failed. %s", status.ToString().c_str());
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ status= rocksdb::TransactionDB::Open(main_opts, tx_db_options,
+ rocksdb_datadir, cf_descr,
+ &cf_handles, &rdb);
+
+ if (!status.ok())
+ {
+ std::string err_text= status.ToString();
+ sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str());
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+ cf_manager.init(&rocksdb_cf_options_map, &cf_handles);
+
+ if (dict_manager.init(rdb->GetBaseDB(), &cf_manager))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Failed to initialize data dictionary.");
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ if (binlog_manager.init(&dict_manager))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Failed to initialize binlog manager.");
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Failed to initialize DDL manager.");
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ Rdb_sst_info::init(rdb);
+
+ /*
+    Enable auto compaction now that everything the compaction filter needs
+    has finished initializing.
+ */
+ std::vector<rocksdb::ColumnFamilyHandle*> compaction_enabled_cf_handles;
+ compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size());
+ for (auto index : compaction_enabled_cf_indices)
+ {
+ compaction_enabled_cf_handles.push_back(cf_handles[index]);
+ }
+
+ status= rdb->EnableAutoCompaction(compaction_enabled_cf_handles);
+
+ if (!status.ok())
+ {
+ std::string err_text= status.ToString();
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str());
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ auto err= rdb_bg_thread.create_thread(
+#ifdef HAVE_PSI_INTERFACE
+ rdb_background_psi_thread_key
+#endif
+ );
+ if (err != 0) {
+ sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)",
+ err);
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ err= rdb_drop_idx_thread.create_thread(
+#ifdef HAVE_PSI_INTERFACE
+ rdb_drop_idx_psi_thread_key
+#endif
+ );
+ if (err != 0) {
+ sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)",
+ err);
+ rdb_open_tables.free_hash();
+ DBUG_RETURN(1);
+ }
+
+ rdb_set_collation_exception_list(rocksdb_strict_collation_exceptions);
+
+ if (rocksdb_pause_background_work) {
+ rdb->PauseBackgroundWork();
+ }
+
+ sql_print_information("RocksDB instance opened");
+ DBUG_RETURN(0);
+}
+
+/*
+ Storage Engine deinitialization function, invoked when plugin is unloaded.
+*/
+
+static int rocksdb_done_func(void *p)
+{
+ int error= 0;
+ DBUG_ENTER("rocksdb_done_func");
+
+ // signal the drop index thread to stop
+ rdb_drop_idx_thread.signal(true);
+
+  // Flush all memtables so that no data is lost, even if the WAL is disabled.
+ rocksdb_flush_all_memtables();
+
+ // Stop all rocksdb background work
+ CancelAllBackgroundWork(rdb->GetBaseDB(), true);
+
+ // Signal the background thread to stop and to persist all stats collected
+ // from background flushes and compactions. This will add more keys to a new
+ // memtable, but since the memtables were just flushed, it should not trigger
+ // a flush that can stall due to background threads being stopped. As long
+ // as these keys are stored in a WAL file, they can be retrieved on restart.
+ rdb_bg_thread.signal(true);
+
+ // Wait for the background thread to finish.
+ auto err= rdb_bg_thread.join();
+ if (err != 0) {
+ // We'll log the message and continue because we're shutting down and
+ // continuation is the optimal strategy.
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Couldn't stop the background thread: (errno=%d)",
+ err);
+ }
+
+ // Wait for the drop index thread to finish.
+ err= rdb_drop_idx_thread.join();
+ if (err != 0) {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)",
+ err);
+ }
+
+ if (rdb_open_tables.m_hash.records)
+ {
+ // Looks like we are getting unloaded and yet we have some open tables
+ // left behind.
+ error= 1;
+ }
+
+ rdb_open_tables.free_hash();
+ mysql_mutex_destroy(&rdb_open_tables.m_mutex);
+ mysql_mutex_destroy(&rdb_sysvars_mutex);
+
+ delete rdb_collation_exceptions;
+ mysql_mutex_destroy(&rdb_collation_data_mutex);
+ mysql_mutex_destroy(&rdb_mem_cmp_space_mutex);
+
+ Rdb_transaction::term_mutex();
+
+ for (auto& it : rdb_collation_data)
+ {
+ delete it;
+ it= nullptr;
+ }
+
+ ddl_manager.cleanup();
+ binlog_manager.cleanup();
+ dict_manager.cleanup();
+ cf_manager.cleanup();
+
+ delete rdb;
+ rdb= nullptr;
+
+ // Disown the cache data since we're shutting down.
+  // This results in memory leaks but it improves shutdown time.
+  // Don't disown when running under Valgrind.
+#ifndef HAVE_purify
+ if (rocksdb_tbl_options.block_cache)
+ {
+ rocksdb_tbl_options.block_cache->DisownData();
+ }
+#endif /* HAVE_purify */
+
+ DBUG_RETURN(error);
+}
+
+
+/**
+ @brief
+  Look up (or create) the Rdb_table_handler for a table. The table_handler
+  is a structure shared by every ha_rocksdb handler that opens the same
+  table; it holds the pieces used for table locking (THR_LOCK) and the
+  per-table I/O statistics.
+*/
+
+Rdb_table_handler* Rdb_open_tables_map::get_table_handler(
+ const char *table_name)
+{
+ Rdb_table_handler *table_handler;
+ uint length;
+ char *tmp_name;
+
+ DBUG_ASSERT(table_name != nullptr);
+ length=(uint) strlen(table_name);
+
+ // First, look up the table in the hash map.
+ mysql_mutex_lock(&m_mutex);
+ if (!(table_handler= reinterpret_cast<Rdb_table_handler*>(
+ my_hash_search(&m_hash, reinterpret_cast<const uchar*>(table_name),
+ length))))
+ {
+ // Since we did not find it in the hash map, attempt to create and add it
+ // to the hash map.
+ if (!(table_handler= reinterpret_cast<Rdb_table_handler*>(
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &table_handler, sizeof(*table_handler),
+ &tmp_name, length+1,
+ NullS))))
+ {
+ // Allocating a new Rdb_table_handler and a new table name failed.
+ mysql_mutex_unlock(&m_mutex);
+ return nullptr;
+ }
+
+ table_handler->m_ref_count= 0;
+ table_handler->m_table_name_length= length;
+ table_handler->m_table_name= tmp_name;
+ strmov(table_handler->m_table_name, table_name);
+
+ if (my_hash_insert(&m_hash, reinterpret_cast<uchar*>(table_handler)))
+ {
+ // Inserting into the hash map failed.
+ mysql_mutex_unlock(&m_mutex);
+ my_free(table_handler);
+ return nullptr;
+ }
+
+ thr_lock_init(&table_handler->m_thr_lock);
+ my_io_perf_atomic_init(&table_handler->m_io_perf_read);
+ }
+ DBUG_ASSERT(table_handler->m_ref_count >= 0);
+ table_handler->m_ref_count++;
+
+ mysql_mutex_unlock(&m_mutex);
+
+ return table_handler;
+}
+
+
+std::vector<std::string> rdb_get_open_table_names(void)
+{
+ return rdb_open_tables.get_table_names();
+}
+
+
+std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const
+{
+ ulong i;
+ const Rdb_table_handler *table_handler;
+ std::vector<std::string> names;
+
+ mysql_mutex_lock(&m_mutex);
+ for (i = 0;
+ (table_handler= reinterpret_cast<const Rdb_table_handler*>(
+ my_hash_const_element(&m_hash, i)));
+ i++)
+ {
+ DBUG_ASSERT(table_handler != nullptr);
+ names.push_back(table_handler->m_table_name);
+ }
+ DBUG_ASSERT(i == m_hash.records);
+ mysql_mutex_unlock(&m_mutex);
+
+ return names;
+}
+
+
+void ha_rocksdb::load_auto_incr_value()
+{
+ int save_active_index= active_index;
+ active_index= table->s->next_number_index;
+ uint8 save_table_status= table->status;
+
+ /*
+ load_auto_incr_value() may be called by statements that
+    do not execute implicit commits (e.g. SHOW CREATE TABLE).
+    index_last() creates a snapshot. When a snapshot is created
+    here, it has to be released here as well. (GitHub issue#189)
+ */
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ bool is_new_snapshot= !tx->has_snapshot();
+
+  // Do a lookup. We only need the index column, so it should be index-only.
+ // (another reason to make it index-only is that table->read_set is
+ // not set appropriately and non-index-only lookup will not read the value)
+ bool save_keyread_only= m_keyread_only;
+ m_keyread_only= true;
+
+ if (!index_last(table->record[0]))
+ update_auto_incr_val();
+
+ m_keyread_only= save_keyread_only;
+ if (is_new_snapshot)
+ {
+ tx->release_snapshot();
+ }
+
+ table->status= save_table_status;
+ active_index= save_active_index;
+
+ /*
+ Do what ha_rocksdb::index_end() does.
+ (Why don't we use index_init/index_end? class handler defines index_init
+ as private, for some reason).
+ */
+ release_scan_iterator();
+}
+
+/* Get PK value from table->record[0]. */
+/*
+ TODO(alexyang): No existing support for auto_increment on non-pk columns, see
+ end of ha_rocksdb::create. Also see opened issue here:
+ https://github.com/facebook/mysql-5.6/issues/153
+*/
+void ha_rocksdb::update_auto_incr_val()
+{
+ Field *field;
+ longlong new_val;
+ field= table->key_info[table->s->next_number_index].key_part[0].field;
+
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ new_val= field->val_int() + 1;
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
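+  // Lock-free update of m_tbl_def->m_auto_incr_val to max(current, new_val):
+  // compare_exchange_weak() reloads auto_incr_val with the stored value on
+  // failure, so the loop below ends once the stored value is already
+  // >= new_val or our store succeeds.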
+ longlong auto_incr_val= m_tbl_def->m_auto_incr_val;
+ while (auto_incr_val < new_val &&
+ !m_tbl_def->m_auto_incr_val.compare_exchange_weak(
+ auto_incr_val, new_val))
+ {
+ // Do nothing - just loop until auto_incr_val is >= new_val or
+ // we successfully set it
+ }
+}
+
+int ha_rocksdb::load_hidden_pk_value()
+{
+ int save_active_index= active_index;
+ active_index= m_tbl_def->m_key_count - 1;
+ uint8 save_table_status= table->status;
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ bool is_new_snapshot= !tx->has_snapshot();
+
+ // Do a lookup.
+ if (!index_last(table->record[0]))
+ {
+ /*
+ Decode PK field from the key
+ */
+ longlong hidden_pk_id= 0;
+ if (read_hidden_pk_id_from_rowkey(&hidden_pk_id))
+ {
+ if (is_new_snapshot)
+ {
+ tx->release_snapshot();
+ }
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ hidden_pk_id++;
+ longlong old = m_tbl_def->m_hidden_pk_val;
+ while (old < hidden_pk_id &&
+ !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id))
+ {}
+ }
+
+ if (is_new_snapshot)
+ {
+ tx->release_snapshot();
+ }
+
+ table->status= save_table_status;
+ active_index= save_active_index;
+
+ release_scan_iterator();
+
+ return 0;
+}
+
+/* Get the next hidden PK value from m_tbl_def->m_hidden_pk_val. */
+longlong ha_rocksdb::update_hidden_pk_val()
+{
+ DBUG_ASSERT(has_hidden_pk(table));
+ longlong new_val;
+ new_val= m_tbl_def->m_hidden_pk_val++;
+ return new_val;
+}
+
+/* Read the hidden PK id from m_last_rowkey. */
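+/*
+  Layout of a hidden-PK rowkey, as decoded below:
+    [Rdb_key_def::INDEX_NUMBER_SIZE bytes: index number]
+    [Field_longlong::PACK_LENGTH bytes: hidden pk id, read with
+     rdb_netbuf_read_uint64()]
+*/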
+int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong* hidden_pk_id)
+{
+ DBUG_ASSERT(hidden_pk_id != nullptr);
+ DBUG_ASSERT(table != nullptr);
+ DBUG_ASSERT(has_hidden_pk(table));
+
+ rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), m_last_rowkey.length());
+
+ // Get hidden primary key from old key slice
+ Rdb_string_reader reader(&rowkey_slice);
+ if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE)))
+ return 1;
+
+ const int length= Field_longlong::PACK_LENGTH;
+ const uchar *from= reinterpret_cast<const uchar*>(reader.read(length));
+ if (from == nullptr)
+ {
+ return 1; /* Mem-comparable image doesn't have enough bytes */
+ }
+
+ *hidden_pk_id= rdb_netbuf_read_uint64(&from);
+ return 0;
+}
+
+
+/**
+ @brief
+  Release a reference to a table_handler. We call this whenever we close a
+  table. If this was the last reference to the table_handler, free the memory
+  associated with it.
+*/
+
+void Rdb_open_tables_map::release_table_handler(
+ Rdb_table_handler *table_handler)
+{
+ mysql_mutex_lock(&m_mutex);
+
+ DBUG_ASSERT(table_handler->m_ref_count > 0);
+ if (!--table_handler->m_ref_count)
+ {
+    // The last reference was released. Tear down the hash entry.
+ auto ret __attribute__((__unused__)) =
+ my_hash_delete(&m_hash, reinterpret_cast<uchar*>(table_handler));
+ DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted
+ my_core::thr_lock_delete(&table_handler->m_thr_lock);
+ my_free(table_handler);
+ }
+
+ mysql_mutex_unlock(&m_mutex);
+}
+
+
+static handler* rocksdb_create_handler(my_core::handlerton *hton,
+ my_core::TABLE_SHARE *table_arg,
+ my_core::MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_rocksdb(hton, table_arg);
+}
+
+
+ha_rocksdb::ha_rocksdb(my_core::handlerton *hton,
+ my_core::TABLE_SHARE *table_arg)
+ : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr),
+ m_scan_it_skips_bloom(false), m_tbl_def(nullptr),
+ m_pk_descr(nullptr), m_key_descr_arr(nullptr),
+ m_pk_can_be_decoded(false),
+ m_maybe_unpack_info(false),
+ m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr),
+ m_sk_packed_tuple(nullptr),
+ m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr),
+ m_sk_packed_tuple_old(nullptr),
+ m_pack_buffer(nullptr), m_lock_rows(RDB_LOCK_NONE),
+ m_keyread_only(FALSE), m_encoder_arr(nullptr),
+ m_row_checksums_checked(0),
+ m_in_rpl_delete_rows(false), m_in_rpl_update_rows(false),
+ m_force_skip_unique_check(false)
+{}
+
+
+static const char *ha_rocksdb_exts[] = {
+ NullS
+};
+
+
+const char **ha_rocksdb::bas_ext() const
+{
+ return ha_rocksdb_exts;
+}
+
+bool ha_rocksdb::same_table(const ha_rocksdb& other) const
+{
+ return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename();
+}
+
+bool ha_rocksdb::init_with_fields()
+{
+ uint pk= table_share->primary_key;
+ if (pk != MAX_KEY)
+ {
+ uint key_parts= table_share->key_info[pk].user_defined_key_parts;
+ check_keyread_allowed(pk /*PK*/, key_parts-1, true);
+ }
+ else
+ m_pk_can_be_decoded= false;
+
+ cached_table_flags= table_flags();
+ return false; /* Ok */
+}
+
+
+/**
+ Convert record from table->record[0] form into a form that can be written
+ into rocksdb.
+
+ @param pk_packed_slice Packed PK tuple. We need it in order to compute
+ and store its CRC.
+ @param packed_rec OUT Data slice with record data.
+*/
+
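+/*
+  Layout of the value produced below, in the order it is assembled:
+    [null-bits byte(s)]
+    [unpack_info block, only if m_maybe_unpack_info and the PK uses
+     PRIMARY_FORMAT_VERSION_UPDATE1 or newer]
+    [field data for every STORE_ALL field that is not NULL]
+    [RDB_CHECKSUM_DATA_TAG + key crc32 + value crc32, only if
+     should_store_checksums() is true]
+*/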
+void ha_rocksdb::convert_record_to_storage_format(
+ const rocksdb::Slice& pk_packed_slice,
+ Rdb_string_writer *pk_unpack_info,
+ rocksdb::Slice *packed_rec)
+{
+ DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info);
+ m_storage_record.length(0);
+
+ /* All NULL bits are initially 0 */
+ m_storage_record.fill(m_null_bytes_in_rec, 0);
+
+ // If a primary key may have non-empty unpack_info for certain values,
+ // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block
+ // itself was prepared in Rdb_key_def::pack_record.
+ if (m_maybe_unpack_info &&
+ m_pk_descr->m_kv_format_version >=
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1)
+ {
+ m_storage_record.append(reinterpret_cast<char*>(pk_unpack_info->ptr()),
+ pk_unpack_info->get_current_pos());
+ }
+
+ for (uint i=0; i < table->s->fields; i++)
+ {
+ /* Don't pack decodable PK key parts */
+ if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL)
+ {
+ continue;
+ }
+
+ Field *field= table->field[i];
+ if (m_encoder_arr[i].maybe_null())
+ {
+ char *data= (char*)m_storage_record.ptr();
+ if (field->is_null())
+ {
+ data[m_encoder_arr[i].m_null_offset]|= m_encoder_arr[i].m_null_mask;
+ /* Don't write anything for NULL values */
+ continue;
+ }
+ }
+
+ if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB)
+ {
+ my_core::Field_blob *blob= (my_core::Field_blob*)field;
+      /* Get the number of bytes needed to store the length */
+ uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr;
+
+ /* Store the length of the value */
+ m_storage_record.append(reinterpret_cast<char*>(blob->ptr), length_bytes);
+
+ /* Store the blob value itself */
+ char *data_ptr;
+ memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar**));
+ m_storage_record.append(data_ptr, blob->get_length());
+ }
+ else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR)
+ {
+ Field_varstring* field_var= (Field_varstring*)field;
+ uint data_len;
+ /* field_var->length_bytes is 1 or 2 */
+ if (field_var->length_bytes == 1)
+ {
+ data_len= field_var->ptr[0];
+ }
+ else
+ {
+ DBUG_ASSERT(field_var->length_bytes==2);
+ data_len= uint2korr(field_var->ptr);
+ }
+ m_storage_record.append(reinterpret_cast<char*>(field_var->ptr),
+ field_var->length_bytes + data_len);
+ }
+ else
+ {
+ /* Copy the field data */
+ uint len= field->pack_length_in_rec();
+ m_storage_record.append(reinterpret_cast<char*>(field->ptr), len);
+ }
+ }
+
+ if (should_store_checksums())
+ {
+ uint32_t key_crc32= my_core::crc32(0,
+ rdb_slice_to_uchar_ptr(&pk_packed_slice),
+ pk_packed_slice.size());
+ uint32_t val_crc32= my_core::crc32(
+ 0, rdb_mysql_str_to_uchar_str(&m_storage_record),
+ m_storage_record.length());
+ uchar key_crc_buf[RDB_CHECKSUM_SIZE];
+ uchar val_crc_buf[RDB_CHECKSUM_SIZE];
+ rdb_netbuf_store_uint32(key_crc_buf, key_crc32);
+ rdb_netbuf_store_uint32(val_crc_buf, val_crc32);
+ m_storage_record.append((const char*)&RDB_CHECKSUM_DATA_TAG, 1);
+ m_storage_record.append((const char*)key_crc_buf, RDB_CHECKSUM_SIZE);
+ m_storage_record.append((const char*)val_crc_buf, RDB_CHECKSUM_SIZE);
+ }
+
+ *packed_rec= rocksdb::Slice(m_storage_record.ptr(),
+ m_storage_record.length());
+}
+
+
+/*
+ @brief
+ Setup which fields will be unpacked when reading rows
+
+ @detail
+ Two special cases when we still unpack all fields:
+ - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE).
+ - When @@rocksdb_verify_checksums is ON (In this mode, we need to read all
+ fields to find whether there is a row checksum at the end. We could skip
+ the fields instead of decoding them, but currently we do decoding.)
+
+ @seealso
+ ha_rocksdb::setup_field_converters()
+ ha_rocksdb::convert_record_from_storage_format()
+*/
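+/*
+  For illustration (hypothetical schema): given a decodable INT primary key
+  plus NOT NULL columns a INT, b INT, c INT and a nullable d VARCHAR(10),
+  a query that reads only column c makes the loop below fold a and b into
+  skip_size=8, emit a single decoder entry for c with m_decode=true and
+  m_skip=8, and push a decode=false entry for d that is then trimmed off
+  as a trailing skip-only element.
+*/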
+void ha_rocksdb::setup_read_decoders()
+{
+ m_decoders_vect.clear();
+
+ int last_useful= 0;
+ int skip_size= 0;
+
+ for (uint i= 0; i < table->s->fields; i++)
+ {
+    // We only need a decoder if the field's value is stored in the record.
+ if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL)
+ {
+ continue;
+ }
+
+ if (m_lock_rows == RDB_LOCK_WRITE || m_verify_checksums ||
+ bitmap_is_set(table->read_set, table->field[i]->field_index))
+ {
+ // We will need to decode this field
+ m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size});
+ last_useful= m_decoders_vect.size();
+ skip_size= 0;
+ }
+ else
+ {
+ if (m_encoder_arr[i].uses_variable_len_encoding() ||
+ m_encoder_arr[i].maybe_null())
+ {
+ // For variable-length field, we need to read the data and skip it
+ m_decoders_vect.push_back({&m_encoder_arr[i], false, skip_size});
+ skip_size= 0;
+ }
+ else
+ {
+ // Fixed-width field can be skipped without looking at it.
+ // Add appropriate skip_size to the next field.
+ skip_size += m_encoder_arr[i].m_pack_length_in_rec;
+ }
+ }
+ }
+
+  // The last few elements may be entries that only skip data (no decoding).
+  // They serve no purpose, so remove them.
+ m_decoders_vect.erase(m_decoders_vect.begin() + last_useful,
+ m_decoders_vect.end());
+}
+
+
+#ifndef NDEBUG
+void dbug_append_garbage_at_end(std::string &on_disk_rec)
+{
+ on_disk_rec.append("abc");
+}
+
+void dbug_truncate_record(std::string &on_disk_rec)
+{
+ on_disk_rec.resize(0);
+}
+
+void dbug_modify_rec_varchar12(std::string &on_disk_rec)
+{
+ std::string res;
+ // The record is NULL-byte followed by VARCHAR(10).
+ // Put the NULL-byte
+ res.append("\0", 1);
+ // Then, add a valid VARCHAR(12) value.
+ res.append("\xC", 1);
+ res.append("123456789ab", 12);
+
+ on_disk_rec.assign(res);
+}
+
+
+void dbug_modify_key_varchar8(String &on_disk_rec)
+{
+ std::string res;
+ // The key starts with index number
+ res.append(on_disk_rec.ptr(), Rdb_key_def::INDEX_NUMBER_SIZE);
+
+ // Then, a mem-comparable form of a varchar(8) value.
+ res.append("ABCDE\0\0\0\xFC", 9);
+ on_disk_rec.length(0);
+ on_disk_rec.append(res.data(), res.size());
+}
+
+#endif
+
+
+int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key,
+ uchar * buf)
+{
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1",
+ dbug_append_garbage_at_end(m_retrieved_record););
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2",
+ dbug_truncate_record(m_retrieved_record););
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3",
+ dbug_modify_rec_varchar12(m_retrieved_record););
+
+ rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(),
+ m_retrieved_record.size());
+ return convert_record_from_storage_format(key, &retrieved_rec_slice, buf);
+}
+
+
+/*
+ @brief
+ Unpack the record in this->m_retrieved_record and this->m_last_rowkey from
+ storage format into buf (which can be table->record[0] or table->record[1]).
+
+ @param key Table record's key in mem-comparable form.
+ @param buf Store record in table->record[0] format here
+
+ @detail
+ If the table has blobs, the unpacked data in buf may keep pointers to the
+ data in this->m_retrieved_record.
+
+ The key is only needed to check its checksum value (the checksum is in
+ m_retrieved_record).
+
+ @seealso
+ ha_rocksdb::setup_read_decoders() Sets up data structures which tell which
+ columns to decode.
+
+ @return
+ 0 OK
+    other  Error unpacking the data
+*/
+
+int ha_rocksdb::convert_record_from_storage_format(const rocksdb::Slice *key,
+ const rocksdb::Slice *value,
+ uchar * buf)
+{
+ DBUG_ASSERT(key != nullptr);
+ DBUG_ASSERT(buf != nullptr);
+
+ Rdb_string_reader reader(value);
+ my_ptrdiff_t ptr_diff= buf - table->record[0];
+
+ /*
+ Decode PK fields from the key
+ */
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_read1",
+ dbug_modify_key_varchar8(m_last_rowkey););
+
+ const rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(),
+ m_last_rowkey.length());
+ const char *unpack_info= nullptr;
+ uint16 unpack_info_len= 0;
+ rocksdb::Slice unpack_slice;
+
+ /* Other fields are decoded from the value */
+ const char *null_bytes= nullptr;
+ if (m_null_bytes_in_rec && !(null_bytes= reader.read(m_null_bytes_in_rec)))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ if (m_maybe_unpack_info && m_pk_descr->m_kv_format_version >=
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1)
+ {
+ unpack_info= reader.read(RDB_UNPACK_HEADER_SIZE);
+
+ if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ unpack_info_len= rdb_netbuf_to_uint16(
+ reinterpret_cast<const uchar *>(unpack_info + 1));
+ unpack_slice= rocksdb::Slice(unpack_info, unpack_info_len);
+
+ reader.read(unpack_info_len - RDB_UNPACK_HEADER_SIZE);
+ }
+
+ if (m_pk_descr->unpack_record(table, buf, &rowkey_slice,
+ unpack_info ? &unpack_slice : nullptr,
+ false /* verify_checksum */))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ for (auto it= m_decoders_vect.begin(); it != m_decoders_vect.end(); it++)
+ {
+ const Rdb_field_encoder* const field_dec= it->m_field_enc;
+ const bool decode= it->m_decode;
+ bool isNull = field_dec->maybe_null() &&
+ ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0);
+
+ Field *field= table->field[field_dec->m_field_index];
+
+ /* Skip the bytes we need to skip */
+ if (it->m_skip && !reader.read(it->m_skip))
+ return HA_ERR_INTERNAL_ERROR;
+
+ if (isNull)
+ {
+ if (decode)
+ {
+ /* This sets the NULL-bit of this record */
+ field->set_null(ptr_diff);
+ /*
+ Besides that, set the field value to default value. CHECKSUM TABLE
+ depends on this.
+ */
+ uint field_offset= field->ptr - table->record[0];
+ memcpy(buf + field_offset,
+ table->s->default_values + field_offset,
+ field->pack_length());
+ }
+ continue;
+ }
+ else
+ {
+ if (decode)
+ field->set_notnull(ptr_diff);
+ }
+
+ if (field_dec->m_field_type == MYSQL_TYPE_BLOB)
+ {
+ my_core::Field_blob *blob= (my_core::Field_blob*)field;
+      /* Get the number of bytes needed to store the length */
+ uint length_bytes= blob->pack_length() - portable_sizeof_char_ptr;
+
+ blob->move_field_offset(ptr_diff);
+
+ const char *data_len_str;
+ if (!(data_len_str= reader.read(length_bytes)))
+ {
+ blob->move_field_offset(-ptr_diff);
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ memcpy(blob->ptr, data_len_str, length_bytes);
+
+ uint32 data_len= blob->get_length((uchar*)data_len_str, length_bytes,
+ table->s->db_low_byte_first);
+ const char *blob_ptr;
+ if (!(blob_ptr= reader.read(data_len)))
+ {
+ blob->move_field_offset(-ptr_diff);
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ if (decode)
+ {
+ // set 8-byte pointer to 0, like innodb does (relevant for 32-bit
+ // platforms)
+ memset(blob->ptr + length_bytes, 0, 8);
+ memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar**));
+ blob->move_field_offset(-ptr_diff);
+ }
+ }
+ else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR)
+ {
+ Field_varstring* field_var= (Field_varstring*)field;
+ const char *data_len_str;
+ if (!(data_len_str= reader.read(field_var->length_bytes)))
+ return HA_ERR_INTERNAL_ERROR;
+
+ uint data_len;
+ /* field_var->length_bytes is 1 or 2 */
+ if (field_var->length_bytes == 1)
+ {
+ data_len= (uchar)data_len_str[0];
+ }
+ else
+ {
+ DBUG_ASSERT(field_var->length_bytes == 2);
+ data_len= uint2korr(data_len_str);
+ }
+ if (data_len > field->field_length)
+ {
+ /* The data on disk is longer than table DDL allows? */
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ if (!reader.read(data_len))
+ return HA_ERR_INTERNAL_ERROR;
+
+ if (decode)
+ {
+ memcpy(field_var->ptr + ptr_diff, data_len_str,
+ field_var->length_bytes + data_len);
+ }
+ }
+ else
+ {
+ const char *data_bytes;
+ uint len= field_dec->m_pack_length_in_rec;
+ if (len > 0)
+ {
+ if ((data_bytes= reader.read(len)) == nullptr)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ if (decode)
+ memcpy(field->ptr + ptr_diff, data_bytes, len);
+ }
+ }
+ }
+
+ if (m_verify_checksums)
+ {
+ if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE &&
+ reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG)
+ {
+ uint32_t stored_key_chksum=
+ rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE));
+ uint32_t stored_val_chksum=
+ rdb_netbuf_to_uint32((const uchar*)reader.read(RDB_CHECKSUM_SIZE));
+
+ uint32_t computed_key_chksum=
+ my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size());
+ uint32_t computed_val_chksum=
+ my_core::crc32(0, rdb_slice_to_uchar_ptr(value),
+ value->size() - RDB_CHECKSUM_CHUNK_SIZE);
+
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum1",
+ stored_key_chksum++;);
+
+ if (stored_key_chksum != computed_key_chksum)
+ {
+ m_pk_descr->report_checksum_mismatch(true, key->data(), key->size());
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2",
+ stored_val_chksum++;);
+ if (stored_val_chksum != computed_val_chksum)
+ {
+ m_pk_descr->report_checksum_mismatch(false, value->data(),
+ value->size());
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ m_row_checksums_checked++;
+ }
+ if (reader.remaining_bytes())
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ return 0;
+}
+
+
+void ha_rocksdb::get_storage_type(Rdb_field_encoder *encoder, uint kp)
+{
+ Field *field= table->key_info[table->s->primary_key].key_part[kp].field;
+
+ if (field->real_type() == MYSQL_TYPE_NEWDECIMAL)
+ {
+ // Index-only is supported for DECIMAL columns.
+ // A DECIMAL value can be restored from its mem-comparable form.
+ // This works for both the old data format and the new data format.
+ if (m_pk_descr->m_kv_format_version >=
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1)
+ {
+ // New format, don't store Decimal value in the row.
+ encoder->m_storage_type= Rdb_field_encoder::STORE_NONE;
+ }
+ else
+ {
+ // Old format. Pretend there's no way to unpack the decimal from
+ // its mem-comparable form.
+ encoder->m_storage_type= Rdb_field_encoder::STORE_ALL;
+ }
+ return;
+ }
+
+ if (m_pk_descr->m_kv_format_version >=
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1)
+ {
+ // STORE_SOME uses unpack_info, so our key must be of version
+ // higher than PRIMARY_FORMAT_VERSION_UPDATE1 to use this
+ // feature.
+ if (m_pk_descr->has_unpack_info(kp))
+ {
+ DBUG_ASSERT(m_pk_descr->can_unpack(kp));
+ encoder->m_storage_type= Rdb_field_encoder::STORE_SOME;
+ m_maybe_unpack_info= true;
+ }
+ else if (m_pk_descr->can_unpack(kp))
+ {
+ encoder->m_storage_type= Rdb_field_encoder::STORE_NONE;
+ }
+ }
+ else
+ {
+    // For old format versions, we can only use STORE_NONE if the key part
+    // is unpackable and has no unpack_info.
+ if (m_pk_descr->can_unpack(kp) && !m_pk_descr->has_unpack_info(kp))
+ {
+ encoder->m_storage_type= Rdb_field_encoder::STORE_NONE;
+ }
+ }
+}
+
+/*
+ Setup data needed to convert table->record[] to and from record storage format.
+
+ @seealso
+ ha_rocksdb::convert_record_to_storage_format,
+ ha_rocksdb::convert_record_from_storage_format
+*/
+
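+/*
+  Example of the NULL-bits accounting done below (hypothetical table): with
+  nine nullable columns, the first eight share one NULL byte (masks
+  0x1..0x80) and the ninth starts a second byte, so m_null_bytes_in_rec
+  ends up being 2.
+*/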
+void ha_rocksdb::setup_field_converters()
+{
+ uint i;
+ uint null_bytes= 0;
+ uchar cur_null_mask= 0x1;
+
+ DBUG_ASSERT(m_encoder_arr == nullptr);
+ m_encoder_arr= static_cast<Rdb_field_encoder*>(
+ my_malloc(table->s->fields * sizeof(Rdb_field_encoder), MYF(0)));
+ if (m_encoder_arr == nullptr)
+ {
+ return;
+ }
+
+ for (i= 0; i < table->s->fields; i++)
+ {
+ Field *field= table->field[i];
+ m_encoder_arr[i].m_storage_type= Rdb_field_encoder::STORE_ALL;
+
+ /*
+ Check if this field is
+ - a part of primary key, and
+ - it can be decoded back from its key image.
+ If both hold, we don't need to store this field in the value part of
+ RocksDB's key-value pair.
+
+ If hidden pk exists, we skip this check since the field will never be
+ part of the hidden pk.
+ */
+ if (!has_hidden_pk(table) &&
+ field->part_of_key.is_set(table->s->primary_key))
+ {
+ KEY *pk_info= &table->key_info[table->s->primary_key];
+ for (uint kp= 0; kp < pk_info->user_defined_key_parts; kp++)
+ {
+ /* key_part->fieldnr is counted from 1 */
+ if (field->field_index + 1 == pk_info->key_part[kp].fieldnr)
+ {
+ get_storage_type(&m_encoder_arr[i], kp);
+ break;
+ }
+ }
+ }
+
+ m_encoder_arr[i].m_field_type= field->real_type();
+ m_encoder_arr[i].m_field_index= i;
+ m_encoder_arr[i].m_pack_length_in_rec= field->pack_length_in_rec();
+
+ if (field->real_maybe_null())
+ {
+ m_encoder_arr[i].m_null_mask= cur_null_mask;
+ m_encoder_arr[i].m_null_offset= null_bytes;
+ if (cur_null_mask == 0x80)
+ {
+ cur_null_mask= 0x1;
+ null_bytes++;
+ }
+ else
+ cur_null_mask= cur_null_mask << 1;
+ }
+ else
+ {
+ m_encoder_arr[i].m_null_mask= 0;
+ }
+ }
+
+ /* Count the last, unfinished NULL-bits byte */
+ if (cur_null_mask != 0x1)
+ null_bytes++;
+
+ m_null_bytes_in_rec= null_bytes;
+}
+
+int ha_rocksdb::alloc_key_buffers(const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+{
+ DBUG_ENTER("ha_rocksdb::alloc_key_buffers");
+ DBUG_ASSERT(m_pk_tuple == nullptr);
+
+ std::shared_ptr<Rdb_key_def>* kd_arr= tbl_def_arg->m_key_descr_arr;
+
+ uint key_len= 0;
+ m_pk_descr= kd_arr[pk_index(table_arg, tbl_def_arg)];
+ if (has_hidden_pk(table_arg))
+ {
+ m_pk_key_parts= 1;
+ }
+ else
+ {
+ m_pk_key_parts=
+ table->key_info[table->s->primary_key].user_defined_key_parts;
+ key_len= table->key_info[table->s->primary_key].key_length;
+ }
+
+  // TODO: move this into get_table_handler() ??
+ m_pk_descr->setup(table_arg, tbl_def_arg);
+
+ m_pk_tuple= reinterpret_cast<uchar*>(my_malloc(key_len, MYF(0)));
+ if (m_pk_tuple == nullptr)
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ uint pack_key_len= m_pk_descr->max_storage_fmt_length();
+ m_pk_packed_tuple= reinterpret_cast<uchar*>(my_malloc(pack_key_len, MYF(0)));
+ if (m_pk_packed_tuple == nullptr)
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */
+ uint max_packed_sk_len= pack_key_len;
+ for (uint i= 0; i < table_arg->s->keys; i++)
+ {
+ if (i == table_arg->s->primary_key) /* Primary key was processed above */
+ continue;
+
+ // TODO: move this into get_table_handler() ??
+ kd_arr[i]->setup(table_arg, tbl_def_arg);
+
+ uint packed_len= kd_arr[i]->max_storage_fmt_length();
+ if (packed_len > max_packed_sk_len)
+ {
+ max_packed_sk_len= packed_len;
+ }
+ }
+
+ if (!(m_sk_packed_tuple=
+ reinterpret_cast<uchar*>(my_malloc(max_packed_sk_len, MYF(0)))) ||
+ !(m_sk_match_prefix_buf=
+ reinterpret_cast<uchar*>(my_malloc(max_packed_sk_len, MYF(0)))) ||
+ !(m_sk_packed_tuple_old=
+ reinterpret_cast<uchar*>(my_malloc(max_packed_sk_len, MYF(0)))) ||
+ !(m_end_key_packed_tuple=
+ reinterpret_cast<uchar*>(my_malloc(max_packed_sk_len, MYF(0)))) ||
+ !((m_pack_buffer=
+ reinterpret_cast<uchar*>(my_malloc(max_packed_sk_len, MYF(0))))))
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ DBUG_RETURN(0);
+}
+
+void ha_rocksdb::free_key_buffers()
+{
+ my_free(m_pk_tuple);
+ m_pk_tuple= nullptr;
+
+ my_free(m_pk_packed_tuple);
+ m_pk_packed_tuple= nullptr;
+
+ my_free(m_sk_packed_tuple);
+ m_sk_packed_tuple= nullptr;
+
+ my_free(m_sk_match_prefix_buf);
+ m_sk_match_prefix_buf= nullptr;
+
+ my_free(m_sk_packed_tuple_old);
+ m_sk_packed_tuple_old= nullptr;
+
+ my_free(m_end_key_packed_tuple);
+ m_end_key_packed_tuple= nullptr;
+
+ my_free(m_pack_buffer);
+ m_pack_buffer= nullptr;
+}
+
+void ha_rocksdb::set_use_read_free_rpl(const char* whitelist)
+{
+ DBUG_ASSERT(whitelist != nullptr);
+
+#if defined(HAVE_PSI_INTERFACE)
+ Regex_list_handler regex_handler(key_rwlock_read_free_rpl_tables);
+#else
+ Regex_list_handler regex_handler;
+#endif
+
+ if (!regex_handler.set_patterns(whitelist))
+ {
+ warn_about_bad_patterns(&regex_handler, "read_free_rpl_tables");
+ }
+
+ m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename());
+}
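+/*
+  Usage sketch (hypothetical pattern): setting
+    rocksdb_read_free_rpl_tables = 't[0-9]+'
+  makes tables whose name (m_tbl_def->base_tablename()) matches the regular
+  expression enable m_use_read_free_rpl the next time they are opened.
+*/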
+
+void ha_rocksdb::set_skip_unique_check_tables(const char* whitelist)
+{
+ DBUG_ASSERT(whitelist != nullptr);
+
+#if defined(HAVE_PSI_INTERFACE)
+ Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables);
+#else
+ Regex_list_handler regex_handler;
+#endif
+
+ if (!regex_handler.set_patterns(whitelist))
+ {
+ warn_about_bad_patterns(&regex_handler, "skip_unique_check_tables");
+ }
+
+ m_skip_unique_check= regex_handler.matches(m_tbl_def->base_tablename());
+}
+
+int ha_rocksdb::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_rocksdb::open");
+
+ if (m_table_handler != nullptr)
+ {
+ rdb_open_tables.release_table_handler(m_table_handler);
+ }
+ m_table_handler= rdb_open_tables.get_table_handler(name);
+ if (m_table_handler == nullptr)
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ my_core::thr_lock_data_init(&m_table_handler->m_thr_lock, &m_db_lock,
+ nullptr);
+ m_io_perf.init(&m_table_handler->m_table_perf_context,
+ &m_table_handler->m_io_perf_read, &stats);
+
+ /*
+    Note: m_pk_descr may be non-NULL here when using partitions. It seems
+    it is OK to discard it.
+ */
+ std::string fullname;
+ if (rdb_normalize_tablename(name, &fullname))
+ {
+    DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ m_tbl_def= ddl_manager.find(fullname);
+ if (m_tbl_def == nullptr)
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "Attempt to open a table that is not present in RocksDB-SE data "
+ "dictionary");
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ m_lock_rows= RDB_LOCK_NONE;
+
+ m_key_descr_arr= m_tbl_def->m_key_descr_arr;
+
+ /*
+ Full table scan actually uses primary key
+    (UPDATE needs to know this, otherwise it will go into an infinite loop on
+ queries like "UPDATE tbl SET pk=pk+100")
+ */
+ key_used_on_scan= table->s->primary_key;
+
+ int err;
+ if ((err = alloc_key_buffers(table, m_tbl_def)))
+ {
+ DBUG_RETURN(err);
+ }
+
+ setup_field_converters();
+
+ info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
+
+ /*
+ The following load_XXX code calls row decode functions, and they do
+ that without having done ::external_lock() or index_init()/rnd_init().
+ (Note: this also means we're doing a read when there was no
+ setup_field_converters() call)
+
+ Initialize the necessary variables for them:
+ */
+ m_verify_checksums= false;
+
+ /* TODO: move the following to where TABLE_SHARE is opened: */
+ if (table->found_next_number_field)
+ load_auto_incr_value();
+
+ if (has_hidden_pk(table) && load_hidden_pk_value())
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+
+ /* Index block size in MyRocks: used by MySQL in query optimization */
+ stats.block_size = rocksdb_tbl_options.block_size;
+
+ /* Determine at open whether we can use Read Free Replication or not */
+ set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables));
+
+ /* Determine at open whether we should skip unique checks for this table */
+ set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables));
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_rocksdb::close(void)
+{
+ DBUG_ENTER("ha_rocksdb::close");
+
+ m_pk_descr= nullptr;
+ m_key_descr_arr= nullptr;
+
+ free_key_buffers();
+
+ my_free(m_encoder_arr);
+ m_encoder_arr= nullptr;
+
+ rdb_open_tables.release_table_handler(m_table_handler);
+ m_table_handler= nullptr;
+
+ DBUG_RETURN(0);
+}
+
+bool ha_rocksdb::get_error_message(const int error, String *buf)
+{
+ static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST,
+ "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST");
+ static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST,
+ "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST");
+
+ DBUG_ASSERT(error > 0);
+ DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST);
+ DBUG_ASSERT(buf != nullptr);
+
+ Rdb_transaction* tx = get_tx_from_thd(ha_thd());
+ bool temp_error = false;
+
+ switch (error) {
+ case HA_ERR_ROCKSDB_PK_REQUIRED:
+ buf->append("Table must have a PRIMARY KEY.");
+ break;
+ case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED:
+ buf->append("Unique indexes are not supported.");
+ break;
+ case HA_ERR_ROCKSDB_TOO_MANY_LOCKS:
+ buf->append("Number of locks held reached @@rocksdb_max_row_locks.");
+ break;
+ case HA_ERR_LOCK_WAIT_TIMEOUT:
+ DBUG_ASSERT(tx != nullptr);
+ buf->append(tx->m_detailed_error);
+ temp_error = true;
+ break;
+ default:
+    // We can be called with values below HA_ERR_FIRST because most MySQL
+    // internal functions simply return 1 in case of an error.
+ break;
+ }
+
+ return temp_error;
+}
+
+/* MyRocks supports only the following collations for indexed columns */
+static const std::set<const my_core::CHARSET_INFO *> RDB_INDEX_COLLATIONS=
+ {&my_charset_bin, &my_charset_utf8_bin, &my_charset_latin1_bin};
+
+
+static bool rdb_is_index_collation_supported(const my_core::Field *field)
+{
+ my_core::enum_field_types type= field->real_type();
+ /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */
+ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING ||
+ type == MYSQL_TYPE_BLOB)
+ {
+ return RDB_INDEX_COLLATIONS.find(field->charset()) !=
+ RDB_INDEX_COLLATIONS.end();
+ }
+ return true;
+}
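+/*
+  For example (hypothetical table): an index on a VARCHAR column using
+  utf8_general_ci is rejected by the strict collation check in create_cfs()
+  below when rocksdb_strict_collation_check is ON, unless the table matches
+  rocksdb_strict_collation_exceptions; binary, utf8_bin and latin1_bin
+  columns are accepted.
+*/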
+
+/*
+ Create structures needed for storing data in rocksdb. This is called when the
+ table is created. The structures will be shared by all TABLE* objects.
+
+  @param
+    table_arg        Table with definition
+    tbl_def_arg      tbl_def whose key_descr is being created/populated
+    old_table_arg    Old (current) table definition
+                     (for use during inplace alter)
+    old_tbl_def_arg  tbl_def from which existing keys are copied over
+                     (for use during inplace alter)
+
+ @return
+ 0 - Ok
+ other - error, either given table ddl is not supported by rocksdb or OOM.
+*/
+int ha_rocksdb::create_key_defs(const TABLE *table_arg,
+ Rdb_tbl_def *tbl_def_arg,
+ const TABLE *old_table_arg /* = nullptr */,
+ const Rdb_tbl_def *old_tbl_def_arg
+ /* = nullptr */)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(table_arg->s != nullptr);
+
+ DBUG_ENTER("ha_rocksdb::create_key_defs");
+
+ uint i;
+
+ /*
+ These need to be one greater than MAX_INDEXES since the user can create
+ MAX_INDEXES secondary keys and no primary key which would cause us
+ to generate a hidden one.
+ */
+ std::array<key_def_cf_info, MAX_INDEXES + 1> cfs;
+
+ /*
+ NOTE: All new column families must be created before new index numbers are
+ allocated to each key definition. See below for more details.
+ http://github.com/MySQLOnRocksDB/mysql-5.6/issues/86#issuecomment-138515501
+ */
+ if (create_cfs(table_arg, tbl_def_arg, &cfs))
+ {
+ DBUG_RETURN(1);
+  }
+
+ if (!old_tbl_def_arg)
+ {
+ /*
+      old_tbl_def doesn't exist. This means we are in the process of creating
+ a new table.
+
+ Get the index numbers (this will update the next_index_number)
+ and create Rdb_key_def structures.
+ */
+ for (i= 0; i < tbl_def_arg->m_key_count; i++)
+ {
+ if (create_key_def(table_arg, i, tbl_def_arg,
+ &m_key_descr_arr[i], cfs[i]))
+ {
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ else
+ {
+ /*
+ old_tbl_def exists. This means we are creating a new tbl_def as part of
+ in-place alter table. Copy over existing keys from the old_tbl_def and
+ generate the necessary new key definitions if any.
+ */
+ if (create_inplace_key_defs(table_arg, tbl_def_arg, old_table_arg,
+ old_tbl_def_arg, cfs))
+ {
+ DBUG_RETURN(1);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Checks index parameters and creates column families needed for storing data
+ in rocksdb if necessary.
+
+  @param in
+    table_arg     Table with definition
+    tbl_def_arg   Table def structure being populated
+
+ @param out
+ cfs CF info for each key definition in 'key_info' order
+
+ @return
+ 0 - Ok
+ other - error
+*/
+int ha_rocksdb::create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg,
+ std::array<struct key_def_cf_info, MAX_INDEXES + 1>* cfs)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(table_arg->s != nullptr);
+
+ DBUG_ENTER("ha_rocksdb::create_cfs");
+
+ char tablename_sys[NAME_LEN + 1];
+
+ my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(),
+ tablename_sys, sizeof(tablename_sys));
+
+ /*
+ The first loop checks the index parameters and creates
+ column families if necessary.
+ */
+ for (uint i= 0; i < tbl_def_arg->m_key_count; i++)
+ {
+ rocksdb::ColumnFamilyHandle* cf_handle;
+
+ if (rocksdb_strict_collation_check &&
+ !is_hidden_pk(i, table_arg, tbl_def_arg) &&
+ tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0)
+ {
+ for (uint part= 0; part < table_arg->key_info[i].actual_key_parts; part++)
+ {
+ if (!rdb_is_index_collation_supported(
+ table_arg->key_info[i].key_part[part].field) &&
+ !rdb_collation_exceptions->matches(tablename_sys))
+ {
+ std::string collation_err;
+ for (auto coll : RDB_INDEX_COLLATIONS)
+ {
+ if (collation_err != "")
+ {
+ collation_err += ", ";
+ }
+ collation_err += coll->name;
+ }
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Unsupported collation on string indexed "
+ "column %s.%s Use binary collation (%s).", MYF(0),
+ tbl_def_arg->full_tablename().c_str(),
+ table_arg->key_info[i].key_part[part].field->field_name,
+ collation_err.c_str());
+ DBUG_RETURN(1);
+ }
+ }
+ }
+
+ /*
+      The index comment contains the column family name. If there is no
+      comment, we get NULL, which means: use the default column family.
+ */
+ const char *comment = get_key_comment(i, table_arg, tbl_def_arg);
+ const char *key_name = get_key_name(i, table_arg, tbl_def_arg);
+
+ if (looks_like_per_index_cf_typo(comment))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "column family name looks like a typo of $per_index_cf");
+ DBUG_RETURN(1);
+ }
+ /* Prevent create from using the system column family */
+ if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0)
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0),
+ "column family not valid for storing index data");
+ DBUG_RETURN(1);
+ }
+ bool is_auto_cf_flag;
+ cf_handle = cf_manager.get_or_create_cf(rdb, comment,
+ tbl_def_arg->full_tablename(),
+ key_name, &is_auto_cf_flag);
+ if (!cf_handle)
+ DBUG_RETURN(1);
+
+ auto& cf = (*cfs)[i];
+ cf.cf_handle = cf_handle;
+ cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(comment);
+ cf.is_auto_cf = is_auto_cf_flag;
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Create key definition needed for storing data in rocksdb during ADD index
+ inplace operations.
+
+  @param in
+    table_arg         Table with definition
+    tbl_def_arg       New table def structure being populated
+    old_table_arg     Old (current) table definition
+    old_tbl_def_arg   Old (current) table def structure
+    cfs               Struct array which contains column family information
+
+ @return
+ 0 - Ok
+ other - error, either given table ddl is not supported by rocksdb or OOM.
+*/
+int ha_rocksdb::create_inplace_key_defs(const TABLE *table_arg,
+ Rdb_tbl_def *tbl_def_arg,
+ const TABLE *old_table_arg,
+ const Rdb_tbl_def *old_tbl_def_arg,
+ const std::array<key_def_cf_info, MAX_INDEXES + 1>& cfs)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(tbl_def_arg != nullptr);
+ DBUG_ASSERT(old_tbl_def_arg != nullptr);
+
+ DBUG_ENTER("create_key_def");
+
+ std::shared_ptr<Rdb_key_def>* old_key_descr=
+ old_tbl_def_arg->m_key_descr_arr;
+ std::shared_ptr<Rdb_key_def>* new_key_descr=
+ tbl_def_arg->m_key_descr_arr;
+ std::unordered_map<std::string, uint> old_key_pos =
+ get_old_key_positions(table_arg, tbl_def_arg, old_table_arg,
+ old_tbl_def_arg);
+
+ uint i;
+ for (i= 0; i < tbl_def_arg->m_key_count; i++)
+ {
+ auto it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg));
+ if (it != old_key_pos.end())
+ {
+ /*
+ Found matching index in old table definition, so copy it over to the
+ new one created.
+ */
+ const std::shared_ptr<Rdb_key_def>& okd=
+ old_key_descr[it->second];
+
+ uint16 index_dict_version= 0;
+ uchar index_type= 0;
+ uint16 kv_version= 0;
+ GL_INDEX_ID gl_index_id= okd->get_gl_index_id();
+ if (!dict_manager.get_index_info(gl_index_id, &index_dict_version,
+ &index_type, &kv_version))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Could not get index information "
+ "for Index Number (%u,%u), table %s",
+ gl_index_id.cf_id, gl_index_id.index_id,
+ old_tbl_def_arg->full_tablename().c_str());
+ DBUG_RETURN(1);
+ }
+
+ /*
+ We can't use the copy constructor because we need to update the
+ keynr within the pack_info for each field and the keyno of the keydef
+ itself.
+ */
+ new_key_descr[i]= std::make_shared<Rdb_key_def>(
+ okd->get_index_number(),
+ i,
+ okd->get_cf(),
+ index_dict_version,
+ index_type,
+ kv_version,
+ okd->m_is_reverse_cf,
+ okd->m_is_auto_cf,
+ okd->m_name.c_str(),
+ dict_manager.get_stats(gl_index_id));
+ }
+ else if (create_key_def(table_arg, i, tbl_def_arg,
+ &new_key_descr[i], cfs[i]))
+ {
+ DBUG_RETURN(1);
+ }
+
+ DBUG_ASSERT(new_key_descr[i] != nullptr);
+ new_key_descr[i]->setup(table_arg, tbl_def_arg);
+ }
+
+ DBUG_RETURN(0);
+}
+
+std::unordered_map<std::string, uint> ha_rocksdb::get_old_key_positions(
+ const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg,
+ const TABLE* old_table_arg,
+ const Rdb_tbl_def* old_tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(old_table_arg != nullptr);
+ DBUG_ASSERT(tbl_def_arg != nullptr);
+ DBUG_ASSERT(old_tbl_def_arg != nullptr);
+
+ DBUG_ENTER("get_old_key_positions");
+
+ std::shared_ptr<Rdb_key_def>* old_key_descr=
+ old_tbl_def_arg->m_key_descr_arr;
+ std::unordered_map<std::string, uint> old_key_pos;
+ std::unordered_map<std::string, uint> new_key_pos;
+ uint i;
+
+ for (i= 0; i < tbl_def_arg->m_key_count; i++)
+ {
+ new_key_pos[get_key_name(i, table_arg, tbl_def_arg)] = i;
+ }
+
+ for (i= 0; i < old_tbl_def_arg->m_key_count; i++)
+ {
+ if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg))
+ {
+ old_key_pos[old_key_descr[i]->m_name] = i;
+ continue;
+ }
+
+ /*
+ If the key name matches, we also need to compare the key parts, since a
+ simultaneous DROP + ADD can reuse the same key name with different key
+ parts.
+
+ Example:
+ CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB;
+ ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE;
+ */
+ const KEY* old_key = &old_table_arg->key_info[i];
+ auto it = new_key_pos.find(old_key->name);
+ if (it == new_key_pos.end())
+ {
+ continue;
+ }
+
+ KEY* new_key = &table_arg->key_info[it->second];
+
+ if (!compare_key_parts(old_key, new_key))
+ {
+ old_key_pos[old_key->name] = i;
+ }
+ }
+
+ DBUG_RETURN(old_key_pos);
+}
+
+/* Check two keys to ensure that key parts within keys match */
+int ha_rocksdb::compare_key_parts(const KEY* old_key, const KEY* new_key)
+{
+ DBUG_ASSERT(old_key != nullptr);
+ DBUG_ASSERT(new_key != nullptr);
+
+ DBUG_ENTER("compare_key_parts");
+
+ /* Skip if key parts do not match, as it is a different key */
+ if (new_key->user_defined_key_parts != old_key->user_defined_key_parts)
+ {
+ DBUG_RETURN(1);
+ }
+
+ /* Check to see that key parts themselves match */
+ for (uint i= 0; i < old_key->user_defined_key_parts; i++)
+ {
+ if (strcmp(old_key->key_part[i].field->field_name,
+ new_key->key_part[i].field->field_name) != 0)
+ {
+ DBUG_RETURN(1);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Create key definition needed for storing data in rocksdb.
+ This can be called either during CREATE table or doing ADD index operations.
+
+ @param in
+ table_arg Table with definition
+ i Position of index being created inside table_arg->key_info
+ tbl_def_arg Table def structure being populated
+ cf_info Struct which contains column family information
+
+ @param out
+ new_key_def Newly created index definition.
+
+ @return
+ 0 - Ok
+ other - error, either given table ddl is not supported by rocksdb or OOM.
+*/
+int ha_rocksdb::create_key_def(const TABLE *table_arg, uint i,
+ const Rdb_tbl_def* tbl_def_arg,
+ std::shared_ptr<Rdb_key_def>* new_key_def,
+ const struct key_def_cf_info& cf_info)
+{
+ DBUG_ENTER("create_key_def");
+ DBUG_ASSERT(new_key_def != nullptr);
+ DBUG_ASSERT(*new_key_def == nullptr);
+
+ uint index_id= ddl_manager.get_and_update_next_number(&dict_manager);
+ uint16_t index_dict_version= Rdb_key_def::INDEX_INFO_VERSION_LATEST;
+ uchar index_type;
+ uint16_t kv_version;
+
+ if (is_hidden_pk(i, table_arg, tbl_def_arg))
+ {
+ index_type= Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY;
+ kv_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST;
+ }
+ else if (i == table_arg->s->primary_key)
+ {
+ index_type= Rdb_key_def::INDEX_TYPE_PRIMARY;
+ uint16 pk_latest_version= Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST;
+ DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL",
+ {pk_latest_version=
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_INITIAL;
+ });
+ kv_version= pk_latest_version;
+ }
+ else
+ {
+ index_type= Rdb_key_def::INDEX_TYPE_SECONDARY;
+ uint16 sk_latest_version= Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST;
+ DBUG_EXECUTE_IF("MYROCKS_FORMAT_VERSION_INITIAL",
+ {sk_latest_version=
+ Rdb_key_def::SECONDARY_FORMAT_VERSION_INITIAL;
+ });
+ kv_version= sk_latest_version;
+ }
+
+ const char *key_name = get_key_name(i, table_arg, m_tbl_def);
+ *new_key_def= std::make_shared<Rdb_key_def>(
+ index_id, i, cf_info.cf_handle, index_dict_version, index_type,
+ kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name);
+
+ DBUG_RETURN(0);
+}
+
+int rdb_normalize_tablename(const std::string& tablename,
+ std::string* strbuf)
+{
+ DBUG_ASSERT(strbuf != nullptr);
+
+ if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/')
+ {
+ DBUG_ASSERT(0); // We were not passed a table name?
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ size_t pos = tablename.find_first_of('/', 2);
+ if (pos == std::string::npos)
+ {
+ DBUG_ASSERT(0); // We were not passed a table name?
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ *strbuf = tablename.substr(2, pos - 2) + "." + tablename.substr(pos + 1);
+
+ return 0;
+}
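+
+/*
+ Illustrative usage sketch (not part of the original code): the SQL layer and
+ partitioning pass paths of the form "./<db>/<table>", which the function
+ above turns into "<db>.<table>":
+
+ std::string normalized;
+ if (rdb_normalize_tablename("./test/t1#P#p0", &normalized) == 0)
+ {
+ // normalized == "test.t1#P#p0"
+ }
+*/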
+
+/*
+ Check to see if the user's original statement includes foreign key
+ references
+*/
+bool ha_rocksdb::contains_foreign_key(THD* thd)
+{
+ bool success;
+ const char* str = thd_query_string(thd)->str;
+
+ DBUG_ASSERT(str != nullptr);
+
+ while (*str != '\0')
+ {
+ // Scan from our current pos looking for 'FOREIGN'
+ str = rdb_find_in_string(str, "FOREIGN", &success);
+ if (!success)
+ {
+ return false;
+ }
+
+ // Skip past the found "FOREIGN"
+ str = rdb_check_next_token(&my_charset_bin, str, "FOREIGN", &success);
+ DBUG_ASSERT(success);
+
+ if (!my_isspace(&my_charset_bin, *str))
+ {
+ return false;
+ }
+
+ // See if the next token is 'KEY'
+ str = rdb_check_next_token(&my_charset_bin, str, "KEY", &success);
+ if (!success)
+ {
+ continue;
+ }
+
+ // See if the next token is '('
+ str = rdb_check_next_token(&my_charset_bin, str, "(", &success);
+ if (!success)
+ {
+ // There is an optional index id after 'FOREIGN KEY', skip it
+ str = rdb_skip_id(&my_charset_bin, str);
+
+ // Now check for '(' again
+ str = rdb_check_next_token(&my_charset_bin, str, "(", &success);
+ }
+
+ // If we have found 'FOREIGN KEY [<word>] (' we can be confident we have
+ // a foreign key clause.
+ return success;
+ }
+
+ // We never found a valid foreign key clause
+ return false;
+}
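+
+/*
+ Illustrative examples (not part of the original code) of what the scan above
+ is intended to classify:
+
+ CREATE TABLE t (a INT, FOREIGN KEY (a) REFERENCES p(a)) -> true
+ CREATE TABLE t (a INT, FOREIGN KEY fk_a (a) REFERENCES p(a)) -> true
+ CREATE TABLE t (foreign_key INT) -> false
+
+ The third statement is not treated as a match because "FOREIGN" is not
+ followed by whitespace and the token "KEY".
+*/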
+
+/**
+ @brief
+ splits the normalized table name of <dbname>.<tablename>#P#<part_no> into
+ the <dbname>, <tablename> and <part_no> components.
+
+ @param dbbuf returns database name/table_schema
+ @param tablebuf returns tablename
+ @param partitionbuf returns partition suffix if there is one
+ @return 0 on success, non-zero on failure to split
+*/
+int rdb_split_normalized_tablename(const std::string& fullname,
+ std::string* db,
+ std::string* table,
+ std::string* partition)
+{
+ DBUG_ASSERT(!fullname.empty());
+
+#define RDB_PARTITION_STR "#P#"
+
+ /* Normalize returns dbname.tablename. */
+ size_t dotpos = fullname.find('.');
+
+ /* Invalid table name? */
+ if (dotpos == std::string::npos)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ // Table must have a database name associated with it.
+ DBUG_ASSERT(dotpos > 0);
+
+ if (db != nullptr)
+ {
+ *db = fullname.substr(0, dotpos);
+ }
+
+ dotpos++;
+
+ size_t partpos = fullname.find(RDB_PARTITION_STR, dotpos,
+ strlen(RDB_PARTITION_STR));
+
+ if (partpos != std::string::npos)
+ {
+ DBUG_ASSERT(partpos >= dotpos);
+
+ if (table != nullptr)
+ {
+ *table = fullname.substr(dotpos, partpos - dotpos);
+ }
+
+ if (partition != nullptr)
+ {
+ *partition = fullname.substr(partpos + strlen(RDB_PARTITION_STR));
+ }
+ }
+ else if (table != nullptr)
+ {
+ *table = fullname.substr(dotpos);
+ }
+
+ return 0;
+}
+
+
+/**
+ @brief
+ create() is called to create a table. The variable 'name' will have the name
+ of the table.
+
+ @details
+ When create() is called you do not need to worry about
+ opening the table. Also, the .frm file will have already been
+ created so adjusting create_info is not necessary. You can overwrite
+ the .frm file at this point if you wish to change the table
+ definition, but there are no methods currently provided for doing
+ so.
+
+ Called from handle.cc by ha_create_table().
+
+ @see
+ ha_create_table() in handle.cc
+*/
+
+int ha_rocksdb::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ int res;
+ std::string str;
+ DBUG_ENTER("ha_rocksdb::create");
+
+ THD *thd = my_core::thd_get_current_thd();
+
+ if (get_table_if_exists(name)) {
+ if (thd->lex->sql_command == SQLCOM_TRUNCATE) {
+ if (delete_table(name)) {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ } else {
+ if (rdb_normalize_tablename(name, &str)) {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Table '%s' does not exist, but metadata information exists inside "
+ "MyRocks. This is a sign of data inconsistency. Please check if "
+ "'%s.frm' exists, and try to restore it if it does not exist.",
+ MYF(0), str.c_str(), name);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ }
+
+ /*
+ Construct dbname.tablename ourselves, because partitioning
+ passes strings like "./test/t14#P#p0" for individual partitions,
+ while table_arg->s->table_name has none of that.
+ */
+ if (rdb_normalize_tablename(name, &str))
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ if (contains_foreign_key(thd))
+ {
+ my_printf_error(ER_NOT_SUPPORTED_YET, "MyRocks does not currently support "
+ "foreign key constraints", MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ /*
+ TODO(alexyang): Temporarily disable unique index support when there is no
+ Primary Key
+ */
+ for (uint keyno= 0; keyno < table_arg->s->keys; keyno++)
+ {
+ if ((table_arg->key_info[keyno].flags & HA_NOSAME) &&
+ has_hidden_pk(table_arg))
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Unique index support is disabled when the table has no primary key. ",
+ MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ }
+
+ bool write_err= false;
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict_manager.begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ /* Create table/key descriptions and put them into the data dictionary */
+ m_tbl_def= new Rdb_tbl_def(str);
+
+ uint n_keys= table_arg->s->keys;
+
+ /*
+ If no primary key found, create a hidden PK and place it inside table
+ definition
+ */
+ if (has_hidden_pk(table_arg))
+ {
+ n_keys += 1;
+ }
+
+ m_key_descr_arr= new std::shared_ptr<Rdb_key_def>[n_keys];
+ m_tbl_def->m_key_count= n_keys;
+ m_tbl_def->m_key_descr_arr= m_key_descr_arr;
+
+ if ((res= create_key_defs(table_arg, m_tbl_def)))
+ {
+ goto error;
+ }
+
+ m_pk_descr= m_key_descr_arr[pk_index(table_arg, m_tbl_def)];
+
+ dict_manager.lock();
+ write_err= ddl_manager.put_and_write(m_tbl_def, batch)
+ || dict_manager.commit(batch);
+ dict_manager.unlock();
+
+ if (write_err)
+ {
+ goto error;
+ }
+
+ if (create_info->auto_increment_value)
+ m_tbl_def->m_auto_incr_val= create_info->auto_increment_value;
+
+ /*
+ We only support auto_increment at start of the PRIMARY KEY.
+ */
+ //Field *field;
+ //if ((field= table_arg->next_number_field))
+ /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work
+ if ((field= table_arg->found_next_number_field))
+ {
+ int pk= table_arg->s->primary_key;
+ Field *pk_field= table_arg->key_info[pk].key_part[0].field;
+ if (field->field_index != pk_field->field_index)
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ */
+ DBUG_RETURN(0);
+
+error:
+ /* Delete what we have allocated so far */
+ delete m_tbl_def;
+ m_tbl_def= nullptr;
+ m_key_descr_arr= nullptr;
+
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+}
+
+
+/**
+ @note
+ This function is used only when the table has not yet been opened, and
+ keyread_allowed bitmap doesn't have the correct values yet.
+
+ See comment in ha_rocksdb::index_flags() for details.
+*/
+
+bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) const
+{
+ bool res= true;
+ KEY *key_info= &table_share->key_info[inx];
+
+ Rdb_field_packing dummy1;
+ res= dummy1.setup(nullptr, key_info->key_part[part].field, inx, part,
+ key_info->key_part[part].length);
+
+ if (res && all_parts)
+ {
+ for (uint i= 0; i < part; i++)
+ {
+ Field *field;
+ if ((field= key_info->key_part[i].field))
+ {
+ Rdb_field_packing dummy;
+ if (!dummy.setup(nullptr, field, inx, i, key_info->key_part[i].length))
+ {
+ /* Cannot do index-only reads for this column */
+ res= false;
+ break;
+ }
+ }
+ }
+ }
+
+ uint pk= table_share->primary_key;
+ if (inx == pk && all_parts &&
+ part + 1 == table_share->key_info[pk].user_defined_key_parts)
+ {
+ m_pk_can_be_decoded= res;
+ }
+
+ return res;
+}
+
+int ha_rocksdb::read_key_exact(const std::shared_ptr<const Rdb_key_def>& kd,
+ rocksdb::Iterator *iter,
+ bool full_key_match,
+ const rocksdb::Slice& key_slice) const
+{
+ DBUG_ASSERT(kd != nullptr);
+ DBUG_ASSERT(iter != nullptr);
+
+ /*
+ We are looking for the first record such that
+ index_tuple= lookup_tuple.
+ lookup_tuple may be a prefix of the index.
+ */
+ if (kd->m_is_reverse_cf)
+ {
+ if (!full_key_match)
+ {
+ if (!iter->Valid())
+ iter->SeekToLast();
+ else
+ iter->Prev();
+ }
+ }
+
+ if (!iter->Valid() || !kd->value_matches_prefix(iter->key(), key_slice))
+ {
+ /*
+ Got a record that is not equal to the lookup value, or even a record
+ from a different index (possibly of a different table).
+ */
+ return HA_ERR_KEY_NOT_FOUND;
+ }
+ return 0;
+}
+
+int ha_rocksdb::read_before_key(const std::shared_ptr<const Rdb_key_def>& kd,
+ bool full_key_match,
+ const rocksdb::Slice& key_slice)
+{
+ DBUG_ASSERT(kd != nullptr);
+
+ /*
+ We are looking for the record with the biggest t.key such that
+ t.key < lookup_tuple.
+ */
+ if (kd->m_is_reverse_cf)
+ {
+ if (m_scan_it->Valid() && full_key_match &&
+ kd->value_matches_prefix(m_scan_it->key(), key_slice))
+ {
+ /* We are using full key and we've hit an exact match */
+ m_scan_it->Next();
+ }
+ }
+ else
+ {
+ if (m_scan_it->Valid())
+ m_scan_it->Prev();
+ else
+ m_scan_it->SeekToLast();
+ }
+
+ return m_scan_it->Valid() ? 0 : HA_ERR_KEY_NOT_FOUND;
+}
+
+int ha_rocksdb::read_after_key(const std::shared_ptr<const Rdb_key_def>& kd,
+ bool full_key_match,
+ const rocksdb::Slice& key_slice)
+{
+ DBUG_ASSERT(kd != nullptr);
+
+ /*
+ We are looking for the first record such that
+
+ index_tuple $GT lookup_tuple
+
+ with HA_READ_AFTER_KEY, $GT = '>',
+ with HA_READ_KEY_OR_NEXT, $GT = '>='
+ */
+ if (kd->m_is_reverse_cf)
+ {
+ if (!m_scan_it->Valid())
+ {
+ m_scan_it->SeekToLast();
+ }
+ else
+ {
+ /*
+ We should step back
+ - when not using full extended key
+ - when using full extended key and when we've got an exact match
+ */
+ if (!full_key_match ||
+ !kd->value_matches_prefix(m_scan_it->key(), key_slice))
+ {
+ m_scan_it->Prev();
+ }
+ }
+ }
+
+ return m_scan_it->Valid() ? 0 : HA_ERR_KEY_NOT_FOUND;
+}
+
+int ha_rocksdb::position_to_correct_key(
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ enum ha_rkey_function find_flag,
+ bool full_key_match,
+ const uchar* key,
+ key_part_map keypart_map,
+ const rocksdb::Slice& key_slice,
+ bool* move_forward)
+{
+ int rc= 0;
+
+ *move_forward= true;
+
+ switch (find_flag) {
+ case HA_READ_KEY_EXACT:
+ rc= read_key_exact(kd, m_scan_it, full_key_match, key_slice);
+ break;
+ case HA_READ_BEFORE_KEY:
+ *move_forward= false;
+ rc= read_before_key(kd, full_key_match, key_slice);
+ if (rc == 0 && !kd->covers_key(m_scan_it->key()))
+ {
+ /* The record we've got is not from this index */
+ rc= HA_ERR_KEY_NOT_FOUND;
+ }
+ break;
+ case HA_READ_AFTER_KEY:
+ case HA_READ_KEY_OR_NEXT:
+ rc= read_after_key(kd, full_key_match, key_slice);
+ if (rc == 0 && !kd->covers_key(m_scan_it->key()))
+ {
+ /* The record we've got is not from this index */
+ rc= HA_ERR_KEY_NOT_FOUND;
+ }
+ break;
+ case HA_READ_KEY_OR_PREV:
+ case HA_READ_PREFIX:
+ /* This flag is not used by the SQL layer, so we don't support it yet. */
+ rc= HA_ERR_UNSUPPORTED;
+ break;
+ case HA_READ_PREFIX_LAST:
+ case HA_READ_PREFIX_LAST_OR_PREV:
+ *move_forward= false;
+ /*
+ Find the last record with the specified index prefix lookup.
+ - HA_READ_PREFIX_LAST requires that the record has the
+ prefix=lookup (if there are no such records,
+ HA_ERR_KEY_NOT_FOUND should be returned).
+ - HA_READ_PREFIX_LAST_OR_PREV has no such requirement. If there are no
+ records with prefix=lookup, we should return the last record
+ before that.
+ */
+ rc= read_before_key(kd, full_key_match, key_slice);
+ if (rc == 0)
+ {
+ const rocksdb::Slice& rkey= m_scan_it->key();
+ if (!kd->covers_key(rkey))
+ {
+ /* The record we've got is not from this index */
+ rc= HA_ERR_KEY_NOT_FOUND;
+ }
+ else if (find_flag == HA_READ_PREFIX_LAST)
+ {
+ uint size = kd->pack_index_tuple(table, m_pack_buffer,
+ m_sk_packed_tuple, key,
+ keypart_map);
+ rocksdb::Slice lookup_tuple(
+ reinterpret_cast<char*>(m_sk_packed_tuple), size);
+
+ // We need to compare the key we've got with the original search prefix.
+ if (!kd->value_matches_prefix(rkey, lookup_tuple))
+ {
+ rc= HA_ERR_KEY_NOT_FOUND;
+ }
+ }
+ }
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+
+ return rc;
+}
+
+int ha_rocksdb::calc_eq_cond_len(const std::shared_ptr<const Rdb_key_def>& kd,
+ enum ha_rkey_function find_flag,
+ const rocksdb::Slice& slice,
+ int bytes_changed_by_succ,
+ const key_range *end_key,
+ uint* end_key_packed_size)
+{
+ if (find_flag == HA_READ_KEY_EXACT)
+ return slice.size();
+
+ if (find_flag == HA_READ_PREFIX_LAST)
+ {
+ /*
+ We have made the kd->successor(m_sk_packed_tuple) call above.
+
+ The slice is at least Rdb_key_def::INDEX_NUMBER_SIZE bytes long.
+ */
+ return slice.size() - bytes_changed_by_succ;
+ }
+
+ if (end_key)
+ {
+ *end_key_packed_size= kd->pack_index_tuple(table, m_pack_buffer,
+ m_end_key_packed_tuple,
+ end_key->key,
+ end_key->keypart_map);
+
+ /*
+ Calculate the length of the equal conditions here. The 4-byte index id is
+ included.
+ Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3)
+ WHERE id1=1 AND id2=1 AND id3>=2 => eq_cond_len= 4+8+4= 16
+ WHERE id1=1 AND id2>=1 AND id3>=2 => eq_cond_len= 4+8= 12
+ Example2: id1 VARCHAR(30), id2 INT, PRIMARY KEY (id1, id2)
+ WHERE id1 = 'AAA' and id2 < 3; => eq_cond_len=13 (varchar used 9 bytes)
+ */
+ rocksdb::Slice end_slice(reinterpret_cast<char*>(m_end_key_packed_tuple),
+ *end_key_packed_size);
+ return slice.difference_offset(end_slice);
+ }
+
+ /*
+ On range scan without any end key condition, there is no
+ eq cond, and eq cond length is the same as index_id size (4 bytes).
+ Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3)
+ WHERE id1>=1 AND id2 >= 2 and id2 <= 5 => eq_cond_len= 4
+ */
+ return Rdb_key_def::INDEX_NUMBER_SIZE;
+}
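+
+/*
+ Illustrative sketch (not part of the original code) of the range-scan case
+ above, reusing Example1 from the comment: with a start key of
+ (id1=1, id2=1, id3>=2) and an end key of (id1=1, id2=1, id3<=5), the packed
+ slices share the 4-byte index id plus the packed id1 and id2 values, so
+ slice.difference_offset(end_slice) returns 4 + 8 + 4 = 16.
+*/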
+
+int ha_rocksdb::read_row_from_primary_key(uchar *buf)
+{
+ DBUG_ASSERT(buf != nullptr);
+
+ int rc;
+ const rocksdb::Slice& rkey= m_scan_it->key();
+ uint pk_size= rkey.size();
+ const char *pk_data= rkey.data();
+
+ memcpy(m_pk_packed_tuple, pk_data, pk_size);
+ m_last_rowkey.copy(pk_data, pk_size, &my_charset_bin);
+
+ if (m_lock_rows != RDB_LOCK_NONE)
+ {
+ /* We need to put a lock and re-read */
+ rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size);
+ }
+ else
+ {
+ /* Unpack from the row we've read */
+ const rocksdb::Slice& value = m_scan_it->value();
+ rc= convert_record_from_storage_format(&rkey, &value, buf);
+ }
+
+ return rc;
+}
+
+int ha_rocksdb::read_row_from_secondary_key(uchar *buf,
+ const std::shared_ptr<const Rdb_key_def>& kd, bool move_forward)
+{
+ DBUG_ASSERT(buf != nullptr);
+ DBUG_ASSERT(kd != nullptr);
+
+ int rc= 0;
+ uint pk_size;
+
+ if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table))
+ {
+ /* Get the key columns and primary key value */
+ const rocksdb::Slice& rkey= m_scan_it->key();
+ pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey,
+ m_pk_packed_tuple);
+ const rocksdb::Slice& value= m_scan_it->value();
+ if (pk_size == RDB_INVALID_KEY_LEN)
+ {
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+ else
+ {
+ rc= try_keyonly_read_from_sk(buf, kd, rkey, value, pk_size);
+ }
+ }
+ else
+ {
+ if (kd->m_is_reverse_cf)
+ move_forward= !move_forward;
+
+ rc= find_icp_matching_index_rec(move_forward, buf);
+ if (!rc)
+ {
+ const rocksdb::Slice& rkey= m_scan_it->key();
+ pk_size= kd->get_primary_key_tuple(table, m_pk_descr, &rkey,
+ m_pk_packed_tuple);
+ if (pk_size == RDB_INVALID_KEY_LEN)
+ {
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+ else
+ {
+ rc= get_row_by_rowid(buf, m_pk_packed_tuple, pk_size);
+ }
+ }
+ }
+
+ if (!rc)
+ {
+ m_last_rowkey.copy((const char*)m_pk_packed_tuple, pk_size,
+ &my_charset_bin);
+ }
+
+ return rc;
+}
+
+/**
+ @note
+ Some data types require unpack_info in order to do index-only reads.
+
+ However, support for this may have been added after the database was built.
+ This means that we need to support the case where unpack_info is not
+ available. In this case, we should just do a primary key lookup.
+*/
+int ha_rocksdb::try_keyonly_read_from_sk(uchar* buf,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value,
+ const uint rowid_size)
+{
+ int res= kd->unpack_record(table, buf, &key, &value, m_verify_checksums);
+ uint size= rowid_size;
+ if (res == UNPACK_INFO_MISSING)
+ {
+ // If we can't unpack, then do a PK lookup.
+ if (rowid_size == RDB_INVALID_KEY_LEN)
+ {
+ size= kd->get_primary_key_tuple(
+ table, m_pk_descr, &key, m_pk_packed_tuple);
+ }
+ return get_row_by_rowid(buf, m_pk_packed_tuple, size);
+ }
+ else if (res == UNPACK_FAILURE)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ DBUG_ASSERT(res == UNPACK_SUCCESS);
+ return res;
+}
+
+
+/**
+ @note
+ The problem with this function is that the SQL layer calls it when
+ - the table has not yet been opened (no ::open() call done)
+ - this->table_share already exists, but it is in the process of being
+ filled, so some of the fields are still NULL.
+ - In particular, table_share->key_info[inx].key_part[] is filled only up
+ to part #part. Subsequent key parts are not yet filled.
+
+ To complicate things further, the SQL layer will call index_flags() with
+ all_parts=TRUE. Essentially, we're asked to provide flags for reading
+ keyparts whose datatype is not yet known.
+
+ We work around this problem by using check_keyread_allowed(), which uses
+ the table_share object and is careful not to step on uninitialized data.
+
+ When we get a call with all_parts=TRUE, we try to analyze all parts but
+ ignore those that have key_part->field==nullptr (these are not initialized
+ yet).
+*/
+
+ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const
+{
+ ulong base_flags= HA_READ_NEXT | // doesn't seem to be used
+ HA_READ_ORDER |
+ HA_READ_RANGE |
+ HA_READ_PREV;
+
+ if (check_keyread_allowed(inx, part, all_parts))
+ base_flags |= HA_KEYREAD_ONLY;
+
+ if (inx == table_share->primary_key)
+ {
+ /*
+ Index-only reads on the primary key are the same as a table scan for us.
+ Still, we need to explicitly "allow" them, otherwise the SQL layer will
+ miss some plans.
+ */
+ base_flags |= HA_KEYREAD_ONLY;
+ }
+ else
+ {
+ /*
+ We can Index Condition Pushdown any key except the primary. With primary
+ key, we get (pk, record) pair immediately, there is no place to put the
+ ICP check.
+ */
+ base_flags |= HA_DO_INDEX_COND_PUSHDOWN;
+ }
+
+ return base_flags;
+}
+
+
+/**
+ @brief
+ Read next index tuple through the secondary index.
+
+ @details
+ m_scan_it points at the index key-value pair that we should read the (pk,row)
+ pair for.
+*/
+int ha_rocksdb::secondary_index_read(const int keyno, uchar *buf)
+{
+ DBUG_ASSERT(buf != nullptr);
+ DBUG_ASSERT(table != nullptr);
+
+ stats.rows_requested++;
+
+ /* Use STATUS_NOT_FOUND when record not found or some error occurred */
+ table->status= STATUS_NOT_FOUND;
+
+ if (m_scan_it->Valid())
+ {
+ rocksdb::Slice key= m_scan_it->key();
+
+ /* Check if we've run out of records of this index */
+ if (m_key_descr_arr[keyno]->covers_key(key))
+ {
+ int rc;
+
+ // TODO: We could check here if we have run out of the range we're scanning
+ uint size= m_key_descr_arr[keyno]->get_primary_key_tuple(
+ table, m_pk_descr, &key, m_pk_packed_tuple);
+ if (size == RDB_INVALID_KEY_LEN)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ m_last_rowkey.copy((const char*)m_pk_packed_tuple, size, &my_charset_bin);
+
+ if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE &&
+ !has_hidden_pk(table))
+ {
+ rc= try_keyonly_read_from_sk(buf, m_key_descr_arr[keyno],
+ key, m_scan_it->value(), size);
+ }
+ else
+ {
+ rc= get_row_by_rowid(buf, m_pk_packed_tuple, size);
+ }
+
+ if (!rc)
+ {
+ table->status= 0;
+ stats.rows_read++;
+ stats.rows_index_next++;
+ update_row_stats(ROWS_READ);
+ }
+ return rc;
+ }
+ }
+ return HA_ERR_END_OF_FILE;
+}
+
+
+/*
+ ha_rocksdb::read_range_first overrides handler::read_range_first.
+ The only difference from handler::read_range_first is that
+ ha_rocksdb::read_range_first passes end_key to
+ ha_rocksdb::index_read_map_impl function.
+*/
+int ha_rocksdb::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range_arg, bool sorted)
+{
+ int result;
+ DBUG_ENTER("ha_rocksdb::read_range_first");
+
+ eq_range= eq_range_arg;
+ set_end_range(end_key, RANGE_SCAN_ASC);
+
+ range_key_part= table->key_info[active_index].key_part;
+
+ if (!start_key) // Read first record
+ result= ha_index_first(table->record[0]);
+ else
+ {
+ if (is_using_prohibited_gap_locks(table,
+ is_using_full_unique_key(
+ active_index,
+ start_key->keypart_map,
+ start_key->flag)))
+ {
+ DBUG_RETURN(HA_ERR_LOCK_DEADLOCK);
+ }
+
+ MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
+ { result= index_read_map_impl(table->record[0],
+ start_key->key,
+ start_key->keypart_map,
+ start_key->flag,
+ end_key); })
+ }
+ if (result)
+ DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
+ ? HA_ERR_END_OF_FILE
+ : result);
+
+ if (compare_key(end_range) <= 0)
+ {
+ DBUG_RETURN(0);
+ }
+ else
+ {
+ /*
+ The last read row does not fall in the range. So request
+ storage engine to release row lock if possible.
+ */
+ unlock_row();
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+}
+
+
+int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+{
+ return index_read_map_impl(buf, key, keypart_map, find_flag, nullptr);
+}
+
+/*
+ See storage/rocksdb/rocksdb-range-access.txt for description of how MySQL
+ index navigation commands are converted into RocksDB lookup commands.
+
+ This function takes end_key as an argument, and it is set on range scan.
+ MyRocks needs to decide whether the prefix bloom filter can be used or not.
+ To decide that, the equal-condition length must be calculated. On equal
+ lookups (find_flag == HA_READ_KEY_EXACT), the equal-condition length is the
+ same as rocksdb::Slice.size() of the start key. On range scans, it is
+ MIN(start_key, end_key) of the rocksdb::Slice expression.
+*/
+int ha_rocksdb::index_read_map_impl(uchar *buf, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag,
+ const key_range *end_key)
+{
+ int rc= 0;
+ DBUG_ENTER("ha_rocksdb::index_read_map");
+
+ ha_statistic_increment(&SSV::ha_read_key_count);
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[active_index];
+ uint actual_key_parts= kd->get_key_parts();
+ bool using_full_key= is_using_full_key(keypart_map, actual_key_parts);
+
+ if (!end_key)
+ end_key= end_range;
+
+ /* By default, we don't need the retrieved records to match the prefix */
+ m_sk_match_prefix= nullptr;
+ stats.rows_requested++;
+
+ if (active_index == table->s->primary_key &&
+ find_flag == HA_READ_KEY_EXACT && using_full_key)
+ {
+ /*
+ Equality lookup over primary key, using full tuple.
+ This is a special case, use DB::Get.
+ */
+ uint size= kd->pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple,
+ key, keypart_map);
+ rc= get_row_by_rowid(buf, m_pk_packed_tuple, size);
+ if (!rc)
+ {
+ stats.rows_read++;
+ stats.rows_index_first++;
+ update_row_stats(ROWS_READ);
+ }
+ DBUG_RETURN(rc);
+ }
+
+ /*
+ Unique secondary index performs lookups without the extended key fields
+ */
+ uint packed_size;
+ if (active_index != table->s->primary_key &&
+ table->key_info[active_index].flags & HA_NOSAME &&
+ find_flag == HA_READ_KEY_EXACT && using_full_key)
+ {
+ key_part_map tmp_map= (key_part_map(1) <<
+ table->key_info[active_index].user_defined_key_parts) - 1;
+ packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple,
+ key, tmp_map);
+ if (table->key_info[active_index].user_defined_key_parts !=
+ kd->get_key_parts())
+ using_full_key= false;
+ }
+ else
+ {
+ packed_size= kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple,
+ key, keypart_map);
+ }
+
+ if ((pushed_idx_cond && pushed_idx_cond_keyno == active_index) &&
+ (find_flag == HA_READ_KEY_EXACT || find_flag == HA_READ_PREFIX_LAST))
+ {
+ /*
+ We are doing a point index lookup, and ICP is enabled. It is possible
+ that this call will be followed by a ha_rocksdb->index_next_same() call.
+
+ Do what InnoDB does: save the lookup tuple now. We will need it in
+ index_next_same/find_icp_matching_index_rec in order to stop scanning
+ as soon as the index record doesn't match the lookup tuple.
+
+ When not using ICP, handler::index_next_same() will make sure that rows
+ that don't match the lookup prefix are not returned.
+ */
+ m_sk_match_prefix= m_sk_match_prefix_buf;
+ m_sk_match_length= packed_size;
+ memcpy(m_sk_match_prefix, m_sk_packed_tuple, packed_size);
+ }
+
+ int bytes_changed_by_succ= 0;
+ if (find_flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ find_flag == HA_READ_PREFIX_LAST ||
+ find_flag == HA_READ_AFTER_KEY)
+ {
+ /* See below */
+ bytes_changed_by_succ= kd->successor(m_sk_packed_tuple, packed_size);
+ }
+
+ rocksdb::Slice slice(
+ reinterpret_cast<const char*>(m_sk_packed_tuple), packed_size);
+
+ uint end_key_packed_size= 0;
+ uint eq_cond_len= calc_eq_cond_len(kd, find_flag, slice,
+ bytes_changed_by_succ, end_key,
+ &end_key_packed_size);
+
+ bool use_all_keys= false;
+ if (find_flag == HA_READ_KEY_EXACT &&
+ my_count_bits(keypart_map) == kd->get_key_parts())
+ use_all_keys= true;
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ bool is_new_snapshot= !tx->has_snapshot();
+ // Loop as long as we get a deadlock error AND we end up creating the
+ // snapshot here (i.e. it did not exist prior to this)
+ for (;;)
+ {
+ /*
+ This will open the iterator and position it at a record that's equal or
+ greater than the lookup tuple.
+ */
+ setup_scan_iterator(kd, &slice, use_all_keys, is_ascending(kd, find_flag),
+ eq_cond_len);
+
+ /*
+ Once we are positioned by the seek above, move to the position we really
+ want: see storage/rocksdb/rocksdb-range-access.txt
+ */
+ bool move_forward;
+ rc = position_to_correct_key(kd, find_flag, using_full_key, key,
+ keypart_map, slice, &move_forward);
+
+ if (rc)
+ {
+ /* This status is returned on any error */
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(rc);
+ }
+
+ m_skip_scan_it_next_call= false;
+
+ /*
+ Now get the data for the row into 'buf'. If we were using a primary key
+ then we have all the data we need. For a secondary key we now need to
+ look up the primary key.
+ */
+ if (active_index == table->s->primary_key)
+ rc= read_row_from_primary_key(buf);
+ else
+ rc= read_row_from_secondary_key(buf, kd, move_forward);
+
+ if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot)
+ break; /* Exit the loop */
+
+ // release the snapshot and iterator so they will be regenerated
+ tx->release_snapshot();
+ release_scan_iterator();
+ }
+
+ if (rc)
+ {
+ /* the only possible error condition is record-not-found */
+ table->status= STATUS_NOT_FOUND;
+ }
+ else
+ {
+ table->status= 0;
+ stats.rows_read++;
+ stats.rows_index_first++;
+ update_row_stats(ROWS_READ);
+ }
+ DBUG_RETURN(rc);
+}
+
+
+/*
+ @brief
+ Scan the secondary index until we find an index record that satisfies ICP
+
+ @param move_forward TRUE <=> move m_scan_it forward
+ FALSE <=> move m_scan_it backward
+ @param buf Record buffer (must be the same buffer that
+ pushed index condition points to, in practice
+ it is table->record[0])
+
+ @detail
+ Move the current iterator m_scan_it until we get an index tuple that
+ satisfies the pushed Index Condition.
+ (if there is no pushed index condition, return right away)
+
+ @return
+ 0 - Index tuple satisfies ICP, can do index read.
+ other - error code
+*/
+
+int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf)
+{
+ DBUG_ASSERT(buf != nullptr);
+
+ if (pushed_idx_cond && pushed_idx_cond_keyno == active_index)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[active_index];
+ DBUG_ASSERT(kd != nullptr);
+
+ while (1)
+ {
+ if (!m_scan_it->Valid())
+ {
+ table->status= STATUS_NOT_FOUND;
+ return HA_ERR_END_OF_FILE;
+ }
+ rocksdb::Slice rkey= m_scan_it->key();
+
+ if (!kd->covers_key(rkey))
+ {
+ table->status= STATUS_NOT_FOUND;
+ return HA_ERR_END_OF_FILE;
+ }
+
+ if (m_sk_match_prefix)
+ {
+ rocksdb::Slice prefix((const char*)m_sk_match_prefix,
+ m_sk_match_length);
+ if (!kd->value_matches_prefix(rkey, prefix))
+ {
+ table->status= STATUS_NOT_FOUND;
+ return HA_ERR_END_OF_FILE;
+ }
+ }
+
+ rocksdb::Slice value= m_scan_it->value();
+ if (try_keyonly_read_from_sk(buf, kd, rkey, value, RDB_INVALID_KEY_LEN))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ enum icp_result icp_status= check_index_cond();
+ if (icp_status == ICP_NO_MATCH)
+ {
+ if (move_forward)
+ m_scan_it->Next();
+ else
+ m_scan_it->Prev();
+ continue; /* Get the next (or prev) index tuple */
+ }
+ else if (icp_status == ICP_OUT_OF_RANGE)
+ {
+ /* We have walked out of range we are scanning */
+ table->status= STATUS_NOT_FOUND;
+ return HA_ERR_END_OF_FILE;
+ }
+ else /* icp_status == ICP_MATCH */
+ {
+ /* Index Condition is satisfied. We have rc==0, proceed to fetch the row. */
+ break;
+ }
+ /*
+ TODO: should we have this here, or does RocksDB handle this internally?
+ if (my_core::thd_killed(current_thd))
+ {
+ rc= HA_ERR_INTERNAL_ERROR; // doesn't matter
+ break;
+ }
+ */
+ }
+ }
+ return 0;
+}
+
+
+int ha_rocksdb::index_read_last_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map)
+{
+ return index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST);
+}
+
+
+int ha_rocksdb::check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ uint pk= pk_index(table, m_tbl_def);
+ String rowkey_copy;
+ String sec_key_copy;
+ const char* table_name = table->s->table_name.str;
+
+ // Only when debugging: don't use snapshot when reading
+ // Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ // tx->snapshot= nullptr;
+
+ bool save_verify_checksums= m_verify_checksums;
+ m_verify_checksums= true;
+
+ /* For each secondary index, check that we can get a PK value from it */
+ // NO_LINT_DEBUG
+ sql_print_information("CHECKTABLE %s: Checking table %s", table_name,
+ table_name);
+ ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true
+ ha_rows row_checksums= ha_rows(-1);
+ bool first_index= true;
+
+ for (uint keyno= 0; keyno < table->s->keys; keyno++)
+ {
+ if (keyno != pk)
+ {
+ extra(HA_EXTRA_KEYREAD);
+ ha_index_init(keyno, true);
+ ha_rows rows= 0;
+ ha_rows checksums= 0;
+ if (first_index)
+ row_checksums_at_start= m_row_checksums_checked;
+ int res;
+ // NO_LINT_DEBUG
+ sql_print_information("CHECKTABLE %s: Checking index %s", table_name,
+ table->key_info[keyno].name);
+ while (1)
+ {
+ if (!rows)
+ res= index_first(table->record[0]);
+ else
+ res= index_next(table->record[0]);
+
+ if (res == HA_ERR_END_OF_FILE)
+ break;
+ if (res)
+ {
+ // error
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: .. row %lld: index scan error %d",
+ table_name, rows, res);
+ goto error;
+ }
+ rocksdb::Slice key= m_scan_it->key();
+ sec_key_copy.copy(key.data(), key.size(), &my_charset_bin);
+ rowkey_copy.copy(m_last_rowkey.ptr(), m_last_rowkey.length(),
+ &my_charset_bin);
+
+ if (m_key_descr_arr[keyno]->
+ unpack_info_has_checksum(m_scan_it->value()))
+ {
+ checksums++;
+ }
+
+ if ((res= get_row_by_rowid(table->record[0], rowkey_copy.ptr(),
+ rowkey_copy.length())))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: .. row %lld: "
+ "failed to fetch row by rowid",
+ table_name, rows);
+ goto error;
+ }
+
+ longlong hidden_pk_id= 0;
+ if (has_hidden_pk(table) &&
+ read_hidden_pk_id_from_rowkey(&hidden_pk_id))
+ goto error;
+
+ /* Check if we get the same PK value */
+ uint packed_size= m_pk_descr->pack_record(
+ table, m_pack_buffer, table->record[0], m_pk_packed_tuple, nullptr,
+ false, hidden_pk_id);
+ if (packed_size != rowkey_copy.length() ||
+ memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: .. row %lld: PK value mismatch",
+ table_name, rows);
+ goto print_and_error;
+ }
+
+ /* Check if we get the same secondary key value */
+ packed_size= m_key_descr_arr[keyno]->pack_record(
+ table, m_pack_buffer, table->record[0], m_sk_packed_tuple,
+ &m_sk_tails, false, hidden_pk_id);
+ if (packed_size != sec_key_copy.length() ||
+ memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: .. row %lld: "
+ "secondary index value mismatch",
+ table_name, rows);
+ goto print_and_error;
+ }
+ rows++;
+ continue;
+
+print_and_error:
+ {
+ std::string buf;
+ buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), 1000);
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: rowkey: %s", table_name,
+ buf.c_str());
+
+ buf= rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(),
+ 1000);
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: record: %s", table_name,
+ buf.c_str());
+
+ buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), 1000);
+ // NO_LINT_DEBUG
+ sql_print_error("CHECKTABLE %s: index: %s", table_name,
+ buf.c_str());
+
+ goto error;
+ }
+ }
+ // NO_LINT_DEBUG
+ sql_print_information("CHECKTABLE %s: ... %lld index entries checked "
+ "(%lld had checksums)", table_name, rows,
+ checksums);
+
+ if (first_index)
+ {
+ row_checksums= m_row_checksums_checked - row_checksums_at_start;
+ first_index= false;
+ }
+ ha_index_end();
+ }
+ }
+ if (row_checksums != ha_rows(-1))
+ {
+ // NO_LINT_DEBUG
+ sql_print_information("CHECKTABLE %s: %lld table records had checksums",
+ table_name, row_checksums);
+ }
+ extra(HA_EXTRA_NO_KEYREAD);
+
+ m_verify_checksums= save_verify_checksums;
+ /*
+ TODO: we should check also for PK records that are missing in the secondary
+ indexes.
+ For that, need to walk through the PK and check that every PK record has a
+ proper counterpart in each secondary index.
+ */
+ return HA_ADMIN_OK;
+error:
+ m_verify_checksums= save_verify_checksums;
+ ha_index_or_rnd_end();
+ extra(HA_EXTRA_NO_KEYREAD);
+ return HA_ADMIN_CORRUPT;
+}
+
+
+static void dbug_dump_str(FILE *out, const char *str, int len)
+{
+ fprintf(out, "\"");
+ for (int i= 0; i < len; i++)
+ {
+ if (str[i] > 32)
+ fprintf(out, "%c", str[i]);
+ else
+ fprintf(out, "\\%d", str[i]);
+ }
+ fprintf(out, "\"");
+}
+
+
+/*
+ Debugging help: dump the whole database into a human-readable file.
+ Usage:
+ dbug_dump_database(rdb);
+*/
+
+void dbug_dump_database(rocksdb::DB *db)
+{
+ FILE *out= fopen("/tmp/rocksdb.dump", "wt");
+ if (!out)
+ return;
+
+ rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
+ for (it->SeekToFirst(); it->Valid(); it->Next()) {
+ rocksdb::Slice key= it->key();
+ rocksdb::Slice val= it->value();
+ dbug_dump_str(out, key.data(), key.size());
+ fprintf(out, " -> ");
+ dbug_dump_str(out, val.data(), val.size());
+ fprintf(out, "\n");
+ }
+
+ delete it;
+ fclose(out);
+}
+
+
+/*
+ Given a rowid (i.e. packed PK) as a parameter, get the record.
+
+ @return
+ 0 - Ok
+ other - HA_ERR error code.
+*/
+
+int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid,
+ const uint rowid_size)
+{
+ DBUG_ASSERT(buf != nullptr);
+ DBUG_ASSERT(rowid != nullptr);
+ DBUG_ASSERT(table != nullptr);
+
+ int rc;
+ DBUG_ENTER("ha_rocksdb::get_row_by_rowid");
+
+ rocksdb::Slice key_slice(rowid, rowid_size);
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ DBUG_ASSERT(tx != nullptr);
+
+ DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid");
+ DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid",
+ {
+ THD *thd= ha_thd();
+ const char act[]=
+ "now signal Reached "
+ "wait_for signal.rocksdb.get_row_by_rowid_let_running";
+ DBUG_ASSERT(opt_debug_sync_timeout > 0);
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+ bool found;
+ rocksdb::Status s;
+
+ if (m_lock_rows == RDB_LOCK_NONE)
+ {
+ tx->acquire_snapshot(true);
+ s= tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record);
+ }
+ else
+ s= tx->get_for_update(m_pk_descr->get_cf(), key_slice, &m_retrieved_record);
+
+ if (!s.IsNotFound() && !s.ok())
+ {
+ DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def));
+ }
+ found= !s.IsNotFound();
+
+ table->status= STATUS_NOT_FOUND;
+ if (found)
+ {
+ m_last_rowkey.copy((const char*)rowid, rowid_size, &my_charset_bin);
+ rc= convert_record_from_storage_format(&key_slice, buf);
+ if (!rc)
+ table->status= 0;
+ }
+ else
+ {
+ /*
+ Note: we don't need to unlock the row. It is intentional that we keep
+ locks on rows that don't exist.
+ */
+ rc= HA_ERR_KEY_NOT_FOUND;
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+int ha_rocksdb::index_next(uchar *buf)
+{
+ bool moves_forward= true;
+ ha_statistic_increment(&SSV::ha_read_next_count);
+ if (m_key_descr_arr[active_index]->m_is_reverse_cf)
+ {
+ moves_forward= false;
+ }
+
+ int rc= index_next_with_direction(buf, moves_forward);
+ if (rc == HA_ERR_KEY_NOT_FOUND)
+ rc = HA_ERR_END_OF_FILE;
+ return rc;
+}
+
+
+int ha_rocksdb::index_prev(uchar *buf)
+{
+ bool moves_forward= false;
+ ha_statistic_increment(&SSV::ha_read_prev_count);
+ if (m_key_descr_arr[active_index]->m_is_reverse_cf)
+ {
+ moves_forward= true;
+ }
+
+ int rc= index_next_with_direction(buf, moves_forward);
+ if (rc == HA_ERR_KEY_NOT_FOUND)
+ rc = HA_ERR_END_OF_FILE;
+
+ return rc;
+}
+
+
+int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward)
+{
+ int rc;
+ DBUG_ENTER("ha_rocksdb::index_next");
+
+ if (active_index == pk_index(table, m_tbl_def))
+ {
+ rc= rnd_next_with_direction(buf, move_forward);
+ }
+ else
+ {
+ if (m_skip_scan_it_next_call)
+ {
+ m_skip_scan_it_next_call= false;
+ }
+ else
+ {
+ if (move_forward)
+ m_scan_it->Next(); /* this call cannot fail */
+ else
+ m_scan_it->Prev();
+ }
+ rc= find_icp_matching_index_rec(move_forward, buf);
+ if (!rc)
+ rc= secondary_index_read(active_index, buf);
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+int ha_rocksdb::index_first(uchar *buf)
+{
+ m_sk_match_prefix= nullptr;
+ ha_statistic_increment(&SSV::ha_read_first_count);
+ int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ?
+ index_last_intern(buf) : index_first_intern(buf);
+ if (rc == HA_ERR_KEY_NOT_FOUND)
+ rc = HA_ERR_END_OF_FILE;
+ return rc;
+}
+
+
+int ha_rocksdb::index_last(uchar *buf)
+{
+ m_sk_match_prefix= nullptr;
+ ha_statistic_increment(&SSV::ha_read_last_count);
+ int rc= m_key_descr_arr[active_index]->m_is_reverse_cf ?
+ index_first_intern(buf) : index_last_intern(buf);
+ if (rc == HA_ERR_KEY_NOT_FOUND)
+ rc = HA_ERR_END_OF_FILE;
+ return rc;
+}
+
+
+/*
+ Start scanning from the "first" value.
+
+ The 'first' here means "the first from start of the key space".
+ For reverse-ordered key spaces, the first value will be the biggest, 'zzzz'.
+
+ A picture of a forward-ordered keyspace (remember, the keys have the form
+ 'indexnr-keyval'. Suppose the index we are at has number n)
+
+ (n-1) - ...
+ ( n ) <--- 1. (n) doesn't exist in the db but it would be here.
+ ( n ) - aaa <--- 2. Seek("n") will put us here on the first index
+ ( n ) - bbb record.
+ ( n ) - cc
+
+ So, need to do: Seek(n);
+
+ A backward-ordered keyspace:
+
+ (n+1) - bbb
+ (n+1) - aaa
+ (n+1) <--- (n+1) doesn't exist in the db but would be here.
+ ( n ) - ccc <--- 1. We need to be here.
+ ( n ) - bbb
+ ( n ) - aaa
+ ( n )
+
+ So, need to: Seek(n+1);
+
+*/
+
+int ha_rocksdb::index_first_intern(uchar *buf)
+{
+ DBUG_ASSERT(buf != nullptr);
+
+ uchar *key;
+ uint key_size;
+ int rc;
+ DBUG_ENTER("ha_rocksdb::index_first");
+
+ if (is_pk(active_index, table, m_tbl_def))
+ {
+ key= m_pk_packed_tuple;
+ }
+ else
+ {
+ key= m_sk_packed_tuple;
+ }
+
+ DBUG_ASSERT(key != nullptr);
+
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[active_index];
+ if (kd->m_is_reverse_cf)
+ {
+ kd->get_supremum_key(key, &key_size);
+ }
+ else
+ {
+ kd->get_infimum_key(key, &key_size);
+ }
+
+ rocksdb::Slice index_key((const char*)key, key_size);
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ DBUG_ASSERT(tx != nullptr);
+
+ bool is_new_snapshot= !tx->has_snapshot();
+ // Loop as long as we get a deadlock error AND we end up creating the
+ // snapshot here (i.e. it did not exist prior to this)
+ for (;;)
+ {
+ setup_scan_iterator(kd, &index_key, false, !kd->m_is_reverse_cf,
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+ m_skip_scan_it_next_call= true;
+
+ rc= index_next_with_direction(buf, true);
+ if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot)
+ break; // exit the loop
+
+ // release the snapshot and iterator so they will be regenerated
+ tx->release_snapshot();
+ release_scan_iterator();
+ }
+
+ if (!rc)
+ {
+ /*
+ stats.rows_index_next is always incremented on success, so decrement it
+ here since this is index_first, not index_next
+ */
+ stats.rows_index_first++;
+ stats.rows_index_next--;
+ }
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @details
+ Start scanning from the "last" value
+
+ The 'last' here means "the last from start of the key space".
+ For reverse-ordered key spaces, we will actually read the smallest value.
+
+ A picture of a forward-ordered keyspace (remember, the keys have the form
+ 'indexnr-keyval'. Suppose we are at a key that has number n)
+
+ (n-1)-something
+ ( n )-aaa
+ ( n )-bbb
+ ( n )-ccc <----------- Need to seek to here.
+ (n+1) <---- Doesn't exist, but would be here.
+ (n+1)-smth, or no value at all
+
+ RocksDB's Iterator::Seek($val) seeks to "at $val or first value that's
+ greater". We can't see to "(n)-ccc" directly, because we don't know what
+ is the value of 'ccc' (the biggest record with prefix (n)). Instead, we seek
+ to "(n+1)", which is the least possible value that's greater than any value
+ in index #n. Then we step one record back.
+
+ So, need to: it->Seek(n+1) || it->SeekToLast(); it->Prev();
+
+ A backward-ordered keyspace:
+
+ (n+1)-something
+ ( n ) - ccc
+ ( n ) - bbb
+ ( n ) - aaa <---------------- (*) Need to seek here.
+ ( n ) <--- Doesn't exist, but would be here.
+ (n-1)-smth, or no value at all
+
+ So, need to: it->Seek(n) || it->SeekToLast(); it->Prev();
+*/
+
+int ha_rocksdb::index_last_intern(uchar *buf)
+{
+ DBUG_ASSERT(buf != nullptr);
+
+ uchar *key;
+ uint key_size;
+ int rc;
+ DBUG_ENTER("ha_rocksdb::index_last");
+
+ if (is_pk(active_index, table, m_tbl_def))
+ {
+ key= m_pk_packed_tuple;
+ }
+ else
+ {
+ key= m_sk_packed_tuple;
+ }
+
+ DBUG_ASSERT(key != nullptr);
+
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[active_index];
+ if (kd->m_is_reverse_cf)
+ {
+ kd->get_infimum_key(key, &key_size);
+ }
+ else
+ {
+ kd->get_supremum_key(key, &key_size);
+ }
+
+ rocksdb::Slice index_key((const char*)key, key_size);
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ DBUG_ASSERT(tx != nullptr);
+
+ bool is_new_snapshot= !tx->has_snapshot();
+ // Loop as long as we get a deadlock error AND we end up creating the
+ // snapshot here (i.e. it did not exist prior to this)
+ for (;;)
+ {
+ setup_scan_iterator(kd, &index_key, false, kd->m_is_reverse_cf,
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+
+ if (!m_scan_it->Valid())
+ {
+ /*
+ The RocksDB keyspace has no records that follow our index; our index could
+ be the last one in the keyspace. Navigate to the last record in the
+ keyspace.
+ */
+ m_scan_it->SeekToLast();
+ }
+ else
+ {
+ /*
+ Iterator is pointing at some record right after the end of the index.
+ Move one record back.
+ */
+ m_scan_it->Prev();
+ }
+ m_skip_scan_it_next_call= false;
+
+ if (is_pk(active_index, table, m_tbl_def))
+ {
+ m_skip_scan_it_next_call= true;
+ rc= rnd_next_with_direction(buf, false);
+ }
+ else
+ {
+ rc= find_icp_matching_index_rec(false /*move_forward*/, buf);
+ if (!rc)
+ rc= secondary_index_read(active_index, buf);
+ }
+
+ if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot)
+ break; /* exit the loop */
+
+ // release the snapshot and iterator so they will be regenerated
+ tx->release_snapshot();
+ release_scan_iterator();
+ }
+
+ if (!rc)
+ {
+ /*
+ stats.rows_index_next is always incremented on success, so decrement it
+ here since this is index_last, not index_next
+ */
+ stats.rows_index_first++;
+ stats.rows_index_next--;
+ }
+ DBUG_RETURN(rc);
+}
+
+
+void ha_rocksdb::unlock_row()
+{
+ if (m_lock_rows != RDB_LOCK_NONE)
+ {
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ tx->release_lock(m_pk_descr->get_cf(), std::string(m_last_rowkey.ptr(),
+ m_last_rowkey.length()));
+ }
+}
+
+/*
+ Returns true if SingleDelete can be used.
+ - Secondary indexes can always use SingleDelete.
+ - If the index is the PRIMARY KEY, and all of the columns of the table
+ are covered by the PRIMARY KEY, SingleDelete can be used.
+*/
+bool ha_rocksdb::can_use_single_delete(uint index)
+{
+ return (index != pk_index(table, m_tbl_def) ||
+ (!has_hidden_pk(table) &&
+ table->key_info[index].actual_key_parts == table->s->fields));
+}
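+
+/*
+ Illustrative example (not part of the original code):
+
+ CREATE TABLE t (a INT PRIMARY KEY, b INT, KEY kb (b)) ENGINE=ROCKSDB;
+
+ Deletes through index kb can use SingleDelete, while deletes through the
+ PRIMARY KEY cannot, because column b is not covered by the PRIMARY KEY
+ (actual_key_parts != number of table fields in the check above).
+*/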
+
+bool ha_rocksdb::skip_unique_check()
+{
+ /*
+ We want to skip unique checks if:
+ 1) bulk_load is on
+ 2) this table is in the whitelist of tables to skip and the replication
+ lag has reached a large enough value (see unique_check_lag_threshold
+ and unique_check_lag_reset_threshold)
+ 3) the user set rocksdb_skip_unique_check
+ */
+ return THDVAR(table->in_use, bulk_load) ||
+ (m_force_skip_unique_check && m_skip_unique_check) ||
+ THDVAR(table->in_use, skip_unique_check);
+}
+
+void ha_rocksdb::set_force_skip_unique_check(bool skip)
+{
+ m_force_skip_unique_check= skip;
+}
+
+bool ha_rocksdb::commit_in_the_middle()
+{
+ return THDVAR(table->in_use, bulk_load) ||
+ THDVAR(table->in_use, commit_in_the_middle);
+}
+
+/*
+ Executes a bulk commit if one is needed.
+ @retval true if bulk commit failed
+ @retval false if bulk commit was skipped or succeeded
+*/
+bool ha_rocksdb::do_bulk_commit(Rdb_transaction *tx)
+{
+ DBUG_ASSERT(tx != nullptr);
+ return commit_in_the_middle() &&
+ tx->get_write_count() >= THDVAR(table->in_use, bulk_load_size) &&
+ tx->flush_batch();
+}
+
+/*
+ If the table was created without a primary key, the SQL layer represents
+ the primary key number as MAX_INDEXES. Hence, this function returns true
+ if the table does not contain a primary key. (In that case we generate a
+ hidden 'auto-incremented' pk.)
+*/
+bool ha_rocksdb::has_hidden_pk(const TABLE* table)
+{
+ DBUG_ASSERT(table != nullptr);
+ return Rdb_key_def::table_has_hidden_pk(table);
+}
+
+/*
+ Returns true if given index number is a hidden_pk.
+ - This is used when a table is created with no primary key.
+*/
+bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(table_arg->s != nullptr);
+ DBUG_ASSERT(tbl_def_arg != nullptr);
+
+ return (table_arg->s->primary_key == MAX_INDEXES &&
+ index == tbl_def_arg->m_key_count - 1);
+}
+
+/* Returns index of primary key */
+uint ha_rocksdb::pk_index(const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(table_arg->s != nullptr);
+ DBUG_ASSERT(tbl_def_arg != nullptr);
+
+ return table_arg->s->primary_key == MAX_INDEXES ?
+ tbl_def_arg->m_key_count - 1 :
+ table_arg->s->primary_key;
+}
+
+/* Returns true if given index number is a primary key */
+bool ha_rocksdb::is_pk(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def *tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+ DBUG_ASSERT(table_arg->s != nullptr);
+ DBUG_ASSERT(tbl_def_arg != nullptr);
+
+ return index == table_arg->s->primary_key ||
+ is_hidden_pk(index, table_arg, tbl_def_arg);
+}
+
+const char* ha_rocksdb::get_key_name(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+
+ if (is_hidden_pk(index, table_arg, tbl_def_arg))
+ {
+ return HIDDEN_PK_NAME;
+ }
+
+ return table_arg->key_info[index].name;
+}
+
+const char* ha_rocksdb::get_key_comment(const uint index,
+ const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+{
+ DBUG_ASSERT(table_arg != nullptr);
+
+ if (is_hidden_pk(index, table_arg, tbl_def_arg))
+ {
+ return nullptr;
+ }
+
+ return table_arg->key_info[index].comment.str;
+}
+
+int ha_rocksdb::write_row(uchar *buf)
+{
+ DBUG_ENTER("ha_rocksdb::write_row");
+ DBUG_ASSERT(buf == table->record[0]);
+ DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE);
+ ha_statistic_increment(&SSV::ha_write_count);
+ /*
+ Note: "buf == table->record[0]" is copied from innodb. I am not aware of
+ any use cases where this condition is not true.
+ */
+ if (table->next_number_field && buf == table->record[0])
+ {
+ int err;
+ if ((err= update_auto_increment()))
+ {
+ DBUG_RETURN(err);
+ }
+ }
+
+ int rv= update_write_row(nullptr, buf, skip_unique_check());
+
+ if (rv == 0)
+ {
+ stats.rows_inserted++;
+ update_row_stats(ROWS_INSERTED);
+ }
+ DBUG_RETURN(rv);
+}
+
+
+/**
+ Constructing m_last_rowkey (MyRocks key expression) from
+ before_update|delete image (MySQL row expression).
+ m_last_rowkey is normally set during lookup phase, such as
+ rnd_next_with_direction() and rnd_pos(). With Read Free Replication,
+ these read functions are skipped and update_rows(), delete_rows() are
+ called without setting m_last_rowkey. This function sets m_last_rowkey
+ for Read Free Replication.
+*/
+void ha_rocksdb::set_last_rowkey(const uchar *old_data)
+{
+ if (old_data && use_read_free_rpl())
+ {
+ int old_pk_size= m_pk_descr->pack_record(
+ table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false);
+ m_last_rowkey.copy((const char*)m_pk_packed_tuple, old_pk_size,
+ &my_charset_bin);
+ }
+}
+
+int ha_rocksdb::get_pk_for_update(struct update_row_info* row_info)
+{
+ int size;
+
+ /*
+ Get new row key for any insert, and any update where the pk is not hidden.
+ Row key for updates with hidden pk is handled below.
+ */
+ if (!has_hidden_pk(table))
+ {
+ row_info->hidden_pk_id= 0;
+
+ row_info->new_pk_unpack_info= &m_pk_unpack_info;
+
+ size= m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data,
+ m_pk_packed_tuple,
+ row_info->new_pk_unpack_info,
+ false);
+ }
+ else if (row_info->old_data == nullptr)
+ {
+ row_info->hidden_pk_id= update_hidden_pk_val();
+ size= m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple);
+ }
+ else
+ {
+ /*
+ With a hidden primary key, the rowkey for the new record will always be
+ the same as before
+ */
+ size= row_info->old_pk_slice.size();
+ memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size);
+ if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ }
+
+ row_info->new_pk_slice= rocksdb::Slice((const char*)m_pk_packed_tuple, size);
+
+ return 0;
+}
+
+int ha_rocksdb::check_and_lock_unique_pk(uint key_id,
+ const struct update_row_info& row_info,
+ bool* found, bool* pk_changed)
+{
+ DBUG_ASSERT(found != nullptr);
+ DBUG_ASSERT(pk_changed != nullptr);
+
+ *pk_changed= false;
+
+ /*
+ For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs
+ always require locking.
+ */
+ if (row_info.old_pk_slice.size() > 0)
+ {
+ /*
+ If the keys are the same, then no lock is needed
+ */
+ if (!Rdb_pk_comparator::bytewise_compare(row_info.new_pk_slice,
+ row_info.old_pk_slice))
+ {
+ *found= false;
+ return 0;
+ }
+
+ *pk_changed= true;
+ }
+
+ /*
+ Perform a read to determine if a duplicate entry exists. For primary
+ keys, a point lookup will be sufficient.
+
+ note: we intentionally don't set options.snapshot here. We want to read
+ the latest committed data.
+ */
+
+ /*
+ To prevent race conditions like the ones below, it is necessary to
+ take a lock on the target row. get_for_update() holds a gap lock if the
+ target key does not exist, so the conditions below should never
+ happen.
+
+ 1) T1 Get(empty) -> T2 Get(empty) -> T1 Put(insert) -> T1 commit
+ -> T2 Put(overwrite) -> T2 commit
+ 2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty)
+ -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite)
+ */
+ rocksdb::Status s= row_info.tx->get_for_update(m_pk_descr->get_cf(),
+ row_info.new_pk_slice,
+ &m_retrieved_record);
+ if (!s.ok() && !s.IsNotFound())
+ {
+ return row_info.tx->set_status_error(table->in_use, s,
+ m_key_descr_arr[key_id], m_tbl_def);
+ }
+
+ *found= !s.IsNotFound();
+ return 0;
+}
+
+int ha_rocksdb::check_and_lock_sk(uint key_id,
+ const struct update_row_info& row_info,
+ bool* found) const
+{
+ DBUG_ASSERT(found != nullptr);
+ *found= false;
+
+ /*
+ Can skip checking this key if none of the key fields have changed.
+ */
+ if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id))
+ {
+ return 0;
+ }
+
+ KEY* key_info= nullptr;
+ uint n_null_fields= 0;
+ uint user_defined_key_parts= 1;
+
+ key_info= &table->key_info[key_id];
+ user_defined_key_parts= key_info->user_defined_key_parts;
+ /*
+ If there are no uniqueness requirements, there's no need to obtain a
+ lock for this key.
+ */
+ if (!(key_info->flags & HA_NOSAME))
+ {
+ return 0;
+ }
+
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[key_id];
+
+ /*
+ Calculate the new key for obtaining the lock
+
+ For unique secondary indexes, the key used for locking does not
+ include the extended fields.
+ */
+ int size= kd->pack_record(table, m_pack_buffer, row_info.new_data,
+ m_sk_packed_tuple, nullptr, false, 0,
+ user_defined_key_parts, &n_null_fields);
+ if (n_null_fields > 0)
+ {
+ /*
+      If any fields are marked as NULL, this will never match another row,
+      because NULL never matches anything else, including another NULL.
+ */
+ return 0;
+ }
+
+ rocksdb::Slice new_slice= rocksdb::Slice((const char*)m_sk_packed_tuple,
+ size);
+
+ /*
+ For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs
+ always require locking.
+ */
+ if (row_info.old_data != nullptr)
+ {
+ size= kd->pack_record(table, m_pack_buffer, row_info.old_data,
+ m_sk_packed_tuple_old, nullptr, false,
+ row_info.hidden_pk_id,
+ user_defined_key_parts);
+ rocksdb::Slice old_slice= rocksdb::Slice(
+ (const char*)m_sk_packed_tuple_old, size);
+
+ /*
+ For updates, if the keys are the same, then no lock is needed
+
+      Also check to see if the key has any fields set to NULL. If it does,
+      then this key is unique, since NULL values are never equal to each
+      other, so no lock is needed.
+ */
+ if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice))
+ {
+ return 0;
+ }
+ }
+
+ /*
+    Perform a read to determine if a duplicate entry exists - since this is
+    a secondary index, a range scan is needed.
+
+ note: we intentionally don't set options.snapshot here. We want to read
+ the latest committed data.
+ */
+
+ bool all_parts_used= (user_defined_key_parts == kd->get_key_parts());
+
+ /*
+ This iterator seems expensive since we need to allocate and free
+ memory for each unique index.
+
+ If this needs to be optimized, for keys without NULL fields, the
+ extended primary key fields can be migrated to the value portion of the
+ key. This enables using Get() instead of Seek() as in the primary key
+ case.
+
+ The bloom filter may need to be disabled for this lookup.
+ */
+ bool total_order_seek=
+ !can_use_bloom_filter(ha_thd(), kd, new_slice, all_parts_used,
+ is_ascending(m_key_descr_arr[key_id],
+ HA_READ_KEY_EXACT));
+ bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache);
+
+ /*
+    psergey-todo: we only need to take the lock here; the lookup itself is
+    not needed:
+ */
+ std::string dummy_value;
+ rocksdb::Status s= row_info.tx->get_for_update(kd->get_cf(), new_slice,
+ &dummy_value);
+ if (!s.ok() && !s.IsNotFound())
+ {
+ return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def);
+ }
+
+ rocksdb::Iterator *iter= row_info.tx->get_iterator(kd->get_cf(),
+ total_order_seek,
+ fill_cache,
+ true /* read current data */,
+ false /* acquire snapshot */);
+ /*
+ Need to scan the transaction to see if there is a duplicate key.
+ Also need to scan RocksDB and verify the key has not been deleted
+ in the transaction.
+ */
+ iter->Seek(new_slice);
+ *found= !read_key_exact(kd, iter, all_parts_used, new_slice);
+ delete iter;
+
+ return 0;
+}
+
+int ha_rocksdb::check_uniqueness_and_lock(
+ const struct update_row_info& row_info,
+ bool* pk_changed)
+{
+ /*
+ Go through each index and determine if the index has uniqueness
+ requirements. If it does, then try to obtain a row lock on the new values.
+ Once all locks have been obtained, then perform the changes needed to
+ update/insert the row.
+ */
+ for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++)
+ {
+ bool found;
+ int rc;
+
+ if (is_pk(key_id, table, m_tbl_def))
+ {
+ rc= check_and_lock_unique_pk(key_id, row_info, &found, pk_changed);
+ }
+ else
+ {
+ rc= check_and_lock_sk(key_id, row_info, &found);
+ }
+
+ if (rc != 0)
+ {
+ return rc;
+ }
+
+ if (found)
+ {
+ /* There is a row with this key already, so error out. */
+ errkey= key_id;
+ m_dupp_errkey= errkey;
+ return HA_ERR_FOUND_DUPP_KEY;
+ }
+ }
+
+ return 0;
+}
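+
+/*
+  Example (illustrative) for check_uniqueness_and_lock() above: with
+  UNIQUE KEY(a), two sessions both executing INSERT INTO t1 VALUES (1)
+  reach check_and_lock_sk(). The second session blocks inside
+  get_for_update() on the key locked by the first one; once the first
+  session commits, the second one sees the committed entry and returns
+  HA_ERR_FOUND_DUPP_KEY instead of silently overwriting it.
+*/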
+
+int ha_rocksdb::bulk_load_key(Rdb_transaction* tx,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value)
+{
+ rocksdb::ColumnFamilyHandle* cf= kd->get_cf();
+ DBUG_ASSERT(cf != nullptr);
+
+ if (m_sst_info == nullptr)
+ {
+ m_sst_info=
+ std::make_shared<Rdb_sst_info>(rdb, m_table_handler->m_table_name,
+ kd->get_name(), cf, rocksdb_db_options);
+ tx->start_bulk_load(this);
+ }
+
+ DBUG_ASSERT(m_sst_info != nullptr);
+
+ int rc= m_sst_info->put(key, value);
+ if (rc != 0)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Failed to add a key to sst file writer(%s)", MYF(0),
+ m_sst_info->error_message().c_str());
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+
+ return rc;
+}
+
+int ha_rocksdb::finalize_bulk_load()
+{
+ int rc= 0;
+
+ if (m_sst_info != nullptr)
+ {
+ rc= m_sst_info->commit();
+ if (rc != 0)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Failed to commit bulk loaded sst file to the "
+ "data store (%s)",
+ MYF(0), m_sst_info->error_message().c_str());
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+
+ m_sst_info= nullptr;
+
+ Rdb_transaction *tx= get_or_create_tx(ha_thd());
+ if (tx != nullptr)
+ {
+ tx->end_bulk_load(this);
+ }
+ }
+
+ return rc;
+}
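+
+/*
+  Informal note on the bulk load path above: bulk_load_key() lazily creates
+  one Rdb_sst_info per handler and registers the handler with the transaction
+  through start_bulk_load(); finalize_bulk_load() later commits the SST file
+  and ends the registration. In the write path below it is invoked from
+  update_pk() when rocksdb_enable_bulk_load_api and the session's bulk_load
+  variable are both enabled and the table has a real (non-hidden) primary
+  key.
+*/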
+
+int ha_rocksdb::update_pk(const std::shared_ptr<const Rdb_key_def>& kd,
+ const struct update_row_info& row_info,
+ bool pk_changed)
+{
+ uint key_id= kd->get_keyno();
+ bool hidden_pk= is_hidden_pk(key_id, table, m_tbl_def);
+ if (!hidden_pk && pk_changed)
+ {
+ /*
+ The old key needs to be deleted.
+ */
+ rocksdb::Status s= delete_or_singledelete(key_id, row_info.tx, kd->get_cf(),
+ row_info.old_pk_slice);
+ if (!s.ok())
+ {
+ return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def);
+ }
+ }
+
+ if (table->next_number_field)
+ {
+ update_auto_incr_val();
+ }
+
+ rocksdb::Slice value_slice;
+ convert_record_to_storage_format(row_info.new_pk_slice,
+ row_info.new_pk_unpack_info,
+ &value_slice);
+
+ int rc= 0;
+ auto cf= m_pk_descr->get_cf();
+ if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) &&
+ !hidden_pk)
+ {
+ /*
+ Write the primary key directly to an SST file using an SstFileWriter
+ */
+ rc= bulk_load_key(row_info.tx, kd, row_info.new_pk_slice,
+ value_slice);
+ }
+ else if (row_info.skip_unique_check)
+ {
+ /*
+      It is the responsibility of the user to make sure that the data being
+ inserted doesn't violate any unique keys.
+ */
+ row_info.tx->get_blind_write_batch()->Put(cf, row_info.new_pk_slice,
+ value_slice);
+ }
+ else if (row_info.tx->m_ddl_transaction)
+ {
+ /*
+ DDL statement must check for unique key conflicts. For example:
+ ALTER TABLE tbl DROP PRIMARY KEY, ADD PRIMARY KEY(non_unique_column)
+ */
+ row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice,
+ value_slice);
+ }
+ else
+ {
+ auto s= row_info.tx->put(cf, row_info.new_pk_slice, value_slice);
+ if (!s.ok())
+ {
+ if (s.IsBusy())
+ {
+ errkey= table->s->primary_key;
+ m_dupp_errkey= errkey;
+ rc = HA_ERR_FOUND_DUPP_KEY;
+ }
+ else
+ {
+ rc = row_info.tx->set_status_error(table->in_use, s, m_pk_descr,
+ m_tbl_def);
+ }
+ }
+ }
+
+ return rc;
+}
+
+int ha_rocksdb::update_sk(const TABLE* table_arg,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const struct update_row_info& row_info)
+{
+ int new_packed_size;
+ int old_packed_size;
+
+ rocksdb::Slice new_key_slice;
+ rocksdb::Slice new_value_slice;
+ rocksdb::Slice old_key_slice;
+
+ uint key_id = kd->get_keyno();
+ /*
+ Can skip updating this key if none of the key fields have changed.
+ */
+ if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id))
+ {
+ return 0;
+ }
+
+ bool store_checksums= should_store_checksums();
+
+ new_packed_size= kd->pack_record(table_arg, m_pack_buffer, row_info.new_data,
+ m_sk_packed_tuple, &m_sk_tails,
+ store_checksums,
+ row_info.hidden_pk_id);
+
+ if (row_info.old_data != nullptr)
+ {
+ // The old value
+ old_packed_size= kd->pack_record(table_arg, m_pack_buffer,
+ row_info.old_data,
+ m_sk_packed_tuple_old, &m_sk_tails_old,
+ store_checksums,
+ row_info.hidden_pk_id);
+
+ /*
+ Check if we are going to write the same value. This can happen when
+ one does
+ UPDATE tbl SET col='foo'
+ and we are looking at the row that already has col='foo'.
+
+      We also need to compare the unpack info. Suppose the collation is
+      case-insensitive, and unpack info contains information about whether
+      the letters were uppercase or lowercase. Then, both 'foo' and 'FOO'
+ will have the same key value, but different data in unpack_info.
+
+ (note: anyone changing bytewise_compare should take this code into
+ account)
+ */
+ if (old_packed_size == new_packed_size &&
+ m_sk_tails_old.get_current_pos() == m_sk_tails.get_current_pos() &&
+ memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple,
+ old_packed_size) == 0 &&
+ memcmp(m_sk_tails_old.ptr(), m_sk_tails.ptr(),
+ m_sk_tails.get_current_pos()) == 0)
+ {
+ return 0;
+ }
+
+ /*
+ Deleting entries from secondary index should skip locking, but
+ be visible to the transaction.
+ (also note that DDL statements do not delete rows, so this is not a DDL
+ statement)
+ */
+ old_key_slice= rocksdb::Slice(
+ reinterpret_cast<const char*>(m_sk_packed_tuple_old),
+ old_packed_size);
+
+ row_info.tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(),
+ old_key_slice);
+ }
+
+ /*
+ We're writing a new entry for secondary key. We can skip locking; we
+ should write to
+ - WriteBatchWithIndex normally (so that transaction sees the new row)
+ - non-indexed WriteBatch, when we don't need to see the new row:
+ = when doing a DDL operation and writing to a non-unique index, or
+ = when doing a bulk load
+ */
+ rocksdb::WriteBatchBase *write_batch;
+ if ((row_info.tx->m_ddl_transaction &&
+ !(table_arg->key_info[key_id].flags & HA_NOSAME)) ||
+ row_info.skip_unique_check)
+ {
+ write_batch= row_info.tx->get_blind_write_batch();
+ }
+ else
+ {
+ write_batch= row_info.tx->get_indexed_write_batch();
+ }
+
+ new_key_slice= rocksdb::Slice(
+ reinterpret_cast<const char*>(m_sk_packed_tuple), new_packed_size);
+ new_value_slice= rocksdb::Slice(
+ reinterpret_cast<const char*>(m_sk_tails.ptr()),
+ m_sk_tails.get_current_pos());
+
+ write_batch->Put(kd->get_cf(), new_key_slice, new_value_slice);
+
+ return 0;
+}
+
+int ha_rocksdb::update_indexes(const struct update_row_info& row_info,
+ bool pk_changed)
+{
+ int rc;
+
+ for (uint key_id= 0; key_id < m_tbl_def->m_key_count; key_id++)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[key_id];
+ if (is_pk(key_id, table, m_tbl_def))
+ {
+ rc= update_pk(kd, row_info, pk_changed);
+ }
+ else
+ {
+ rc= update_sk(table, kd, row_info);
+ }
+
+ if (rc != 0)
+ {
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int ha_rocksdb::update_write_row(const uchar *old_data,
+ const uchar *new_data,
+ const bool skip_unique_check)
+{
+ bool pk_changed= false;
+ struct update_row_info row_info;
+
+ row_info.old_data= old_data;
+ row_info.new_data= new_data;
+ row_info.skip_unique_check= skip_unique_check;
+ row_info.new_pk_unpack_info= nullptr;
+
+ DBUG_ENTER("ha_rocksdb::write_row_common");
+
+ set_last_rowkey(old_data);
+
+ row_info.tx= get_or_create_tx(table->in_use);
+
+ if (old_data != nullptr)
+ {
+ row_info.old_pk_slice= rocksdb::Slice(m_last_rowkey.ptr(),
+ m_last_rowkey.length());
+
+ /* Determine which indexes need updating. */
+ calc_updated_indexes();
+ }
+
+ /*
+ Get the new row key into row_info.new_pk_slice
+ */
+ int rc= get_pk_for_update(&row_info);
+ if (rc != 0)
+ {
+ DBUG_RETURN(rc);
+ }
+
+ if (!skip_unique_check)
+ {
+ /*
+ Check to see if we are going to have failures because of unique
+ keys. Also lock the appropriate key values.
+ */
+ rc= check_uniqueness_and_lock(row_info, &pk_changed);
+ if (rc != 0)
+ {
+ DBUG_RETURN(rc);
+ }
+ }
+
+ DEBUG_SYNC(ha_thd(), "rocksdb.update_write_row_after_unique_check");
+
+ /*
+ At this point, all locks have been obtained, and all checks for duplicate
+ keys have been performed. No further errors can be allowed to occur from
+ here because updates to the transaction will be made and those updates
+ cannot be easily removed without rolling back the entire transaction.
+ */
+ rc= update_indexes(row_info, pk_changed);
+ if (rc != 0)
+ {
+ DBUG_RETURN(rc);
+ }
+
+ if (do_bulk_commit(row_info.tx))
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ DBUG_RETURN(0);
+}
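+
+/*
+  Informal overview of the write path above: set_last_rowkey() remembers the
+  old row key, get_pk_for_update() builds the new primary key,
+  check_uniqueness_and_lock() locks and checks every unique key (unless
+  unique checks are skipped), and update_indexes() then writes the primary
+  key through update_pk() and every secondary key through update_sk(),
+  followed by do_bulk_commit().
+*/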
+
+
+/*
+ Open a cursor and position it at the passed record
+*/
+
+void ha_rocksdb::setup_scan_iterator(
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ rocksdb::Slice *slice,
+ const bool use_all_keys,
+ const bool is_ascending,
+ const uint eq_cond_len)
+{
+ DBUG_ASSERT(kd != nullptr);
+ DBUG_ASSERT(slice != nullptr);
+ DBUG_ASSERT(slice->size() >= eq_cond_len);
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+
+ bool skip_bloom= true;
+
+ rocksdb::Slice eq_cond(slice->data(), eq_cond_len);
+ if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending))
+ {
+ skip_bloom= false;
+ }
+
+ /*
+    In some cases, setup_scan_iterator() is called multiple times from the
+    same query, but the bloom filter cannot always be used.
+    Consider the following example, where id2 is VARCHAR(30) and the table
+    has PRIMARY KEY (id1, id2):
+      select count(*) from t2
+      WHERE id1=100 and id2 IN ('00000000000000000000', '100');
+    Here setup_scan_iterator() is called twice: the first time for
+    (id1, id2)=(100, '00000000000000000000') and the second time for
+    (100, '100').
+    If the prefix bloom filter length is 24 bytes, the bloom filter can be
+    used for the first condition but not for the second one.
+    When the bloom filter condition changes, the iterator currently has to
+    be destroyed and re-created.
+ */
+ if (m_scan_it_skips_bloom != skip_bloom)
+ {
+ release_scan_iterator();
+ }
+
+ /*
+ SQL layer can call rnd_init() multiple times in a row.
+ In that case, re-use the iterator, but re-position it at the table start.
+ */
+ if (!m_scan_it)
+ {
+ bool fill_cache= !THDVAR(ha_thd(), skip_fill_cache);
+ m_scan_it= tx->get_iterator(kd->get_cf(), skip_bloom, fill_cache);
+ m_scan_it_skips_bloom= skip_bloom;
+ }
+ /*
+    Seek() will "Position at the first key in the source that is at or past
+    target". The operation cannot fail.
+ */
+ m_scan_it->Seek(*slice);
+}
+
+
+void ha_rocksdb::setup_iterator_for_rnd_scan()
+{
+ uint key_size;
+
+ if (m_pk_descr->m_is_reverse_cf)
+ m_pk_descr->get_supremum_key(m_pk_packed_tuple, &key_size);
+ else
+ m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size);
+
+ rocksdb::Slice table_key((const char*)m_pk_packed_tuple, key_size);
+
+ setup_scan_iterator(m_pk_descr, &table_key);
+ m_skip_scan_it_next_call= true;
+}
+
+
+int ha_rocksdb::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_rocksdb::rnd_init");
+
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ setup_read_decoders();
+
+ if (scan)
+ {
+ m_rnd_scan_is_new_snapshot= !tx->has_snapshot();
+ setup_iterator_for_rnd_scan();
+ }
+ else
+ {
+ /* We don't need any preparations for rnd_pos() calls. */
+ }
+
+ // If m_lock_rows is on then we will be doing a get_for_update when accessing
+ // the index, so don't acquire the snapshot right away. Otherwise acquire
+ // the snapshot immediately.
+ tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE);
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_rocksdb::rnd_next(uchar *buf)
+{
+ int rc;
+ ha_statistic_increment(&SSV::ha_read_rnd_next_count);
+ for (;;)
+ {
+ rc= rnd_next_with_direction(buf, true);
+ if (rc != HA_ERR_LOCK_DEADLOCK || !m_rnd_scan_is_new_snapshot)
+ break; /* exit the loop */
+
+ // release the snapshot and iterator and then regenerate them
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ tx->release_snapshot();
+ release_scan_iterator();
+ setup_iterator_for_rnd_scan();
+ }
+
+ m_rnd_scan_is_new_snapshot= false;
+
+ if (rc == HA_ERR_KEY_NOT_FOUND)
+ rc = HA_ERR_END_OF_FILE;
+ return rc;
+}
+
+
+/*
+ See also secondary_index_read().
+*/
+int ha_rocksdb::rnd_next_with_direction(uchar *buf, bool move_forward)
+{
+ int rc;
+ DBUG_ENTER("ha_rocksdb::rnd_next");
+
+ table->status= STATUS_NOT_FOUND;
+ stats.rows_requested++;
+
+ if (!m_scan_it || !m_scan_it->Valid())
+ {
+ /*
+ We can get here when SQL layer has called
+
+ h->index_init(PRIMARY);
+ h->index_read_map(full index tuple, HA_READ_KEY_EXACT);
+
+ In this case, we should return EOF.
+ */
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ }
+
+ if (m_skip_scan_it_next_call)
+ {
+ m_skip_scan_it_next_call= false;
+ }
+ else
+ {
+ if (move_forward)
+ m_scan_it->Next(); /* this call cannot fail */
+ else
+ m_scan_it->Prev(); /* this call cannot fail */
+ }
+
+ if (m_scan_it->Valid())
+ {
+ /* check if we're out of this table */
+ rocksdb::Slice key= m_scan_it->key();
+ if (!m_pk_descr->covers_key(key))
+ {
+ rc= HA_ERR_END_OF_FILE;
+ }
+ else
+ {
+ if (m_lock_rows != RDB_LOCK_NONE)
+ {
+ /*
+ Lock the row we've just read.
+
+ Now we call get_for_update which will 1) Take a lock and 2) Will fail
+ if the row was deleted since the snapshot was taken.
+ */
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete");
+ rocksdb::Status s= tx->get_for_update(m_pk_descr->get_cf(), key,
+ &m_retrieved_record);
+ if (!s.ok())
+ {
+ DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr,
+ m_tbl_def));
+ }
+
+ // If we called get_for_update() use the value from that call not from
+ // the iterator as it may be stale since we don't have a snapshot
+ // when m_lock_rows is not RDB_LOCK_NONE.
+ m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin);
+ rc= convert_record_from_storage_format(&key, buf);
+ }
+ else
+ {
+ // Use the value from the iterator
+ rocksdb::Slice value= m_scan_it->value();
+ m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin);
+ rc= convert_record_from_storage_format(&key, &value, buf);
+ }
+
+ table->status= 0;
+ }
+ }
+ else
+ rc= HA_ERR_END_OF_FILE;
+
+ if (!rc) {
+ stats.rows_read++;
+ stats.rows_index_next++;
+ update_row_stats(ROWS_READ);
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+int ha_rocksdb::rnd_end()
+{
+ DBUG_ENTER("ha_rocksdb::rnd_end");
+
+ release_scan_iterator();
+
+ DBUG_RETURN(0);
+}
+
+int ha_rocksdb::index_init(uint idx, bool sorted)
+{
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+ DBUG_ASSERT(tx != nullptr);
+
+ setup_read_decoders();
+
+ // If m_lock_rows is not RDB_LOCK_NONE then we will be doing a get_for_update
+ // when accessing the index, so don't acquire the snapshot right away.
+ // Otherwise acquire the snapshot immediately.
+ tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE);
+
+ active_index= idx;
+
+ return 0;
+}
+
+int ha_rocksdb::index_end()
+{
+ release_scan_iterator();
+
+ active_index= MAX_KEY;
+ in_range_check_pushed_down= FALSE;
+ return 0;
+}
+
+
+int ha_rocksdb::delete_all_rows(ha_rows* nrows)
+{
+ DBUG_ENTER("ha_rocksdb::delete_all_rows");
+ /*
+ We will have to delete records one-by-one anyway. Let the SQL layer do
+ it.
+ */
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+}
+
+int ha_rocksdb::truncate()
+{
+ m_tbl_def->m_auto_incr_val= 1;
+ m_tbl_def->m_hidden_pk_val= 1;
+ remove_rows(m_tbl_def);
+ return 0;
+}
+
+
+/*
+ Delete the row we've last read. The row is also passed as parameter.
+
+ @detail
+ The caller guarantees table buf points to the row that was just read.
+ The row is either table->record[0] or table->record[1].
+    (Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)".)
+*/
+int ha_rocksdb::delete_row(const uchar *buf)
+{
+ DBUG_ENTER("ha_rocksdb::delete_row");
+
+ ha_statistic_increment(&SSV::ha_delete_count);
+ set_last_rowkey(buf);
+
+ rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length());
+ Rdb_transaction *tx= get_or_create_tx(table->in_use);
+
+ uint index= pk_index(table, m_tbl_def);
+ rocksdb::Status s= delete_or_singledelete(index, tx,
+ m_pk_descr->get_cf(),
+ key_slice);
+ if (!s.ok())
+ {
+ DBUG_RETURN(tx->set_status_error(table->in_use, s, m_pk_descr, m_tbl_def));
+ }
+
+ longlong hidden_pk_id= 0;
+ if (m_tbl_def->m_key_count > 1 &&
+ has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id))
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+
+ // Delete the record for every secondary index
+ for (uint i= 0; i < m_tbl_def->m_key_count; i++)
+ {
+ if (!is_pk(i, table, m_tbl_def))
+ {
+ int packed_size;
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[i];
+ packed_size= kd->pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple,
+ nullptr, false, hidden_pk_id);
+ rocksdb::Slice secondary_key_slice(
+ reinterpret_cast<const char*>(m_sk_packed_tuple), packed_size);
+ /* Deleting on secondary key doesn't need any locks: */
+ tx->get_indexed_write_batch()->SingleDelete(kd->get_cf(),
+ secondary_key_slice);
+ }
+ }
+
+ if (do_bulk_commit(tx))
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ stats.rows_deleted++;
+ update_row_stats(ROWS_DELETED);
+
+ DBUG_RETURN(0);
+}
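+
+/*
+  Informal note on delete_row() above: the primary key entry is removed via
+  delete_or_singledelete(), which uses SingleDelete only when
+  can_use_single_delete() allows it, while secondary index entries are
+  always removed with SingleDelete on the indexed write batch, mirroring how
+  update_sk() removes the old entry on update.
+*/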
+
+
+rocksdb::Status
+ha_rocksdb::delete_or_singledelete(uint index,
+ Rdb_transaction *tx,
+ rocksdb::ColumnFamilyHandle* column_family,
+ const rocksdb::Slice& key)
+{
+ if (can_use_single_delete(index))
+ return tx->single_delete(column_family, key);
+ return tx->delete_key(column_family, key);
+}
+
+
+void ha_rocksdb::update_stats(void)
+{
+ DBUG_ENTER("ha_rocksdb::update_stats");
+
+ stats.records= 0;
+ stats.index_file_length= 0ul;
+ stats.data_file_length= 0ul;
+ stats.mean_rec_length= 0;
+ for (uint i= 0; i < m_tbl_def->m_key_count; i++)
+ {
+ if (is_pk(i, table, m_tbl_def))
+ {
+ stats.data_file_length= m_pk_descr->m_stats.m_actual_disk_size;
+ stats.records = m_pk_descr->m_stats.m_rows;
+ }
+ else
+ {
+ stats.index_file_length+= m_key_descr_arr[i]->m_stats.m_actual_disk_size;
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+int ha_rocksdb::info(uint flag)
+{
+ DBUG_ENTER("ha_rocksdb::info");
+
+ if (!table)
+ return 1;
+
+ if (flag & HA_STATUS_VARIABLE)
+ {
+ /*
+ Test only to simulate corrupted stats
+ */
+ DBUG_EXECUTE_IF("myrocks_simulate_negative_stats",
+ m_pk_descr->m_stats.m_actual_disk_size =
+ -m_pk_descr->m_stats.m_actual_disk_size;);
+
+ update_stats();
+
+ /*
+ If any stats are negative due to bad cached stats, re-run analyze table
+ and re-retrieve the stats.
+ */
+ if (static_cast<longlong>(stats.data_file_length) < 0 ||
+ static_cast<longlong>(stats.index_file_length) < 0 ||
+ static_cast<longlong>(stats.records) < 0)
+ {
+ if (analyze(nullptr, nullptr)) {
+ DBUG_RETURN(1);
+ }
+
+ update_stats();
+ }
+
+ if (stats.records == 0)
+ {
+ // most likely, the table is in memtable
+ // try to deduce from GetApproximateSizes
+ uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2];
+ auto r= get_range(pk_index(table, m_tbl_def), buf);
+ uint64_t sz= 0;
+ rdb->GetApproximateSizes(
+ m_pk_descr->get_cf(),
+ &r, 1,
+ &sz, true);
+ stats.records = sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
+ stats.data_file_length = sz;
+
+ if (rocksdb_debug_optimizer_n_rows > 0)
+ stats.records = rocksdb_debug_optimizer_n_rows;
+ }
+
+ if (stats.records != 0)
+ stats.mean_rec_length = stats.data_file_length / stats.records;
+ }
+ if (flag & HA_STATUS_CONST)
+ {
+ ref_length= m_pk_descr->max_storage_fmt_length();
+
+ // TODO: Needs to reimplement after having real index statistics
+ for (uint i= 0; i < m_tbl_def->m_key_count; i++)
+ {
+ if (is_hidden_pk(i, table, m_tbl_def))
+ {
+ continue;
+ }
+ KEY* k= &table->key_info[i];
+ for (uint j = 0; j < k->actual_key_parts; j++)
+ {
+ const Rdb_index_stats& k_stats= m_key_descr_arr[i]->m_stats;
+ uint x = k_stats.m_distinct_keys_per_prefix.size() > j &&
+ k_stats.m_distinct_keys_per_prefix[j] > 0 ?
+ k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j] :
+ 0;
+ if (x > stats.records)
+ x = stats.records;
+ if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) ||
+ rocksdb_debug_optimizer_n_rows > 0)
+ {
+ // Fake cardinality implementation. For example, (idx1, idx2, idx3) index
+ // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1.
+ // rec_per_key for the whole index is 1, and multiplied by 2^n if
+ // n suffix columns of the index are not used.
+ x = 1 << (k->actual_key_parts-j-1);
+ }
+ k->rec_per_key[j]= x;
+ }
+ }
+ }
+
+ if (flag & HA_STATUS_ERRKEY)
+ {
+ /*
+ Currently we support only primary keys so we know which key had a
+ uniqueness violation.
+ */
+ errkey= m_dupp_errkey;
+ dup_ref= m_pk_tuple; // TODO(?): this should store packed PK.
+ }
+
+ if (flag & HA_STATUS_AUTO)
+ {
+ stats.auto_increment_value= m_tbl_def->m_auto_incr_val;
+ }
+
+ DBUG_RETURN(0);
+}
+
+void ha_rocksdb::position(const uchar *record)
+{
+ DBUG_ENTER("ha_rocksdb::position");
+
+ longlong hidden_pk_id= 0;
+ if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id))
+ DBUG_ASSERT(false); // should never reach here
+
+ /*
+ Get packed primary key value from the record.
+
+    (Note: m_last_rowkey has the packed PK of the last-read row, which allows
+    us to handle most cases, but there is an exception to this: when the
+    slave applies RBR events, it fills the record and then calls position();
+    rnd_pos())
+
+ Also note that we *can* rely on PK column values being available. This is
+ because table_flags() includes HA_PRIMARY_KEY_REQUIRED_FOR_POSITION bit.
+ When that is true, table->prepare_for_position() adds PK columns into the
+ read set (this may potentially disable index-only access when PK column
+ cannot be restored from its mem-comparable form in the secondary indexes).
+ */
+ uint packed_size= m_pk_descr->pack_record(table, m_pack_buffer, record,
+ ref, nullptr, false, hidden_pk_id);
+
+ /*
+ It could be that mem-comparable form of PK occupies less than ref_length
+ bytes. Fill the remainder with zeros.
+ */
+ if (ref_length > packed_size)
+ memset(ref + packed_size, 0, ref_length - packed_size);
+
+ DBUG_VOID_RETURN;
+}
+
+
+int ha_rocksdb::rnd_pos(uchar *buf, uchar *pos)
+{
+ int rc;
+ size_t len;
+ DBUG_ENTER("ha_rocksdb::rnd_pos");
+
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
+ stats.rows_requested++;
+ len= m_pk_descr->key_length(table, rocksdb::Slice((const char*)pos,
+ ref_length));
+ if (len == size_t(-1))
+ {
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR); /* Data corruption? */
+ }
+
+ rc= get_row_by_rowid(buf, pos, len);
+
+ if (!rc)
+ {
+ stats.rows_read++;
+ update_row_stats(ROWS_READ);
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+/*
+ @brief
+ Calculate (if needed) the bitmap of indexes that are modified by the
+ current query
+
+ @detail
+    The calculation is done by checking index definitions against the
+    table->write_set column bitmap.
+*/
+
+void ha_rocksdb::calc_updated_indexes()
+{
+ if (!m_update_scope_is_valid)
+ {
+ m_update_scope_is_valid= true;
+ m_update_scope.clear_all();
+
+ for (uint keynr= 0; keynr < table->s->keys; keynr++)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[keynr];
+ /* Walk over all key parts, including the "extended key" suffix */
+ uint key_parts= kd->get_key_parts();
+ for (uint kp= 0; kp < key_parts; kp++)
+ {
+ if (has_hidden_pk(table) && kp + 1 == key_parts)
+ break;
+
+ Field *field= kd->get_table_field_for_part_no(table, kp);
+ if (bitmap_is_set(table->write_set, field->field_index))
+ {
+ m_update_scope.set_bit(keynr);
+ break;
+ }
+ }
+ }
+ }
+}
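+
+/*
+  Example (informal) for calc_updated_indexes() above: with
+  PRIMARY KEY(pk), KEY k_a(a), KEY k_b(b), the statement
+    UPDATE t1 SET b=b+1 WHERE pk=5;
+  marks only column 'b' in table->write_set, so only the bit for k_b is set
+  in m_update_scope. update_sk() and check_and_lock_sk() then skip k_a
+  entirely; the primary key is always handled by update_pk().
+*/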
+
+
+int ha_rocksdb::update_row(const uchar *old_data, uchar *new_data)
+{
+ DBUG_ENTER("ha_rocksdb::update_row");
+ DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE);
+ /*
+ old_data points to record we're updating. It is the same as the record
+ we've just read (for multi-table UPDATE, too, because SQL layer will make
+ an rnd_pos() call to re-read the record before calling update_row())
+ */
+ DBUG_ASSERT(new_data == table->record[0]);
+
+ ha_statistic_increment(&SSV::ha_update_count);
+ int rv;
+ rv= update_write_row(old_data, new_data, false);
+
+ if (rv == 0)
+ {
+ stats.rows_updated++;
+ update_row_stats(ROWS_UPDATED);
+ }
+ DBUG_RETURN(rv);
+}
+
+
+/* The following function was copied from ha_blackhole::store_lock: */
+THR_LOCK_DATA **ha_rocksdb::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ DBUG_ENTER("ha_rocksdb::store_lock");
+
+ bool in_lock_tables = my_core::thd_in_lock_tables(thd);
+
+ /* First, make a decision about MyRocks's internal locking */
+ if (lock_type >= TL_WRITE_ALLOW_WRITE)
+ {
+ m_lock_rows = RDB_LOCK_WRITE;
+ }
+ else if (lock_type == TL_READ_WITH_SHARED_LOCKS)
+ {
+ m_lock_rows = RDB_LOCK_READ;
+ }
+ else
+ {
+ m_lock_rows = RDB_LOCK_NONE;
+ if (THDVAR(thd, lock_scanned_rows))
+ {
+ /*
+ The following logic was copied directly from
+ ha_innobase::store_lock_with_x_type() in
+ storage/innobase/handler/ha_innodb.cc and causes MyRocks to leave
+        locks in place on rows that are in a table that is not being updated.
+ */
+ uint sql_command = my_core::thd_sql_command(thd);
+ if ((lock_type == TL_READ && in_lock_tables) ||
+ (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) ||
+ can_hold_read_locks_on_select(thd, lock_type))
+ {
+ ulong tx_isolation = my_core::thd_tx_isolation(thd);
+ if (sql_command != SQLCOM_CHECKSUM &&
+ ((my_core::thd_test_options(thd, OPTION_BIN_LOG) &&
+ tx_isolation > ISO_READ_COMMITTED) ||
+ tx_isolation == ISO_SERIALIZABLE ||
+ (lock_type != TL_READ && lock_type != TL_READ_NO_INSERT) ||
+ (sql_command != SQLCOM_INSERT_SELECT &&
+ sql_command != SQLCOM_REPLACE_SELECT &&
+ sql_command != SQLCOM_UPDATE &&
+ sql_command != SQLCOM_DELETE &&
+ sql_command != SQLCOM_CREATE_TABLE)))
+ {
+ m_lock_rows = RDB_LOCK_READ;
+ }
+ }
+ }
+ }
+
+ /* Then, tell the SQL layer what kind of locking it should use: */
+ if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK)
+ {
+ /*
+ Here is where we get into the guts of a row level lock.
+      If TL_UNLOCK is set, and we are not doing a LOCK TABLE or
+      DISCARD/IMPORT TABLESPACE, then allow multiple writers.
+ */
+
+ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
+ !in_lock_tables && !my_core::thd_tablespace_op(thd))
+ {
+ lock_type = TL_WRITE_ALLOW_WRITE;
+ }
+
+ /*
+ In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+ MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+ would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+ to t2. Convert the lock to a normal read lock to allow
+ concurrent inserts to t2.
+ */
+
+ if (lock_type == TL_READ_NO_INSERT && !in_lock_tables)
+ {
+ lock_type = TL_READ;
+ }
+
+ m_db_lock.type= lock_type;
+ }
+ *to++= &m_db_lock;
+ DBUG_RETURN(to);
+}
+
+void ha_rocksdb::read_thd_vars(THD *thd)
+{
+ m_store_checksums= THDVAR(thd, store_checksums);
+ m_verify_checksums= THDVAR(thd, verify_checksums);
+ m_checksums_pct= THDVAR(thd, checksums_pct);
+}
+
+int ha_rocksdb::external_lock(THD *thd, int lock_type)
+{
+ int res= 0;
+ DBUG_ENTER("ha_rocksdb::external_lock");
+
+ int binlog_format= my_core::thd_binlog_format(thd);
+ bool unsafe_for_binlog= THDVAR(ha_thd(), unsafe_for_binlog);
+ if (lock_type == F_WRLCK && !thd->rli_slave &&
+ !unsafe_for_binlog &&
+ binlog_format != BINLOG_FORMAT_ROW &&
+ binlog_format != BINLOG_FORMAT_UNSPEC &&
+ my_core::thd_binlog_filter_ok(thd))
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Can't execute updates on master with binlog_format != ROW.",
+ MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ if (lock_type == F_UNLCK)
+ {
+ Rdb_transaction* tx= get_or_create_tx(thd);
+
+ tx->io_perf_end_and_record(&m_io_perf);
+ if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+ {
+ /*
+ Do like InnoDB: when we get here, it's time to commit a
+ single-statement transaction.
+
+ If the statement involved multiple tables, this code will be executed
+ for each of them, but that's ok because non-first tx->commit() calls
+ will be no-ops.
+ */
+ res= tx->commit_or_rollback();
+ }
+ }
+ else
+ {
+ if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED ||
+ my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "MyRocks supports only READ COMMITTED and "
+ "REPEATABLE READ isolation levels. "
+ "Please change from current isolation "
+ "level %s",
+ MYF(0),
+ tx_isolation_names[my_core::thd_tx_isolation(thd)]);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ /*
+ It's nice to do the following on start of every statement. The problem
+ is, handler->start_stmt() is not called for INSERTs.
+ So, we put this code here.
+ */
+ Rdb_transaction* tx= get_or_create_tx(thd);
+ read_thd_vars(thd);
+
+ m_update_scope_is_valid= false;
+
+ if (skip_unique_check())
+ {
+ if ((thd->lex->sql_command == SQLCOM_INSERT ||
+ thd->lex->sql_command == SQLCOM_LOAD ||
+ thd->lex->sql_command == SQLCOM_REPLACE) &&
+ (thd->lex->duplicates == DUP_REPLACE ||
+ thd->lex->duplicates == DUP_UPDATE))
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "When unique checking is disabled in MyRocks, INSERT,"
+ "UPDATE,LOAD statements with clauses that update or "
+ "replace the key (i.e. "
+ "INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not "
+ "allowed. Query: %s",
+ MYF(0), thd->query());
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ }
+
+ if (lock_type == F_WRLCK)
+ {
+ if (tx->is_tx_read_only())
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Can't execute updates when you started a transaction "
+ "with START TRANSACTION WITH CONSISTENT [ROCKSDB] "
+ "SNAPSHOT.",
+ MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ /*
+ SQL layer signals us to take a write lock. It does so when starting DML
+ statement. We should put locks on the rows we're reading.
+
+ Note: sometimes, external_lock() can be called without a prior
+ ::store_lock call. That's why we need to set lock_* members here, too.
+ */
+ m_lock_rows= RDB_LOCK_WRITE;
+
+ if (thd->lex->sql_command == SQLCOM_CREATE_INDEX ||
+ thd->lex->sql_command == SQLCOM_DROP_INDEX ||
+ thd->lex->sql_command == SQLCOM_ALTER_TABLE)
+ {
+ tx->m_ddl_transaction= true;
+ }
+ }
+ rocksdb_register_tx(rocksdb_hton, thd, tx);
+ tx->io_perf_start(&m_io_perf);
+ }
+
+ DBUG_RETURN(res);
+}
+
+
+/**
+ @note
+ A quote from ha_innobase::start_stmt():
+ <quote>
+ MySQL calls this function at the start of each SQL statement inside LOCK
+ TABLES. Inside LOCK TABLES the ::external_lock method does not work to
+ mark SQL statement borders.
+ </quote>
+*/
+
+int ha_rocksdb::start_stmt(THD *thd, thr_lock_type lock_type)
+{
+ Rdb_transaction* tx= get_or_create_tx(thd);
+ read_thd_vars(thd);
+ rocksdb_register_tx(ht, thd, tx);
+ tx->io_perf_start(&m_io_perf);
+ return 0;
+}
+
+rocksdb::Range get_range(
+ uint32_t i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2], int offset1,
+ int offset2)
+{
+ uchar* buf_begin= buf;
+ uchar* buf_end= buf + Rdb_key_def::INDEX_NUMBER_SIZE;
+ rdb_netbuf_store_index(buf_begin, i + offset1);
+ rdb_netbuf_store_index(buf_end, i + offset2);
+
+ return rocksdb::Range(
+ rocksdb::Slice((const char*) buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE),
+ rocksdb::Slice((const char*) buf_end, Rdb_key_def::INDEX_NUMBER_SIZE));
+}
+
+static rocksdb::Range get_range(
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2],
+ int offset1, int offset2)
+{
+ return get_range(kd->get_index_number(), buf, offset1, offset2);
+}
+
+rocksdb::Range get_range(const std::shared_ptr<const Rdb_key_def>& kd,
+ uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2])
+{
+ if (kd->m_is_reverse_cf)
+ {
+ return myrocks::get_range(kd, buf, 1, 0);
+ }
+ else
+ {
+ return myrocks::get_range(kd, buf, 0, 1);
+ }
+}
+
+rocksdb::Range ha_rocksdb::get_range(
+ int i, uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]) const
+{
+ return myrocks::get_range(m_key_descr_arr[i], buf);
+}
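+
+/*
+  Informal note on the get_range() helpers above: every index owns the key
+  space prefixed by its 4-byte big-endian index number, so for a forward
+  column family the covering range is [index_no, index_no + 1). For a
+  reverse column family the two endpoints are swapped, because keys there
+  sort in descending byte order.
+*/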
+
+
+/*
+ Drop index thread's main logic
+*/
+
+void Rdb_drop_index_thread::run()
+{
+ mysql_mutex_lock(&m_signal_mutex);
+
+ for (;;) {
+    // The stop flag might be set by the shutdown command after
+    // drop_index_thread releases m_signal_mutex (i.e. while executing an
+    // expensive Seek()). To prevent drop_index_thread from entering a long
+    // cond_timedwait, the stop flag has to be checked again while holding
+    // m_signal_mutex.
+ if (m_stop) {
+ break;
+ }
+
+ timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_sec += dict_manager.is_drop_index_empty()
+ ? 24*60*60 // no filtering
+ : 60; // filtering
+
+ auto ret __attribute__((__unused__)) = mysql_cond_timedwait(
+ &m_signal_cond, &m_signal_mutex, &ts);
+ if (m_stop) {
+ break;
+ }
+    // make sure no program error is returned
+ DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT);
+ mysql_mutex_unlock(&m_signal_mutex);
+
+ std::vector<GL_INDEX_ID> indices;
+ dict_manager.get_ongoing_drop_indexes(&indices);
+ if (!indices.empty()) {
+ std::unordered_set<GL_INDEX_ID> finished;
+ rocksdb::ReadOptions read_opts;
+ read_opts.total_order_seek = true; // disable bloom filter
+
+ for (auto d : indices) {
+ uint32 cf_flags= 0;
+ if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags))
+ {
+ sql_print_error("RocksDB: Failed to get column family flags "
+ "from cf id %u. MyRocks data dictionary may "
+ "get corrupted.", d.cf_id);
+ abort_with_stack_traces();
+ }
+ rocksdb::ColumnFamilyHandle* cfh= cf_manager.get_cf(d.cf_id);
+ DBUG_ASSERT(cfh);
+ bool is_reverse_cf= cf_flags & Rdb_key_def::REVERSE_CF_FLAG;
+
+ bool index_removed= false;
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0};
+ rdb_netbuf_store_uint32(key_buf, d.index_id);
+ rocksdb::Slice key = rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+ uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2];
+ rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf?1:0,
+ is_reverse_cf?0:1);
+ rocksdb::CompactRangeOptions compact_range_options;
+ compact_range_options.bottommost_level_compaction =
+ rocksdb::BottommostLevelCompaction::kForce;
+ compact_range_options.exclusive_manual_compaction = false;
+ rocksdb::Status status = DeleteFilesInRange(rdb->GetBaseDB(), cfh,
+ &range.start, &range.limit);
+ if (!status.ok())
+ {
+ if (status.IsShutdownInProgress())
+ {
+ break;
+ }
+ rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD);
+ }
+ status = rdb->CompactRange(
+ compact_range_options, cfh, &range.start, &range.limit);
+ if (!status.ok())
+ {
+ if (status.IsShutdownInProgress())
+ {
+ break;
+ }
+ rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD);
+ }
+ std::unique_ptr<rocksdb::Iterator> it(
+ rdb->NewIterator(read_opts, cfh));
+ it->Seek(key);
+ if (is_reverse_cf)
+ {
+ if (!it->Valid())
+ {
+ it->SeekToLast();
+ }
+ else
+ {
+ it->Prev();
+ }
+ }
+ if (!it->Valid())
+ {
+ index_removed= true;
+ }
+ else
+ {
+ if (memcmp(it->key().data(), key_buf, Rdb_key_def::INDEX_NUMBER_SIZE))
+ {
+ // Key does not have same prefix
+ index_removed= true;
+ }
+ }
+ if (index_removed)
+ {
+ finished.insert(d);
+ }
+ }
+
+ if (!finished.empty()) {
+ dict_manager.finish_drop_indexes(finished);
+ }
+ }
+ mysql_mutex_lock(&m_signal_mutex);
+ }
+
+ mysql_mutex_unlock(&m_signal_mutex);
+}
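+
+/*
+  Informal overview of Rdb_drop_index_thread::run() above: the thread wakes
+  up after at most 60 seconds while index drops are pending (24 hours
+  otherwise) and, for every ongoing drop, first calls DeleteFilesInRange(),
+  then CompactRange(), and finally verifies with an iterator that no key
+  with the dropped index-number prefix remains before marking the drop as
+  finished in the data dictionary.
+*/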
+
+
+Rdb_tbl_def* ha_rocksdb::get_table_if_exists(const char* tablename)
+{
+ DBUG_ASSERT(tablename != nullptr);
+
+ std::string str;
+ if (rdb_normalize_tablename(tablename, &str))
+ {
+    // We were not passed a table name?
+ DBUG_ASSERT(0);
+ return nullptr;
+ }
+
+ return ddl_manager.find(str);
+}
+
+/*
+ Note: the following function is called when the table is not open. That is,
+  this->table==nullptr, m_pk_descr==nullptr, etc.
+
+  tablename points to a string of the form "./dbname/tablename".
+*/
+
+int ha_rocksdb::delete_table(const char *tablename)
+{
+ DBUG_ENTER("ha_rocksdb::delete_table");
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict_manager.begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ /* Find the table in the hash */
+ Rdb_tbl_def *tbl = get_table_if_exists(tablename);
+ if (!tbl)
+ DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
+ dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch);
+
+ /*
+ Remove the table entry in data dictionary (this will also remove it from
+ the persistent data dictionary).
+ */
+ ddl_manager.remove(tbl, batch, true);
+ if (dict_manager.commit(batch))
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+
+ rdb_drop_idx_thread.signal();
+
+ DBUG_RETURN(0);
+}
+
+
+void ha_rocksdb::remove_rows(Rdb_tbl_def *tbl)
+{
+ rocksdb::WriteOptions wo= rdb_get_rocksdb_write_options(handler::ha_thd());
+
+ rocksdb::ReadOptions opts;
+ opts.total_order_seek= true;
+ rocksdb::Iterator* it= rdb->NewIterator(opts);
+
+ char key_buf[MAX_KEY_LENGTH];
+ uint key_len;
+ /*
+ Remove all records in each index.
+    (This is not crash-safe, but it doesn't matter, because bulk row
+    deletion will be handled on the RocksDB side.)
+ */
+ for (uint i= 0; i < tbl->m_key_count ; i++)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= tbl->m_key_descr_arr[i];
+ kd->get_infimum_key(reinterpret_cast<uchar*>(key_buf), &key_len);
+
+ rocksdb::Slice table_key(key_buf, key_len);
+ it->Seek(table_key);
+ while (it->Valid())
+ {
+ rocksdb::Slice key= it->key();
+ if (!kd->covers_key(key))
+ {
+ break;
+ }
+
+ if (can_use_single_delete(i))
+ rdb->SingleDelete(wo, key);
+ else
+ rdb->Delete(wo, key);
+ it->Next();
+ }
+ }
+ delete it;
+}
+
+
+int ha_rocksdb::rename_table(const char *from, const char *to)
+{
+ DBUG_ASSERT(from != nullptr);
+ DBUG_ASSERT(to != nullptr);
+
+ bool write_err= false;
+ std::string from_str;
+ std::string to_str;
+ std::string from_db;
+ std::string to_db;
+
+ if (rdb_normalize_tablename(from, &from_str) ||
+ rdb_split_normalized_tablename(from_str, &from_db))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ if (rdb_normalize_tablename(to, &to_str) ||
+ rdb_split_normalized_tablename(to_str, &to_db))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ // If the user changed the database part of the name then validate that the
+ // 'to' database exists.
+ if (from_db != to_db && !rdb_database_exists(to_db))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ /* Check if any index has a per-index column family */
+ {
+ /*
+ We can't check this directly, because
+ 1. the object that referred to the old table has been close()d.
+      2. this ha_rocksdb object has no connection to any table at all; it has
+         just been created with (TABLE*)nullptr.
+
+ So, we have to find the old table in the ddl_manager (it's there because
+ it has been opened by this statement), and check is_auto_cf there.
+ */
+ Rdb_tbl_def *tdef;
+ if (!(tdef= ddl_manager.find(from_str)))
+ return HA_ERR_INTERNAL_ERROR;
+
+ for (uint i= 0; i < tdef->m_key_count; i++)
+ {
+ DBUG_ASSERT(tdef->m_key_descr_arr != nullptr);
+
+ if (tdef->m_key_descr_arr[i]->m_is_auto_cf)
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "ALTER TABLE on table with per-index CF");
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ }
+ }
+
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict_manager.begin();
+ rocksdb::WriteBatch *batch= wb.get();
+ dict_manager.lock();
+ write_err= ddl_manager.rename(from_str, to_str, batch)
+ || dict_manager.commit(batch);
+ dict_manager.unlock();
+
+ return write_err ? HA_ERR_INTERNAL_ERROR : 0;
+}
+
+/**
+ check_if_incompatible_data() called if ALTER TABLE can't detect otherwise
+ if new and old definition are compatible
+
+  @details If there are no other explicit signs, like a changed number of
+    fields, this function will be called by compare_tables()
+    (sql/sql_tables.cc) to decide whether we should rewrite the whole table
+    or only the .frm file.
+
+*/
+
+bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+{
+ DBUG_ENTER("ha_rocksdb::check_if_incompatible_data");
+ // this function is needed only for online alter-table
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+}
+
+
+int ha_rocksdb::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_rocksdb::extra");
+
+ switch (operation) {
+ case HA_EXTRA_KEYREAD:
+ m_keyread_only= true;
+ break;
+ case HA_EXTRA_NO_KEYREAD:
+ m_keyread_only= false;
+ break;
+ case HA_EXTRA_FLUSH:
+ /*
+ If the table has blobs, then they are part of m_retrieved_record.
+ This call invalidates them.
+ */
+ m_retrieved_record.clear();
+ break;
+ default:
+ break;
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Given a starting key and an ending key, estimate the number of rows that
+ will exist between the two keys.
+*/
+ha_rows ha_rocksdb::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+{
+ DBUG_ENTER("ha_rocksdb::records_in_range");
+ ha_rows ret = THDVAR(ha_thd(), records_in_range);
+ if (ret) {
+ DBUG_RETURN(ret);
+ }
+ if (table->force_index) {
+ ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range);
+ if (force_rows) {
+ DBUG_RETURN(force_rows);
+ }
+ }
+
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[inx];
+ DBUG_ASSERT(kd);
+
+ uint size1=0;
+ if (min_key) {
+ size1 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple,
+ min_key->key, min_key->keypart_map);
+ if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ min_key->flag == HA_READ_PREFIX_LAST ||
+ min_key->flag == HA_READ_AFTER_KEY)
+ {
+ kd->successor(m_sk_packed_tuple, size1);
+ }
+ } else {
+ kd->get_infimum_key(m_sk_packed_tuple, &size1);
+ }
+
+ uint size2=0;
+ if (max_key) {
+ size2 = kd->pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old,
+ max_key->key, max_key->keypart_map);
+ if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ max_key->flag == HA_READ_PREFIX_LAST ||
+ max_key->flag == HA_READ_AFTER_KEY)
+ {
+ kd->successor(m_sk_packed_tuple_old, size2);
+ }
+ // pad the upper key with FFFFs to make sure it is more than the lower
+ if (size1 > size2) {
+ memset(m_sk_packed_tuple_old+size2, 0xff, size1-size2);
+ size2 = size1;
+ }
+ } else {
+ kd->get_supremum_key(m_sk_packed_tuple_old, &size2);
+ }
+
+ rocksdb::Slice slice1((const char*) m_sk_packed_tuple, size1);
+ rocksdb::Slice slice2((const char*) m_sk_packed_tuple_old, size2);
+
+ // slice1 >= slice2 means no row will match
+ if (slice1.compare(slice2) >= 0)
+ {
+ DBUG_RETURN(0);
+ }
+
+ rocksdb::Range r(
+ kd->m_is_reverse_cf ? slice2 : slice1,
+ kd->m_is_reverse_cf ? slice1 : slice2
+ );
+
+ uint64_t sz=0;
+ auto disk_size = kd->m_stats.m_actual_disk_size;
+ if (disk_size == 0)
+ disk_size = kd->m_stats.m_data_size;
+ auto rows = kd->m_stats.m_rows;
+ if (rows == 0 || disk_size == 0)
+ {
+ rows= 1;
+ disk_size= ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
+ }
+ // Getting statistics, including from Memtables
+ rdb->GetApproximateSizes(
+ kd->get_cf(),
+ &r, 1,
+ &sz, true);
+ ret = rows*sz/disk_size;
+
+ /*
+    GetApproximateSizes() gives estimates, so ret might exceed stats.records.
+    MySQL then decides to use a full index scan rather than a range scan,
+    which is inefficient in most cases.
+    To prevent this, make the estimated record count slightly smaller than
+    stats.records.
+ */
+ if (ret >= stats.records)
+ {
+ ret = stats.records * 0.99;
+ }
+
+ if (rocksdb_debug_optimizer_n_rows > 0)
+ {
+ ret= rocksdb_debug_optimizer_n_rows;
+ }
+ else if (ret == 0)
+ {
+ ret = 1;
+ }
+
+ DBUG_RETURN(ret);
+}
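+
+/*
+  Informal summary of the estimate above:
+    ret = index_rows * approximate_size_of_range / index_disk_size,
+  then capped at 0.99 * stats.records (so the optimizer still prefers a
+  range scan over a full index scan) and floored at 1, unless the
+  rocksdb_debug_optimizer_n_rows override is set.
+*/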
+
+
+void ha_rocksdb::update_create_info(HA_CREATE_INFO *create_info)
+{
+ if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value= m_tbl_def->m_auto_incr_val;
+ }
+}
+
+/**
+ @brief
+  Do manual compaction on OPTIMIZE TABLE in RocksDB.
+  Compaction itself is executed by a background thread in RocksDB, but
+  CompactRange() waits until compaction completes, so this function
+  may take a long time.
+  Since the RocksDB data for a table is laid out per index id, OPTIMIZE TABLE
+  triggers manual compaction for all indexes of the table.
+  @details
+  The compaction range is from the beginning of the index id to
+  the first row of the next index id. When using a reverse-order
+  column family, the first row of the next index id should be
+  the last row of the previous index id.
+*/
+int ha_rocksdb::optimize(THD *thd, HA_CHECK_OPT* check_opt)
+{
+ int rc= 0;
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE*2];
+ auto range = get_range(i, buf);
+ if (!rdb->CompactRange(rocksdb::CompactRangeOptions(),
+ m_key_descr_arr[i]->get_cf(),
+ &range.start, &range.limit).ok())
+ {
+ rc= 1;
+ break;
+ }
+ }
+ return rc;
+}
+
+int ha_rocksdb::analyze(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ DBUG_ENTER("ha_rocksdb::analyze");
+
+ if (!table)
+ DBUG_RETURN(1);
+
+ // find per column family key ranges which need to be queried
+ std::unordered_map<rocksdb::ColumnFamilyHandle*, std::vector<rocksdb::Range>>
+ ranges;
+ std::unordered_set<GL_INDEX_ID> ids_to_check;
+ std::vector<uchar> buf(table->s->keys * 2 * Rdb_key_def::INDEX_NUMBER_SIZE);
+ for (uint i = 0; i < table->s->keys; i++)
+ {
+ auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE];
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[i];
+ ranges[kd->get_cf()].push_back(get_range(i, bufp));
+ ids_to_check.insert(kd->get_gl_index_id());
+ }
+
+ // for analyze statements, force flush on memtable to get accurate cardinality
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+ if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) &&
+ !rocksdb_pause_background_work)
+ {
+ for (auto it : ids_to_check)
+ {
+ rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id));
+ }
+ }
+
+ // get RocksDB table properties for these ranges
+ rocksdb::TablePropertiesCollection props;
+ for (auto it : ranges)
+ {
+ auto old_size __attribute__((__unused__)) = props.size();
+ auto status = rdb->GetPropertiesOfTablesInRange(
+ it.first, &it.second[0], it.second.size(), &props);
+ DBUG_ASSERT(props.size() >= old_size);
+ if (!status.ok())
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ int num_sst= 0;
+ // group stats per index id
+ std::unordered_map<GL_INDEX_ID, Rdb_index_stats> stats;
+ for (auto it : ids_to_check)
+ {
+ // Initialize the stats to 0. If there are no files that contain
+ // this gl_index_id, then 0 should be stored for the cached stats.
+ stats[it] = Rdb_index_stats(it);
+ }
+ for (auto it : props)
+ {
+ std::vector<Rdb_index_stats> sst_stats;
+ Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
+ /*
+ sst_stats is a list of index statistics for indexes that have entries
+ in the current SST file.
+ */
+ for (auto it1 : sst_stats)
+ {
+ /*
+ Only update statistics for indexes that belong to this SQL table.
+
+ The reason is: We are walking through all SST files that have
+ entries from this table (and so can compute good statistics). For
+ other SQL tables, it can be that we're only seeing a small fraction
+ of table's entries (and so we can't update statistics based on that).
+ */
+ if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end())
+ continue;
+
+ const std::shared_ptr<const Rdb_key_def>& kd=
+ ddl_manager.find(it1.m_gl_index_id);
+ stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length());
+ }
+ num_sst++;
+ }
+
+ // set and persist new stats
+ ddl_manager.set_stats(stats);
+ ddl_manager.persist_stats(true);
+
+ DBUG_RETURN(0);
+}
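+
+/*
+  Informal overview of analyze() above: ANALYZE TABLE optionally flushes the
+  relevant memtables (flush_memtable_on_analyze), then collects per-SST index
+  statistics for the table's key ranges via GetPropertiesOfTablesInRange(),
+  merges them per GL_INDEX_ID, and persists the result through ddl_manager.
+*/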
+
+void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
+{
+ /*
+ MySQL has a somewhat complicated way of handling the auto-increment value.
+ The first time get_auto_increment is called for a statement,
+ nb_desired_values is the estimate for how many values will be needed. The
+ engine can then reserve some values, and those will be automatically used
+ by MySQL, until a hard-coded value shows up in the insert statement, after
+ which MySQL again calls this function to reset its starting value.
+
+ For simplicity we will just ignore nb_desired_values - we aren't going to
+ reserve any extra values for a multi-insert statement. Each row will
+ simply acquire the next value as needed and we will always tell MySQL that
+ we only reserved 1 value. Since we are using an atomic value for
+ m_auto_incr_val this should be safe - if we had to grab a mutex, doing
+ an actual reserve of some values might be a better solution.
+ */
+ *first_value= m_tbl_def->m_auto_incr_val++;
+ *nb_reserved_values= 1;
+}
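+
+/*
+  Informal note: since each row consumes exactly one value from the atomic
+  counter and nothing is ever reserved or handed back, statements that roll
+  back still advance m_auto_incr_val, so gaps in the auto-increment sequence
+  are expected.
+*/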
+
+
+#ifndef DBUG_OFF
+
+/* Debugger help function */
+static char dbug_item_print_buf[512];
+
+const char *dbug_print_item(Item *item)
+{
+ char *buf= dbug_item_print_buf;
+ String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
+ str.length(0);
+ if (!item)
+ return "(Item*)nullptr";
+ item->print(&str ,QT_ORDINARY);
+ if (str.c_ptr() == buf)
+ return buf;
+ else
+ return "Couldn't fit into buffer";
+}
+
+#endif /*DBUG_OFF*/
+
+
+/**
+ SQL layer calls this function to push an index condition.
+
+ @details
+ The condition is for index keyno (only one condition can be pushed at a
+ time).
+ The caller guarantees that condition refers only to index fields; besides
+ that, fields must have
+
+ $field->part_of_key.set_bit(keyno)
+
+ which means that
+
+ (handler->index_flags(keyno, $keypart, 0) & HA_KEYREAD_ONLY) == 1
+
+ which means that field value can be restored from the index tuple.
+
+ @return
+ Part of condition we couldn't check (always nullptr).
+*/
+
+class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* idx_cond)
+{
+ DBUG_ASSERT(keyno != MAX_KEY);
+ DBUG_ASSERT(idx_cond != nullptr);
+
+ pushed_idx_cond = idx_cond;
+ pushed_idx_cond_keyno = keyno;
+ in_range_check_pushed_down = TRUE;
+
+ /* We will check the whole condition */
+ return nullptr;
+}
+
+
+/*
+ @brief
+ Check the index condition.
+
+ @detail
+ Check the index condition. (The caller has unpacked all needed index
+ columns into table->record[0])
+
+ @return
+ ICP_NO_MATCH - Condition not satisfied (caller should continue
+ scanning)
+    ICP_OUT_OF_RANGE - We've left the range we're scanning (caller should
+                       stop scanning and return HA_ERR_END_OF_FILE)
+
+ ICP_MATCH - Condition is satisfied (caller should fetch the record
+ and return it)
+*/
+
+enum icp_result ha_rocksdb::check_index_cond()
+{
+ DBUG_ASSERT(pushed_idx_cond);
+ DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY);
+
+ if (end_range && compare_key_icp(end_range) > 0)
+ {
+ /* caller should return HA_ERR_END_OF_FILE already */
+ return ICP_OUT_OF_RANGE;
+ }
+
+ return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
+}
+
+
+/**
+ Checking if an index is used for ascending scan or not
+
+ @detail
+ Currently RocksDB does not support bloom filter for
+ prefix lookup + descending scan, but supports bloom filter for
+ prefix lookup + ascending scan. This function returns true if
+ the scan pattern is absolutely ascending.
+ @param kd
+ @param find_flag
+*/
+bool ha_rocksdb::is_ascending(const std::shared_ptr<const Rdb_key_def>& kd,
+ enum ha_rkey_function find_flag) const
+{
+ DBUG_ASSERT(kd != nullptr);
+
+ bool is_ascending;
+ switch (find_flag) {
+ case HA_READ_KEY_EXACT:
+ {
+ is_ascending = !kd->m_is_reverse_cf;
+ break;
+ }
+ case HA_READ_PREFIX:
+ {
+ is_ascending= true;
+ break;
+ }
+ case HA_READ_KEY_OR_NEXT:
+ case HA_READ_AFTER_KEY:
+ {
+ is_ascending = !kd->m_is_reverse_cf;
+ break;
+ }
+ case HA_READ_KEY_OR_PREV:
+ case HA_READ_BEFORE_KEY:
+ case HA_READ_PREFIX_LAST:
+ case HA_READ_PREFIX_LAST_OR_PREV:
+ {
+ is_ascending = kd->m_is_reverse_cf;
+ break;
+ }
+ default:
+ is_ascending= false;
+ }
+ return is_ascending;
+}
+
+/*
+ Checks if inplace alter is supported for a given operation.
+*/
+
+my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter(
+ TABLE* /* altered_table */,
+ my_core::Alter_inplace_info *ha_alter_info)
+{
+ DBUG_ENTER("check_if_supported_alter");
+
+ if (ha_alter_info->handler_flags &
+ ~(my_core::Alter_inplace_info::DROP_INDEX |
+ my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
+ my_core::Alter_inplace_info::ADD_INDEX))
+ {
+ DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ DBUG_RETURN(my_core::HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE);
+}
+
+/**
+ Allows the storage engine to update internal structures with concurrent
+ writes blocked. If check_if_supported_inplace_alter() returns
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or
+ HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with
+ exclusive lock otherwise the same level of locking as for
+ inplace_alter_table() will be used.
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function reports error, commit_inplace_alter_table()
+ will be called with commit= false.
+
+ @note For partitioning, failing to prepare one partition, means that
+ commit_inplace_alter_table() will be called to roll back changes for
+ all partitions. This means that commit_inplace_alter_table() might be
+ called without prepare_inplace_alter_table() having been called first
+ for a given partition.
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+
+ @retval true Error
+ @retval false Success
+*/
+bool ha_rocksdb::prepare_inplace_alter_table(
+ TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info)
+{
+ DBUG_ENTER("prepare_inplace_alter_table");
+ uint n_dropped_keys= ha_alter_info->index_drop_count;
+ uint n_added_keys= ha_alter_info->index_add_count;
+ uint old_n_keys= m_tbl_def->m_key_count;
+ uint new_n_keys= altered_table->s->keys;
+ if (has_hidden_pk(altered_table))
+ {
+ new_n_keys += 1;
+ }
+
+ std::shared_ptr<Rdb_key_def>* old_key_descr= m_tbl_def->m_key_descr_arr;
+ std::shared_ptr<Rdb_key_def>* new_key_descr=
+ new std::shared_ptr<Rdb_key_def>[new_n_keys];
+
+ Rdb_tbl_def* new_tdef= new Rdb_tbl_def(m_tbl_def->full_tablename());
+ new_tdef->m_key_descr_arr= new_key_descr;
+ new_tdef->m_key_count= new_n_keys;
+ new_tdef->m_auto_incr_val=
+ m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed);
+ new_tdef->m_hidden_pk_val=
+ m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed);
+
+ if (ha_alter_info->handler_flags &
+ (my_core::Alter_inplace_info::DROP_INDEX |
+ my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
+ my_core::Alter_inplace_info::ADD_INDEX)
+ && create_key_defs(altered_table, new_tdef, table, m_tbl_def))
+ {
+ /* Delete the new key descriptors */
+ delete[] new_key_descr;
+
+ /*
+ Explicitly mark as nullptr so we don't accidentally remove entries
+ from data dictionary on cleanup (or cause double delete[]).
+ */
+ new_tdef->m_key_descr_arr= nullptr;
+ delete new_tdef;
+ DBUG_RETURN(1);
+ }
+
+ std::unordered_set<std::shared_ptr<Rdb_key_def>> added_indexes;
+ std::unordered_set<GL_INDEX_ID> dropped_index_ids;
+
+ uint i;
+ uint j;
+ const KEY* key;
+
+ /* Determine which (if any) key definition(s) need to be dropped */
+ for (i = 0; i < ha_alter_info->index_drop_count; i++)
+ {
+ const KEY* key = ha_alter_info->index_drop_buffer[i];
+ for (j = 0; j < old_n_keys; j++)
+ {
+ if (!old_key_descr[j]->m_name.compare(key->name))
+ {
+ dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id());
+ break;
+ }
+ }
+ }
+
+ DBUG_ASSERT(dropped_index_ids.size() == ha_alter_info->index_drop_count);
+
+ /* Determine which (if any) key definition(s) need to be added */
+ for (i = 0; i < ha_alter_info->index_add_count; i++)
+ {
+ key = &ha_alter_info->key_info_buffer[
+ ha_alter_info->index_add_buffer[i]];
+ for (j = 0; j < new_n_keys; j++)
+ {
+ if (!new_key_descr[j]->m_name.compare(key->name))
+ {
+ added_indexes.insert(new_key_descr[j]);
+ break;
+ }
+ }
+ }
+
+ DBUG_ASSERT(added_indexes.size() == ha_alter_info->index_add_count);
+
+ ha_alter_info->handler_ctx= new Rdb_inplace_alter_ctx(
+ new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys,
+ added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys);
+
+ DBUG_RETURN(0);
+}
+
+/**
+ Alter the table structure in-place with operations specified using
+ HA_ALTER_FLAGS and Alter_inplace_info. The level of concurrency allowed
+ during this operation depends on the return value from
+ check_if_supported_inplace_alter().
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function reports error, commit_inplace_alter_table()
+ will be called with commit= false.
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+
+ @retval true Error
+ @retval false Success
+*/
+bool ha_rocksdb::inplace_alter_table(TABLE* altered_table,
+ my_core::Alter_inplace_info *ha_alter_info)
+{
+ /* Do nothing for now */
+ DBUG_ENTER("inplace_alter_table");
+ DBUG_ASSERT(ha_alter_info->handler_ctx);
+
+ Rdb_inplace_alter_ctx* ctx=
+ static_cast<Rdb_inplace_alter_ctx*> (ha_alter_info->handler_ctx);
+
+ if (ha_alter_info->handler_flags & my_core::Alter_inplace_info::ADD_INDEX)
+ {
+ /*
+ Buffers need to be set up again to account for new, possibly longer
+ secondary keys.
+ */
+ free_key_buffers();
+ if (alloc_key_buffers(altered_table, ctx->m_new_tdef))
+ {
+ DBUG_RETURN(1);
+ }
+
+ /* Populate all new secondary keys by scanning primary key */
+ if (inplace_populate_sk(altered_table, ctx->m_added_indexes))
+ {
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", DBUG_RETURN(1););
+
+ DBUG_RETURN(0);
+}
+
+/**
+ Scan the Primary Key index entries and populate the new secondary keys.
+*/
+int ha_rocksdb::inplace_populate_sk(const TABLE* new_table_arg,
+ const std::unordered_set<std::shared_ptr<Rdb_key_def>>& indexes)
+{
+ DBUG_ENTER("ha_rocksdb::inplace_populate_sk");
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict_manager.begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ /* Update the data dictionary */
+ std::unordered_set<GL_INDEX_ID> create_index_ids;
+ for (auto& index : indexes)
+ {
+ create_index_ids.insert(index->get_gl_index_id());
+ }
+ dict_manager.add_create_index(create_index_ids, batch);
+ dict_manager.commit(batch);
+
+ const bool hidden_pk_exists = has_hidden_pk(table);
+
+ int res= 0;
+ Rdb_transaction *tx;
+ ulonglong rdb_merge_buf_size= THDVAR(ha_thd(), merge_buf_size);
+ ulonglong rdb_merge_combine_read_size= THDVAR(ha_thd(),
+ merge_combine_read_size);
+
+ for (auto& index : indexes)
+ {
+ const rocksdb::Comparator* index_comp= index->get_cf()->GetComparator();
+ Rdb_index_merge rdb_merge(rdb_merge_buf_size, rdb_merge_combine_read_size,
+ index_comp);
+
+ if ((res= rdb_merge.init()))
+ {
+ DBUG_RETURN(res);
+ }
+
+ /*
+ Note: We pass in the currently existing table + tbl_def object here,
+ as the pk index position may have changed in the case of hidden primary
+ keys.
+ */
+ uint pk= pk_index(table, m_tbl_def);
+ ha_index_init(pk, true);
+
+ /* Scan each record in the primary key in order */
+ for (res = index_first(table->record[0]); res == 0;
+ res = index_next(table->record[0]))
+ {
+ longlong hidden_pk_id= 0;
+ if (hidden_pk_exists &&
+ read_hidden_pk_id_from_rowkey(&hidden_pk_id))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error retrieving hidden pk id.");
+ ha_index_end();
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ /* Create new secondary index entry */
+ int new_packed_size= index->pack_record(new_table_arg, m_pack_buffer,
+ table->record[0],
+ m_sk_packed_tuple, &m_sk_tails,
+ should_store_checksums(),
+ hidden_pk_id);
+
+ rocksdb::Slice key= rocksdb::Slice(
+ reinterpret_cast<const char*>(m_sk_packed_tuple), new_packed_size);
+ rocksdb::Slice val= rocksdb::Slice(
+ reinterpret_cast<const char*>(m_sk_tails.ptr()),
+ m_sk_tails.get_current_pos());
+
+ /*
+ Add record to offset tree in preparation for writing out to
+ disk in sorted chunks.
+ */
+ if ((res= rdb_merge.add(key, val)))
+ {
+ ha_index_end();
+ DBUG_RETURN(res);
+ }
+ }
+
+ if (res != HA_ERR_END_OF_FILE)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error retrieving index entry from primary key.");
+ ha_index_end();
+ DBUG_RETURN(res);
+ }
+
+ tx= get_or_create_tx(table->in_use);
+ ha_index_end();
+
+ /*
+ Perform an n-way merge of the sorted buffers on disk, then write all
+ results to RocksDB via the SSTFileWriter API.
+ */
+ rocksdb::Slice merge_key;
+ rocksdb::Slice merge_val;
+ while ((res= rdb_merge.next(&merge_key, &merge_val)) == 0)
+ {
+ /*
+ Insert key and slice to SST via SSTFileWriter API.
+ */
+ if ((res= bulk_load_key(tx, index, merge_key, merge_val)))
+ {
+ break;
+ }
+ }
+
+ /*
+ Here, res == -1 means that we are finished, while > 0 means an error
+ occurred.
+ */
+ if (res > 0)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error while bulk loading keys in external merge sort.");
+ DBUG_RETURN(res);
+ }
+
+ if ((res= tx->finish_bulk_load()))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error finishing bulk load.");
+ DBUG_RETURN(res);
+ }
+ }
+
+ /*
+ Explicitly tell jemalloc to clean up any unused dirty pages at this point.
+ See https://reviews.facebook.net/D63723 for more details.
+ */
+ purge_all_jemalloc_arenas();
+
+ DBUG_EXECUTE_IF("crash_during_online_index_creation", DBUG_SUICIDE(););
+ DBUG_RETURN(res);
+}
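+
+/*
+  A simplified sketch of the external merge sort driven above, assuming an
+  Rdb_index_merge instance named rdb_merge (illustrative only, error
+  handling omitted):
+
+    rdb_merge.init();                      // allocate the sort buffer
+    while (index_next(...) == 0)           // scan the PK in order
+      rdb_merge.add(sk_key, sk_val);       // buffer entries, spilling sorted
+                                           // chunks to disk when full
+    while (rdb_merge.next(&k, &v) == 0)    // n-way merge of the sorted chunks
+      bulk_load_key(tx, index, k, v);      // stream into SST files
+    tx->finish_bulk_load();                // ingest the SST files into RocksDB
+*/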
+
+/**
+ Commit or rollback the changes made during prepare_inplace_alter_table()
+ and inplace_alter_table() inside the storage engine.
+ Note that in case of rollback the allowed level of concurrency during
+ this operation will be the same as for inplace_alter_table() and thus
+ might be higher than during prepare_inplace_alter_table(). (For example,
+ concurrent writes were blocked during prepare, but might not be during
+ rollback).
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function with commit= true reports error, it will be called
+ again with commit= false.
+
+ @note In case of partitioning, this function might be called for rollback
+ without prepare_inplace_alter_table() having been called first.
+ Also, for partitioned tables, ha_alter_info->group_commit_ctx is set to a
+ NULL-terminated array of the partitions' handlers; if all of them are
+ committed as one, then group_commit_ctx should be set to NULL to indicate
+ to the partitioning handler that all partition handlers are committed.
+ @see prepare_inplace_alter_table().
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+ @param commit True => Commit, False => Rollback.
+
+ @retval true Error
+ @retval false Success
+*/
+bool ha_rocksdb::commit_inplace_alter_table(
+ my_core::TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info,
+ bool commit)
+{
+ Rdb_inplace_alter_ctx* ctx0=
+ static_cast<Rdb_inplace_alter_ctx*> (ha_alter_info->handler_ctx);
+
+ DBUG_ENTER("commit_inplace_alter_table");
+
+ /*
+ IMPORTANT: When rollback is requested, MySQL will abort with
+ an assertion failure. That means every failed commit during an inplace
+ alter table will result in a fatal error on the server. Indexes whose
+ creation was still in progress will be detected when the server restarts,
+ and dropped.
+
+ For partitioned tables, a rollback call to this function (commit == false)
+ is done for each partition. A successful commit call only executes once
+ for all partitions.
+ */
+ if (!commit)
+ {
+ /* If ctx has not been created yet, nothing to do here */
+ if (!ctx0)
+ {
+ DBUG_RETURN(0);
+ }
+
+ /*
+ Cannot call destructor for Rdb_tbl_def directly because we don't want to
+ erase the mappings inside the ddl_manager, as the old_key_descr is still
+ using them.
+ */
+ if (ctx0->m_new_key_descr)
+ {
+ /* Delete the new key descriptors */
+ for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++)
+ {
+ ctx0->m_new_key_descr[i]= nullptr;
+ }
+
+ delete[] ctx0->m_new_key_descr;
+ ctx0->m_new_key_descr = nullptr;
+ ctx0->m_new_tdef->m_key_descr_arr = nullptr;
+
+ delete ctx0->m_new_tdef;
+ }
+
+ DBUG_RETURN(0);
+ }
+
+ DBUG_ASSERT(ctx0);
+
+ /*
+ For partitioned tables, we need to commit all changes to all tables at
+ once, unlike in the other inplace alter API methods.
+ */
+ inplace_alter_handler_ctx** ctx_array;
+ inplace_alter_handler_ctx* ctx_single[2];
+
+ if (ha_alter_info->group_commit_ctx)
+ {
+ DBUG_EXECUTE_IF("crash_during_index_creation_partition",
+ DBUG_SUICIDE(););
+ ctx_array = ha_alter_info->group_commit_ctx;
+ }
+ else
+ {
+ ctx_single[0] = ctx0;
+ ctx_single[1] = nullptr;
+ ctx_array = ctx_single;
+ }
+
+ DBUG_ASSERT(ctx0 == ctx_array[0]);
+ ha_alter_info->group_commit_ctx = nullptr;
+
+ if (ha_alter_info->handler_flags &
+ (my_core::Alter_inplace_info::DROP_INDEX |
+ my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
+ my_core::Alter_inplace_info::ADD_INDEX))
+ {
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict_manager.begin();
+ rocksdb::WriteBatch *batch= wb.get();
+ std::unordered_set<GL_INDEX_ID> create_index_ids;
+
+ m_tbl_def= ctx0->m_new_tdef;
+ m_key_descr_arr= m_tbl_def->m_key_descr_arr;
+ m_pk_descr= m_key_descr_arr[pk_index(altered_table, m_tbl_def)];
+
+ dict_manager.lock();
+ for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++)
+ {
+ Rdb_inplace_alter_ctx* ctx= static_cast<Rdb_inplace_alter_ctx*> (*pctx);
+
+ /* Mark indexes to be dropped */
+ dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch);
+
+ for (auto& index : ctx->m_added_indexes)
+ {
+ create_index_ids.insert(index->get_gl_index_id());
+ }
+
+ if (ddl_manager.put_and_write(ctx->m_new_tdef, batch))
+ {
+ /*
+ Failed to write the new entry into the data dictionary; this should
+ never happen.
+ */
+ DBUG_ASSERT(0);
+ }
+ }
+
+ if (dict_manager.commit(batch))
+ {
+ /*
+ Should never reach here. We assume MyRocks will abort if commit fails.
+ */
+ DBUG_ASSERT(0);
+ }
+
+ dict_manager.unlock();
+
+ /* Mark ongoing create indexes as finished/remove from data dictionary */
+ dict_manager.finish_indexes_operation(create_index_ids,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ rdb_drop_idx_thread.signal();
+ }
+
+ DBUG_RETURN(0);
+}
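+
+/*
+  Sketch of the commit fan-out above (illustrative): for a table with three
+  partitions, group_commit_ctx arrives as { ctx_p0, ctx_p1, ctx_p2, nullptr }
+  and a single dict_manager batch carries the dropped/added index ids of all
+  of them; for a non-partitioned table ctx_array simply points at
+  ctx_single = { ctx0, nullptr }.
+*/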
+
+#define SHOW_FNAME(name) rocksdb_show_##name
+
+#define DEF_SHOW_FUNC(name, key) \
+ static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR *var, char *buff) \
+ { \
+ rocksdb_status_counters.name = \
+ rocksdb_stats->getTickerCount(rocksdb::key); \
+ var->type = SHOW_LONGLONG; \
+ var->value = (char *)&rocksdb_status_counters.name; \
+ return 0; \
+ }
+
+#define DEF_STATUS_VAR(name) \
+ {"rocksdb_" #name, (char*) &SHOW_FNAME(name), SHOW_FUNC}
+
+#define DEF_STATUS_VAR_PTR(name, ptr, option) \
+ {"rocksdb_" name, (char*) ptr, option}
+
+#define DEF_STATUS_VAR_FUNC(name, ptr, option) \
+ {name, reinterpret_cast<char*>(ptr), option}
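+
+/*
+  For reference, DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS) expands
+  to roughly the following (sketch of the generated code):
+
+    static int rocksdb_show_block_cache_miss(MYSQL_THD thd, SHOW_VAR *var,
+                                             char *buff)
+    {
+      rocksdb_status_counters.block_cache_miss =
+        rocksdb_stats->getTickerCount(rocksdb::BLOCK_CACHE_MISS);
+      var->type = SHOW_LONGLONG;
+      var->value = (char *)&rocksdb_status_counters.block_cache_miss;
+      return 0;
+    }
+
+  and DEF_STATUS_VAR(block_cache_miss) produces the SHOW_VAR entry
+  {"rocksdb_block_cache_miss", (char*) &rocksdb_show_block_cache_miss,
+   SHOW_FUNC}.
+*/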
+
+struct rocksdb_status_counters_t {
+ uint64_t block_cache_miss;
+ uint64_t block_cache_hit;
+ uint64_t block_cache_add;
+ uint64_t block_cache_index_miss;
+ uint64_t block_cache_index_hit;
+ uint64_t block_cache_filter_miss;
+ uint64_t block_cache_filter_hit;
+ uint64_t block_cache_data_miss;
+ uint64_t block_cache_data_hit;
+ uint64_t bloom_filter_useful;
+ uint64_t memtable_hit;
+ uint64_t memtable_miss;
+ uint64_t compaction_key_drop_new;
+ uint64_t compaction_key_drop_obsolete;
+ uint64_t compaction_key_drop_user;
+ uint64_t number_keys_written;
+ uint64_t number_keys_read;
+ uint64_t number_keys_updated;
+ uint64_t bytes_written;
+ uint64_t bytes_read;
+ uint64_t no_file_closes;
+ uint64_t no_file_opens;
+ uint64_t no_file_errors;
+ uint64_t l0_slowdown_micros;
+ uint64_t memtable_compaction_micros;
+ uint64_t l0_num_files_stall_micros;
+ uint64_t rate_limit_delay_millis;
+ uint64_t num_iterators;
+ uint64_t number_multiget_get;
+ uint64_t number_multiget_keys_read;
+ uint64_t number_multiget_bytes_read;
+ uint64_t number_deletes_filtered;
+ uint64_t number_merge_failures;
+ uint64_t sequence_number;
+ uint64_t bloom_filter_prefix_checked;
+ uint64_t bloom_filter_prefix_useful;
+ uint64_t number_reseeks_iteration;
+ uint64_t getupdatessince_calls;
+ uint64_t block_cachecompressed_miss;
+ uint64_t block_cachecompressed_hit;
+ uint64_t wal_synced;
+ uint64_t wal_bytes;
+ uint64_t write_self;
+ uint64_t write_other;
+ uint64_t write_timedout;
+ uint64_t write_wal;
+ uint64_t flush_write_bytes;
+ uint64_t compact_read_bytes;
+ uint64_t compact_write_bytes;
+ uint64_t number_superversion_acquires;
+ uint64_t number_superversion_releases;
+ uint64_t number_superversion_cleanups;
+ uint64_t number_block_not_compressed;
+};
+
+static rocksdb_status_counters_t rocksdb_status_counters;
+
+DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS)
+DEF_SHOW_FUNC(block_cache_hit, BLOCK_CACHE_HIT)
+DEF_SHOW_FUNC(block_cache_add, BLOCK_CACHE_ADD)
+DEF_SHOW_FUNC(block_cache_index_miss, BLOCK_CACHE_INDEX_MISS)
+DEF_SHOW_FUNC(block_cache_index_hit, BLOCK_CACHE_INDEX_HIT)
+DEF_SHOW_FUNC(block_cache_filter_miss, BLOCK_CACHE_FILTER_MISS)
+DEF_SHOW_FUNC(block_cache_filter_hit, BLOCK_CACHE_FILTER_HIT)
+DEF_SHOW_FUNC(block_cache_data_miss, BLOCK_CACHE_DATA_MISS)
+DEF_SHOW_FUNC(block_cache_data_hit, BLOCK_CACHE_DATA_HIT)
+DEF_SHOW_FUNC(bloom_filter_useful, BLOOM_FILTER_USEFUL)
+DEF_SHOW_FUNC(memtable_hit, MEMTABLE_HIT)
+DEF_SHOW_FUNC(memtable_miss, MEMTABLE_MISS)
+DEF_SHOW_FUNC(compaction_key_drop_new, COMPACTION_KEY_DROP_NEWER_ENTRY)
+DEF_SHOW_FUNC(compaction_key_drop_obsolete, COMPACTION_KEY_DROP_OBSOLETE)
+DEF_SHOW_FUNC(compaction_key_drop_user, COMPACTION_KEY_DROP_USER)
+DEF_SHOW_FUNC(number_keys_written, NUMBER_KEYS_WRITTEN)
+DEF_SHOW_FUNC(number_keys_read, NUMBER_KEYS_READ)
+DEF_SHOW_FUNC(number_keys_updated, NUMBER_KEYS_UPDATED)
+DEF_SHOW_FUNC(bytes_written, BYTES_WRITTEN)
+DEF_SHOW_FUNC(bytes_read, BYTES_READ)
+DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES)
+DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS)
+DEF_SHOW_FUNC(no_file_errors, NO_FILE_ERRORS)
+DEF_SHOW_FUNC(l0_slowdown_micros, STALL_L0_SLOWDOWN_MICROS)
+DEF_SHOW_FUNC(memtable_compaction_micros, STALL_MEMTABLE_COMPACTION_MICROS)
+DEF_SHOW_FUNC(l0_num_files_stall_micros, STALL_L0_NUM_FILES_MICROS)
+DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS)
+DEF_SHOW_FUNC(num_iterators, NO_ITERATORS)
+DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS)
+DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ)
+DEF_SHOW_FUNC(number_multiget_bytes_read, NUMBER_MULTIGET_BYTES_READ)
+DEF_SHOW_FUNC(number_deletes_filtered, NUMBER_FILTERED_DELETES)
+DEF_SHOW_FUNC(number_merge_failures, NUMBER_MERGE_FAILURES)
+DEF_SHOW_FUNC(sequence_number, SEQUENCE_NUMBER)
+DEF_SHOW_FUNC(bloom_filter_prefix_checked, BLOOM_FILTER_PREFIX_CHECKED)
+DEF_SHOW_FUNC(bloom_filter_prefix_useful, BLOOM_FILTER_PREFIX_USEFUL)
+DEF_SHOW_FUNC(number_reseeks_iteration, NUMBER_OF_RESEEKS_IN_ITERATION)
+DEF_SHOW_FUNC(getupdatessince_calls, GET_UPDATES_SINCE_CALLS)
+DEF_SHOW_FUNC(block_cachecompressed_miss, BLOCK_CACHE_COMPRESSED_MISS)
+DEF_SHOW_FUNC(block_cachecompressed_hit, BLOCK_CACHE_COMPRESSED_HIT)
+DEF_SHOW_FUNC(wal_synced, WAL_FILE_SYNCED)
+DEF_SHOW_FUNC(wal_bytes, WAL_FILE_BYTES)
+DEF_SHOW_FUNC(write_self, WRITE_DONE_BY_SELF)
+DEF_SHOW_FUNC(write_other, WRITE_DONE_BY_OTHER)
+DEF_SHOW_FUNC(write_timedout, WRITE_TIMEDOUT)
+DEF_SHOW_FUNC(write_wal, WRITE_WITH_WAL)
+DEF_SHOW_FUNC(flush_write_bytes, FLUSH_WRITE_BYTES)
+DEF_SHOW_FUNC(compact_read_bytes, COMPACT_READ_BYTES)
+DEF_SHOW_FUNC(compact_write_bytes, COMPACT_WRITE_BYTES)
+DEF_SHOW_FUNC(number_superversion_acquires, NUMBER_SUPERVERSION_ACQUIRES)
+DEF_SHOW_FUNC(number_superversion_releases, NUMBER_SUPERVERSION_RELEASES)
+DEF_SHOW_FUNC(number_superversion_cleanups, NUMBER_SUPERVERSION_CLEANUPS)
+DEF_SHOW_FUNC(number_block_not_compressed, NUMBER_BLOCK_NOT_COMPRESSED)
+
+static void myrocks_update_status() {
+ export_stats.rows_deleted = global_stats.rows[ROWS_DELETED];
+ export_stats.rows_inserted = global_stats.rows[ROWS_INSERTED];
+ export_stats.rows_read = global_stats.rows[ROWS_READ];
+ export_stats.rows_updated = global_stats.rows[ROWS_UPDATED];
+
+ export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED];
+ export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED];
+ export_stats.system_rows_read = global_stats.system_rows[ROWS_READ];
+ export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED];
+}
+
+static SHOW_VAR myrocks_status_variables[]= {
+ DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("system_rows_inserted",
+ &export_stats.system_rows_inserted, SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_FUNC("system_rows_updated", &export_stats.system_rows_updated,
+ SHOW_LONGLONG),
+
+ {NullS, NullS, SHOW_LONG}
+};
+
+static void show_myrocks_vars(THD* thd, SHOW_VAR* var, char* buff) {
+ myrocks_update_status();
+ var->type = SHOW_ARRAY;
+ var->value = reinterpret_cast<char*>(&myrocks_status_variables);
+}
+
+static SHOW_VAR rocksdb_status_vars[]= {
+ DEF_STATUS_VAR(block_cache_miss),
+ DEF_STATUS_VAR(block_cache_hit),
+ DEF_STATUS_VAR(block_cache_add),
+ DEF_STATUS_VAR(block_cache_index_miss),
+ DEF_STATUS_VAR(block_cache_index_hit),
+ DEF_STATUS_VAR(block_cache_filter_miss),
+ DEF_STATUS_VAR(block_cache_filter_hit),
+ DEF_STATUS_VAR(block_cache_data_miss),
+ DEF_STATUS_VAR(block_cache_data_hit),
+ DEF_STATUS_VAR(bloom_filter_useful),
+ DEF_STATUS_VAR(memtable_hit),
+ DEF_STATUS_VAR(memtable_miss),
+ DEF_STATUS_VAR(compaction_key_drop_new),
+ DEF_STATUS_VAR(compaction_key_drop_obsolete),
+ DEF_STATUS_VAR(compaction_key_drop_user),
+ DEF_STATUS_VAR(number_keys_written),
+ DEF_STATUS_VAR(number_keys_read),
+ DEF_STATUS_VAR(number_keys_updated),
+ DEF_STATUS_VAR(bytes_written),
+ DEF_STATUS_VAR(bytes_read),
+ DEF_STATUS_VAR(no_file_closes),
+ DEF_STATUS_VAR(no_file_opens),
+ DEF_STATUS_VAR(no_file_errors),
+ DEF_STATUS_VAR(l0_slowdown_micros),
+ DEF_STATUS_VAR(memtable_compaction_micros),
+ DEF_STATUS_VAR(l0_num_files_stall_micros),
+ DEF_STATUS_VAR(rate_limit_delay_millis),
+ DEF_STATUS_VAR(num_iterators),
+ DEF_STATUS_VAR(number_multiget_get),
+ DEF_STATUS_VAR(number_multiget_keys_read),
+ DEF_STATUS_VAR(number_multiget_bytes_read),
+ DEF_STATUS_VAR(number_deletes_filtered),
+ DEF_STATUS_VAR(number_merge_failures),
+ DEF_STATUS_VAR(sequence_number),
+ DEF_STATUS_VAR(bloom_filter_prefix_checked),
+ DEF_STATUS_VAR(bloom_filter_prefix_useful),
+ DEF_STATUS_VAR(number_reseeks_iteration),
+ DEF_STATUS_VAR(getupdatessince_calls),
+ DEF_STATUS_VAR(block_cachecompressed_miss),
+ DEF_STATUS_VAR(block_cachecompressed_hit),
+ DEF_STATUS_VAR(wal_synced),
+ DEF_STATUS_VAR(wal_bytes),
+ DEF_STATUS_VAR(write_self),
+ DEF_STATUS_VAR(write_other),
+ DEF_STATUS_VAR(write_timedout),
+ DEF_STATUS_VAR(write_wal),
+ DEF_STATUS_VAR(flush_write_bytes),
+ DEF_STATUS_VAR(compact_read_bytes),
+ DEF_STATUS_VAR(compact_write_bytes),
+ DEF_STATUS_VAR(number_superversion_acquires),
+ DEF_STATUS_VAR(number_superversion_releases),
+ DEF_STATUS_VAR(number_superversion_cleanups),
+ DEF_STATUS_VAR(number_block_not_compressed),
+ DEF_STATUS_VAR_PTR("snapshot_conflict_errors",
+ &rocksdb_snapshot_conflict_errors,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_sst_entry_singledelete",
+ &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge,
+ SHOW_LONGLONG),
+ DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other,
+ SHOW_LONGLONG),
+ {"rocksdb", reinterpret_cast<char*>(&show_myrocks_vars), SHOW_FUNC},
+ {NullS, NullS, SHOW_LONG}
+};
+
+
+/*
+ Background thread's main logic
+*/
+
+void Rdb_background_thread::run()
+{
+ timespec ts_next_sync;
+ clock_gettime(CLOCK_REALTIME, &ts_next_sync);
+ ts_next_sync.tv_sec++;
+
+ for (;;)
+ {
+ // Wait for one second or until we are signalled to stop the thread
+ mysql_mutex_lock(&m_signal_mutex);
+ auto ret __attribute__((__unused__)) = mysql_cond_timedwait(
+ &m_signal_cond, &m_signal_mutex, &ts_next_sync);
+ // make sure that no program error is returned
+ DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT);
+ bool local_stop= m_stop;
+ bool local_save_stats= m_save_stats;
+ reset();
+ mysql_mutex_unlock(&m_signal_mutex);
+
+ if (local_stop)
+ {
+ break;
+ }
+
+ if (local_save_stats)
+ {
+ ddl_manager.persist_stats();
+ }
+
+ // Flush the WAL if need be, but don't do it more frequently
+ // than once per second
+ timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ if (ts.tv_sec - ts_next_sync.tv_sec >= 1)
+ {
+ if (rdb && rocksdb_background_sync)
+ {
+ DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes);
+ rocksdb::Status s= rdb->SyncWAL();
+ if (!s.ok())
+ rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD);
+ }
+ ts_next_sync.tv_sec= ts.tv_sec + 1;
+ }
+ }
+
+ // Save any remaining stats which might have been left unsaved
+ ddl_manager.persist_stats();
+}
+
+
+/**
+ Decide whether it is possible to use the bloom filter.
+
+ @detail
+ Even if a bloom filter exists, it is not always possible
+ to use it. Using the bloom filter when it should not be used can cause
+ false negatives -- fewer rows than expected may be returned.
+ It is the caller's responsibility to use the bloom filter correctly.
+
+ If no bloom filter exists, the return value does not matter because
+ RocksDB will not consult one internally.
+
+ @param kd
+ @param eq_cond Equal condition part of the key. This always includes
+ system index id (4 bytes).
+ @param use_all_keys True if all key parts are set with equal conditions.
+ This is aware of extended keys.
+*/
+bool can_use_bloom_filter(THD *thd,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice &eq_cond,
+ const bool use_all_keys,
+ bool is_ascending)
+{
+ bool can_use= false;
+
+ if (THDVAR(thd, skip_bloom_filter_on_read))
+ {
+ return can_use;
+ }
+
+ rocksdb::Options opt = rdb->GetOptions(kd->get_cf());
+ if (opt.prefix_extractor)
+ {
+ /*
+ This is an optimized use case for CappedPrefixTransform.
+ If eq_cond length >= prefix extractor length and if
+ all keys are used for equal lookup, it is
+ always possible to use bloom filter.
+
+ Prefix bloom filter can't be used on descending scan with
+ prefix lookup (i.e. WHERE id1=1 ORDER BY id2 DESC), because of
+ RocksDB's limitation. On ascending (or not sorting) scan,
+ keys longer than the capped prefix length will be truncated down
+ to the capped length and the resulting key is added to the bloom filter.
+
+ Keys shorter than the capped prefix length will be added to
+ the bloom filter. When keys are looked up, key conditions
+ longer than the capped length can be used; shorter key
+ conditions require all parts of the key to be available
+ for the short key match.
+ */
+ if (use_all_keys && opt.prefix_extractor->InRange(eq_cond))
+ can_use= true;
+ else if (!is_ascending)
+ can_use= false;
+ else if (opt.prefix_extractor->SameResultWhenAppended(eq_cond))
+ can_use= true;
+ else
+ can_use= false;
+ } else
+ {
+ /*
+ If the prefix extractor is not defined, all key parts have to be
+ used by eq_cond.
+ */
+ if (use_all_keys)
+ can_use= true;
+ else
+ can_use= false;
+ }
+
+ return can_use;
+}
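+
+/*
+  A condensed decision table for the logic above, evaluated top to bottom
+  (illustrative only):
+
+    prefix extractor set, use_all_keys && eq_cond covers the prefix (InRange)  -> usable
+    prefix extractor set, descending scan (is_ascending == false)              -> not usable
+    prefix extractor set, SameResultWhenAppended(eq_cond)                      -> usable
+    no prefix extractor,  use_all_keys                                         -> usable
+    anything else                                                              -> not usable
+*/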
+
+/* For modules that need access to the global data structures */
+rocksdb::DB *rdb_get_rocksdb_db()
+{
+ return rdb;
+}
+
+Rdb_cf_manager& rdb_get_cf_manager()
+{
+ return cf_manager;
+}
+
+rocksdb::BlockBasedTableOptions& rdb_get_table_options()
+{
+ return rocksdb_tbl_options;
+}
+
+
+int rdb_get_table_perf_counters(const char *tablename,
+ Rdb_perf_counters *counters)
+{
+ DBUG_ASSERT(counters != nullptr);
+ DBUG_ASSERT(tablename != nullptr);
+
+ Rdb_table_handler *table_handler;
+ table_handler= rdb_open_tables.get_table_handler(tablename);
+ if (table_handler == nullptr)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ counters->load(table_handler->m_table_perf_context);
+
+ rdb_open_tables.release_table_handler(table_handler);
+ return 0;
+}
+
+
+void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type)
+{
+ if (status.IsIOError())
+ {
+ switch (err_type) {
+ case RDB_IO_ERROR_TX_COMMIT:
+ case RDB_IO_ERROR_DICT_COMMIT:
+ {
+ sql_print_error("RocksDB: Failed to write to WAL - status %d, %s",
+ status.code(), status.ToString().c_str());
+ sql_print_error("RocksDB: Aborting on WAL write error.");
+ abort_with_stack_traces();
+ break;
+ }
+ case RDB_IO_ERROR_BG_THREAD:
+ {
+ sql_print_warning("RocksDB: BG Thread failed to write to RocksDB "
+ "- status %d, %s", status.code(),
+ status.ToString().c_str());
+ break;
+ }
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ }
+ else if (status.IsCorruption())
+ {
+ /* NO_LINT_DEBUG */
+ sql_print_error("RocksDB: Data Corruption detected! %d, %s",
+ status.code(), status.ToString().c_str());
+ /* NO_LINT_DEBUG */
+ sql_print_error("RocksDB: Aborting because of data corruption.");
+ abort_with_stack_traces();
+ }
+ else if (!status.ok())
+ {
+ switch (err_type) {
+ case RDB_IO_ERROR_DICT_COMMIT:
+ {
+ sql_print_error("RocksDB: Failed to write to WAL (dictionary) - "
+ "status %d, %s",
+ status.code(), status.ToString().c_str());
+ sql_print_error("RocksDB: Aborting on WAL write error.");
+ abort_with_stack_traces();
+ break;
+ }
+ default:
+ sql_print_warning("RocksDB: Failed to write to RocksDB "
+ "- status %d, %s", status.code(),
+ status.ToString().c_str());
+ break;
+ }
+ }
+}
+
+Rdb_dict_manager *rdb_get_dict_manager(void)
+{
+ return &dict_manager;
+}
+
+Rdb_ddl_manager *rdb_get_ddl_manager(void)
+{
+ return &ddl_manager;
+}
+
+Rdb_binlog_manager *rdb_get_binlog_manager(void)
+{
+ return &binlog_manager;
+}
+
+
+void
+rocksdb_set_compaction_options(
+ my_core::THD* thd __attribute__((__unused__)),
+ my_core::st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr,
+ const void* save)
+{
+ if (var_ptr && save) {
+ *(uint64_t*)var_ptr = *(const uint64_t*) save;
+ }
+ Rdb_compact_params params = {
+ (uint64_t)rocksdb_compaction_sequential_deletes,
+ (uint64_t)rocksdb_compaction_sequential_deletes_window,
+ (uint64_t)rocksdb_compaction_sequential_deletes_file_size
+ };
+ if (properties_collector_factory) {
+ properties_collector_factory->SetCompactionParams(params);
+ }
+}
+
+void rocksdb_set_table_stats_sampling_pct(
+ my_core::THD* thd __attribute__((__unused__)),
+ my_core::st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr __attribute__((__unused__)),
+ const void* save)
+{
+ mysql_mutex_lock(&rdb_sysvars_mutex);
+
+ uint32_t new_val= *static_cast<const uint32_t*>(save);
+
+ if (new_val != rocksdb_table_stats_sampling_pct) {
+ rocksdb_table_stats_sampling_pct = new_val;
+
+ if (properties_collector_factory) {
+ properties_collector_factory->SetTableStatsSamplingPct(
+ rocksdb_table_stats_sampling_pct);
+ }
+ }
+
+ mysql_mutex_unlock(&rdb_sysvars_mutex);
+}
+
+/*
+ This function allows setting the rate limiter's bytes-per-second value,
+ but only if the rate limiter was turned on at startup.
+ If the rate is already 0 (turned off) or we are changing it to 0 (trying
+ to turn it off), this function will push a warning to the client and do
+ nothing.
+ This is similar to the code in innodb_doublewrite_update (found in
+ storage/innobase/handler/ha_innodb.cc).
+*/
+void
+rocksdb_set_rate_limiter_bytes_per_sec(
+ my_core::THD* thd,
+ my_core::st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr __attribute__((__unused__)),
+ const void* save)
+{
+ uint64_t new_val= *static_cast<const uint64_t*>(save);
+ if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0)
+ {
+ /*
+ If a rate_limiter was not enabled at startup we can't enable it now,
+ nor can we disable one that was created at startup.
+ */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot "
+ "be dynamically changed to or from 0. Do a clean "
+ "shutdown if you want to change it from or to 0.");
+ }
+ else if (new_val != rocksdb_rate_limiter_bytes_per_sec)
+ {
+ /* Apply the new value to the rate limiter and store it locally */
+ DBUG_ASSERT(rocksdb_rate_limiter != nullptr);
+ rocksdb_rate_limiter_bytes_per_sec= new_val;
+ rocksdb_rate_limiter->SetBytesPerSecond(new_val);
+ }
+}
+
+void rdb_set_collation_exception_list(const char *exception_list)
+{
+ DBUG_ASSERT(rdb_collation_exceptions != nullptr);
+
+ if (!rdb_collation_exceptions->set_patterns(exception_list))
+ {
+ my_core::warn_about_bad_patterns(rdb_collation_exceptions,
+ "strict_collation_exceptions");
+ }
+}
+
+void
+rocksdb_set_collation_exception_list(THD* thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr,
+ const void* save)
+{
+ const char* val = *static_cast<const char*const*>(save);
+
+ rdb_set_collation_exception_list(val);
+
+ *static_cast<const char**>(var_ptr) = val;
+}
+
+void
+rocksdb_set_bulk_load(THD* thd,
+ struct st_mysql_sys_var* var __attribute__((__unused__)),
+ void* var_ptr,
+ const void* save)
+{
+ Rdb_transaction*& tx= get_tx_from_thd(thd);
+
+ if (tx != nullptr)
+ {
+ int rc= tx->finish_bulk_load();
+ if (rc != 0)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Error %d finalizing last SST file while "
+ "setting bulk loading variable", rc);
+ abort_with_stack_traces();
+ }
+ }
+
+ *static_cast<bool*>(var_ptr) = *static_cast<const bool*>(save);
+}
+
+void rdb_queue_save_stats_request()
+{
+ rdb_bg_thread.request_save_stats();
+}
+
+void ha_rocksdb::rpl_before_delete_rows()
+{
+ m_in_rpl_delete_rows= true;
+}
+
+void ha_rocksdb::rpl_after_delete_rows()
+{
+ m_in_rpl_delete_rows= false;
+}
+
+void ha_rocksdb::rpl_before_update_rows()
+{
+ m_in_rpl_update_rows= true;
+}
+
+void ha_rocksdb::rpl_after_update_rows()
+{
+ m_in_rpl_update_rows= false;
+}
+
+/**
+ @brief
+ Decides whether Read Free Replication can be used. Returning true means
+ Read Free Replication will be used. It applies only to UPDATE and DELETE
+ row events, and the table must have a user-defined primary key.
+*/
+bool ha_rocksdb::use_read_free_rpl()
+{
+ return ((m_in_rpl_delete_rows || m_in_rpl_update_rows) &&
+ !has_hidden_pk(table) && m_use_read_free_rpl);
+}
+
+double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows)
+{
+ if (index != table->s->primary_key) {
+ /* Non covering index range scan */
+ return(handler::read_time(index, ranges, rows));
+ }
+ return (rows/20.0) + 1;
+}
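+
+/*
+  Worked example of the cost model above (illustrative): a clustered PK
+  range scan over 1000 rows is costed at 1000 / 20.0 + 1 = 51, while a scan
+  over a secondary index falls back to handler::read_time().
+*/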
+
+} // namespace myrocks
+
+/*
+ Register the storage engine plugin outside of myrocks namespace
+ so that mysql_declare_plugin does not get confused when it does
+ its name generation.
+*/
+
+struct st_mysql_storage_engine rocksdb_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+mysql_declare_plugin(rocksdb_se)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */
+ &rocksdb_storage_engine, /* Plugin Descriptor */
+ "ROCKSDB", /* Plugin Name */
+ "Monty Program Ab", /* Plugin Author */
+ "RocksDB storage engine", /* Plugin Description */
+ PLUGIN_LICENSE_GPL, /* Plugin Licence */
+ myrocks::rocksdb_init_func, /* Plugin Entry Point */
+ myrocks::rocksdb_done_func, /* Plugin Deinitializer */
+ 0x0001, /* version number (0.1) */
+ myrocks::rocksdb_status_vars, /* status variables */
+ myrocks::rocksdb_system_variables, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+},
+myrocks::rdb_i_s_cfstats,
+myrocks::rdb_i_s_dbstats,
+myrocks::rdb_i_s_perf_context,
+myrocks::rdb_i_s_perf_context_global,
+myrocks::rdb_i_s_cfoptions,
+myrocks::rdb_i_s_global_info,
+myrocks::rdb_i_s_ddl,
+myrocks::rdb_i_s_index_file_map
+mysql_declare_plugin_end;
diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h
new file mode 100644
index 00000000000..8f63050e74c
--- /dev/null
+++ b/storage/rocksdb/ha_rocksdb.h
@@ -0,0 +1,1054 @@
+/*
+ Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+
+/* C++ standard header files */
+#include <set>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+/* MySQL header files */
+#include "./handler.h" /* handler */
+#include "./my_global.h" /* ulonglong */
+#include "./sql_string.h"
+#include "./ut0counter.h"
+
+/* RocksDB header files */
+#include "rocksdb/cache.h"
+#include "rocksdb/perf_context.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/utilities/options_util.h"
+#include "rocksdb/utilities/transaction_db.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+
+/* MyRocks header files */
+#include "./rdb_comparator.h"
+#include "./rdb_index_merge.h"
+#include "./rdb_perf_context.h"
+#include "./rdb_sst_info.h"
+#include "./rdb_utils.h"
+
+/**
+ @note MyRocks Coding Conventions:
+ MyRocks code follows the baseline MySQL coding conventions, available at
+ http://dev.mysql.com/doc/internals/en/coding-guidelines.html, with several
+ refinements (@see /storage/rocksdb/README file).
+*/
+
+namespace myrocks {
+
+/*
+ This is
+ - the name of the default Column Family (the CF which stores indexes which
+ didn't explicitly specify which CF they are in)
+ - the name used to set the default column family parameter for per-cf
+ arguments.
+*/
+const char * const DEFAULT_CF_NAME= "default";
+
+/*
+ This is the name of the Column Family used for storing the data dictionary.
+*/
+const char * const DEFAULT_SYSTEM_CF_NAME= "__system__";
+
+/*
+ This is the name of the hidden primary key for tables with no pk.
+*/
+const char * const HIDDEN_PK_NAME= "HIDDEN_PK_ID";
+
+/*
+ Column family name which means "put this index into its own column family".
+ See Rdb_cf_manager::get_per_index_cf_name().
+*/
+const char * const PER_INDEX_CF_NAME = "$per_index_cf";
+
+/*
+ Default, minimum valid, and maximum valid sampling rate values used when
+ collecting statistics about a table.
+*/
+#define RDB_DEFAULT_TBL_STATS_SAMPLE_PCT 10
+#define RDB_TBL_STATS_SAMPLE_PCT_MIN 1
+#define RDB_TBL_STATS_SAMPLE_PCT_MAX 100
+
+/*
+ Default and maximum values for rocksdb-compaction-sequential-deletes and
+ rocksdb-compaction-sequential-deletes-window to add basic boundary checking.
+*/
+#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES 0
+#define MAX_COMPACTION_SEQUENTIAL_DELETES 2000000
+
+#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW 0
+#define MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW 2000000
+
+/*
+ Default and maximum values for various compaction and flushing related
+ options. Numbers are based on the hardware we currently use and our internal
+ benchmarks which indicate that parallelization helps with the speed of
+ compactions.
+
+ Ideally, of course, we would use a heuristic to determine the number of
+ CPUs and derive the values from there. This, however, has its own set of
+ problems, so we choose simplicity for now.
+*/
+#define MAX_BACKGROUND_COMPACTIONS 64
+#define MAX_BACKGROUND_FLUSHES 64
+
+#define DEFAULT_SUBCOMPACTIONS 1
+#define MAX_SUBCOMPACTIONS 64
+
+/*
+ Defines the field sizes for serializing XID object to a string representation.
+ string byte format: [field_size: field_value, ...]
+ [
+ 8: XID.formatID,
+ 1: XID.gtrid_length,
+ 1: XID.bqual_length,
+ XID.gtrid_length + XID.bqual_length: XID.data
+ ]
+*/
+#define RDB_FORMATID_SZ 8
+#define RDB_GTRID_SZ 1
+#define RDB_BQUAL_SZ 1
+#define RDB_XIDHDR_LEN (RDB_FORMATID_SZ + RDB_GTRID_SZ + RDB_BQUAL_SZ)
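+
+/*
+  Worked example of the layout above (hypothetical XID, illustrative only):
+  formatID=1, gtrid="abc" (length 3), bqual="d" (length 1) serializes into
+  8 + 1 + 1 + (3 + 1) = 14 bytes:
+
+    [8: formatID=1][1: 3][1: 1][4: "abcd"]
+*/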
+
+/*
+ To fix an unhandled exception we specify the upper bound as LONGLONG_MAX
+ instead of ULONGLONG_MAX, because the latter is -1 and causes an exception
+ when cast to a JNI jlong (signed).
+
+ The reason behind the cast issue is the lack of unsigned int support in Java.
+*/
+#define MAX_RATE_LIMITER_BYTES_PER_SEC static_cast<uint64_t>(LONGLONG_MAX)
+
+/*
+ Hidden PK column (for tables with no primary key) is a longlong (aka 8 bytes).
+ static_assert() in code will validate this assumption.
+*/
+#define ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN sizeof(longlong)
+
+/*
+ MyRocks-specific error codes. NB! Please make sure to update
+ HA_ERR_ROCKSDB_LAST when adding new ones.
+*/
+#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1)
+#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2)
+#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3)
+#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS
+
+inline bool looks_like_per_index_cf_typo(const char *name)
+{
+ return (name && name[0]=='$' && strcmp(name, PER_INDEX_CF_NAME));
+}
+
+/**
+ @brief
+ Rdb_table_handler is a reference-counted structure storing information for
+ each open table. All the objects are stored in a global hash map.
+
+ //TODO: join this with Rdb_tbl_def ?
+*/
+struct Rdb_table_handler
+{
+ char *m_table_name;
+ uint m_table_name_length;
+ int m_ref_count;
+
+ my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock
+
+ /* Stores cumulative table statistics */
+ my_io_perf_atomic_t m_io_perf_read;
+ Rdb_atomic_perf_counters m_table_perf_context;
+};
+
+
+
+class Rdb_key_def;
+class Rdb_tbl_def;
+class Rdb_transaction;
+class Rdb_transaction_impl;
+class Rdb_writebatch_impl;
+class Rdb_field_encoder;
+
+const char *const rocksdb_hton_name= "ROCKSDB";
+
+typedef struct _gl_index_id_s {
+ uint32_t cf_id;
+ uint32_t index_id;
+ bool operator==(const struct _gl_index_id_s& other) const
+ {
+ return cf_id == other.cf_id && index_id == other.index_id;
+ }
+ bool operator!=(const struct _gl_index_id_s& other) const
+ {
+ return cf_id != other.cf_id || index_id != other.index_id;
+ }
+ bool operator<(const struct _gl_index_id_s& other) const
+ {
+ return cf_id < other.cf_id ||
+ (cf_id == other.cf_id && index_id < other.index_id);
+ }
+ bool operator<=(const struct _gl_index_id_s& other) const
+ {
+ return cf_id < other.cf_id ||
+ (cf_id == other.cf_id && index_id <= other.index_id);
+ }
+ bool operator>(const struct _gl_index_id_s& other) const
+ {
+ return cf_id > other.cf_id ||
+ (cf_id == other.cf_id && index_id > other.index_id);
+ }
+ bool operator>=(const struct _gl_index_id_s& other) const
+ {
+ return cf_id > other.cf_id ||
+ (cf_id == other.cf_id && index_id >= other.index_id);
+ }
+} GL_INDEX_ID;
+
+enum operation_type {
+ ROWS_DELETED = 0,
+ ROWS_INSERTED,
+ ROWS_READ,
+ ROWS_UPDATED,
+ ROWS_MAX
+};
+
+/* Global statistics struct used inside MyRocks */
+struct st_global_stats {
+ ib_counter_t<ulonglong, 64> rows[ROWS_MAX];
+
+ // system_rows_ stats are only for system
+ // tables. They are not counted in rows_* stats.
+ ib_counter_t<ulonglong, 64> system_rows[ROWS_MAX];
+};
+
+/* Struct used for exporting status to MySQL */
+struct st_export_stats {
+ ulonglong rows_deleted;
+ ulonglong rows_inserted;
+ ulonglong rows_read;
+ ulonglong rows_updated;
+
+ ulonglong system_rows_deleted;
+ ulonglong system_rows_inserted;
+ ulonglong system_rows_read;
+ ulonglong system_rows_updated;
+};
+
+} // namespace myrocks
+
+#include "./rdb_buff.h"
+
+/* Provide hash function for GL_INDEX_ID so we can include it in sets */
+namespace std {
+ template <>
+ struct hash<myrocks::GL_INDEX_ID>
+ {
+ std::size_t operator()(const myrocks::GL_INDEX_ID& gl_index_id) const
+ {
+ uint64_t val = ((uint64_t) gl_index_id.cf_id << 32 |
+ (uint64_t) gl_index_id.index_id);
+ return std::hash<uint64_t>()(val);
+ }
+ };
+} // namespace std
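+
+/*
+  A minimal usage sketch (illustrative): this specialization is what lets
+  the inplace-ALTER code keep sets of global index ids, e.g.
+
+    std::unordered_set<myrocks::GL_INDEX_ID> dropped;
+    dropped.insert({1, 42});   // cf_id=1, index_id=42
+*/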
+
+namespace myrocks {
+
+/**
+ @brief
+ Class definition for ROCKSDB storage engine plugin handler
+*/
+
+class ha_rocksdb: public my_core::handler
+{
+ my_core::THR_LOCK_DATA m_db_lock; ///< MySQL database lock
+
+ Rdb_table_handler *m_table_handler; ///< Open table handler
+
+ /* Iterator used for range scans and for full table/index scans */
+ rocksdb::Iterator *m_scan_it;
+
+ /* Whether m_scan_it was created with skip_bloom=true */
+ bool m_scan_it_skips_bloom;
+
+ Rdb_tbl_def *m_tbl_def;
+
+ /* Primary Key encoder from KeyTupleFormat to StorageFormat */
+ std::shared_ptr<Rdb_key_def> m_pk_descr;
+
+ /* Array of index descriptors */
+ std::shared_ptr<Rdb_key_def> *m_key_descr_arr;
+
+ bool check_keyread_allowed(uint inx, uint part, bool all_parts) const;
+
+ /*
+ Number of key parts in PK. This is the same as
+ table->key_info[table->s->primary_key].keyparts
+ */
+ uint m_pk_key_parts;
+
+ /*
+ TRUE <=> Primary Key columns can be decoded from the index
+ */
+ mutable bool m_pk_can_be_decoded;
+
+ /*
+ TRUE <=> Some fields in the PK may require unpack_info.
+ */
+ bool m_maybe_unpack_info;
+
+ uchar *m_pk_tuple; /* Buffer for storing PK in KeyTupleFormat */
+ uchar *m_pk_packed_tuple; /* Buffer for storing PK in StorageFormat */
+ // ^^ todo: change it to 'char*'? TODO: ^ can we join this with last_rowkey?
+
+ /*
+ Temporary buffers for storing the key part of the Key/Value pair
+ for secondary indexes.
+ */
+ uchar *m_sk_packed_tuple;
+
+ /*
+ Temporary buffers for storing end key part of the Key/Value pair.
+ This is used for range scan only.
+ */
+ uchar *m_end_key_packed_tuple;
+
+ Rdb_string_writer m_sk_tails;
+ Rdb_string_writer m_pk_unpack_info;
+
+ /*
+ ha_rocksdb->index_read_map(.. HA_READ_KEY_EXACT or similar) will save here
+ the mem-comparable form of the index lookup tuple.
+ */
+ uchar *m_sk_match_prefix;
+ uint m_sk_match_length;
+
+ /* Buffer space for the above */
+ uchar *m_sk_match_prefix_buf;
+
+ /* Second buffers, used by UPDATE. */
+ uchar *m_sk_packed_tuple_old;
+ Rdb_string_writer m_sk_tails_old;
+
+ /*
+ Temporary space for packing VARCHARs (we provide it to
+ pack_record()/pack_index_tuple() calls).
+ */
+ uchar *m_pack_buffer;
+
+ /* rowkey of the last record we've read, in StorageFormat. */
+ String m_last_rowkey;
+
+ /* Buffer used by convert_record_to_storage_format() */
+ String m_storage_record;
+
+ /*
+ Last retrieved record, in table->record[0] data format.
+
+ This is used only when we get the record with rocksdb's Get() call (The
+ other option is when we get a rocksdb::Slice from an iterator)
+ */
+ std::string m_retrieved_record;
+
+ /* Type of locking to apply to rows */
+ enum { RDB_LOCK_NONE, RDB_LOCK_READ, RDB_LOCK_WRITE } m_lock_rows;
+
+ /* TRUE means we're doing an index-only read. FALSE means otherwise. */
+ bool m_keyread_only;
+
+ bool m_skip_scan_it_next_call;
+
+ /* TRUE means we are accessing the first row after a snapshot was created */
+ bool m_rnd_scan_is_new_snapshot;
+
+ /* TRUE means the replication slave will use Read Free Replication */
+ bool m_use_read_free_rpl;
+
+ /*
+ TRUE means we should skip unique key checks for this table if the
+ replication lag gets too large
+ */
+ bool m_skip_unique_check;
+
+ /**
+ @brief
+ This is a bitmap of indexes (i.e. a set) whose keys (in future, values) may
+ be changed by this statement. Indexes that are not in the bitmap do not need
+ to be updated.
+ @note Valid inside UPDATE statements, iff (m_update_scope_is_valid == true).
+ */
+ my_core::key_map m_update_scope;
+ bool m_update_scope_is_valid;
+
+ /* SST information used for bulk loading the primary key */
+ std::shared_ptr<Rdb_sst_info> m_sst_info;
+
+ /*
+ MySQL index number for duplicate key error
+ */
+ int m_dupp_errkey;
+
+ int create_key_defs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg,
+ const TABLE *old_table_arg= nullptr,
+ const Rdb_tbl_def *old_tbl_def_arg= nullptr)
+ __attribute__((__nonnull__(2, 3), __warn_unused_result__));
+ int secondary_index_read(const int keyno, uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ void setup_iterator_for_rnd_scan();
+ void setup_scan_iterator(const std::shared_ptr<const Rdb_key_def>& kd,
+ rocksdb::Slice *slice)
+ __attribute__((__nonnull__))
+ {
+ setup_scan_iterator(kd, slice, false, false, 0);
+ }
+ bool is_ascending(const std::shared_ptr<const Rdb_key_def>& keydef,
+ enum ha_rkey_function find_flag) const
+ __attribute__((__nonnull__, __warn_unused_result__));
+ void setup_scan_iterator(const std::shared_ptr<const Rdb_key_def>& kd,
+ rocksdb::Slice *slice, const bool use_all_keys,
+ const bool is_ascending, const uint eq_cond_len)
+ __attribute__((__nonnull__));
+ void release_scan_iterator(void)
+ {
+ delete m_scan_it;
+ m_scan_it= nullptr;
+ }
+
+ int get_row_by_rowid(uchar *buf, const char *rowid,
+ const uint rowid_size)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int get_row_by_rowid(uchar *buf, const uchar *rowid,
+ const uint rowid_size)
+ __attribute__((__nonnull__, __warn_unused_result__))
+ {
+ return get_row_by_rowid(buf, reinterpret_cast<const char *>(rowid),
+ rowid_size);
+ }
+
+ void update_auto_incr_val();
+ void load_auto_incr_value();
+ longlong update_hidden_pk_val();
+ int load_hidden_pk_value() __attribute__((__warn_unused_result__));
+ int read_hidden_pk_id_from_rowkey(longlong *hidden_pk_id)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ bool can_use_single_delete(uint index)
+ __attribute__((__warn_unused_result__));
+ bool skip_unique_check() __attribute__((__warn_unused_result__));
+ void set_force_skip_unique_check(bool skip) override;
+ bool commit_in_the_middle() __attribute__((__warn_unused_result__));
+ bool do_bulk_commit(Rdb_transaction *tx)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ bool has_hidden_pk(const TABLE* table)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void update_row_stats(operation_type type);
+
+ void set_last_rowkey(const uchar *old_data);
+
+ /*
+ Array of table->s->fields elements telling how to store fields in the
+ record.
+ */
+ Rdb_field_encoder *m_encoder_arr;
+
+ /* Describes instructions on how to decode the field */
+ class READ_FIELD
+ {
+ public:
+ /* Points to Rdb_field_encoder describing the field */
+ Rdb_field_encoder* m_field_enc;
+ /* if true, decode the field, otherwise skip it */
+ bool m_decode;
+ /* Skip this many bytes before reading (or skipping) this field */
+ int m_skip;
+ };
+
+ /*
+ This tells which table fields should be decoded (or skipped) when
+ decoding a table row from the (pk, encoded_row) pair. (Secondary keys are
+ currently always decoded in full.)
+ */
+ std::vector<READ_FIELD> m_decoders_vect;
+
+ /* Setup field_decoders based on type of scan and table->read_set */
+ void setup_read_decoders();
+
+ /*
+ Number of bytes in on-disk (storage) record format that are used for
+ storing SQL NULL flags.
+ */
+ uint m_null_bytes_in_rec;
+
+ void get_storage_type(Rdb_field_encoder *encoder, uint kp);
+ void setup_field_converters();
+ int alloc_key_buffers(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ void free_key_buffers();
+
+ // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
+ rocksdb::Range get_range(int i, uchar buf[]) const;
+
+ /*
+ Perf timers for data reads
+ */
+ Rdb_io_perf m_io_perf;
+
+ /*
+ A counter of how many row checksums were checked for this table. Note that
+ this does not include checksums for secondary index entries.
+ */
+ my_core::ha_rows m_row_checksums_checked;
+
+ /*
+ Update stats
+ */
+ void update_stats(void);
+
+public:
+ /*
+ Controls whether writes include checksums. This is updated from the session variable
+ at the start of each query.
+ */
+ bool m_store_checksums;
+
+ /* Same as above but for verifying checksums when reading */
+ bool m_verify_checksums;
+ int m_checksums_pct;
+
+ ha_rocksdb(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg);
+ ~ha_rocksdb()
+ {
+ int err __attribute__((__unused__));
+ err= finalize_bulk_load();
+ DBUG_ASSERT(err == 0);
+ }
+
+ /** @brief
+ The name that will be used for display purposes.
+ */
+ const char *table_type() const { return rocksdb_hton_name; }
+
+ /* The following is only used by SHOW KEYS: */
+ const char *index_type(uint inx) { return "LSMTREE"; }
+
+ /** @brief
+ The file extensions.
+ */
+ const char **bas_ext() const;
+
+ /*
+ See if this is the same base table - this should only be true for different
+ partitions of the same table.
+ */
+ bool same_table(const ha_rocksdb& other) const;
+
+ /** @brief
+ This is a list of flags that indicate what functionality the storage engine
+ implements. The current table flags are documented in handler.h
+ */
+ ulonglong table_flags() const override
+ {
+ /*
+ HA_BINLOG_STMT_CAPABLE
+ We are saying that this engine is just statement capable to have
+ an engine that can only handle statement-based logging. This is
+ used in testing.
+ HA_REC_NOT_IN_SEQ
+ If we don't set it, filesort crashes, because it assumes rowids are
+ 1..8 byte numbers
+ */
+ return HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
+ HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS |
+ (m_pk_can_be_decoded? HA_PRIMARY_KEY_IN_READ_INDEX : 0) |
+ HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
+ HA_NULL_IN_KEY |
+ HA_PARTIAL_COLUMN_READ;
+ }
+
+ bool init_with_fields() override;
+
+ /** @brief
+ This is a bitmap of flags that indicates how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero here.
+
+ @details
+ part is the key part to check. First key part is 0.
+ If all_parts is set, MySQL wants to know the flags for the combined
+ index, up to and including 'part'.
+ */
+ ulong index_flags(uint inx, uint part, bool all_parts) const;
+
+ const key_map * keys_to_use_for_scanning()
+ {
+ return &key_map_full;
+ }
+
+ bool primary_key_is_clustered()
+ {
+ return true;
+ }
+
+ bool should_store_checksums() const
+ {
+ return m_store_checksums && (rand() % 100 < m_checksums_pct);
+ }
+
+ int rename_table(const char *from, const char *to)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int convert_record_from_storage_format(const rocksdb::Slice *key,
+ const rocksdb::Slice *value,
+ uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int convert_record_from_storage_format(const rocksdb::Slice *key,
+ uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void convert_record_to_storage_format(const rocksdb::Slice& pk_packed_slice,
+ Rdb_string_writer *pk_unpack_info,
+ rocksdb::Slice *packed_rec)
+ __attribute__((__nonnull__));
+
+ static const char* get_key_name(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ static const char* get_key_comment(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ static bool is_hidden_pk(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ static uint pk_index(const TABLE* table_arg, const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ static bool is_pk(const uint index, const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ /** @brief
+ unireg.cc will call max_supported_record_length(), max_supported_keys(),
+ max_supported_key_parts(), and max_supported_key_length()
+ to make sure that the storage engine can handle the data it is about to
+ send. Return *real* limits of your storage engine here; MySQL will do
+ min(your_limits, MySQL_limits) automatically.
+ */
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+
+ uint max_supported_keys() const { return MAX_INDEXES; }
+ uint max_supported_key_parts() const { return MAX_REF_PARTS; }
+ uint max_supported_key_part_length() const { return 2048; }
+ /** @brief
+ unireg.cc will call this to make sure that the storage engine can handle
+ the data it is about to send. Return *real* limits of your storage engine
+ here; MySQL will do min(your_limits, MySQL_limits) automatically.
+
+ @details
+ There is no need to implement ..._key_... methods if your engine doesn't
+ support indexes.
+ */
+ uint max_supported_key_length() const { return 16*1024; /* just to return something*/ }
+
+ /**
+ TODO: return actual upper bound of number of records in the table.
+ (e.g. save number of records seen on full table scan and/or use file size
+ as upper bound)
+ */
+ ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
+
+ /* At the moment, we're ok with the default handler::index_init() implementation. */
+ int index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+ __attribute__((__warn_unused_result__));
+
+ int index_read_map_impl(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag,
+ const key_range *end_key)
+ __attribute__((__warn_unused_result__));
+
+ int index_read_last_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map)
+ __attribute__((__warn_unused_result__));
+
+ int read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range, bool sorted)
+ __attribute__((__warn_unused_result__));
+
+ virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
+ virtual double read_time(uint, uint, ha_rows rows) override;
+
+ int open(const char *name, int mode, uint test_if_locked)
+ __attribute__((__warn_unused_result__));
+ int close(void) __attribute__((__warn_unused_result__));
+
+ int write_row(uchar *buf) __attribute__((__warn_unused_result__));
+ int update_row(const uchar *old_data, uchar *new_data)
+ __attribute__((__warn_unused_result__));
+ int delete_row(const uchar *buf) __attribute__((__warn_unused_result__));
+ rocksdb::Status delete_or_singledelete(uint index,
+ Rdb_transaction *tx,
+ rocksdb::ColumnFamilyHandle* cf,
+ const rocksdb::Slice& key)
+ __attribute__((__warn_unused_result__));
+
+ int index_next(uchar *buf) __attribute__((__warn_unused_result__));
+ int index_next_with_direction(uchar *buf, bool move_forward)
+ __attribute__((__warn_unused_result__));
+ int index_prev(uchar *buf) __attribute__((__warn_unused_result__));
+
+ int index_first(uchar *buf) __attribute__((__warn_unused_result__));
+ int index_last(uchar *buf) __attribute__((__warn_unused_result__));
+
+ class Item* idx_cond_push(uint keyno, class Item* idx_cond);
+ /*
+ The default implementation of cancel_pushed_idx_cond() suits us
+ */
+private:
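+ /* Column family a key definition will be stored in, together with its
+ reverse-ordering and auto-cf flags */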
+ struct key_def_cf_info
+ {
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ bool is_reverse_cf;
+ bool is_auto_cf;
+ };
+
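+ /*
+ Describes a single row update: the transaction it runs in, the old and
+ new row images, the old and new packed primary-key slices (plus unpack
+ info for the new PK), the hidden PK id used when the table has no
+ user-defined primary key, and whether unique checks are skipped.
+ */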
+ struct update_row_info
+ {
+ Rdb_transaction* tx;
+ const uchar* new_data;
+ const uchar* old_data;
+ rocksdb::Slice new_pk_slice;
+ rocksdb::Slice old_pk_slice;
+
+ // "unpack_info" data for the new PK value
+ Rdb_string_writer *new_pk_unpack_info;
+
+ longlong hidden_pk_id;
+ bool skip_unique_check;
+ };
+
+ int create_cfs(const TABLE *table_arg, Rdb_tbl_def *tbl_def_arg,
+ std::array<struct key_def_cf_info, MAX_INDEXES + 1>* cfs)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int create_key_def(const TABLE *table_arg, uint i,
+ const Rdb_tbl_def* tbl_def_arg,
+ std::shared_ptr<Rdb_key_def>* new_key_def,
+ const struct key_def_cf_info& cf_info)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int create_inplace_key_defs(const TABLE *table_arg,
+ Rdb_tbl_def *tbl_def_arg,
+ const TABLE *old_table_arg,
+ const Rdb_tbl_def *old_tbl_def_arg,
+ const std::array<key_def_cf_info, MAX_INDEXES + 1>& cfs)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ std::unordered_map<std::string, uint> get_old_key_positions(
+ const TABLE* table_arg,
+ const Rdb_tbl_def* tbl_def_arg,
+ const TABLE* old_table_arg,
+ const Rdb_tbl_def* old_tbl_def_arg)
+ __attribute__((__nonnull__));
+
+ int compare_key_parts(const KEY* old_key, const KEY* new_key)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int index_first_intern(uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int index_last_intern(uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ enum icp_result check_index_cond();
+ int find_icp_matching_index_rec(bool move_forward, uchar *buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void calc_updated_indexes();
+ int update_write_row(const uchar *old_data, const uchar *new_data,
+ const bool skip_unique_check)
+ __attribute__((__warn_unused_result__));
+ int get_pk_for_update(struct update_row_info* row_info);
+ int check_and_lock_unique_pk(uint key_id,
+ const struct update_row_info& row_info,
+ bool* found, bool* pk_changed)
+ __attribute__((__warn_unused_result__));
+ int check_and_lock_sk(uint key_id, const struct update_row_info& row_info,
+ bool* found) const
+ __attribute__((__warn_unused_result__));
+ int check_uniqueness_and_lock(const struct update_row_info& row_info,
+ bool* pk_changed)
+ __attribute__((__warn_unused_result__));
+ bool over_bulk_load_threshold(int* err)
+ __attribute__((__warn_unused_result__));
+ int bulk_load_key(Rdb_transaction* tx,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int update_pk(const std::shared_ptr<const Rdb_key_def>& kd,
+ const struct update_row_info& row_info,
+ bool pk_changed)
+ __attribute__((__warn_unused_result__));
+ int update_sk(const TABLE* table_arg,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const struct update_row_info& row_info)
+ __attribute__((__warn_unused_result__));
+ int update_indexes(const struct update_row_info& row_info, bool pk_changed)
+ __attribute__((__warn_unused_result__));
+
+ int read_key_exact(const std::shared_ptr<const Rdb_key_def>& kd,
+ rocksdb::Iterator* iter, bool using_full_key,
+ const rocksdb::Slice& key_slice) const
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int read_before_key(const std::shared_ptr<const Rdb_key_def>& kd,
+ bool using_full_key, const rocksdb::Slice& key_slice)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int read_after_key(const std::shared_ptr<const Rdb_key_def>& kd,
+ bool using_full_key, const rocksdb::Slice& key_slice)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int position_to_correct_key(const std::shared_ptr<const Rdb_key_def>& kd,
+ enum ha_rkey_function find_flag,
+ bool full_key_match, const uchar* key,
+ key_part_map keypart_map,
+ const rocksdb::Slice& key_slice,
+ bool* move_forward)
+ __attribute__((__warn_unused_result__));
+
+ int read_row_from_primary_key(uchar* buf)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int read_row_from_secondary_key(uchar* buf,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ bool move_forward)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ int try_keyonly_read_from_sk(uchar* buf,
+ const std::shared_ptr<const Rdb_key_def>& kd,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& value,
+ uint rowid_size)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int calc_eq_cond_len(const std::shared_ptr<const Rdb_key_def>& kd,
+ enum ha_rkey_function find_flag,
+ const rocksdb::Slice& slice, int bytes_changed_by_succ,
+ const key_range *end_key, uint* end_key_packed_size)
+ __attribute__((__warn_unused_result__));
+
+ Rdb_tbl_def* get_table_if_exists(const char* tablename)
+ __attribute__((__nonnull__, __warn_unused_result__));
+ void read_thd_vars(THD *thd)
+ __attribute__((__nonnull__));
+ bool contains_foreign_key(THD* thd)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int inplace_populate_sk(const TABLE* table_arg,
+ const std::unordered_set<std::shared_ptr<Rdb_key_def>>& indexes);
+
+public:
+ int index_init(uint idx, bool sorted) __attribute__((__warn_unused_result__));
+ int index_end() __attribute__((__warn_unused_result__));
+
+ void unlock_row();
+
+ /** @brief
+ Unlike index_init(), rnd_init() can be called twice in a row without
+ rnd_end() in between (this only makes sense if scan=1). In that case,
+ the second call should prepare for the new table scan (e.g. if
+ rnd_init() allocates the cursor, the second call should position the
+ cursor at the start of the table; there is no need to deallocate and
+ allocate it again). This is a required method.
+ */
+ int rnd_init(bool scan) __attribute__((__warn_unused_result__));
+ int rnd_end() __attribute__((__warn_unused_result__));
+
+ int rnd_next(uchar *buf) __attribute__((__warn_unused_result__));
+ int rnd_next_with_direction(uchar *buf, bool move_forward)
+ __attribute__((__warn_unused_result__));
+
+ int rnd_pos(uchar *buf, uchar *pos) __attribute__((__warn_unused_result__));
+ void position(const uchar *record);
+ int info(uint) override;
+
+ /* This function will always return success, therefore no annotation related
+ * to checking the return value. Can't change the signature because it's
+ * required by the interface. */
+ int extra(enum ha_extra_function operation);
+
+ int start_stmt(THD *thd, thr_lock_type lock_type)
+ __attribute__((__warn_unused_result__));
+ int external_lock(THD *thd, int lock_type)
+ __attribute__((__warn_unused_result__));
+ int delete_all_rows(ha_rows* nrows) __attribute__((__warn_unused_result__));
+ int truncate() __attribute__((__warn_unused_result__));
+
+ int reset() override
+ {
+ /* Free blob data */
+ m_retrieved_record.clear();
+ return 0;
+ }
+
+ int check(THD* thd, HA_CHECK_OPT* check_opt)
+ __attribute__((__warn_unused_result__));
+ void remove_rows(Rdb_tbl_def *tbl);
+ ha_rows records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+ __attribute__((__warn_unused_result__));
+ int delete_table(const char *from) __attribute__((__warn_unused_result__));
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info)
+ __attribute__((__warn_unused_result__));
+ bool check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+ __attribute__((__warn_unused_result__));
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+ __attribute__((__warn_unused_result__));
+
+ my_bool register_query_cache_table(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data)
+ {
+ /* Currently, we don't support query cache */
+ return FALSE;
+ }
+
+ bool get_error_message(const int error, String *buf)
+ __attribute__((__nonnull__));
+
+ void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+ void update_create_info(HA_CREATE_INFO *create_info);
+ int optimize(THD *thd, HA_CHECK_OPT *check_opt)
+ __attribute__((__warn_unused_result__));
+ int analyze(THD* thd, HA_CHECK_OPT* check_opt)
+ __attribute__((__warn_unused_result__));
+
+ enum_alter_inplace_result check_if_supported_inplace_alter(
+ TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info) override;
+
+ bool prepare_inplace_alter_table(TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info);
+
+ bool inplace_alter_table(TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info);
+
+ bool commit_inplace_alter_table(TABLE *altered_table,
+ my_core::Alter_inplace_info *ha_alter_info,
+ bool commit);
+
+ int finalize_bulk_load() __attribute__((__warn_unused_result__));
+
+ void set_use_read_free_rpl(const char* whitelist);
+ void set_skip_unique_check_tables(const char* whitelist);
+
+ public:
+ virtual void rpl_before_delete_rows() override;
+ virtual void rpl_after_delete_rows() override;
+ virtual void rpl_before_update_rows() override;
+ virtual void rpl_after_update_rows() override;
+ virtual bool use_read_free_rpl();
+
+ private:
+ /* Flags tracking if we are inside different replication operation */
+ bool m_in_rpl_delete_rows;
+ bool m_in_rpl_update_rows;
+
+ bool m_force_skip_unique_check;
+};
+
+/*
+ Helper class for in-place ALTER TABLE; stores the handler context between
+ the in-place alter calls
+*/
+struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx
+{
+ /* The new table definition */
+ Rdb_tbl_def* m_new_tdef;
+
+ /* Stores the original key definitions */
+ std::shared_ptr<Rdb_key_def>* m_old_key_descr;
+
+ /* Stores the new key definitions */
+ std::shared_ptr<Rdb_key_def>* m_new_key_descr;
+
+ /* Stores the old number of key definitions */
+ const uint m_old_n_keys;
+
+ /* Stores the new number of key definitions */
+ const uint m_new_n_keys;
+
+ /* Stores the key definitions to be added */
+ std::unordered_set<std::shared_ptr<Rdb_key_def>> m_added_indexes;
+
+ /* Stores the GL_INDEX_IDs of the keys to be dropped */
+ std::unordered_set<GL_INDEX_ID> m_dropped_index_ids;
+
+ /* Stores number of keys to add */
+ const uint m_n_added_keys;
+
+ /* Stores number of keys to drop */
+ const uint m_n_dropped_keys;
+
+ Rdb_inplace_alter_ctx(
+ Rdb_tbl_def* new_tdef, std::shared_ptr<Rdb_key_def>* old_key_descr,
+ std::shared_ptr<Rdb_key_def>* new_key_descr, uint old_n_keys,
+ uint new_n_keys,
+ std::unordered_set<std::shared_ptr<Rdb_key_def>> added_indexes,
+ std::unordered_set<GL_INDEX_ID> dropped_index_ids,
+ uint n_added_keys, uint n_dropped_keys) :
+ my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef),
+ m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr),
+ m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys),
+ m_added_indexes(added_indexes),
+ m_dropped_index_ids(dropped_index_ids),
+ m_n_added_keys(n_added_keys),
+ m_n_dropped_keys(n_dropped_keys)
+ {
+ }
+
+ ~Rdb_inplace_alter_ctx(){}
+
+ private:
+ /* Disable Copying */
+ Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx&);
+ Rdb_inplace_alter_ctx& operator=(const Rdb_inplace_alter_ctx&);
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h
new file mode 100644
index 00000000000..df41d2de619
--- /dev/null
+++ b/storage/rocksdb/ha_rocksdb_proto.h
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ standard header files */
+#include <string>
+#include <vector>
+
+/* MySQL header files */
+#include "./sql_string.h"
+
+/* RocksDB includes */
+#include "rocksdb/table.h"
+
+namespace myrocks {
+
+enum RDB_IO_ERROR_TYPE {
+ RDB_IO_ERROR_TX_COMMIT,
+ RDB_IO_ERROR_DICT_COMMIT,
+ RDB_IO_ERROR_BG_THREAD
+};
+
+void rdb_handle_io_error(rocksdb::Status status, RDB_IO_ERROR_TYPE err_type);
+
+int rdb_normalize_tablename(const std::string& tablename, std::string* str)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+int rdb_split_normalized_tablename(const std::string& fullname, std::string *db,
+ std::string *table = nullptr,
+ std::string *partition = nullptr)
+ __attribute__((__warn_unused_result__));
+
+std::vector<std::string> rdb_get_open_table_names(void);
+
+int rdb_get_table_perf_counters(const char *tablename,
+ Rdb_perf_counters *counters)
+ __attribute__((__nonnull__(2)));
+
+void rdb_get_global_perf_counters(Rdb_perf_counters *counters)
+ __attribute__((__nonnull__(1)));
+
+void rdb_queue_save_stats_request();
+
+/*
+ Access to singleton objects.
+*/
+
+rocksdb::DB *rdb_get_rocksdb_db();
+
+class Rdb_cf_manager;
+Rdb_cf_manager& rdb_get_cf_manager();
+
+rocksdb::BlockBasedTableOptions& rdb_get_table_options();
+
+class Rdb_dict_manager;
+Rdb_dict_manager *rdb_get_dict_manager(void)
+ __attribute__((__warn_unused_result__));
+
+class Rdb_ddl_manager;
+Rdb_ddl_manager *rdb_get_ddl_manager(void)
+ __attribute__((__warn_unused_result__));
+
+class Rdb_binlog_manager;
+Rdb_binlog_manager *rdb_get_binlog_manager(void)
+ __attribute__((__warn_unused_result__));
+
+} // namespace myrocks
diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h
new file mode 100644
index 00000000000..a1333e8dfd4
--- /dev/null
+++ b/storage/rocksdb/logger.h
@@ -0,0 +1,73 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+#include <log.h>
+#include <sstream>
+#include <string>
+
+namespace myrocks {
+
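+// Forwards RocksDB log output to the MySQL error log and, optionally, to a
+// second rocksdb::Logger registered via SetRocksDBLogger(). A typical wiring
+// (a sketch; not shown in this file) is to install an instance as
+// rocksdb::Options::info_log when the database is opened.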
+class Rdb_logger : public rocksdb::Logger
+{
+ public:
+ void Logv(const rocksdb::InfoLogLevel log_level,
+ const char* format,
+ va_list ap) override
+ {
+ DBUG_ASSERT(format != nullptr);
+
+ enum loglevel mysql_log_level;
+
+ if (m_logger) {
+ m_logger->Logv(log_level, format, ap);
+ }
+
+ if (log_level < GetInfoLogLevel()) {
+ return;
+ }
+
+ if (log_level >= rocksdb::InfoLogLevel::ERROR_LEVEL) {
+ mysql_log_level= ERROR_LEVEL;
+ } else if (log_level >= rocksdb::InfoLogLevel::WARN_LEVEL) {
+ mysql_log_level= WARNING_LEVEL;
+ } else {
+ mysql_log_level= INFORMATION_LEVEL;
+ }
+
+ // log to MySQL
+ std::string f("LibRocksDB:");
+ f.append(format);
+ error_log_print(mysql_log_level, f.c_str(), ap);
+ }
+
+ void Logv(const char* format, va_list ap) override
+ {
+ DBUG_ASSERT(format != nullptr);
+ // If no level is specified, default to the information level
+ Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap);
+ }
+
+ void SetRocksDBLogger(std::shared_ptr<rocksdb::Logger> logger)
+ {
+ m_logger = logger;
+ }
+
+ private:
+ std::shared_ptr<rocksdb::Logger> m_logger;
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc
new file mode 100644
index 00000000000..82ceda1914d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc
@@ -0,0 +1,69 @@
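+#
+# Exercise INSERT ... ON DUPLICATE KEY UPDATE against tables t1 .. t$max_table.
+# The calling test is expected to have created each table with columns
+# (id1, id2, id3) and a secondary index named id3; the exact schemas are
+# defined by the caller, not here.
+#
+# --source suite/rocksdb/include/dup_key_update.inc
+#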
+let $max_table = 2;
+
+--disable_query_log
+let $table = 1;
+while ($table <= $max_table) {
+ let $max = 9;
+ let $i = 2;
+ while ($i <= $max) {
+ let $insert = INSERT INTO t$table VALUES ($i, $i, $i);
+ eval $insert;
+ inc $i;
+ }
+ inc $table;
+}
+--enable_query_log
+
+let $table = 1;
+while ($table <= $max_table) {
+ let $i = 1;
+ let $j = 9;
+ while ($i <= $max) {
+
+ let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j;
+ eval $insert;
+
+ let $select = SELECT * FROM t$table WHERE id1 = $i;
+ eval $select;
+
+ let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i;
+ eval $select;
+
+ inc $j;
+
+ let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j;
+ eval $insert;
+
+ let $select = SELECT * FROM t$table WHERE id1 = $i;
+ eval $select;
+
+ let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i;
+ eval $select;
+
+ inc $j;
+
+ let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j;
+ eval $insert;
+
+ let $select = SELECT * FROM t$table WHERE id1 = $i;
+ eval $select;
+
+ let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i;
+ eval $select;
+
+ inc $j;
+
+ inc $i;
+ inc $i;
+ inc $i;
+ inc $i;
+ }
+
+ let $select = SELECT * FROM t$table;
+ eval $select;
+
+ let $select = SELECT * FROM t$table FORCE INDEX (id3);
+ eval $select;
+
+ inc $table;
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc
new file mode 100644
index 00000000000..6dc5a78e3a0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc
@@ -0,0 +1,51 @@
+#
+# Check concurrent locking issues:
+# Locking rows that do not exist when using all primary key columns in a
+# WHERE clause
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case1_1.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 1.1:
+--echo - Locking rows that do not exist when using all primary key columns in
+--echo - a WHERE clause
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+
+connection con2;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t0 VALUES (1,5,0);
+
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+
+connection con1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc
new file mode 100644
index 00000000000..13083bf82d9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc
@@ -0,0 +1,48 @@
+#
+# Check concurrent locking issues:
+# Locking rows that do not exist without using all primary key columns in a
+# WHERE clause
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case1_2.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 1.2:
+--echo - Locking rows that do not exist without using all primary key
+--echo - columns in a WHERE clause
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
+
+connection con2;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
+INSERT INTO t0 VALUES (1,5,0);
+
+connection con1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc
new file mode 100644
index 00000000000..61c604dd6d3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc
@@ -0,0 +1,97 @@
+#
+# Check concurrent locking issues:
+# Rows that are scanned but do not match the WHERE clause are not locked.
+#
+# To call this, set $isolation_level and call this file
+# If you want to enable rocksdb_lock_scanned_rows set $lock_scanned_rows=1
+#
+# let $isolation_level = REPEATABLE READ;
+# let $lock_scanned_rows = 1 (optional)
+# --source suite/rocksdb/include/locking_issues_case2.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 2:
+--echo - Rows that are scanned but do not match the WHERE are not locked
+--echo - using $isolation_level transaction isolation level unless
+--echo - rocksdb_lock_scanned_rows is on
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+SELECT @@global.rocksdb_lock_scanned_rows;
+
+if ($lock_scanned_rows)
+{
+ let $original_val=query_get_value(
+ select @@global.rocksdb_lock_scanned_rows as val, val, 1);
+ SET GLOBAL rocksdb_lock_scanned_rows=ON;
+}
+
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+
+connection con2;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+
+if ($lock_scanned_rows == 1)
+{
+ connection con1;
+ # This is expected to leave locks on all the rows in t0
+ SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+ connection con2;
+ --error ER_LOCK_WAIT_TIMEOUT
+ UPDATE t0 SET VALUE=10 WHERE id=1;
+}
+
+if ($lock_scanned_rows == 0)
+{
+ connection con1;
+ # This is expected to release locks on rows with value=0
+ SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+ connection con2;
+ # This should succeed as con1 should have released the lock on row (1,0)
+ UPDATE t0 SET VALUE=10 WHERE id=1;
+
+ # This should fail because lock on row (5,1) is still held.
+ --error ER_LOCK_WAIT_TIMEOUT
+ UPDATE t0 SET VALUE=10 WHERE id=5;
+
+ connection con1;
+ # Do another operation
+ UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
+
+ connection con2;
+ # Check that row (4,0) is still not locked
+ SELECT * FROM t0 WHERE id=4 FOR UPDATE;
+
+ COMMIT;
+ SELECT * FROM t0;
+}
+
+connection con1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
+
+if ($lock_scanned_rows == 1)
+{
+ eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val;
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc
new file mode 100644
index 00000000000..bd9af241e5c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc
@@ -0,0 +1,69 @@
+#
+# Check concurrent locking issues:
+# After creating a snapshot, other clients updating rows
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case3.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 3:
+--echo - After creating a snapshot, other clients updating rows
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+
+# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time
+--echo Inserting 200,000 rows
+--disable_query_log
+SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load;
+SET rocksdb_bulk_load=1;
+SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal;
+SET GLOBAL rocksdb_write_disable_wal=1;
+let $i = 1;
+while ($i <= 200) {
+ eval BEGIN;
+ let $j = 1;
+ while ($j <= 100) {
+ eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0);
+ inc $j;
+ }
+ eval COMMIT;
+ inc $i;
+}
+SET rocksdb_bulk_load=@save_rocksdb_bulk_load;
+SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal;
+--enable_query_log
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+let $ID = `SELECT connection_id()`;
+send SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+connection con2;
+let $wait_condition = SELECT 1 FROM information_schema.processlist
+ WHERE id = $ID AND state = "Sending data";
+--source include/wait_condition.inc
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
+
+connection con1;
+--error ER_LOCK_DEADLOCK
+reap;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc
new file mode 100644
index 00000000000..da80f796750
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc
@@ -0,0 +1,68 @@
+#
+# Check concurrent locking issues:
+# Phantom rows
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case4.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 4:
+--echo - Phantom rows
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+
+# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time
+--echo Inserting 200,000 rows
+--disable_query_log
+SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load;
+SET rocksdb_bulk_load=1;
+SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal;
+SET GLOBAL rocksdb_write_disable_wal=1;
+let $i = 1;
+while ($i <= 200) {
+ eval BEGIN;
+ let $j = 1;
+ while ($j <= 100) {
+ eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0);
+ inc $j;
+ }
+ eval COMMIT;
+ inc $i;
+}
+SET rocksdb_bulk_load=@save_rocksdb_bulk_load;
+SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal;
+--enable_query_log
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+let $ID = `SELECT connection_id()`;
+send SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+connection con2;
+let $wait_condition = SELECT 1 FROM information_schema.processlist
+ WHERE id = $ID AND state = "Sending data";
+--source include/wait_condition.inc
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+INSERT INTO t0 VALUES(200001,1), (-1,1);
+
+connection con1;
+reap;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc
new file mode 100644
index 00000000000..3e4f6350b79
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc
@@ -0,0 +1,75 @@
+#
+# Check concurrent locking issues:
+# Deleting primary key
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case5.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 5:
+--echo - Deleting primary key
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+
+# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time
+--echo Inserting 200,000 rows
+--disable_query_log
+SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load;
+SET rocksdb_bulk_load=1;
+SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal;
+SET GLOBAL rocksdb_write_disable_wal=1;
+let $i = 1;
+while ($i <= 200) {
+ eval BEGIN;
+ let $j = 1;
+ while ($j <= 100) {
+ eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0);
+ inc $j;
+ }
+ eval COMMIT;
+ inc $i;
+}
+SET rocksdb_bulk_load=@save_rocksdb_bulk_load;
+SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal;
+--enable_query_log
+
+UPDATE t0 SET value=100 WHERE id=190000;
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+let $ID = `SELECT connection_id()`;
+send SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+connection con2;
+let $wait_condition = SELECT 1 FROM information_schema.processlist
+ WHERE id = $ID AND state = "Sending data";
+--source include/wait_condition.inc
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+DELETE FROM t0 WHERE id=190000;
+COMMIT;
+
+connection con1;
+--error ER_LOCK_DEADLOCK
+reap;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc
new file mode 100644
index 00000000000..4cb5cae15aa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc
@@ -0,0 +1,75 @@
+#
+# Check concurrent locking issues:
+# Changing primary key
+#
+# To call this, set $isolation_level and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# --source suite/rocksdb/include/locking_issues_case6.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 6:
+--echo - Changing primary key
+--echo - using $isolation_level transaction isolation level
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t0;
+--enable_warnings
+
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+
+# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time
+--echo Inserting 200,000 rows
+--disable_query_log
+SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load;
+SET rocksdb_bulk_load=1;
+SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal;
+SET GLOBAL rocksdb_write_disable_wal=1;
+let $i = 1;
+while ($i <= 200) {
+ eval BEGIN;
+ let $j = 1;
+ while ($j <= 100) {
+ eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0);
+ inc $j;
+ }
+ eval COMMIT;
+ inc $i;
+}
+SET rocksdb_bulk_load=@save_rocksdb_bulk_load;
+SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal;
+--enable_query_log
+
+UPDATE t0 SET value=100 WHERE id=190000;
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+let $ID = `SELECT connection_id()`;
+send SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+
+connection con2;
+let $wait_condition = SELECT 1 FROM information_schema.processlist
+ WHERE id = $ID AND state = "Sending data";
+--source include/wait_condition.inc
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+UPDATE t0 SET id=200001 WHERE id=190000;
+COMMIT;
+
+connection con1;
+--error ER_LOCK_DEADLOCK
+reap;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc
new file mode 100644
index 00000000000..d71d398982e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc
@@ -0,0 +1,89 @@
+#
+# Check concurrent locking issues:
+# Rows that are scanned but are not in the table being updated should be
+# locked when rocksdb_lock_scanned_rows is on, and left unlocked otherwise.
+#
+# To call this, set $isolation_level and $lock_scanned_rows and call this file
+#
+# let $isolation_level = REPEATABLE READ;
+# let $lock_scanned_rows = 0 (or 1)
+# --source suite/rocksdb/include/locking_issues_case7.inc
+#
+
+--echo
+--echo -----------------------------------------------------------------------
+--echo - Locking issues case 7:
+--echo - Rows that are scanned as part of a query but not in the table being
+--echo - updated should not be locked unless rocksdb_lock_scanned_rows is on
+--echo -----------------------------------------------------------------------
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+SELECT @@global.rocksdb_lock_scanned_rows;
+
+if ($lock_scanned_rows)
+{
+ let $original_val=query_get_value(
+ select @@global.rocksdb_lock_scanned_rows as val, val, 1);
+ SET GLOBAL rocksdb_lock_scanned_rows=ON;
+}
+
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+
+connection con2;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level;
+BEGIN;
+
+--echo lock_scanned_rows is $lock_scanned_rows
+if ($lock_scanned_rows == 1)
+{
+ connection con1;
+ # This is expected to leave a lock on the row with id=3 in t2
+ UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+
+ connection con2;
+ --error ER_LOCK_WAIT_TIMEOUT
+ UPDATE t2 SET value=value+100 WHERE id=3;
+
+ # No other row in t2 should be locked;
+ UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
+ SELECT * FROM t2;
+}
+
+if ($lock_scanned_rows == 0)
+{
+ connection con1;
+ # This should leave no locks on any row in t2;
+ UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+
+ connection con2;
+ UPDATE t2 SET value=value+100;
+ SELECT * FROM t2;
+}
+
+connection con1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t1;
+DROP TABLE t2;
+
+if ($lock_scanned_rows == 1)
+{
+ eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val;
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc
new file mode 100644
index 00000000000..71e713226d7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc
@@ -0,0 +1,53 @@
+# Usage:
+#
+# let $order = ASC; # or DESC
+# let $comment = "rev:cf2"; # or ""
+# --source suite/rocksdb/include/rocksdb_concurrent_delete.inc
+
+let $first_row = -1; # Error: this value should never be used
+if ($order == 'ASC')
+{
+ let $first_row = 1;
+}
+if ($order == 'DESC')
+{
+ let $first_row = 3;
+}
+
+connect (con, localhost, root,,);
+connection default;
+
+--disable_warnings
+SET debug_sync='RESET';
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT $comment, a INT);
+INSERT INTO t1 VALUES(1,1), (2,2), (3,3);
+
+# This will cause the SELECT to block after finding the first row, but
+# before locking and reading it.
+connection con;
+SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go';
+send_eval SELECT * FROM t1 order by t1.pk $order FOR UPDATE;
+
+# While that connection is waiting, delete the first row (the one con
+# is about to lock and read
+connection default;
+SET debug_sync='now WAIT_FOR parked';
+eval DELETE FROM t1 WHERE pk = $first_row;
+
+# Signal the waiting select to continue
+SET debug_sync='now SIGNAL go';
+
+# Now get the results from the select. The first entry (1,1) (or (3,3) when
+# using reverse ordering) should be missing. Prior to the fix the SELECT
+# would have returned: "1815: Internal error: NotFound:"
+connection con;
+reap;
+
+# Cleanup
+connection default;
+disconnect con;
+set debug_sync='RESET';
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf
new file mode 100644
index 00000000000..2ed68088259
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf
@@ -0,0 +1,7 @@
+!include include/default_my.cnf
+
+[server]
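+# Suite-wide server settings: keep RocksDB lock waits short so lock-conflict
+# tests fail fast, and relax the strict collation check for tests that index
+# non-binary-collated strings.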
+sql-mode=NO_ENGINE_SUBSTITUTION
+explicit-defaults-for-timestamp=1
+rocksdb_lock_wait_timeout=1
+rocksdb_strict_collation_check=0
diff --git a/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl b/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl
new file mode 100644
index 00000000000..8199d5051df
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/optimize_table_check_sst.pl
@@ -0,0 +1,22 @@
+#!/usr/bin/perl
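+#
+# Usage: optimize_table_check_sst.pl <file>
+#
+# <file> is expected to hold one SST data size per line (in KB, written by
+# the calling test); the script verifies that every OPTIMIZE TABLE step
+# reduced the size by at least 1000 KB and dies otherwise.
+#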
+
+die unless($ARGV[0]);
+open(my $f, "<", $ARGV[0]) or die $!;
+my @sst;
+while(my $l = readline($f)) {
+ chomp($l);
+ push @sst, int($l);
+}
+
+for(my $i= 0; $i < $#sst; $i++) {
+ printf("checking sst file reduction on optimize table from %d to %d..\n", $i, $i+1);
+
+ if($sst[$i] - 1000 < $sst[$i+1]) {
+ printf("sst file reduction was not enough. %d->%d (minimum 1000kb)\n", $sst[$i], $sst[$i+1]);
+ die;
+ } else {
+ print "ok.\n";
+ }
+}
+exit(0);
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/1st.result b/storage/rocksdb/mysql-test/rocksdb/r/1st.result
new file mode 100644
index 00000000000..7d1e8607645
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/1st.result
@@ -0,0 +1,22 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL DEFAULT '0',
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI 0
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 VALUES (1, 1,'a');
+INSERT INTO t1 (a,b) VALUES (2,'b');
+SELECT a,b FROM t1;
+a b
+1 a
+2 b
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result
new file mode 100644
index 00000000000..4a707d3a6f4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result
@@ -0,0 +1,378 @@
+drop table if exists t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `ka` (`a`),
+ KEY `kab` (`a`,`b`),
+ KEY `kb` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+a b
+2 6
+3 7
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+a b
+3 7
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `kab` (`a`,`b`),
+ KEY `kb` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+a b
+2 6
+3 7
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+a b
+3 7
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `kb` (`b`),
+ KEY `kab` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+a b
+2 6
+3 7
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+a b
+3 7
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `kb` (`b`),
+ KEY `kba` (`b`,`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+a b
+2 6
+3 7
+SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2;
+a b
+3 7
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `kab` (`a`,`b`),
+ KEY `ka` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5;
+a b
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+a b
+DROP TABLE t1;
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk char(8) NO PRI NULL
+a varchar(11) YES NULL
+b int(10) unsigned YES NULL
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`),
+ KEY `kab` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3;
+pk a b
+bbb 2222 2
+DROP TABLE t1;
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk char(8) NO PRI NULL
+a varchar(11) YES NULL
+b int(10) unsigned YES NULL
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`),
+ KEY `ka` (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3;
+pk a b
+bbb 2222 2
+DROP TABLE t1;
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk char(8) NO PRI NULL
+a varchar(11) YES NULL
+b int(10) unsigned YES NULL
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE;
+ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(8) COLLATE utf8_bin NOT NULL,
+ `a` varchar(11) COLLATE utf8_bin DEFAULT NULL,
+ `b` int(10) unsigned DEFAULT NULL,
+ PRIMARY KEY (`pk`),
+ KEY `kab` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3;
+pk a b
+bbb 2222 2
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+CREATE INDEX kb on t1 (b);
+CREATE INDEX kba on t1 (b,a);
+DROP INDEX ka on t1;
+DROP INDEX kab on t1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `kb` (`b`),
+ KEY `kba` (`b`,`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+a b
+2 6
+3 7
+SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2;
+a b
+3 7
+DROP TABLE t1;
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+DROP INDEX kij ON t1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ `k` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `j` (`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY KEY (i)
+PARTITIONS 4 */
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+i j k
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+100
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+# crash_during_online_index_creation
+flush logs;
+SET SESSION debug="+d,crash_during_online_index_creation";
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+ERROR HY000: Lost connection to MySQL server during query
+SET SESSION debug="-d,crash_during_online_index_creation";
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ KEY `ka` (`a`),
+ KEY `kab` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+# crash_during_index_creation_partition
+flush logs;
+SET SESSION debug="+d,crash_during_index_creation_partition";
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+ERROR HY000: Lost connection to MySQL server during query
+SET SESSION debug="-d,crash_during_index_creation_partition";
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ `k` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `j` (`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY KEY (i)
+PARTITIONS 4 */
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+i j k
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+100
+DROP TABLE t1;
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+# crash_during_index_creation_partition
+flush logs;
+SET SESSION debug="+d,myrocks_simulate_index_create_rollback";
+# expected assertion failure from sql layer here for alter rollback
+call mtr.add_suppression("Assertion `0' failed.");
+call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+ERROR HY000: Lost connection to MySQL server during query
+SET SESSION debug="-d,myrocks_simulate_index_create_rollback";
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ `k` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `j` (`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY KEY (i)
+PARTITIONS 4 */
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ `k` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `j` (`j`),
+ KEY `kij` (`i`,`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY KEY (i)
+PARTITIONS 4 */
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+100
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b TEXT);
+ALTER TABLE t1 ADD KEY kb(b(10));
+ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (binary, latin1_bin, utf8_bin).
+ALTER TABLE t1 ADD PRIMARY KEY(a);
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result
new file mode 100644
index 00000000000..2d1ba7ca1d8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result
@@ -0,0 +1,72 @@
+drop table if exists t1;
+CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin';
+set rocksdb_bulk_load=1;
+set rocksdb_bulk_load_size=100000;
+LOAD DATA INFILE <input_file> INTO TABLE t1;
+set rocksdb_bulk_load=0;
+select count(pk) from t1;
+count(pk)
+3000000
+select count(a) from t1;
+count(a)
+3000000
+select count(b) from t1;
+count(b)
+3000000
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
+SELECT COUNT(*) as c FROM
+(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`)
+UNION DISTINCT
+SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#',
+`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE
+INDEX(`kb_copy`)) as temp;
+c
+1
+select count(*) from t1 FORCE INDEX(kb);
+count(*)
+3000000
+select count(*) from t1 FORCE INDEX(kb_copy);
+count(*)
+3000000
+select count(*) from t1 FORCE INDEX(PRIMARY);
+count(*)
+3000000
+ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE;
+ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SELECT COUNT(*) FROM t1 FORCE INDEX(kab);
+COUNT(*)
+3000000
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+COUNT(*)
+3000000
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` char(5) COLLATE latin1_bin NOT NULL,
+ `a` char(30) COLLATE latin1_bin DEFAULT NULL,
+ `b` char(30) COLLATE latin1_bin DEFAULT NULL,
+ PRIMARY KEY (`pk`),
+ KEY `kb` (`b`),
+ KEY `kab` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `kb` (`b`) COMMENT 'rev:cf1'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+COUNT(*)
+3
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result
new file mode 100644
index 00000000000..4fef9bce405
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result
@@ -0,0 +1,7 @@
+drop table if exists t1;
+# Binary must be compiled with debug for this test
+CREATE TABLE t1 (a INT) ENGINE=rocksdb;
+SELECT COUNT(*) from t1;
+COUNT(*)
+400
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result
new file mode 100644
index 00000000000..34a14ff39d8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result
@@ -0,0 +1,251 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,'a'),(5,'z');
+ALTER TABLE t1 ADD COLUMN b INT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT * FROM t1;
+a c b
+1 a NULL
+5 z NULL
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT * FROM t1;
+a c
+1 a
+5 z
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+ALTER TABLE t1 DROP COLUMN pk;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb;
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+CHECK TABLE t1, t2 FOR UPGRADE;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+test.t2 check status OK
+INSERT INTO t2 (a,b) VALUES (5,'e');
+CHECK TABLE t2 QUICK;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+INSERT INTO t1 (a,b) VALUES (6,'f');
+CHECK TABLE t1 FAST;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (7,'g');
+INSERT INTO t2 (a,b) VALUES (8,'h');
+CHECK TABLE t2, t1 MEDIUM;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (9,'i');
+INSERT INTO t2 (a,b) VALUES (10,'j');
+CHECK TABLE t1, t2 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+test.t2 check status OK
+INSERT INTO t1 (a,b) VALUES (11,'k');
+CHECK TABLE t1 CHANGED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb;
+ERROR HY000: Unique index support is disabled when the table has no primary key.
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (36,'foo');
+DELETE FROM t1 WHERE a = 35 AND b = 'foo';
+SELECT * FROM t1;
+a b
+36 foo
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result
new file mode 100644
index 00000000000..f8508febb01
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result
@@ -0,0 +1,780 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+ALTER TABLE t1 ADD INDEX (b);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `b` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES MUL NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+ALTER TABLE t1 DROP INDEX b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+ALTER TABLE t1 DROP COLUMN pk;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+#
+# MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB;
+ALTER TABLE t1 DROP COLUMN `pk`;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `a` (`a`),
+ KEY `b` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b char(8) YES MUL NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `a` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b char(8) YES NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `a` (`a`),
+ KEY `b` (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b char(8) YES MUL NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (1),(2),(5);
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (6),(8),(12);
+CHECK TABLE t1 FOR UPGRADE;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (13),(15),(16);
+CHECK TABLE t1 QUICK;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (17),(120),(132);
+CHECK TABLE t1 FAST;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (801),(900),(7714);
+CHECK TABLE t1 MEDIUM;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (8760),(10023),(12000);
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028);
+CHECK TABLE t1 CHANGED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ `d` int(11) DEFAULT NULL,
+ KEY `kab` (`a`,`b`),
+ KEY `kbc` (`b`,`c`),
+ KEY `kabc` (`a`,`b`,`c`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b int(11) YES MUL NULL
+c int(11) YES NULL
+d int(11) YES NULL
+INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4);
+INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8);
+INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13);
+INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17);
+SELECT * FROM t1;
+a b c d
+1 2 3 4
+10 11 12 13
+14 15 16 17
+5 6 7 8
+SELECT * FROM t1 WHERE a = 1 OR a = 10;
+a b c d
+1 2 3 4
+10 11 12 13
+SELECT * FROM t1 WHERE c = 3 OR d = 17;
+a b c d
+1 2 3 4
+14 15 16 17
+SELECT * FROM t1 WHERE a > 5 OR d > 5;
+a b c d
+10 11 12 13
+14 15 16 17
+5 6 7 8
+SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11;
+a b c
+1 2 3
+10 11 12
+SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12;
+d
+17
+UPDATE t1 SET a=a+100;
+UPDATE t1 SET a=a-100, b=99 WHERE a>100;
+SELECT * FROM t1;
+a b c d
+1 99 3 4
+10 99 12 13
+14 99 16 17
+5 99 7 8
+DELETE FROM t1 WHERE a>5;
+DELETE FROM t1 WHERE b=99 AND d>4;
+SELECT * FROM t1;
+a b c d
+1 99 3 4
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b)
+comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ KEY `ka` (`a`) COMMENT 'rev:cf1',
+ KEY `kb` (`b`) COMMENT 'rev:cf1',
+ KEY `kab` (`a`,`b`) COMMENT 'rev:cf2'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) YES MUL NULL
+b char(8) YES MUL NULL
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+SELECT * FROM t1 WHERE a = 35;
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+a b
+35 foo
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+a b
+76 bar
+77 baz
+SELECT * FROM t1 WHERE a > 35;
+a b
+76 bar
+77 baz
+SELECT * FROM t1;
+a b
+35 foo
+76 bar
+77 baz
+UPDATE t1 SET a=a+100;
+SELECT * FROM t1;
+a b
+135 foo
+176 bar
+177 baz
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+SELECT * FROM t1;
+a b
+35 bbb
+76 bbb
+77 bbb
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+SELECT * FROM t1;
+a b
+300 ccc
+300 ccc
+35 bbb
+UPDATE t1 SET a=123 WHERE a=35;
+SELECT * FROM t1;
+a b
+123 bbb
+300 ccc
+300 ccc
+UPDATE t1 SET a=321 WHERE b='ccc';
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+SELECT * FROM t1;
+a b
+123 bbb
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE a=123;
+SELECT * FROM t1;
+a b
+321 ccc
+321 ccc
+45 bob
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+SELECT * FROM t1;
+a b
+45 bob
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB;
+INSERT INTO t1 (col1, col2) values (2,2);
+ALTER TABLE t1 ADD COLUMN extra INT;
+UPDATE t1 SET col2 = 1;
+select * from t1;
+col1 col2 extra
+2 1 NULL
+DELETE FROM t1 WHERE col1 = 2;
+set global rocksdb_force_flush_memtable_now = true;
+select * from t1;
+col1 col2 extra
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result
new file mode 100644
index 00000000000..d15566f5a2c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_os_buffer.result
@@ -0,0 +1 @@
+ RocksDB: Can't disable allow_os_buffer if allow_mmap_reads is enabled
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result
new file mode 100644
index 00000000000..b37bf17e1ac
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result
@@ -0,0 +1,183 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z');
+ALTER TABLE t1 ADD COLUMN b INT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT '0',
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 ALTER a DROP DEFAULT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b1` char(8) DEFAULT NULL,
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 CHANGE b1 b INT AFTER c;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 CHANGE b b CHAR(8);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY COLUMN b INT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` char(8) DEFAULT NULL,
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY COLUMN b INT AFTER a;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `b` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 RENAME TO t2;
+SHOW CREATE TABLE t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk` int(11) NOT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t2;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC;
+Warnings:
+Warning 1105 ORDER BY ignored as there is a user-defined clustered index in the table 't1'
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT * FROM t1;
+pk a b
+1 1 5
+2 2 2
+3 4 3
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs;
+INSERT INTO t1 VALUES (1,5,'z','t');
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) COLLATE latin1_general_cs DEFAULT NULL,
+ `c` char(8) COLLATE latin1_general_cs DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs
+ALTER TABLE t1 CONVERT TO CHARACTER SET utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8
+ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ `c` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
+ALTER TABLE t1 FORCE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ `c` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ PRIMARY KEY (`pk`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result
new file mode 100644
index 00000000000..ff2973230db
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result
@@ -0,0 +1,29 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b');
+CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (3,3,'c');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+INSERT INTO t2 VALUES (1,4,'d');
+ANALYZE NO_WRITE_TO_BINLOG TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+INSERT INTO t1 VALUES (4,5,'e');
+INSERT INTO t2 VALUES (2,6,'f');
+ANALYZE LOCAL TABLE t1, t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+DROP TABLE t1, t2;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result
new file mode 100644
index 00000000000..a5d81031cd2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result
@@ -0,0 +1,64 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY,
+key1 INT NOT NULL,
+KEY (key1)
+) ENGINE=ROCKSDB;
+INSERT INTO t1 VALUES (12,12);
+INSERT INTO t1 VALUES (6,6);
+BEGIN;
+INSERT INTO t1 VALUES (8,8), (10,10);
+SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC;
+pk key1
+10 10
+8 8
+6 6
+SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC;
+pk key1
+6 6
+8 8
+10 10
+SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC;
+pk key1
+10 10
+8 8
+6 6
+SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC;
+pk key1
+6 6
+8 8
+10 10
+ROLLBACK;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY,
+key1 INT NOT NULL,
+KEY (key1) COMMENT 'rev:cf'
+) ENGINE=ROCKSDB;
+INSERT INTO t2 VALUES (12,12);
+INSERT INTO t2 VALUES (6,6);
+BEGIN;
+INSERT INTO t2 VALUES (8,8), (10,10);
+SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC;
+pk key1
+10 10
+8 8
+6 6
+SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC;
+pk key1
+6 6
+8 8
+10 10
+SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC;
+pk key1
+10 10
+8 8
+6 6
+SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC;
+pk key1
+6 6
+8 8
+10 10
+ROLLBACK;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result
new file mode 100644
index 00000000000..100bc5fd638
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (pk) VALUES (3), (2), (1);
+SELECT * FROM t1;
+pk a
+3 1
+2 2
+1 3
+INSERT INTO t1 (pk) VALUES (4);
+SELECT * FROM t1;
+pk a
+3 1
+2 2
+1 3
+4 4
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result
new file mode 100644
index 00000000000..b14a7a4c0a9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result
@@ -0,0 +1,64 @@
+DROP TABLE IF EXISTS t1;
+#---------------------------
+# auto_increment_offset
+#---------------------------
+SET auto_increment_offset = 200;
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c');
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+SELECT a,b FROM t1 ORDER BY a;
+a b
+1 a
+2 b
+3 c
+#---------------------------
+# auto_increment_increment
+#---------------------------
+SET auto_increment_increment = 300;
+INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f');
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+200
+SELECT a,b FROM t1 ORDER BY a;
+a b
+1 a
+2 b
+3 c
+200 d
+500 e
+800 f
+SET auto_increment_increment = 50;
+INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i');
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+850
+SELECT a,b FROM t1 ORDER BY a;
+a b
+1 a
+2 b
+3 c
+200 d
+500 e
+800 f
+850 g
+900 h
+950 i
+DROP TABLE t1;
+#---------------------------
+# offset is greater than the max value
+#---------------------------
+SET auto_increment_increment = 500;
+SET auto_increment_offset = 300;
+CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (NULL);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+127
+SELECT a FROM t1 ORDER BY a;
+a
+127
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result
new file mode 100644
index 00000000000..28b5b6cd070
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result
@@ -0,0 +1 @@
+# The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE.
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result
new file mode 100644
index 00000000000..d65a4efea30
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result
@@ -0,0 +1,1235 @@
+CREATE PROCEDURE bloom_start()
+BEGIN
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+END//
+CREATE PROCEDURE bloom_end()
+BEGIN
+select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+END//
+drop table if exists t1;
+Warnings:
+Note 1051 Unknown table 'test.t1'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 'test.t2'
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4),
+index id2 (id2),
+index id2_id1 (id2, id1),
+index id2_id3 (id2, id3),
+index id2_id4 (id2, id4),
+index id2_id3_id1_id4 (id2, id3, id1, id4),
+index id3_id2 (id3, id2)
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4),
+index id2 (id2),
+index id2_id3 (id2, id3),
+index id2_id4 (id2, id4),
+index id2_id4_id5 (id2, id4, id5),
+index id3_id4 (id3, id4),
+index id3_id5 (id3, id5)
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+false
+drop table if exists t1;
+drop table if exists t2;
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix',
+index id2 (id2) COMMENT 'cf_short_prefix',
+index id2_id1 (id2, id1) COMMENT 'cf_short_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_short_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_short_prefix',
+index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix',
+index id3_id2 (id3, id2) COMMENT 'cf_short_prefix'
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4) COMMENT 'cf_short_prefix',
+index id2 (id2) COMMENT 'cf_short_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_short_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_short_prefix',
+index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix',
+index id3_id4 (id3, id4) COMMENT 'cf_short_prefix',
+index id3_id5 (id3, id5) COMMENT 'cf_short_prefix'
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+true
+drop table if exists t1;
+drop table if exists t2;
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix',
+index id2 (id2) COMMENT 'cf_long_prefix',
+index id2_id1 (id2, id1) COMMENT 'cf_long_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_long_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_long_prefix',
+index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix',
+index id3_id2 (id3, id2) COMMENT 'cf_long_prefix'
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4) COMMENT 'cf_long_prefix',
+index id2 (id2) COMMENT 'cf_long_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_long_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_long_prefix',
+index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix',
+index id3_id4 (id3, id4) COMMENT 'cf_long_prefix',
+index id3_id5 (id3, id5) COMMENT 'cf_long_prefix'
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+true
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+false
+create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin;
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 asc;
+id1 id2 id3 v1 v2
+1 1 1 1 1
+call bloom_end();
+checked
+true
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 desc;
+id1 id2 id3 v1 v2
+1 1 1 1 1
+call bloom_end();
+checked
+false
+DROP PROCEDURE bloom_start;
+DROP PROCEDURE bloom_end;
+truncate table t1;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+truncate table t2;
+optimize table t2;
+Table Op Msg_type Msg_text
+test.t2 optimize status OK
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists r1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result
new file mode 100644
index 00000000000..d5369e2dbed
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result
@@ -0,0 +1,71 @@
+CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin;
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1;
+COUNT(*)
+10000
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u = 0 then 'true' else 'false' end
+true
+DROP TABLE t0;
+CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb;
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2;
+COUNT(*)
+9999
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u = 0 then 'true' else 'false' end
+true
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2;
+COUNT(*)
+9999
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u = 0 then 'true' else 'false' end
+true
+DROP TABLE t1;
+CREATE TABLE t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin;
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100');
+count(*)
+1
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u > 0 then 'true' else 'false' end
+true
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200');
+count(*)
+1
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u > 0 then 'true' else 'false' end
+true
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200');
+count(*)
+1
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u = 0 then 'true' else 'false' end
+true
+DROP TABLE t2;
+CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin;
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1;
+COUNT(*)
+0
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful';
+case when variable_value-@u > 0 then 'true' else 'false' end
+true
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1;
+COUNT(*)
+1
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u > 0 then 'true' else 'false' end
+true
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500;
+COUNT(*)
+1
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+case when variable_value-@u > 0 then 'true' else 'false' end
+true
+DROP TABLE t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result
new file mode 100644
index 00000000000..c7b5c42f2b3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result
@@ -0,0 +1,122 @@
+CREATE TABLE `linktable` (
+`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+`id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+`link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+`visibility` tinyint(3) NOT NULL DEFAULT '0',
+`data` varchar(255) NOT NULL DEFAULT '',
+`time` bigint(20) unsigned NOT NULL DEFAULT '0',
+`version` int(11) unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk',
+KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type',
+KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2',
+KEY `id1_type3` (`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c > 0 then 'true' else 'false' end
+true
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c > 0 then 'true' else 'false' end
+true
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c = 0 then 'true' else 'false' end
+true
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c > 0 then 'true' else 'false' end
+true
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c = 0 then 'true' else 'false' end
+true
+## HA_READ_PREFIX_LAST
+# BF len 20
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c > 0 then 'true' else 'false' end
+true
+# BF len 19
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c = 0 then 'true' else 'false' end
+true
+# BF len 12
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc;
+id1 id2 link_type visibility data time version
+100 100 1 1 100 100 100
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c = 0 then 'true' else 'false' end
+true
+DROP TABLE linktable;
+#
+# The bloom filter prefix is 20 bytes.
+# Create a key which is longer than that, so that we can see that
+# eq_cond_len= slice.size() - 1;
+# doesn't work.
+#
+# indexnr 4
+# kp0 + 4 = 8
+# kp1 + 8 = 16
+# kp2 + 8 = 24   (24 > the 20-byte prefix length)
+# kp3 + 8 = 28
+create table t1 (
+pk int primary key,
+kp0 int not null,
+kp1 bigint not null,
+kp2 bigint not null,
+kp3 bigint not null,
+key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1'
+) engine=rocksdb;
+insert into t1 values (1, 1,1, 1,1);
+insert into t1 values (10,1,1,0x12FFFFFFFFFF,1);
+insert into t1 values (11,1,1,0x12FFFFFFFFFF,1);
+insert into t1 values (20,2,2,0x12FFFFFFFFFF,1);
+insert into t1 values (21,2,2,0x12FFFFFFFFFF,1);
+explain
+select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index kp12 kp12 28 NULL # Using where; Using index
+show status like '%rocksdb_bloom_filter_prefix%';
+Variable_name Value
+rocksdb_bloom_filter_prefix_checked 0
+rocksdb_bloom_filter_prefix_useful 0
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
+pk kp0 kp1 kp2 kp3
+11 1 1 20890720927743 1
+10 1 1 20890720927743 1
+show status like '%rocksdb_bloom_filter_prefix%';
+Variable_name Value
+rocksdb_bloom_filter_prefix_checked 0
+rocksdb_bloom_filter_prefix_useful 0
+# The following MUST show TRUE:
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+case when variable_value-@c = 0 then 'true' else 'false' end
+true
+drop table t1;
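The checks in this file all follow the same counter-delta idiom: snapshot the
rocksdb_bloom_filter_prefix_checked status value, run the query, then compare
again. A minimal standalone sketch of that idiom, assuming the t1 table defined
just above and shown only for illustration (it is not part of the recorded
result), would look like:

    select variable_value into @c from information_schema.global_status
      where variable_name = 'rocksdb_bloom_filter_prefix_checked';
    select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
    -- A zero delta means the prefix bloom filter was never consulted for this key,
    -- which is the expected outcome here because the equal-condition prefix
    -- (index number + kp0 + kp1 + kp2 = 24 bytes) exceeds the 20-byte bloom prefix.
    select case when variable_value - @c = 0 then 'not checked' else 'checked' end
      from information_schema.global_status
      where variable_name = 'rocksdb_bloom_filter_prefix_checked';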
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
new file mode 100644
index 00000000000..1f4d1a641a2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
@@ -0,0 +1,30 @@
+CREATE TABLE t1 (
+`id1` int unsigned NOT NULL DEFAULT '0',
+`id2` int unsigned NOT NULL DEFAULT '0',
+`link_type` int unsigned NOT NULL DEFAULT '0',
+`visibility` tinyint NOT NULL DEFAULT '0',
+`data` varchar(255) NOT NULL DEFAULT '',
+`time` int unsigned NOT NULL DEFAULT '0',
+`version` int unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+CREATE PROCEDURE select_test()
+BEGIN
+DECLARE id1_cond INT;
+SET id1_cond = 1;
+WHILE id1_cond <= 20000 DO
+SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt;
+IF @cnt < 1 THEN
+SELECT id1_cond, @cnt;
+END IF;
+SET id1_cond = id1_cond + 1;
+END WHILE;
+END//
+"Skipping bloom filter"
+SET session rocksdb_skip_bloom_filter_on_read=1;
+CALL select_test();
+"Using bloom filter"
+SET session rocksdb_skip_bloom_filter_on_read=0;
+CALL select_test();
+DROP PROCEDURE select_test;
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result
new file mode 100644
index 00000000000..af7feaf8682
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result
@@ -0,0 +1,1235 @@
+CREATE PROCEDURE bloom_start()
+BEGIN
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+END//
+CREATE PROCEDURE bloom_end()
+BEGIN
+select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+END//
+drop table if exists t1;
+Warnings:
+Note 1051 Unknown table 'test.t1'
+drop table if exists t2;
+Warnings:
+Note 1051 Unknown table 'test.t2'
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4),
+index id2 (id2),
+index id2_id1 (id2, id1),
+index id2_id3 (id2, id3),
+index id2_id4 (id2, id4),
+index id2_id3_id1_id4 (id2, id3, id1, id4),
+index id3_id2 (id3, id2)
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4),
+index id2 (id2),
+index id2_id3 (id2, id3),
+index id2_id4 (id2, id4),
+index id2_id4_id5 (id2, id4, id5),
+index id3_id4 (id3, id4),
+index id3_id5 (id3, id5)
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+false
+drop table if exists t1;
+drop table if exists t2;
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix',
+index id2 (id2) COMMENT 'cf_short_prefix',
+index id2_id1 (id2, id1) COMMENT 'cf_short_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_short_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_short_prefix',
+index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix',
+index id3_id2 (id3, id2) COMMENT 'cf_short_prefix'
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4) COMMENT 'cf_short_prefix',
+index id2 (id2) COMMENT 'cf_short_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_short_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_short_prefix',
+index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix',
+index id3_id4 (id3, id4) COMMENT 'cf_short_prefix',
+index id3_id5 (id3, id5) COMMENT 'cf_short_prefix'
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+false
+drop table if exists t1;
+drop table if exists t2;
+create table t1 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix',
+index id2 (id2) COMMENT 'cf_long_prefix',
+index id2_id1 (id2, id1) COMMENT 'cf_long_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_long_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_long_prefix',
+index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix',
+index id3_id2 (id3, id2) COMMENT 'cf_long_prefix'
+) engine=ROCKSDB;
+create table t2 (
+id1 bigint not null,
+id2 bigint not null,
+id3 varchar(100) not null,
+id4 int not null,
+id5 int not null,
+value bigint,
+value2 varchar(100),
+primary key (id4) COMMENT 'cf_long_prefix',
+index id2 (id2) COMMENT 'cf_long_prefix',
+index id2_id3 (id2, id3) COMMENT 'cf_long_prefix',
+index id2_id4 (id2, id4) COMMENT 'cf_long_prefix',
+index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix',
+index id3_id4 (id3, id4) COMMENT 'cf_long_prefix',
+index id3_id5 (id3, id5) COMMENT 'cf_long_prefix'
+) engine=ROCKSDB;
+call bloom_start();
+select count(*) from t1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+count(*)
+10000
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+count(*)
+10
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+count(*)
+5
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+count(*)
+1
+call bloom_end();
+checked
+false
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+count(*)
+1
+call bloom_end();
+checked
+false
+create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin;
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 asc;
+id1 id2 id3 v1 v2
+1 1 1 1 1
+call bloom_end();
+checked
+false
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 desc;
+id1 id2 id3 v1 v2
+1 1 1 1 1
+call bloom_end();
+checked
+false
+DROP PROCEDURE bloom_start;
+DROP PROCEDURE bloom_end;
+truncate table t1;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+truncate table t2;
+optimize table t2;
+Table Op Msg_type Msg_text
+test.t2 optimize status OK
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists r1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
new file mode 100644
index 00000000000..4440cb3ea8d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
@@ -0,0 +1,49 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
+CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
+CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'
+ PARTITION BY KEY() PARTITIONS 4;
+set session transaction isolation level repeatable read;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 0
+start transaction with consistent snapshot;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 1
+set rocksdb_bulk_load=1;
+set rocksdb_bulk_load_size=100000;
+LOAD DATA INFILE <input_file> INTO TABLE t1;
+LOAD DATA INFILE <input_file> INTO TABLE t2;
+LOAD DATA INFILE <input_file> INTO TABLE t3;
+set rocksdb_bulk_load=0;
+select count(pk) from t1;
+count(pk)
+10000000
+select count(a) from t1;
+count(a)
+10000000
+select count(b) from t1;
+count(b)
+10000000
+select count(pk) from t2;
+count(pk)
+10000000
+select count(a) from t2;
+count(a)
+10000000
+select count(b) from t2;
+count(b)
+10000000
+select count(pk) from t3;
+count(pk)
+10000000
+select count(a) from t3;
+count(a)
+10000000
+select count(b) from t3;
+count(b)
+10000000
+longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
+test.bulk_load.tmp
+DROP TABLE t1, t2, t3;
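For reference, the bulk-load portion of this test reduces to wrapping the LOAD
DATA statements in the rocksdb_bulk_load session switch. A rough sketch follows,
with a hypothetical input path standing in for the masked <input_file>; the real
test generates its own data files:

    set session rocksdb_bulk_load = 1;
    set session rocksdb_bulk_load_size = 100000;
    -- '/tmp/bulk_load_rows.txt' is a placeholder path, not the file the test uses.
    LOAD DATA INFILE '/tmp/bulk_load_rows.txt' INTO TABLE t1;
    set session rocksdb_bulk_load = 0;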
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result
new file mode 100644
index 00000000000..840ad9a723c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result
@@ -0,0 +1,50 @@
+DROP TABLE IF EXISTS t1;
+create table t1(
+id bigint not null primary key,
+i1 bigint, #unique
+i2 bigint, #repeating
+c1 varchar(20), #unique
+c2 varchar(20), #repeating
+index t1_1(id, i1),
+index t1_2(i1, i2),
+index t1_3(i2, i1),
+index t1_4(c1, c2),
+index t1_5(c2, c1)
+) engine=rocksdb;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+show index in t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE
+t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE
+t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE
+t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_5 1 c2 A 11111 NULL NULL YES LSMTREE
+t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name table_rows
+t1 100000
+restarting...
+show index in t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE
+t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE
+t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE
+t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE
+t1 1 t1_5 1 c2 A 11111 NULL NULL YES LSMTREE
+t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name table_rows
+t1 100000
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/check_table.result b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result
new file mode 100644
index 00000000000..116c168c4da
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result
@@ -0,0 +1,68 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+CHECK TABLE t1, t2 FOR UPGRADE;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+test.t2 check status OK
+INSERT INTO t2 (a,b) VALUES (5,'e');
+CHECK TABLE t2 QUICK;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+INSERT INTO t1 (a,b) VALUES (6,'f');
+CHECK TABLE t1 FAST;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (7,'g');
+INSERT INTO t2 (a,b) VALUES (8,'h');
+CHECK TABLE t2, t1 MEDIUM;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+test.t1 check status OK
+INSERT INTO t1 (a,b) VALUES (9,'i');
+INSERT INTO t2 (a,b) VALUES (10,'j');
+CHECK TABLE t1, t2 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+test.t2 check status OK
+INSERT INTO t1 (a,b) VALUES (11,'k');
+CHECK TABLE t1 CHANGED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (1),(2),(5);
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (6),(8),(12);
+CHECK TABLE t1 FOR UPGRADE;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (13),(15),(16);
+CHECK TABLE t1 QUICK;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (17),(120),(132);
+CHECK TABLE t1 FAST;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (801),(900),(7714);
+CHECK TABLE t1 MEDIUM;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (8760),(10023),(12000);
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028);
+CHECK TABLE t1 CHANGED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result
new file mode 100644
index 00000000000..fd1ac63629f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result
@@ -0,0 +1,59 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t2 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t3 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t4 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t1;
+DELETE FROM t2;
+DELETE FROM t3;
+DELETE FROM t4;
+CREATE TABLE t5 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t5;
+SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]';
+CURRENT
+SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]';
+CURRENT
+truncate table t1;
+optimize table t1;
+truncate table t2;
+optimize table t2;
+truncate table t3;
+optimize table t3;
+truncate table t4;
+optimize table t4;
+truncate table t5;
+optimize table t5;
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists t3;
+drop table if exists t4;
+drop table if exists t5;
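In the output above, '[CHECKPOINT]' appears to be a masked directory path
substituted by the test harness. Issuing the statement by hand would name a
real, writable directory; the path below is purely hypothetical:

    -- Hypothetical target directory for the checkpoint.
    SET GLOBAL rocksdb_create_checkpoint = '/tmp/myrocks_checkpoint_1';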
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result
new file mode 100644
index 00000000000..06a4c3f6f1c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result
@@ -0,0 +1,87 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0;
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 4259194219
+CHECKSUM TABLE t2, t1;
+Table Checksum
+test.t2 0
+test.t1 4259194219
+CHECKSUM TABLE t1, t2 QUICK;
+Table Checksum
+test.t1 NULL
+test.t2 NULL
+CHECKSUM TABLE t1, t2 EXTENDED;
+Table Checksum
+test.t1 4259194219
+test.t2 0
+DROP TABLE t1, t2;
+#
+# Issue #110: SQL command checksum returns inconsistent result
+#
+create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb;
+insert into t1 values (2,'fooo');
+insert into t1 values (1,NULL);
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+select * from t1 where pk=2;
+pk col1
+2 fooo
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+flush tables;
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+drop table t1;
+#
+# The following test is about making sure MyRocks CHECKSUM TABLE
+# values are the same as with InnoDB.
+# If you see checksum values changed, make sure their counterparts
+# in suite/innodb/r/checksum-matches-myrocks.result match.
+#
+create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb;
+insert into t1 values (2,'fooo');
+insert into t1 values (1,NULL);
+checksum table t1;
+Table Checksum
+test.t1 1303411884
+drop table t1;
+create table t1 (
+pk bigint unsigned primary key,
+col1 varchar(10),
+col2 tinyint,
+col3 double
+) engine=rocksdb;
+checksum table t1;
+Table Checksum
+test.t1 0
+insert into t1 values (1, NULL, NULL, NULL);
+insert into t1 values (2, 'foo', NULL, NULL);
+checksum table t1;
+Table Checksum
+test.t1 3633741545
+insert into t1 values (3, NULL, 123, NULL);
+insert into t1 values (4, NULL, NULL, 2.78);
+checksum table t1;
+Table Checksum
+test.t1 390004011
+insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78);
+insert into t1 values (6, '', NULL, 2.78);
+checksum table t1;
+Table Checksum
+test.t1 3183101003
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result
new file mode 100644
index 00000000000..fb86c0af260
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 4259194219
+CHECKSUM TABLE t2, t1;
+Table Checksum
+test.t2 0
+test.t1 4259194219
+CHECKSUM TABLE t1, t2 QUICK;
+Table Checksum
+test.t1 NULL
+test.t2 NULL
+CHECKSUM TABLE t1, t2 EXTENDED;
+Table Checksum
+test.t1 4259194219
+test.t2 0
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result
new file mode 100644
index 00000000000..af53f061753
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI 0
+INSERT INTO t1 (a) VALUES (1);
+SELECT a FROM t1;
+a
+1
+ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT '';
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI 0
+b char(8) YES
+INSERT INTO t1 (b) VALUES ('a');
+SELECT a,b FROM t1 ORDER BY a,b;
+a b
+0 a
+1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result
new file mode 100644
index 00000000000..005aa1e2989
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result
@@ -0,0 +1,2612 @@
+########################
+# BINARY columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+b BINARY NOT NULL,
+b0 BINARY(0) NOT NULL,
+b1 BINARY(1) NOT NULL,
+b20 BINARY(20) NOT NULL,
+b255 BINARY(255) NOT NULL,
+pk BINARY PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) NO NULL
+b0 binary(0) NO NULL
+b1 binary(1) NO NULL
+b20 binary(20) NO NULL
+b255 binary(255) NO NULL
+pk binary(1) NO PRI NULL
+INSERT INTO t1 VALUES ('','','','','','');
+INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a');
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b');
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b20' at row 1
+Warning 1265 Data truncated for column 'b255' at row 1
+INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1;
+ERROR 23000: Duplicate entry 'c' for key 'PRIMARY'
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NOT NULL;
+ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) NO NULL
+b0 binary(0) NO NULL
+b1 binary(1) NO NULL
+b20 binary(20) NO NULL
+b255 binary(255) NO NULL
+pk binary(1) NO PRI NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# BINARY NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BINARY NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c binary(1) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+SELECT HEX(c) FROM t1;
+HEX(c)
+30
+DROP TABLE t1;
+#----------------------------------
+# BINARY NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c BINARY NOT NULL DEFAULT 0
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c binary(1) NO 0
+ALTER TABLE t1 ADD COLUMN err BINARY NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 30
+2 30
+DROP TABLE t1;
+########################
+# VARBINARY columns
+########################
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARBINARY(0) NOT NULL,
+v1 VARBINARY(1) NOT NULL,
+v64 VARBINARY(64) NOT NULL,
+v65000 VARBINARY(65000) NOT NULL,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) NO NULL
+v1 varbinary(1) NO NULL
+v64 varbinary(64) NO PRI NULL
+v65000 varbinary(65000) NO NULL
+CREATE TABLE t2 (v VARBINARY(65532) NOT NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varbinary(65532) NO PRI NULL
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) HEX(v65000)
+
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+Warning 1265 Data truncated for column 'v65000' at row 6
+SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000))
+ 0
+ 61 0
+ 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932
+ 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930
+ 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000
+ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NOT NULL;
+Warnings:
+Note 1246 Converting column 'v65536' from VARBINARY to BLOB
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) NO NULL
+v1 varbinary(1) NO NULL
+v64 varbinary(64) NO PRI NULL
+v65000 varbinary(65000) NO NULL
+v65536 mediumblob NO NULL
+DROP TABLE t1, t2;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# VARBINARY(64) NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c VARBINARY(64) NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c varbinary(64) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test');
+SELECT HEX(c) FROM t1;
+HEX(c)
+74657374
+DROP TABLE t1;
+#----------------------------------
+# VARBINARY(64) NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c VARBINARY(64) NOT NULL DEFAULT 'test'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c varbinary(64) NO test
+ALTER TABLE t1 ADD COLUMN err VARBINARY(64) NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 74657374
+2 74657374
+DROP TABLE t1;
+########################
+# BIT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a BIT NOT NULL,
+b BIT(20) NOT NULL,
+c BIT(64) NOT NULL,
+d BIT(1) NOT NULL,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) NO NULL
+b bit(20) NO NULL
+c bit(64) NO PRI NULL
+d bit(1) NO NULL
+ALTER TABLE t1 DROP COLUMN d;
+ALTER TABLE t1 ADD COLUMN d BIT(0) NOT NULL;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) NO NULL
+b bit(20) NO NULL
+c bit(64) NO PRI NULL
+d bit(1) NO NULL
+INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1);
+SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0;
+BIN(a) HEX(b) c+0
+0 FFFFF 18446744073709551615
+INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d<100;
+a+0 b+0 c+0
+0 1048575 18446744073709551615
+1 0 18446744073709551614
+INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2);
+a+0 b+0 c+0
+1 0 18446744073709551614
+1 102 255
+DELETE FROM t1;
+INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0);
+Warnings:
+Warning 1264 Out of range value for column 'c' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+1 0 18446744073709551615 0
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NOT NULL) ENGINE=rocksdb;
+ERROR 42000: Display width out of range for column 'a' (max = 64)
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# BIT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c bit(1) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1);
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+#----------------------------------
+# BIT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c BIT NOT NULL DEFAULT 1
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c bit(1) NO b'1'
+ALTER TABLE t1 ADD COLUMN err BIT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 1
+2 1
+DROP TABLE t1;
+########################
+# BLOB columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b BLOB NOT NULL,
+b0 BLOB(0) NOT NULL,
+b1 BLOB(1) NOT NULL,
+b300 BLOB(300) NOT NULL,
+bm BLOB(65535) NOT NULL,
+b70k BLOB(70000) NOT NULL,
+b17m BLOB(17000000) NOT NULL,
+t TINYBLOB NOT NULL,
+m MEDIUMBLOB NOT NULL,
+l LONGBLOB NOT NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b blob NO NULL
+b0 blob NO NULL
+b1 tinyblob NO NULL
+b300 blob NO NULL
+bm blob NO NULL
+b70k mediumblob NO NULL
+b17m longblob NO NULL
+t tinyblob NO NULL
+m mediumblob NO NULL
+l longblob NO NULL
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) );
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b300' at row 1
+Warning 1265 Data truncated for column 'bm' at row 1
+Warning 1265 Data truncated for column 't' at row 1
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296);
+ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# BLOB NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BLOB NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c blob NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# BLOB NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c BLOB NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c blob NO NULL
+ALTER TABLE t1 ADD COLUMN err BLOB NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TINYBLOB NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYBLOB NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyblob NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# TINYBLOB NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TINYBLOB NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyblob NO NULL
+ALTER TABLE t1 ADD COLUMN err TINYBLOB NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# MEDIUMBLOB NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMBLOB NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumblob NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# MEDIUMBLOB NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c MEDIUMBLOB NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumblob NO NULL
+ALTER TABLE t1 ADD COLUMN err MEDIUMBLOB NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# LONGBLOB NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGBLOB NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c longblob NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# LONGBLOB NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c LONGBLOB NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c longblob NO NULL
+ALTER TABLE t1 ADD COLUMN err LONGBLOB NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+########################
+# BOOL columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b1 BOOL NOT NULL,
+b2 BOOLEAN NOT NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b1 tinyint(1) NO NULL
+b2 tinyint(1) NO NULL
+INSERT INTO t1 (b1,b2) VALUES (1,TRUE);
+SELECT b1,b2 FROM t1;
+b1 b2
+1 1
+INSERT INTO t1 (b1,b2) VALUES (FALSE,0);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+INSERT INTO t1 (b1,b2) VALUES (2,3);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (-1,-2);
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+1 1
+2 3
+SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1;
+a b
+false false
+true true
+true true
+true true
+SELECT b1,b2 FROM t1 WHERE b1 = TRUE;
+b1 b2
+1 1
+SELECT b1,b2 FROM t1 WHERE b2 = FALSE;
+b1 b2
+0 0
+INSERT INTO t1 (b1,b2) VALUES ('a','b');
+Warnings:
+Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1
+Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (128,-129);
+Warnings:
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+127 -128
+2 3
+ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NOT NULL;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NOT NULL' at line 1
+ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NOT NULL;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NOT NULL' at line 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# BOOL NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BOOL NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyint(1) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('0');
+SELECT HEX(c) FROM t1;
+HEX(c)
+0
+DROP TABLE t1;
+#----------------------------------
+# BOOL NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c BOOL NOT NULL DEFAULT '0'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyint(1) NO 0
+ALTER TABLE t1 ADD COLUMN err BOOL NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('0');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 0
+2 0
+DROP TABLE t1;
+########################
+# CHAR columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c CHAR NOT NULL,
+c0 CHAR(0) NOT NULL,
+c1 CHAR(1) NOT NULL,
+c20 CHAR(20) NOT NULL,
+c255 CHAR(255) NOT NULL,
+PRIMARY KEY (c255)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c char(1) NO NULL
+c0 char(0) NO NULL
+c1 char(1) NO NULL
+c20 char(20) NO NULL
+c255 char(255) NO PRI NULL
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','','');
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.');
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256));
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+Warning 1265 Data truncated for column 'c0' at row 1
+Warning 1265 Data truncated for column 'c1' at row 1
+Warning 1265 Data truncated for column 'c20' at row 1
+Warning 1265 Data truncated for column 'c255' at row 1
+INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 5
+Warning 1265 Data truncated for column 'c0' at row 5
+Warning 1265 Data truncated for column 'c1' at row 5
+Warning 1265 Data truncated for column 'c20' at row 5
+Warning 1265 Data truncated for column 'c' at row 6
+Warning 1265 Data truncated for column 'c0' at row 6
+Warning 1265 Data truncated for column 'c1' at row 6
+Warning 1265 Data truncated for column 'c20' at row 6
+Warning 1265 Data truncated for column 'c255' at row 6
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+ a
+C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b
+a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20;
+c20 REPEAT('a',LENGTH(c20)) COUNT(*)
+ 2
+Creating an article aaaaaaaaaaaaaaaaaaa 1
+aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1
+abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1
+xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1
+ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NOT NULL;
+ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# CHAR NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c char(1) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('_');
+SELECT HEX(c) FROM t1;
+HEX(c)
+5F
+DROP TABLE t1;
+#----------------------------------
+# CHAR NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c CHAR NOT NULL DEFAULT '_'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c char(1) NO _
+ALTER TABLE t1 ADD COLUMN err CHAR NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('_');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 5F
+2 5F
+DROP TABLE t1;
+########################
+# VARCHAR columns
+########################
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARCHAR(0) NOT NULL,
+v1 VARCHAR(1) NOT NULL,
+v64 VARCHAR(64) NOT NULL,
+v65000 VARCHAR(65000) NOT NULL,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) NO NULL
+v1 varchar(1) NO NULL
+v64 varchar(64) NO PRI NULL
+v65000 varchar(65000) NO NULL
+CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varchar(65532) NO PRI
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT v0,v1,v64,v65000 FROM t1;
+v0 v1 v64 v65000
+
+
+
+
+
+
+
+
+
+
+
+ y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+ o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly)
+ o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject.
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Free to read in the Knowledgebase!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+ o The 'default' book to read if you wont to learn to use MySQL / MariaDB.
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ * MariaDB Crash Course by Ben Forta
+ * MySQL (4th Edition) by Paul DuBois
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ * MySQL Admin Cookbook
+ * MySQL Cookbook by Paul DuBois
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ For MariaDB / MySQL end users
+ For developers who want to code on MariaDB or MySQL
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v65000' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+SELECT v0, v1, v64, LENGTH(v65000) FROM t1;
+v0 v1 v64 LENGTH(v65000)
+ 0
+ a 0
+ H aHere is a list of recommended books on MariaDB and MySQL. We've 2966
+ a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000
+ y Once there, double check that an article doesn't already exist 2965
+ y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000
+ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NOT NULL;
+Warnings:
+Note 1246 Converting column 'v65536' from VARCHAR to TEXT
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) NO NULL
+v1 varchar(1) NO NULL
+v64 varchar(64) NO PRI NULL
+v65000 varchar(65000) NO NULL
+v65536 mediumtext NO NULL
+DROP TABLE t1, t2;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# VARCHAR(64) NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c VARCHAR(64) NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c varchar(64) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test default');
+SELECT HEX(c) FROM t1;
+HEX(c)
+746573742064656661756C74
+DROP TABLE t1;
+#----------------------------------
+# VARCHAR(64) NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c VARCHAR(64) NOT NULL DEFAULT 'test default'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c varchar(64) NO test default
+ALTER TABLE t1 ADD COLUMN err VARCHAR(64) NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test default');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 746573742064656661756C74
+2 746573742064656661756C74
+DROP TABLE t1;
+########################
+# date and time columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DATE NOT NULL,
+dt DATETIME NOT NULL,
+ts TIMESTAMP NOT NULL,
+t TIME NOT NULL,
+y YEAR NOT NULL,
+y4 YEAR(4) NOT NULL,
+y2 YEAR(2) NOT NULL,
+pk DATETIME PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d date NO NULL
+dt datetime NO NULL
+ts timestamp NO NULL
+t time NO NULL
+y year(4) NO NULL
+y4 year(4) NO NULL
+y2 year(4) NO NULL
+pk datetime NO PRI NULL
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'),
+('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'),
+('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15');
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16');
+Warnings:
+Warning 1265 Data truncated for column 'd' at row 1
+Warning 1264 Out of range value for column 'dt' at row 1
+Warning 1264 Out of range value for column 'ts' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 'y' at row 1
+Warning 1264 Out of range value for column 'y4' at row 1
+Warning 1264 Out of range value for column 'y2' at row 1
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000
+DROP TABLE t1;
+SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12');
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# DATE NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATE NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c date NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21');
+SELECT HEX(c) FROM t1;
+HEX(c)
+323031322D31322D3231
+DROP TABLE t1;
+#----------------------------------
+# DATE NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c DATE NOT NULL DEFAULT '2012-12-21'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c date NO 2012-12-21
+ALTER TABLE t1 ADD COLUMN err DATE NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 323031322D31322D3231
+2 323031322D31322D3231
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# DATETIME NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATETIME NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c datetime NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12');
+SELECT HEX(c) FROM t1;
+HEX(c)
+323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+#----------------------------------
+# DATETIME NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c DATETIME NOT NULL DEFAULT '2012-12-21 12:21:12'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c datetime NO 2012-12-21 12:21:12
+ALTER TABLE t1 ADD COLUMN err DATETIME NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 323031322D31322D32312031323A32313A3132
+2 323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TIMESTAMP NOT NULL column without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIMESTAMP NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c timestamp NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12');
+SELECT HEX(c) FROM t1;
+HEX(c)
+323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+#----------------------------------
+# TIMESTAMP NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TIMESTAMP NOT NULL DEFAULT '2012-12-21 12:21:12'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c timestamp NO 2012-12-21 12:21:12
+ALTER TABLE t1 ADD COLUMN err TIMESTAMP NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 323031322D31322D32312031323A32313A3132
+2 323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TIME NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIME NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c time NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('12:21:12');
+SELECT HEX(c) FROM t1;
+HEX(c)
+31323A32313A3132
+DROP TABLE t1;
+#----------------------------------
+# TIME NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TIME NOT NULL DEFAULT '12:21:12'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c time NO 12:21:12
+ALTER TABLE t1 ADD COLUMN err TIME NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 31323A32313A3132
+2 31323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# YEAR NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c year(4) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012');
+SELECT HEX(c) FROM t1;
+HEX(c)
+7DC
+DROP TABLE t1;
+#----------------------------------
+# YEAR NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c YEAR NOT NULL DEFAULT '2012'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c year(4) NO 2012
+ALTER TABLE t1 ADD COLUMN err YEAR NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('2012');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7DC
+2 7DC
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# YEAR(2) NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR(2) NOT NULL) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c year(4) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('12');
+SELECT HEX(c) FROM t1;
+HEX(c)
+7DC
+DROP TABLE t1;
+#----------------------------------
+# YEAR(2) NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c YEAR(2) NOT NULL DEFAULT '12'
+) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c year(4) NO 2012
+ALTER TABLE t1 ADD COLUMN err YEAR(2) NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7DC
+2 7DC
+DROP TABLE t1;
+########################
+# ENUM columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a ENUM('') NOT NULL,
+b ENUM('test1','test2','test3','test4','test5') NOT NULL,
+c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NOT NULL,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') NO NULL
+b enum('test1','test2','test3','test4','test5') NO PRI NULL
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL
+INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2);
+SELECT a,b,c FROM t1;
+a b c
+ test2 4
+ test5 2
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'a' at row 1
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+
+ test2 4
+ test5 2
+ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NOT NULL;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in ENUM
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') NO NULL
+b enum('test1','test2','test3','test4','test5') NO PRI NULL
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL
+e enum('a','A') NO NULL
+INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A');
+SELECT a,b,c,e FROM t1;
+a b c e
+ a
+ test2 4 a
+ test3 75 a
+ test5 2 a
+SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != '';
+a b c e
+ test2 4 a
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# ENUM('test1','test2','test3') NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c ENUM('test1','test2','test3') NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c enum('test1','test2','test3') NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test2');
+SELECT HEX(c) FROM t1;
+HEX(c)
+7465737432
+DROP TABLE t1;
+#----------------------------------
+# ENUM('test1','test2','test3') NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c ENUM('test1','test2','test3') NOT NULL DEFAULT 'test2'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c enum('test1','test2','test3') NO test2
+ALTER TABLE t1 ADD COLUMN err ENUM('test1','test2','test3') NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test2');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7465737432
+2 7465737432
+DROP TABLE t1;
+########################
+# Fixed point columns (NUMERIC, DECIMAL)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DECIMAL NOT NULL,
+d0 DECIMAL(0) NOT NULL,
+d1_1 DECIMAL(1,1) NOT NULL,
+d10_2 DECIMAL(10,2) NOT NULL,
+d60_10 DECIMAL(60,10) NOT NULL,
+n NUMERIC NOT NULL,
+n0_0 NUMERIC(0,0) NOT NULL,
+n1 NUMERIC(1) NOT NULL,
+n20_4 NUMERIC(20,4) NOT NULL,
+n65_4 NUMERIC(65,4) NOT NULL,
+pk NUMERIC NOT NULL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d decimal(10,0) NO NULL
+d0 decimal(10,0) NO NULL
+d1_1 decimal(1,1) NO NULL
+d10_2 decimal(10,2) NO NULL
+d60_10 decimal(60,10) NO NULL
+n decimal(10,0) NO NULL
+n0_0 decimal(10,0) NO NULL
+n1 decimal(1,0) NO NULL
+n20_4 decimal(20,4) NO NULL
+n65_4 decimal(65,4) NO NULL
+pk decimal(10,0) NO PRI NULL
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+6
+);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+Warnings:
+Note 1265 Data truncated for column 'd' at row 1
+Note 1265 Data truncated for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Note 1265 Data truncated for column 'd10_2' at row 1
+Note 1265 Data truncated for column 'd60_10' at row 1
+Note 1265 Data truncated for column 'n' at row 1
+Note 1265 Data truncated for column 'n0_0' at row 1
+Note 1265 Data truncated for column 'n1' at row 1
+Note 1265 Data truncated for column 'n20_4' at row 1
+Note 1265 Data truncated for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NOT NULL;
+ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NOT NULL;
+ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NOT NULL;
+ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# DECIMAL NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DECIMAL NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c decimal(10,0) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1.1);
+Warnings:
+Note 1265 Data truncated for column 'c' at row 1
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+#----------------------------------
+# DECIMAL NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c DECIMAL NOT NULL DEFAULT 1.1
+) ENGINE=rocksdb;
+Warnings:
+Note 1265 Data truncated for column 'c' at row 1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c decimal(10,0) NO 1
+ALTER TABLE t1 ADD COLUMN err DECIMAL NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1.1);
+Warnings:
+Note 1265 Data truncated for column 'c' at row 1
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 1
+2 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# NUMERIC NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c NUMERIC NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c decimal(10,0) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+SELECT HEX(c) FROM t1;
+HEX(c)
+0
+DROP TABLE t1;
+#----------------------------------
+# NUMERIC NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c NUMERIC NOT NULL DEFAULT 0
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c decimal(10,0) NO 0
+ALTER TABLE t1 ADD COLUMN err NUMERIC NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 0
+2 0
+DROP TABLE t1;
+########################
+# Floating point columns (FLOAT, DOUBLE)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT NOT NULL,
+f0 FLOAT(0) NOT NULL,
+r1_1 REAL(1,1) NOT NULL,
+f23_0 FLOAT(23) NOT NULL,
+f20_3 FLOAT(20,3) NOT NULL,
+d DOUBLE NOT NULL,
+d1_0 DOUBLE(1,0) NOT NULL,
+d10_10 DOUBLE PRECISION (10,10) NOT NULL,
+d53 DOUBLE(53,0) NOT NULL,
+d53_10 DOUBLE(53,10) NOT NULL,
+pk DOUBLE NOT NULL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+f float NO NULL
+f0 float NO NULL
+r1_1 double(1,1) NO NULL
+f23_0 float NO NULL
+f20_3 float(20,3) NO NULL
+d double NO NULL
+d1_0 double(1,0) NO NULL
+d10_10 double(10,10) NO NULL
+d53 double(53,0) NO NULL
+d53_10 double(53,10) NO NULL
+pk double NO PRI NULL
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 11111111.111
+d10_10 0.0123456789
+d1_0 8
+d53 1234566789123456800
+d53_10 100000000000000000.0000000000
+f0 12345.1
+f20_3 56789.988
+f23_0 123457000
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+99999999999999999999999999999999999999,
+99999999999999999999999999999999999999.9999999999999999,
+0.9,
+99999999999999999999999999999999999999.9,
+99999999999999999.999,
+999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+9,
+0.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+3
+);
+Warnings:
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 11111111.111
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 8
+d1_0 9
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 1e38
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1;
+MAX(f) 9.999999680285692e37
+MAX(d) 1e81
+MAX(d10_10) 0.9999999999
+MAX(d1_0) 9
+MAX(d53) 100000000000000000000000000000000000000000000000000000
+MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000
+MAX(f0) 9.999999680285692e37
+MAX(f20_3) 99999998430674940.000
+MAX(f23_0) 9.999999680285692e37
+MAX(r1_1) 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+5
+);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+999999999999999999999999999999999999999,
+999999999999999999999999999999999999999.9999999999999999,
+1.9,
+999999999999999999999999999999999999999.9,
+999999999999999999.999,
+9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+99,
+1.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+6
+);
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: ''
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e65
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NOT NULL;
+ERROR 42000: Display width out of range for column 'd0_0' (max = 255)
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NOT NULL;
+ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NOT NULL;
+ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# FLOAT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c FLOAT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c float NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1.1);
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+#----------------------------------
+# FLOAT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c FLOAT NOT NULL DEFAULT 1.1
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c float NO 1.1
+ALTER TABLE t1 ADD COLUMN err FLOAT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1.1);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 1
+2 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# DOUBLE NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DOUBLE NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c double NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+SELECT HEX(c) FROM t1;
+HEX(c)
+0
+DROP TABLE t1;
+#----------------------------------
+# DOUBLE NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c DOUBLE NOT NULL DEFAULT 0
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c double NO 0
+ALTER TABLE t1 ADD COLUMN err DOUBLE NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 0
+2 0
+DROP TABLE t1;
+########################
+# INT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT NOT NULL,
+i0 INT(0) NOT NULL,
+i1 INT(1) NOT NULL,
+i20 INT(20) NOT NULL,
+t TINYINT NOT NULL,
+t0 TINYINT(0) NOT NULL,
+t1 TINYINT(1) NOT NULL,
+t20 TINYINT(20) NOT NULL,
+s SMALLINT NOT NULL,
+s0 SMALLINT(0) NOT NULL,
+s1 SMALLINT(1) NOT NULL,
+s20 SMALLINT(20) NOT NULL,
+m MEDIUMINT NOT NULL,
+m0 MEDIUMINT(0) NOT NULL,
+m1 MEDIUMINT(1) NOT NULL,
+m20 MEDIUMINT(20) NOT NULL,
+b BIGINT NOT NULL,
+b0 BIGINT(0) NOT NULL,
+b1 BIGINT(1) NOT NULL,
+b20 BIGINT(20) NOT NULL,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+i int(11) NO NULL
+i0 int(11) NO NULL
+i1 int(1) NO NULL
+i20 int(20) NO NULL
+t tinyint(4) NO NULL
+t0 tinyint(4) NO NULL
+t1 tinyint(1) NO NULL
+t20 tinyint(20) NO NULL
+s smallint(6) NO NULL
+s0 smallint(6) NO NULL
+s1 smallint(1) NO NULL
+s20 smallint(20) NO NULL
+m mediumint(9) NO NULL
+m0 mediumint(9) NO NULL
+m1 mediumint(1) NO NULL
+m20 mediumint(20) NO NULL
+b bigint(20) NO NULL
+b0 bigint(20) NO NULL
+b1 bigint(1) NO NULL
+b20 bigint(20) NO NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 8
+Warning 1264 Out of range value for column 'i0' at row 8
+Warning 1264 Out of range value for column 'i1' at row 8
+Warning 1264 Out of range value for column 'i20' at row 8
+Warning 1264 Out of range value for column 't' at row 8
+Warning 1264 Out of range value for column 't0' at row 8
+Warning 1264 Out of range value for column 't1' at row 8
+Warning 1264 Out of range value for column 't20' at row 8
+Warning 1264 Out of range value for column 's' at row 8
+Warning 1264 Out of range value for column 's0' at row 8
+Warning 1264 Out of range value for column 's1' at row 8
+Warning 1264 Out of range value for column 's20' at row 8
+Warning 1264 Out of range value for column 'm' at row 8
+Warning 1264 Out of range value for column 'm0' at row 8
+Warning 1264 Out of range value for column 'm1' at row 8
+Warning 1264 Out of range value for column 'm20' at row 8
+Warning 1264 Out of range value for column 'i' at row 9
+Warning 1264 Out of range value for column 'i0' at row 9
+Warning 1264 Out of range value for column 'i1' at row 9
+Warning 1264 Out of range value for column 'i20' at row 9
+Warning 1264 Out of range value for column 't' at row 9
+Warning 1264 Out of range value for column 't0' at row 9
+Warning 1264 Out of range value for column 't1' at row 9
+Warning 1264 Out of range value for column 't20' at row 9
+Warning 1264 Out of range value for column 's' at row 9
+Warning 1264 Out of range value for column 's0' at row 9
+Warning 1264 Out of range value for column 's1' at row 9
+Warning 1264 Out of range value for column 's20' at row 9
+Warning 1264 Out of range value for column 'm' at row 9
+Warning 1264 Out of range value for column 'm0' at row 9
+Warning 1264 Out of range value for column 'm1' at row 9
+Warning 1264 Out of range value for column 'm20' at row 9
+Warning 1264 Out of range value for column 'i' at row 10
+Warning 1264 Out of range value for column 'i0' at row 10
+Warning 1264 Out of range value for column 'i1' at row 10
+Warning 1264 Out of range value for column 'i20' at row 10
+Warning 1264 Out of range value for column 't' at row 10
+Warning 1264 Out of range value for column 't0' at row 10
+Warning 1264 Out of range value for column 't1' at row 10
+Warning 1264 Out of range value for column 't20' at row 10
+Warning 1264 Out of range value for column 's' at row 10
+Warning 1264 Out of range value for column 's0' at row 10
+Warning 1264 Out of range value for column 's1' at row 10
+Warning 1264 Out of range value for column 's20' at row 10
+Warning 1264 Out of range value for column 'm' at row 10
+Warning 1264 Out of range value for column 'm0' at row 10
+Warning 1264 Out of range value for column 'm1' at row 10
+Warning 1264 Out of range value for column 'm20' at row 10
+Warning 1264 Out of range value for column 'i' at row 11
+Warning 1264 Out of range value for column 'i0' at row 11
+Warning 1264 Out of range value for column 'i1' at row 11
+Warning 1264 Out of range value for column 'i20' at row 11
+Warning 1264 Out of range value for column 't' at row 11
+Warning 1264 Out of range value for column 't0' at row 11
+Warning 1264 Out of range value for column 't1' at row 11
+Warning 1264 Out of range value for column 't20' at row 11
+Warning 1264 Out of range value for column 's' at row 11
+Warning 1264 Out of range value for column 's0' at row 11
+Warning 1264 Out of range value for column 's1' at row 11
+Warning 1264 Out of range value for column 's20' at row 11
+Warning 1264 Out of range value for column 'm' at row 11
+Warning 1264 Out of range value for column 'm0' at row 11
+Warning 1264 Out of range value for column 'm1' at row 11
+Warning 1264 Out of range value for column 'm20' at row 11
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+ALTER TABLE t1 ADD COLUMN i257 INT(257) NOT NULL;
+ERROR 42000: Display width out of range for column 'i257' (max = 255)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# INT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c INT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c int(11) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (2147483647);
+SELECT HEX(c) FROM t1;
+HEX(c)
+7FFFFFFF
+DROP TABLE t1;
+#----------------------------------
+# INT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c INT NOT NULL DEFAULT 2147483647
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c int(11) NO 2147483647
+ALTER TABLE t1 ADD COLUMN err INT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (2147483647);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7FFFFFFF
+2 7FFFFFFF
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TINYINT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYINT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyint(4) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (127);
+SELECT HEX(c) FROM t1;
+HEX(c)
+7F
+DROP TABLE t1;
+#----------------------------------
+# TINYINT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TINYINT NOT NULL DEFAULT 127
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinyint(4) NO 127
+ALTER TABLE t1 ADD COLUMN err TINYINT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (127);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7F
+2 7F
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# SMALLINT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SMALLINT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c smallint(6) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+SELECT HEX(c) FROM t1;
+HEX(c)
+0
+DROP TABLE t1;
+#----------------------------------
+# SMALLINT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c SMALLINT NOT NULL DEFAULT 0
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c smallint(6) NO 0
+ALTER TABLE t1 ADD COLUMN err SMALLINT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 0
+2 0
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# MEDIUMINT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMINT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumint(9) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1);
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+#----------------------------------
+# MEDIUMINT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c MEDIUMINT NOT NULL DEFAULT 1
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumint(9) NO 1
+ALTER TABLE t1 ADD COLUMN err MEDIUMINT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (1);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 1
+2 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# BIGINT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIGINT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c bigint(20) NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (9223372036854775807);
+SELECT HEX(c) FROM t1;
+HEX(c)
+7FFFFFFFFFFFFFFF
+DROP TABLE t1;
+#----------------------------------
+# BIGINT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c BIGINT NOT NULL DEFAULT 9223372036854775807
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c bigint(20) NO 9223372036854775807
+ALTER TABLE t1 ADD COLUMN err BIGINT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES (9223372036854775807);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 7FFFFFFFFFFFFFFF
+2 7FFFFFFFFFFFFFFF
+DROP TABLE t1;
+########################
+# SET columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a SET('') NOT NULL,
+b SET('test1','test2','test3','test4','test5') NOT NULL,
+c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NOT NULL,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') NO NULL
+b set('test1','test2','test3','test4','test5') NO NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,,23'),
+('',5,2),
+(',','test4,test2','');
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+ALTER TABLE t1 ADD COLUMN e SET('a','A') NOT NULL;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in SET
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') NO NULL
+b set('test1','test2','test3','test4','test5') NO NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL
+e set('a','A') NO NULL
+ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NOT NULL;
+ERROR HY000: Too many strings for column f and SET
+SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != '';
+a b c e
+ test2,test3 01,23,34,44
+ test2,test4
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# SET('test1','test2','test3') NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SET('test1','test2','test3') NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c set('test1','test2','test3') NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test2,test3');
+SELECT HEX(c) FROM t1;
+HEX(c)
+74657374322C7465737433
+DROP TABLE t1;
+#----------------------------------
+# SET('test1','test2','test3') NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c SET('test1','test2','test3') NOT NULL DEFAULT 'test2,test3'
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c set('test1','test2','test3') NO test2,test3
+ALTER TABLE t1 ADD COLUMN err SET('test1','test2','test3') NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('test2,test3');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1 74657374322C7465737433
+2 74657374322C7465737433
+DROP TABLE t1;
+########################
+# TEXT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+t TEXT NOT NULL,
+t0 TEXT(0) NOT NULL,
+t1 TEXT(1) NOT NULL,
+t300 TEXT(300) NOT NULL,
+tm TEXT(65535) NOT NULL,
+t70k TEXT(70000) NOT NULL,
+t17m TEXT(17000000) NOT NULL,
+tt TINYTEXT NOT NULL,
+m MEDIUMTEXT NOT NULL,
+l LONGTEXT NOT NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+t text NO NULL
+t0 text NO NULL
+t1 tinytext NO NULL
+t300 text NO NULL
+tm text NO NULL
+t70k mediumtext NO NULL
+t17m longtext NO NULL
+tt tinytext NO NULL
+m mediumtext NO NULL
+l longtext NO NULL
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) );
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 't' at row 1
+Warning 1265 Data truncated for column 't0' at row 1
+Warning 1265 Data truncated for column 't1' at row 1
+Warning 1265 Data truncated for column 't300' at row 1
+Warning 1265 Data truncated for column 'tm' at row 1
+Warning 1265 Data truncated for column 'tt' at row 1
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NOT NULL;
+ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TEXT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TEXT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c text NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# TEXT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TEXT NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c text NO NULL
+ALTER TABLE t1 ADD COLUMN err TEXT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# TINYTEXT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYTEXT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinytext NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# TINYTEXT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c TINYTEXT NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c tinytext NO NULL
+ALTER TABLE t1 ADD COLUMN err TINYTEXT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# MEDIUMTEXT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMTEXT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumtext NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# MEDIUMTEXT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c MEDIUMTEXT NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c mediumtext NO NULL
+ALTER TABLE t1 ADD COLUMN err MEDIUMTEXT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+#----------------------------------
+# LONGTEXT NOT NULL columns without a default
+#----------------------------------
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGTEXT NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c longtext NO NULL
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+SELECT HEX(c) FROM t1;
+HEX(c)
+
+DROP TABLE t1;
+#----------------------------------
+# LONGTEXT NOT NULL columns with a default
+#----------------------------------
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+c LONGTEXT NOT NULL DEFAULT ''
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+c longtext NO NULL
+ALTER TABLE t1 ADD COLUMN err LONGTEXT NOT NULL DEFAULT NULL;
+ERROR 42000: Invalid default value for 'err'
+INSERT INTO t1 (c) VALUES (NULL);
+ERROR 23000: Column 'c' cannot be null
+INSERT INTO t1 (c) VALUES ('');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+pk HEX(c)
+1
+2
+DROP TABLE t1;
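+# A minimal sketch of the pattern exercised above for the TEXT family (the table
+# and column names below are illustrative only, not part of the recorded run):
+#   CREATE TABLE tx (pk INT AUTO_INCREMENT PRIMARY KEY, c TEXT NOT NULL DEFAULT '') ENGINE=rocksdb;
+#     -- the DEFAULT '' is dropped with Warning 1101: BLOB/TEXT columns can't have a default value
+#   INSERT INTO tx (c) VALUES (NULL);   -- rejected with ERROR 23000: column cannot be null
+#   INSERT INTO tx () VALUES ();        -- accepted; the column is stored as the empty string
+#   ALTER TABLE tx ADD COLUMN e TEXT NOT NULL DEFAULT NULL;   -- rejected with ERROR 42000: invalid default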
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result
new file mode 100644
index 00000000000..051784528b1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result
@@ -0,0 +1,2270 @@
+########################
+# BINARY columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+b BINARY NULL,
+b0 BINARY(0) NULL,
+b1 BINARY(1) NULL,
+b20 BINARY(20) NULL,
+b255 BINARY(255) NULL,
+pk BINARY PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) YES NULL
+b0 binary(0) YES NULL
+b1 binary(1) YES NULL
+b20 binary(20) YES NULL
+b255 binary(255) YES NULL
+pk binary(1) NO PRI NULL
+INSERT INTO t1 VALUES ('','','','','','');
+INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a');
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b');
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b20' at row 1
+Warning 1265 Data truncated for column 'b255' at row 1
+INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1;
+ERROR 23000: Duplicate entry 'c' for key 'PRIMARY'
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NULL;
+ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) YES NULL
+b0 binary(0) YES NULL
+b1 binary(1) YES NULL
+b20 binary(20) YES NULL
+b255 binary(255) YES NULL
+pk binary(1) NO PRI NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c BINARY NULL,
+c1 BINARY NULL DEFAULT NULL,
+c2 BINARY NULL DEFAULT 0,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c binary(1) YES NULL
+c1 binary(1) YES NULL
+c2 binary(1) YES 0
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (0,0,0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 30 30 30
+3 NULL NULL 30
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 30
+3 30
+DROP TABLE t1;
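+# Sketch of the BINARY padding and length limits shown above (names illustrative):
+#   CREATE TABLE tb (pk INT PRIMARY KEY, b20 BINARY(20) NULL) ENGINE=rocksdb;
+#   INSERT INTO tb VALUES (1, 'a');
+#   SELECT HEX(b20) FROM tb;   -- short values are right-padded with 0x00 bytes to the declared length
+#   ALTER TABLE tb ADD COLUMN b257 BINARY(257) NULL;   -- rejected: 255 is the maximum, use BLOB or TEXT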
+########################
+# VARBINARY columns
+########################
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARBINARY(0) NULL,
+v1 VARBINARY(1) NULL,
+v64 VARBINARY(64) NULL,
+v65000 VARBINARY(65000) NULL,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) YES NULL
+v1 varbinary(1) YES NULL
+v64 varbinary(64) NO PRI
+v65000 varbinary(65000) YES NULL
+CREATE TABLE t2 (v VARBINARY(65532) NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varbinary(65532) NO PRI
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) HEX(v65000)
+
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E6420
6576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+Warning 1265 Data truncated for column 'v65000' at row 6
+SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000))
+ 0
+ 61 0
+ 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932
+ 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930
+ 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000
+ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NULL;
+Warnings:
+Note 1246 Converting column 'v65536' from VARBINARY to BLOB
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) YES NULL
+v1 varbinary(1) YES NULL
+v64 varbinary(64) NO PRI
+v65000 varbinary(65000) YES NULL
+v65536 mediumblob YES NULL
+DROP TABLE t1, t2;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c VARBINARY(64) NULL,
+c1 VARBINARY(64) NULL DEFAULT NULL,
+c2 VARBINARY(64) NULL DEFAULT 'test',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c varbinary(64) YES NULL
+c1 varbinary(64) YES NULL
+c2 varbinary(64) YES test
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('test','test','test');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 74657374 74657374 74657374
+3 NULL NULL 74657374
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 74657374
+3 74657374
+DROP TABLE t1;
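+# Sketch of the VARBINARY sizing behaviour shown above (names illustrative):
+#   CREATE TABLE tv (v64 VARBINARY(64) NOT NULL PRIMARY KEY) ENGINE=rocksdb;
+#   INSERT INTO tv VALUES (REPEAT('c',65));              -- over-length value truncated to 64 bytes, Warning 1265
+#   ALTER TABLE tv ADD COLUMN big VARBINARY(65536) NULL; -- too wide for VARBINARY: converted to MEDIUMBLOB, Note 1246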
+########################
+# BIT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a BIT NULL,
+b BIT(20) NULL,
+c BIT(64) NULL,
+d BIT(1) NULL,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) YES NULL
+b bit(20) YES NULL
+c bit(64) NO PRI b'0'
+d bit(1) YES NULL
+ALTER TABLE t1 DROP COLUMN d;
+ALTER TABLE t1 ADD COLUMN d BIT(0) NULL;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) YES NULL
+b bit(20) YES NULL
+c bit(64) NO PRI b'0'
+d bit(1) YES NULL
+INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1);
+SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0;
+BIN(a) HEX(b) c+0
+0 FFFFF 18446744073709551615
+INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d<100;
+a+0 b+0 c+0
+0 1048575 18446744073709551615
+1 0 18446744073709551614
+INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2);
+a+0 b+0 c+0
+1 0 18446744073709551614
+1 102 255
+DELETE FROM t1;
+INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0);
+Warnings:
+Warning 1264 Out of range value for column 'c' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+1 0 18446744073709551615 0
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NULL) ENGINE=rocksdb;
+ERROR 42000: Display width out of range for column 'a' (max = 64)
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c BIT NULL,
+c1 BIT NULL DEFAULT NULL,
+c2 BIT NULL DEFAULT 1,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c bit(1) YES NULL
+c1 bit(1) YES NULL
+c2 bit(1) YES b'1'
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (1,1,1);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 1 1 1
+3 NULL NULL 1
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 1
+3 1
+DROP TABLE t1;
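+# Sketch of the BIT behaviour shown above (names illustrative):
+#   CREATE TABLE tb (pk INT PRIMARY KEY, a BIT(1) NULL, c BIT(64) NULL) ENGINE=rocksdb;
+#   INSERT INTO tb VALUES (1, 0x10, 0);   -- 0x10 does not fit in BIT(1): clamped with Warning 1264
+#   SELECT a+0, BIN(a), HEX(c) FROM tb;   -- BIT values are read back numerically via +0, BIN() or HEX()
+#   CREATE TABLE terr (pk INT PRIMARY KEY, a BIT(65) NULL) ENGINE=rocksdb;   -- rejected: 64 is the maximum width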
+########################
+# BLOB columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b BLOB NULL,
+b0 BLOB(0) NULL,
+b1 BLOB(1) NULL,
+b300 BLOB(300) NULL,
+bm BLOB(65535) NULL,
+b70k BLOB(70000) NULL,
+b17m BLOB(17000000) NULL,
+t TINYBLOB NULL,
+m MEDIUMBLOB NULL,
+l LONGBLOB NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b blob YES NULL
+b0 blob YES NULL
+b1 tinyblob YES NULL
+b300 blob YES NULL
+bm blob YES NULL
+b70k mediumblob YES NULL
+b17m longblob YES NULL
+t tinyblob YES NULL
+m mediumblob YES NULL
+l longblob YES NULL
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) );
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b300' at row 1
+Warning 1265 Data truncated for column 'bm' at row 1
+Warning 1265 Data truncated for column 't' at row 1
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296);
+ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c BLOB NULL,
+c1 BLOB NULL DEFAULT NULL,
+c2 BLOB NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c blob YES NULL
+c1 blob YES NULL
+c2 blob YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TINYBLOB NULL,
+c1 TINYBLOB NULL DEFAULT NULL,
+c2 TINYBLOB NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c tinyblob YES NULL
+c1 tinyblob YES NULL
+c2 tinyblob YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c MEDIUMBLOB NULL,
+c1 MEDIUMBLOB NULL DEFAULT NULL,
+c2 MEDIUMBLOB NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c mediumblob YES NULL
+c1 mediumblob YES NULL
+c2 mediumblob YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c LONGBLOB NULL,
+c1 LONGBLOB NULL DEFAULT NULL,
+c2 LONGBLOB NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c longblob YES NULL
+c1 longblob YES NULL
+c2 longblob YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
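+# Sketch of the nullable BLOB/TEXT default behaviour shown above (names illustrative):
+#   CREATE TABLE tb (pk INT AUTO_INCREMENT PRIMARY KEY, c2 BLOB NULL DEFAULT '') ENGINE=rocksdb;
+#     -- the DEFAULT '' is dropped with Warning 1101, so the effective default is NULL
+#   INSERT INTO tb () VALUES ();
+#   SELECT HEX(c2) FROM tb;   -- returns NULL rather than '', because the declared default was discarded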
+########################
+# BOOL columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b1 BOOL NULL,
+b2 BOOLEAN NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b1 tinyint(1) YES NULL
+b2 tinyint(1) YES NULL
+INSERT INTO t1 (b1,b2) VALUES (1,TRUE);
+SELECT b1,b2 FROM t1;
+b1 b2
+1 1
+INSERT INTO t1 (b1,b2) VALUES (FALSE,0);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+INSERT INTO t1 (b1,b2) VALUES (2,3);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (-1,-2);
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+1 1
+2 3
+SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1;
+a b
+false false
+true true
+true true
+true true
+SELECT b1,b2 FROM t1 WHERE b1 = TRUE;
+b1 b2
+1 1
+SELECT b1,b2 FROM t1 WHERE b2 = FALSE;
+b1 b2
+0 0
+INSERT INTO t1 (b1,b2) VALUES ('a','b');
+Warnings:
+Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1
+Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (128,-129);
+Warnings:
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+127 -128
+2 3
+ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NULL;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED NULL' at line 1
+ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NULL;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NULL' at line 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c BOOL NULL,
+c1 BOOL NULL DEFAULT NULL,
+c2 BOOL NULL DEFAULT '0',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c tinyint(1) YES NULL
+c1 tinyint(1) YES NULL
+c2 tinyint(1) YES 0
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('0','0','0');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 0 0 0
+3 NULL NULL 0
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 0
+3 0
+DROP TABLE t1;
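+# Sketch of the BOOL behaviour shown above (names illustrative):
+#   CREATE TABLE tf (pk INT AUTO_INCREMENT PRIMARY KEY, b1 BOOL NULL) ENGINE=rocksdb;
+#   SHOW COLUMNS IN tf;                     -- BOOL and BOOLEAN are created as tinyint(1)
+#   INSERT INTO tf (b1) VALUES (2), (-1);   -- any TINYINT value is accepted and stored as-is
+#   INSERT INTO tf (b1) VALUES (128);       -- out of TINYINT range: clamped to 127 with Warning 1264
+#   SELECT IF(b1,'true','false') FROM tf;   -- any non-zero value is treated as true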
+########################
+# CHAR columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c CHAR NULL,
+c0 CHAR(0) NULL,
+c1 CHAR(1) NULL,
+c20 CHAR(20) NULL,
+c255 CHAR(255) NULL,
+PRIMARY KEY (c255)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c char(1) YES NULL
+c0 char(0) YES NULL
+c1 char(1) YES NULL
+c20 char(20) YES NULL
+c255 char(255) NO PRI
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','','');
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.');
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256));
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+Warning 1265 Data truncated for column 'c0' at row 1
+Warning 1265 Data truncated for column 'c1' at row 1
+Warning 1265 Data truncated for column 'c20' at row 1
+Warning 1265 Data truncated for column 'c255' at row 1
+INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 5
+Warning 1265 Data truncated for column 'c0' at row 5
+Warning 1265 Data truncated for column 'c1' at row 5
+Warning 1265 Data truncated for column 'c20' at row 5
+Warning 1265 Data truncated for column 'c' at row 6
+Warning 1265 Data truncated for column 'c0' at row 6
+Warning 1265 Data truncated for column 'c1' at row 6
+Warning 1265 Data truncated for column 'c20' at row 6
+Warning 1265 Data truncated for column 'c255' at row 6
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+ a
+C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b
+a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20;
+c20 REPEAT('a',LENGTH(c20)) COUNT(*)
+ 2
+Creating an article aaaaaaaaaaaaaaaaaaa 1
+aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1
+abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1
+xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1
+ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NULL;
+ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c CHAR NULL,
+c1 CHAR NULL DEFAULT NULL,
+c2 CHAR NULL DEFAULT '_',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c char(1) YES NULL
+c1 char(1) YES NULL
+c2 char(1) YES _
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('_','_','_');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 5F 5F 5F
+3 NULL NULL 5F
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 5F
+3 5F
+DROP TABLE t1;
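+# Sketch of the CHAR limits shown above (names illustrative):
+#   CREATE TABLE tc (pk INT PRIMARY KEY, c20 CHAR(20) NULL, c255 CHAR(255) NULL) ENGINE=rocksdb;
+#   INSERT INTO tc VALUES (1, REPEAT('a',21), REPEAT('x',256));   -- over-length values truncated with Warning 1265
+#   ALTER TABLE tc ADD COLUMN c257 CHAR(257) NULL;   -- rejected: 255 is the CHAR maximum, use BLOB or TEXT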
+########################
+# VARCHAR columns
+########################
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARCHAR(0) NULL,
+v1 VARCHAR(1) NULL,
+v64 VARCHAR(64) NULL,
+v65000 VARCHAR(65000) NULL,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) YES NULL
+v1 varchar(1) YES NULL
+v64 varchar(64) NO PRI
+v65000 varchar(65000) YES NULL
+CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varchar(65532) NO PRI
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT v0,v1,v64,v65000 FROM t1;
+v0 v1 v64 v65000
+
+
+
+
+
+
+
+
+
+
+
+ y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+ o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly)
+ o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject.
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Free to read in the Knowledgebase!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+ o The 'default' book to read if you wont to learn to use MySQL / MariaDB.
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ * MariaDB Crash Course by Ben Forta
+ * MySQL (4th Edition) by Paul DuBois
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ * MySQL Admin Cookbook
+ * MySQL Cookbook by Paul DuBois
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ For MariaDB / MySQL end users
+ For developers who want to code on MariaDB or MySQL
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v65000' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+SELECT v0, v1, v64, LENGTH(v65000) FROM t1;
+v0 v1 v64 LENGTH(v65000)
+ 0
+ a 0
+ H aHere is a list of recommended books on MariaDB and MySQL. We've 2966
+ a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000
+ y Once there, double check that an article doesn't already exist 2965
+ y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000
+ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NULL;
+Warnings:
+Note 1246 Converting column 'v65536' from VARCHAR to TEXT
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) YES NULL
+v1 varchar(1) YES NULL
+v64 varchar(64) NO PRI
+v65000 varchar(65000) YES NULL
+v65536 mediumtext YES NULL
+DROP TABLE t1, t2;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c VARCHAR(64) NULL,
+c1 VARCHAR(64) NULL DEFAULT NULL,
+c2 VARCHAR(64) NULL DEFAULT 'test default',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c varchar(64) YES NULL
+c1 varchar(64) YES NULL
+c2 varchar(64) YES test default
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('test default','test default','test default');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 746573742064656661756C74 746573742064656661756C74 746573742064656661756C74
+3 NULL NULL 746573742064656661756C74
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 746573742064656661756C74
+3 746573742064656661756C74
+DROP TABLE t1;
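+# Sketch contrasting VARCHAR with the TEXT/BLOB family (names illustrative):
+#   CREATE TABLE tv (pk INT AUTO_INCREMENT PRIMARY KEY, c VARCHAR(64) NULL DEFAULT 'test default') ENGINE=rocksdb;
+#     -- unlike TEXT/BLOB, a VARCHAR default is kept (no Warning 1101 is raised)
+#   ALTER TABLE tv ADD COLUMN big VARCHAR(65536) NULL;   -- too wide for VARCHAR: converted to MEDIUMTEXT, Note 1246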
+########################
+# date and time columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DATE NULL,
+dt DATETIME NULL,
+ts TIMESTAMP NULL,
+t TIME NULL,
+y YEAR NULL,
+y4 YEAR(4) NULL,
+y2 YEAR(2) NULL,
+pk DATETIME PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d date YES NULL
+dt datetime YES NULL
+ts timestamp YES NULL
+t time YES NULL
+y year(4) YES NULL
+y4 year(4) YES NULL
+y2 year(4) YES NULL
+pk datetime NO PRI NULL
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'),
+('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'),
+('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15');
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16');
+Warnings:
+Warning 1265 Data truncated for column 'd' at row 1
+Warning 1264 Out of range value for column 'dt' at row 1
+Warning 1264 Out of range value for column 'ts' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 'y' at row 1
+Warning 1264 Out of range value for column 'y4' at row 1
+Warning 1264 Out of range value for column 'y2' at row 1
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000
+DROP TABLE t1;
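+# Sketch of the date/time edge cases shown above (names illustrative):
+#   CREATE TABLE td (pk INT PRIMARY KEY, d DATE NULL, t TIME NULL, y YEAR(2) NULL) ENGINE=rocksdb;
+#     -- YEAR(2) is deprecated and silently created as YEAR(4), Warning 1818
+#   INSERT INTO td VALUES (1, '999-13-32', '-839:00:00', '-1');
+#     -- invalid values are zeroed or clamped with Warnings 1265/1264: '0000-00-00', '-838:59:59', 0000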
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c DATE NULL,
+c1 DATE NULL DEFAULT NULL,
+c2 DATE NULL DEFAULT '2012-12-21',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c date YES NULL
+c1 date YES NULL
+c2 date YES 2012-12-21
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21','2012-12-21','2012-12-21');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 323031322D31322D3231 323031322D31322D3231 323031322D31322D3231
+3 NULL NULL 323031322D31322D3231
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 323031322D31322D3231
+3 323031322D31322D3231
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c DATETIME NULL,
+c1 DATETIME NULL DEFAULT NULL,
+c2 DATETIME NULL DEFAULT '2012-12-21 12:21:12',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c datetime YES NULL
+c1 datetime YES NULL
+c2 datetime YES 2012-12-21 12:21:12
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132
+3 NULL NULL 323031322D31322D32312031323A32313A3132
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 323031322D31322D32312031323A32313A3132
+3 323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TIMESTAMP NULL,
+c1 TIMESTAMP NULL DEFAULT NULL,
+c2 TIMESTAMP NULL DEFAULT '2012-12-21 12:21:12',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c timestamp YES NULL
+c1 timestamp YES NULL
+c2 timestamp YES 2012-12-21 12:21:12
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132
+3 NULL NULL 323031322D31322D32312031323A32313A3132
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 323031322D31322D32312031323A32313A3132
+3 323031322D31322D32312031323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TIME NULL,
+c1 TIME NULL DEFAULT NULL,
+c2 TIME NULL DEFAULT '12:21:12',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c time YES NULL
+c1 time YES NULL
+c2 time YES 12:21:12
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('12:21:12','12:21:12','12:21:12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 31323A32313A3132 31323A32313A3132 31323A32313A3132
+3 NULL NULL 31323A32313A3132
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 31323A32313A3132
+3 31323A32313A3132
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c YEAR NULL,
+c1 YEAR NULL DEFAULT NULL,
+c2 YEAR NULL DEFAULT '2012',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c year(4) YES NULL
+c1 year(4) YES NULL
+c2 year(4) YES 2012
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('2012','2012','2012');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7DC 7DC 7DC
+3 NULL NULL 7DC
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7DC
+3 7DC
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c YEAR(2) NULL,
+c1 YEAR(2) NULL DEFAULT NULL,
+c2 YEAR(2) NULL DEFAULT '12',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c year(4) YES NULL
+c1 year(4) YES NULL
+c2 year(4) YES 2012
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('12','12','12');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7DC 7DC 7DC
+3 NULL NULL 7DC
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7DC
+3 7DC
+DROP TABLE t1;
+########################
+# ENUM columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a ENUM('') NULL,
+b ENUM('test1','test2','test3','test4','test5') NULL,
+c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NULL,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') YES NULL
+b enum('test1','test2','test3','test4','test5') NO PRI test1
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL
+INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2);
+SELECT a,b,c FROM t1;
+a b c
+ test2 4
+ test5 2
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'a' at row 1
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+
+ test2 4
+ test5 2
+ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NULL;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in ENUM
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') YES NULL
+b enum('test1','test2','test3','test4','test5') NO PRI test1
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL
+e enum('a','A') YES NULL
+INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A');
+SELECT a,b,c,e FROM t1;
+a b c e
+ NULL
+ test2 4 NULL
+ test3 75 a
+ test5 2 NULL
+SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != '';
+a b c e
+ test2 4 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c ENUM('test1','test2','test3') NULL,
+c1 ENUM('test1','test2','test3') NULL DEFAULT NULL,
+c2 ENUM('test1','test2','test3') NULL DEFAULT 'test2',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c enum('test1','test2','test3') YES NULL
+c1 enum('test1','test2','test3') YES NULL
+c2 enum('test1','test2','test3') YES test2
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('test2','test2','test2');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7465737432 7465737432 7465737432
+3 NULL NULL 7465737432
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7465737432
+3 7465737432
+DROP TABLE t1;
+########################
+# Fixed point columns (NUMERIC, DECIMAL)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DECIMAL NULL,
+d0 DECIMAL(0) NULL,
+d1_1 DECIMAL(1,1) NULL,
+d10_2 DECIMAL(10,2) NULL,
+d60_10 DECIMAL(60,10) NULL,
+n NUMERIC NULL,
+n0_0 NUMERIC(0,0) NULL,
+n1 NUMERIC(1) NULL,
+n20_4 NUMERIC(20,4) NULL,
+n65_4 NUMERIC(65,4) NULL,
+pk NUMERIC NULL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d decimal(10,0) YES NULL
+d0 decimal(10,0) YES NULL
+d1_1 decimal(1,1) YES NULL
+d10_2 decimal(10,2) YES NULL
+d60_10 decimal(60,10) YES NULL
+n decimal(10,0) YES NULL
+n0_0 decimal(10,0) YES NULL
+n1 decimal(1,0) YES NULL
+n20_4 decimal(20,4) YES NULL
+n65_4 decimal(65,4) YES NULL
+pk decimal(10,0) NO PRI NULL
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+6
+);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+Warnings:
+Note 1265 Data truncated for column 'd' at row 1
+Note 1265 Data truncated for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Note 1265 Data truncated for column 'd10_2' at row 1
+Note 1265 Data truncated for column 'd60_10' at row 1
+Note 1265 Data truncated for column 'n' at row 1
+Note 1265 Data truncated for column 'n0_0' at row 1
+Note 1265 Data truncated for column 'n1' at row 1
+Note 1265 Data truncated for column 'n20_4' at row 1
+Note 1265 Data truncated for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NULL;
+ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NULL;
+ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NULL;
+ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c DECIMAL NULL,
+c1 DECIMAL NULL DEFAULT NULL,
+c2 DECIMAL NULL DEFAULT 1.1,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Note 1265 Data truncated for column 'c2' at row 1
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c decimal(10,0) YES NULL
+c1 decimal(10,0) YES NULL
+c2 decimal(10,0) YES 1
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (1.1,1.1,1.1);
+Warnings:
+Note 1265 Data truncated for column 'c' at row 1
+Note 1265 Data truncated for column 'c1' at row 1
+Note 1265 Data truncated for column 'c2' at row 1
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 1 1 1
+3 NULL NULL 1
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 1
+3 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c NUMERIC NULL,
+c1 NUMERIC NULL DEFAULT NULL,
+c2 NUMERIC NULL DEFAULT 0 ,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c decimal(10,0) YES NULL
+c1 decimal(10,0) YES NULL
+c2 decimal(10,0) YES 0
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 );
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 0 0 0
+3 NULL NULL 0
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 0
+3 0
+DROP TABLE t1;
+########################
+# Floating point columns (FLOAT, DOUBLE)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT NULL,
+f0 FLOAT(0) NULL,
+r1_1 REAL(1,1) NULL,
+f23_0 FLOAT(23) NULL,
+f20_3 FLOAT(20,3) NULL,
+d DOUBLE NULL,
+d1_0 DOUBLE(1,0) NULL,
+d10_10 DOUBLE PRECISION (10,10) NULL,
+d53 DOUBLE(53,0) NULL,
+d53_10 DOUBLE(53,10) NULL,
+pk DOUBLE NULL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+f float YES NULL
+f0 float YES NULL
+r1_1 double(1,1) YES NULL
+f23_0 float YES NULL
+f20_3 float(20,3) YES NULL
+d double YES NULL
+d1_0 double(1,0) YES NULL
+d10_10 double(10,10) YES NULL
+d53 double(53,0) YES NULL
+d53_10 double(53,10) YES NULL
+pk double NO PRI NULL
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 11111111.111
+d10_10 0.0123456789
+d1_0 8
+d53 1234566789123456800
+d53_10 100000000000000000.0000000000
+f0 12345.1
+f20_3 56789.988
+f23_0 123457000
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+99999999999999999999999999999999999999,
+99999999999999999999999999999999999999.9999999999999999,
+0.9,
+99999999999999999999999999999999999999.9,
+99999999999999999.999,
+999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+9,
+0.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+3
+);
+Warnings:
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 11111111.111
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 8
+d1_0 9
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 1e38
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1;
+MAX(f) 9.999999680285692e37
+MAX(d) 1e81
+MAX(d10_10) 0.9999999999
+MAX(d1_0) 9
+MAX(d53) 100000000000000000000000000000000000000000000000000000
+MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000
+MAX(f0) 9.999999680285692e37
+MAX(f20_3) 99999998430674940.000
+MAX(f23_0) 9.999999680285692e37
+MAX(r1_1) 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+5
+);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+999999999999999999999999999999999999999,
+999999999999999999999999999999999999999.9999999999999999,
+1.9,
+999999999999999999999999999999999999999.9,
+999999999999999999.999,
+9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+99,
+1.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+6
+);
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: ''
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e65
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NULL;
+ERROR 42000: Display width out of range for column 'd0_0' (max = 255)
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NULL;
+ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NULL;
+ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c FLOAT NULL,
+c1 FLOAT NULL DEFAULT NULL,
+c2 FLOAT NULL DEFAULT 1.1 ,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c float YES NULL
+c1 float YES NULL
+c2 float YES 1.1
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (1.1 ,1.1 ,1.1 );
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 1 1 1
+3 NULL NULL 1
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 1
+3 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c DOUBLE NULL,
+c1 DOUBLE NULL DEFAULT NULL,
+c2 DOUBLE NULL DEFAULT 0 ,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c double YES NULL
+c1 double YES NULL
+c2 double YES 0
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 );
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 0 0 0
+3 NULL NULL 0
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 0
+3 0
+DROP TABLE t1;
+########################
+# INT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT NULL,
+i0 INT(0) NULL,
+i1 INT(1) NULL,
+i20 INT(20) NULL,
+t TINYINT NULL,
+t0 TINYINT(0) NULL,
+t1 TINYINT(1) NULL,
+t20 TINYINT(20) NULL,
+s SMALLINT NULL,
+s0 SMALLINT(0) NULL,
+s1 SMALLINT(1) NULL,
+s20 SMALLINT(20) NULL,
+m MEDIUMINT NULL,
+m0 MEDIUMINT(0) NULL,
+m1 MEDIUMINT(1) NULL,
+m20 MEDIUMINT(20) NULL,
+b BIGINT NULL,
+b0 BIGINT(0) NULL,
+b1 BIGINT(1) NULL,
+b20 BIGINT(20) NULL,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+i0 int(11) YES NULL
+i1 int(1) YES NULL
+i20 int(20) YES NULL
+t tinyint(4) YES NULL
+t0 tinyint(4) YES NULL
+t1 tinyint(1) YES NULL
+t20 tinyint(20) YES NULL
+s smallint(6) YES NULL
+s0 smallint(6) YES NULL
+s1 smallint(1) YES NULL
+s20 smallint(20) YES NULL
+m mediumint(9) YES NULL
+m0 mediumint(9) YES NULL
+m1 mediumint(1) YES NULL
+m20 mediumint(20) YES NULL
+b bigint(20) YES NULL
+b0 bigint(20) YES NULL
+b1 bigint(1) YES NULL
+b20 bigint(20) YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 8
+Warning 1264 Out of range value for column 'i0' at row 8
+Warning 1264 Out of range value for column 'i1' at row 8
+Warning 1264 Out of range value for column 'i20' at row 8
+Warning 1264 Out of range value for column 't' at row 8
+Warning 1264 Out of range value for column 't0' at row 8
+Warning 1264 Out of range value for column 't1' at row 8
+Warning 1264 Out of range value for column 't20' at row 8
+Warning 1264 Out of range value for column 's' at row 8
+Warning 1264 Out of range value for column 's0' at row 8
+Warning 1264 Out of range value for column 's1' at row 8
+Warning 1264 Out of range value for column 's20' at row 8
+Warning 1264 Out of range value for column 'm' at row 8
+Warning 1264 Out of range value for column 'm0' at row 8
+Warning 1264 Out of range value for column 'm1' at row 8
+Warning 1264 Out of range value for column 'm20' at row 8
+Warning 1264 Out of range value for column 'i' at row 9
+Warning 1264 Out of range value for column 'i0' at row 9
+Warning 1264 Out of range value for column 'i1' at row 9
+Warning 1264 Out of range value for column 'i20' at row 9
+Warning 1264 Out of range value for column 't' at row 9
+Warning 1264 Out of range value for column 't0' at row 9
+Warning 1264 Out of range value for column 't1' at row 9
+Warning 1264 Out of range value for column 't20' at row 9
+Warning 1264 Out of range value for column 's' at row 9
+Warning 1264 Out of range value for column 's0' at row 9
+Warning 1264 Out of range value for column 's1' at row 9
+Warning 1264 Out of range value for column 's20' at row 9
+Warning 1264 Out of range value for column 'm' at row 9
+Warning 1264 Out of range value for column 'm0' at row 9
+Warning 1264 Out of range value for column 'm1' at row 9
+Warning 1264 Out of range value for column 'm20' at row 9
+Warning 1264 Out of range value for column 'i' at row 10
+Warning 1264 Out of range value for column 'i0' at row 10
+Warning 1264 Out of range value for column 'i1' at row 10
+Warning 1264 Out of range value for column 'i20' at row 10
+Warning 1264 Out of range value for column 't' at row 10
+Warning 1264 Out of range value for column 't0' at row 10
+Warning 1264 Out of range value for column 't1' at row 10
+Warning 1264 Out of range value for column 't20' at row 10
+Warning 1264 Out of range value for column 's' at row 10
+Warning 1264 Out of range value for column 's0' at row 10
+Warning 1264 Out of range value for column 's1' at row 10
+Warning 1264 Out of range value for column 's20' at row 10
+Warning 1264 Out of range value for column 'm' at row 10
+Warning 1264 Out of range value for column 'm0' at row 10
+Warning 1264 Out of range value for column 'm1' at row 10
+Warning 1264 Out of range value for column 'm20' at row 10
+Warning 1264 Out of range value for column 'i' at row 11
+Warning 1264 Out of range value for column 'i0' at row 11
+Warning 1264 Out of range value for column 'i1' at row 11
+Warning 1264 Out of range value for column 'i20' at row 11
+Warning 1264 Out of range value for column 't' at row 11
+Warning 1264 Out of range value for column 't0' at row 11
+Warning 1264 Out of range value for column 't1' at row 11
+Warning 1264 Out of range value for column 't20' at row 11
+Warning 1264 Out of range value for column 's' at row 11
+Warning 1264 Out of range value for column 's0' at row 11
+Warning 1264 Out of range value for column 's1' at row 11
+Warning 1264 Out of range value for column 's20' at row 11
+Warning 1264 Out of range value for column 'm' at row 11
+Warning 1264 Out of range value for column 'm0' at row 11
+Warning 1264 Out of range value for column 'm1' at row 11
+Warning 1264 Out of range value for column 'm20' at row 11
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+ALTER TABLE t1 ADD COLUMN i257 INT(257) NULL;
+ERROR 42000: Display width out of range for column 'i257' (max = 255)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c INT NULL,
+c1 INT NULL DEFAULT NULL,
+c2 INT NULL DEFAULT 2147483647,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c int(11) YES NULL
+c1 int(11) YES NULL
+c2 int(11) YES 2147483647
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (2147483647,2147483647,2147483647);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7FFFFFFF 7FFFFFFF 7FFFFFFF
+3 NULL NULL 7FFFFFFF
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7FFFFFFF
+3 7FFFFFFF
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TINYINT NULL,
+c1 TINYINT NULL DEFAULT NULL,
+c2 TINYINT NULL DEFAULT 127 ,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c tinyint(4) YES NULL
+c1 tinyint(4) YES NULL
+c2 tinyint(4) YES 127
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (127 ,127 ,127 );
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7F 7F 7F
+3 NULL NULL 7F
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7F
+3 7F
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c SMALLINT NULL,
+c1 SMALLINT NULL DEFAULT NULL,
+c2 SMALLINT NULL DEFAULT 0,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c smallint(6) YES NULL
+c1 smallint(6) YES NULL
+c2 smallint(6) YES 0
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (0,0,0);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 0 0 0
+3 NULL NULL 0
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 0
+3 0
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c MEDIUMINT NULL,
+c1 MEDIUMINT NULL DEFAULT NULL,
+c2 MEDIUMINT NULL DEFAULT 1,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c mediumint(9) YES NULL
+c1 mediumint(9) YES NULL
+c2 mediumint(9) YES 1
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (1,1,1);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 1 1 1
+3 NULL NULL 1
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 1
+3 1
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c BIGINT NULL,
+c1 BIGINT NULL DEFAULT NULL,
+c2 BIGINT NULL DEFAULT 9223372036854775807,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c bigint(20) YES NULL
+c1 bigint(20) YES NULL
+c2 bigint(20) YES 9223372036854775807
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES (9223372036854775807,9223372036854775807,9223372036854775807);
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF
+3 NULL NULL 7FFFFFFFFFFFFFFF
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 7FFFFFFFFFFFFFFF
+3 7FFFFFFFFFFFFFFF
+DROP TABLE t1;
+########################
+# SET columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a SET('') NULL,
+b SET('test1','test2','test3','test4','test5') NULL,
+c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NULL,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') YES NULL
+b set('test1','test2','test3','test4','test5') YES NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,,23'),
+('',5,2),
+(',','test4,test2','');
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+ALTER TABLE t1 ADD COLUMN e SET('a','A') NULL;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in SET
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') YES NULL
+b set('test1','test2','test3','test4','test5') YES NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI
+e set('a','A') YES NULL
+ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NULL;
+ERROR HY000: Too many strings for column f and SET
+SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != '';
+a b c e
+ test2,test3 01,23,34,44 NULL
+ test2,test4 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c SET('test1','test2','test3') NULL,
+c1 SET('test1','test2','test3') NULL DEFAULT NULL,
+c2 SET('test1','test2','test3') NULL DEFAULT 'test2,test3',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c set('test1','test2','test3') YES NULL
+c1 set('test1','test2','test3') YES NULL
+c2 set('test1','test2','test3') YES test2,test3
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('test2,test3','test2,test3','test2,test3');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2 74657374322C7465737433 74657374322C7465737433 74657374322C7465737433
+3 NULL NULL 74657374322C7465737433
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2 74657374322C7465737433
+3 74657374322C7465737433
+DROP TABLE t1;
+########################
+# TEXT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+t TEXT NULL,
+t0 TEXT(0) NULL,
+t1 TEXT(1) NULL,
+t300 TEXT(300) NULL,
+tm TEXT(65535) NULL,
+t70k TEXT(70000) NULL,
+t17m TEXT(17000000) NULL,
+tt TINYTEXT NULL,
+m MEDIUMTEXT NULL,
+l LONGTEXT NULL
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+t text YES NULL
+t0 text YES NULL
+t1 tinytext YES NULL
+t300 text YES NULL
+tm text YES NULL
+t70k mediumtext YES NULL
+t17m longtext YES NULL
+tt tinytext YES NULL
+m mediumtext YES NULL
+l longtext YES NULL
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) );
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 't' at row 1
+Warning 1265 Data truncated for column 't0' at row 1
+Warning 1265 Data truncated for column 't1' at row 1
+Warning 1265 Data truncated for column 't300' at row 1
+Warning 1265 Data truncated for column 'tm' at row 1
+Warning 1265 Data truncated for column 'tt' at row 1
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NULL;
+ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295)
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TEXT NULL,
+c1 TEXT NULL DEFAULT NULL,
+c2 TEXT NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c text YES NULL
+c1 text YES NULL
+c2 text YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c TINYTEXT NULL,
+c1 TINYTEXT NULL DEFAULT NULL,
+c2 TINYTEXT NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c tinytext YES NULL
+c1 tinytext YES NULL
+c2 tinytext YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c MEDIUMTEXT NULL,
+c1 MEDIUMTEXT NULL DEFAULT NULL,
+c2 MEDIUMTEXT NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c mediumtext YES NULL
+c1 mediumtext YES NULL
+c2 mediumtext YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c LONGTEXT NULL,
+c1 LONGTEXT NULL DEFAULT NULL,
+c2 LONGTEXT NULL DEFAULT '',
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1101 BLOB/TEXT column 'c2' can't have a default value
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c longtext YES NULL
+c1 longtext YES NULL
+c2 longtext YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+INSERT INTO t1 (c,c1,c2) VALUES ('','','');
+INSERT INTO t1 () VALUES ();
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c) HEX(c1) HEX(c2)
+1 NULL NULL NULL
+2
+3 NULL NULL NULL
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+pk HEX(c2)
+1 NULL
+2
+3 NULL
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result
new file mode 100644
index 00000000000..66b6a5fe799
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result
@@ -0,0 +1,741 @@
+########################
+# Fixed point columns (NUMERIC, DECIMAL)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DECIMAL UNSIGNED,
+d0 DECIMAL(0) UNSIGNED,
+d1_1 DECIMAL(1,1) UNSIGNED,
+d10_2 DECIMAL(10,2) UNSIGNED,
+d60_10 DECIMAL(60,10) UNSIGNED,
+n NUMERIC UNSIGNED,
+n0_0 NUMERIC(0,0) UNSIGNED,
+n1 NUMERIC(1) UNSIGNED,
+n20_4 NUMERIC(20,4) UNSIGNED,
+n65_4 NUMERIC(65,4) UNSIGNED,
+pk NUMERIC UNSIGNED PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d decimal(10,0) unsigned YES NULL
+d0 decimal(10,0) unsigned YES NULL
+d1_1 decimal(1,1) unsigned YES NULL
+d10_2 decimal(10,2) unsigned YES NULL
+d60_10 decimal(60,10) unsigned YES NULL
+n decimal(10,0) unsigned YES NULL
+n0_0 decimal(10,0) unsigned YES NULL
+n1 decimal(1,0) unsigned YES NULL
+n20_4 decimal(20,4) unsigned YES NULL
+n65_4 decimal(65,4) unsigned YES NULL
+pk decimal(10,0) unsigned NO PRI NULL
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+6
+);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+Warnings:
+Note 1265 Data truncated for column 'd' at row 1
+Note 1265 Data truncated for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Note 1265 Data truncated for column 'd10_2' at row 1
+Note 1265 Data truncated for column 'd60_10' at row 1
+Note 1265 Data truncated for column 'n' at row 1
+Note 1265 Data truncated for column 'n0_0' at row 1
+Note 1265 Data truncated for column 'n1' at row 1
+Note 1265 Data truncated for column 'n20_4' at row 1
+Note 1265 Data truncated for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) UNSIGNED;
+ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) UNSIGNED;
+ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) UNSIGNED;
+ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+CREATE TABLE t1 (
+a DECIMAL UNSIGNED,
+b NUMERIC UNSIGNED,
+PRIMARY KEY (a)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a decimal(10,0) unsigned NO PRI 0
+b decimal(10,0) unsigned YES NULL
+INSERT INTO t1 (a,b) VALUES (1.0,-1.0);
+Warnings:
+Warning 1264 Out of range value for column 'b' at row 1
+INSERT INTO t1 (a,b) VALUES (-100,100);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT a,b FROM t1;
+a b
+0 100
+1 0
+DROP TABLE t1;
+########################
+# Floating point columns (FLOAT, DOUBLE)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT UNSIGNED,
+f0 FLOAT(0) UNSIGNED,
+r1_1 REAL(1,1) UNSIGNED,
+f23_0 FLOAT(23) UNSIGNED,
+f20_3 FLOAT(20,3) UNSIGNED,
+d DOUBLE UNSIGNED,
+d1_0 DOUBLE(1,0) UNSIGNED,
+d10_10 DOUBLE PRECISION (10,10) UNSIGNED,
+d53 DOUBLE(53,0) UNSIGNED,
+d53_10 DOUBLE(53,10) UNSIGNED,
+pk DOUBLE UNSIGNED PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+f float unsigned YES NULL
+f0 float unsigned YES NULL
+r1_1 double(1,1) unsigned YES NULL
+f23_0 float unsigned YES NULL
+f20_3 float(20,3) unsigned YES NULL
+d double unsigned YES NULL
+d1_0 double(1,0) unsigned YES NULL
+d10_10 double(10,10) unsigned YES NULL
+d53 double(53,0) unsigned YES NULL
+d53_10 double(53,10) unsigned YES NULL
+pk double unsigned NO PRI NULL
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 11111111.111
+d10_10 0.0123456789
+d1_0 8
+d53 1234566789123456800
+d53_10 100000000000000000.0000000000
+f0 12345.1
+f20_3 56789.988
+f23_0 123457000
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+99999999999999999999999999999999999999,
+99999999999999999999999999999999999999.9999999999999999,
+0.9,
+99999999999999999999999999999999999999.9,
+99999999999999999.999,
+999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+9,
+0.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+3
+);
+Warnings:
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 11111111.111
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 8
+d1_0 9
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 1e38
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 0
+d 11111111.111
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d53 0
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 0
+f 1e38
+f0 0
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 0.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1;
+MAX(f) 9.999999680285692e37
+MAX(d) 1e81
+MAX(d10_10) 0.9999999999
+MAX(d1_0) 9
+MAX(d53) 100000000000000000000000000000000000000000000000000000
+MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000
+MAX(f0) 9.999999680285692e37
+MAX(f20_3) 99999998430674940.000
+MAX(f23_0) 9.999999680285692e37
+MAX(r1_1) 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+5
+);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 0
+d 11111111.111
+d 1e61
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d53 0
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 0
+f 1e38
+f 3.40282e38
+f0 0
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f20_3 0.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+999999999999999999999999999999999999999,
+999999999999999999999999999999999999999.9999999999999999,
+1.9,
+999999999999999999999999999999999999999.9,
+999999999999999999.999,
+9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+99,
+1.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+6
+);
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: ''
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 0
+d 11111111.111
+d 1e61
+d 1e65
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d1_0 9
+d53 0
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 0
+f 1e38
+f 3.40282e38
+f 3.40282e38
+f0 0
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f0 3.40282e38
+f20_3 0.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+f23_0 3.40282e38
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) UNSIGNED;
+ERROR 42000: Display width out of range for column 'd0_0' (max = 255)
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) UNSIGNED;
+ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) UNSIGNED;
+ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+CREATE TABLE t1 (
+a DOUBLE UNSIGNED,
+b FLOAT UNSIGNED,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a double unsigned YES NULL
+b float unsigned NO PRI 0
+INSERT INTO t1 (a,b) VALUES (1.0,-1.0);
+Warnings:
+Warning 1264 Out of range value for column 'b' at row 1
+INSERT INTO t1 (a,b) VALUES (-100,100);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT a,b FROM t1;
+a b
+0 100
+1 0
+DROP TABLE t1;
+########################
+# INT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT UNSIGNED,
+i0 INT(0) UNSIGNED,
+i1 INT(1) UNSIGNED,
+i20 INT(20) UNSIGNED,
+t TINYINT UNSIGNED,
+t0 TINYINT(0) UNSIGNED,
+t1 TINYINT(1) UNSIGNED,
+t20 TINYINT(20) UNSIGNED,
+s SMALLINT UNSIGNED,
+s0 SMALLINT(0) UNSIGNED,
+s1 SMALLINT(1) UNSIGNED,
+s20 SMALLINT(20) UNSIGNED,
+m MEDIUMINT UNSIGNED,
+m0 MEDIUMINT(0) UNSIGNED,
+m1 MEDIUMINT(1) UNSIGNED,
+m20 MEDIUMINT(20) UNSIGNED,
+b BIGINT UNSIGNED,
+b0 BIGINT(0) UNSIGNED,
+b1 BIGINT(1) UNSIGNED,
+b20 BIGINT(20) UNSIGNED,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+i int(10) unsigned YES NULL
+i0 int(10) unsigned YES NULL
+i1 int(1) unsigned YES NULL
+i20 int(20) unsigned YES NULL
+t tinyint(3) unsigned YES NULL
+t0 tinyint(3) unsigned YES NULL
+t1 tinyint(1) unsigned YES NULL
+t20 tinyint(20) unsigned YES NULL
+s smallint(5) unsigned YES NULL
+s0 smallint(5) unsigned YES NULL
+s1 smallint(1) unsigned YES NULL
+s20 smallint(20) unsigned YES NULL
+m mediumint(8) unsigned YES NULL
+m0 mediumint(8) unsigned YES NULL
+m1 mediumint(1) unsigned YES NULL
+m20 mediumint(20) unsigned YES NULL
+b bigint(20) unsigned YES NULL
+b0 bigint(20) unsigned YES NULL
+b1 bigint(1) unsigned YES NULL
+b20 bigint(20) unsigned YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 8
+Warning 1264 Out of range value for column 'i0' at row 8
+Warning 1264 Out of range value for column 'i1' at row 8
+Warning 1264 Out of range value for column 'i20' at row 8
+Warning 1264 Out of range value for column 't' at row 8
+Warning 1264 Out of range value for column 't0' at row 8
+Warning 1264 Out of range value for column 't1' at row 8
+Warning 1264 Out of range value for column 't20' at row 8
+Warning 1264 Out of range value for column 's' at row 8
+Warning 1264 Out of range value for column 's0' at row 8
+Warning 1264 Out of range value for column 's1' at row 8
+Warning 1264 Out of range value for column 's20' at row 8
+Warning 1264 Out of range value for column 'm' at row 8
+Warning 1264 Out of range value for column 'm0' at row 8
+Warning 1264 Out of range value for column 'm1' at row 8
+Warning 1264 Out of range value for column 'm20' at row 8
+Warning 1264 Out of range value for column 'i' at row 9
+Warning 1264 Out of range value for column 'i0' at row 9
+Warning 1264 Out of range value for column 'i1' at row 9
+Warning 1264 Out of range value for column 'i20' at row 9
+Warning 1264 Out of range value for column 't' at row 9
+Warning 1264 Out of range value for column 't0' at row 9
+Warning 1264 Out of range value for column 't1' at row 9
+Warning 1264 Out of range value for column 't20' at row 9
+Warning 1264 Out of range value for column 's' at row 9
+Warning 1264 Out of range value for column 's0' at row 9
+Warning 1264 Out of range value for column 's1' at row 9
+Warning 1264 Out of range value for column 's20' at row 9
+Warning 1264 Out of range value for column 'm' at row 9
+Warning 1264 Out of range value for column 'm0' at row 9
+Warning 1264 Out of range value for column 'm1' at row 9
+Warning 1264 Out of range value for column 'm20' at row 9
+Warning 1264 Out of range value for column 'i' at row 10
+Warning 1264 Out of range value for column 'i0' at row 10
+Warning 1264 Out of range value for column 'i1' at row 10
+Warning 1264 Out of range value for column 'i20' at row 10
+Warning 1264 Out of range value for column 't' at row 10
+Warning 1264 Out of range value for column 't0' at row 10
+Warning 1264 Out of range value for column 't1' at row 10
+Warning 1264 Out of range value for column 't20' at row 10
+Warning 1264 Out of range value for column 's' at row 10
+Warning 1264 Out of range value for column 's0' at row 10
+Warning 1264 Out of range value for column 's1' at row 10
+Warning 1264 Out of range value for column 's20' at row 10
+Warning 1264 Out of range value for column 'm' at row 10
+Warning 1264 Out of range value for column 'm0' at row 10
+Warning 1264 Out of range value for column 'm1' at row 10
+Warning 1264 Out of range value for column 'm20' at row 10
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+ALTER TABLE t1 ADD COLUMN i257 INT(257) UNSIGNED;
+ERROR 42000: Display width out of range for column 'i257' (max = 255)
+DROP TABLE t1;
+CREATE TABLE t1 (
+t TINYINT UNSIGNED,
+s SMALLINT UNSIGNED,
+m MEDIUMINT UNSIGNED,
+i INT UNSIGNED,
+b BIGINT UNSIGNED,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+t tinyint(3) unsigned YES NULL
+s smallint(5) unsigned YES NULL
+m mediumint(8) unsigned YES NULL
+i int(10) unsigned YES NULL
+b bigint(20) unsigned NO PRI 0
+INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615);
+INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1);
+Warnings:
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+SELECT t,s,m,i,b FROM t1;
+t s m i b
+0 0 0 0 0
+255 65535 16777215 4294967295 18446744073709551615
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result
new file mode 100644
index 00000000000..823ad2f2fc4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result
@@ -0,0 +1,723 @@
+########################
+# Fixed point columns (NUMERIC, DECIMAL)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DECIMAL ZEROFILL,
+d0 DECIMAL(0) ZEROFILL,
+d1_1 DECIMAL(1,1) ZEROFILL,
+d10_2 DECIMAL(10,2) ZEROFILL,
+d60_10 DECIMAL(60,10) ZEROFILL,
+n NUMERIC ZEROFILL,
+n0_0 NUMERIC(0,0) ZEROFILL,
+n1 NUMERIC(1) ZEROFILL,
+n20_4 NUMERIC(20,4) ZEROFILL,
+n65_4 NUMERIC(65,4) ZEROFILL,
+pk NUMERIC ZEROFILL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d decimal(10,0) unsigned zerofill YES NULL
+d0 decimal(10,0) unsigned zerofill YES NULL
+d1_1 decimal(1,1) unsigned zerofill YES NULL
+d10_2 decimal(10,2) unsigned zerofill YES NULL
+d60_10 decimal(60,10) unsigned zerofill YES NULL
+n decimal(10,0) unsigned zerofill YES NULL
+n0_0 decimal(10,0) unsigned zerofill YES NULL
+n1 decimal(1,0) unsigned zerofill YES NULL
+n20_4 decimal(20,4) unsigned zerofill YES NULL
+n65_4 decimal(65,4) unsigned zerofill YES NULL
+pk decimal(10,0) unsigned zerofill NO PRI NULL
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+6
+);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+Warnings:
+Note 1265 Data truncated for column 'd' at row 1
+Note 1265 Data truncated for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Note 1265 Data truncated for column 'd10_2' at row 1
+Note 1265 Data truncated for column 'd60_10' at row 1
+Note 1265 Data truncated for column 'n' at row 1
+Note 1265 Data truncated for column 'n0_0' at row 1
+Note 1265 Data truncated for column 'n1' at row 1
+Note 1265 Data truncated for column 'n20_4' at row 1
+Note 1265 Data truncated for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000
+0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000
+9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ZEROFILL;
+ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ZEROFILL;
+ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ZEROFILL;
+ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+CREATE TABLE t1 (
+a DECIMAL ZEROFILL,
+b NUMERIC ZEROFILL,
+PRIMARY KEY (a)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a decimal(10,0) unsigned zerofill NO PRI 0000000000
+b decimal(10,0) unsigned zerofill YES NULL
+INSERT INTO t1 (a,b) VALUES (1.1,1234);
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+SELECT a,b FROM t1;
+a b
+0000000001 0000001234
+DROP TABLE t1;
+########################
+# Floating point columns (FLOAT, DOUBLE)
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT ZEROFILL,
+f0 FLOAT(0) ZEROFILL,
+r1_1 REAL(1,1) ZEROFILL,
+f23_0 FLOAT(23) ZEROFILL,
+f20_3 FLOAT(20,3) ZEROFILL,
+d DOUBLE ZEROFILL,
+d1_0 DOUBLE(1,0) ZEROFILL,
+d10_10 DOUBLE PRECISION (10,10) ZEROFILL,
+d53 DOUBLE(53,0) ZEROFILL,
+d53_10 DOUBLE(53,10) ZEROFILL,
+pk DOUBLE ZEROFILL PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+f float unsigned zerofill YES NULL
+f0 float unsigned zerofill YES NULL
+r1_1 double(1,1) unsigned zerofill YES NULL
+f23_0 float unsigned zerofill YES NULL
+f20_3 float(20,3) unsigned zerofill YES NULL
+d double unsigned zerofill YES NULL
+d1_0 double(1,0) unsigned zerofill YES NULL
+d10_10 double(10,10) unsigned zerofill YES NULL
+d53 double(53,0) unsigned zerofill YES NULL
+d53_10 double(53,10) unsigned zerofill YES NULL
+pk double unsigned zerofill NO PRI NULL
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 0000012345.1
+d 000000000011111111.111
+d10_10 0.0123456789
+d1_0 8
+d53 00000000000000000000000000000000001234566789123456800
+d53_10 000000000000000000000000100000000000000000.0000000000
+f0 0000012345.1
+f20_3 0000000000056789.988
+f23_0 000123457000
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+99999999999999999999999999999999999999,
+99999999999999999999999999999999999999.9999999999999999,
+0.9,
+99999999999999999999999999999999999999.9,
+99999999999999999.999,
+999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+9,
+0.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+3
+);
+Warnings:
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 0000012345.1
+d 0000000000000000000000
+d 0000000000000000001e81
+d 000000000011111111.111
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 8
+d1_0 9
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000001234566789123456800
+d53 100000000000000000000000000000000000000000000000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 000000000000
+f 000000001e38
+f0 000000000000
+f0 000000001e38
+f0 0000012345.1
+f20_3 0000000000000000.000
+f20_3 0000000000056789.988
+f20_3 99999998430674940.000
+f23_0 000000000000
+f23_0 000000001e38
+f23_0 000123457000
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 0000012345.1
+d 0000000000000000000000
+d 0000000000000000000000
+d 0000000000000000001e81
+d 000000000011111111.111
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000001234566789123456800
+d53 100000000000000000000000000000000000000000000000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 000000000000
+f 000000000000
+f 000000001e38
+f0 000000000000
+f0 000000000000
+f0 000000001e38
+f0 0000012345.1
+f20_3 0000000000000000.000
+f20_3 0000000000000000.000
+f20_3 0000000000056789.988
+f20_3 99999998430674940.000
+f23_0 000000000000
+f23_0 000000000000
+f23_0 000000001e38
+f23_0 000123457000
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1;
+MAX(f) 9.999999680285692e37
+MAX(d) 1e81
+MAX(d10_10) 0.9999999999
+MAX(d1_0) 9
+MAX(d53) 100000000000000000000000000000000000000000000000000000
+MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000
+MAX(f0) 9.999999680285692e37
+MAX(f20_3) 99999998430674940.000
+MAX(f23_0) 9.999999680285692e37
+MAX(r1_1) 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+5
+);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 0000012345.1
+d 0000000000000000000000
+d 0000000000000000000000
+d 0000000000000000001e61
+d 0000000000000000001e81
+d 000000000011111111.111
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000001234566789123456800
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 000000000000
+f 000000000000
+f 000000001e38
+f 003.40282e38
+f0 000000000000
+f0 000000000000
+f0 000000001e38
+f0 0000012345.1
+f0 003.40282e38
+f20_3 0000000000000000.000
+f20_3 0000000000000000.000
+f20_3 0000000000056789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 000000000000
+f23_0 000000000000
+f23_0 000000001e38
+f23_0 000123457000
+f23_0 003.40282e38
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+999999999999999999999999999999999999999,
+999999999999999999999999999999999999999.9999999999999999,
+1.9,
+999999999999999999999999999999999999999.9,
+999999999999999999.999,
+9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+99,
+1.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+6
+);
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: ''
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 0000012345.1
+d 0000000000000000000000
+d 0000000000000000000000
+d 0000000000000000001e61
+d 0000000000000000001e65
+d 0000000000000000001e81
+d 000000000011111111.111
+d10_10 0.0000000000
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 0
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d1_0 9
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000000000000000000000000
+d53 00000000000000000000000000000000001234566789123456800
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000000000000000000000.0000000000
+d53_10 000000000000000000000000100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 000000000000
+f 000000000000
+f 000000001e38
+f 003.40282e38
+f 003.40282e38
+f0 000000000000
+f0 000000000000
+f0 000000001e38
+f0 0000012345.1
+f0 003.40282e38
+f0 003.40282e38
+f20_3 0000000000000000.000
+f20_3 0000000000000000.000
+f20_3 0000000000056789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 000000000000
+f23_0 000000000000
+f23_0 000000001e38
+f23_0 000123457000
+f23_0 003.40282e38
+f23_0 003.40282e38
+r1_1 0.0
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ZEROFILL;
+ERROR 42000: Display width out of range for column 'd0_0' (max = 255)
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ZEROFILL;
+ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ZEROFILL;
+ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
+CREATE TABLE t1 (
+a DOUBLE ZEROFILL,
+b FLOAT ZEROFILL,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a double unsigned zerofill YES NULL
+b float unsigned zerofill NO PRI 000000000000
+INSERT INTO t1 (a,b) VALUES (1,1234.5);
+SELECT a,b FROM t1;
+a b
+0000000000000000000001 0000001234.5
+DROP TABLE t1;
+########################
+# INT columns
+########################
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT ZEROFILL,
+i0 INT(0) ZEROFILL,
+i1 INT(1) ZEROFILL,
+i20 INT(20) ZEROFILL,
+t TINYINT ZEROFILL,
+t0 TINYINT(0) ZEROFILL,
+t1 TINYINT(1) ZEROFILL,
+t20 TINYINT(20) ZEROFILL,
+s SMALLINT ZEROFILL,
+s0 SMALLINT(0) ZEROFILL,
+s1 SMALLINT(1) ZEROFILL,
+s20 SMALLINT(20) ZEROFILL,
+m MEDIUMINT ZEROFILL,
+m0 MEDIUMINT(0) ZEROFILL,
+m1 MEDIUMINT(1) ZEROFILL,
+m20 MEDIUMINT(20) ZEROFILL,
+b BIGINT ZEROFILL,
+b0 BIGINT(0) ZEROFILL,
+b1 BIGINT(1) ZEROFILL,
+b20 BIGINT(20) ZEROFILL,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+i int(10) unsigned zerofill YES NULL
+i0 int(10) unsigned zerofill YES NULL
+i1 int(1) unsigned zerofill YES NULL
+i20 int(20) unsigned zerofill YES NULL
+t tinyint(3) unsigned zerofill YES NULL
+t0 tinyint(3) unsigned zerofill YES NULL
+t1 tinyint(1) unsigned zerofill YES NULL
+t20 tinyint(20) unsigned zerofill YES NULL
+s smallint(5) unsigned zerofill YES NULL
+s0 smallint(5) unsigned zerofill YES NULL
+s1 smallint(1) unsigned zerofill YES NULL
+s20 smallint(20) unsigned zerofill YES NULL
+m mediumint(8) unsigned zerofill YES NULL
+m0 mediumint(8) unsigned zerofill YES NULL
+m1 mediumint(1) unsigned zerofill YES NULL
+m20 mediumint(20) unsigned zerofill YES NULL
+b bigint(20) unsigned zerofill YES NULL
+b0 bigint(20) unsigned zerofill YES NULL
+b1 bigint(1) unsigned zerofill YES NULL
+b20 bigint(20) unsigned zerofill YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020
+2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020
+2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 8
+Warning 1264 Out of range value for column 'i0' at row 8
+Warning 1264 Out of range value for column 'i1' at row 8
+Warning 1264 Out of range value for column 'i20' at row 8
+Warning 1264 Out of range value for column 't' at row 8
+Warning 1264 Out of range value for column 't0' at row 8
+Warning 1264 Out of range value for column 't1' at row 8
+Warning 1264 Out of range value for column 't20' at row 8
+Warning 1264 Out of range value for column 's' at row 8
+Warning 1264 Out of range value for column 's0' at row 8
+Warning 1264 Out of range value for column 's1' at row 8
+Warning 1264 Out of range value for column 's20' at row 8
+Warning 1264 Out of range value for column 'm' at row 8
+Warning 1264 Out of range value for column 'm0' at row 8
+Warning 1264 Out of range value for column 'm1' at row 8
+Warning 1264 Out of range value for column 'm20' at row 8
+Warning 1264 Out of range value for column 'i' at row 9
+Warning 1264 Out of range value for column 'i0' at row 9
+Warning 1264 Out of range value for column 'i1' at row 9
+Warning 1264 Out of range value for column 'i20' at row 9
+Warning 1264 Out of range value for column 't' at row 9
+Warning 1264 Out of range value for column 't0' at row 9
+Warning 1264 Out of range value for column 't1' at row 9
+Warning 1264 Out of range value for column 't20' at row 9
+Warning 1264 Out of range value for column 's' at row 9
+Warning 1264 Out of range value for column 's0' at row 9
+Warning 1264 Out of range value for column 's1' at row 9
+Warning 1264 Out of range value for column 's20' at row 9
+Warning 1264 Out of range value for column 'm' at row 9
+Warning 1264 Out of range value for column 'm0' at row 9
+Warning 1264 Out of range value for column 'm1' at row 9
+Warning 1264 Out of range value for column 'm20' at row 9
+Warning 1264 Out of range value for column 'i' at row 10
+Warning 1264 Out of range value for column 'i0' at row 10
+Warning 1264 Out of range value for column 'i1' at row 10
+Warning 1264 Out of range value for column 'i20' at row 10
+Warning 1264 Out of range value for column 't' at row 10
+Warning 1264 Out of range value for column 't0' at row 10
+Warning 1264 Out of range value for column 't1' at row 10
+Warning 1264 Out of range value for column 't20' at row 10
+Warning 1264 Out of range value for column 's' at row 10
+Warning 1264 Out of range value for column 's0' at row 10
+Warning 1264 Out of range value for column 's1' at row 10
+Warning 1264 Out of range value for column 's20' at row 10
+Warning 1264 Out of range value for column 'm' at row 10
+Warning 1264 Out of range value for column 'm0' at row 10
+Warning 1264 Out of range value for column 'm1' at row 10
+Warning 1264 Out of range value for column 'm20' at row 10
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000
+0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020
+2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615
+ALTER TABLE t1 ADD COLUMN i257 INT(257) ZEROFILL;
+ERROR 42000: Display width out of range for column 'i257' (max = 255)
+DROP TABLE t1;
+CREATE TABLE t1 (
+t TINYINT ZEROFILL,
+s SMALLINT ZEROFILL,
+m MEDIUMINT ZEROFILL,
+i INT ZEROFILL,
+b BIGINT ZEROFILL,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+t tinyint(3) unsigned zerofill YES NULL
+s smallint(5) unsigned zerofill YES NULL
+m mediumint(8) unsigned zerofill YES NULL
+i int(10) unsigned zerofill YES NULL
+b bigint(20) unsigned zerofill NO PRI 00000000000000000000
+INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0);
+SELECT t,s,m,i,b FROM t1;
+t s m i b
+001 00010 00000100 0000001000 00000000000000000000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result
new file mode 100644
index 00000000000..b6bde05cc70
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result
@@ -0,0 +1,128 @@
+SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.t1.value3 Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_check=0;
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8;
+DROP TABLE t1;
+SET GLOBAL rocksdb_strict_collation_check=1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
+DROP TABLE t1;
+CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin;
+DROP TABLE t1;
+CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb;
+DROP TABLE t1;
+SET GLOBAL rocksdb_strict_collation_exceptions=t1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="t.*";
+CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t123;
+CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.s123.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions=".t.*";
+CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE xt123;
+CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.t123.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions='t1';
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb;
+ALTER TABLE t1 AUTO_INCREMENT=1;
+DROP TABLE t1;
+CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb;
+ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin).
+CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb;
+ALTER TABLE t2 ADD INDEX(value);
+ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (binary, latin1_bin, utf8_bin).
+DROP TABLE t2;
+SET GLOBAL rocksdb_strict_collation_exceptions="[a-b";
+ Invalid pattern in strict_collation_exceptions: [a-b
+CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]";
+CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (binary, latin1_bin, utf8_bin).
+DROP TABLE a, b;
+SET GLOBAL rocksdb_strict_collation_exceptions="abc\\";
+ Invalid pattern in strict_collation_exceptions: abc\
+CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (binary, latin1_bin, utf8_bin).
+SET GLOBAL rocksdb_strict_collation_exceptions="abc";
+CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin).
+DROP TABLE abc;
+SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result
new file mode 100644
index 00000000000..83d72d6c449
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result
@@ -0,0 +1,25 @@
+CREATE TABLE `r1.lol` (
+`c1` int(10) NOT NULL DEFAULT '0',
+`c2` int(11) NOT NULL DEFAULT '0',
+`c3` int(1) NOT NULL DEFAULT '0',
+`c4` int(11) NOT NULL DEFAULT '0',
+`c5` int(11) NOT NULL DEFAULT '0',
+`c6` varchar(100) NOT NULL DEFAULT '',
+`c7` varchar(100) NOT NULL DEFAULT '',
+`c8` varchar(255) NOT NULL DEFAULT '',
+`c9` int(10) NOT NULL DEFAULT '125',
+`c10` int(10) NOT NULL DEFAULT '125',
+`c11` text NOT NULL,
+`c12` int(11) NOT NULL DEFAULT '0',
+`c13` int(10) NOT NULL DEFAULT '0',
+`c14` text NOT NULL,
+`c15` blob NOT NULL,
+`c16` int(11) NOT NULL DEFAULT '0',
+`c17` int(11) NOT NULL DEFAULT '0',
+`c18` int(11) NOT NULL DEFAULT '0',
+PRIMARY KEY (`c1`),
+KEY i1 (`c4`),
+KEY i2 (`c7`),
+KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1;
+DROP INDEX i1 ON `r1.lol`;
+DROP TABLE `r1.lol`;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result
new file mode 100644
index 00000000000..408a93441b9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result
@@ -0,0 +1,93 @@
+DROP TABLE IF EXISTS r1;
+create table r1 (
+id1 int,
+id2 int,
+type int,
+value varchar(100),
+value2 int,
+value3 int,
+primary key (type, id1, id2),
+index id1_type (id1, type, value2, value, id2)
+) engine=rocksdb collate latin1_bin;
+select 'loading data';
+loading data
+loading data
+set global rocksdb_force_flush_memtable_now=1;
+optimize table r1;
+Table Op Msg_type Msg_text
+test.r1 optimize status OK
+Test 1: Do a bunch of updates without setting the compaction sysvar
+Expect: no compaction
+set global rocksdb_compaction_sequential_deletes_window=0;
+set global rocksdb_compaction_sequential_deletes= 0;
+set global rocksdb_compaction_sequential_deletes_file_size=0;
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+sleep(1)
+0
+wait_for_delete: 0
+There are deletes left
+SET GLOBAL rocksdb_compaction_sequential_deletes= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0;
+Test 2: Do a bunch of updates and set the compaction sysvar
+Expect: compaction
+set global rocksdb_compaction_sequential_deletes_window=1000;
+set global rocksdb_compaction_sequential_deletes= 990;
+set global rocksdb_compaction_sequential_deletes_file_size=0;
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+sleep(1)
+0
+wait_for_delete: 1
+No more deletes left
+SET GLOBAL rocksdb_compaction_sequential_deletes= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0;
+Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large
+Expect: no compaction
+set global rocksdb_compaction_sequential_deletes_window=1000;
+set global rocksdb_compaction_sequential_deletes= 1000;
+set global rocksdb_compaction_sequential_deletes_file_size=1000000;
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+sleep(1)
+0
+wait_for_delete: 0
+There are deletes left
+SET GLOBAL rocksdb_compaction_sequential_deletes= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0;
+Test 4: Do a bunch of secondary key updates and set the compaction sysvar
+Expect: compaction
+set global rocksdb_compaction_sequential_deletes_window=1000;
+set global rocksdb_compaction_sequential_deletes= 50;
+set global rocksdb_compaction_sequential_deletes_file_size=0;
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+sleep(1)
+0
+wait_for_delete: 1
+No more deletes left
+SET GLOBAL rocksdb_compaction_sequential_deletes= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0;
+Test 5: Do a bunch of secondary key updates and set the compaction sysvar,
+and rocksdb_compaction_sequential_deletes_count_sd turned on
+Expect: compaction
+SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd;
+SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON;
+set global rocksdb_compaction_sequential_deletes_window=1000;
+set global rocksdb_compaction_sequential_deletes= 50;
+set global rocksdb_compaction_sequential_deletes_file_size=0;
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+sleep(1)
+0
+wait_for_delete: 1
+No more deletes left
+SET GLOBAL rocksdb_compaction_sequential_deletes= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0;
+SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd;
+drop table r1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result
new file mode 100644
index 00000000000..62a6dbbdaca
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result
@@ -0,0 +1,2 @@
+create table t (id int primary key) engine=rocksdb;
+drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result
new file mode 100644
index 00000000000..396f80a2ecb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result
@@ -0,0 +1,12 @@
+DROP DATABASE IF EXISTS mysqlslap;
+CREATE DATABASE mysqlslap;
+use mysqlslap;
+CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB;
+INSERT INTO a1 VALUES (1, 1);
+SHOW CREATE TABLE a1;
+Table Create Table
+a1 CREATE TABLE `a1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP DATABASE mysqlslap;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result
new file mode 100644
index 00000000000..d75a548e6ff
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result
@@ -0,0 +1,151 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 0
+connection con1;
+COMMIT;
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 0
+connection con1;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+connection con2;
+INSERT INTO t1 (a) VALUES (1);
+connection con1;
+# If consistent read works on this isolation level (READ COMMITTED), the following SELECT should not return the value we inserted (1)
+SELECT a FROM t1;
+a
+1
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4);
+BEGIN;
+connection con2;
+INSERT INTO r1 values (5,5,5);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+connection con2;
+INSERT INTO r1 values (6,6,6);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+COMMIT;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+connection con2;
+INSERT INTO r1 values (7,7,7);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+connection con2;
+INSERT INTO r1 values (8,8,8);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+COMMIT;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+connection con2;
+INSERT INTO r1 values (9,9,9);
+connection con1;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+connection con2;
+INSERT INTO r1 values (10,10,10);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 1105
+INSERT INTO r1 values (11,11,11);
+ERROR: 0
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+11 11 11
+drop table r1;
+connection default;
+disconnect con1;
+disconnect con2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result
new file mode 100644
index 00000000000..7458e6b72c3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result
@@ -0,0 +1,144 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 1
+connection con1;
+COMMIT;
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+STAT_TYPE VALUE
+DB_NUM_SNAPSHOTS 0
+connection con1;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+connection con2;
+INSERT INTO t1 (a) VALUES (1);
+connection con1;
+# If consistent read works on this isolation level (REPEATABLE READ), the following SELECT should not return the value we inserted (1)
+SELECT a FROM t1;
+a
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4);
+BEGIN;
+connection con2;
+INSERT INTO r1 values (5,5,5);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+connection con2;
+INSERT INTO r1 values (6,6,6);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+COMMIT;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+connection con2;
+INSERT INTO r1 values (7,7,7);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+connection con2;
+INSERT INTO r1 values (8,8,8);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+COMMIT;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+connection con2;
+INSERT INTO r1 values (9,9,9);
+connection con1;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+connection con2;
+INSERT INTO r1 values (10,10,10);
+connection con1;
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR: 0
+INSERT INTO r1 values (11,11,11);
+ERROR: 1105
+SELECT * FROM r1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+drop table r1;
+connection default;
+disconnect con1;
+disconnect con2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result
new file mode 100644
index 00000000000..9c55b0dd689
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result
@@ -0,0 +1,24 @@
+# -- WARNING ----------------------------------------------------------------
+# According to I_S.ENGINES, does not support transactions.
+# If it is true, the test will most likely fail; you can
+# either create an rdiff file, or add the test to disabled.def.
+# If transactions should be supported, check the data in Information Schema.
+# ---------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection con2;
+INSERT INTO t1 (a) VALUES (1);
+connection con1;
+# If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
+SELECT a FROM t1;
+a
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result
new file mode 100644
index 00000000000..28c200ebf30
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result
@@ -0,0 +1,74 @@
+#
+# Test how MyRocks handles reading corrupted data from disk.
+# Data corruption is simulated at source-code level.
+#
+#
+# A test for case when data in the table *record* is longer
+# than table DDL expects it to be
+#
+create table t1 (
+pk int not null primary key,
+col1 varchar(10)
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+select * from t1;
+pk col1
+1 1
+2 2
+3 3
+set @tmp1=@@rocksdb_verify_checksums;
+set rocksdb_verify_checksums=1;
+set session debug= "+d,myrocks_simulate_bad_row_read1";
+select * from t1 where pk=1;
+ERROR HY000: Got error 122 from storage engine
+set session debug= "-d,myrocks_simulate_bad_row_read1";
+set rocksdb_verify_checksums=@tmp1;
+select * from t1 where pk=1;
+pk col1
+1 1
+set session debug= "+d,myrocks_simulate_bad_row_read2";
+select * from t1 where pk=1;
+ERROR HY000: Got error 122 from storage engine
+set session debug= "-d,myrocks_simulate_bad_row_read2";
+set session debug= "+d,myrocks_simulate_bad_row_read3";
+select * from t1 where pk=1;
+ERROR HY000: Got error 122 from storage engine
+set session debug= "-d,myrocks_simulate_bad_row_read3";
+insert into t1 values(4,'0123456789');
+select * from t1;
+pk col1
+1 1
+2 2
+3 3
+4 0123456789
+drop table t1;
+#
+# A test for case when index data is longer than table DDL
+# expects it to be
+#
+create table t2 (
+pk varchar(4) not null primary key,
+col1 int not null
+) engine=rocksdb collate latin1_bin;
+insert into t2 values ('ABCD',1);
+select * from t2;
+pk col1
+ABCD 1
+set session debug= "+d,myrocks_simulate_bad_pk_read1";
+select * from t2;
+ERROR HY000: Got error 122 from storage engine
+set session debug= "-d,myrocks_simulate_bad_pk_read1";
+drop table t2;
+create table t2 (
+pk varchar(4) not null primary key,
+col1 int not null
+) engine=rocksdb;
+insert into t2 values ('ABCD',1);
+select * from t2;
+pk col1
+ABCD 1
+set session debug= "+d,myrocks_simulate_bad_pk_read1";
+select * from t2;
+ERROR HY000: Got error 122 from storage engine
+set session debug= "-d,myrocks_simulate_bad_pk_read1";
+drop table t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/create_table.result b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result
new file mode 100644
index 00000000000..8c879d82611
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result
@@ -0,0 +1,165 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+Warnings:
+Note 1050 Table 't1' already exists
+CREATE TABLE t2 LIKE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb;
+ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY'
+DROP TABLE t2;
+DROP TABLE IF EXISTS t1;
+SET default_storage_engine = rocksdb;
+CREATE TABLE t1 (a INT PRIMARY KEY);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT * FROM t1;
+a
+1
+2
+FLUSH LOGS;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT,
+c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT,
+c18 INT,c19 INT,c20 INT,c21 INT,c22 INT,c23 INT,c24 INT,c25 INT,
+c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT,
+c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT,
+c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 INT,
+c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT,
+c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT,
+c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT,
+c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT,
+c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT,
+c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT,
+c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT,
+c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT,
+c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT,
+c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT,
+c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT,
+c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT,
+c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT,
+c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT,
+c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT,
+c161 INT,c162 INT,c163 INT,c164 INT,c165 INT,c166 INT,c167 INT,
+c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT,
+c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT,
+c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT,
+c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT,
+c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT,
+c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT,
+c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT,
+c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT,
+c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT,
+c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT,
+c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT,
+c245 INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT,
+c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT,
+c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT,
+c266 INT,c267 INT,c268 INT,c269 INT,c270 INT,c271 INT,c272 INT,
+c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT,
+c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT,
+c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT,
+c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT,
+c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT,
+c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT,
+c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT,
+c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT,
+c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT,
+c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT,
+c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT,
+c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT,
+c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT,
+c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT,
+c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 INT,c377 INT,
+c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT,
+c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT,
+c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT,
+c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT,
+c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT,
+c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT,
+c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT,
+c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT,
+c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT,
+c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT,
+c448 INT,
+KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14),
+KEY (c15,c16,c17,c18,c19,c20,c21),KEY (c22,c23,c24,c25,c26,c27,c28),
+KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42),
+KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56),
+KEY (c57,c58,c59,c60,c61,c62,c63),KEY (c64,c65,c66,c67,c68,c69,c70),
+KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84),
+KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98),
+KEY (c99,c100,c101,c102,c103,c104,c105),
+KEY (c106,c107,c108,c109,c110,c111,c112),
+KEY (c113,c114,c115,c116,c117,c118,c119),
+KEY (c120,c121,c122,c123,c124,c125,c126),
+KEY (c127,c128,c129,c130,c131,c132,c133),
+KEY (c134,c135,c136,c137,c138,c139,c140),
+KEY (c141,c142,c143,c144,c145,c146,c147),
+KEY (c148,c149,c150,c151,c152,c153,c154),
+KEY (c155,c156,c157,c158,c159,c160,c161),
+KEY (c162,c163,c164,c165,c166,c167,c168),
+KEY (c169,c170,c171,c172,c173,c174,c175),
+KEY (c176,c177,c178,c179,c180,c181,c182),
+KEY (c183,c184,c185,c186,c187,c188,c189),
+KEY (c190,c191,c192,c193,c194,c195,c196),
+KEY (c197,c198,c199,c200,c201,c202,c203),
+KEY (c204,c205,c206,c207,c208,c209,c210),
+KEY (c211,c212,c213,c214,c215,c216,c217),
+KEY (c218,c219,c220,c221,c222,c223,c224),
+KEY (c225,c226,c227,c228,c229,c230,c231),
+KEY (c232,c233,c234,c235,c236,c237,c238),
+KEY (c239,c240,c241,c242,c243,c244,c245),
+KEY (c246,c247,c248,c249,c250,c251,c252),
+KEY (c253,c254,c255,c256,c257,c258,c259),
+KEY (c260,c261,c262,c263,c264,c265,c266),
+KEY (c267,c268,c269,c270,c271,c272,c273),
+KEY (c274,c275,c276,c277,c278,c279,c280),
+KEY (c281,c282,c283,c284,c285,c286,c287),
+KEY (c288,c289,c290,c291,c292,c293,c294),
+KEY (c295,c296,c297,c298,c299,c300,c301),
+KEY (c302,c303,c304,c305,c306,c307,c308),
+KEY (c309,c310,c311,c312,c313,c314,c315),
+KEY (c316,c317,c318,c319,c320,c321,c322),
+KEY (c323,c324,c325,c326,c327,c328,c329),
+KEY (c330,c331,c332,c333,c334,c335,c336),
+KEY (c337,c338,c339,c340,c341,c342,c343),
+KEY (c344,c345,c346,c347,c348,c349,c350),
+KEY (c351,c352,c353,c354,c355,c356,c357),
+KEY (c358,c359,c360,c361,c362,c363,c364),
+KEY (c365,c366,c367,c368,c369,c370,c371),
+KEY (c372,c373,c374,c375,c376,c377,c378),
+KEY (c379,c380,c381,c382,c383,c384,c385),
+KEY (c386,c387,c388,c389,c390,c391,c392),
+KEY (c393,c394,c395,c396,c397,c398,c399),
+KEY (c400,c401,c402,c403,c404,c405,c406),
+KEY (c407,c408,c409,c410,c411,c412,c413),
+KEY (c414,c415,c416,c417,c418,c419,c420),
+KEY (c421,c422,c423,c424,c425,c426,c427),
+KEY (c428,c429,c430,c431,c432,c433,c434),
+KEY (c435,c436,c437,c438,c439,c440,c441),
+KEY (c442,c443,c444,c445,c446,c447,c448));
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result
new file mode 100644
index 00000000000..3e2f5709ca0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result
@@ -0,0 +1,37 @@
+#
+# Validate that deadlock errors don't occur with a high level of concurrency
+#
+# Disable for valgrind because this takes too long
+DROP DATABASE IF EXISTS mysqlslap;
+CREATE DATABASE mysqlslap;
+USE mysqlslap;
+CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb;
+CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb;
+SET @save = @@global.rocksdb_lock_wait_timeout;
+SET GLOBAL rocksdb_lock_wait_timeout = 60;
+SELECT count from t1;
+count
+50000
+SELECT count from t1;
+count
+100000
+SELECT count from t1;
+count
+150000
+SELECT count from t1;
+count
+200000
+SELECT count from t1rev;
+count
+50000
+SELECT count from t1rev;
+count
+100000
+SELECT count from t1rev;
+count
+150000
+SELECT count from t1rev;
+count
+200000
+SET GLOBAL rocksdb_lock_wait_timeout = @save;
+DROP DATABASE mysqlslap;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete.result b/storage/rocksdb/mysql-test/rocksdb/r/delete.result
new file mode 100644
index 00000000000..8ec3c50f466
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/delete.result
@@ -0,0 +1,166 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+DELETE FROM t1 WHERE b IN ('c');
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+DELETE FROM t1 WHERE a < 0 OR b = 'a';
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+5 e
+5 e
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+SELECT c,d FROM t2;
+c d
+b 2
+b 2
+d 4
+e 5
+e 5
+foobar 10000
+foobar 10000
+DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+5 e
+5 e
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+2 b
+2 b
+4 d
+5 e
+5 e
+SELECT c,d FROM t2;
+c d
+DELETE FROM t1;
+SELECT a,b FROM t1;
+a b
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+BEGIN;
+DELETE FROM t1 WHERE b IN ('c');
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+6 f
+6 f
+7 g
+7 g
+8 h
+8 h
+DELETE FROM t1 WHERE a < 0 OR b = 'a';
+COMMIT;
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+6 f
+6 f
+7 g
+7 g
+8 h
+8 h
+BEGIN;
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+SAVEPOINT spt1;
+DELETE FROM t1;
+RELEASE SAVEPOINT spt1;
+ROLLBACK;
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+6 f
+6 f
+7 g
+7 g
+8 h
+8 h
+BEGIN;
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+SAVEPOINT spt1;
+DELETE FROM t1;
+INSERT INTO t1 (a,b) VALUES (1,'a');
+ROLLBACK TO SAVEPOINT spt1;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+COMMIT;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+SELECT a,b FROM t1;
+a b
+10000 foobar
+10000 foobar
+2 b
+2 b
+4 d
+4 d
+5 e
+5 e
+6 f
+6 f
+7 g
+7 g
+8 h
+8 h
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result
new file mode 100644
index 00000000000..402ef539ffd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result
@@ -0,0 +1,15 @@
+set debug_sync='RESET';
+drop table if exists t1;
+create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb;
+insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2);
+set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go';
+update t1 set value=100 where id1=1;
+set debug_sync='now WAIT_FOR parked';
+delete from t1 where id1=1 and id2=1;
+set debug_sync='now SIGNAL go';
+select * from t1 where id1=1 for update;
+id1 id2 value
+1 2 100
+1 3 100
+set debug_sync='RESET';
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result
new file mode 100644
index 00000000000..1f017dfb990
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result
@@ -0,0 +1,59 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+10000 foobar
+10000 foobar
+2 b
+2 b
+3 c
+3 c
+4 d
+4 d
+5 e
+5 e
+SELECT c,d FROM t2;
+c d
+a 1
+a 1
+b 2
+b 2
+c 3
+c 3
+d 4
+d 4
+e 5
+e 5
+foobar 10000
+foobar 10000
+DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1;
+SELECT a,b FROM t1;
+a b
+1 a
+10000 foobar
+10000 foobar
+2 b
+2 b
+3 c
+3 c
+4 d
+4 d
+5 e
+5 e
+DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+Warnings:
+Error 1242 Subquery returns more than 1 row
+SELECT a,b FROM t1;
+a b
+1 a
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result
new file mode 100644
index 00000000000..4173d875a82
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result
@@ -0,0 +1,24 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo';
+SELECT a,b FROM t1;
+a b
+2 b
+3 c
+4 d
+5 e
+CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+SELECT c,d FROM t2;
+c d
+b 2
+c 3
+d 4
+e 5
+DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d;
+SELECT a,b FROM t1;
+a b
+SELECT c,d FROM t2;
+c d
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result
new file mode 100644
index 00000000000..c94708b872f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+DELETE FROM t1 WHERE b > 'y';
+DELETE FROM t1 WHERE a=2;
+SELECT a,b FROM t1;
+a b
+1 a
+3 c
+4 d
+5 e
+6 x
+7 y
+DELETE FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+DELETE FROM t1 WHERE b > 'y';
+DELETE FROM t1 WHERE a=2;
+SELECT a,b FROM t1;
+a b
+1 a
+3 c
+4 d
+5 e
+6 x
+7 y
+DELETE FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9);
+DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
+SELECT a,b,c FROM t1;
+a b c
+1 2 3
+4 5 6
+7 8 9
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/describe.result b/storage/rocksdb/mysql-test/rocksdb/r/describe.result
new file mode 100644
index 00000000000..ec828577ae5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/describe.result
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS t1, t2, t3;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b');
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8;
+INSERT INTO t2 (a,b) VALUES (1, 'bar');
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8;
+DESCRIBE t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI 0
+b char(8) YES NULL
+DESC t2 a;
+Field Type Null Key Default Extra
+a int(11) YES NULL
+DESCRIBE t3 '%';
+Field Type Null Key Default Extra
+a int(11) YES NULL
+b char(8) YES NULL
+pk int(11) NO PRI NULL auto_increment
+DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result
new file mode 100644
index 00000000000..f220efe9f38
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result
@@ -0,0 +1,6 @@
+DROP DATABASE IF EXISTS test_drop_database;
+CREATE DATABASE test_drop_database;
+CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB;
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a);
+DROP TABLE t1;
+DROP DATABASE test_drop_database;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result
new file mode 100644
index 00000000000..dfa5c5b2590
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result
@@ -0,0 +1,154 @@
+drop table if exists t1;
+CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`b`),
+ KEY `ka` (`a`),
+ KEY `kb` (`a`,`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+INSERT INTO t1 (a) VALUES (1);
+INSERT INTO t1 (a) VALUES (3);
+INSERT INTO t1 (a) VALUES (5);
+ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`b`),
+ KEY `kb` (`a`,`b`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+SELECT * FROM t1 FORCE INDEX(ka) where a > 1;
+ERROR 42000: Key 'ka' doesn't exist in table 't1'
+SELECT * FROM t1 FORCE INDEX(kb) where a > 1;
+a b
+3 2
+5 3
+SELECT * FROM t1 where b > 1;
+a b
+3 2
+5 3
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `kb` (`b`),
+ KEY `kbc` (`b`,`c`),
+ KEY `kc` (`c`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `kc` (`c`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `kc` (`c`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+SELECT * FROM t1 FORCE INDEX(kc) where c > 3;
+a b c
+2 3 4
+3 5 6
+5 3 4
+6 5 6
+SELECT * FROM t1 where b > 3;
+a b c
+3 5 6
+6 5 6
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
+t1 1 kb 1 b A 0 NULL NULL YES LSMTREE
+t1 1 kbc 1 b A 0 NULL NULL YES LSMTREE
+t1 1 kbc 2 c A 0 NULL NULL YES LSMTREE
+t1 1 kc 1 c A 0 NULL NULL YES LSMTREE
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
+t1 1 kc 1 c A 0 NULL NULL YES LSMTREE
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 1 kc 1 c A 0 NULL NULL YES LSMTREE
+ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+ALTER TABLE t1 ADD UNIQUE INDEX kb(b);
+ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c);
+ALTER TABLE t1 ADD UNIQUE INDEX kc(c);
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
+t1 0 kb 1 b A 0 NULL NULL YES LSMTREE
+t1 0 kbc 1 b A 0 NULL NULL YES LSMTREE
+t1 0 kbc 2 c A 0 NULL NULL YES LSMTREE
+t1 0 kc 1 c A 0 NULL NULL YES LSMTREE
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
+t1 0 kc 1 c A 0 NULL NULL YES LSMTREE
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+SELECT * FROM t1 FORCE INDEX(kc) where c > 3;
+a b c
+2 3 4
+3 5 6
+ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT);
+INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
+ALTER TABLE t1 ADD KEY idx ( col1, col2 );
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ALTER TABLE t1 DROP COLUMN col2;
+ALTER TABLE t1 DROP COLUMN col3;
+DROP TABLE t1;
+CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT);
+INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
+ALTER TABLE t1 ADD KEY idx ( col1, col2 );
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ALTER TABLE t1 DROP COLUMN col2;
+ALTER TABLE t1 DROP COLUMN col3;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result
new file mode 100644
index 00000000000..7d0fae229da
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result
@@ -0,0 +1,71 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t2 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t3 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t4 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t1;
+DELETE FROM t2;
+DELETE FROM t3;
+DELETE FROM t4;
+drop table t2;
+DELETE FROM t1;
+DELETE FROM t4;
+drop table t3;
+DELETE FROM t1;
+DELETE FROM t4;
+drop table t4;
+CREATE TABLE t5 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t5;
+drop table t5;
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+Begin filtering dropped index+ 0
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Begin filtering dropped index+ 1
+Finished filtering dropped index+ 0
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+Finished filtering dropped index+ 1
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result
new file mode 100644
index 00000000000..c46d3522dd7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result
@@ -0,0 +1,53 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t2 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t3 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+CREATE TABLE t4 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t1;
+DELETE FROM t2;
+DELETE FROM t3;
+DELETE FROM t4;
+DELETE FROM t1;
+DELETE FROM t4;
+DELETE FROM t1;
+DELETE FROM t4;
+CREATE TABLE t5 (
+a int not null,
+b int not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t5;
+drop table t1;
+drop table t2;
+drop table t3;
+drop table t4;
+drop table t5;
+Compacted
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result
new file mode 100644
index 00000000000..c69d789c12a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS t1;
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c varchar(500) not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t1;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+drop table t1;
+select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+case when variable_value-@a < 500000 then 'true' else 'false' end
+true
+DROP TABLE IF EXISTS t1;
+Warnings:
+Note 1051 Unknown table 'test.t1'
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result
new file mode 100644
index 00000000000..954335debf2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result
@@ -0,0 +1,362 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t1 (id1 INT, id2 INT, id3 INT,
+PRIMARY KEY (id1, id2, id3),
+UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB;
+CREATE TABLE t2 (id1 INT, id2 INT, id3 INT,
+PRIMARY KEY (id1, id2, id3),
+UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB;
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 1 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 1 1
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 10 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 10 1
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 11 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 11 1
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 12 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 12 5
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 13 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 13 5
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 14 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 14 5
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 15 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 15 9
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 16 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 16 9
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t1;
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+SELECT * FROM t1 FORCE INDEX (id3);
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 1 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 1 1
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 10 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 10 1
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 11 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 11 1
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 12 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 12 5
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 13 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 13 5
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 14 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 14 5
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 15 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 15 9
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 16 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 16 9
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t2;
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+SELECT * FROM t2 FORCE INDEX (id3);
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+DROP TABLE t1;
+DROP TABLE t2;
+CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin,
+id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin,
+id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+PRIMARY KEY (id1, id2, id3),
+UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB;
+CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin,
+id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin,
+id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+PRIMARY KEY (id1, id2, id3),
+UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB;
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 1 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 1 1
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 10 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 10 1
+INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11;
+SELECT * FROM t1 WHERE id1 = 1;
+id1 id2 id3
+1 11 1
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 11 1
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 12 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 12 5
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 13 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 13 5
+INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14;
+SELECT * FROM t1 WHERE id1 = 5;
+id1 id2 id3
+5 14 5
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 14 5
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 15 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 15 9
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 16 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 16 9
+INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17;
+SELECT * FROM t1 WHERE id1 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t1;
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+SELECT * FROM t1 FORCE INDEX (id3);
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 1 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 1 1
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 10 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 10 1
+INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11;
+SELECT * FROM t2 WHERE id1 = 1;
+id1 id2 id3
+1 11 1
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1;
+id1 id2 id3
+1 11 1
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 12 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 12 5
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 13 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 13 5
+INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14;
+SELECT * FROM t2 WHERE id1 = 5;
+id1 id2 id3
+5 14 5
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5;
+id1 id2 id3
+5 14 5
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 15 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 15 9
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 16 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 16 9
+INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17;
+SELECT * FROM t2 WHERE id1 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9;
+id1 id2 id3
+9 17 9
+SELECT * FROM t2;
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+SELECT * FROM t2 FORCE INDEX (id3);
+id1 id2 id3
+1 11 1
+2 2 2
+3 3 3
+4 4 4
+5 14 5
+6 6 6
+7 7 7
+8 8 8
+9 17 9
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result
new file mode 100644
index 00000000000..ba16aaa6d35
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result
@@ -0,0 +1,15 @@
+DROP TABLE IF EXISTS t;
+CREATE TABLE t(id int primary key) engine=rocksdb;
+INSERT INTO t values (1), (2), (3);
+CREATE TABLE t(id int primary key) engine=rocksdb;
+ERROR 42S01: Table 't' already exists
+FLUSH TABLES;
+CREATE TABLE t(id int primary key) engine=rocksdb;
+ERROR HY000: Table 'test.t' does not exist, but metadata information exists inside MyRocks. This is a sign of data inconsistency. Please check if './test/t.frm' exists, and try to restore it if it does not exist.
+FLUSH TABLES;
+SELECT * FROM t;
+id
+1
+2
+3
+DROP TABLE t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result
new file mode 100644
index 00000000000..1ae56ae5f05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result
@@ -0,0 +1,4 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB;
+ERROR HY000: Incorrect arguments to column family not valid for storing index data
+DROP TABLE IF EXISTS t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result
new file mode 100644
index 00000000000..483be726bb3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result
@@ -0,0 +1,25 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (b INT PRIMARY KEY);
+CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b));
+ERROR 42000: MyRocks does not currently support foreign key constraints
+CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL);
+DROP TABLE t2;
+CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL);
+DROP TABLE t2;
+CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b));
+ERROR 42000: MyRocks does not currently support foreign key constraints
+CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL);
+ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b);
+ERROR 42000: MyRocks does not currently support foreign key constraints
+DROP TABLE t2;
+CREATE TABLE t2 (a INT NOT NULL);
+ALTER TABLE t2 ADD bforeign INT NOT NULL;
+DROP TABLE t2;
+CREATE TABLE t2 (a INT NOT NULL);
+ALTER TABLE t2 ADD foreignkey INT NOT NULL;
+DROP TABLE t2;
+CREATE TABLE t2 (a INT NOT NULL);
+ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b);
+ERROR 42000: MyRocks does not currently support foreign key constraints
+DROP TABLE t2;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result
new file mode 100644
index 00000000000..d42041183c8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result
@@ -0,0 +1,9 @@
+create table t (id int primary key, value int);
+begin;
+update t set value=100 where id in (1, 2);
+commit;
+begin;
+select * from t for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from t for update
+commit;
+drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result
new file mode 100644
index 00000000000..c1cf1e77ecf
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result
@@ -0,0 +1,504 @@
+drop table if exists gap1,gap2,gap3;
+CREATE DATABASE mysqlslap;
+CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT,
+PRIMARY KEY (id1, id2, id3),
+INDEX i (c1)) ENGINE=rocksdb;
+CREATE TABLE gap2 like gap1;
+CREATE TABLE gap3 (id INT, value INT,
+PRIMARY KEY (id),
+UNIQUE KEY ui(value)) ENGINE=rocksdb;
+insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5);
+create table gap4 (
+pk int primary key,
+a int,
+b int,
+key(a)
+) ENGINE=rocksdb;
+insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+create table gap5 like gap4;
+insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+set session gap_lock_raise_error=1;
+set session gap_lock_write_log=1;
+set @save_gap_lock_write_log = @@gap_lock_write_log;
+set @save_gap_lock_raise_error = @@gap_lock_raise_error;
+set gap_lock_write_log = 1;
+set gap_lock_raise_error = 0;
+begin;
+update gap4 set a= (select 1+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b);
+1
+update gap4 set a= (select 2+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b);
+update gap4 set a= (select 3+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b);
+1
+1
+0
+flush logs;
+0
+rollback;
+set gap_lock_write_log = @save_gap_lock_write_log;
+set gap_lock_raise_error = @save_gap_lock_raise_error;
+set global gap_lock_write_log = 1;
+set global gap_lock_write_log = 0;
+1000
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update
+select * from gap1 where value != 100 limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update
+select * from gap1 where id1=1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 for update
+select * from gap1 where id1=1 and id2= 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 for update
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update
+select * from gap1 order by id1 asc limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc limit 1 for update
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update
+select * from gap1 order by id1 desc limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 for update
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update
+select * from gap1 force index(i) where c1=1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 for update
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+set session autocommit=1;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 for update;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+set session autocommit=0;
+select * from gap1 limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 lock in share mode
+select * from gap1 where value != 100 limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 lock in share mode
+select * from gap1 where id1=1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 lock in share mode
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 lock in share mode
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc limit 1 lock in share mode
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 lock in share mode
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode
+select * from gap1 force index(i) where c1=1 lock in share mode;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 lock in share mode
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+set session autocommit=1;
+select * from gap1 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 lock in share mode;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+set session autocommit=0;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+set session autocommit=1;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+set session autocommit=0;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set value=100 where id1=1
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: delete from gap1 where id1=2
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+set session autocommit=1;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+set session autocommit=1;
+insert into gap2 select * from gap1;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1
+insert into gap2 select * from gap1 where id1=1;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1 where id1=1
+insert into gap2 select * from gap1 where id1=1 and id2=1 and id3=1;
+create table t4 select * from gap1 where id1=1 and id2=1 and id3=1;
+drop table t4;
+create table t4 select * from gap1;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1
+create table t4 select * from gap1 where id1=1;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1 where id1=1
+update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3
+and gap2.id2=3 and gap2.id3=3;
+update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3
+update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id
+set gap1.value=100 where gap2.id1=3;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id
+set gap1.value=100 where gap2.id1=3
+update gap1 set gap1.value= (select count(*) from gap2);
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set gap1.value= (select count(*) from gap2)
+delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3
+and gap2.id2=3 and gap2.id3=3;
+delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3
+select * from gap1, gap2 limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1, gap2 limit 1 for update
+select * from gap1 a, gap1 b limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 a, gap1 b limit 1 for update
+create table u1(
+c1 int,
+c2 int,
+c3 int,
+c4 int,
+primary key (c1, c2, c3),
+unique key (c3, c1)
+);
+set session gap_lock_raise_error=1;
+begin;
+insert into u1 values (1,1,1,1);
+commit;
+begin;
+insert into u1 values (1,2,1,1) on duplicate key update c4=10;
+commit;
+begin;
+select * from u1 where c3=1 and c1 = 1 for update;
+c1 c2 c3 c4
+1 1 1 10
+select * from u1 where c3=1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from u1 where c3=1 for update
+commit;
+drop table u1;
+set global gap_lock_write_log= 0;
+set global gap_lock_raise_error= 0;
+drop table if exists gap1, gap2, gap3, gap4, gap5;
+DROP DATABASE mysqlslap;
+0
+SET GLOBAL gap_lock_log_file='<GAP_LOCK_ORIG>';
+SET GLOBAL gap_lock_log_file='<GAP_LOCK>';
+flush general logs;
+SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions;
+SET GLOBAL gap_lock_exceptions="t.*";
+drop table if exists gap1,gap2,gap3;
+CREATE DATABASE mysqlslap;
+CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT,
+PRIMARY KEY (id1, id2, id3),
+INDEX i (c1)) ENGINE=rocksdb;
+CREATE TABLE gap2 like gap1;
+CREATE TABLE gap3 (id INT, value INT,
+PRIMARY KEY (id),
+UNIQUE KEY ui(value)) ENGINE=rocksdb;
+insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5);
+create table gap4 (
+pk int primary key,
+a int,
+b int,
+key(a)
+) ENGINE=rocksdb;
+insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+create table gap5 like gap4;
+insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+set session gap_lock_raise_error=1;
+set session gap_lock_write_log=1;
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update
+select * from gap1 where value != 100 limit 1 for update;
+ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update
+set global gap_lock_write_log= 0;
+set global gap_lock_raise_error= 0;
+drop table if exists gap1, gap2, gap3, gap4, gap5;
+DROP DATABASE mysqlslap;
+0
+SET GLOBAL gap_lock_log_file='<GAP_LOCK_ORIG>';
+SET GLOBAL gap_lock_log_file='<GAP_LOCK>';
+flush general logs;
+SET GLOBAL gap_lock_exceptions="gap.*";
+drop table if exists gap1,gap2,gap3;
+CREATE DATABASE mysqlslap;
+CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT,
+PRIMARY KEY (id1, id2, id3),
+INDEX i (c1)) ENGINE=rocksdb;
+CREATE TABLE gap2 like gap1;
+CREATE TABLE gap3 (id INT, value INT,
+PRIMARY KEY (id),
+UNIQUE KEY ui(value)) ENGINE=rocksdb;
+insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5);
+create table gap4 (
+pk int primary key,
+a int,
+b int,
+key(a)
+) ENGINE=rocksdb;
+insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+create table gap5 like gap4;
+insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+set session gap_lock_raise_error=1;
+set session gap_lock_write_log=1;
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+set global gap_lock_write_log= 0;
+set global gap_lock_raise_error= 0;
+drop table if exists gap1, gap2, gap3, gap4, gap5;
+DROP DATABASE mysqlslap;
+0
+SET GLOBAL gap_lock_log_file='<GAP_LOCK_ORIG>';
+SET GLOBAL gap_lock_log_file='<GAP_LOCK>';
+flush general logs;
+SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions;
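The block above exercises the gap-lock detector introduced in this tree: with gap_lock_raise_error=1, a locking statement inside a multi-statement transaction is rejected unless its WHERE clause pins rows through full unique-key equality, while gap_lock_exceptions exempts tables whose names match the given pattern (which is why the same two SELECTs succeed once the pattern is "gap.*"). A minimal sketch of the rule, assuming the gap1/u1 schemas used in these tests:

set session gap_lock_raise_error=1;
set session autocommit=0;
# full primary-key equality locks a single row, so no gap lock is needed
select * from gap1 where id1=1 and id2=1 and id3=1 for update;
# a LIMIT/range scan would need a gap lock and fails with the HY000 error shown above
select * from gap1 limit 1 for update;
# tables matching gap_lock_exceptions bypass the check (as the second run above shows)
set global gap_lock_exceptions="gap.*";
select * from gap1 limit 1 for update;
commit;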
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result
new file mode 100644
index 00000000000..04dcac1fcb4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3;
+SHOW TABLES;
+Tables_in_test
+t1
+ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index');
+ERROR 42000: Incorrect table name 'H:/mysqltest/p3Index'
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result
new file mode 100644
index 00000000000..1ab8bd7678c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result
@@ -0,0 +1,115 @@
+DROP TABLE IF EXISTS t1;
+FLUSH STATUS;
+CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT,
+INDEX b(b)) ENGINE=rocksdb;
+INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50);
+SHOW SESSION STATUS LIKE 'Handler_write%';
+Variable_name Value
+Handler_write 3
+UPDATE t1 SET b=1000 WHERE id=1;
+SHOW SESSION STATUS LIKE 'Handler_update%';
+Variable_name Value
+Handler_update 1
+DELETE FROM t1 WHERE id=2;
+SHOW SESSION STATUS LIKE 'Handler_delete%';
+Variable_name Value
+Handler_delete 1
+INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
+SHOW SESSION STATUS LIKE 'Handler_write%';
+Variable_name Value
+Handler_write 10
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id=8;
+id a b
+8 NULL 8
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 0
+FLUSH STATUS;
+SELECT * FROM t1 WHERE b=6;
+id a b
+6 NULL 6
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 1
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 0
+FLUSH STATUS;
+SELECT * FROM t1;
+id a b
+1 foobar 1000
+10 NULL 10
+3 bar 50
+4 NULL 4
+5 NULL 5
+6 NULL 6
+7 NULL 7
+8 NULL 8
+9 NULL 9
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 0
+Handler_read_last 0
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 10
+FLUSH STATUS;
+SELECT * FROM t1 WHERE b <=5 ORDER BY b;
+id a b
+4 NULL 4
+5 NULL 5
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 2
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 0
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id >=8 ORDER BY id;
+id a b
+8 NULL 8
+9 NULL 9
+10 NULL 10
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 3
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 0
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id < 8 ORDER BY id;
+id a b
+1 foobar 1000
+3 bar 50
+4 NULL 4
+5 NULL 5
+6 NULL 6
+7 NULL 7
+SHOW SESSION STATUS LIKE 'Handler_read%';
+Variable_name Value
+Handler_read_first 1
+Handler_read_key 0
+Handler_read_last 0
+Handler_read_next 6
+Handler_read_prev 0
+Handler_read_rnd 0
+Handler_read_rnd_next 0
+DROP TABLE t1;
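Taken together, the handler_basic results work as a reference for the Handler_* counters: Handler_write/Handler_update/Handler_delete count row changes, Handler_read_key counts index dives, Handler_read_next counts forward steps along an index or primary-key range, Handler_read_first marks a scan started from the first index entry, and Handler_read_rnd_next counts next-row requests during a full table scan. A small sketch of the usual checking pattern, assuming the same t1 table (exact counter values depend on the plan the optimizer picks):

FLUSH STATUS;                              # reset the session counters
SELECT * FROM t1 WHERE b BETWEEN 4 AND 6;  # range over secondary index b
SHOW SESSION STATUS LIKE 'Handler_read%';  # expect one Handler_read_key plus a few Handler_read_next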
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result
new file mode 100644
index 00000000000..e4d080289dc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result
@@ -0,0 +1,648 @@
+DROP TABLE IF EXISTS test;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connect con3,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connection con1;
+create table test (id int primary key, value int) engine=rocksdb;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test;
+id value
+1 10
+2 20
+update test set value = 101 where id = 1;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+connection con1;
+rollback;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 101 where id = 1;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+connection con1;
+update test set value = 11 where id = 1;
+commit;
+connection con2;
+select * from test;
+id value
+1 11
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 22 where id = 2;
+connection con1;
+select * from test where id = 2;
+id value
+2 20
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+connection con1;
+commit;
+connection con2;
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 11 where id = 1;
+update test set value = 19 where id = 2;
+connection con2;
+update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+connection con3;
+select * from test;
+id value
+1 11
+2 19
+connection con2;
+update test set value = 18 where id = 2;
+connection con3;
+select * from test;
+id value
+1 11
+2 19
+connection con2;
+commit;
+connection con3;
+select * from test;
+id value
+1 12
+2 18
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value = 30;
+id value
+connection con2;
+insert into test (id, value) values(3, 30);
+commit;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+3 30
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = value + 10;
+connection con2;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors';
+select * from test;
+id value
+1 10
+2 20
+delete from test where value = 20;
+connection con1;
+commit;
+connection con2;
+select * from test;
+id value
+2 30
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+select * from test;
+id value
+1 12
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+select * from test where id = 2;
+id value
+2 20
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+select * from test where id = 2;
+id value
+2 18
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value % 5 = 0;
+id value
+1 10
+2 20
+connection con2;
+update test set value = 12 where value = 10;
+commit;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+1 12
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+delete from test where value = 20;
+select * from test where id = 2;
+id value
+2 18
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id in (1,2);
+id value
+1 10
+2 20
+connection con2;
+select * from test where id in (1,2);
+id value
+1 10
+2 20
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 21 where id = 2;
+connection con1;
+commit;
+connection con2;
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+connection con2;
+select * from test where value % 3 = 0;
+id value
+connection con1;
+insert into test (id, value) values(3, 30);
+connection con2;
+insert into test (id, value) values(4, 42);
+connection con1;
+commit;
+connection con2;
+commit;
+select * from test where value % 3 = 0;
+id value
+3 30
+4 42
+connection con1;
+select * from test where value % 3 = 0;
+id value
+3 30
+4 42
+connection default;
+drop table test;
+disconnect con1;
+disconnect con2;
+disconnect con3;
+DROP TABLE IF EXISTS test;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connect con3,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connection con1;
+create table test (id int primary key, value int) engine=rocksdb;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test;
+id value
+1 10
+2 20
+update test set value = 101 where id = 1;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+connection con1;
+rollback;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 101 where id = 1;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+connection con1;
+update test set value = 11 where id = 1;
+commit;
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 22 where id = 2;
+connection con1;
+select * from test where id = 2;
+id value
+2 20
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+connection con1;
+commit;
+connection con2;
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = 11 where id = 1;
+update test set value = 19 where id = 2;
+connection con2;
+update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+connection con3;
+select * from test;
+id value
+1 11
+2 19
+connection con2;
+update test set value = 18 where id = 2;
+connection con3;
+select * from test;
+id value
+1 11
+2 19
+connection con2;
+commit;
+connection con3;
+select * from test;
+id value
+1 11
+2 19
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value = 30;
+id value
+connection con2;
+insert into test (id, value) values(3, 30);
+commit;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+update test set value = value + 10;
+connection con2;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors';
+select * from test;
+id value
+1 10
+2 20
+delete from test where value = 20;
+connection con1;
+commit;
+connection con2;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors';
+variable_value-@a
+1
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test where id = 1;
+id value
+1 10
+select * from test where id = 2;
+id value
+2 20
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+select * from test where id = 2;
+id value
+2 20
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value % 5 = 0;
+id value
+1 10
+2 20
+connection con2;
+update test set value = 12 where value = 10;
+commit;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id = 1;
+id value
+1 10
+connection con2;
+select * from test;
+id value
+1 10
+2 20
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+delete from test where value = 20;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where id in (1,2);
+id value
+1 10
+2 20
+connection con2;
+select * from test where id in (1,2);
+id value
+1 10
+2 20
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 21 where id = 2;
+connection con1;
+commit;
+connection con2;
+commit;
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
+connection con1;
+select * from test where value % 3 = 0;
+id value
+connection con2;
+select * from test where value % 3 = 0;
+id value
+connection con1;
+insert into test (id, value) values(3, 30);
+connection con2;
+insert into test (id, value) values(4, 42);
+connection con1;
+commit;
+connection con2;
+commit;
+select * from test where value % 3 = 0;
+id value
+3 30
+4 42
+connection con1;
+select * from test where value % 3 = 0;
+id value
+3 30
+4 42
+connection default;
+drop table test;
+disconnect con1;
+disconnect con2;
+disconnect con3;
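hermitage.result replays a battery of two- and three-connection isolation scenarios (apparently modelled on the public "hermitage" isolation test suite), first at READ COMMITTED and then at REPEATABLE READ. The interesting difference sits in the second half: under REPEATABLE READ, writes that conflict with a transaction's snapshot are aborted with ERROR 40001 and bump the rocksdb_snapshot_conflict_errors counter instead of producing lost updates. A minimal sketch of that conflict path, in the same connection-switching style as the test:

connection con1;
begin;
select * from test where id = 1;           # both transactions read value 10
connection con2;
begin;
select * from test where id = 1;
connection con1;
update test set value = 11 where id = 1;
connection con2;
update test set value = 12 where id = 1;   # waits on the row lock held by con1
connection con1;
commit;
connection con2;
# the blocked UPDATE now returns ERROR 40001 under REPEATABLE READ;
# under READ COMMITTED it succeeds instead and the final value is 12
commit;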
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result
new file mode 100644
index 00000000000..a0fd7a13780
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS is_ddl_t1;
+DROP TABLE IF EXISTS is_ddl_t2;
+CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT,
+PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf')
+ENGINE = ROCKSDB;
+CREATE TABLE is_ddl_t2 (x INT, y INT, z INT,
+PRIMARY KEY (z, y) COMMENT 'zy_cf',
+KEY (x)) ENGINE = ROCKSDB;
+SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF
+test is_ddl_t2 NULL PRIMARY 1 11 zy_cf
+test is_ddl_t2 NULL x 2 11 default
+test is_ddl_t1 NULL PRIMARY 1 11 default
+test is_ddl_t1 NULL j 2 11 default
+test is_ddl_t1 NULL k 2 11 kl_cf
+DROP TABLE is_ddl_t1;
+DROP TABLE is_ddl_t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index.result b/storage/rocksdb/mysql-test/rocksdb/r/index.result
new file mode 100644
index 00000000000..f61bad7c4a9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index.result
@@ -0,0 +1,42 @@
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY (a)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY a_b (a,b) COMMENT 'a_b index'
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index
+t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY (a),
+KEY (b)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+t1 1 b 1 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'z');
+ALTER TABLE t1 ADD KEY (a) COMMENT 'simple index on a';
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a
+ALTER TABLE t1 DROP KEY a;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result
new file mode 100644
index 00000000000..c3e54a25864
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result
@@ -0,0 +1,28 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB;
+CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB;
+INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
+INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4);
+COMMIT;
+SET GLOBAL rocksdb_force_flush_memtable_now = 1;
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
+COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS
+# # SSTNAME 5 # # # # #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j");
+COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS
+# # SSTNAME 5 # # # # #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY");
+COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS
+# # SSTNAME 4 # # # # #
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result
new file mode 100644
index 00000000000..b0113d79bb2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result
@@ -0,0 +1,51 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY (a) KEY_BLOCK_SIZE=8
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY ind1(b ASC) KEY_BLOCK_SIZE=0
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 ind1 1 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value'
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A # 1 NULL LSMTREE big key_block_size value
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT AUTO_INCREMENT PRIMARY KEY,
+KEY a_b(a,b) KEY_BLOCK_SIZE=8192
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a_b 1 a A # NULL NULL YES LSMTREE
+t1 1 a_b 2 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'z');
+ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192;
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result
new file mode 100644
index 00000000000..66481f81c67
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result
@@ -0,0 +1,48 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY,
+b CHAR(8)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A # NULL NULL LSMTREE
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+INSERT INTO t1 (a,b) VALUES (1,'c');
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY,
+b CHAR(8) PRIMARY KEY
+) ENGINE=rocksdb;
+ERROR 42000: Multiple primary key defined
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+PRIMARY KEY (a,b)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A # NULL NULL LSMTREE
+t1 0 PRIMARY 2 b A # NULL NULL LSMTREE
+INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b');
+INSERT INTO t1 (a,b) VALUES (1,'b');
+ERROR 23000: Duplicate entry '1-b' for key 'PRIMARY'
+DROP TABLE t1;
+CREATE TABLE t1 (a INT KEY,
+b CHAR(8),
+KEY (b)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A # NULL NULL LSMTREE
+t1 1 b 1 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8) PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A # NULL NULL LSMTREE
+ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a);
+ERROR 42000: Multiple primary key defined
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A # NULL NULL LSMTREE
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result
new file mode 100644
index 00000000000..a604663954b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result
@@ -0,0 +1,42 @@
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY USING BTREE (a)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY a_b USING BTREE (a,b) COMMENT 'a_b index'
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index
+t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY USING BTREE (a),
+KEY USING BTREE (b)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+t1 1 b 1 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'z');
+ALTER TABLE t1 ADD KEY (a) USING BTREE COMMENT 'simple index on a';
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a
+ALTER TABLE t1 DROP KEY a;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result
new file mode 100644
index 00000000000..ae99badff14
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result
@@ -0,0 +1,42 @@
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY USING HASH (a)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY a_b USING HASH (a,b) COMMENT 'a_b index'
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index
+t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index
+DROP TABLE t1;
+CREATE TABLE t1 (a INT,
+b CHAR(8),
+pk INT PRIMARY KEY,
+KEY USING HASH (a),
+KEY USING HASH (b)
+) ENGINE=rocksdb;
+SHOW KEYS IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE
+t1 1 b 1 b A # NULL NULL YES LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'z');
+ALTER TABLE t1 ADD KEY (a) USING HASH COMMENT 'simple index on a';
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE
+t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a
+ALTER TABLE t1 DROP KEY a;
+DROP TABLE t1;
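The three index_type_* results make the same point from different angles: MyRocks accepts USING BTREE and USING HASH in index definitions but ignores them, and SHOW KEYS/SHOW INDEX always reports the actual structure as LSMTREE. A one-table sketch (t_idx is a hypothetical name):

CREATE TABLE t_idx (a INT, pk INT PRIMARY KEY, KEY USING BTREE (a)) ENGINE=rocksdb;
SHOW KEYS IN t_idx;    # Index_type column reads LSMTREE for both keys
DROP TABLE t_idx;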
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result
new file mode 100644
index 00000000000..d6177a3f019
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result
@@ -0,0 +1,78 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+TYPE NAME VALUE
+MAX_INDEX_ID MAX_INDEX_ID max_index_id
+CF_FLAGS 0 default [0]
+CF_FLAGS 1 __system__ [0]
+select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+count(*)
+3
+CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
+INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+TYPE NAME VALUE
+BINLOG FILE master-bin.000001
+BINLOG POS 1066
+BINLOG GTID uuid:5
+MAX_INDEX_ID MAX_INDEX_ID max_index_id
+CF_FLAGS 0 default [0]
+CF_FLAGS 1 __system__ [0]
+select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+count(*)
+6
+CREATE INDEX tindex1 on t1 (i1);
+CREATE INDEX tindex2 on t1 (i2);
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS';
+TYPE NAME VALUE
+CF_FLAGS 0 default [0]
+CF_FLAGS 1 __system__ [0]
+CREATE TABLE t2 (
+a int,
+b int,
+c int,
+d int,
+e int,
+PRIMARY KEY (a) COMMENT "cf_a",
+KEY (b) COMMENT "cf_b",
+KEY (c) COMMENT "cf_c",
+KEY (d) COMMENT "$per_index_cf",
+KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB;
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS';
+TYPE NAME VALUE
+CF_FLAGS 0 default [0]
+CF_FLAGS 1 __system__ [0]
+CF_FLAGS 2 cf_a [0]
+CF_FLAGS 3 cf_b [0]
+CF_FLAGS 4 cf_c [0]
+CF_FLAGS 5 test.t2.d [2]
+CF_FLAGS 6 rev:cf_d [1]
+CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB;
+insert into t3 (a) values (1), (2), (3);
+SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+Variable_name Value
+rocksdb_pause_background_work OFF
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+Variable_name Value
+rocksdb_pause_background_work ON
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+Variable_name Value
+rocksdb_pause_background_work ON
+DROP TABLE t3;
+cf_id:0,index_id:268
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+Variable_name Value
+rocksdb_pause_background_work OFF
+next line shouldn't cause assertion to fail
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+Variable_name Value
+rocksdb_pause_background_work OFF
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK;
+DROP TABLE t1;
+DROP TABLE t2;
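information_schema.result also shows how MyRocks maps index COMMENTs to RocksDB column families, as reflected in the CF_FLAGS rows above: a plain comment names a shared column family, a "rev:" prefix creates a reverse-ordered one, and the special "$per_index_cf" comment creates a dedicated column family named after the schema, table and key (test.t2.d above); the bracketed flag value appears to record the reverse/per-index attributes. A compact sketch using the same three comment forms (the cf names and the cf_demo table are illustrative):

CREATE TABLE cf_demo (
  a INT, b INT, c INT,
  PRIMARY KEY (a) COMMENT "cf_demo_pk",   # ordinary named column family
  KEY kb (b) COMMENT "rev:cf_demo_rev",   # reverse-ordered column family
  KEY kc (c) COMMENT "$per_index_cf"      # one column family per index (test.cf_demo.kc)
) ENGINE=ROCKSDB;
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'CF_FLAGS';
DROP TABLE cf_demo;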
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result
new file mode 100644
index 00000000000..6d8d9685a79
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result
@@ -0,0 +1,120 @@
+SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX;
+trx_id trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_adaptive_hash_timeout trx_is_read_only trx_autocommit_non_locking
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_TRX but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS;
+FILE OPERATION REQUESTS SLOW BYTES BYTES/R SVC:SECS SVC:MSECS/R SVC:MAX_MSECS WAIT:SECS WAIT:MSECS/R WAIT:MAX_MSECS
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FILE_STATUS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS;
+lock_id lock_trx_id lock_mode lock_type lock_table lock_index lock_space lock_page lock_rec lock_data
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCKS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS;
+requesting_trx_id requested_lock_id blocking_trx_id blocking_lock_id
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_LOCK_WAITS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP;
+page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET;
+page_size compress_ops compress_ops_ok compress_time compress_ok_time compress_primary_ops compress_primary_ops_ok compress_primary_time compress_primary_ok_time compress_secondary_ops compress_secondary_ops_ok compress_secondary_time compress_secondary_ok_time uncompress_ops uncompress_time uncompress_primary_ops uncompress_primary_time uncompress_secondary_ops uncompress_secondary_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_RESET but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX;
+database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET;
+database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM;
+page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMPMEM but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET;
+page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_CMPMEM_RESET but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS;
+NAME SUBSYSTEM COUNT MAX_COUNT MIN_COUNT AVG_COUNT COUNT_RESET MAX_COUNT_RESET MIN_COUNT_RESET AVG_COUNT_RESET TIME_ENABLED TIME_DISABLED TIME_ELAPSED TIME_RESET STATUS TYPE COMMENT
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_METRICS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD;
+value
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED;
+DOC_ID
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_DELETED but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED;
+DOC_ID
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE;
+WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
+WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG;
+KEY VALUE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_FT_CONFIG but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS;
+POOL_ID POOL_SIZE FREE_BUFFERS DATABASE_PAGES OLD_DATABASE_PAGES MODIFIED_DATABASE_PAGES PENDING_DECOMPRESS PENDING_READS PENDING_FLUSH_LRU PENDING_FLUSH_LIST PAGES_MADE_YOUNG PAGES_NOT_MADE_YOUNG PAGES_MADE_YOUNG_RATE PAGES_MADE_NOT_YOUNG_RATE NUMBER_PAGES_READ NUMBER_PAGES_CREATED NUMBER_PAGES_WRITTEN PAGES_READ_RATE PAGES_CREATE_RATE PAGES_WRITTEN_RATE NUMBER_PAGES_GET HIT_RATE YOUNG_MAKE_PER_THOUSAND_GETS NOT_YOUNG_MAKE_PER_THOUSAND_GETS NUMBER_PAGES_READ_AHEAD NUMBER_READ_AHEAD_EVICTED READ_AHEAD_RATE READ_AHEAD_EVICTED_RATE LRU_IO_TOTAL LRU_IO_CURRENT UNCOMPRESS_TOTAL UNCOMPRESS_CURRENT
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE;
+POOL_ID BLOCK_ID SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE PAGE_STATE IO_FIX IS_OLD FREE_PAGE_CLOCK
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU;
+POOL_ID LRU_POSITION SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE COMPRESSED IO_FIX IS_OLD FREE_PAGE_CLOCK
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES;
+TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLES but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS;
+TABLE_ID NAME STATS_INITIALIZED NUM_ROWS CLUST_INDEX_SIZE OTHER_INDEX_SIZE MODIFIED_COUNTER AUTOINC REF_COUNT
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES;
+INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_INDEXES but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS;
+TABLE_ID NAME POS MTYPE PRTYPE LEN
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_COLUMNS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS;
+INDEX_ID NAME POS
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FIELDS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN;
+ID FOR_NAME REF_NAME N_COLS TYPE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS;
+ID FOR_COL_NAME REF_COL_NAME POS
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES;
+SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE ZIP_PAGE_SIZE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES;
+SPACE PATH
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DATAFILES but the InnoDB storage engine is not installed
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS;
+INDEX_ID POS DOCUMENT_PATH DOCUMENT_TYPE
+Warnings:
+Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS but the InnoDB storage engine is not installed
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert.result b/storage/rocksdb/mysql-test/rocksdb/r/insert.result
new file mode 100644
index 00000000000..a1fb3ae90be
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/insert.result
@@ -0,0 +1,202 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106);
+SELECT a,b FROM t1;
+a b
+1 a
+100 foobar
+2 b
+3 c
+4 d
+5 e
+INSERT t1 VALUE (10,'foo',107),(11,'abc',108);
+SELECT a,b FROM t1;
+a b
+1 a
+10 foo
+100 foobar
+11 abc
+2 b
+3 c
+4 d
+5 e
+INSERT INTO t1 (b,a) VALUES ('test',0);
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 foo
+100 foobar
+11 abc
+2 b
+3 c
+4 d
+5 e
+INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL);
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 foo
+100 foobar
+11 abc
+2 b
+3 c
+4 d
+5 e
+NULL NULL
+INSERT t1 (a) VALUE (10),(20);
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 NULL
+10 foo
+100 foobar
+11 abc
+2 b
+20 NULL
+3 c
+4 d
+5 e
+NULL NULL
+INSERT INTO t1 SET a = 11, b = 'f';
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 NULL
+10 foo
+100 foobar
+11 abc
+11 f
+2 b
+20 NULL
+3 c
+4 d
+5 e
+NULL NULL
+INSERT t1 SET b = DEFAULT;
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 NULL
+10 foo
+100 foobar
+11 abc
+11 f
+2 b
+20 NULL
+3 c
+4 d
+5 e
+NULL NULL
+NULL NULL
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 SELECT a,b,pk FROM t1;
+INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo';
+SELECT a,b FROM t1;
+a b
+0 test
+1 a
+10 NULL
+10 NULL
+10 foo
+100 foobar
+11 abc
+11 f
+2 b
+20 NULL
+3 c
+4 d
+5 e
+NULL NULL
+NULL NULL
+INSERT t1 (a,b) SELECT a,b FROM t1;
+SELECT a,b FROM t1;
+a b
+0 test
+0 test
+1 a
+1 a
+10 NULL
+10 NULL
+10 NULL
+10 NULL
+10 foo
+10 foo
+100 foobar
+100 foobar
+11 abc
+11 abc
+11 f
+11 f
+2 b
+2 b
+20 NULL
+20 NULL
+3 c
+3 c
+4 d
+4 d
+5 e
+5 e
+NULL NULL
+NULL NULL
+NULL NULL
+NULL NULL
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+BEGIN;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo');
+INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc');
+COMMIT;
+SELECT a,b FROM t1;
+a b
+1 a
+10 foo
+100 foo
+11 abc
+2 b
+3 c
+4 d
+5 e
+BEGIN;
+INSERT INTO t1 (b,a) VALUES ('test',0);
+SAVEPOINT spt1;
+INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT);
+RELEASE SAVEPOINT spt1;
+INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT);
+ROLLBACK;
+SELECT a,b FROM t1;
+a b
+1 a
+10 foo
+100 foo
+11 abc
+2 b
+3 c
+4 d
+5 e
+BEGIN;
+INSERT t1 (a) VALUE (10),(20);
+SAVEPOINT spt1;
+INSERT INTO t1 SET a = 11, b = 'f';
+INSERT t1 SET b = DEFAULT;
+ROLLBACK TO SAVEPOINT spt1;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+INSERT INTO t1 (b,a) VALUES ('test1',10);
+COMMIT;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+SELECT a,b FROM t1;
+a b
+1 a
+10 foo
+100 foo
+11 abc
+2 b
+3 c
+4 d
+5 e
+DROP TABLE t1;
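The transactional tail of insert.result pins down two MyRocks behaviours visible above: ROLLBACK TO SAVEPOINT is refused once the transaction has modified rows, and after that failure the transaction is marked rollback-only, so a subsequent COMMIT is rejected and only ROLLBACK succeeds. A minimal sketch, assuming the same t1 table:

BEGIN;
INSERT INTO t1 (a,b) VALUES (50,'x');
SAVEPOINT spt1;
INSERT INTO t1 (a,b) VALUES (51,'y');
ROLLBACK TO SAVEPOINT spt1;   # fails: not supported after modifying rows
COMMIT;                       # fails: transaction was rolled back, cannot be committed
ROLLBACK;                     # the only way out; none of the inserts persist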
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result
new file mode 100644
index 00000000000..ded48057854
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS t1;
+create table t1(
+id bigint not null primary key,
+i1 bigint, #unique
+i2 bigint, #repeating
+c1 varchar(20), #unique
+c2 varchar(20), #repeating
+index t1_2(i1)
+) engine=rocksdb;
+select count(*), sum(id), sum(i1), sum(i2) from t1;
+count(*) sum(id) sum(i1) sum(i2)
+50000 1250025000 1250025000 124980000
+select count(*), sum(id), sum(i1), sum(i2) from t1;
+count(*) sum(id) sum(i1) sum(i2)
+50000 1250025000 1250025000 124980000
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result
new file mode 100644
index 00000000000..9d0fef276e9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result
@@ -0,0 +1,63 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (3,'a'),(0,'');
+SELECT a,b FROM t1;
+a b
+0
+1 a
+1 a
+100 a
+12345 z
+2 b
+29 n
+3 a
+3 c
+30 m
+4 d
+5 e
+6 f
+DROP TABLE t1;
+#----------------------------------------
+# UNIQUE KEYS are not supported currently
+#-----------------------------------------
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+INSERT INTO t1 (a,b) VALUES (3,'a'),(0,'');
+ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
+INSERT INTO t1 (a,b) VALUES (0,'');
+SELECT a,b FROM t1;
+a b
+0
+1 a
+100 a
+2 b
+29 n
+3 c
+30 m
+4 d
+5 e
+6 f
+INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b);
+SELECT a,b FROM t1;
+a b
+0
+1 aa
+100 a
+12345 zz
+2 b
+29 n
+3 c
+30 m
+4 d
+5 e
+6 f
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result
new file mode 100644
index 00000000000..ee73ac3e134
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result
@@ -0,0 +1,23 @@
+create table t1 (
+id int,
+value int,
+primary key (id)
+) engine=rocksdb;
+insert into t1 values(1,1),(2,2);
+set autocommit=0;
+begin;
+insert into t1 values (50,50);
+select * from t1;
+id value
+1 1
+2 2
+50 50
+update t1 set id=id+100;
+select * from t1;
+id value
+101 1
+102 2
+150 50
+rollback;
+set autocommit=1;
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result
new file mode 100644
index 00000000000..9e55ebd006f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result
@@ -0,0 +1,17 @@
+create table ten(a int primary key);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+create table t100(pk int primary key, a int, b int, key(a));
+insert into t100 select a,a,a from test.one_k;
+set global rocksdb_force_flush_memtable_now=1;
+select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes;
+num_rows entry_deletes entry_singledeletes
+1000 0 0
+update t100 set a=a+1;
+set global rocksdb_force_flush_memtable_now=1;
+select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes;
+num_rows entry_deletes entry_singledeletes
+1000 0 0
+1000 0 1000
+drop table ten, t100, one_k;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result
new file mode 100644
index 00000000000..315d2d2b50b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result
@@ -0,0 +1,32 @@
+create table t1 (
+pk int not null primary key,
+col1 int not null,
+col2 int not null,
+key(col1)
+) engine=rocksdb;
+create table ten(a int primary key);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+insert into t1 select a,a,a from one_k;
+# Start the transaction, get the snapshot
+begin;
+select * from t1 where col1<10;
+pk col1 col2
+0 0 0
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+# Connect with another connection and make a conflicting change
+begin;
+update t1 set col2=123456 where pk=0;
+commit;
+update t1 set col2=col2+1 where col1 < 10 limit 5;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+drop table t1, ten, one_k;
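issue111.result isolates the snapshot-conflict mechanism: the first read in a transaction pins a snapshot, a second connection commits a conflicting change, and the original transaction's locking UPDATE then fails with ERROR 40001 instead of silently overwriting the newer committed version. The rocksdb_snapshot_conflict_errors status counter (checked the same way in hermitage.result) tracks these aborts; a minimal sketch of the pattern:

select variable_value into @c from information_schema.global_status
where variable_name = 'rocksdb_snapshot_conflict_errors';
begin;
select * from t1 where col1 < 10;            # pins the snapshot
# ... another connection commits: update t1 set col2=123456 where pk=0;
update t1 set col2=col2+1 where col1 < 10;   # expected to fail with ERROR 40001
rollback;
select variable_value - @c from information_schema.global_status
where variable_name = 'rocksdb_snapshot_conflict_errors';   # expected to have grown by 1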
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue290.result b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result
new file mode 100644
index 00000000000..8b1a35648c0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result
@@ -0,0 +1,28 @@
+CREATE TABLE `linktable` (
+`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+`id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+`link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+`visibility` tinyint(3) NOT NULL DEFAULT '0',
+`data` varchar(255) NOT NULL DEFAULT '',
+`time` bigint(20) unsigned NOT NULL DEFAULT '0',
+`version` int(11) unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk',
+KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+set global rocksdb_force_flush_memtable_now=1;
+insert into linktable (id1, link_type, id2) values (2, 1, 1);
+insert into linktable (id1, link_type, id2) values (2, 1, 2);
+insert into linktable (id1, link_type, id2) values (2, 1, 3);
+insert into linktable (id1, link_type, id2) values (2, 1, 4);
+insert into linktable (id1, link_type, id2) values (2, 1, 5);
+insert into linktable (id1, link_type, id2) values (2, 1, 6);
+insert into linktable (id1, link_type, id2) values (2, 1, 7);
+insert into linktable (id1, link_type, id2) values (2, 1, 8);
+insert into linktable (id1, link_type, id2) values (2, 1, 9);
+insert into linktable (id1, link_type, id2) values (2, 1, 10);
+explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where
+drop table linktable;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue314.result b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result
new file mode 100644
index 00000000000..eee90800286
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result
@@ -0,0 +1,12 @@
+drop table if exists t1;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a int);
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+INSERT INTO t1 VALUES(1);
+select * from t1;
+ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level SERIALIZABLE
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+select * from t1;
+a
+1
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result
new file mode 100644
index 00000000000..b68b37cf6c5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result
@@ -0,0 +1,111 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connection con1;
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+START TRANSACTION;
+SELECT a FROM t1;
+a
+connection con2;
+BEGIN;
+INSERT INTO t1 (a) VALUES(1);
+connection con1;
+SELECT a FROM t1;
+a
+connection con2;
+INSERT INTO t1 (a) VALUES (2);
+connection con1;
+SELECT a FROM t1;
+a
+INSERT INTO t1 (a) SELECT a+100 FROM t1;
+SELECT a FROM t1;
+a
+connection con2;
+SELECT a FROM t1;
+a
+1
+2
+COMMIT;
+SELECT a FROM t1;
+a
+1
+2
+connection con1;
+SELECT a FROM t1;
+a
+1
+2
+INSERT INTO t1 (a) SELECT a+200 FROM t1;
+SELECT a FROM t1;
+a
+1
+2
+201
+202
+COMMIT;
+SELECT a FROM t1;
+a
+1
+2
+201
+202
+connection con2;
+SELECT a FROM t1;
+a
+1
+2
+201
+202
+connection default;
+CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (a) VALUES (1);
+COMMIT;
+connection con1;
+BEGIN;
+SELECT a from t2;
+a
+1
+INSERT INTO t2 (a) VALUES (1), (3);
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+connection con2;
+INSERT INTO t2 (a) VALUES (2);
+COMMIT;
+connection con1;
+SELECT a from t2;
+a
+1
+2
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
+CREATE TABLE t3 (
+pk int unsigned PRIMARY KEY,
+count int unsigned DEFAULT '0'
+) ENGINE=ROCKSDB;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+connection con1;
+BEGIN;
+SELECT * FROM t3;
+pk count
+connection con2;
+BEGIN;
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+connection con1;
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+SELECT count FROM t3;
+count
+1
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result
new file mode 100644
index 00000000000..68fbe5632cb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result
@@ -0,0 +1,116 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection con1;
+CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+START TRANSACTION;
+SELECT a FROM t1;
+a
+connection con2;
+BEGIN;
+INSERT INTO t1 (a) VALUES(1);
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+connection con1;
+SELECT a FROM t1;
+a
+1
+connection con2;
+INSERT INTO t1 (a) VALUES (2);
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+connection con1;
+SELECT a FROM t1;
+a
+1
+2
+INSERT INTO t1 (a) SELECT a+100 FROM t1;
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+connection con2;
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+COMMIT;
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+connection con1;
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+INSERT INTO t1 (a) SELECT a+200 FROM t1;
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+201
+202
+301
+302
+COMMIT;
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+201
+202
+301
+302
+connection con2;
+SELECT a FROM t1;
+a
+1
+101
+102
+2
+201
+202
+301
+302
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection con2;
+INSERT INTO t1 (a) VALUES (1);
+connection con1;
+# If consistent read works on this isolation level (READ UNCOMMITTED), the following SELECT should not return the value we inserted (1)
+SELECT a FROM t1;
+a
+1
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result
new file mode 100644
index 00000000000..13da8a0ffeb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result
@@ -0,0 +1,100 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connection con1;
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+START TRANSACTION;
+SELECT a FROM t1;
+a
+connection con2;
+BEGIN;
+INSERT INTO t1 (a) VALUES(1);
+connection con1;
+SELECT a FROM t1;
+a
+connection con2;
+INSERT INTO t1 (a) VALUES (2);
+connection con1;
+SELECT a FROM t1;
+a
+INSERT INTO t1 (a) SELECT a+100 FROM t1;
+SELECT a FROM t1;
+a
+connection con2;
+SELECT a FROM t1;
+a
+1
+2
+COMMIT;
+SELECT a FROM t1;
+a
+1
+2
+connection con1;
+SELECT a FROM t1;
+a
+INSERT INTO t1 (a) SELECT a+200 FROM t1;
+SELECT a FROM t1;
+a
+COMMIT;
+SELECT a FROM t1;
+a
+1
+2
+connection con2;
+SELECT a FROM t1;
+a
+1
+2
+connection default;
+CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (a) VALUES (1);
+COMMIT;
+connection con1;
+BEGIN;
+SELECT a from t2;
+a
+1
+INSERT INTO t2 (a) VALUES (1), (3);
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+connection con2;
+INSERT INTO t2 (a) VALUES (2);
+COMMIT;
+connection con1;
+SELECT a from t2;
+a
+1
+COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
+CREATE TABLE t3 (
+pk int unsigned PRIMARY KEY,
+count int unsigned DEFAULT '0'
+) ENGINE=ROCKSDB;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+connection con1;
+BEGIN;
+SELECT * FROM t3;
+pk count
+connection con2;
+BEGIN;
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+connection con1;
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+SELECT count FROM t3;
+count
+0
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result
new file mode 100644
index 00000000000..3f57395fa37
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result
@@ -0,0 +1,56 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+connect con2,localhost,root,,;
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+connection con1;
+CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+START TRANSACTION;
+SELECT a FROM t1;
+a
+connection con2;
+BEGIN;
+INSERT INTO t1 (a) VALUES(1);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
+# If it differs from the result file, it might indicate a problem.
+connection con1;
+SELECT a FROM t1;
+a
+connection con2;
+INSERT INTO t1 (a) VALUES (2);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
+# If it differs from the result file, it might indicate a problem.
+connection con1;
+SELECT a FROM t1;
+a
+INSERT INTO t1 (a) SELECT a+100 FROM t1;
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+SELECT a FROM t1;
+a
+connection con2;
+SELECT a FROM t1;
+a
+COMMIT;
+SELECT a FROM t1;
+a
+connection con1;
+SELECT a FROM t1;
+a
+INSERT INTO t1 (a) SELECT a+200 FROM t1;
+# WARNING: Statement ended with errno 0, errname ''.
+# If it differs from the result file, it might indicate a problem.
+SELECT a FROM t1;
+a
+COMMIT;
+SELECT a FROM t1;
+a
+connection con2;
+SELECT a FROM t1;
+a
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result
new file mode 100644
index 00000000000..5f6df197c94
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result
@@ -0,0 +1,239 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ',' (a,b);
+SELECT a,b FROM t1;
+a b
+1 foo
+2 bar
+3
+4 abc
+LOAD DATA LOCAL INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+CHARACTER SET utf8 COLUMNS TERMINATED BY ','
+ ESCAPED BY '/' (a,b);
+SELECT a,b FROM t1;
+a b
+1 foo
+1 foo
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+LOAD DATA LOCAL INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ';'
+ (a) SET b='loaded';
+Warnings:
+Warning 1262 Row 1 was truncated; it contained more data than there were input columns
+Warning 1262 Row 2 was truncated; it contained more data than there were input columns
+Warning 1262 Row 3 was truncated; it contained more data than there were input columns
+SELECT a,b FROM t1;
+a b
+0 loaded
+1 foo
+1 foo
+102 loaded
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+5 loaded
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ';'
+ OPTIONALLY ENCLOSED BY ''''
+ LINES STARTING BY 'prefix:'
+IGNORE 2 LINES (a,b);
+Warnings:
+Warning 1262 Row 2 was truncated; it contained more data than there were input columns
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+5 loaded
+7 test
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 2 doesn't contain data for all columns
+Warning 1261 Row 3 doesn't contain data for all columns
+Warning 1261 Row 4 doesn't contain data for all columns
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 foo
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+2 bar
+3
+3
+3
+4 abc
+4 abc
+4 abc
+5 loaded
+7 test
+LOAD DATA INFILE '<DATADIR>/se_replacedata.dat' REPLACE INTO TABLE t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 2 doesn't contain data for all columns
+Warning 1261 Row 3 doesn't contain data for all columns
+Warning 1261 Row 4 doesn't contain data for all columns
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 aaa
+1 foo
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+2 bar
+2 bbb
+3
+3
+3
+3 ccc
+4 abc
+4 abc
+4 abc
+4 ddd
+5 loaded
+7 test
+DROP TABLE t1;
+set session rocksdb_skip_unique_check=1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ',' (a,b);
+SELECT a,b FROM t1;
+a b
+1 foo
+2 bar
+3
+4 abc
+LOAD DATA LOCAL INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+CHARACTER SET utf8 COLUMNS TERMINATED BY ','
+ ESCAPED BY '/' (a,b);
+SELECT a,b FROM t1;
+a b
+1 foo
+1 foo
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+LOAD DATA LOCAL INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ';'
+ (a) SET b='loaded';
+Warnings:
+Warning 1262 Row 1 was truncated; it contained more data than there were input columns
+Warning 1262 Row 2 was truncated; it contained more data than there were input columns
+Warning 1262 Row 3 was truncated; it contained more data than there were input columns
+SELECT a,b FROM t1;
+a b
+0 loaded
+1 foo
+1 foo
+102 loaded
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+5 loaded
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1
+FIELDS TERMINATED BY ';'
+ OPTIONALLY ENCLOSED BY ''''
+ LINES STARTING BY 'prefix:'
+IGNORE 2 LINES (a,b);
+Warnings:
+Warning 1262 Row 2 was truncated; it contained more data than there were input columns
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+3
+3
+4 abc
+4 abc
+5 loaded
+7 test
+LOAD DATA INFILE '<DATADIR>/se_loaddata.dat' INTO TABLE t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 2 doesn't contain data for all columns
+Warning 1261 Row 3 doesn't contain data for all columns
+Warning 1261 Row 4 doesn't contain data for all columns
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 foo
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+2 bar
+3
+3
+3
+4 abc
+4 abc
+4 abc
+5 loaded
+7 test
+LOAD DATA INFILE '<DATADIR>/se_replacedata.dat' REPLACE INTO TABLE t1;
+ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: LOAD DATA INFILE '<DATADIR>/se_replacedata.dat' REPLACE INTO TABLE t1
+SELECT a,b FROM t1;
+a b
+0
+0 loaded
+1 foo
+1 foo
+1 foo
+100 foo
+102 loaded
+2 bar
+2 bar
+2 bar
+3
+3
+3
+4 abc
+4 abc
+4 abc
+5 loaded
+7 test
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock.result b/storage/rocksdb/mysql-test/rocksdb/r/lock.result
new file mode 100644
index 00000000000..8c89fa1b934
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/lock.result
@@ -0,0 +1,108 @@
+connect con1,localhost,root,,;
+SET lock_wait_timeout=1;
+connection default;
+DROP TABLE IF EXISTS t1, t2, t3;
+CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3);
+LOCK TABLE t1 LOW_PRIORITY WRITE;
+Warnings:
+Warning 1287 'LOW_PRIORITY WRITE' is deprecated and will be removed in a future release. Please use WRITE instead
+SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2;
+id2 COUNT(DISTINCT id)
+1 1
+2 1
+3 1
+UPDATE t1 SET id=-1 WHERE id=1;
+connection con1;
+SELECT id,id2 FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
+LOCK TABLE t1 READ;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
+connection default;
+LOCK TABLE t1 READ;
+UPDATE t1 SET id=1 WHERE id=1;
+ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
+connection con1;
+SELECT COUNT(DISTINCT id) FROM t1;
+COUNT(DISTINCT id)
+1
+UPDATE t1 SET id=2 WHERE id=2;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table: test.t1
+LOCK TABLE t1 WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
+LOCK TABLE t1 READ;
+UNLOCK TABLES;
+connection default;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+CREATE TABLE t2 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLE t1 WRITE, t2 WRITE;
+INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1;
+UPDATE t1 SET id=1 WHERE id=-1;
+DROP TABLE t1,t2;
+CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (nr,nm) VALUES (1,3);
+INSERT INTO t2 (nr,nm) VALUES (2,4);
+LOCK TABLES t1 WRITE, t2 READ;
+INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3;
+INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4;
+UNLOCK TABLES;
+LOCK TABLES t1 WRITE;
+INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1;
+ERROR HY000: Table 't1' was not locked with LOCK TABLES
+UNLOCK TABLES;
+LOCK TABLES t1 WRITE, t1 AS t1_alias READ;
+INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias;
+DROP TABLE t1,t2;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE;
+DROP TABLE t2, t3, t1;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ;
+ALTER TABLE t2 ADD COLUMN c2 INT;
+DROP TABLE t1, t2, t3;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+LOCK TABLE t1 READ, t2 READ;
+FLUSH TABLE t1;
+ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
+FLUSH TABLES;
+ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
+FLUSH TABLES t1, t2 WITH READ LOCK;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+UNLOCK TABLES;
+FLUSH TABLES t1, t2 WITH READ LOCK;
+connection con1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
+connection default;
+UNLOCK TABLES;
+FLUSH TABLES WITH READ LOCK;
+connection con1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on global read:
+connection default;
+UNLOCK TABLES;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+FLUSH TABLES WITH READ LOCK;
+DROP TABLE t1, t2;
+ERROR HY000: Can't execute the query because you have a conflicting read lock
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+disconnect con1;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+LOCK TABLE t1 WRITE, t2 WRITE;
+SELECT a,b FROM t1;
+a b
+UNLOCK TABLES;
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result
new file mode 100644
index 00000000000..cf764f89581
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result
@@ -0,0 +1,40 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+id1 id2 id3 value
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE;
+id1 id2 id3 value
+connection con1;
+ROLLBACK;
+BEGIN;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+connection con2;
+ROLLBACK;
+BEGIN;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1;
+connection con1;
+ROLLBACK;
+BEGIN;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+connection con2;
+ROLLBACK;
+BEGIN;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0;
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result
new file mode 100644
index 00000000000..4b237dcb7aa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result
@@ -0,0 +1,490 @@
+
+-----------------------------------------------------------------------
+- Locking issues case 1.1:
+- Locking rows that do not exist when using all primary key columns in
+- a WHERE clause
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+id1 id2 value
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+INSERT INTO t0 VALUES (1,5,0);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 1.1:
+- Locking rows that do not exist when using all primary key columns in
+- a WHERE clause
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+id1 id2 value
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+INSERT INTO t0 VALUES (1,5,0);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 1.2:
+- Locking rows that do not exist without using all primary key
+- columns in a WHERE clause
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
+id1 id2 value
+1 1 0
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
+id1 id2 value
+INSERT INTO t0 VALUES (1,5,0);
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 1.2:
+- Locking rows that do not exist without using all primary key
+- columns in a WHERE clause
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2));
+INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 FOR UPDATE;
+id1 id2 value
+1 1 0
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE;
+id1 id2 value
+INSERT INTO t0 VALUES (1,5,0);
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using REPEATABLE READ transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+UPDATE t0 SET VALUE=10 WHERE id=1;
+UPDATE t0 SET VALUE=10 WHERE id=5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
+SELECT * FROM t0 WHERE id=4 FOR UPDATE;
+id value
+4 0
+COMMIT;
+SELECT * FROM t0;
+id value
+1 10
+2 1
+3 0
+4 0
+5 1
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using READ COMMITTED transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+UPDATE t0 SET VALUE=10 WHERE id=1;
+UPDATE t0 SET VALUE=10 WHERE id=5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+UPDATE t0 SET value=100 WHERE id in (4,5) and value>0;
+SELECT * FROM t0 WHERE id=4 FOR UPDATE;
+id value
+4 0
+COMMIT;
+SELECT * FROM t0;
+id value
+1 10
+2 1
+3 0
+4 0
+5 1
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using REPEATABLE READ transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+UPDATE t0 SET VALUE=10 WHERE id=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+COMMIT;
+DROP TABLE t0;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
+
+-----------------------------------------------------------------------
+- Locking issues case 2:
+- Rows that are scanned but do not match the WHERE are not locked
+- using READ COMMITTED transaction isolation level unless
+- rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t0(id INT PRIMARY KEY, value INT);
+INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+id value
+2 1
+5 1
+UPDATE t0 SET VALUE=10 WHERE id=1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t0.PRIMARY
+COMMIT;
+DROP TABLE t0;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
+
+-----------------------------------------------------------------------
+- Locking issues case 3:
+- After creating a snapshot, other clients updating rows
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 3:
+- After creating a snapshot, other clients updating rows
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 4:
+- Phantom rows
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+INSERT INTO t0 VALUES(200001,1), (-1,1);
+id value
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 4:
+- Phantom rows
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+INSERT INTO t0 VALUES(200001,1), (-1,1);
+id value
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 5:
+- Deleting primary key
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+DELETE FROM t0 WHERE id=190000;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 5:
+- Deleting primary key
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+DELETE FROM t0 WHERE id=190000;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 6:
+- Changing primary key
+- using REPEATABLE READ transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+UPDATE t0 SET id=200001 WHERE id=190000;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 6:
+- Changing primary key
+- using READ COMMITTED transaction isolation level
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT);
+Inserting 200,000 rows
+UPDATE t0 SET value=100 WHERE id=190000;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t0 WHERE value > 0 FOR UPDATE;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+UPDATE t0 SET id=200001 WHERE id=190000;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+DROP TABLE t0;
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+lock_scanned_rows is 0
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+UPDATE t2 SET value=value+100;
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 103
+4 104
+5 105
+COMMIT;
+DROP TABLE t1;
+DROP TABLE t2;
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+lock_scanned_rows is 0
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+UPDATE t2 SET value=value+100;
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 103
+4 104
+5 105
+COMMIT;
+DROP TABLE t1;
+DROP TABLE t2;
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+lock_scanned_rows is 1
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+UPDATE t2 SET value=value+100 WHERE id=3;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY
+UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 3
+4 104
+5 105
+COMMIT;
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
+
+-----------------------------------------------------------------------
+- Locking issues case 7:
+- Rows that are scanned as part of a query but not in the table being
+- updated should not be locked unless rocksdb_lock_scanned_rows is on
+-----------------------------------------------------------------------
+DROP TABLE IF EXISTS t1, t2;
+SELECT @@global.rocksdb_lock_scanned_rows;
+@@global.rocksdb_lock_scanned_rows
+0
+SET GLOBAL rocksdb_lock_scanned_rows=ON;
+CREATE TABLE t1(id INT PRIMARY KEY, value INT);
+CREATE TABLE t2(id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3);
+INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+lock_scanned_rows is 1
+UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3;
+UPDATE t2 SET value=value+100 WHERE id=3;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY
+UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5);
+SELECT * FROM t2;
+id value
+1 101
+2 102
+3 3
+4 104
+5 105
+COMMIT;
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_lock_scanned_rows=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result
new file mode 100644
index 00000000000..70c270d5538
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result
@@ -0,0 +1,84 @@
+INSERT INTO mysql.event (
+db,
+name,
+body,
+definer,
+interval_value,
+interval_field,
+originator,
+character_set_client,
+collation_connection,
+db_collation,
+body_utf8)
+values (
+database(),
+"ev1",
+"select 1",
+user(),
+100,
+"SECOND_MICROSECOND",
+1,
+'utf8',
+'utf8_general_ci',
+'utf8_general_ci',
+'select 1');
+SHOW EVENTS;
+ERROR 42000: This version of MySQL doesn't yet support 'MICROSECOND'
+DROP EVENT ev1;
+SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME
+FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME;
+TABLE_NAME COLUMN_NAME REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+columns_priv Column_name NULL NULL
+columns_priv Db NULL NULL
+columns_priv Host NULL NULL
+columns_priv Table_name NULL NULL
+columns_priv User NULL NULL
+db Db NULL NULL
+db Host NULL NULL
+db User NULL NULL
+event db NULL NULL
+event name NULL NULL
+func name NULL NULL
+help_category help_category_id NULL NULL
+help_category name NULL NULL
+help_keyword help_keyword_id NULL NULL
+help_keyword name NULL NULL
+help_relation help_keyword_id NULL NULL
+help_relation help_topic_id NULL NULL
+help_topic help_topic_id NULL NULL
+help_topic name NULL NULL
+ndb_binlog_index epoch NULL NULL
+ndb_binlog_index orig_epoch NULL NULL
+ndb_binlog_index orig_server_id NULL NULL
+plugin name NULL NULL
+proc db NULL NULL
+proc name NULL NULL
+proc type NULL NULL
+procs_priv Db NULL NULL
+procs_priv Host NULL NULL
+procs_priv Routine_name NULL NULL
+procs_priv Routine_type NULL NULL
+procs_priv User NULL NULL
+proxies_priv Host NULL NULL
+proxies_priv Proxied_host NULL NULL
+proxies_priv Proxied_user NULL NULL
+proxies_priv User NULL NULL
+servers Server_name NULL NULL
+slave_gtid_info Id NULL NULL
+slave_master_info Host NULL NULL
+slave_master_info Port NULL NULL
+slave_relay_log_info Id NULL NULL
+slave_worker_info Id NULL NULL
+tables_priv Db NULL NULL
+tables_priv Host NULL NULL
+tables_priv Table_name NULL NULL
+tables_priv User NULL NULL
+time_zone Time_zone_id NULL NULL
+time_zone_leap_second Transition_time NULL NULL
+time_zone_name Name NULL NULL
+time_zone_transition Time_zone_id NULL NULL
+time_zone_transition Transition_time NULL NULL
+time_zone_transition_type Time_zone_id NULL NULL
+time_zone_transition_type Transition_type_id NULL NULL
+user Host NULL NULL
+user User NULL NULL
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result
new file mode 100644
index 00000000000..835361eea35
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result
@@ -0,0 +1,143 @@
+reset master;
+set timestamp=1000000000;
+set SESSION binlog_format = 'ROW';
+create database test2;
+create database test3;
+use test;
+create table t1 (a int primary key, b char(8)) ENGINE=rocksdb;
+insert into t1 values(1, 'a');
+insert into t1 values(2, 'b');
+create table t2 (a int primary key, b char(8)) ENGINE=rocksdb;
+start transaction;
+insert into t2 values(1, 'a');
+insert into t2 values(2, 'b');
+insert into t2 values(3, 'c');
+insert into t2 values(4, 'd');
+commit;
+use test2;
+create table t1 (a int primary key, b char(8)) ENGINE=rocksdb;
+insert into t1 values(1, 'a');
+insert into t1 values(2, 'b');
+create table t2 (a int primary key, b char(8)) ENGINE=rocksdb;
+start transaction;
+insert into t2 values(1, 'a');
+insert into t2 values(2, 'b');
+insert into t2 values(3, 'c');
+insert into t2 values(4, 'd');
+commit;
+use test3;
+create table t1 (a int primary key, b char(8)) ENGINE=rocksdb;
+insert into t1 values(1, 'a');
+insert into t1 values(2, 'b');
+create table t2 (a int primary key, b char(8)) ENGINE=rocksdb;
+start transaction;
+insert into t2 values(1, 'a');
+insert into t2 values(2, 'b');
+insert into t2 values(3, 'c');
+insert into t2 values(4, 'd');
+commit;
+FLUSH LOGS;
+==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ====
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
+DELIMITER /*!*/;
+ROLLBACK/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/;
+SET @@session.sql_mode=1073741824/*!*/;
+SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
+SET @@session.lc_time_names=0/*!*/;
+SET @@session.collation_database=DEFAULT/*!*/;
+create database test2
+/*!*/;
+use `test2`/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+create table t1 (a int primary key, b char(8)) ENGINE=rocksdb
+/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+create table t2 (a int primary key, b char(8)) ENGINE=rocksdb
+/*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+COMMIT/*!*/;
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
+use test2;
+start transaction;
+insert into t2 values(5, 'e');
+insert into t2 values(6, 'f');
+use test;
+insert into t2 values(7, 'g');
+insert into t2 values(8, 'h');
+commit;
+FLUSH LOGS;
+==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ====
+==== DB changed in the middle of the transaction, which belongs to the selected database
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
+DELIMITER /*!*/;
+SET TIMESTAMP=1000000000/*!*/;
+SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/;
+SET @@session.sql_mode=1073741824/*!*/;
+SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
+SET @@session.lc_time_names=0/*!*/;
+SET @@session.collation_database=DEFAULT/*!*/;
+BEGIN
+/*!*/;
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
+use test;
+start transaction;
+insert into t2 values(9, 'i');
+insert into t2 values(10, 'j');
+use test2;
+insert into t2 values(11, 'k');
+insert into t2 values(12, 'l');
+commit;
+FLUSH LOGS;
+==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ====
+==== DB changed in the middle of the transaction, which belongs to the non-selected database
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
+DELIMITER /*!*/;
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
+use test;
+drop table t1;
+drop table if exists t2;
+use test2;
+drop table t1;
+drop table if exists t2;
+use test3;
+drop table t1;
+drop table if exists t2;
+drop database test2;
+drop database test3;
+FLUSH LOGS;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result
new file mode 100644
index 00000000000..849257d08fa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result
@@ -0,0 +1,131 @@
+drop table if exists r1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb;
+insert into r1 values (1,1,1,1,1,1,1,1);
+insert into r1 values (1,1,1,2,2,2,2,2);
+insert into r1 values (1,1,2,1,3,3,3,3);
+insert into r1 values (1,1,2,2,4,4,4,4);
+insert into r1 values (1,2,1,1,5,5,5,5);
+insert into r1 values (1,2,1,2,6,6,6,6);
+insert into r1 values (1,2,2,1,7,7,7,7);
+insert into r1 values (1,2,2,2,8,8,8,8);
+insert into r1 values (2,1,1,1,9,9,9,9);
+insert into r1 values (2,1,1,2,10,10,10,10);
+insert into r1 values (2,1,2,1,11,11,11,11);
+insert into r1 values (2,1,2,2,12,12,12,12);
+insert into r1 values (2,2,1,1,13,13,13,13);
+insert into r1 values (2,2,1,2,14,14,14,14);
+insert into r1 values (2,2,2,1,15,15,15,15);
+insert into r1 values (2,2,2,2,16,16,16,16);
+connection con2;
+BEGIN;
+insert into r1 values (5,5,5,5,5,5,5,5);
+update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1';
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50601 SELECT count(*) INTO @is_rocksdb_supported FROM information_schema.SESSION_VARIABLES WHERE variable_name='rocksdb_bulk_load' */;
+/*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */;
+/*!50601 PREPARE s FROM @enable_bulk_load */;
+/*!50601 EXECUTE s */;
+-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893;
+DROP TABLE IF EXISTS `r1`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `r1` (
+ `id1` int(11) NOT NULL DEFAULT '0',
+ `id2` int(11) NOT NULL DEFAULT '0',
+ `id3` varchar(100) NOT NULL DEFAULT '',
+ `id4` int(11) NOT NULL DEFAULT '0',
+ `value1` int(11) DEFAULT NULL,
+ `value2` int(11) DEFAULT NULL,
+ `value3` int(11) DEFAULT NULL,
+ `value4` int(11) DEFAULT NULL,
+ PRIMARY KEY (`id1`,`id2`,`id3`,`id4`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/* ORDERING KEY (DESC) : PRIMARY */;
+
+LOCK TABLES `r1` WRITE;
+/*!40000 ALTER TABLE `r1` DISABLE KEYS */;
+INSERT INTO `r1` VALUES (2,2,'2',2,16,16,16,16),(2,2,'2',1,15,15,15,15),(2,2,'1',2,14,14,14,14),(2,2,'1',1,13,13,13,13),(2,1,'2',2,12,12,12,12),(2,1,'2',1,11,11,11,11),(2,1,'1',2,10,10,10,10),(2,1,'1',1,9,9,9,9),(1,2,'2',2,8,8,8,8),(1,2,'2',1,7,7,7,7),(1,2,'1',2,6,6,6,6),(1,2,'1',1,5,5,5,5),(1,1,'2',2,4,4,4,4),(1,1,'2',1,3,3,3,3),(1,1,'1',2,2,2,2,2),(1,1,'1',1,1,1,1,1);
+/*!40000 ALTER TABLE `r1` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!50601 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=0', 'SET @dummy = 0') */;
+/*!50601 PREPARE s FROM @disable_bulk_load */;
+/*!50601 EXECUTE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+rollback;
+connection con1;
+1
+set @save_default_storage_engine=@@global.default_storage_engine;
+SET GLOBAL default_storage_engine=rocksdb;
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893;
+DROP TABLE IF EXISTS `r1`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `r1` (
+ `id1` int(11) NOT NULL DEFAULT '0',
+ `id2` int(11) NOT NULL DEFAULT '0',
+ `id3` varchar(100) NOT NULL DEFAULT '',
+ `id4` int(11) NOT NULL DEFAULT '0',
+ `value1` int(11) DEFAULT NULL,
+ `value2` int(11) DEFAULT NULL,
+ `value3` int(11) DEFAULT NULL,
+ `value4` int(11) DEFAULT NULL,
+ PRIMARY KEY (`id1`,`id2`,`id3`,`id4`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/* ORDERING KEY : (null) */;
+
+LOCK TABLES `r1` WRITE;
+/*!40000 ALTER TABLE `r1` DISABLE KEYS */;
+INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16);
+/*!40000 ALTER TABLE `r1` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+2
+SET GLOBAL binlog_format=statement;
+SET GLOBAL binlog_format=row;
+drop table r1;
+reset master;
+set @@global.default_storage_engine=@save_default_storage_engine;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result
new file mode 100644
index 00000000000..11c1f370e7a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS t1;
+create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+case when variable_value - @a > 20 then 'true' else 'false' end
+false
+select count(*) from t1;
+count(*)
+50000
+select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+case when variable_value - @a > 100 then 'true' else 'false' end
+true
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result
new file mode 100644
index 00000000000..e45c5d6efc7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
+SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1;
+set session debug= "+d,myrocks_simulate_negative_stats";
+SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END
+true
+set session debug= "-d,myrocks_simulate_negative_stats";
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
new file mode 100644
index 00000000000..3a631d2925b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
@@ -0,0 +1,63 @@
+Warnings:
+Note 1051 Unknown table 'test.ti_nk'
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+skip_merge_sort
+true
+DROP TABLE ti_nk;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result
new file mode 100644
index 00000000000..fa2062b415e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result
@@ -0,0 +1,81 @@
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6;
+create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+select count(*) from t1;
+count(*)
+10000
+select count(*) from t2;
+count(*)
+10000
+select count(*) from t3;
+count(*)
+10000
+select count(*) from t4;
+count(*)
+10000
+select count(*) from t5;
+count(*)
+10000
+select count(*) from t6;
+count(*)
+10000
+delete from t1 where id <= 9900;
+delete from t2 where id <= 9900;
+delete from t3 where id <= 9900;
+delete from t4 where id <= 9900;
+delete from t5 where id <= 9900;
+delete from t6 where id <= 9900;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+optimize table t3;
+Table Op Msg_type Msg_text
+test.t3 optimize status OK
+optimize table t4;
+Table Op Msg_type Msg_text
+test.t4 optimize status OK
+optimize table t6;
+Table Op Msg_type Msg_text
+test.t6 optimize status OK
+select count(*) from t1;
+count(*)
+100
+select count(*) from t2;
+count(*)
+100
+select count(*) from t3;
+count(*)
+100
+select count(*) from t4;
+count(*)
+100
+select count(*) from t5;
+count(*)
+100
+select count(*) from t6;
+count(*)
+100
+checking sst file reduction on optimize table from 0 to 1..
+ok.
+checking sst file reduction on optimize table from 1 to 2..
+ok.
+checking sst file reduction on optimize table from 2 to 3..
+ok.
+checking sst file reduction on optimize table from 3 to 4..
+ok.
+optimize table t2;
+Table Op Msg_type Msg_text
+test.t2 optimize status OK
+optimize table t5;
+Table Op Msg_type Msg_text
+test.t5 optimize status OK
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+DROP TABLE t5;
+DROP TABLE t6;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result
new file mode 100644
index 00000000000..76085cc1d27
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result
@@ -0,0 +1,30 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS VAR_POP;
+DROP TABLE IF EXISTS TEMP0;
+DROP TABLE IF EXISTS VAR_SAMP;
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+SHOW TABLES;
+Tables_in_test
+TEMP0
+VAR_POP
+VAR_SAMP
+t1
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+i j k
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1000
+DROP TABLE t1;
+DROP TABLE VAR_POP;
+DROP TABLE TEMP0;
+DROP TABLE VAR_SAMP;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result
new file mode 100644
index 00000000000..2e8610d43bd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result
@@ -0,0 +1,160 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level;
+SET GLOBAL rocksdb_perf_context_level=3;
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB;
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1';
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE
+test t1 NULL USER_KEY_COMPARISON_COUNT #
+test t1 NULL BLOCK_CACHE_HIT_COUNT #
+test t1 NULL BLOCK_READ_COUNT #
+test t1 NULL BLOCK_READ_BYTE #
+test t1 NULL BLOCK_READ_TIME #
+test t1 NULL BLOCK_CHECKSUM_TIME #
+test t1 NULL BLOCK_DECOMPRESS_TIME #
+test t1 NULL INTERNAL_KEY_SKIPPED_COUNT #
+test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT #
+test t1 NULL GET_SNAPSHOT_TIME #
+test t1 NULL GET_FROM_MEMTABLE_TIME #
+test t1 NULL GET_FROM_MEMTABLE_COUNT #
+test t1 NULL GET_POST_PROCESS_TIME #
+test t1 NULL GET_FROM_OUTPUT_FILES_TIME #
+test t1 NULL SEEK_ON_MEMTABLE_TIME #
+test t1 NULL SEEK_ON_MEMTABLE_COUNT #
+test t1 NULL SEEK_CHILD_SEEK_TIME #
+test t1 NULL SEEK_CHILD_SEEK_COUNT #
+test t1 NULL SEEK_IN_HEAP_TIME #
+test t1 NULL SEEK_INTERNAL_SEEK_TIME #
+test t1 NULL FIND_NEXT_USER_ENTRY_TIME #
+test t1 NULL WRITE_WAL_TIME #
+test t1 NULL WRITE_MEMTABLE_TIME #
+test t1 NULL WRITE_DELAY_TIME #
+test t1 NULL WRITE_PRE_AND_POST_PROCESS_TIME #
+test t1 NULL DB_MUTEX_LOCK_NANOS #
+test t1 NULL DB_CONDITION_WAIT_NANOS #
+test t1 NULL MERGE_OPERATOR_TIME_NANOS #
+test t1 NULL READ_INDEX_BLOCK_NANOS #
+test t1 NULL READ_FILTER_BLOCK_NANOS #
+test t1 NULL NEW_TABLE_BLOCK_ITER_NANOS #
+test t1 NULL NEW_TABLE_ITERATOR_NANOS #
+test t1 NULL BLOCK_SEEK_NANOS #
+test t1 NULL FIND_TABLE_NANOS #
+test t1 NULL IO_THREAD_POOL_ID #
+test t1 NULL IO_BYTES_WRITTEN #
+test t1 NULL IO_BYTES_READ #
+test t1 NULL IO_OPEN_NANOS #
+test t1 NULL IO_ALLOCATE_NANOS #
+test t1 NULL IO_WRITE_NANOS #
+test t1 NULL IO_READ_NANOS #
+test t1 NULL IO_RANGE_SYNC_NANOS #
+test t1 NULL IO_LOGGER_NANOS #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL;
+STAT_TYPE VALUE
+USER_KEY_COMPARISON_COUNT #
+BLOCK_CACHE_HIT_COUNT #
+BLOCK_READ_COUNT #
+BLOCK_READ_BYTE #
+BLOCK_READ_TIME #
+BLOCK_CHECKSUM_TIME #
+BLOCK_DECOMPRESS_TIME #
+INTERNAL_KEY_SKIPPED_COUNT #
+INTERNAL_DELETE_SKIPPED_COUNT #
+GET_SNAPSHOT_TIME #
+GET_FROM_MEMTABLE_TIME #
+GET_FROM_MEMTABLE_COUNT #
+GET_POST_PROCESS_TIME #
+GET_FROM_OUTPUT_FILES_TIME #
+SEEK_ON_MEMTABLE_TIME #
+SEEK_ON_MEMTABLE_COUNT #
+SEEK_CHILD_SEEK_TIME #
+SEEK_CHILD_SEEK_COUNT #
+SEEK_IN_HEAP_TIME #
+SEEK_INTERNAL_SEEK_TIME #
+FIND_NEXT_USER_ENTRY_TIME #
+WRITE_WAL_TIME #
+WRITE_MEMTABLE_TIME #
+WRITE_DELAY_TIME #
+WRITE_PRE_AND_POST_PROCESS_TIME #
+DB_MUTEX_LOCK_NANOS #
+DB_CONDITION_WAIT_NANOS #
+MERGE_OPERATOR_TIME_NANOS #
+READ_INDEX_BLOCK_NANOS #
+READ_FILTER_BLOCK_NANOS #
+NEW_TABLE_BLOCK_ITER_NANOS #
+NEW_TABLE_ITERATOR_NANOS #
+BLOCK_SEEK_NANOS #
+FIND_TABLE_NANOS #
+IO_THREAD_POOL_ID #
+IO_BYTES_WRITTEN #
+IO_BYTES_READ #
+IO_OPEN_NANOS #
+IO_ALLOCATE_NANOS #
+IO_WRITE_NANOS #
+IO_READ_NANOS #
+IO_RANGE_SYNC_NANOS #
+IO_LOGGER_NANOS #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE
+test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 0
+test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0
+SELECT * FROM t1;
+i j
+1 1
+2 2
+3 3
+4 4
+5 5
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE
+test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 5
+test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0
+SELECT * FROM t1 WHERE j BETWEEN 1 AND 5;
+i j
+1 1
+2 2
+3 3
+4 4
+5 5
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE
+test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 10
+test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0
+BEGIN;
+INSERT INTO t2 VALUES (1), (2);
+INSERT INTO t2 VALUES (3), (4);
+COMMIT;
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't2'
+AND STAT_TYPE = 'IO_WRITE_NANOS'
+AND VALUE > 0;
+COUNT(*)
+0
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS' AND VALUE > 0;
+COUNT(*)
+1
+SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS';
+INSERT INTO t2 VALUES (5), (6), (7), (8);
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't2'
+AND STAT_TYPE = 'IO_WRITE_NANOS'
+AND VALUE > 0;
+COUNT(*)
+1
+SELECT VALUE INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS';
+SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END;
+CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END
+true
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result
new file mode 100644
index 00000000000..b83f0a474cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1);
+select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 734 uuid:1-3
+select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
+case when variable_value-@p < 1000 then 'true' else variable_value-@p end
+true
+select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s < 100 then 'true' else variable_value-@s end
+true
+SELECT * FROM t1;
+id value
+1 1
+INSERT INTO t1 values (2, 2);
+ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT.
+ROLLBACK;
+SELECT * FROM t1;
+id value
+1 10001
+INSERT INTO t1 values (2, 2);
+SELECT * FROM t1 ORDER BY id;
+id value
+1 10001
+2 2
+BEGIN;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+9998
+COMMIT;
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+DROP TABLE t1;
+reset master;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result
new file mode 100644
index 00000000000..e165e117a99
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result
@@ -0,0 +1,210 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT,
+a INT,
+b INT,
+PRIMARY KEY (i),
+KEY ka(a),
+KEY kb(b) comment 'rev:cf1'
+) ENGINE = rocksdb;
+explain extended select * from t1 where a> 500 and a< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750))
+explain extended select * from t1 where a< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750)
+explain extended select * from t1 where a> 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500)
+explain extended select * from t1 where a>=0 and a<=1000;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000))
+explain extended select * from t1 where b> 500 and b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750))
+explain extended select * from t1 where b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750)
+explain extended select * from t1 where b> 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500)
+explain extended select * from t1 where b>=0 and b<=1000;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000))
+set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range;
+set rocksdb_records_in_range = 15000;
+explain extended select a from t1 where a < 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 15000 100.00 Using where; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` < 750)
+explain extended select a, b from t1 where a < 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL ka NULL NULL NULL 20000 75.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750)
+explain extended select a from t1 where a = 700;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ref ka ka 5 const 15000 100.00 Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` = 700)
+explain extended select a,b from t1 where a = 700;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ref ka ka 5 const 15000 100.00 NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` = 700)
+explain extended select a from t1 where a in (700, 800);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 index ka ka 5 NULL 20000 100.00 Using where; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800))
+explain extended select a,b from t1 where a in (700, 800);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL ka NULL NULL NULL 20000 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800))
+set rocksdb_records_in_range=8000;
+explain extended select a from t1 where a in (700, 800);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 16000 100.00 Using where; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where (`test`.`t1`.`a` in (700,800))
+explain extended select a,b from t1 where a in (700, 800);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL ka NULL NULL NULL 20000 80.00 Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` in (700,800))
+set rocksdb_records_in_range = @save_rocksdb_records_in_range;
+set global rocksdb_force_flush_memtable_now = true;
+explain extended select * from t1 where a> 500 and a< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` > 500) and (`test`.`t1`.`a` < 750))
+explain extended select * from t1 where a< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` < 750)
+explain extended select * from t1 where a> 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` > 500)
+explain extended select * from t1 where a>=0 and a<=1000;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 0) and (`test`.`t1`.`a` <= 1000))
+explain extended select * from t1 where b> 500 and b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750))
+explain extended select * from t1 where b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` < 750)
+explain extended select * from t1 where b> 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`b` > 500)
+explain extended select * from t1 where b>=0 and b<=1000;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 0) and (`test`.`t1`.`b` <= 1000))
+explain extended select * from t1 where a>= 500 and a<= 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` >= 500) and (`test`.`t1`.`a` <= 500))
+explain extended select * from t1 where b>= 500 and b<= 500;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`b` >= 500) and (`test`.`t1`.`b` <= 500))
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range ka,kb ka 5 NULL 1000 100.00 Using index condition; Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750))
+drop index ka on t1;
+drop index kb on t1;
+create index kab on t1(a,b);
+set global rocksdb_force_flush_memtable_now = true;
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kab kab 5 NULL 1000 100.00 Using where; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750))
+set rocksdb_records_in_range=444;
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range kab kab 5 NULL 444 100.00 Using where; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where ((`test`.`t1`.`a` < 750) and (`test`.`t1`.`b` > 500) and (`test`.`t1`.`b` < 750))
+set rocksdb_records_in_range=0;
+CREATE TABLE `linktable` (
+`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+`id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+`link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+`visibility` tinyint(3) NOT NULL DEFAULT '0',
+`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '',
+`time` bigint(20) unsigned NOT NULL DEFAULT '0',
+`version` int(11) unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk',
+KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+insert into linktable values (1,1,1,1,1,1,1,1,1);
+insert into linktable values (1,1,2,1,1,1,1,1,1);
+insert into linktable values (1,1,3,1,1,1,1,1,1);
+insert into linktable values (1,1,4,1,1,1,1,1,1);
+set global rocksdb_force_flush_memtable_now = true;
+explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where
+drop table linktable;
+CREATE TABLE `linktable` (
+`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+`id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+`id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+`link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+`visibility` tinyint(3) NOT NULL DEFAULT '0',
+`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '',
+`time` bigint(20) unsigned NOT NULL DEFAULT '0',
+`version` int(11) unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk',
+KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+insert into linktable values (1,1,1,1,1,1,1,1,1);
+insert into linktable values (1,1,2,1,1,1,1,1,1);
+insert into linktable values (1,1,3,1,1,1,1,1,1);
+insert into linktable values (1,1,4,1,1,1,1,1,1);
+set global rocksdb_force_flush_memtable_now = true;
+explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where
+drop table linktable;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result
new file mode 100644
index 00000000000..f227d200bcc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result
@@ -0,0 +1,37 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb;
+REPAIR TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 repair note The storage engine for the table doesn't support repair
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2;
+Table Op Msg_type Msg_text
+test.t1 repair note The storage engine for the table doesn't support repair
+test.t2 repair note The storage engine for the table doesn't support repair
+INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
+REPAIR LOCAL TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 repair note The storage engine for the table doesn't support repair
+INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+INSERT INTO t2 (a,b) VALUES (9,'i');
+REPAIR LOCAL TABLE t2, t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t2 repair note The storage engine for the table doesn't support repair
+test.t1 repair note The storage engine for the table doesn't support repair
+INSERT INTO t1 (a,b) VALUES (10,'j');
+INSERT INTO t2 (a,b) VALUES (11,'k');
+REPAIR TABLE t1, t2 QUICK USE_FRM;
+Table Op Msg_type Msg_text
+test.t1 repair note The storage engine for the table doesn't support repair
+test.t2 repair note The storage engine for the table doesn't support repair
+INSERT INTO t1 (a,b) VALUES (12,'l');
+INSERT INTO t2 (a,b) VALUES (13,'m');
+REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+Table Op Msg_type Msg_text
+test.t1 repair note The storage engine for the table doesn't support repair
+test.t2 repair note The storage engine for the table doesn't support repair
+FLUSH TABLE t1;
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/replace.result b/storage/rocksdb/mysql-test/rocksdb/r/replace.result
new file mode 100644
index 00000000000..f8f61a3f8c3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/replace.result
@@ -0,0 +1,32 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+SELECT a,b FROM t1;
+a b
+1 a
+2 b
+3 c
+4 d
+5 e
+REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo');
+SELECT a,b FROM t1;
+a b
+1 a
+10 foo
+10 foo
+2 b
+3 c
+4 d
+5 e
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb;
+REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+INSERT INTO t1 (a,b) VALUES (4,'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+REPLACE INTO t1 (a,b) VALUES (4,'b');
+SELECT a,b FROM t1;
+a b
+1 a
+3 c
+4 b
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
new file mode 100644
index 00000000000..55388c65b99
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
@@ -0,0 +1,2456 @@
+select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb';
+ENGINE COMMENT TRANSACTIONS XA SAVEPOINTS
+ROCKSDB RocksDB storage engine YES YES YES
+drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20;
+drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29;
+drop table if exists t30,t31,t32,t33,t34,t35,t36,t37,t38,t39;
+drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49;
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+#
+# Issue #1: Don't update indexes if index values have not changed
+#
+create table t1 (
+pk int primary key,
+a int,
+b int,
+key(a)
+) engine=rocksdb;
+insert into t1 values
+(1,1,1), (2,2,2), (3,3,3), (4,4,4);
+set @var1=(select variable_value
+from information_schema.global_status
+where variable_name='rocksdb_number_keys_written');
+# Do an update that doesn't change the key 'a'.
+update t1 set b=3334341 where a=2;
+set @var2=(select variable_value
+from information_schema.global_status
+where variable_name='rocksdb_number_keys_written');
+# The following should produce 1
+select @var2 - @var1;
+@var2 - @var1
+1
+# Do an update that sets the key to the same value
+update t1 set a=pk where a=3;
+set @var3=(select variable_value
+from information_schema.global_status
+where variable_name='rocksdb_number_keys_written');
+# We have 'updated' the column to the same value, so the following must return 0:
+select @var3 - @var2;
+@var3 - @var2
+0
+drop table t1;
+create table t0 (a int primary key) engine=rocksdb;
+show create table t0;
+Table Create Table
+t0 CREATE TABLE `t0` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+drop table t0;
+create table t1 (a int primary key, b int) engine=rocksdb;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+select * from t1;
+a b
+1 1
+2 2
+# Check that we can create another table and insert there
+create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb;
+insert into t2 value ('abc','def');
+insert into t2 value ('hijkl','mnopq');
+select * from t2;
+a b
+abc def
+hijkl mnopq
+# Select again from t1 to see that records from different tables don't mix
+select * from t1;
+a b
+1 1
+2 2
+explain select * from t2 where a='no-such-key';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain select * from t2 where a='abc';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 const PRIMARY PRIMARY 12 const # NULL
+select * from t2 where a='abc';
+a b
+abc def
+# Try a composite PK
+create table t3 (
+pk1 int,
+pk2 varchar(10),
+col1 varchar(10),
+primary key(pk1, pk2)
+) engine=rocksdb;
+insert into t3 values (2,'two', 'row#2');
+insert into t3 values (3,'three', 'row#3');
+insert into t3 values (1,'one', 'row#1');
+select * from t3;
+pk1 pk2 col1
+1 one row#1
+2 two row#2
+3 three row#3
+select * from t3 where pk1=3 and pk2='three';
+pk1 pk2 col1
+3 three row#3
+drop table t1, t2, t3;
+#
+# Test blob values
+#
+create table t4 (a int primary key, b blob) engine=rocksdb;
+insert into t4 values (1, repeat('quux-quux', 60));
+insert into t4 values (10, repeat('foo-bar', 43));
+insert into t4 values (5, repeat('foo-bar', 200));
+insert into t4 values (2, NULL);
+select
+a,
+(case a
+when 1 then b=repeat('quux-quux', 60)
+when 10 then b=repeat('foo-bar', 43)
+when 5 then b=repeat('foo-bar', 200)
+when 2 then b is null
+else 'IMPOSSIBLE!' end) as CMP
+from t4;
+a CMP
+1 1
+2 1
+5 1
+10 1
+drop table t4;
+#
+# Test blobs of various sizes
+#
+# TINYBLOB
+create table t5 (a int primary key, b tinyblob) engine=rocksdb;
+insert into t5 values (1, repeat('quux-quux', 6));
+insert into t5 values (10, repeat('foo-bar', 4));
+insert into t5 values (5, repeat('foo-bar', 2));
+select
+a,
+(case a
+when 1 then b=repeat('quux-quux', 6)
+when 10 then b=repeat('foo-bar', 4)
+when 5 then b=repeat('foo-bar', 2)
+else 'IMPOSSIBLE!' end) as CMP
+from t5;
+a CMP
+1 1
+5 1
+10 1
+drop table t5;
+# MEDIUMBLOB
+create table t6 (a int primary key, b mediumblob) engine=rocksdb;
+insert into t6 values (1, repeat('AB', 65000));
+insert into t6 values (10, repeat('bbb', 40000));
+insert into t6 values (5, repeat('foo-bar', 2));
+select
+a,
+(case a
+when 1 then b=repeat('AB', 65000)
+when 10 then b=repeat('bbb', 40000)
+when 5 then b=repeat('foo-bar', 2)
+else 'IMPOSSIBLE!' end) as CMP
+from t6;
+a CMP
+1 1
+5 1
+10 1
+drop table t6;
+# LONGBLOB
+create table t7 (a int primary key, b longblob) engine=rocksdb;
+insert into t7 values (1, repeat('AB', 65000));
+insert into t7 values (10, repeat('bbb', 40000));
+insert into t7 values (5, repeat('foo-bar', 2));
+select
+a,
+(case a
+when 1 then b=repeat('AB', 65000)
+when 10 then b=repeat('bbb', 40000)
+when 5 then b=repeat('foo-bar', 2)
+else 'IMPOSSIBLE!' end) as CMP
+from t7;
+a CMP
+1 1
+5 1
+10 1
+drop table t7;
+#
+# Check if DELETEs work
+#
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+insert into t8 values
+('one', 'eins'),
+('two', 'zwei'),
+('three', 'drei'),
+('four', 'vier'),
+('five', 'funf');
+# Delete by PK
+explain delete from t8 where a='three';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t8 range PRIMARY PRIMARY 12 const # Using where
+delete from t8 where a='three';
+select * from t8;
+a col1
+five funf
+four vier
+one eins
+two zwei
+# Delete while doing a full table scan
+delete from t8 where col1='eins' or col1='vier';
+select * from t8;
+a col1
+five funf
+two zwei
+# delete w/o WHERE:
+delete from t8;
+select * from t8;
+a col1
+#
+# Test UPDATEs
+#
+insert into t8 values
+('one', 'eins'),
+('two', 'zwei'),
+('three', 'drei'),
+('four', 'vier'),
+('five', 'funf');
+update t8 set col1='dva' where a='two';
+update t8 set a='fourAAA' where col1='vier';
+select * from t8;
+a col1
+five funf
+fourAAA vier
+one eins
+three drei
+two dva
+delete from t8;
+#
+# Basic transactions tests
+#
+begin;
+insert into t8 values ('trx1-val1', 'data');
+insert into t8 values ('trx1-val2', 'data');
+rollback;
+select * from t8;
+a col1
+begin;
+insert into t8 values ('trx1-val1', 'data');
+insert into t8 values ('trx1-val2', 'data');
+commit;
+select * from t8;
+a col1
+trx1-val1 data
+trx1-val2 data
+drop table t8;
+#
+# Check if DROP TABLE works
+#
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+select * from t8;
+a col1
+insert into t8 values ('foo','foo');
+drop table t8;
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+select * from t8;
+a col1
+drop table t8;
+#
+# MDEV-3961: Assertion ... on creating a TEMPORARY RocksDB table
+#
+CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY'
+#
+# MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work
+#
+CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t10 VALUES (1),(3);
+CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t11 VALUES (1),(4);
+select * from t10;
+i
+1
+3
+select * from t11;
+j
+1
+4
+EXPLAIN
+SELECT * FROM t10, t11 WHERE i=j;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t10 index PRIMARY PRIMARY 4 NULL # Using index
+1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.i # Using index
+SELECT * FROM t10, t11 WHERE i=j;
+i j
+1 1
+DROP TABLE t10,t11;
+#
+# MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122
+#
+CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t12 VALUES (2),(1);
+SELECT * FROM t12 ORDER BY pk;
+pk
+1
+2
+DROP TABLE t12;
+#
+# MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ...
+#
+create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2;
+drop table t14;
+#
+# MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table
+#
+create table t9 (i int primary key) engine=rocksdb;
+alter table t9 discard tablespace;
+ERROR HY000: Table storage engine for 't9' doesn't have this option
+drop table t9;
+#
+# MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ...
+# on accessing a table after ALTER
+#
+CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t15 VALUES (1,1),(5,2);
+ALTER TABLE t15 DROP COLUMN a;
+DROP TABLE t15;
+#
+# MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table
+#
+create table t16 (pk int primary key, a char(8)) engine=RocksDB;
+insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d');
+update t16 set pk=100, a = 'updated' where a in ('b','c');
+ERROR 23000: Duplicate entry '100' for key 'PRIMARY'
+select * from t16;
+pk a
+1 a
+2 b
+3 c
+4 d
+drop table t16;
+#
+# MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table
+#
+drop table if exists t_very_long_table_name;
+CREATE TABLE `t_very_long_table_name` (
+`c` char(1) NOT NULL,
+`c0` char(0) NOT NULL,
+`c1` char(1) NOT NULL,
+`c20` char(20) NOT NULL,
+`c255` char(255) NOT NULL,
+PRIMARY KEY (`c255`)
+) ENGINE=RocksDB DEFAULT CHARSET=latin1;
+INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255));
+drop table t_very_long_table_name;
+#
+# Test table locking and read-before-write checks.
+#
+create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb;
+insert into t17 values ('row1', 'val1');
+insert into t17 values ('row1', 'val1-try2');
+ERROR 23000: Duplicate entry 'row1' for key 'PRIMARY'
+insert into t17 values ('ROW1', 'val1-try2');
+ERROR 23000: Duplicate entry 'ROW1' for key 'PRIMARY'
+insert into t17 values ('row2', 'val2');
+insert into t17 values ('row3', 'val3');
+# This is ok
+update t17 set pk='row4' where pk='row1';
+# This will try to overwrite another row:
+update t17 set pk='row3' where pk='row2';
+ERROR 23000: Duplicate entry 'row3' for key 'PRIMARY'
+select * from t17;
+pk col1
+row2 val2
+row3 val3
+row4 val1
+#
+# Locking tests
+#
+# First, make sure there's no locking when transactions update different rows
+set autocommit=0;
+update t17 set col1='UPD1' where pk='row2';
+update t17 set col1='UPD2' where pk='row3';
+commit;
+select * from t17;
+pk col1
+row2 UPD1
+row3 UPD2
+row4 val1
+# Check the variable
+show variables like 'rocksdb_lock_wait_timeout';
+Variable_name Value
+rocksdb_lock_wait_timeout 1
+set rocksdb_lock_wait_timeout=2;
+show variables like 'rocksdb_lock_wait_timeout';
+Variable_name Value
+rocksdb_lock_wait_timeout 2
+# Try updating the same row from two transactions
+begin;
+update t17 set col1='UPD2-AA' where pk='row2';
+update t17 set col1='UPD2-BB' where pk='row2';
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t17.PRIMARY
+set rocksdb_lock_wait_timeout=1000;
+update t17 set col1='UPD2-CC' where pk='row2';
+rollback;
+select * from t17 where pk='row2';
+pk col1
+row2 UPD2-CC
+drop table t17;
+#
+# MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable)
+#
+create table t18 (pk int primary key, i int) engine=RocksDB;
+begin;
+select * from t18;
+pk i
+select * from t18 where pk = 1;
+pk i
+connect con1,localhost,root,,;
+insert into t18 values (1,100);
+connection default;
+select * from t18;
+pk i
+select * from t18 where pk = 1;
+pk i
+commit;
+drop table t18;
+#
+# MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY
+#
+create table t19 (pk int primary key, i int) engine=RocksDB;
+insert into t19 values (1,1);
+insert into t19 values (1,100) on duplicate key update i = 102;
+select * from t19;
+pk i
+1 102
+drop table t19;
+# MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY
+create table t20 (pk int primary key, i int) engine=RocksDB;
+insert into t20 values (1,1);
+replace into t20 values (1,100);
+select * from t20;
+pk i
+1 100
+drop table t20;
+#
+# MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT
+#
+create table t21 (v varbinary(16) primary key, i int) engine=RocksDB;
+insert into t21 values ('a',1);
+select * from t21;
+v i
+a 1
+drop table t21;
+#
+# MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE
+#
+CREATE TABLE t22 (a int primary key) ENGINE=RocksDB;
+INSERT INTO t22 VALUES (1),(2);
+CREATE TABLE t23 (b int primary key) ENGINE=RocksDB;
+INSERT INTO t23 SELECT * FROM t22;
+DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a;
+DROP TABLE t22,t23;
+#
+# MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT
+#
+CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB;
+INSERT INTO t24 VALUES (1),(2);
+CREATE TABLE t25 LIKE t24;
+INSERT INTO t25 SELECT * FROM t24;
+DELETE t25.* FROM t24, t25;
+DROP TABLE t24,t25;
+#
+# MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself
+#
+create table t26 (pk int primary key, c char(1)) engine=RocksDB;
+insert into t26 values (1,'a'),(2,'b');
+update t26 set c = 'x' order by pk limit 1;
+delete from t26 order by pk limit 1;
+select * from t26;
+pk c
+2 b
+drop table t26;
+#
+# Test whether SELECT ... FOR UPDATE puts locks
+#
+create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB;
+insert into t27 values
+('row1', 'row1data'),
+('row2', 'row2data'),
+('row3', 'row3data');
+connection con1;
+begin;
+select * from t27 where pk='row3' for update;
+pk col1
+row3 row3data
+connection default;
+set rocksdb_lock_wait_timeout=1;
+update t27 set col1='row2-modified' where pk='row3';
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t27.PRIMARY
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+drop table t27;
+#
+# MDEV-4060: RocksDB: Assertion `! trx->batch' fails in
+#
+create table t28 (pk int primary key, a int) engine=RocksDB;
+insert into t28 values (1,10),(2,20);
+begin;
+update t28 set a = 100 where pk = 3;
+rollback;
+select * from t28;
+pk a
+1 10
+2 20
+drop table t28;
+#
+# Secondary indexes
+#
+create table t30 (
+pk varchar(16) not null primary key,
+key1 varchar(16) not null,
+col1 varchar(16) not null,
+key(key1)
+) engine=rocksdb;
+insert into t30 values ('row1', 'row1-key', 'row1-data');
+insert into t30 values ('row2', 'row2-key', 'row2-data');
+insert into t30 values ('row3', 'row3-key', 'row3-data');
+explain
+select * from t30 where key1='row2-key';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 ref key1 key1 18 const # Using index condition
+select * from t30 where key1='row2-key';
+pk key1 col1
+row2 row2-key row2-data
+explain
+select * from t30 where key1='row1';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 ref key1 key1 18 const # Using index condition
+# This will produce nothing:
+select * from t30 where key1='row1';
+pk key1 col1
+explain
+select key1 from t30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 index NULL key1 18 NULL # Using index
+select key1 from t30;
+key1
+row1-key
+row2-key
+row3-key
+# Create a duplicate record
+insert into t30 values ('row2a', 'row2-key', 'row2a-data');
+# Can we see it?
+select * from t30 where key1='row2-key';
+pk key1 col1
+row2 row2-key row2-data
+row2a row2-key row2a-data
+delete from t30 where pk='row2';
+select * from t30 where key1='row2-key';
+pk key1 col1
+row2a row2-key row2a-data
+#
+# Range scans on secondary index
+#
+delete from t30;
+insert into t30 values
+('row1', 'row1-key', 'row1-data'),
+('row2', 'row2-key', 'row2-data'),
+('row3', 'row3-key', 'row3-data'),
+('row4', 'row4-key', 'row4-data'),
+('row5', 'row5-key', 'row5-data');
+analyze table t30;
+Table Op Msg_type Msg_text
+test.t30 analyze status OK
+explain
+select * from t30 where key1 <='row3-key';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range key1 key1 18 NULL # Using index condition
+select * from t30 where key1 <='row3-key';
+pk key1 col1
+row1 row1-key row1-data
+row2 row2-key row2-data
+row3 row3-key row3-data
+explain
+select * from t30 where key1 between 'row2-key' and 'row4-key';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range key1 key1 18 NULL # Using index condition
+select * from t30 where key1 between 'row2-key' and 'row4-key';
+pk key1 col1
+row2 row2-key row2-data
+row3 row3-key row3-data
+row4 row4-key row4-data
+explain
+select * from t30 where key1 in ('row2-key','row4-key');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range key1 key1 18 NULL # Using index condition
+select * from t30 where key1 in ('row2-key','row4-key');
+pk key1 col1
+row2 row2-key row2-data
+row4 row4-key row4-data
+explain
+select key1 from t30 where key1 in ('row2-key','row4-key');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range key1 key1 18 NULL # Using where; Using index
+select key1 from t30 where key1 in ('row2-key','row4-key');
+key1
+row2-key
+row4-key
+explain
+select * from t30 where key1 > 'row1-key' and key1 < 'row4-key';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range key1 key1 18 NULL # Using index condition
+select * from t30 where key1 > 'row1-key' and key1 < 'row4-key';
+pk key1 col1
+row2 row2-key row2-data
+row3 row3-key row3-data
+explain
+select * from t30 order by key1 limit 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 index NULL key1 18 NULL # NULL
+select * from t30 order by key1 limit 3;
+pk key1 col1
+row1 row1-key row1-data
+row2 row2-key row2-data
+row3 row3-key row3-data
+explain
+select * from t30 order by key1 desc limit 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 index NULL key1 18 NULL # NULL
+select * from t30 order by key1 desc limit 3;
+pk key1 col1
+row5 row5-key row5-data
+row4 row4-key row4-data
+row3 row3-key row3-data
+#
+# Range scans on primary key
+#
+explain
+select * from t30 where pk <='row3';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where
+select * from t30 where pk <='row3';
+pk key1 col1
+row1 row1-key row1-data
+row2 row2-key row2-data
+row3 row3-key row3-data
+explain
+select * from t30 where pk between 'row2' and 'row4';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where
+select * from t30 where pk between 'row2' and 'row4';
+pk key1 col1
+row2 row2-key row2-data
+row3 row3-key row3-data
+row4 row4-key row4-data
+explain
+select * from t30 where pk in ('row2','row4');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where
+select * from t30 where pk in ('row2','row4');
+pk key1 col1
+row2 row2-key row2-data
+row4 row4-key row4-data
+explain
+select * from t30 order by pk limit 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t30 index NULL PRIMARY 18 NULL # NULL
+select * from t30 order by pk limit 3;
+pk key1 col1
+row1 row1-key row1-data
+row2 row2-key row2-data
+row3 row3-key row3-data
+drop table t30;
+#
+# MDEV-3841: RocksDB: Reading by PK prefix does not work
+#
+create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB;
+insert into t31 values (1,10,100),(2,20,200);
+select * from t31 where i = 1;
+i j k
+1 10 100
+select * from t31 where j = 10;
+i j k
+1 10 100
+select * from t31 where k = 100;
+i j k
+1 10 100
+select * from t31 where i = 1 and j = 10;
+i j k
+1 10 100
+select * from t31 where i = 1 and k = 100;
+i j k
+1 10 100
+select * from t31 where j = 10 and k = 100;
+i j k
+1 10 100
+select * from t31 where i = 1 and j = 10 and k = 100;
+i j k
+1 10 100
+drop table t31;
+#
+# MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work
+#
+create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB;
+insert into t32 values
+(1,10,100,''),
+(2,20,200,'');
+select * from t32 where i = 1 and j = 10 and k = 100;
+i j k a
+1 10 100
+update t32 set a = 'updated' where i = 1 and j = 10 and k = 100;
+select * from t32;
+i j k a
+1 10 100 updated
+2 20 200
+drop table t32;
+#
+# MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. DESC
+#
+CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB;
+INSERT INTO t33 VALUES (1,'a'),(2,'b');
+SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC;
+pk a
+2 b
+1 a
+DROP TABLE t33;
+#
+# MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index
+#
+# Unique indexes can be created, but uniqueness won't be enforced
+create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB;
+drop table t33;
+#
+# MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range
+#
+CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t34 VALUES (10),(11);
+SELECT pk FROM t34 WHERE pk > 5 AND pk < 15;
+pk
+10
+11
+SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15;
+pk
+10
+11
+SELECT pk FROM t34 WHERE pk > 5;
+pk
+10
+11
+SELECT pk FROM t34 WHERE pk < 15;
+pk
+10
+11
+drop table t34;
+#
+# MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC
+#
+create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB;
+insert into t35 values (1,1,1,1,1),(2,2,2,2,2);
+select * from t35 where a = 1 and c = 1 and d = 1 order by e desc;
+a b c d e
+1 1 1 1 1
+drop table t35;
+#
+# MDEV-4084: RocksDB: Wrong result on IN subquery with index
+#
+CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB;
+INSERT INTO t36 VALUES (1,10),(2,20);
+SELECT 3 IN ( SELECT a FROM t36 );
+3 IN ( SELECT a FROM t36 )
+0
+drop table t36;
+#
+# MDEV-4084: RocksDB: Wrong result on IN subquery with index
+#
+CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b))
+ENGINE=RocksDB;
+INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y');
+SELECT MAX(a) FROM t37 WHERE a < 100;
+MAX(a)
+20
+DROP TABLE t37;
+#
+# MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC
+#
+CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB;
+INSERT INTO t38 VALUES (1,10), (2,20);
+SELECT i FROM t38 WHERE i NOT IN (8) ORDER BY i DESC;
+i
+20
+10
+drop table t38;
+#
+# MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys
+# with a multi-part key and ORDER BY .. DESC
+#
+CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB;
+INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y');
+CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t41 VALUES (1),(2);
+SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC;
+pk1 a b pk2
+DROP TABLE t40,t41;
+#
+# MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL
+#
+CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB;
+INSERT INTO t42 VALUES (1, NULL),(2, 8);
+SELECT ( 3 ) NOT IN ( SELECT a FROM t42 );
+( 3 ) NOT IN ( SELECT a FROM t42 )
+NULL
+DROP TABLE t42;
+#
+# MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on
+# DELETE with search by NULL-able secondary key ...
+#
+CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB;
+INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x');
+UPDATE t43 SET pk = 10 WHERE a = 8;
+REPLACE INTO t43 ( a ) VALUES ( 8 );
+Warnings:
+Warning 1364 Field 'pk' doesn't have a default value
+REPLACE INTO t43 ( b ) VALUES ( 'y' );
+Warnings:
+Warning 1364 Field 'pk' doesn't have a default value
+SELECT * FROM t43 WHERE a = 8;
+pk a b
+10 8 g
+DELETE FROM t43 WHERE a = 8;
+DROP TABLE t43;
+#
+# Basic AUTO_INCREMENT tests
+#
+create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb;
+insert into t44 (col1) values ('row1');
+insert into t44 (col1) values ('row2');
+insert into t44 (col1) values ('row3');
+select * from t44;
+pk col1
+1 row1
+2 row2
+3 row3
+drop table t44;
+#
+# ALTER TABLE tests
+#
+create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb;
+insert into t45 values (1, 'row1');
+insert into t45 values (2, 'row2');
+alter table t45 rename t46;
+select * from t46;
+pk col1
+1 row1
+2 row2
+drop table t46;
+drop table t45;
+ERROR 42S02: Unknown table 'test.t45'
+#
+# Check Bulk loading
+# Bulk loading used to overwrite existing data
+# Now it fails if there is data overlap with what
+# already exists
+#
+show variables like 'rocksdb%';
+Variable_name Value
+rocksdb_access_hint_on_compaction_start 1
+rocksdb_advise_random_on_open ON
+rocksdb_allow_concurrent_memtable_write OFF
+rocksdb_allow_mmap_reads OFF
+rocksdb_allow_mmap_writes OFF
+rocksdb_allow_os_buffer ON
+rocksdb_background_sync OFF
+rocksdb_base_background_compactions 1
+rocksdb_block_cache_size 8388608
+rocksdb_block_restart_interval 16
+rocksdb_block_size 4096
+rocksdb_block_size_deviation 10
+rocksdb_bulk_load OFF
+rocksdb_bulk_load_size 1000
+rocksdb_bytes_per_sync 0
+rocksdb_cache_index_and_filter_blocks ON
+rocksdb_checksums_pct 100
+rocksdb_collect_sst_properties ON
+rocksdb_commit_in_the_middle OFF
+rocksdb_compact_cf
+rocksdb_compaction_readahead_size 0
+rocksdb_compaction_sequential_deletes 0
+rocksdb_compaction_sequential_deletes_count_sd OFF
+rocksdb_compaction_sequential_deletes_file_size 0
+rocksdb_compaction_sequential_deletes_window 0
+rocksdb_create_checkpoint
+rocksdb_create_if_missing ON
+rocksdb_create_missing_column_families OFF
+rocksdb_datadir ./.rocksdb
+rocksdb_db_write_buffer_size 0
+rocksdb_debug_optimizer_no_zero_cardinality ON
+rocksdb_default_cf_options
+rocksdb_delete_obsolete_files_period_micros 21600000000
+rocksdb_disable_2pc ON
+rocksdb_disabledatasync OFF
+rocksdb_enable_bulk_load_api ON
+rocksdb_enable_thread_tracking OFF
+rocksdb_enable_write_thread_adaptive_yield OFF
+rocksdb_error_if_exists OFF
+rocksdb_flush_memtable_on_analyze ON
+rocksdb_force_flush_memtable_now OFF
+rocksdb_force_index_records_in_range 0
+rocksdb_hash_index_allow_collision ON
+rocksdb_index_type kBinarySearch
+rocksdb_info_log_level error_level
+rocksdb_is_fd_close_on_exec ON
+rocksdb_keep_log_file_num 1000
+rocksdb_lock_scanned_rows OFF
+rocksdb_lock_wait_timeout 1
+rocksdb_log_file_time_to_roll 0
+rocksdb_manifest_preallocation_size 4194304
+rocksdb_max_background_compactions 1
+rocksdb_max_background_flushes 1
+rocksdb_max_log_file_size 0
+rocksdb_max_manifest_file_size 18446744073709551615
+rocksdb_max_open_files -1
+rocksdb_max_row_locks 1073741824
+rocksdb_max_subcompactions 1
+rocksdb_max_total_wal_size 0
+rocksdb_merge_buf_size 67108864
+rocksdb_merge_combine_read_size 1073741824
+rocksdb_new_table_reader_for_compaction_inputs OFF
+rocksdb_no_block_cache OFF
+rocksdb_override_cf_options
+rocksdb_paranoid_checks ON
+rocksdb_pause_background_work ON
+rocksdb_perf_context_level 0
+rocksdb_pin_l0_filter_and_index_blocks_in_cache ON
+rocksdb_rate_limiter_bytes_per_sec 0
+rocksdb_read_free_rpl_tables
+rocksdb_records_in_range 50
+rocksdb_rpl_skip_tx_api OFF
+rocksdb_seconds_between_stat_computes 3600
+rocksdb_signal_drop_index_thread OFF
+rocksdb_skip_bloom_filter_on_read OFF
+rocksdb_skip_fill_cache OFF
+rocksdb_skip_unique_check OFF
+rocksdb_skip_unique_check_tables .*
+rocksdb_stats_dump_period_sec 600
+rocksdb_store_checksums OFF
+rocksdb_strict_collation_check OFF
+rocksdb_strict_collation_exceptions
+rocksdb_table_cache_numshardbits 6
+rocksdb_table_stats_sampling_pct 10
+rocksdb_unsafe_for_binlog OFF
+rocksdb_use_adaptive_mutex OFF
+rocksdb_use_fsync OFF
+rocksdb_validate_tables 1
+rocksdb_verify_checksums OFF
+rocksdb_wal_bytes_per_sync 0
+rocksdb_wal_dir
+rocksdb_wal_recovery_mode 2
+rocksdb_wal_size_limit_mb 0
+rocksdb_wal_ttl_seconds 0
+rocksdb_whole_key_filtering ON
+rocksdb_write_disable_wal OFF
+rocksdb_write_ignore_missing_column_families OFF
+rocksdb_write_sync OFF
+create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
+insert into t47 values (1, 'row1');
+insert into t47 values (2, 'row2');
+set rocksdb_bulk_load=1;
+insert into t47 values (3, 'row3'),(4, 'row4');
+set rocksdb_bulk_load=0;
+select * from t47;
+pk col1
+1 row1
+2 row2
+3 row3
+4 row4
+drop table t47;
+#
+# Fix TRUNCATE over empty table (transaction is committed when it wasn't
+# started)
+#
+create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb;
+set autocommit=0;
+truncate table t48;
+set autocommit=1;
+drop table t48;
+#
+# MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded
+#
+create table t49 (pk int primary key, a int) engine=RocksDB;
+insert into t49 values (1,10),(2,20);
+begin;
+update t49 set a = 100 where pk = 1;
+connect con1,localhost,root,,;
+set rocksdb_lock_wait_timeout=5000;
+set @var1= to_seconds(now());
+update t49 set a = 1000 where pk = 1;
+connect con2,localhost,root,,;
+kill query $con1_id;
+connection con1;
+ERROR 70100: Query execution was interrupted
+set @var2= to_seconds(now());
+"[Jay Edgar] I've updated this query to help determine why it is sometimes failing"
+"(t13541934). If you get an error here (i.e. not 'passed') notify me."
+select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result';
+result
+passed
+connection default;
+disconnect con1;
+commit;
+drop table t49;
+#
+# Index-only tests for INT-based columns
+#
+create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,2,2);
+insert into t1 values (-5,-5,-5);
+# INT column uses index-only:
+explain
+select key1 from t1 where key1=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref key1 key1 5 const # Using index
+select key1 from t1 where key1=2;
+key1
+2
+select key1 from t1 where key1=-5;
+key1
+-5
+drop table t1;
+create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb;
+insert into t2 values (1,1,1), (2,2,2);
+# INT UNSIGNED column uses index-only:
+explain
+select key1 from t2 where key1=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref key1 key1 5 const # Using index
+select key1 from t2 where key1=2;
+key1
+2
+drop table t2;
+create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb;
+insert into t3 values (1,1,1), (2,2,2);
+# BIGINT uses index-only:
+explain
+select key1 from t3 where key1=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ref key1 key1 9 const # Using index
+select key1 from t3 where key1=2;
+key1
+2
+drop table t3;
+#
+# Index-only reads for string columns
+#
+create table t1 (
+pk int primary key,
+key1 char(10) character set binary,
+col1 int,
+key (key1)
+) engine=rocksdb;
+insert into t1 values(1, 'one',11), (2,'two',22);
+explain
+select key1 from t1 where key1='one';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index
+# The following will produce no rows. This looks like a bug,
+# but it is actually correct behavior. Binary strings are end-padded
+# with the \0 character (not with spaces), and the comparison does not
+# ignore the trailing \0 bytes.
+select key1 from t1 where key1='one';
+key1
+explain
+select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index
+select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0';
+hex(key1)
+6F6E6500000000000000
+drop table t1;
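+#
+# Illustrative sketch (an assumption, not part of the recorded output above):
+# because CHAR(n) CHARACTER SET binary values are \0-padded, an equality
+# match can also be written by padding the search value, e.g. with RPAD()
+# and CHAR(0); the table name t1_sketch is hypothetical:
+#
+#   create table t1_sketch (pk int primary key, key1 char(10) character set binary,
+#                           key(key1)) engine=rocksdb;
+#   insert into t1_sketch values (1, 'one');
+#   -- expected to return 6F6E6500000000000000, matching the padded value:
+#   select hex(key1) from t1_sketch where key1 = rpad('one', 10, char(0));
+#   drop table t1_sketch;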
+create table t2 (
+pk int primary key,
+key1 char(10) collate latin1_bin,
+col1 int,
+key (key1)
+) engine=rocksdb;
+insert into t2 values(1, 'one',11), (2,'two',22);
+explain
+select key1 from t2 where key1='one';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref key1 key1 11 const # Using where; Using index
+select key1 from t2 where key1='one';
+key1
+one
+drop table t2;
+create table t3 (
+pk int primary key,
+key1 char(10) collate utf8_bin,
+col1 int,
+key (key1)
+) engine=rocksdb;
+insert into t3 values(1, 'one',11), (2,'two',22);
+explain
+select key1 from t3 where key1='one';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ref key1 key1 31 const # Using where; Using index
+select key1 from t3 where key1='one';
+key1
+one
+drop table t3;
+# a VARCHAR column
+create table t4 (
+pk int primary key,
+key1 varchar(10) collate latin1_bin,
+key(key1)
+) engine=rocksdb;
+insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five');
+explain
+select key1 from t4 where key1='two';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref key1 key1 13 const # Using where; Using index
+select key1 from t4 where key1='two';
+key1
+two
+select key1 from t4 where key1='fifty-five';
+key1
+fifty-five
+explain
+select key1 from t4 where key1 between 's' and 'u';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 range key1 key1 13 NULL # Using where; Using index
+select key1 from t4 where key1 between 's' and 'u';
+key1
+threee
+two
+drop table t4;
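+#
+# Worked note on the key_len values in the EXPLAIN output above (standard
+# MySQL key_len accounting assumed): key_len = data bytes, plus 1 byte for a
+# nullable column, plus 2 length bytes for VARCHAR:
+#   char(10) binary / latin1_bin : 10*1 + 1     = 11
+#   char(10) utf8_bin            : 10*3 + 1     = 31
+#   varchar(10) latin1_bin       : 10*1 + 2 + 1 = 13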
+#
+# MDEV-4305: RocksDB: Assertion `((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len
+#
+CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8);
+SELECT * FROM t1 WHERE i != 3 OR pk1 > 9;
+pk1 pk2 i
+1 test1 6
+2 test2 8
+DROP TABLE t1;
+#
+# MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1),(2,2);
+BEGIN;
+UPDATE t1 SET i = 100;
+connect con1,localhost,root,,test;
+DELETE IGNORE FROM t1 ORDER BY i;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+disconnect con1;
+connection default;
+COMMIT;
+DROP TABLE t1;
+#
+# MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field
+# (testcase only)
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'foo'), (2,'bar');
+DROP TABLE t1;
+#
+# MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin;
+INSERT INTO t1 VALUES (1,'h','h');
+SELECT * FROM t1;
+pk c1 c2
+1 h h
+SELECT c1 FROM t1;
+c1
+h
+DROP TABLE t1;
+#
+# MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. FOR UPDATE
+#
+CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,4),(2,5);
+SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE;
+1
+DROP TABLE t2;
+#
+# MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e');
+SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h';
+MAX( pk )
+NULL
+DROP TABLE t1;
+#
+# MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field
+#
+create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB;
+insert into t1 values ('2',2),('6',6);
+select * from t1 where c = i;
+c i
+2 2
+6 6
+select * from t1 ignore index (i) where c = i;
+c i
+2 2
+6 6
+drop table t1;
+#
+# Test statement rollback inside a transaction
+#
+create table t1 (pk varchar(12) primary key) engine=rocksdb;
+insert into t1 values ('old-val1'),('old-val2');
+create table t2 (pk varchar(12) primary key) engine=rocksdb;
+insert into t2 values ('new-val2'),('old-val1');
+begin;
+insert into t1 values ('new-val1');
+insert into t1 select * from t2;
+ERROR 23000: Duplicate entry 'old-val1' for key 'PRIMARY'
+commit;
+select * from t1;
+pk
+new-val1
+old-val1
+old-val2
+drop table t1, t2;
+#
+# MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. LIMIT:
+# rows that should be deleted remain in the table
+#
+CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB;
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t1 (pk) VALUES (NULL),(NULL);
+BEGIN;
+INSERT INTO t2 (pk) VALUES (NULL),(NULL);
+INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL);
+SELECT * FROM t1 ORDER BY pk LIMIT 9;
+pk
+1
+2
+3
+4
+5
+6
+7
+8
+affected rows: 8
+DELETE FROM t1 ORDER BY pk LIMIT 9;
+affected rows: 8
+SELECT * FROM t1 ORDER BY pk LIMIT 9;
+pk
+affected rows: 0
+DROP TABLE t1,t2;
+#
+# MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on
+# inserting into a varchar column
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+DROP TABLE t1;
+#
+# MDEV-4061: RocksDB: Changes from an interrupted query are still applied
+#
+create table t1 (pk int primary key, a int) engine=RocksDB;
+insert into t1 values (1,10),(2,20);
+set autocommit = 1;
+update t1 set a = sleep(100) where pk = 1;
+connect con1,localhost,root,,;
+kill query $con_id;
+connection default;
+ERROR 70100: Query execution was interrupted
+select * from t1;
+pk a
+1 10
+2 20
+disconnect con1;
+drop table t1;
+#
+# MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB;
+INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0);
+SELECT * FROM t1;
+pk a b
+1 157 0
+2 1898 -504403
+SELECT pk FROM t1;
+pk
+1
+2
+SELECT * FROM t1 WHERE a != 97;
+pk a b
+1 157 0
+2 1898 -504403
+DROP TABLE t1;
+#
+# Test @@rocksdb_max_row_locks
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB;
+set @a=-1;
+insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100;
+set @tmp1= @@rocksdb_max_row_locks;
+set rocksdb_max_row_locks= 20;
+update t1 set a=a+10;
+ERROR HY000: Internal error: Operation aborted: Number of locks held by the transaction exceeded @@rocksdb_max_row_locks
+DROP TABLE t1;
+#
+# Test AUTO_INCREMENT behavior problem,
+# "explicit insert into an auto-inc column is not noticed by RocksDB"
+#
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+insert into t1 values (null);
+insert into t1 values (null);
+select * from t1;
+i
+1
+2
+drop table t1;
+create table t2 (i int primary key auto_increment) engine=RocksDB;
+insert into t2 values (1);
+select * from t2;
+i
+1
+# this used to fail: the RocksDB engine did not notice the explicit use of '1' above
+insert into t2 values (null);
+select * from t2;
+i
+1
+2
+# but then this succeeds, so the previous statement must have incremented the next-number counter
+insert into t2 values (null);
+select * from t2;
+i
+1
+2
+3
+drop table t2;
+#
+# Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown
+#
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+insert into t1 values (null);
+insert into t1 values (null);
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+insert into t1 values (null);
+select * from t1;
+i
+1
+2
+3
+drop table t1;
+#
+# Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0
+#
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+insert into t1 values (null),(null);
+show table status like 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 1000 0 # 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL
+drop table t1;
+#
+# Fix Issue #4: Crash when using pseudo-unique keys
+#
+CREATE TABLE t1 (
+i INT,
+t TINYINT,
+s SMALLINT,
+m MEDIUMINT,
+b BIGINT,
+pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY,
+UNIQUE KEY b_t (b,t)
+) ENGINE=rocksdb;
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+b+t
+9
+11
+25
+27
+29
+207
+10107
+100000000000000100
+1000000000000000100
+DROP TABLE t1;
+#
+# Fix issue #5: Transaction rollback doesn't undo all changes.
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (id int auto_increment primary key, value int) engine=rocksdb;
+set autocommit=0;
+begin;
+set @a:=0;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
+rollback;
+select count(*) from t1;
+count(*)
+0
+set autocommit=1;
+drop table t0, t1;
+#
+# Check status variables
+#
+show status like 'rocksdb%';
+Variable_name Value
+rocksdb_rows_deleted #
+rocksdb_rows_inserted #
+rocksdb_rows_read #
+rocksdb_rows_updated #
+rocksdb_system_rows_deleted #
+rocksdb_system_rows_inserted #
+rocksdb_system_rows_read #
+rocksdb_system_rows_updated #
+rocksdb_block_cache_add #
+rocksdb_block_cache_data_hit #
+rocksdb_block_cache_data_miss #
+rocksdb_block_cache_filter_hit #
+rocksdb_block_cache_filter_miss #
+rocksdb_block_cache_hit #
+rocksdb_block_cache_index_hit #
+rocksdb_block_cache_index_miss #
+rocksdb_block_cache_miss #
+rocksdb_block_cachecompressed_hit #
+rocksdb_block_cachecompressed_miss #
+rocksdb_bloom_filter_prefix_checked #
+rocksdb_bloom_filter_prefix_useful #
+rocksdb_bloom_filter_useful #
+rocksdb_bytes_read #
+rocksdb_bytes_written #
+rocksdb_compact_read_bytes #
+rocksdb_compact_write_bytes #
+rocksdb_compaction_key_drop_new #
+rocksdb_compaction_key_drop_obsolete #
+rocksdb_compaction_key_drop_user #
+rocksdb_flush_write_bytes #
+rocksdb_getupdatessince_calls #
+rocksdb_git_date #
+rocksdb_git_hash #
+rocksdb_l0_num_files_stall_micros #
+rocksdb_l0_slowdown_micros #
+rocksdb_memtable_compaction_micros #
+rocksdb_memtable_hit #
+rocksdb_memtable_miss #
+rocksdb_no_file_closes #
+rocksdb_no_file_errors #
+rocksdb_no_file_opens #
+rocksdb_num_iterators #
+rocksdb_number_block_not_compressed #
+rocksdb_number_deletes_filtered #
+rocksdb_number_keys_read #
+rocksdb_number_keys_updated #
+rocksdb_number_keys_written #
+rocksdb_number_merge_failures #
+rocksdb_number_multiget_bytes_read #
+rocksdb_number_multiget_get #
+rocksdb_number_multiget_keys_read #
+rocksdb_number_reseeks_iteration #
+rocksdb_number_sst_entry_delete #
+rocksdb_number_sst_entry_merge #
+rocksdb_number_sst_entry_other #
+rocksdb_number_sst_entry_put #
+rocksdb_number_sst_entry_singledelete #
+rocksdb_number_stat_computes #
+rocksdb_number_superversion_acquires #
+rocksdb_number_superversion_cleanups #
+rocksdb_number_superversion_releases #
+rocksdb_rate_limit_delay_millis #
+rocksdb_sequence_number #
+rocksdb_snapshot_conflict_errors #
+rocksdb_wal_bytes #
+rocksdb_wal_synced #
+rocksdb_write_other #
+rocksdb_write_self #
+rocksdb_write_timedout #
+rocksdb_write_wal #
+select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%';
+VARIABLE_NAME
+ROCKSDB_ROWS_DELETED
+ROCKSDB_ROWS_INSERTED
+ROCKSDB_ROWS_READ
+ROCKSDB_ROWS_UPDATED
+ROCKSDB_SYSTEM_ROWS_DELETED
+ROCKSDB_SYSTEM_ROWS_INSERTED
+ROCKSDB_SYSTEM_ROWS_READ
+ROCKSDB_SYSTEM_ROWS_UPDATED
+ROCKSDB_BLOCK_CACHE_ADD
+ROCKSDB_BLOCK_CACHE_DATA_HIT
+ROCKSDB_BLOCK_CACHE_DATA_MISS
+ROCKSDB_BLOCK_CACHE_FILTER_HIT
+ROCKSDB_BLOCK_CACHE_FILTER_MISS
+ROCKSDB_BLOCK_CACHE_HIT
+ROCKSDB_BLOCK_CACHE_INDEX_HIT
+ROCKSDB_BLOCK_CACHE_INDEX_MISS
+ROCKSDB_BLOCK_CACHE_MISS
+ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
+ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
+ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
+ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
+ROCKSDB_BLOOM_FILTER_USEFUL
+ROCKSDB_BYTES_READ
+ROCKSDB_BYTES_WRITTEN
+ROCKSDB_COMPACT_READ_BYTES
+ROCKSDB_COMPACT_WRITE_BYTES
+ROCKSDB_COMPACTION_KEY_DROP_NEW
+ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE
+ROCKSDB_COMPACTION_KEY_DROP_USER
+ROCKSDB_FLUSH_WRITE_BYTES
+ROCKSDB_GETUPDATESSINCE_CALLS
+ROCKSDB_GIT_DATE
+ROCKSDB_GIT_HASH
+ROCKSDB_L0_NUM_FILES_STALL_MICROS
+ROCKSDB_L0_SLOWDOWN_MICROS
+ROCKSDB_MEMTABLE_COMPACTION_MICROS
+ROCKSDB_MEMTABLE_HIT
+ROCKSDB_MEMTABLE_MISS
+ROCKSDB_NO_FILE_CLOSES
+ROCKSDB_NO_FILE_ERRORS
+ROCKSDB_NO_FILE_OPENS
+ROCKSDB_NUM_ITERATORS
+ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED
+ROCKSDB_NUMBER_DELETES_FILTERED
+ROCKSDB_NUMBER_KEYS_READ
+ROCKSDB_NUMBER_KEYS_UPDATED
+ROCKSDB_NUMBER_KEYS_WRITTEN
+ROCKSDB_NUMBER_MERGE_FAILURES
+ROCKSDB_NUMBER_MULTIGET_BYTES_READ
+ROCKSDB_NUMBER_MULTIGET_GET
+ROCKSDB_NUMBER_MULTIGET_KEYS_READ
+ROCKSDB_NUMBER_RESEEKS_ITERATION
+ROCKSDB_NUMBER_SST_ENTRY_DELETE
+ROCKSDB_NUMBER_SST_ENTRY_MERGE
+ROCKSDB_NUMBER_SST_ENTRY_OTHER
+ROCKSDB_NUMBER_SST_ENTRY_PUT
+ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE
+ROCKSDB_NUMBER_STAT_COMPUTES
+ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
+ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
+ROCKSDB_NUMBER_SUPERVERSION_RELEASES
+ROCKSDB_RATE_LIMIT_DELAY_MILLIS
+ROCKSDB_SEQUENCE_NUMBER
+ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
+ROCKSDB_WAL_BYTES
+ROCKSDB_WAL_SYNCED
+ROCKSDB_WRITE_OTHER
+ROCKSDB_WRITE_SELF
+ROCKSDB_WRITE_TIMEDOUT
+ROCKSDB_WRITE_WAL
+# RocksDB-SE's status variables are global internally
+# but they are shown as both session and global, like InnoDB's status vars.
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%';
+VARIABLE_NAME
+ROCKSDB_ROWS_DELETED
+ROCKSDB_ROWS_INSERTED
+ROCKSDB_ROWS_READ
+ROCKSDB_ROWS_UPDATED
+ROCKSDB_SYSTEM_ROWS_DELETED
+ROCKSDB_SYSTEM_ROWS_INSERTED
+ROCKSDB_SYSTEM_ROWS_READ
+ROCKSDB_SYSTEM_ROWS_UPDATED
+ROCKSDB_BLOCK_CACHE_ADD
+ROCKSDB_BLOCK_CACHE_DATA_HIT
+ROCKSDB_BLOCK_CACHE_DATA_MISS
+ROCKSDB_BLOCK_CACHE_FILTER_HIT
+ROCKSDB_BLOCK_CACHE_FILTER_MISS
+ROCKSDB_BLOCK_CACHE_HIT
+ROCKSDB_BLOCK_CACHE_INDEX_HIT
+ROCKSDB_BLOCK_CACHE_INDEX_MISS
+ROCKSDB_BLOCK_CACHE_MISS
+ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
+ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
+ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
+ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
+ROCKSDB_BLOOM_FILTER_USEFUL
+ROCKSDB_BYTES_READ
+ROCKSDB_BYTES_WRITTEN
+ROCKSDB_COMPACT_READ_BYTES
+ROCKSDB_COMPACT_WRITE_BYTES
+ROCKSDB_COMPACTION_KEY_DROP_NEW
+ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE
+ROCKSDB_COMPACTION_KEY_DROP_USER
+ROCKSDB_FLUSH_WRITE_BYTES
+ROCKSDB_GETUPDATESSINCE_CALLS
+ROCKSDB_GIT_DATE
+ROCKSDB_GIT_HASH
+ROCKSDB_L0_NUM_FILES_STALL_MICROS
+ROCKSDB_L0_SLOWDOWN_MICROS
+ROCKSDB_MEMTABLE_COMPACTION_MICROS
+ROCKSDB_MEMTABLE_HIT
+ROCKSDB_MEMTABLE_MISS
+ROCKSDB_NO_FILE_CLOSES
+ROCKSDB_NO_FILE_ERRORS
+ROCKSDB_NO_FILE_OPENS
+ROCKSDB_NUM_ITERATORS
+ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED
+ROCKSDB_NUMBER_DELETES_FILTERED
+ROCKSDB_NUMBER_KEYS_READ
+ROCKSDB_NUMBER_KEYS_UPDATED
+ROCKSDB_NUMBER_KEYS_WRITTEN
+ROCKSDB_NUMBER_MERGE_FAILURES
+ROCKSDB_NUMBER_MULTIGET_BYTES_READ
+ROCKSDB_NUMBER_MULTIGET_GET
+ROCKSDB_NUMBER_MULTIGET_KEYS_READ
+ROCKSDB_NUMBER_RESEEKS_ITERATION
+ROCKSDB_NUMBER_SST_ENTRY_DELETE
+ROCKSDB_NUMBER_SST_ENTRY_MERGE
+ROCKSDB_NUMBER_SST_ENTRY_OTHER
+ROCKSDB_NUMBER_SST_ENTRY_PUT
+ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE
+ROCKSDB_NUMBER_STAT_COMPUTES
+ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES
+ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS
+ROCKSDB_NUMBER_SUPERVERSION_RELEASES
+ROCKSDB_RATE_LIMIT_DELAY_MILLIS
+ROCKSDB_SEQUENCE_NUMBER
+ROCKSDB_SNAPSHOT_CONFLICT_ERRORS
+ROCKSDB_WAL_BYTES
+ROCKSDB_WAL_SYNCED
+ROCKSDB_WRITE_OTHER
+ROCKSDB_WRITE_SELF
+ROCKSDB_WRITE_TIMEDOUT
+ROCKSDB_WRITE_WAL
+#
+# Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+pk int primary key,
+col1 varchar(255),
+key(col1)
+) engine=rocksdb;
+insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0;
+select * from t1 where pk=3;
+pk col1
+3 123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-
+drop table t0, t1;
+#
+# Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+CREATE TABLE t1 (
+id1 bigint(20) unsigned NOT NULL DEFAULT '0',
+id2 bigint(20) unsigned NOT NULL DEFAULT '0',
+link_type bigint(20) unsigned NOT NULL DEFAULT '0',
+visibility tinyint(3) NOT NULL DEFAULT '0',
+data varchar(255) NOT NULL DEFAULT '',
+time bigint(20) unsigned NOT NULL DEFAULT '0',
+version int(11) unsigned NOT NULL DEFAULT '0',
+PRIMARY KEY (link_type,id1,id2)
+) engine=rocksdb;
+insert into t1 select a,a,a,1,a,a,a from t0;
+alter table t1 add index id1_type (id1,link_type,visibility,time,version,data);
+select * from t1 where id1 = 3;
+id1 id2 link_type visibility data time version
+3 3 3 1 3 3 3
+drop table t0,t1;
+#
+# Test column families
+#
+create table t1 (
+pk int primary key,
+col1 int,
+col2 int,
+key(col1) comment 'cf3',
+key(col2) comment 'cf4'
+) engine=rocksdb;
+insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+explain
+select * from t1 where col1=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref col1 col1 5 const # NULL
+select * from t1 where col1=2;
+pk col1 col2
+2 2 2
+explain
+select * from t1 where col2=3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref col2 col2 5 const # NULL
+select * from t1 where col2=3;
+pk col1 col2
+3 3 3
+select * from t1 where pk=4;
+pk col1 col2
+4 4 4
+drop table t1;
+#
+# Try primary key in a non-default CF:
+#
+create table t1 (
+pk int,
+col1 int,
+col2 int,
+key(col1) comment 'cf3',
+key(col2) comment 'cf4',
+primary key (pk) comment 'cf5'
+) engine=rocksdb;
+insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+explain
+select * from t1 where col1=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref col1 col1 5 const # NULL
+select * from t1 where col1=2;
+pk col1 col2
+2 2 2
+select * from t1 where pk=4;
+pk col1 col2
+4 4 4
+drop table t1;
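+#
+# Illustrative sketch (an assumption, not recorded output): the column
+# families named in the index comments above can be listed through
+# information_schema.rocksdb_cf_options, in the same way the
+# rocksdb_cf_options test further below does:
+#
+#   select distinct cf_name from information_schema.rocksdb_cf_options
+#   where cf_name in ('cf3', 'cf4', 'cf5');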
+#
+# Issue #15: SIGSEGV from reading in blob data
+#
+CREATE TABLE t1 (
+id int not null,
+blob_col text,
+PRIMARY KEY (id)
+) ENGINE=ROCKSDB CHARSET=latin1;
+INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col);
+INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col);
+DROP TABLE t1;
+#
+# Issue #17: Automatic per-index column families
+#
+create table t1 (
+id int not null,
+key1 int,
+PRIMARY KEY (id),
+index (key1) comment '$per_index_cf'
+) engine=rocksdb;
+# Same CF ids with different CF flags
+create table t1_err (
+id int not null,
+key1 int,
+PRIMARY KEY (id),
+index (key1) comment 'test.t1.key1'
+) engine=rocksdb;
+ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag.
+create table t1_err (
+id int not null,
+key1 int,
+PRIMARY KEY (id),
+index (key1) comment 'test.t1.key2'
+) engine=rocksdb;
+drop table t1_err;
+# Unfortunately there is no way to check which column family everything goes to
+insert into t1 values (1,1);
+select * from t1;
+id key1
+1 1
+# Check that ALTER and RENAME are disallowed
+alter table t1 add col2 int;
+ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF'
+rename table t1 to t2;
+ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF'
+drop table t1;
+# Check detection of typos in $per_index_cf
+create table t1 (
+id int not null,
+key1 int,
+PRIMARY KEY (id),
+index (key1) comment '$per_idnex_cf'
+)engine=rocksdb;
+ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf'
+#
+# Issue #22: SELECT ... FOR UPDATE takes a long time
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+id1 int,
+id2 int,
+value1 int,
+value2 int,
+primary key(id1, id2) COMMENT 'new_column_family',
+key(id2)
+) engine=rocksdb default charset=latin1 collate=latin1_bin;
+insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B;
+explain
+select * from t1 where id1=30 and value1=30 for update;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref PRIMARY PRIMARY 4 const # Using where
+set @var1=(select variable_value
+from information_schema.global_status
+where variable_name='rocksdb_number_keys_read');
+select * from t1 where id1=3 and value1=3 for update;
+id1 id2 value1 value2
+set @var2=(select variable_value
+from information_schema.global_status
+where variable_name='rocksdb_number_keys_read');
+# The following must return true (before the fix, the difference was 70):
+select if((@var2 - @var1) < 30, 1, @var2-@var1);
+if((@var2 - @var1) < 30, 1, @var2-@var1)
+1
+drop table t0,t1;
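+#
+# Illustrative sketch (not recorded output): the same counter-delta pattern
+# works for any RocksDB status variable; @before/@after and the statement
+# under test are placeholders:
+#
+#   set @before= (select variable_value from information_schema.global_status
+#                 where variable_name='rocksdb_number_keys_read');
+#   -- <statement under test goes here>
+#   set @after= (select variable_value from information_schema.global_status
+#                where variable_name='rocksdb_number_keys_read');
+#   select @after - @before as keys_read_by_statement;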
+#
+# Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting
+#
+create table t1 (id int primary key, value int) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+# The following must not use 'Using filesort':
+explain select * from t1 ORDER BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL # NULL
+drop table t1;
+#
+# Issue #26: Index-only scans for DATETIME and TIMESTAMP
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+# Try a DATETIME column:
+create table t1 (
+pk int auto_increment primary key,
+kp1 datetime,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 12:34:56', interval a day), a from t0;
+select * from t1;
+pk kp1 kp2 col1
+1 2015-01-01 12:34:56 0 NULL
+2 2015-01-02 12:34:56 1 NULL
+3 2015-01-03 12:34:56 2 NULL
+4 2015-01-04 12:34:56 3 NULL
+5 2015-01-05 12:34:56 4 NULL
+6 2015-01-06 12:34:56 5 NULL
+7 2015-01-07 12:34:56 6 NULL
+8 2015-01-08 12:34:56 7 NULL
+9 2015-01-09 12:34:56 8 NULL
+10 2015-01-10 12:34:56 9 NULL
+# This must show 'Using index'
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range kp1 kp1 6 NULL # Using where; Using index
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+kp1 kp2
+2015-01-01 12:34:56 0
+2015-01-02 12:34:56 1
+2015-01-03 12:34:56 2
+2015-01-04 12:34:56 3
+2015-01-05 12:34:56 4
+# Now, the same with NOT NULL column
+create table t2 (
+pk int auto_increment primary key,
+kp1 datetime not null,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+# This must show 'Using index'
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using where; Using index
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+kp1 kp2
+2015-01-01 12:34:56 0
+2015-01-02 12:34:56 1
+2015-01-03 12:34:56 2
+2015-01-04 12:34:56 3
+2015-01-05 12:34:56 4
+drop table t1,t2;
+# Try a DATE column:
+create table t1 (
+pk int auto_increment primary key,
+kp1 date,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01', interval a day), a from t0;
+select * from t1;
+pk kp1 kp2 col1
+1 2015-01-01 0 NULL
+2 2015-01-02 1 NULL
+3 2015-01-03 2 NULL
+4 2015-01-04 3 NULL
+5 2015-01-05 4 NULL
+6 2015-01-06 5 NULL
+7 2015-01-07 6 NULL
+8 2015-01-08 7 NULL
+9 2015-01-09 8 NULL
+10 2015-01-10 9 NULL
+# This must show 'Using index'
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01' and '2015-01-05';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01' and '2015-01-05';
+kp1 kp2
+2015-01-01 0
+2015-01-02 1
+2015-01-03 2
+2015-01-04 3
+2015-01-05 4
+# Now, the same with NOT NULL column
+create table t2 (
+pk int auto_increment primary key,
+kp1 date not null,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+# This must show 'Using index'
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+kp1 kp2
+2015-01-01 0
+2015-01-02 1
+2015-01-03 2
+2015-01-04 3
+2015-01-05 4
+drop table t1,t2;
+#
+# Try a TIMESTAMP column:
+#
+create table t1 (
+pk int auto_increment primary key,
+kp1 timestamp,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 12:34:56', interval a day), a from t0;
+select * from t1;
+pk kp1 kp2 col1
+1 2015-01-01 12:34:56 0 NULL
+2 2015-01-02 12:34:56 1 NULL
+3 2015-01-03 12:34:56 2 NULL
+4 2015-01-04 12:34:56 3 NULL
+5 2015-01-05 12:34:56 4 NULL
+6 2015-01-06 12:34:56 5 NULL
+7 2015-01-07 12:34:56 6 NULL
+8 2015-01-08 12:34:56 7 NULL
+9 2015-01-09 12:34:56 8 NULL
+10 2015-01-10 12:34:56 9 NULL
+# This must show 'Using index'
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range kp1 kp1 5 NULL # Using where; Using index
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+kp1 kp2
+2015-01-01 12:34:56 0
+2015-01-02 12:34:56 1
+2015-01-03 12:34:56 2
+2015-01-04 12:34:56 3
+2015-01-05 12:34:56 4
+# Now, the same with NOT NULL column
+create table t2 (
+pk int auto_increment primary key,
+kp1 timestamp not null,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+# This must show 'Using index'
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 4 NULL # Using where; Using index
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+kp1 kp2
+2015-01-01 12:34:56 0
+2015-01-02 12:34:56 1
+2015-01-03 12:34:56 2
+2015-01-04 12:34:56 3
+2015-01-05 12:34:56 4
+drop table t1,t2;
+#
+# Try a TIME column:
+#
+create table t1 (
+pk int auto_increment primary key,
+kp1 time,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 09:00:00', interval a minute), a from t0;
+select * from t1;
+pk kp1 kp2 col1
+1 09:00:00 0 NULL
+2 09:01:00 1 NULL
+3 09:02:00 2 NULL
+4 09:03:00 3 NULL
+5 09:04:00 4 NULL
+6 09:05:00 5 NULL
+7 09:06:00 6 NULL
+8 09:07:00 7 NULL
+9 09:08:00 8 NULL
+10 09:09:00 9 NULL
+# This must show 'Using index'
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+kp1 kp2
+09:01:00 1
+09:02:00 2
+09:03:00 3
+09:04:00 4
+09:05:00 5
+# Now, the same with NOT NULL column
+create table t2 (
+pk int auto_increment primary key,
+kp1 time not null,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+# This must show 'Using index'
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+kp1 kp2
+09:01:00 1
+09:02:00 2
+09:03:00 3
+09:04:00 4
+09:05:00 5
+drop table t1,t2;
+#
+# Try a YEAR column:
+#
+create table t1 (
+pk int auto_increment primary key,
+kp1 year,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2) select 2015+a, a from t0;
+select * from t1;
+pk kp1 kp2 col1
+1 2015 0 NULL
+2 2016 1 NULL
+3 2017 2 NULL
+4 2018 3 NULL
+5 2019 4 NULL
+6 2020 5 NULL
+7 2021 6 NULL
+8 2022 7 NULL
+9 2023 8 NULL
+10 2024 9 NULL
+# This must show 'Using index'
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2016' and '2020';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range kp1 kp1 2 NULL # Using where; Using index
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2016' and '2020';
+kp1 kp2
+2016 1
+2017 2
+2018 3
+2019 4
+2020 5
+# Now, the same with NOT NULL column
+create table t2 (
+pk int auto_increment primary key,
+kp1 year not null,
+kp2 int,
+col1 int,
+key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+# This must show 'Using index'
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2016' and '2020';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 1 NULL # Using where; Using index
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2016' and '2020';
+kp1 kp2
+2016 1
+2017 2
+2018 3
+2019 4
+2020 5
+drop table t1,t2;
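+#
+# Worked note on the key_len values in the EXPLAIN output above (standard
+# MySQL key_len accounting assumed): key_len = storage size of the temporal
+# type, plus 1 byte when the column is nullable:
+#   datetime  : 5 bytes -> key_len 6 nullable / 5 not null
+#   date      : 3 bytes -> key_len 4 nullable / 3 not null
+#   timestamp : 4 bytes -> key_len 5 nullable / 4 not null
+#   time      : 3 bytes -> key_len 4 nullable / 3 not null
+#   year      : 1 byte  -> key_len 2 nullable / 1 not null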
+#
+# Issue #57: Release row locks on statement errors
+#
+create table t1 (id int primary key) engine=rocksdb;
+insert into t1 values (1), (2), (3);
+begin;
+insert into t1 values (4), (5), (6);
+insert into t1 values (7), (8), (2), (9);
+ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
+select * from t1;
+id
+1
+2
+3
+4
+5
+6
+begin;
+select * from t1 where id=4 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+select * from t1 where id=7 for update;
+id
+select * from t1 where id=9 for update;
+id
+drop table t1;
+# Index on blob column
+SET @old_mode = @@sql_mode;
+SET sql_mode = 'strict_all_tables';
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb;
+drop table t1;
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb;
+insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde');
+select * from t1;
+a b c
+1 1abcde 1abcde
+2 2abcde 2abcde
+3 3abcde 3abcde
+explain select * from t1 where b like '1%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 1258 NULL # Using where
+explain select b, a from t1 where b like '1%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range b b 1258 NULL # Using where
+update t1 set b= '12345' where b = '2abcde';
+select * from t1;
+a b c
+1 1abcde 1abcde
+2 12345 2abcde
+3 3abcde 3abcde
+drop table t1;
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb;
+ERROR 42000: Specified key was too long; max key length is 2048 bytes
+SET sql_mode = @old_mode;
+drop table t0;
+#
+# Fix assertion failure (attempt to overrun the key buffer) for prefix indexes
+#
+create table t1 (
+pk int primary key,
+col1 varchar(100),
+key (col1(10))
+) engine=rocksdb;
+insert into t1 values (1, repeat('0123456789', 9));
+drop table t1;
+#
+# Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*)
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk;
+INSERT INTO t1 VALUES (1,1);
+REPLACE INTO t1 VALUES (1,2);
+SELECT * FROM t1;
+pk f1
+1 2
+DROP TABLE t1, t2;
+#
+# Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error
+#
+create table t1(a int primary key);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t2 (
+a varchar(32) primary key,
+col1 int
+) engine=rocksdb;
+insert into t2
+select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B;
+update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004';
+drop table t1,t2;
+#
+# Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed
+#
+CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,1,1,1,1,1,1);
+SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6;
+c1 c2 c3 c4 c5 c6 c7
+EXPLAIN SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 50 Using where
+drop table t2;
+#
+# Issue #135: register transaction was not being called for statement
+#
+DROP DATABASE IF EXISTS test_db;
+CREATE DATABASE test_db;
+CREATE TABLE test_db.t1(c1 INT PRIMARY KEY);
+LOCK TABLES test_db.t1 READ;
+SET AUTOCOMMIT=0;
+SELECT c1 FROM test_db.t1;
+c1
+START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;
+DROP DATABASE test_db;
+#
+# Issue #143: Split rocksdb_bulk_load option into two
+#
+CREATE TABLE t1 (id int primary key, value int) engine=RocksDB;
+SET rocksdb_skip_unique_check=1;
+INSERT INTO t1 VALUES(1, 1);
+INSERT INTO t1 VALUES(1, 2);
+INSERT INTO t1 VALUES(1, 3);
+SELECT * FROM t1;
+id value
+REPLACE INTO t1 VALUES(4, 4);
+ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: REPLACE INTO t1 VALUES(4, 4)
+INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1;
+ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1
+TRUNCATE TABLE t1;
+SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size;
+SET rocksdb_skip_unique_check=0;
+SET rocksdb_commit_in_the_middle=1;
+SET rocksdb_bulk_load_size=10;
+BEGIN;
+INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
+(11),(12),(13),(14),(15),(16),(17),(18),(19);
+ROLLBACK;
+SELECT * FROM t1;
+id value
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+6 NULL
+7 NULL
+8 NULL
+9 NULL
+10 NULL
+INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15);
+BEGIN;
+UPDATE t1 SET value=100;
+ROLLBACK;
+SELECT * FROM t1;
+id value
+1 100
+2 100
+3 100
+4 100
+5 100
+6 100
+7 100
+8 100
+9 100
+10 100
+11 NULL
+12 NULL
+13 NULL
+14 NULL
+15 NULL
+BEGIN;
+DELETE FROM t1;
+ROLLBACK;
+SELECT * FROM t1;
+id value
+11 NULL
+12 NULL
+13 NULL
+14 NULL
+15 NULL
+SET rocksdb_commit_in_the_middle=0;
+SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size;
+DROP TABLE t1;
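+#
+# Worked note on the rollbacks above: with rocksdb_commit_in_the_middle=1 and
+# rocksdb_bulk_load_size=10, work is implicitly committed every 10 rows, so
+# ROLLBACK only undoes the last, incomplete batch:
+#   19-row INSERT : floor(19/10)*10 = 10 rows survive the ROLLBACK
+#   15-row UPDATE : 10 rows keep value=100, the last 5 are rolled back
+#   15-row DELETE : 10 rows are deleted, the last 5 remain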
+#
+# Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance()
+#
+CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY;
+INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar");
+Warnings:
+Warning 1366 Incorrect integer value: 'long varchar' for column 'data' at row 2
+Warning 1366 Incorrect integer value: 'varchar' for column 'data' at row 3
+Warning 1366 Incorrect integer value: 'long long long varchar' for column 'data' at row 4
+create TABLE t1 (a int not null, b int not null, primary key(a,b));
+INSERT INTO t1 VALUES (1,1);
+SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4));
+a
+1
+1
+1
+1
+DROP TABLE t1, t2;
+#
+# Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't release
+#
+create table r1 (id int auto_increment primary key, value int);
+insert into r1 (id) values (null), (null), (null), (null), (null);
+create table r2 like r1;
+show create table r2;
+Table Create Table
+r2 CREATE TABLE `r2` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `value` int(11) DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+begin;
+insert into r1 values (10, 1);
+commit;
+begin;
+select * from r1;
+id value
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+10 1
+commit;
+drop table r1, r2;
+create table r1 (id int auto_increment, value int, index i(id));
+insert into r1 (id) values (null), (null), (null), (null), (null);
+create table r2 like r1;
+show create table r2;
+Table Create Table
+r2 CREATE TABLE `r2` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `value` int(11) DEFAULT NULL,
+ KEY `i` (`id`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+begin;
+insert into r1 values (10, 1);
+commit;
+begin;
+select * from r1;
+id value
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+10 1
+commit;
+drop table r1, r2;
+#
+# Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT
+#
+CREATE TABLE t1(c1 INT);
+lock TABLE t1 read local;
+SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM'');
+1
+set AUTOCOMMIT=0;
+start transaction with consistent snapshot;
+SELECT * FROM t1;
+c1
+COMMIT;
+UNLOCK TABLES;
+DROP TABLE t1;
+#
+# Issue#213 Crash on LOCK TABLES + partitions
+#
+CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1(a)VALUES (20010101101010.999949);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+lock tables t1 write,t1 as t0 write,t1 as t2 write;
+SELECT a FROM t1 ORDER BY a;
+a
+2147483647
+truncate t1;
+INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020');
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'a' at row 2
+Warning 1264 Out of range value for column 'b' at row 2
+UNLOCK TABLES;
+DROP TABLE t1;
+#
+# Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type
+# (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue
+# needs a datatype that doesn't support index-only)
+#
+CREATE TABLE t1(
+c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL,
+c2 varchar(10) character set utf8 collate utf8_general_ci,
+c3 INT,
+INDEX idx(c1,c2)
+);
+INSERT INTO t1 VALUES ('c1-val1','c2-val1',5);
+INSERT INTO t1 VALUES ('c1-val2','c2-val3',6);
+INSERT INTO t1 VALUES ('c1-val3','c2-val3',7);
+SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC;
+c1 c2 c3
+c1-val3 c2-val3 7
+c1-val1 c2-val1 5
+explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range idx idx 32 NULL # Using where
+drop table t1;
+#
+# Issue#267: MyRocks issue with no matching min/max row and count(*)
+#
+CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2));
+INSERT INTO t1 VALUES(1,null);
+INSERT INTO t1 VALUES(2,null);
+SELECT count(*) as total_rows, min(c2) as min_value FROM t1;
+total_rows min_value
+2 NULL
+DROP TABLE t1;
+#
+# Issue#263: MyRocks auto_increment skips values if you insert a negative value
+#
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
+INSERT INTO t1 VALUES(0),(-1),(0);
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL
+SELECT * FROM t1;
+a
+-1
+1
+2
+DROP TABLE t1;
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
+INSERT INTO t1 VALUES(0),(10),(0);
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 12 NULL NULL NULL latin1_swedish_ci NULL
+SELECT * FROM t1;
+a
+1
+10
+11
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result
new file mode 100644
index 00000000000..09d251ccbe6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result
@@ -0,0 +1,61 @@
+create table t1 (a int,
+primary key (a) comment 'cf1') engine=rocksdb;
+create table t2 (a int,
+primary key (a) comment 'cf2') engine=rocksdb;
+create table t3 (a int,
+primary key (a) comment 'z') engine=rocksdb;
+insert into t1 values (1);
+insert into t2 values (2);
+insert into t3 values (2);
+
+Default options for all column families:
+
+select cf_name, option_type, value
+from information_schema.rocksdb_cf_options
+where option_type in ('WRITE_BUFFER_SIZE',
+'TARGET_FILE_SIZE_BASE',
+'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
+order by cf_name, option_type;
+cf_name option_type value
+cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+cf1 TARGET_FILE_SIZE_BASE 1048576
+cf1 WRITE_BUFFER_SIZE 12582912
+cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+cf2 TARGET_FILE_SIZE_BASE 1048576
+cf2 WRITE_BUFFER_SIZE 12582912
+default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+default TARGET_FILE_SIZE_BASE 1048576
+default WRITE_BUFFER_SIZE 12582912
+z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+z TARGET_FILE_SIZE_BASE 1048576
+z WRITE_BUFFER_SIZE 12582912
+__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+__system__ TARGET_FILE_SIZE_BASE 1048576
+__system__ WRITE_BUFFER_SIZE 12582912
+
+Individualized options for column families:
+
+select cf_name, option_type, value
+from information_schema.rocksdb_cf_options
+where option_type in ('WRITE_BUFFER_SIZE',
+'TARGET_FILE_SIZE_BASE',
+'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
+order by cf_name, option_type;
+cf_name option_type value
+cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+cf1 TARGET_FILE_SIZE_BASE 2097152
+cf1 WRITE_BUFFER_SIZE 8388608
+cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8
+cf2 TARGET_FILE_SIZE_BASE 1048576
+cf2 WRITE_BUFFER_SIZE 16777216
+default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+default TARGET_FILE_SIZE_BASE 1048576
+default WRITE_BUFFER_SIZE 12582912
+z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+z TARGET_FILE_SIZE_BASE 4194304
+z WRITE_BUFFER_SIZE 12582912
+__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10
+__system__ TARGET_FILE_SIZE_BASE 1048576
+__system__ WRITE_BUFFER_SIZE 12582912
+
+drop table t1,t2,t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result
new file mode 100644
index 00000000000..1c85343cabb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result
@@ -0,0 +1,120 @@
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+pk int primary key,
+a int not null,
+b int not null,
+key(a) comment 'rev:foo',
+key(b) comment 'bar'
+) engine=rocksdb;
+insert into t1 select a,a,a from t0;
+insert into t1 select a+10,a+10,a+10 from t0;
+# Primary key is not in a reverse-ordered CF, so full table scan
+# returns rows in ascending order:
+select * from t1;
+pk a b
+0 0 0
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+11 11 11
+12 12 12
+13 13 13
+14 14 14
+15 15 15
+16 16 16
+17 17 17
+18 18 18
+19 19 19
+explain
+select a from t1 order by a limit 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 4 NULL # Using index
+select a from t1 order by a limit 5;
+a
+0
+1
+2
+3
+4
+explain
+select b from t1 order by b limit 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL b 4 NULL # Using index
+select a from t1 order by a limit 5;
+a
+0
+1
+2
+3
+4
+explain
+select a from t1 order by a desc limit 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 4 NULL # Using index
+select a from t1 order by a desc limit 5;
+a
+19
+18
+17
+16
+15
+explain
+select b from t1 order by b desc limit 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL b 4 NULL # Using index
+select b from t1 order by b desc limit 5;
+b
+19
+18
+17
+16
+15
+drop table t1;
+#
+# Try a primary key in a reverse-ordered CF.
+#
+create table t2 (
+pk int,
+a int not null,
+primary key(pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t2 select a,a from t0;
+# Primary key is in a reverse-ordered CF, so full table scan
+# returns rows in descending order:
+select * from t2;
+pk a
+9 9
+8 8
+7 7
+6 6
+5 5
+4 4
+3 3
+2 2
+1 1
+0 0
+set autocommit=0;
+begin;
+delete from t2 where a=3 or a=7;
+select * from t2;
+pk a
+9 9
+8 8
+6 6
+5 5
+4 4
+2 2
+1 1
+0 0
+rollback;
+set autocommit=1;
+drop table t2;
+drop table t0;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result
new file mode 100644
index 00000000000..a8908edada5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result
@@ -0,0 +1,129 @@
+set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums;
+set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums;
+set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct;
+drop table if exists t1,t2,t3;
+show variables like 'rocksdb_%checksum%';
+Variable_name Value
+rocksdb_checksums_pct 100
+rocksdb_store_checksums OFF
+rocksdb_verify_checksums OFF
+create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t1 values (1,1,1),(2,2,2),(3,3,3);
+check table t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+ CHECKTABLE t1: Checking table t1
+ CHECKTABLE t1: Checking index a
+ CHECKTABLE t1: ... 3 index entries checked (0 had checksums)
+ CHECKTABLE t1: Checking index b
+ CHECKTABLE t1: ... 3 index entries checked (0 had checksums)
+ CHECKTABLE t1: 0 table records had checksums
+drop table t1;
+set session rocksdb_store_checksums=on;
+create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t2 values (1,1,1),(2,2,2),(3,3,3);
+check table t2;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+ CHECKTABLE t2: Checking table t2
+ CHECKTABLE t2: Checking index a
+ CHECKTABLE t2: ... 3 index entries checked (3 had checksums)
+ CHECKTABLE t2: Checking index b
+ CHECKTABLE t2: ... 3 index entries checked (3 had checksums)
+ CHECKTABLE t2: 3 table records had checksums
+# Now, make a table that has both rows with checksums and without
+create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t3 values (1,1,1),(2,2,2),(3,3,3);
+set session rocksdb_store_checksums=off;
+update t3 set b=3 where a=2;
+set session rocksdb_store_checksums=on;
+check table t3;
+Table Op Msg_type Msg_text
+test.t3 check status OK
+ CHECKTABLE t3: Checking table t3
+ CHECKTABLE t3: Checking index a
+ CHECKTABLE t3: ... 3 index entries checked (3 had checksums)
+ CHECKTABLE t3: Checking index b
+ CHECKTABLE t3: ... 3 index entries checked (2 had checksums)
+ CHECKTABLE t3: 2 table records had checksums
+set session rocksdb_store_checksums=on;
+set session rocksdb_checksums_pct=5;
+create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+check table t4;
+Table Op Msg_type Msg_text
+test.t4 check status OK
+10000 index entries had around 500 checksums
+10000 index entries had around 500 checksums
+Around 500 table records had checksums
+set session rocksdb_checksums_pct=100;
+#
+# Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches.
+#
+insert into mtr.test_suppressions values
+('Checksum mismatch in key of key-value pair for index'),
+('Checksum mismatch in value of key-value pair for index'),
+('Data with incorrect checksum');
+# 1. Start with mismatch in key checksum of the PK.
+set session debug= "+d,myrocks_simulate_bad_pk_checksum1";
+set session rocksdb_verify_checksums=off;
+select * from t3;
+pk a b
+1 1 1
+2 2 3
+3 3 3
+set session rocksdb_verify_checksums=on;
+select * from t3;
+ERROR HY000: Internal error: Record checksum mismatch
+select * from t4;
+ERROR HY000: Internal error: Record checksum mismatch
+set session debug= "-d,myrocks_simulate_bad_pk_checksum1";
+# 2. Continue with mismatch in pk value checksum.
+set session debug= "+d,myrocks_simulate_bad_pk_checksum2";
+set session rocksdb_verify_checksums=off;
+select * from t3;
+pk a b
+1 1 1
+2 2 3
+3 3 3
+set session rocksdb_verify_checksums=on;
+select * from t3;
+ERROR HY000: Internal error: Record checksum mismatch
+select * from t4;
+ERROR HY000: Internal error: Record checksum mismatch
+set session debug= "-d,myrocks_simulate_bad_pk_checksum2";
+# 3. Check if we catch checksum mismatches for secondary indexes
+explain
+select * from t3 force index(a) where a<4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 range a a 5 NULL # Using index condition
+select * from t3 force index(a) where a<4;
+pk a b
+1 1 1
+2 2 3
+3 3 3
+set session debug= "+d,myrocks_simulate_bad_key_checksum1";
+select * from t3 force index(a) where a<4;
+ERROR HY000: Internal error: Record checksum mismatch
+select * from t4 force index(a) where a<1000000;
+ERROR HY000: Internal error: Record checksum mismatch
+set session debug= "-d,myrocks_simulate_bad_key_checksum1";
+# 4. The same for index-only reads?
+explain
+select a from t3 force index(a) where a<4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 index a a 5 NULL # Using where; Using index
+select a from t3 force index(a) where a<4;
+a
+1
+2
+3
+set session debug= "+d,myrocks_simulate_bad_key_checksum1";
+select a from t3 force index(a) where a<4;
+ERROR HY000: Internal error: Record checksum mismatch
+select a from t4 force index(a) where a<1000000;
+ERROR HY000: Internal error: Record checksum mismatch
+set session debug= "-d,myrocks_simulate_bad_key_checksum1";
+set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums;
+set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums;
+set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct;
+drop table t2,t3,t4;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result
new file mode 100644
index 00000000000..9d6d368c686
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result
@@ -0,0 +1,56 @@
+SET debug_sync='RESET';
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT);
+INSERT INTO t1 VALUES(1,1), (2,2), (3,3);
+SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go';
+SELECT * FROM t1 order by t1.pk ASC FOR UPDATE;
+SET debug_sync='now WAIT_FOR parked';
+DELETE FROM t1 WHERE pk = 1;
+SET debug_sync='now SIGNAL go';
+pk a
+2 2
+3 3
+set debug_sync='RESET';
+drop table t1;
+SET debug_sync='RESET';
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT);
+INSERT INTO t1 VALUES(1,1), (2,2), (3,3);
+SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go';
+SELECT * FROM t1 order by t1.pk DESC FOR UPDATE;
+SET debug_sync='now WAIT_FOR parked';
+DELETE FROM t1 WHERE pk = 3;
+SET debug_sync='now SIGNAL go';
+pk a
+2 2
+1 1
+set debug_sync='RESET';
+drop table t1;
+SET debug_sync='RESET';
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT);
+INSERT INTO t1 VALUES(1,1), (2,2), (3,3);
+SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go';
+SELECT * FROM t1 order by t1.pk ASC FOR UPDATE;
+SET debug_sync='now WAIT_FOR parked';
+DELETE FROM t1 WHERE pk = 1;
+SET debug_sync='now SIGNAL go';
+pk a
+2 2
+3 3
+set debug_sync='RESET';
+drop table t1;
+SET debug_sync='RESET';
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT);
+INSERT INTO t1 VALUES(1,1), (2,2), (3,3);
+SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go';
+SELECT * FROM t1 order by t1.pk DESC FOR UPDATE;
+SET debug_sync='now WAIT_FOR parked';
+DELETE FROM t1 WHERE pk = 3;
+SET debug_sync='now SIGNAL go';
+pk a
+2 2
+1 1
+set debug_sync='RESET';
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result
new file mode 100644
index 00000000000..7910e98b198
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result
@@ -0,0 +1,2 @@
+Check for the number of MANIFEST files
+1
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
new file mode 100644
index 00000000000..51841f174af
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
@@ -0,0 +1,227 @@
+select * from information_schema.engines where engine = 'rocksdb';
+ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+ROCKSDB DEFAULT RocksDB storage engine YES YES YES
+drop table if exists t0,t1,t2,t3;
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int primary key,
+kp1 int,
+kp2 int,
+col1 int,
+key (kp1,kp2) comment 'cf1'
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+# Try a basic case:
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+pk kp1 kp2 col1
+2 2 2 2
+4 4 4 4
+6 6 6 6
+8 8 8 8
+10 10 10 10
+# Check that ICP doesnt work for columns where column value
+# cant be restored from mem-comparable form:
+create table t3 (
+pk int primary key,
+kp1 int,
+kp2 varchar(10) collate utf8_general_ci,
+col1 int,
+key (kp1,kp2) comment 'cf1'
+) engine=rocksdb;
+insert into t3 select a,a/10,a,a from t1;
+# This must not use ICP:
+explain
+select * from t3 where kp1=3 and kp2 like '%foo%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ref kp1 kp1 5 const # Using where
+explain format=json
+select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "range",
+ "possible_keys": [
+ "kp1"
+ ],
+ "key": "kp1",
+ "used_key_parts": [
+ "kp1"
+ ],
+ "key_length": "5",
+ "rows": 1000,
+ "filtered": 100,
+ "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))",
+ "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')"
+ }
+ }
+}
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%'))
+# Check that we handle the case where out-of-range is encountered sooner
+# than matched index condition
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk kp1 kp2 col1
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk kp1 kp2 col1
+# Try doing backwards scans
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+pk kp1 kp2 col1
+10 10 10 10
+8 8 8 8
+6 6 6 6
+4 4 4 4
+2 2 2 2
+explain
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+pk kp1 kp2 col1
+998 998 998 998
+996 996 996 996
+994 994 994 994
+992 992 992 992
+explain
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+pk kp1 kp2 col1
+drop table t0,t1,t2,t3;
+#
+# Check how ICP affects counters
+#
+# First, some preparations
+#
+create procedure save_read_stats()
+select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT
+into @rr, @rq, @rif, @rin
+from information_schema.table_statistics
+where table_name='t4' and table_schema=database();
+create procedure get_read_stats()
+select
+ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin
+from information_schema.table_statistics
+where table_name='t4' and table_schema=database();
+create table t4 (
+id int,
+id1 int,
+id2 int,
+value int,
+value2 varchar(100),
+primary key (id),
+key id1_id2 (id1, id2) comment 'cf1'
+) engine=rocksdb charset=latin1 collate latin1_bin;
+insert into t4 values
+(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5),
+(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10);
+#
+# Now, the test itself
+#
+call save_read_stats();
+call get_read_stats();
+ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin
+0 0 0 0
+# ============== index-only query ==============
+explain
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index
+call save_read_stats();
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id1 id2
+1 1
+call get_read_stats();
+ROWS_READ-@rr 10
+ROWS_REQUESTED-@rq 11
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 9
+# ============== Query without ICP ==============
+set optimizer_switch='index_condition_pushdown=off';
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id id1 id2 value value2
+1 1 1 1 1
+call get_read_stats();
+ROWS_READ-@rr 10
+ROWS_REQUESTED-@rq 11
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 9
+# ============== Query with ICP ==============
+set optimizer_switch='index_condition_pushdown=on';
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id id1 id2 value value2
+1 1 1 1 1
+call get_read_stats();
+ROWS_READ-@rr 1
+ROWS_REQUESTED-@rq 1
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 0
+drop table t4;
+drop procedure save_read_stats;
+drop procedure get_read_stats;
+#
+# Issue #67: Inefficient index condition pushdown
+#
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+pk int not null primary key,
+key1 bigint(20) unsigned,
+col1 int,
+key (key1)
+) engine=rocksdb;
+insert into t1
+select
+A.a+10*B.a+100*C.a,
+A.a+10*B.a+100*C.a,
+1234
+from t0 A, t0 B, t0 C;
+set @count=0;
+explain
+select * from t1 where key1=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref key1 key1 9 const # Using index condition
+set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context
+where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT');
+select * from t1 where key1=1;
+pk key1 col1
+1 1 1234
+set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context
+where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT');
+# The following must be =1, or in any case not 999:
+select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment";
+INTERNAL_KEY_SKIPPED_COUNT increment
+1
+drop table t0,t1;
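A point worth keeping in mind when reading rocksdb_icp.result: a condition is only pushed down if MyRocks can rebuild the column value from its mem-comparable index encoding, which is why the predicate on the case-insensitive varchar kp2 stays in attached_condition while the integer predicates move into index_condition. The probe below is a hedged sketch of how to check this for any candidate index (table name and data are placeholders, and the chosen access path also depends on how much data is loaded):

# Sketch; expect the MOD() predicate under "index_condition" and the LIKE
# under "attached_condition", mirroring the t3 plan above.
CREATE TABLE icp_probe (
  pk    INT PRIMARY KEY,
  k_int INT,                                    # decodable, eligible for ICP
  k_ci  VARCHAR(10) COLLATE utf8_general_ci,    # not decodable, never pushed
  KEY (k_int, k_ci)
) ENGINE=RocksDB;
EXPLAIN FORMAT=JSON
SELECT * FROM icp_probe
WHERE k_int BETWEEN 2 AND 4 AND MOD(k_int,3)=0 AND k_ci LIKE '%foo%';
DROP TABLE icp_probe;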
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
new file mode 100644
index 00000000000..d368da16a60
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
@@ -0,0 +1,193 @@
+select * from information_schema.engines where engine = 'rocksdb';
+ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+ROCKSDB DEFAULT RocksDB storage engine YES YES YES
+drop table if exists t0,t1,t2,t3;
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int primary key,
+kp1 int,
+kp2 int,
+col1 int,
+key (kp1,kp2) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+# Try a basic case:
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+pk kp1 kp2 col1
+2 2 2 2
+4 4 4 4
+6 6 6 6
+8 8 8 8
+10 10 10 10
+# Check that ICP doesnt work for columns where column value
+# cant be restored from mem-comparable form:
+create table t3 (
+pk int primary key,
+kp1 int,
+kp2 varchar(10) collate utf8_general_ci,
+col1 int,
+key (kp1,kp2) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t3 select a,a/10,a,a from t1;
+# This must not use ICP:
+explain
+select * from t3 where kp1=3 and kp2 like '%foo%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ref kp1 kp1 5 const # Using where
+explain format=json
+select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "range",
+ "possible_keys": [
+ "kp1"
+ ],
+ "key": "kp1",
+ "used_key_parts": [
+ "kp1"
+ ],
+ "key_length": "5",
+ "rows": 1000,
+ "filtered": 100,
+ "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))",
+ "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')"
+ }
+ }
+}
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%'))
+# Check that we handle the case where out-of-range is encountered sooner
+# than matched index condition
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk kp1 kp2 col1
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk kp1 kp2 col1
+# Try doing backwards scans
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+pk kp1 kp2 col1
+10 10 10 10
+8 8 8 8
+6 6 6 6
+4 4 4 4
+2 2 2 2
+explain
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+pk kp1 kp2 col1
+998 998 998 998
+996 996 996 996
+994 994 994 994
+992 992 992 992
+explain
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+pk kp1 kp2 col1
+drop table t0,t1,t2,t3;
+#
+# Check how ICP affects counters
+#
+# First, some preparations
+#
+create procedure save_read_stats()
+select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT
+into @rr, @rq, @rif, @rin
+from information_schema.table_statistics
+where table_name='t4' and table_schema=database();
+create procedure get_read_stats()
+select
+ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin
+from information_schema.table_statistics
+where table_name='t4' and table_schema=database();
+create table t4 (
+id int,
+id1 int,
+id2 int,
+value int,
+value2 varchar(100),
+primary key (id),
+key id1_id2 (id1, id2) comment 'rev:cf1'
+) engine=rocksdb charset=latin1 collate latin1_bin;
+insert into t4 values
+(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5),
+(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10);
+#
+# Now, the test itself
+#
+call save_read_stats();
+call get_read_stats();
+ROWS_READ-@rr ROWS_REQUESTED-@rq ROWS_INDEX_FIRST-@rif ROWS_INDEX_NEXT-@rin
+0 0 0 0
+# ============== index-only query ==============
+explain
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index
+call save_read_stats();
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id1 id2
+1 1
+call get_read_stats();
+ROWS_READ-@rr 10
+ROWS_REQUESTED-@rq 11
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 9
+# ============== Query without ICP ==============
+set optimizer_switch='index_condition_pushdown=off';
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id id1 id2 value value2
+1 1 1 1 1
+call get_read_stats();
+ROWS_READ-@rr 10
+ROWS_REQUESTED-@rq 11
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 9
+# ============== Query with ICP ==============
+set optimizer_switch='index_condition_pushdown=on';
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+id id1 id2 value value2
+1 1 1 1 1
+call get_read_stats();
+ROWS_READ-@rr 1
+ROWS_REQUESTED-@rq 1
+ROWS_INDEX_FIRST-@rif 1
+ROWS_INDEX_NEXT-@rin 0
+drop table t4;
+drop procedure save_read_stats;
+drop procedure get_read_stats;
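rocksdb_icp_rev.result is the same scenario with every index comment changed from 'cf1' to 'rev:cf1', i.e. the keys live in a reverse-ordered column family; the plans and counters are expected to match the forward-CF file exactly. A minimal sketch of the declaration pattern, assuming only the comment-based column family syntax used throughout these tests:

# 'cf1' names a forward column family, 'rev:cf1' a reverse-ordered one.
CREATE TABLE cf_demo (
  pk INT PRIMARY KEY COMMENT 'cf1',
  a  INT,
  b  INT,
  KEY k_fwd (a) COMMENT 'cf1',
  KEY k_rev (b) COMMENT 'rev:cf1'
) ENGINE=RocksDB;
# Both indexes must return identical rows; only the on-disk key order differs.
SELECT * FROM cf_demo FORCE INDEX (k_rev) WHERE b BETWEEN 1 AND 10;
DROP TABLE cf_demo;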
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result
new file mode 100644
index 00000000000..e4d11960e6e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result
@@ -0,0 +1,63 @@
+create table t1 (pk int not null primary key) engine=rocksdb;
+insert into t1 values (1),(2),(3);
+set autocommit=0;
+begin;
+select * from t1 where pk=1 for update;
+pk
+1
+connect con1,localhost,root,,;
+connection con1;
+### Connection con1
+set @@rocksdb_lock_wait_timeout=500;
+set autocommit=0;
+begin;
+select * from t1 where pk=1 for update;;
+connection default;
+### Connection default
+rollback;
+connection con1;
+pk
+1
+rollback;
+connection default;
+begin;
+select * from t1 where pk=1 for update;
+pk
+1
+connection con1;
+### Connection con1
+set @@rocksdb_lock_wait_timeout=2;
+set autocommit=0;
+begin;
+select * from t1 where pk=1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+connection default;
+rollback;
+set autocommit=1;
+connection con1;
+drop table t1;
+connection default;
+#
+# Now, test what happens if another transaction modified the record and committed
+#
+CREATE TABLE t1 (
+id int primary key,
+value int
+) engine=rocksdb collate latin1_bin;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
+connection con1;
+BEGIN;
+SELECT * FROM t1 WHERE id=3;
+id value
+3 3
+connection default;
+BEGIN;
+UPDATE t1 SET value=30 WHERE id=3;
+COMMIT;
+connection con1;
+SELECT * FROM t1 WHERE id=3 FOR UPDATE;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+disconnect con1;
+connection default;
+drop table t1;
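rocksdb_locks.result exercises two behaviours: rocksdb_lock_wait_timeout turning a blocked SELECT ... FOR UPDATE into an HY000 timeout, and the snapshot-conflict path where re-reading a row FOR UPDATE after another transaction committed a change raises ERROR 40001. A short sketch of the timeout side, assuming connection A already holds the row lock:

# Connection A (holds the lock):
BEGIN;
SELECT * FROM t1 WHERE pk = 1 FOR UPDATE;

# Connection B (gives up after ~2 seconds instead of waiting):
SET @@rocksdb_lock_wait_timeout = 2;    # session scope, as in the result above
BEGIN;
SELECT * FROM t1 WHERE pk = 1 FOR UPDATE;
# ERROR HY000: Lock wait timeout exceeded ... Timeout on index: test.t1.PRIMARY
ROLLBACK;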
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result
new file mode 100644
index 00000000000..acf62d0bb70
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result
@@ -0,0 +1,123 @@
+drop table if exists t1,t2;
+# Tests for MyRocks + partitioning
+#
+# MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB
+PARTITION BY HASH(pk) PARTITIONS 2;
+INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 1);
+CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1, 1), (2, 1);
+SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 );
+f1
+NULL
+drop table t1,t2;
+#
+# Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables
+#
+CREATE TABLE t1 (
+id INT PRIMARY KEY,
+a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8,
+b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null,
+c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null,
+INDEX (a),
+INDEX (b),
+INDEX (c)
+) ENGINE=RocksDB PARTITION BY key (id) partitions 2;
+INSERT INTO t1 (id, b) VALUES (28, 3);
+Warnings:
+Warning 1364 Field 'c' doesn't have a default value
+UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1;
+check table t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+drop table t1;
+#
+# Issue #105, another testcase
+#
+create table t1 (
+pk int primary key,
+col1 int,
+col2 int,
+key (col1) comment 'rev:cf_issue105'
+) engine=rocksdb partition by hash(pk) partitions 2;
+insert into t1 values (1,10,10);
+insert into t1 values (2,10,10);
+insert into t1 values (11,20,20);
+insert into t1 values (12,20,20);
+explain select * from t1 force index(col1) where col1=10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref col1 col1 5 const 2000 NULL
+select * from t1 force index(col1) where col1=10;
+pk col1 col2
+2 10 10
+1 10 10
+select * from t1 use index () where col1=10;
+pk col1 col2
+2 10 10
+1 10 10
+drop table t1;
+#
+# Issue #108: Index-only scans do not work for partitioned tables and extended keys
+#
+create table t1 (
+pk int primary key,
+col1 int,
+col2 int,
+key (col1)
+) engine=rocksdb partition by hash(pk) partitions 2;
+insert into t1 values (1,10,10);
+insert into t1 values (2,10,10);
+insert into t1 values (11,20,20);
+insert into t1 values (12,20,20);
+# The following must use "Using index"
+explain select pk from t1 force index(col1) where col1=10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref col1 col1 5 const 2000 Using index
+drop table t1;
+#
+# Issue #214: subqueries cause crash
+#
+create TABLE t1(a int,b int,c int,primary key(a,b))
+partition by list (b*a) (partition x1 values in (1) tablespace ts1,
+partition x2 values in (3,11,5,7) tablespace ts2,
+partition x3 values in (16,8,5+19,70-43) tablespace ts3);
+create table t2(b binary(2));
+set session optimizer_switch=5;
+insert into t1(a,b) values(1,7);
+select a from t1 where a in (select a from t1 where a in (select b from t2));
+a
+drop table t1, t2;
+#
+# Issue #260: altering name to invalid value leaves table unaccessible
+#
+CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4;
+INSERT INTO t1 VALUES(1,'a');
+RENAME TABLE t1 TO db3.t3;
+ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: 122 - Internal (unspecified) error in handler)
+SELECT * FROM t1;
+c1 c2
+1 a
+SHOW TABLES;
+Tables_in_test
+t1
+RENAME TABLE t1 TO test.t3;
+SELECT * FROM t3;
+c1 c2
+1 a
+SHOW TABLES;
+Tables_in_test
+t3
+CREATE DATABASE db3;
+USE test;
+RENAME TABLE t3 to db3.t2;
+USE db3;
+SELECT * FROM t2;
+c1 c2
+1 a
+SHOW TABLES;
+Tables_in_db3
+t2
+DROP TABLE t2;
+use test;
+DROP DATABASE db3;
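The partitioning cases above (issues #70, #105, #108, #214, #260) all revolve around secondary keys on partitioned MyRocks tables: hidden primary key parts, extended keys for index-only scans, and per-index column families. A compact sketch of the pattern those tests protect, reusing the comment-based CF syntax from the result:

# Partitioned table with a secondary key placed in its own reverse CF.
CREATE TABLE part_demo (
  pk   INT PRIMARY KEY,
  col1 INT,
  col2 INT,
  KEY (col1) COMMENT 'rev:cf_issue105'
) ENGINE=RocksDB
PARTITION BY HASH(pk) PARTITIONS 2;
INSERT INTO part_demo VALUES (1,10,10),(2,10,10),(11,20,20),(12,20,20);
# Index-only access must still work even though the PK is appended to the key:
EXPLAIN SELECT pk FROM part_demo FORCE INDEX (col1) WHERE col1 = 10;
DROP TABLE part_demo;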
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result
new file mode 100644
index 00000000000..7a17dabf294
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result
@@ -0,0 +1,37 @@
+create table t1 (pk int primary key, c char(8)) engine=RocksDB;
+insert into t1 values (1,'new'),(2,'new');
+select * from t1;
+pk c
+1 new
+2 new
+connect con1,localhost,root,,;
+update t1 set c = 'updated';
+connection default;
+flush status;
+show status like 'Qcache_hits';
+Variable_name Value
+Qcache_hits 0
+show global status like 'Qcache_hits';
+Variable_name Value
+Qcache_hits 0
+select * from t1;
+pk c
+1 updated
+2 updated
+select sql_no_cache * from t1;
+pk c
+1 updated
+2 updated
+select * from t1 where pk = 1;
+pk c
+1 updated
+show status like 'Qcache_hits';
+Variable_name Value
+Qcache_hits 0
+show status like 'Qcache_not_cached';
+Variable_name Value
+Qcache_not_cached 3
+show global status like 'Qcache_hits';
+Variable_name Value
+Qcache_hits 0
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result
new file mode 100644
index 00000000000..d20bbc9b775
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result
@@ -0,0 +1,290 @@
+select * from information_schema.engines where engine = 'rocksdb';
+ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+ROCKSDB DEFAULT RocksDB storage engine YES YES YES
+drop table if exists t0,t1,t2,t3,t4,t5;
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int not null,
+a int not null,
+b int not null,
+primary key(pk),
+key(a) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A;
+#
+# HA_READ_KEY_EXACT tests
+#
+# Original failure was here:
+explain
+select * from t2 force index (a) where a=0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref a a 4 const # NULL
+select * from t2 force index (a) where a=0;
+pk a b
+0 0 0
+1 0 1
+2 0 2
+3 0 3
+4 0 4
+5 0 5
+6 0 6
+7 0 7
+8 0 8
+9 0 9
+# The rest are for code coverage:
+explain
+select * from t2 force index (a) where a=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref a a 4 const # NULL
+select * from t2 force index (a) where a=2;
+pk a b
+20 2 20
+21 2 21
+22 2 22
+23 2 23
+24 2 24
+25 2 25
+26 2 26
+27 2 27
+28 2 28
+29 2 29
+explain
+select * from t2 force index (a) where a=3 and pk=33;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 const a a 8 const,const # NULL
+select * from t2 force index (a) where a=3 and pk=33;
+pk a b
+33 3 33
+select * from t2 force index (a) where a=99 and pk=99;
+pk a b
+select * from t2 force index (a) where a=0 and pk=0;
+pk a b
+0 0 0
+select * from t2 force index (a) where a=-1;
+pk a b
+select * from t2 force index (a) where a=-1 and pk in (101,102);
+pk a b
+select * from t2 force index (a) where a=100 and pk in (101,102);
+pk a b
+#
+# #36: Range in form tbl.key >= const doesn't work in reverse column family
+#
+explain
+select count(*) from t2 force index (a) where a>=0 and a <=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 4 NULL # Using where; Using index
+select count(*) from t2 force index (a) where a>=0 and a <=1;
+count(*)
+20
+explain
+select count(*) from t2 force index (a) where a>=-1 and a <=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 4 NULL # Using where; Using index
+select count(*) from t2 force index (a) where a>=-1 and a <=1;
+count(*)
+20
+explain
+select * from t2 force index (a) where a=0 and pk>=3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 8 NULL # Using index condition
+select * from t2 force index (a) where a=0 and pk>=3;
+pk a b
+3 0 3
+4 0 4
+5 0 5
+6 0 6
+7 0 7
+8 0 8
+9 0 9
+# Try edge cases where we fall over the end of the table
+create table t3 like t2;
+insert into t3 select * from t2;
+select * from t3 where pk>=1000000;
+pk a b
+select * from t2 where pk>=1000000;
+pk a b
+#
+# #42: Range in form tbl.key > const doesn't work in reverse column family
+#
+explain
+select count(*) from t2 force index (a) where a>0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 4 NULL # Using where; Using index
+select count(*) from t2 force index (a) where a>0;
+count(*)
+990
+explain
+select count(*) from t2 force index (a) where a>99;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 4 NULL # Using where; Using index
+select count(*) from t2 force index (a) where a>99;
+count(*)
+0
+select * from t2 where pk>1000000;
+pk a b
+select * from t3 where pk>1000000;
+pk a b
+explain
+select count(*) from t2 force index (a) where a=2 and pk>25;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 8 NULL # Using where; Using index
+select count(*) from t2 force index (a) where a=2 and pk>25;
+count(*)
+4
+select * from t2 force index (a) where a>-10 and a < 1;
+pk a b
+0 0 0
+1 0 1
+2 0 2
+3 0 3
+4 0 4
+5 0 5
+6 0 6
+7 0 7
+8 0 8
+9 0 9
+select * from t3 force index (a) where a>-10 and a < 1;
+pk a b
+0 0 0
+1 0 1
+2 0 2
+3 0 3
+4 0 4
+5 0 5
+6 0 6
+7 0 7
+8 0 8
+9 0 9
+#
+# #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family
+#
+select max(a) from t2 where a < 2;
+max(a)
+1
+select max(a) from t2 where a < -1;
+max(a)
+NULL
+select max(pk) from t2 where a=3 and pk < 6;
+max(pk)
+NULL
+select max(pk) from t2 where pk < 200000;
+max(pk)
+999
+select max(pk) from t2 where pk < 20;
+max(pk)
+19
+select max(a) from t3 where a < 2;
+max(a)
+1
+select max(a) from t3 where a < -1;
+max(a)
+NULL
+select max(pk) from t3 where pk < 200000;
+max(pk)
+999
+select max(pk) from t3 where pk < 20;
+max(pk)
+19
+select max(pk) from t2 where a=3 and pk < 33;
+max(pk)
+32
+select max(pk) from t3 where a=3 and pk < 33;
+max(pk)
+32
+#
+# #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF
+#
+# Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV
+explain
+select * from t2 where a between 99 and 2000 order by a desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 4 NULL # Using index condition
+select * from t2 where a between 99 and 2000 order by a desc;
+pk a b
+999 99 999
+998 99 998
+997 99 997
+996 99 996
+995 99 995
+994 99 994
+993 99 993
+992 99 992
+991 99 991
+990 99 990
+select max(a) from t2 where a <=10;
+max(a)
+10
+select max(a) from t2 where a <=-4;
+max(a)
+NULL
+select max(pk) from t2 where a=5 and pk <=55;
+max(pk)
+55
+select max(pk) from t2 where a=5 and pk <=55555;
+max(pk)
+59
+select max(pk) from t2 where a=5 and pk <=0;
+max(pk)
+NULL
+select max(pk) from t2 where pk <=-1;
+max(pk)
+NULL
+select max(pk) from t2 where pk <=999999;
+max(pk)
+999
+select max(pk) from t3 where pk <=-1;
+max(pk)
+NULL
+select max(pk) from t3 where pk <=999999;
+max(pk)
+999
+#
+# Tests for search_flag=HA_READ_PREFIX_LAST
+#
+create table t4 (
+pk int primary key,
+a int,
+b int,
+c int,
+key(a,b,c)
+) engine=rocksdb;
+insert into t4 select pk,pk,pk,pk from t2 where pk < 100;
+explain
+select * from t4 where a=1 and b in (1) order by c desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref a a 10 const,const # Using where; Using index
+select * from t4 where a=1 and b in (1) order by c desc;
+pk a b c
+1 1 1 1
+explain
+select * from t4 where a=5 and b in (4) order by c desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ref a a 10 const,const # Using where; Using index
+select * from t4 where a=5 and b in (4) order by c desc;
+pk a b c
+# HA_READ_PREFIX_LAST for reverse-ordered CF
+create table t5 (
+pk int primary key,
+a int,
+b int,
+c int,
+key(a,b,c) comment 'rev:cf2'
+) engine=rocksdb;
+insert into t5 select pk,pk,pk,pk from t2 where pk < 100;
+explain
+select * from t5 where a=1 and b in (1) order by c desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t5 ref a a 10 const,const # Using where; Using index
+select * from t5 where a=1 and b in (1) order by c desc;
+pk a b c
+1 1 1 1
+explain
+select * from t5 where a=5 and b in (4) order by c desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t5 ref a a 10 const,const # Using where; Using index
+select * from t5 where a=5 and b in (4) order by c desc;
+pk a b c
+drop table t0,t1,t2,t3,t4,t5;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result
new file mode 100644
index 00000000000..d7a4f9dd065
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result
@@ -0,0 +1,11 @@
+create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2));
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select count(*) from t1;
+count(*)
+10000
+explain select c1 from t1 where c1 > 5 limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range i i 9 NULL 9900 Using where; Using index
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result
new file mode 100644
index 00000000000..8c02de98c90
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result
@@ -0,0 +1,66 @@
+create table t1 (a int primary key) engine=rocksdb;
+Verify rocksdb_rows_inserted
+select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted';
+insert into t1 values(1);
+select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted';
+select @new_rows_inserted - @old_rows_inserted;
+@new_rows_inserted - @old_rows_inserted
+1
+Verify rocksdb_rows_updated
+select variable_value into @old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated';
+update t1 set a=2 where a=1;
+select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated';
+select @new_rows_updated - @old_rows_updated;
+@new_rows_updated - @old_rows_updated
+1
+Verify rocksdb_rows_read
+select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read';
+select * from t1;
+a
+2
+select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read';
+select @new_rows_read - @old_rows_read;
+@new_rows_read - @old_rows_read
+1
+Verify rocksdb_rows_deleted
+select variable_value into @old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted';
+delete from t1;
+select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted';
+select @new_rows_deleted - @old_rows_deleted;
+@new_rows_deleted - @old_rows_deleted
+1
+use mysql;
+create table t1(a int primary key) engine=rocksdb;
+Verify rocksdb_system_rows_inserted
+select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted';
+insert into t1 values(1);
+select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted';
+select @new_system_rows_inserted - @old_system_rows_inserted;
+@new_system_rows_inserted - @old_system_rows_inserted
+1
+Verify rocksdb_system_rows_updated
+select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated';
+update t1 set a=2 where a=1;
+select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated';
+select @new_system_rows_updated - @old_system_rows_updated;
+@new_system_rows_updated - @old_system_rows_updated
+1
+Verify rocksdb_system_rows_read
+select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read';
+select * from t1;
+a
+2
+select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read';
+select @new_system_rows_read - @old_system_rows_read;
+@new_system_rows_read - @old_system_rows_read
+1
+Verify rocksdb_system_rows_deleted
+select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted';
+delete from t1;
+select variable_value into @new_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted';
+select @new_system_rows_deleted - @old_system_rows_deleted;
+@new_system_rows_deleted - @old_system_rows_deleted
+1
+drop table t1;
+use test;
+drop table t1;
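The measurement pattern in rocksdb_row_stats.result, snapshot a counter, run one statement, snapshot again, report the delta, applies to every rocksdb_rows_* and rocksdb_system_rows_* status variable. A generic sketch (the table name is a placeholder; only the counter name is taken from the result above):

# How many rows did a single statement make MyRocks read?
SELECT variable_value INTO @before
FROM information_schema.global_status
WHERE variable_name = 'rocksdb_rows_read';
SELECT COUNT(*) FROM some_rocksdb_table;        # placeholder statement
SELECT variable_value INTO @after
FROM information_schema.global_status
WHERE variable_name = 'rocksdb_rows_read';
SELECT @after - @before AS rows_read_by_statement;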
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result
new file mode 100644
index 00000000000..1e8aa5787a6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result
@@ -0,0 +1,23 @@
+drop table if exists t1;
+SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100;
+create table t1 (pk int primary key) engine=rocksdb;
+set global rocksdb_force_flush_memtable_now = true;
+select table_rows from information_schema.tables
+where table_schema = database() and table_name = 't1';
+table_rows
+10000
+drop table t1;
+drop table if exists t2;
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10;
+create table t2 (pk int primary key) engine=rocksdb;
+set global rocksdb_force_flush_memtable_now = true;
+select table_rows from information_schema.tables
+where table_schema = database() and table_name = 't2';
+table_rows
+10000
+select table_name from information_schema.tables where table_schema = database() and table_name = 't2';
+table_name
+t2
+drop table t2;
+SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result
new file mode 100644
index 00000000000..82609f46423
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result
@@ -0,0 +1,321 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+create procedure save_read_stats()
+begin
+select rows_requested into @rq from information_schema.table_statistics
+where table_schema=database() and table_name='t1';
+select variable_value into @rr from information_schema.global_status
+where variable_name='rocksdb_rows_read';
+select variable_value into @ru from information_schema.global_status
+where variable_name='rocksdb_rows_updated';
+select variable_value into @rd from information_schema.global_status
+where variable_name='rocksdb_rows_deleted';
+end//
+create procedure get_read_stats()
+begin
+select rows_requested - @rq as rows_requested from
+information_schema.table_statistics
+where table_schema=database() and table_name='t1';
+select variable_value - @rr as rows_read from
+information_schema.global_status
+where variable_name='rocksdb_rows_read';
+select variable_value - @ru as rows_updated from
+information_schema.global_status
+where variable_name='rocksdb_rows_updated';
+select variable_value - @rd as rows_deleted from
+information_schema.global_status
+where variable_name='rocksdb_rows_deleted';
+end//
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3), (4,4);
+include/sync_slave_sql_with_master.inc
+
+# regular update/delete. With rocks_read_free_rpl_tables=.*, rocksdb_rows_read does not increase on slaves
+
+call save_read_stats();
+update t1 set value=value+1 where id=1;
+delete from t1 where id=4;
+select * from t1;
+id value
+1 2
+2 2
+3 3
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+1
+rows_deleted
+1
+select * from t1;
+id value
+1 2
+2 2
+3 3
+
+# "rocks_read_free_rpl_tables=.*" makes "row not found error" not happen anymore
+
+include/stop_slave.inc
+delete from t1 where id in (2, 3);
+include/start_slave.inc
+call save_read_stats();
+update t1 set value=value+1 where id=3;
+delete from t1 where id=2;
+select * from t1;
+id value
+1 2
+3 4
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+1
+rows_deleted
+1
+select * from t1;
+id value
+1 2
+3 4
+
+## tables without primary key -- read free replication should be disabled
+
+
+#no index
+
+drop table t1;
+create table t1 (c1 int, c2 int);
+insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5);
+include/sync_slave_sql_with_master.inc
+call save_read_stats();
+update t1 set c2=100 where c1=3;
+delete from t1 where c1 <= 2;
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+5
+rows_read
+5
+rows_updated
+1
+rows_deleted
+2
+select * from t1;
+c1 c2
+3 100
+4 4
+5 5
+
+#secondary index only
+
+drop table t1;
+create table t1 (c1 int, c2 int, index i(c1));
+insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5);
+include/sync_slave_sql_with_master.inc
+call save_read_stats();
+update t1 set c2=100 where c1=3;
+delete from t1 where c1 <= 2;
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+3
+rows_read
+3
+rows_updated
+1
+rows_deleted
+2
+select * from t1;
+c1 c2
+3 100
+4 4
+5 5
+
+## large row operations -- primary key modification, secondary key modification
+
+drop table t1;
+create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2));
+include/sync_slave_sql_with_master.inc
+call save_read_stats();
+
+#updating all seconary keys by 1
+
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+10000
+rows_deleted
+0
+include/diff_tables.inc [master:t1, slave:t1]
+
+#updating all primary keys by 2
+
+call save_read_stats();
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+10000
+rows_deleted
+0
+include/diff_tables.inc [master:t1, slave:t1]
+
+#updating secondary keys after truncating t1 on slave
+
+truncate table t1;
+call save_read_stats();
+update t1 set c2=c2+10;
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+10000
+rows_deleted
+0
+include/diff_tables.inc [master:t1, slave:t1]
+
+#updating primary keys after truncating t1 on slave
+
+truncate table t1;
+call save_read_stats();
+update t1 set id2=id2+10;
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+10000
+rows_deleted
+0
+include/diff_tables.inc [master:t1, slave:t1]
+
+#deleting half rows
+
+call save_read_stats();
+delete from t1 where id1 <= 5000;
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+0
+rows_read
+0
+rows_updated
+0
+rows_deleted
+5000
+include/diff_tables.inc [master:t1, slave:t1]
+[on master]
+create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+include/sync_slave_sql_with_master.inc
+[on slave]
+delete from t2 where id <= 2;
+delete from u2 where id <= 2;
+[on master]
+update t2 set i2=100, value=100 where id=1;
+update u2 set i2=100, value=100 where id=1;
+[on slave]
+call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*");
+call mtr.add_suppression("Slave: Can't find record in 'u2'.*");
+include/wait_for_slave_sql_error.inc [errno=1032]
+select count(*) from t2 force index(primary);
+count(*)
+2
+select count(*) from t2 force index(i1);
+count(*)
+1
+select count(*) from t2 force index(i2);
+count(*)
+2
+select * from t2 where id=1;
+id i1 i2 value
+1 1 100 100
+select i1 from t2 where i1=1;
+i1
+select i2 from t2 where i2=100;
+i2
+100
+select count(*) from u2 force index(primary);
+count(*)
+1
+select count(*) from u2 force index(i1);
+count(*)
+1
+select count(*) from u2 force index(i2);
+count(*)
+1
+select * from u2 where id=1;
+id i1 i2 value
+select i1 from u2 where i1=1;
+i1
+select i2 from u2 where i2=100;
+i2
+include/wait_for_slave_sql_to_start.inc
+
+# some tables with read-free replication on and some with it off
+# secondary keys have extra rows
+
+[on master]
+create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+include/sync_slave_sql_with_master.inc
+[on slave]
+update t3 set i1=100 where id=1;
+update u3 set i1=100 where id=1;
+[on master]
+delete from t3 where id=1;
+delete from u3 where id=1;
+include/sync_slave_sql_with_master.inc
+[on slave]
+select count(*) from t3 force index(primary);
+count(*)
+2
+select count(*) from t3 force index(i1);
+count(*)
+3
+select count(*) from t3 force index(i2);
+count(*)
+2
+select i1 from t3 where i1=100;
+i1
+100
+select count(*) from u3 force index(primary);
+count(*)
+2
+select count(*) from u3 force index(i1);
+count(*)
+2
+select count(*) from u3 force index(i2);
+count(*)
+2
+select i1 from u3 where i1=100;
+i1
+drop table t1, t2, t3, u2, u3;
+drop procedure save_read_stats;
+drop procedure get_read_stats;
+include/rpl_end.inc
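The comments in rpl_read_free.result refer to the slave running with read-free replication enabled for all tables (the "rocks_read_free_rpl_tables=.*" remark), so replicated UPDATE and DELETE events are applied without first reading the old row; that is why rows_read stays 0 and why a deliberately removed row no longer stops the SQL thread with error 1032. The exact option name and whether it can be changed at runtime should be confirmed against the variable list in ha_rocksdb.cc; the slave-side setup is assumed to look roughly like:

# Slave-side sketch; variable name and dynamic scope are assumptions.
STOP SLAVE SQL_THREAD;
SET GLOBAL rocksdb_read_free_rpl_tables = '.*';   # regex of tables applied read-free
START SLAVE SQL_THREAD;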
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result
new file mode 100644
index 00000000000..8cdfa910739
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result
@@ -0,0 +1,56 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int primary key,
+kp1 int,
+kp2 int,
+col1 int,
+key (kp1,kp2)
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+create table t3 like t2;
+insert into t3 select * from t2;
+include/sync_slave_sql_with_master.inc
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+include/stop_slave.inc
+include/start_slave.inc
+update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0;
+set debug_sync= 'now WAIT_FOR Reached';
+set global debug = '';
+set sql_log_bin=0;
+delete from t2 where pk=2;
+delete from t2 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+include/sync_slave_sql_with_master.inc
+select * from t2 where pk < 5;
+pk kp1 kp2 col1
+0 0 0 0
+1 1 1 1
+4 4 4 4
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+include/stop_slave.inc
+include/start_slave.inc
+update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0;
+call mtr.add_suppression("Deadlock found when trying to get lock");
+set debug_sync= 'now WAIT_FOR Reached';
+set global debug = '';
+set sql_log_bin=0;
+delete from t3 where pk=2;
+delete from t3 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+include/sync_slave_sql_with_master.inc
+select * from t3 where pk < 5;
+pk kp1 kp2 col1
+0 0 0 0
+1 1 1 1
+4 4 4 100
+drop table t0, t1, t2, t3;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result
new file mode 100644
index 00000000000..50905527447
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result
@@ -0,0 +1,42 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+select @@binlog_format;
+@@binlog_format
+ROW
+create table t1 (pk int primary key) engine=rocksdb;
+insert into t1 values (1),(2),(3);
+include/sync_slave_sql_with_master.inc
+select * from t1;
+pk
+1
+2
+3
+drop table t1;
+#
+# Issue #18: slave crash on update with row based binary logging
+#
+create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+update t1 set value2=100 where id=1;
+update t1 set value2=200 where id=2;
+update t1 set value2=300 where id=3;
+include/sync_slave_sql_with_master.inc
+select * from t1 where id=1;
+id value value2
+1 1 100
+select * from t1 where id=2;
+id value value2
+2 1 200
+select * from t1 where id=3;
+id value value2
+3 1 300
+drop table t1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result
new file mode 100644
index 00000000000..a95642096f5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result
@@ -0,0 +1,88 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+create procedure save_read_stats()
+begin
+select rows_requested into @rq from information_schema.table_statistics
+where table_schema=database() and table_name='t1';
+select variable_value into @rr from information_schema.global_status
+where variable_name='rocksdb_rows_read';
+select variable_value into @ru from information_schema.global_status
+where variable_name='rocksdb_rows_updated';
+select variable_value into @rd from information_schema.global_status
+where variable_name='rocksdb_rows_deleted';
+end//
+create procedure get_read_stats()
+begin
+select rows_requested - @rq as rows_requested from
+information_schema.table_statistics
+where table_schema=database() and table_name='t1';
+select variable_value - @rr as rows_read from
+information_schema.global_status
+where variable_name='rocksdb_rows_read';
+select variable_value - @ru as rows_updated from
+information_schema.global_status
+where variable_name='rocksdb_rows_updated';
+select variable_value - @rd as rows_deleted from
+information_schema.global_status
+where variable_name='rocksdb_rows_deleted';
+end//
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5);
+include/sync_slave_sql_with_master.inc
+call save_read_stats();
+update t1 set value=value+1 where id=1;
+update t1 set value=value+1 where id=3;
+select * from t1;
+id value
+1 2
+2 2
+3 4
+4 4
+5 5
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+2
+rows_read
+2
+rows_updated
+2
+rows_deleted
+0
+select * from t1;
+id value
+1 2
+2 2
+3 4
+4 4
+5 5
+call save_read_stats();
+delete from t1 where id in (4,5);
+select * from t1;
+id value
+1 2
+2 2
+3 4
+include/sync_slave_sql_with_master.inc
+call get_read_stats();
+rows_requested
+2
+rows_read
+2
+rows_updated
+0
+rows_deleted
+2
+select * from t1;
+id value
+1 2
+2 2
+3 4
+drop table t1;
+drop procedure save_read_stats;
+drop procedure get_read_stats;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result
new file mode 100644
index 00000000000..1d3cd7db641
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result
@@ -0,0 +1,242 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+# Test of row replication with triggers on the slave side
+CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1));
+SELECT * FROM t1;
+C1 C2
+SET @old_slave_exec_mode= @@global.slave_exec_mode;
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET @@global.slave_exec_mode= IDEMPOTENT;
+SET @@global.slave_run_triggers_for_rbr= YES;
+SELECT * FROM t1;
+C1 C2
+create table t2 (id char(2) primary key, cnt int, o char(1), n char(1));
+insert into t2 values
+('u0', 0, ' ', ' '),('u1', 0, ' ', ' '),
+('d0', 0, ' ', ' '),('d1', 0, ' ', ' '),
+('i0', 0, ' ', ' '),('i1', 0, ' ', ' ');
+create trigger t1_cnt_b before update on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0';
+create trigger t1_cnt_db before delete on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0';
+create trigger t1_cnt_ib before insert on t1 for each row
+update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0';
+create trigger t1_cnt_a after update on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1';
+create trigger t1_cnt_da after delete on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1';
+create trigger t1_cnt_ia after insert on t1 for each row
+update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 0
+i1 0
+u0 0
+u1 0
+# INSERT triggers test
+insert into t1 values ('a','b');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 1 a
+i1 1 a
+u0 0
+u1 0
+# UPDATE triggers test
+update t1 set C1= 'd';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 1 a
+i1 1 a
+u0 1 a d
+u1 1 a d
+# DELETE triggers test
+delete from t1 where C1='d';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 1 d
+d1 1 d
+i0 1 a
+i1 1 a
+u0 1 a d
+u1 1 a d
+# INSERT triggers which cause also UPDATE test (insert duplicate row)
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 1 d
+d1 1 d
+i0 2 0
+i1 2 0
+u0 1 a d
+u1 1 a d
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 1 d
+d1 1 d
+i0 3 0
+i1 3 0
+u0 2 0 0
+u1 2 0 0
+# INSERT triggers which cause also DELETE test
+# (insert duplicate row in table referenced by foreign key)
+insert into t1 values ('1','1');
+drop table if exists t1;
+SET @@global.slave_exec_mode= @old_slave_exec_mode;
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+drop table t2;
+CREATE TABLE t1 (i INT);
+CREATE TABLE t2 (i INT);
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr=YES;
+CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW
+INSERT INTO t2 VALUES (new.i);
+BEGIN;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+COMMIT;
+select * from t2;
+i
+1
+2
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+drop tables t2,t1;
+# Triggers on slave do not work if master has some
+CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1));
+SELECT * FROM t1;
+C1 C2
+create trigger t1_dummy before delete on t1 for each row
+set @dummy= 1;
+SET @old_slave_exec_mode= @@global.slave_exec_mode;
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET @@global.slave_exec_mode= IDEMPOTENT;
+SET @@global.slave_run_triggers_for_rbr= YES;
+SELECT * FROM t1;
+C1 C2
+create table t2 (id char(2) primary key, cnt int, o char(1), n char(1));
+insert into t2 values
+('u0', 0, ' ', ' '),('u1', 0, ' ', ' '),
+('d0', 0, ' ', ' '),('d1', 0, ' ', ' '),
+('i0', 0, ' ', ' '),('i1', 0, ' ', ' ');
+create trigger t1_cnt_b before update on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0';
+create trigger t1_cnt_ib before insert on t1 for each row
+update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0';
+create trigger t1_cnt_a after update on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1';
+create trigger t1_cnt_da after delete on t1 for each row
+update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1';
+create trigger t1_cnt_ia after insert on t1 for each row
+update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 0
+i1 0
+u0 0
+u1 0
+# INSERT triggers test
+insert into t1 values ('a','b');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 0
+i1 0
+u0 0
+u1 0
+# UPDATE triggers test
+update t1 set C1= 'd';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 0
+i1 0
+u0 0
+u1 0
+# DELETE triggers test
+delete from t1 where C1='d';
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 0
+i1 0
+u0 0
+u1 0
+# INSERT triggers which cause also UPDATE test (insert duplicate row)
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 1 0
+i1 1 0
+u0 0
+u1 0
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+id cnt o n
+d0 0
+d1 0
+i0 1 0
+i1 1 0
+u0 0
+u1 0
+# INSERT triggers which cause also DELETE test
+# (insert duplicate row in table referenced by foreign key)
+insert into t1 values ('1','1');
+drop table if exists t1;
+SET @@global.slave_exec_mode= @old_slave_exec_mode;
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+drop table t2;
+#
+# MDEV-5513: Trigger is applied to the rows after first one
+#
+create table t1 (a int, b int);
+create table tlog (a int auto_increment primary key);
+set sql_log_bin=0;
+create trigger tr1 after insert on t1 for each row insert into tlog values (null);
+set sql_log_bin=1;
+set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr;
+set global slave_run_triggers_for_rbr=1;
+create trigger tr2 before insert on t1 for each row set new.b = new.a;
+insert into t1 values (1,10),(2,20),(3,30);
+select * from t1;
+a b
+1 10
+2 20
+3 30
+#
+# Verify slave skips running triggers if master ran and logged the row events for triggers
+#
+create table t4(a int, b int);
+delete from tlog;
+create trigger tr4 before insert on t4 for each row insert into tlog values (null);
+insert into t4 values (1, 10),(2, 20);
+select * from tlog;
+a
+4
+5
+select * from t4;
+a b
+1 10
+2 20
+select * from tlog;
+a
+4
+5
+set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved;
+drop table t1, tlog, t4;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result
new file mode 100644
index 00000000000..9424238da93
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result
@@ -0,0 +1,103 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3);
+begin;
+insert into t1 values (11, 1);
+savepoint a;
+insert into t1 values (12, 1);
+rollback to savepoint a;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+commit;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+commit;
+select * from t1;
+id value
+1 1
+2 2
+3 3
+include/sync_slave_sql_with_master.inc
+select * from t1;
+id value
+1 1
+2 2
+3 3
+begin;
+insert into t1 values (21, 1);
+savepoint a;
+insert into t1 values (22, 1);
+rollback to savepoint a;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+insert into t1 values (23, 1);
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+commit;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+commit;
+select * from t1;
+id value
+1 1
+2 2
+3 3
+include/sync_slave_sql_with_master.inc
+select * from t1;
+id value
+1 1
+2 2
+3 3
+begin;
+insert into t1 values (31, 1);
+savepoint a;
+insert into t1 values (32, 1);
+savepoint b;
+insert into t1 values (33, 1);
+rollback to savepoint a;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+insert into t1 values (34, 1);
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+rollback;
+select * from t1;
+id value
+1 1
+2 2
+3 3
+include/sync_slave_sql_with_master.inc
+select * from t1;
+id value
+1 1
+2 2
+3 3
+SET autocommit=off;
+select * from t1;
+id value
+1 1
+2 2
+3 3
+SAVEPOINT A;
+select * from t1;
+id value
+1 1
+2 2
+3 3
+SAVEPOINT A;
+insert into t1 values (35, 35);
+ROLLBACK TO SAVEPOINT A;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+START TRANSACTION;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+select * from t1;
+id value
+1 1
+2 2
+3 3
+include/sync_slave_sql_with_master.inc
+select * from t1;
+id value
+1 1
+2 2
+3 3
+drop table t1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result
new file mode 100644
index 00000000000..315f040899e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result
@@ -0,0 +1,54 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+select @@binlog_format;
+@@binlog_format
+STATEMENT
+create table t1 (pk int primary key) engine=rocksdb;
+insert into t1 values (1),(2),(3);
+ERROR HY000: Can't execute updates on master with binlog_format != ROW.
+set session rocksdb_unsafe_for_binlog=on;
+insert into t1 values (1),(2),(3);
+select * from t1;
+pk
+1
+2
+3
+delete from t1;
+set session rocksdb_unsafe_for_binlog=off;
+insert into t1 values (1),(2),(3);
+ERROR HY000: Can't execute updates on master with binlog_format != ROW.
+set binlog_format=row;
+insert into t1 values (1),(2),(3);
+include/sync_slave_sql_with_master.inc
+select * from t1;
+pk
+1
+2
+3
+drop table t1;
+create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+update t1 set value2=100 where id=1;
+update t1 set value2=200 where id=2;
+update t1 set value2=300 where id=3;
+include/sync_slave_sql_with_master.inc
+select * from t1 where id=1;
+id value value2
+1 1 100
+select * from t1 where id=2;
+id value value2
+2 1 200
+select * from t1 where id=3;
+id value value2
+3 1 300
+drop table t1;
+set binlog_format=row;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result
new file mode 100644
index 00000000000..8cdfa910739
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result
@@ -0,0 +1,56 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists t1;
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int primary key,
+kp1 int,
+kp2 int,
+col1 int,
+key (kp1,kp2)
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+create table t3 like t2;
+insert into t3 select * from t2;
+include/sync_slave_sql_with_master.inc
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+include/stop_slave.inc
+include/start_slave.inc
+update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0;
+set debug_sync= 'now WAIT_FOR Reached';
+set global debug = '';
+set sql_log_bin=0;
+delete from t2 where pk=2;
+delete from t2 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+include/sync_slave_sql_with_master.inc
+select * from t2 where pk < 5;
+pk kp1 kp2 col1
+0 0 0 0
+1 1 1 1
+4 4 4 4
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+include/stop_slave.inc
+include/start_slave.inc
+update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0;
+call mtr.add_suppression("Deadlock found when trying to get lock");
+set debug_sync= 'now WAIT_FOR Reached';
+set global debug = '';
+set sql_log_bin=0;
+delete from t3 where pk=2;
+delete from t3 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+include/sync_slave_sql_with_master.inc
+select * from t3 where pk < 5;
+pk kp1 kp2 col1
+0 0 0 0
+1 1 1 1
+4 4 4 100
+drop table t0, t1, t2, t3;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result
new file mode 100644
index 00000000000..766795932b0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result
@@ -0,0 +1,3 @@
+CREATE DATABASE IF NOT EXISTS rqg_examples;
+Running test with grammar file example.yy
+DROP DATABASE rqg_examples;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result
new file mode 100644
index 00000000000..b0a1c408006
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result
@@ -0,0 +1,29 @@
+call mtr.add_suppression("Did not write failed ");
+call mtr.add_suppression("Can't open and lock privilege tables");
+SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER;
+CREATE TABLE mysql.user_temp LIKE mysql.user;
+INSERT mysql.user_temp SELECT * FROM mysql.user;
+CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv;
+INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv_temp;
+CREATE DATABASE IF NOT EXISTS rqg_runtime;
+Running test with grammar file alter_online.yy
+DROP DATABASE rqg_runtime;
+CREATE DATABASE IF NOT EXISTS rqg_runtime;
+Running test with grammar file concurrency_1.yy
+DROP DATABASE rqg_runtime;
+CREATE DATABASE IF NOT EXISTS rqg_runtime;
+Running test with grammar file connect_kill_sql.yy
+DROP DATABASE rqg_runtime;
+CREATE DATABASE IF NOT EXISTS rqg_runtime;
+Running test with grammar file metadata_stability.yy
+DROP DATABASE rqg_runtime;
+DELETE FROM mysql.tables_priv;
+DELETE FROM mysql.user;
+INSERT mysql.user SELECT * FROM mysql.user_temp;
+INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp;
+DROP TABLE mysql.user_temp;
+DROP TABLE mysql.tables_priv_temp;
+DROP TABLE IF EXISTS test.executors;
+DROP DATABASE IF EXISTS testdb_N;
+DROP DATABASE IF EXISTS testdb_S;
+SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result
new file mode 100644
index 00000000000..23705d493e7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result
@@ -0,0 +1,11 @@
+call mtr.add_suppression("Deadlock found when trying to get lock");
+CREATE DATABASE IF NOT EXISTS rqg_transactions;
+Running test with grammar file transactions.yy
+Running test with grammar file repeatable_read.yy
+Running test with grammar file transaction_durability.yy
+Running test with grammar file transactions-flat.yy
+Running test with grammar file combinations.yy
+Running test with grammar file repeatable_read.yy
+Running test with grammar file transaction_durability.yy
+Running test with grammar file transactions-flat.yy
+DROP DATABASE rqg_transactions;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select.result b/storage/rocksdb/mysql-test/rocksdb/r/select.result
new file mode 100644
index 00000000000..22a6ca9bc87
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select.result
@@ -0,0 +1,373 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar');
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (a,b) SELECT a, b FROM t1;
+INSERT INTO t1 (a,b) SELECT a, b FROM t2;
+SELECT * FROM t1;
+a b pk
+1 z 2
+1 z 5
+100 foobar 1
+100 foobar 4
+200 bar 3
+200 bar 6
+SELECT DISTINCT a FROM t1;
+a
+1
+100
+200
+SELECT ALL b, a FROM t1;
+b a
+bar 200
+bar 200
+foobar 100
+foobar 100
+z 1
+z 1
+SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a;
+a b pk
+1 z 2
+1 z 2
+1 z 5
+1 z 5
+100 foobar 1
+100 foobar 1
+100 foobar 4
+100 foobar 4
+200 bar 3
+200 bar 3
+200 bar 6
+200 bar 6
+SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2;
+a
+1
+1
+1
+1
+1
+1
+100
+100
+100
+100
+100
+100
+200
+200
+200
+200
+200
+200
+SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a)
+FROM t1 t1_1, t2, t1 t1_2;
+a
+1
+100
+200
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+3
+SET GLOBAL query_cache_size = 1024*1024;
+SELECT SQL_CACHE * FROM t1, t2;
+a b pk a b pk
+1 z 2 1 z 2
+1 z 2 100 foobar 1
+1 z 2 200 bar 3
+1 z 5 1 z 2
+1 z 5 100 foobar 1
+1 z 5 200 bar 3
+100 foobar 1 1 z 2
+100 foobar 1 100 foobar 1
+100 foobar 1 200 bar 3
+100 foobar 4 1 z 2
+100 foobar 4 100 foobar 1
+100 foobar 4 200 bar 3
+200 bar 3 1 z 2
+200 bar 3 100 foobar 1
+200 bar 3 200 bar 3
+200 bar 6 1 z 2
+200 bar 6 100 foobar 1
+200 bar 6 200 bar 3
+SET GLOBAL query_cache_size = 1048576;
+SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1
+WHERE b > 'b' AND a IS NOT NULL
+GROUP BY 2 DESC, field1 ASC
+HAVING field1 < 1000
+ORDER BY field2, 1 DESC, field1*2
+LIMIT 5 OFFSET 1;
+field1 field2
+11 z:z
+110 foobar:foobar
+SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP;
+SUM(a) MAX(a) b
+2 1 z
+200 100 foobar
+400 200 bar
+602 200 NULL
+SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE();
+Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype
+test.t2.a 1 200 1 3 0 0 100.3333 81.2418 ENUM('1','100','200') NOT NULL
+test.t2.b bar z 1 6 0 0 3.3333 NULL ENUM('bar','foobar','z') NOT NULL
+test.t2.pk 1 3 1 1 0 0 2.0000 0.8165 ENUM('1','2','3') NOT NULL
+SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
+INTO OUTFILE '<DATADIR>/select.out'
+CHARACTER SET utf8
+FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '''';
+200,'bar'
+200,'bar'
+100,'foobar'
+100,'foobar'
+1,'z'
+1,'z'
+SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
+INTO DUMPFILE '<DATADIR>/select.dump';
+ERROR 42000: Result consisted of more than one row
+SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1
+INTO DUMPFILE '<DATADIR>/select.dump';
+1z2200bar3
+SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max;
+SELECT @min, @max;
+@min @max
+1 200
+SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2
+WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a;
+a b pk a b pk
+1 z 2 1 z 2
+1 z 2 1 z 2
+1 z 5 1 z 2
+1 z 5 1 z 2
+100 foobar 1 100 foobar 1
+100 foobar 1 100 foobar 1
+100 foobar 4 100 foobar 1
+100 foobar 4 100 foobar 1
+200 bar 3 200 bar 3
+200 bar 3 200 bar 3
+200 bar 6 200 bar 3
+200 bar 6 200 bar 3
+SELECT alias1.* FROM ( SELECT a,b FROM t1 ) alias1, t2 WHERE t2.a IN (100,200);
+a b
+1 z
+1 z
+1 z
+1 z
+100 foobar
+100 foobar
+100 foobar
+100 foobar
+200 bar
+200 bar
+200 bar
+200 bar
+SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 };
+a
+1
+1
+100
+100
+200
+200
+SELECT t1.* FROM t2 INNER JOIN t1;
+a b pk
+1 z 2
+1 z 2
+1 z 2
+1 z 5
+1 z 5
+1 z 5
+100 foobar 1
+100 foobar 1
+100 foobar 1
+100 foobar 4
+100 foobar 4
+100 foobar 4
+200 bar 3
+200 bar 3
+200 bar 3
+200 bar 6
+200 bar 6
+200 bar 6
+SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b;
+a b pk
+1 z 2
+1 z 2
+1 z 5
+1 z 5
+100 foobar 1
+100 foobar 1
+100 foobar 4
+100 foobar 4
+200 bar 3
+200 bar 3
+200 bar 6
+200 bar 6
+SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b;
+a b
+1 bar
+1 bar
+1 foobar
+1 foobar
+100 bar
+100 bar
+SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b;
+a b
+1 bar
+1 bar
+1 foobar
+1 foobar
+100 bar
+100 bar
+SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1;
+a b pk
+1 z 2
+SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = t2.a WHERE t1.a IS NOT NULL;
+a b pk
+1 z 2
+1 z 2
+100 foobar 1
+100 foobar 1
+200 bar 3
+200 bar 3
+SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b;
+SUM(t2.a)
+602
+SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a);
+MIN(t2.a)
+1
+SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > '';
+b
+bar
+bar
+bar
+bar
+foobar
+foobar
+foobar
+foobar
+z
+z
+z
+z
+SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL;
+b
+bar
+bar
+foobar
+foobar
+z
+z
+SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2;
+a b pk a b pk
+1 z 2 1 z 2
+1 z 5 NULL NULL NULL
+100 foobar 1 100 foobar 1
+100 foobar 4 NULL NULL NULL
+200 bar 3 200 bar 3
+200 bar 6 NULL NULL NULL
+SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 );
+a b pk
+1 z 2
+100 foobar 1
+200 bar 3
+SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER JOIN t2;
+b
+bar
+bar
+bar
+bar
+bar
+bar
+foobar
+foobar
+foobar
+foobar
+foobar
+foobar
+z
+z
+z
+z
+z
+z
+SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2;
+min_a
+1
+1
+1
+SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 );
+a b
+1 z
+SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 );
+a b
+200 bar
+SELECT t2.* FROM t1 t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a );
+a b pk
+1 z 2
+1 z 2
+100 foobar 1
+100 foobar 1
+200 bar 3
+200 bar 3
+SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 );
+a b
+100 foobar
+200 bar
+SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' );
+a b
+1 z
+100 foobar
+SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 );
+a b
+1 z
+1 z
+SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b );
+a b
+100 foobar
+100 foobar
+200 bar
+200 bar
+SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC;
+a b pk
+1 z 2
+1 z 2
+1 z 2
+1 z 5
+1 z 5
+1 z 5
+100 foobar 1
+100 foobar 1
+100 foobar 1
+100 foobar 4
+100 foobar 4
+100 foobar 4
+200 bar 3
+200 bar 3
+200 bar 3
+200 bar 6
+200 bar 6
+200 bar 6
+SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b );
+a b
+1 z
+1 z
+100 foobar
+100 foobar
+200 bar
+200 bar
+SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1;
+a b
+1 z
+100 foobar
+200 bar
+SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1;
+a b
+1 z
+1 z
+1 z
+100 foobar
+100 foobar
+100 foobar
+200 bar
+200 bar
+200 bar
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result
new file mode 100644
index 00000000000..713f5e85fe0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result
@@ -0,0 +1,35 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+connect con1,localhost,root,,;
+BEGIN;
+SELECT a,b FROM t1 WHERE b='a' FOR UPDATE;
+a b
+1 a
+3 a
+connection default;
+SET lock_wait_timeout = 1;
+SELECT a,b FROM t1 WHERE b='a';
+a b
+1 a
+3 a
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+UPDATE t1 SET b='c' WHERE b='a';
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+connection con1;
+COMMIT;
+SELECT a,b FROM t1;
+a b
+1 a
+2 b
+3 a
+disconnect con1;
+connection default;
+UPDATE t1 SET b='c' WHERE b='a';
+SELECT a,b FROM t1;
+a b
+1 c
+2 b
+3 c
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result
new file mode 100644
index 00000000000..044aa4d6fc7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result
@@ -0,0 +1,28 @@
+drop table if exists t1;
+create table t1 (a int primary key) engine=rocksdb;
+insert into t1 values (1), (2), (3);
+Should succeed since no table gets involved
+select 1 for update skip locked;
+1
+1
+select * from nonexistence for update skip locked;
+ERROR 42S02: Table 'test.nonexistence' doesn't exist
+select * from t1 for update skip locked;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+select * from t1 where a > 1 and a < 3 for update skip locked;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+insert into t1 select * from t1 for update skip locked;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+Should succeed since no table gets involved
+select 1 for update nowait;
+1
+1
+select * from nonexistence for update nowait;
+ERROR 42S02: Table 'test.nonexistence' doesn't exist
+select * from t1 for update nowait;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+select * from t1 where a > 1 and a < 3 for update nowait;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+insert into t1 select * from t1 for update nowait;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result
new file mode 100644
index 00000000000..e6433dcbeef
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+connect con1,localhost,root,,;
+BEGIN;
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+a b
+1 a
+3 a
+connection default;
+SET lock_wait_timeout = 1;
+SELECT a,b FROM t1 WHERE b='a';
+a b
+1 a
+3 a
+#
+# Currently, SELECT ... LOCK IN SHARE MODE works like
+# SELECT FOR UPDATE
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+UPDATE t1 SET b='c' WHERE b='a';
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+connection con1;
+COMMIT;
+SELECT a,b FROM t1;
+a b
+1 a
+2 b
+3 a
+disconnect con1;
+connection default;
+UPDATE t1 SET b='c' WHERE b='a';
+SELECT a,b FROM t1;
+a b
+1 c
+2 b
+3 c
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result
new file mode 100644
index 00000000000..69b927ba5a8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result
@@ -0,0 +1,416 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB;
+CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB
+PARTITION BY KEY(l) PARTITIONS 4;
+SHOW ENGINE rocksdb STATUS;
+Type Name Status
+DBSTATS rocksdb #
+CF_COMPACTION __system__ #
+CF_COMPACTION cf_t1 #
+CF_COMPACTION cf_t4 #
+CF_COMPACTION default #
+CF_COMPACTION rev:cf_t2 #
+Memory_Stats rocksdb #
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+3
+INSERT INTO t2 VALUES (1), (2), (3), (4);
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+4
+INSERT INTO t4 VALUES (1), (2), (3), (4), (5);
+SELECT COUNT(*) FROM t4;
+COUNT(*)
+5
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS;
+CF_NAME STAT_TYPE VALUE
+__system__ NUM_IMMUTABLE_MEM_TABLE #
+__system__ MEM_TABLE_FLUSH_PENDING #
+__system__ COMPACTION_PENDING #
+__system__ CUR_SIZE_ACTIVE_MEM_TABLE #
+__system__ CUR_SIZE_ALL_MEM_TABLES #
+__system__ NUM_ENTRIES_ACTIVE_MEM_TABLE #
+__system__ NUM_ENTRIES_IMM_MEM_TABLES #
+__system__ NON_BLOCK_CACHE_SST_MEM_USAGE #
+__system__ NUM_LIVE_VERSIONS #
+cf_t1 NUM_IMMUTABLE_MEM_TABLE #
+cf_t1 MEM_TABLE_FLUSH_PENDING #
+cf_t1 COMPACTION_PENDING #
+cf_t1 CUR_SIZE_ACTIVE_MEM_TABLE #
+cf_t1 CUR_SIZE_ALL_MEM_TABLES #
+cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE #
+cf_t1 NUM_ENTRIES_IMM_MEM_TABLES #
+cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE #
+cf_t1 NUM_LIVE_VERSIONS #
+cf_t4 NUM_IMMUTABLE_MEM_TABLE #
+cf_t4 MEM_TABLE_FLUSH_PENDING #
+cf_t4 COMPACTION_PENDING #
+cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE #
+cf_t4 CUR_SIZE_ALL_MEM_TABLES #
+cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE #
+cf_t4 NUM_ENTRIES_IMM_MEM_TABLES #
+cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE #
+cf_t4 NUM_LIVE_VERSIONS #
+default NUM_IMMUTABLE_MEM_TABLE #
+default MEM_TABLE_FLUSH_PENDING #
+default COMPACTION_PENDING #
+default CUR_SIZE_ACTIVE_MEM_TABLE #
+default CUR_SIZE_ALL_MEM_TABLES #
+default NUM_ENTRIES_ACTIVE_MEM_TABLE #
+default NUM_ENTRIES_IMM_MEM_TABLES #
+default NON_BLOCK_CACHE_SST_MEM_USAGE #
+default NUM_LIVE_VERSIONS #
+rev:cf_t2 NUM_IMMUTABLE_MEM_TABLE #
+rev:cf_t2 MEM_TABLE_FLUSH_PENDING #
+rev:cf_t2 COMPACTION_PENDING #
+rev:cf_t2 CUR_SIZE_ACTIVE_MEM_TABLE #
+rev:cf_t2 CUR_SIZE_ALL_MEM_TABLES #
+rev:cf_t2 NUM_ENTRIES_ACTIVE_MEM_TABLE #
+rev:cf_t2 NUM_ENTRIES_IMM_MEM_TABLES #
+rev:cf_t2 NON_BLOCK_CACHE_SST_MEM_USAGE #
+rev:cf_t2 NUM_LIVE_VERSIONS #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS;
+STAT_TYPE VALUE
+DB_BACKGROUND_ERRORS #
+DB_NUM_SNAPSHOTS #
+DB_OLDEST_SNAPSHOT_TIME #
+DB_BLOCK_CACHE_USAGE #
+SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE)
+FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_SCHEMA = 'test'
+GROUP BY TABLE_NAME, PARTITION_NAME;
+TABLE_SCHEMA TABLE_NAME PARTITION_NAME COUNT(STAT_TYPE)
+test t1 NULL 43
+test t2 NULL 43
+test t4 p0 43
+test t4 p1 43
+test t4 p2 43
+test t4 p3 43
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS;
+CF_NAME OPTION_TYPE VALUE
+__system__ COMPARATOR #
+__system__ MERGE_OPERATOR #
+__system__ COMPACTION_FILTER #
+__system__ COMPACTION_FILTER_FACTORY #
+__system__ WRITE_BUFFER_SIZE #
+__system__ MAX_WRITE_BUFFER_NUMBER #
+__system__ MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
+__system__ NUM_LEVELS #
+__system__ LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
+__system__ LEVEL0_SLOWDOWN_WRITES_TRIGGER #
+__system__ LEVEL0_STOP_WRITES_TRIGGER #
+__system__ MAX_MEM_COMPACTION_LEVEL #
+__system__ TARGET_FILE_SIZE_BASE #
+__system__ TARGET_FILE_SIZE_MULTIPLIER #
+__system__ MAX_BYTES_FOR_LEVEL_BASE #
+__system__ LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
+__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER #
+__system__ SOFT_RATE_LIMIT #
+__system__ HARD_RATE_LIMIT #
+__system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS #
+__system__ ARENA_BLOCK_SIZE #
+__system__ DISABLE_AUTO_COMPACTIONS #
+__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH #
+__system__ VERIFY_CHECKSUM_IN_COMPACTION #
+__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
+__system__ MEMTABLE_FACTORY #
+__system__ INPLACE_UPDATE_SUPPORT #
+__system__ INPLACE_UPDATE_NUM_LOCKS #
+__system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
+__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
+__system__ BLOOM_LOCALITY #
+__system__ MAX_SUCCESSIVE_MERGES #
+__system__ MIN_PARTIAL_MERGE_OPERANDS #
+__system__ OPTIMIZE_FILTERS_FOR_HITS #
+__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
+__system__ COMPRESSION_TYPE #
+__system__ COMPRESSION_PER_LEVEL #
+__system__ COMPRESSION_OPTS #
+__system__ BOTTOMMOST_COMPRESSION #
+__system__ PREFIX_EXTRACTOR #
+__system__ COMPACTION_STYLE #
+__system__ COMPACTION_OPTIONS_UNIVERSAL #
+__system__ COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
+__system__ BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
+__system__ BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
+__system__ BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
+__system__ BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
+__system__ BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
+__system__ BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
+__system__ BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
+__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
+__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
+__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
+__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
+__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
+__system__ BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
+cf_t1 COMPARATOR #
+cf_t1 MERGE_OPERATOR #
+cf_t1 COMPACTION_FILTER #
+cf_t1 COMPACTION_FILTER_FACTORY #
+cf_t1 WRITE_BUFFER_SIZE #
+cf_t1 MAX_WRITE_BUFFER_NUMBER #
+cf_t1 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
+cf_t1 NUM_LEVELS #
+cf_t1 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
+cf_t1 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
+cf_t1 LEVEL0_STOP_WRITES_TRIGGER #
+cf_t1 MAX_MEM_COMPACTION_LEVEL #
+cf_t1 TARGET_FILE_SIZE_BASE #
+cf_t1 TARGET_FILE_SIZE_MULTIPLIER #
+cf_t1 MAX_BYTES_FOR_LEVEL_BASE #
+cf_t1 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
+cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
+cf_t1 SOFT_RATE_LIMIT #
+cf_t1 HARD_RATE_LIMIT #
+cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
+cf_t1 ARENA_BLOCK_SIZE #
+cf_t1 DISABLE_AUTO_COMPACTIONS #
+cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
+cf_t1 VERIFY_CHECKSUM_IN_COMPACTION #
+cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
+cf_t1 MEMTABLE_FACTORY #
+cf_t1 INPLACE_UPDATE_SUPPORT #
+cf_t1 INPLACE_UPDATE_NUM_LOCKS #
+cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
+cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
+cf_t1 BLOOM_LOCALITY #
+cf_t1 MAX_SUCCESSIVE_MERGES #
+cf_t1 MIN_PARTIAL_MERGE_OPERANDS #
+cf_t1 OPTIMIZE_FILTERS_FOR_HITS #
+cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
+cf_t1 COMPRESSION_TYPE #
+cf_t1 COMPRESSION_PER_LEVEL #
+cf_t1 COMPRESSION_OPTS #
+cf_t1 BOTTOMMOST_COMPRESSION #
+cf_t1 PREFIX_EXTRACTOR #
+cf_t1 COMPACTION_STYLE #
+cf_t1 COMPACTION_OPTIONS_UNIVERSAL #
+cf_t1 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
+cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
+cf_t4 COMPARATOR #
+cf_t4 MERGE_OPERATOR #
+cf_t4 COMPACTION_FILTER #
+cf_t4 COMPACTION_FILTER_FACTORY #
+cf_t4 WRITE_BUFFER_SIZE #
+cf_t4 MAX_WRITE_BUFFER_NUMBER #
+cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
+cf_t4 NUM_LEVELS #
+cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
+cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
+cf_t4 LEVEL0_STOP_WRITES_TRIGGER #
+cf_t4 MAX_MEM_COMPACTION_LEVEL #
+cf_t4 TARGET_FILE_SIZE_BASE #
+cf_t4 TARGET_FILE_SIZE_MULTIPLIER #
+cf_t4 MAX_BYTES_FOR_LEVEL_BASE #
+cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
+cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
+cf_t4 SOFT_RATE_LIMIT #
+cf_t4 HARD_RATE_LIMIT #
+cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
+cf_t4 ARENA_BLOCK_SIZE #
+cf_t4 DISABLE_AUTO_COMPACTIONS #
+cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
+cf_t4 VERIFY_CHECKSUM_IN_COMPACTION #
+cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
+cf_t4 MEMTABLE_FACTORY #
+cf_t4 INPLACE_UPDATE_SUPPORT #
+cf_t4 INPLACE_UPDATE_NUM_LOCKS #
+cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
+cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
+cf_t4 BLOOM_LOCALITY #
+cf_t4 MAX_SUCCESSIVE_MERGES #
+cf_t4 MIN_PARTIAL_MERGE_OPERANDS #
+cf_t4 OPTIMIZE_FILTERS_FOR_HITS #
+cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
+cf_t4 COMPRESSION_TYPE #
+cf_t4 COMPRESSION_PER_LEVEL #
+cf_t4 COMPRESSION_OPTS #
+cf_t4 BOTTOMMOST_COMPRESSION #
+cf_t4 PREFIX_EXTRACTOR #
+cf_t4 COMPACTION_STYLE #
+cf_t4 COMPACTION_OPTIONS_UNIVERSAL #
+cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
+cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
+default COMPARATOR #
+default MERGE_OPERATOR #
+default COMPACTION_FILTER #
+default COMPACTION_FILTER_FACTORY #
+default WRITE_BUFFER_SIZE #
+default MAX_WRITE_BUFFER_NUMBER #
+default MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
+default NUM_LEVELS #
+default LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
+default LEVEL0_SLOWDOWN_WRITES_TRIGGER #
+default LEVEL0_STOP_WRITES_TRIGGER #
+default MAX_MEM_COMPACTION_LEVEL #
+default TARGET_FILE_SIZE_BASE #
+default TARGET_FILE_SIZE_MULTIPLIER #
+default MAX_BYTES_FOR_LEVEL_BASE #
+default LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
+default MAX_BYTES_FOR_LEVEL_MULTIPLIER #
+default SOFT_RATE_LIMIT #
+default HARD_RATE_LIMIT #
+default RATE_LIMIT_DELAY_MAX_MILLISECONDS #
+default ARENA_BLOCK_SIZE #
+default DISABLE_AUTO_COMPACTIONS #
+default PURGE_REDUNDANT_KVS_WHILE_FLUSH #
+default VERIFY_CHECKSUM_IN_COMPACTION #
+default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
+default MEMTABLE_FACTORY #
+default INPLACE_UPDATE_SUPPORT #
+default INPLACE_UPDATE_NUM_LOCKS #
+default MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
+default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
+default BLOOM_LOCALITY #
+default MAX_SUCCESSIVE_MERGES #
+default MIN_PARTIAL_MERGE_OPERANDS #
+default OPTIMIZE_FILTERS_FOR_HITS #
+default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
+default COMPRESSION_TYPE #
+default COMPRESSION_PER_LEVEL #
+default COMPRESSION_OPTS #
+default BOTTOMMOST_COMPRESSION #
+default PREFIX_EXTRACTOR #
+default COMPACTION_STYLE #
+default COMPACTION_OPTIONS_UNIVERSAL #
+default COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
+default BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
+default BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
+default BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
+default BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
+default BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
+default BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
+default BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
+default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
+default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
+default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
+default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
+default BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
+default BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
+rev:cf_t2 COMPARATOR #
+rev:cf_t2 MERGE_OPERATOR #
+rev:cf_t2 COMPACTION_FILTER #
+rev:cf_t2 COMPACTION_FILTER_FACTORY #
+rev:cf_t2 WRITE_BUFFER_SIZE #
+rev:cf_t2 MAX_WRITE_BUFFER_NUMBER #
+rev:cf_t2 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
+rev:cf_t2 NUM_LEVELS #
+rev:cf_t2 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
+rev:cf_t2 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
+rev:cf_t2 LEVEL0_STOP_WRITES_TRIGGER #
+rev:cf_t2 MAX_MEM_COMPACTION_LEVEL #
+rev:cf_t2 TARGET_FILE_SIZE_BASE #
+rev:cf_t2 TARGET_FILE_SIZE_MULTIPLIER #
+rev:cf_t2 MAX_BYTES_FOR_LEVEL_BASE #
+rev:cf_t2 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
+rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
+rev:cf_t2 SOFT_RATE_LIMIT #
+rev:cf_t2 HARD_RATE_LIMIT #
+rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
+rev:cf_t2 ARENA_BLOCK_SIZE #
+rev:cf_t2 DISABLE_AUTO_COMPACTIONS #
+rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
+rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION #
+rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
+rev:cf_t2 MEMTABLE_FACTORY #
+rev:cf_t2 INPLACE_UPDATE_SUPPORT #
+rev:cf_t2 INPLACE_UPDATE_NUM_LOCKS #
+rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
+rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
+rev:cf_t2 BLOOM_LOCALITY #
+rev:cf_t2 MAX_SUCCESSIVE_MERGES #
+rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS #
+rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS #
+rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
+rev:cf_t2 COMPRESSION_TYPE #
+rev:cf_t2 COMPRESSION_PER_LEVEL #
+rev:cf_t2 COMPRESSION_OPTS #
+rev:cf_t2 BOTTOMMOST_COMPRESSION #
+rev:cf_t2 PREFIX_EXTRACTOR #
+rev:cf_t2 COMPACTION_STYLE #
+rev:cf_t2 COMPACTION_OPTIONS_UNIVERSAL #
+rev:cf_t2 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
+rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+SHOW ENGINE rocksdb MUTEX;
+Type Name Status
+SHOW ENGINE ALL MUTEX;
+SHOW ENGINE rocksdb TRANSACTION STATUS;
+Type Name Status
+SNAPSHOTS rocksdb
+============================================================
+TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT
+============================================================
+---------
+SNAPSHOTS
+---------
+LIST OF SNAPSHOTS FOR EACH SESSION:
+-----------------------------------------
+END OF ROCKSDB TRANSACTION MONITOR OUTPUT
+=========================================
+
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+SHOW ENGINE rocksdb TRANSACTION STATUS;
+Type Name Status
+SNAPSHOTS rocksdb
+============================================================
+TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT
+============================================================
+---------
+SNAPSHOTS
+---------
+LIST OF SNAPSHOTS FOR EACH SESSION:
+---SNAPSHOT, ACTIVE NUM sec
+MySQL thread id TID, OS thread handle PTR
+lock count 0, write count 0
+-----------------------------------------
+END OF ROCKSDB TRANSACTION MONITOR OUTPUT
+=========================================
+
+ROLLBACK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result
new file mode 100644
index 00000000000..407a8b103bd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result
@@ -0,0 +1,24 @@
+DROP TABLE IF EXISTS t1, t2, t3;
+CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo');
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t2 (a,b) VALUES (1,'bar');
+set global rocksdb_force_flush_memtable_now = true;
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8;
+SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' );
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 2 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
+t2 ROCKSDB 10 Fixed 1 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
+t3 ROCKSDB 10 Fixed 1000 # # 0 0 0 NULL NULL NULL NULL utf8_general_ci NULL
+SHOW TABLE STATUS WHERE name LIKE 't2';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t2 ROCKSDB 10 Fixed 10000 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
+DROP TABLE t1, t2, t3;
+CREATE DATABASE `db_new..............................................end`;
+USE `db_new..............................................end`;
+CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb;
+INSERT INTO `t1_new..............................................end` VALUES (1);
+SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end';
+TABLE_SCHEMA db_new..............................................end
+TABLE_NAME t1_new..............................................end
+DROP DATABASE `db_new..............................................end`;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result
new file mode 100644
index 00000000000..f40aceffd79
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS t1;
+SHOW GLOBAL VARIABLES LIKE "log_bin";
+Variable_name Value
+log_bin ON
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result
new file mode 100644
index 00000000000..ef9fafc852a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result
@@ -0,0 +1,66 @@
+CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end
+true
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+case when variable_value-@d < 10 then 'true' else 'false' end
+true
+CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+optimize table t2;
+Table Op Msg_type Msg_text
+test.t2 optimize status OK
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end
+true
+select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+case when variable_value-@d > 9000 then 'true' else 'false' end
+true
+CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB;
+INSERT INTO t3 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+optimize table t3;
+Table Op Msg_type Msg_text
+test.t3 optimize status OK
+select case when variable_value-@s = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s = 0 then 'true' else 'false' end
+true
+select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+case when variable_value-@d > 9000 then 'true' else 'false' end
+true
+CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB;
+INSERT INTO t4 VALUES (1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+optimize table t4;
+Table Op Msg_type Msg_text
+test.t4 optimize status OK
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end
+true
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+case when variable_value-@d < 10 then 'true' else 'false' end
+true
+CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB;
+INSERT INTO t5 VALUES (1, 1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+optimize table t5;
+Table Op Msg_type Msg_text
+test.t5 optimize status OK
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end
+true
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+case when variable_value-@d < 10 then 'true' else 'false' end
+true
+DROP TABLE t1, t2, t3, t4, t5;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result
new file mode 100644
index 00000000000..e8a11363dba
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result
@@ -0,0 +1,10 @@
+SET @cur_long_query_time = @@long_query_time;
+SET @@long_query_time = 600;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB;
+SET @@long_query_time = 0;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+7500
+SET @@long_query_time = @cur_long_query_time;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result
new file mode 100644
index 00000000000..1798563f328
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result
@@ -0,0 +1,69 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+create table t1(
+id bigint not null primary key auto_increment,
+a varchar(255) not null,
+b bigint,
+index t1_1(b)
+) engine=rocksdb;
+create table t2(
+id bigint not null primary key auto_increment,
+a varchar(255) not null,
+b bigint,
+index t2_1(b) comment 'cf_t3'
+) engine=rocksdb;
+create table t3(
+id bigint not null primary key auto_increment,
+a varchar(255) not null,
+b bigint,
+index t3_1(b) comment 'rev:cf_t4'
+) engine=rocksdb;
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1';
+table_name table_rows
+t2 4999
+t3 4999
+SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1';
+CASE WHEN table_rows < 100000 then 'true' else 'false' end
+true
+set global rocksdb_force_flush_memtable_now = true;
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name table_rows
+t1 100000
+t2 4999
+t3 4999
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name data_length>0 index_length>0
+t1 1 1
+t2 1 1
+t3 1 1
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name table_rows
+t1 100000
+t2 4999
+t3 4999
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name data_length>0 index_length>0
+t1 1 1
+t2 1 1
+t3 1 1
+analyze table t1,t2,t3,t4,t5;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
+test.t3 analyze status OK
+test.t4 analyze Error Table 'test.t4' doesn't exist
+test.t4 analyze status Operation failed
+test.t5 analyze Error Table 'test.t5' doesn't exist
+test.t5 analyze status Operation failed
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name table_rows
+t1 100000
+t2 4999
+t3 4999
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+table_name data_length>0 index_length>0
+t1 1 1
+t2 1 1
+t3 1 1
+drop table t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result
new file mode 100644
index 00000000000..e0520f5a31b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1000
+SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1";
+TABLE_SCHEMA TABLE_NAME TABLE_ENGINE ROWS_INSERTED ROWS_UPDATED ROWS_DELETED ROWS_READ ROWS_REQUESTED COMPRESSED_PAGE_SIZE COMPRESS_PADDING COMPRESS_OPS COMPRESS_OPS_OK COMPRESS_PRIMARY_OPS COMPRESS_PRIMARY_OPS_OK COMPRESS_USECS COMPRESS_OK_USECS COMPRESS_PRIMARY_USECS COMPRESS_PRIMARY_OK_USECS UNCOMPRESS_OPS UNCOMPRESS_USECS ROWS_INDEX_FIRST ROWS_INDEX_NEXT IO_READ_BYTES IO_READ_REQUESTS IO_READ_SVC_USECS IO_READ_SVC_USECS_MAX IO_READ_WAIT_USECS IO_READ_WAIT_USECS_MAX IO_READ_SLOW_IOS IO_WRITE_BYTES IO_WRITE_REQUESTS IO_WRITE_SVC_USECS IO_WRITE_SVC_USECS_MAX IO_WRITE_WAIT_USECS IO_WRITE_WAIT_USECS_MAX IO_WRITE_SLOW_IOS IO_READ_BYTES_BLOB IO_READ_REQUESTS_BLOB IO_READ_SVC_USECS_BLOB IO_READ_SVC_USECS_MAX_BLOB IO_READ_WAIT_USECS_BLOB IO_READ_WAIT_USECS_MAX_BLOB IO_READ_SLOW_IOS_BLOB IO_READ_BYTES_PRIMARY IO_READ_REQUESTS_PRIMARY IO_READ_SVC_USECS_PRIMARY IO_READ_SVC_USECS_MAX_PRIMARY IO_READ_WAIT_USECS_PRIMARY IO_READ_WAIT_USECS_MAX_PRIMARY IO_READ_SLOW_IOS_PRIMARY IO_READ_BYTES_SECONDARY IO_READ_REQUESTS_SECONDARY IO_READ_SVC_USECS_SECONDARY IO_READ_SVC_USECS_MAX_SECONDARY IO_READ_WAIT_USECS_SECONDARY IO_READ_WAIT_USECS_MAX_SECONDARY IO_READ_SLOW_IOS_SECONDARY IO_INDEX_INSERTS QUERIES_USED QUERIES_EMPTY COMMENT_BYTES INNODB_ROW_LOCK_WAITS INNODB_ROW_LOCK_WAIT_TIMEOUTS INNODB_PAGES_READ INNODB_PAGES_READ_INDEX INNODB_PAGES_READ_BLOB INNODB_PAGES_WRITTEN INNODB_PAGES_WRITTEN_INDEX INNODB_PAGES_WRITTEN_BLOB
+test t1 ROCKSDB 1000 0 0 1000 1001 0 0 0 0 0 0 0 0 0 0 0 0 1 999 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1001 0 0 0 0 0 0 0 0 0 0
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result
new file mode 100644
index 00000000000..7cc0cc7cd98
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1;
+a
+10
+ALTER TABLE t1 AUTO_INCREMENT=100;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=100 DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1 ORDER BY a;
+a
+10
+100
+ALTER TABLE t1 AUTO_INCREMENT=50;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB AUTO_INCREMENT=101 DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1 ORDER BY a;
+a
+10
+100
+101
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result
new file mode 100644
index 00000000000..f904c04e0fb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300
+ALTER TABLE t1 AVG_ROW_LENGTH=30000000;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result
new file mode 100644
index 00000000000..d9cc69ee2a1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CHECKSUM=1
+ALTER TABLE t1 CHECKSUM=0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result
new file mode 100644
index 00000000000..0beddd9f6e3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result
@@ -0,0 +1,26 @@
+DROP TABLE IF EXISTS t1;
+CREATE DATABASE test_remote;
+CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql
+OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote');
+CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql
+OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote');
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection'
+ALTER TABLE t1 CONNECTION='test_connection2';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection2'
+DROP TABLE t1;
+DROP SERVER test_connection;
+DROP SERVER test_connection2;
+DROP DATABASE test_remote;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result
new file mode 100644
index 00000000000..5821369ae57
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '<DATA_DIR>' INDEX DIRECTORY = '<INDEX_DIR>';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 INDEX DIRECTORY = '<DATA_DIR>';
+Warnings:
+Warning 1618 <INDEX DIRECTORY> option ignored
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result
new file mode 100644
index 00000000000..c5d1ad8ace9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1
+ALTER TABLE t1 DELAY_KEY_WRITE=0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result
new file mode 100644
index 00000000000..bd5e65f59c4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 INSERT_METHOD=NO;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result
new file mode 100644
index 00000000000..6c34d08b7eb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8
+ALTER TABLE t1 KEY_BLOCK_SIZE=1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`) KEY_BLOCK_SIZE=8
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result
new file mode 100644
index 00000000000..679e00e0771
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=10000000
+ALTER TABLE t1 MAX_ROWS=30000000;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=30000000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result
new file mode 100644
index 00000000000..bc650434b7a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=1
+ALTER TABLE t1 MIN_ROWS=10000;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=10000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result
new file mode 100644
index 00000000000..b42d3f4d450
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=1
+ALTER TABLE t1 PACK_KEYS=0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=0
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result
new file mode 100644
index 00000000000..80ec79497ce
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 PASSWORD='new_password';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result
new file mode 100644
index 00000000000..a0c480eaede
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) NOT NULL,
+ PRIMARY KEY (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) NOT NULL,
+ PRIMARY KEY (`b`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result
new file mode 100644
index 00000000000..fb021dbda18
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS t1, child1, child2;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb UNION(child1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 UNION = (child1,child2);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1, child1, child2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result
new file mode 100644
index 00000000000..ecfc72932cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result
@@ -0,0 +1,46 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb
+DEFAULT CHARACTER SET = utf8
+COLLATE = utf8_general_ci
+COMMENT = 'standard table options'
+;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='standard table options'
+ALTER TABLE t1 COMMENT = 'table altered';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered'
+ALTER TABLE t1 ENGINE=MEMORY;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MEMORY DEFAULT CHARSET=utf8 COMMENT='table altered'
+ALTER TABLE t1 ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered'
+ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='table altered'
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result
new file mode 100644
index 00000000000..c24ef9fc1c8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result
@@ -0,0 +1,936 @@
+create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb;
+insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10);
+begin;
+insert into t1 values (11,11,11);
+select * from t1 where id=1;
+id value value2
+1 1 1
+select * from t1 where value=1;
+id value value2
+1 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+1 1 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+11 11 11
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+11
+rollback;
+begin;
+insert into t1 values (7,7,7);
+select * from t1 where id=1;
+id value value2
+1 1 1
+select * from t1 where value=1;
+id value value2
+1 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+1 1 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1;
+id value value2
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+rollback;
+begin;
+update t1 set value2=100 where id=1;
+select * from t1 where id=1;
+id value value2
+1 1 100
+select * from t1 where value=1;
+id value value2
+1 1 100
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 100
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 100
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+1 1 100
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+update t1 set value=100 where id=1;
+select * from t1 where id=1;
+id value value2
+1 100 1
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+1 100 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1;
+id value value2
+1 100 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+100
+rollback;
+begin;
+update t1 set id=100 where id=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+100 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+100 1 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+100 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+100 1 1
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+100 1 1
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+update t1 set value2=100 where value=1;
+select * from t1 where id=1;
+id value value2
+1 1 100
+select * from t1 where value=1;
+id value value2
+1 1 100
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 100
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 100
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+1 1 100
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+update t1 set value=100 where value=1;
+select * from t1 where id=1;
+id value value2
+1 100 1
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+1 100 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1;
+id value value2
+1 100 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+100
+rollback;
+begin;
+update t1 set id=100 where value=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+100 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+100 1 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+100 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+100 1 1
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+100 1 1
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+update t1 set value2=100 where value2=1;
+select * from t1 where id=1;
+id value value2
+1 1 100
+select * from t1 where value=1;
+id value value2
+1 1 100
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 100
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 100
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+1 1 100
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+update t1 set value=100 where value2=1;
+select * from t1 where id=1;
+id value value2
+1 100 1
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+1 100 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+1 100 1
+2 2 2
+select * from t1;
+id value value2
+1 100 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+100
+rollback;
+begin;
+update t1 set id=100 where value2=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+100 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+100 1 1
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+100 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+100 1 1
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+100 1 1
+select value from t1;
+value
+1
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+delete from t1 where id=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+delete from t1 where value=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+delete from t1 where value2=1;
+select * from t1 where id=1;
+id value value2
+select * from t1 where value=1;
+id value value2
+select value from t1 where value=1;
+value
+select * from t1 where value2=1;
+id value value2
+select * from t1 where id=5;
+id value value2
+5 5 5
+select * from t1 where value=5;
+id value value2
+5 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+5 5 5
+select * from t1 where id < 3;
+id value value2
+2 2 2
+select * from t1 where value < 3;
+id value value2
+2 2 2
+select value from t1 where value < 3;
+value
+2
+select * from t1 where value2 < 3;
+id value value2
+2 2 2
+select * from t1;
+id value value2
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+8 8 8
+9 9 9
+10 10 10
+select value from t1;
+value
+2
+3
+4
+5
+6
+8
+9
+10
+rollback;
+begin;
+insert into t1 values (11,11,11);
+insert into t1 values (12,12,12);
+insert into t1 values (13,13,13);
+delete from t1 where id=9;
+delete from t1 where value=8;
+update t1 set id=100 where value2=5;
+update t1 set value=103 where value=4;
+update t1 set id=115 where id=3;
+select * from t1 where id=1;
+id value value2
+1 1 1
+select * from t1 where value=1;
+id value value2
+1 1 1
+select value from t1 where value=1;
+value
+1
+select * from t1 where value2=1;
+id value value2
+1 1 1
+select * from t1 where id=5;
+id value value2
+select * from t1 where value=5;
+id value value2
+100 5 5
+select value from t1 where value=5;
+value
+5
+select * from t1 where value2=5;
+id value value2
+100 5 5
+select * from t1 where id < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1 where value < 3;
+id value value2
+1 1 1
+2 2 2
+select value from t1 where value < 3;
+value
+1
+2
+select * from t1 where value2 < 3;
+id value value2
+1 1 1
+2 2 2
+select * from t1;
+id value value2
+1 1 1
+2 2 2
+4 103 4
+6 6 6
+10 10 10
+11 11 11
+12 12 12
+13 13 13
+100 5 5
+115 3 3
+select value from t1;
+value
+1
+2
+3
+5
+6
+10
+11
+12
+13
+103
+rollback;
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result
new file mode 100644
index 00000000000..1544256f194
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result
@@ -0,0 +1,33 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+TRUNCATE TABLE t1;
+INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
+TRUNCATE TABLE t1;
+SELECT a,b FROM t1;
+a b
+DROP TABLE t1;
+CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb;
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
+INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+TRUNCATE TABLE t1;
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
+INSERT INTO t1 (c) VALUES ('d');
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed # # # 0 0 0 2 NULL NULL NULL latin1_swedish_ci NULL
+SELECT a,c FROM t1;
+a c
+1 d
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+HANDLER t1 OPEN AS h1;
+ERROR HY000: Table storage engine for 'h1' doesn't have this option
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result
new file mode 100644
index 00000000000..7adf50f9ff3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c varchar(500) not null,
+primary key (a,b) comment 'cf1',
+key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+DELETE FROM t1;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+truncate table t1;
+select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+case when variable_value-@a < 500000 then 'true' else 'false' end
+true
+DROP TABLE IF EXISTS t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result
new file mode 100644
index 00000000000..bbdd6d210fb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result
@@ -0,0 +1,48 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+b BINARY ,
+b0 BINARY(0) ,
+b1 BINARY(1) ,
+b20 BINARY(20) ,
+b255 BINARY(255) ,
+pk BINARY PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) YES NULL
+b0 binary(0) YES NULL
+b1 binary(1) YES NULL
+b20 binary(20) YES NULL
+b255 binary(255) YES NULL
+pk binary(1) NO PRI NULL
+INSERT INTO t1 VALUES ('','','','','','');
+INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a');
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b');
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b20' at row 1
+Warning 1265 Data truncated for column 'b255' at row 1
+INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1;
+ERROR 23000: Duplicate entry 'c' for key 'PRIMARY'
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk)
+00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00
+61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62
+61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61
+ALTER TABLE t1 ADD COLUMN b257 BINARY(257) ;
+ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+b binary(1) YES NULL
+b0 binary(0) YES NULL
+b1 binary(1) YES NULL
+b20 binary(20) YES NULL
+b255 binary(255) YES NULL
+pk binary(1) NO PRI NULL
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result
new file mode 100644
index 00000000000..c5cffdc1a0d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result
@@ -0,0 +1,80 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (b BINARY,
+b20 BINARY(20) PRIMARY KEY,
+v16 VARBINARY(16),
+v128 VARBINARY(128)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b20 A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index
+SELECT HEX(b20) FROM t1 ORDER BY b20;
+HEX(b20)
+6368617231000000000000000000000000000000
+6368617232000000000000000000000000000000
+6368617233000000000000000000000000000000
+6368617234000000000000000000000000000000
+EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort
+SELECT HEX(b20) FROM t1 ORDER BY b20 DESC;
+HEX(b20)
+6368617234000000000000000000000000000000
+6368617233000000000000000000000000000000
+6368617232000000000000000000000000000000
+6368617231000000000000000000000000000000
+DROP TABLE t1;
+CREATE TABLE t1 (b BINARY,
+b20 BINARY(20),
+v16 VARBINARY(16),
+v128 VARBINARY(128),
+pk VARBINARY(10) PRIMARY KEY,
+INDEX (v16(10))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 v16 1 v16 A 500 10 NULL YES LSMTREE
+INSERT INTO t1 (b,b20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6);
+INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1;
+EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range v16 v16 13 NULL # Using where
+SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%';
+HEX(SUBSTRING(v16,7,3))
+723161
+723161
+723161
+723161
+723261
+723261
+723261
+723261
+723361
+723361
+723461
+723461
+EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range v16 v16 13 NULL # Using where
+SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%';
+HEX(SUBSTRING(v16,7,3))
+723161
+723161
+723161
+723161
+723261
+723261
+723261
+723261
+723361
+723361
+723461
+723461
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result
new file mode 100644
index 00000000000..d385c0d4670
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result
@@ -0,0 +1,53 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a BIT ,
+b BIT(20) ,
+c BIT(64) ,
+d BIT(1) ,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) YES NULL
+b bit(20) YES NULL
+c bit(64) NO PRI b'0'
+d bit(1) YES NULL
+ALTER TABLE t1 DROP COLUMN d;
+ALTER TABLE t1 ADD COLUMN d BIT(0) ;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a bit(1) YES NULL
+b bit(20) YES NULL
+c bit(64) NO PRI b'0'
+d bit(1) YES NULL
+INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1);
+SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0;
+BIN(a) HEX(b) c+0
+0 FFFFF 18446744073709551615
+INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d<100;
+a+0 b+0 c+0
+0 1048575 18446744073709551615
+1 0 18446744073709551614
+INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0);
+SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2);
+a+0 b+0 c+0
+1 0 18446744073709551614
+1 102 255
+DELETE FROM t1;
+INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0);
+Warnings:
+Warning 1264 Out of range value for column 'c' at row 1
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+a+0 b+0 c+0 d+0
+1 0 0 1
+1 0 18446744073709551615 0
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) ) ENGINE=rocksdb;
+ERROR 42000: Display width out of range for column 'a' (max = 64)
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result
new file mode 100644
index 00000000000..8da878eb0f2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result
@@ -0,0 +1,58 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a BIT,
+b BIT(20) PRIMARY KEY,
+c BIT(32),
+d BIT(64)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (a,b,c,d) VALUES
+(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13),
+(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF);
+EXPLAIN SELECT b+0 FROM t1 ORDER BY b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 3 NULL # Using index
+SELECT b+0 FROM t1 ORDER BY b;
+b+0
+11
+12
+13
+16
+100
+101
+256
+1000
+65535
+1048575
+DROP TABLE t1;
+# TODO: Unique indexes are not enforced
+CREATE TABLE t1 (
+a BIT,
+b BIT(20),
+c BIT(32),
+d BIT(64),
+pk BIT(10) PRIMARY KEY,
+INDEX(a)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 a 1 a A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (a,b,c,d,pk) VALUES
+(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4),
+(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10);
+EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary; Using filesort
+SELECT DISTINCT a+0 FROM t1 ORDER BY a;
+a+0
+0
+1
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result
new file mode 100644
index 00000000000..e36c91658fd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result
@@ -0,0 +1,57 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b BLOB ,
+b0 BLOB(0) ,
+b1 BLOB(1) ,
+b300 BLOB(300) ,
+bm BLOB(65535) ,
+b70k BLOB(70000) ,
+b17m BLOB(17000000) ,
+t TINYBLOB ,
+m MEDIUMBLOB ,
+l LONGBLOB
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b blob YES NULL
+b0 blob YES NULL
+b1 tinyblob YES NULL
+b300 blob YES NULL
+bm blob YES NULL
+b70k mediumblob YES NULL
+b17m longblob YES NULL
+t tinyblob YES NULL
+m mediumblob YES NULL
+l longblob YES NULL
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) );
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b0' at row 1
+Warning 1265 Data truncated for column 'b1' at row 1
+Warning 1265 Data truncated for column 'b300' at row 1
+Warning 1265 Data truncated for column 'bm' at row 1
+Warning 1265 Data truncated for column 't' at row 1
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152
+ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296);
+ERROR 42000: Display width out of range for column 'bbb' (max = 4294967295)
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result
new file mode 100644
index 00000000000..26726e0f6d1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result
@@ -0,0 +1,188 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+b BLOB,
+t TINYBLOB,
+m MEDIUMBLOB,
+l LONGBLOB,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 b A 1000 32 NULL LSMTREE
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+# # # # # PRIMARY # # # #
+SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f;
+f
+
+
+EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+# # # # # NULL # # # #
+SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f;
+f
+
+
+DROP TABLE t1;
+CREATE TABLE t1 (
+b BLOB,
+t TINYBLOB,
+m MEDIUMBLOB,
+l LONGBLOB,
+pk INT AUTO_INCREMENT PRIMARY KEY,
+UNIQUE INDEX l_t (l(256),t(64))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk # # NULL NULL # #
+t1 0 l_t 1 l # # 256 NULL # #
+t1 0 l_t 2 t # # 64 NULL # #
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range l_t l_t 259 NULL # Using where; Using filesort
+SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+SUBSTRING(t,64) SUBSTRING(l,256)
+
+
+bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+
+
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+
+
+EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range l_t l_t 259 NULL # Using where; Using filesort
+SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+SUBSTRING(t,64) SUBSTRING(l,256)
+
+
+bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+
+
+fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+
+
+DROP TABLE t1;
+CREATE TABLE t1 (
+b BLOB,
+t TINYBLOB,
+m MEDIUMBLOB,
+l LONGBLOB,
+pk INT AUTO_INCREMENT PRIMARY KEY,
+INDEX (m(128))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 m 1 m A 500 128 NULL YES LSMTREE
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref m m 131 const # Using where; Using filesort
+SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+f
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref m m 131 const # Using where; Using filesort
+SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC;
+f
+DROP TABLE t1;
+CREATE TABLE t1 (
+b BLOB,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b TINYBLOB,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b MEDIUMBLOB,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b LONGBLOB,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result
new file mode 100644
index 00000000000..dd9dc6d1f9f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result
@@ -0,0 +1,73 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+b1 BOOL ,
+b2 BOOLEAN
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+b1 tinyint(1) YES NULL
+b2 tinyint(1) YES NULL
+INSERT INTO t1 (b1,b2) VALUES (1,TRUE);
+SELECT b1,b2 FROM t1;
+b1 b2
+1 1
+INSERT INTO t1 (b1,b2) VALUES (FALSE,0);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+INSERT INTO t1 (b1,b2) VALUES (2,3);
+SELECT b1,b2 FROM t1;
+b1 b2
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (-1,-2);
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+1 1
+2 3
+SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1;
+a b
+false false
+true true
+true true
+true true
+SELECT b1,b2 FROM t1 WHERE b1 = TRUE;
+b1 b2
+1 1
+SELECT b1,b2 FROM t1 WHERE b2 = FALSE;
+b1 b2
+0 0
+INSERT INTO t1 (b1,b2) VALUES ('a','b');
+Warnings:
+Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1
+Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+2 3
+INSERT INTO t1 (b1,b2) VALUES (128,-129);
+Warnings:
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b2' at row 1
+SELECT b1,b2 FROM t1;
+b1 b2
+-1 -2
+0 0
+0 0
+1 1
+127 -128
+2 3
+ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED ;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'UNSIGNED' at line 1
+ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL ;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL' at line 1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result
new file mode 100644
index 00000000000..1e5ac0d44f5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result
@@ -0,0 +1,76 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c CHAR ,
+c0 CHAR(0) ,
+c1 CHAR(1) ,
+c20 CHAR(20) ,
+c255 CHAR(255) ,
+PRIMARY KEY (c255)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+c char(1) YES NULL
+c0 char(0) YES NULL
+c1 char(1) YES NULL
+c20 char(20) YES NULL
+c255 char(255) NO PRI
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','','');
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.');
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256));
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+Warning 1265 Data truncated for column 'c0' at row 1
+Warning 1265 Data truncated for column 'c1' at row 1
+Warning 1265 Data truncated for column 'c20' at row 1
+Warning 1265 Data truncated for column 'c255' at row 1
+INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 5
+Warning 1265 Data truncated for column 'c0' at row 5
+Warning 1265 Data truncated for column 'c1' at row 5
+Warning 1265 Data truncated for column 'c20' at row 5
+Warning 1265 Data truncated for column 'c' at row 6
+Warning 1265 Data truncated for column 'c0' at row 6
+Warning 1265 Data truncated for column 'c1' at row 6
+Warning 1265 Data truncated for column 'c20' at row 6
+Warning 1265 Data truncated for column 'c255' at row 6
+SELECT c,c0,c1,c20,c255 FROM t1;
+c c0 c1 c20 c255
+
+ a
+C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b
+a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20;
+c20 REPEAT('a',LENGTH(c20)) COUNT(*)
+ 2
+Creating an article aaaaaaaaaaaaaaaaaaa 1
+aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1
+abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1
+xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1
+ALTER TABLE t1 ADD COLUMN c257 CHAR(257) ;
+ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead
+DROP TABLE t1;
+CREATE TABLE t1(c1 CHAR(0) NOT NULL);
+INSERT INTO t1 VALUES('a');
+Warnings:
+Warning 1265 Data truncated for column 'c1' at row 1
+SELECT * FROM t1;
+c1
+
+DROP TABLE t1;
+CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key);
+INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five');
+SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1;
+a
+five
+four
+one
+three
+two
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result
new file mode 100644
index 00000000000..e8b913288c5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result
@@ -0,0 +1,73 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+c CHAR,
+c20 CHAR(20) PRIMARY KEY,
+v16 VARCHAR(16),
+v128 VARCHAR(128)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 c20 A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+EXPLAIN SELECT c20 FROM t1 ORDER BY c20;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index
+SELECT c20 FROM t1 ORDER BY c20;
+c20
+char1
+char2
+char3
+char4
+EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index
+SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20;
+c20
+char1
+char2
+char3
+char4
+DROP TABLE t1;
+CREATE TABLE t1 (
+c CHAR,
+c20 CHAR(20),
+v16 VARCHAR(16),
+v128 VARCHAR(128),
+pk VARCHAR(64) PRIMARY KEY,
+INDEX (v16)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 v16 1 v16 A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4');
+EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index v16 v16 19 NULL # Using where; Using index
+SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%';
+SUBSTRING(v16,7,3)
+r1a
+r1a
+r2a
+r3a
+EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
+SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%';
+SUBSTRING(v16,7,3)
+r1a
+r1a
+r2a
+r3a
+EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL v16 NULL NULL NULL # Using where; Using filesort
+SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+c c20 v16 v128
+a char1 varchar1a varchar1b
+b char3 varchar1a varchar1b
+c char4 varchar3a varchar3b
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result
new file mode 100644
index 00000000000..9ab00243b6b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result
@@ -0,0 +1,109 @@
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t (id int not null auto_increment primary key,
+c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+key sk (c));
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+insert into t (c) values ('A'), ('b'), ('C');
+explain select c from t;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL sk 4 NULL # Using index
+select c from t;
+c
+A
+b
+C
+select c from t where c = 'a';
+c
+A
+drop table t;
+set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
+create table t (id int not null auto_increment primary key,
+c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci,
+key sk (c));
+insert into t (c) values ('☀'), ('ß');
+explain select c from t;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL sk 27 NULL # Using index
+select c from t;
+c
+ß
+☀
+drop table t;
+set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans";
+create table t (id int not null auto_increment,
+c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci,
+primary key (id),
+key sk1 (c1),
+key sk2 (c2));
+explain select hex(c1) from t order by c1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL sk1 4 NULL # Using index
+explain select hex(c1) from t IGNORE INDEX (sk1) order by c1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t ALL NULL NULL NULL NULL # Using filesort
+explain select hex(c2) from t order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL sk2 2 NULL # Using index
+explain select hex(c2) from t IGNORE INDEX (sk1) order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL sk2 2 NULL # Using index
+truncate t;
+insert into t (c1, c2) values ('Asdf ', 'Asdf ');
+Warnings:
+Warning 1265 Data truncated for column 'c1' at row 1
+Warning 1265 Data truncated for column 'c2' at row 1
+select char_length(c1), char_length(c2), c1, c2 from t;
+char_length(c1) char_length(c2) c1 c2
+1 1 A A
+drop table t;
+create table t (id int not null auto_increment,
+c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci,
+primary key (id),
+unique key sk2 (c2));
+insert into t (c2) values ('Asdf');
+insert into t (c2) values ('asdf ');
+ERROR 23000: Duplicate entry 'asdf' for key 'sk2'
+drop table t;
+create table t (id int not null auto_increment,
+c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+primary key (id),
+unique key sk1 (c1));
+insert into t (c1) values ('Asdf');
+insert into t (c1) values ('asdf ');
+ERROR 23000: Duplicate entry 'asdf ' for key 'sk1'
+insert into t (c1) values ('asdf');
+ERROR 23000: Duplicate entry 'asdf' for key 'sk1'
+drop table t;
+create table t (id int not null auto_increment,
+c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+primary key (id),
+unique key sk1 (c1(1)));
+insert into t (c1) values ('Asdf');
+insert into t (c1) values ('bbbb ');
+insert into t (c1) values ('a ');
+ERROR 23000: Duplicate entry 'a' for key 'sk1'
+explain select c1 from t;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t ALL NULL NULL NULL NULL # NULL
+select c1 from t;
+c1
+Asdf
+bbbb
+drop table t;
+set session rocksdb_verify_checksums = on;
+create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1;
+insert into t values (1, ' a');
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL email_i 33 NULL # Using index
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+index_name count
+email_i 1
+drop table t;
+create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1;
+insert into t values (1, REPEAT('a', 700));
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+index_name count
+email_i 1
+drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result
new file mode 100644
index 00000000000..1a5ec573be7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result
@@ -0,0 +1,53 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DATE ,
+dt DATETIME ,
+ts TIMESTAMP ,
+t TIME ,
+y YEAR ,
+y4 YEAR(4) ,
+y2 YEAR(2) ,
+pk DATETIME PRIMARY KEY
+) ENGINE=rocksdb;
+Warnings:
+Warning 1818 YEAR(2) column type is deprecated. Creating YEAR(4) column instead.
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d date YES NULL
+dt datetime YES NULL
+ts timestamp YES NULL
+t time YES NULL
+y year(4) YES NULL
+y4 year(4) YES NULL
+y2 year(4) YES NULL
+pk datetime NO PRI NULL
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'),
+('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'),
+('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15');
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16');
+Warnings:
+Warning 1265 Data truncated for column 'd' at row 1
+Warning 1264 Out of range value for column 'dt' at row 1
+Warning 1264 Out of range value for column 'ts' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 'y' at row 1
+Warning 1264 Out of range value for column 'y4' at row 1
+Warning 1264 Out of range value for column 'y2' at row 1
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+d dt ts t y y4 y2
+1000-01-01 1000-01-01 00:00:00 1970-01-01 03:00:01 -838:59:59 1901 1901 2000
+9999-12-31 9999-12-31 23:59:59 2038-01-19 06:14:07 838:59:59 2155 2155 1999
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 2000
+2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 2012
+0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 0000
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result
new file mode 100644
index 00000000000..187330836df
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result
@@ -0,0 +1,119 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DATE,
+dt DATETIME PRIMARY KEY,
+ts TIMESTAMP,
+t TIME,
+y YEAR
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 dt A 1000 NULL NULL LSMTREE
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm));
+EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index
+SELECT dt FROM t1 ORDER BY dt LIMIT 3;
+dt
+2010-11-22 11:43:14
+2010-11-22 12:33:54
+2011-08-27 21:33:56
+EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index
+SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3;
+dt
+2010-11-22 11:43:14
+2010-11-22 12:33:54
+2011-08-27 21:33:56
+INSERT INTO t1 (d,dt,ts,t,y) VALUES
+('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000');
+ERROR 23000: Duplicate entry '2010-11-22 12:33:54' for key 'PRIMARY'
+DROP TABLE t1;
+CREATE TABLE t1 (
+d DATE,
+dt DATETIME,
+ts TIMESTAMP,
+t TIME,
+y YEAR,
+pk TIME PRIMARY KEY,
+INDEX (ts)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 ts 1 ts A 500 NULL NULL YES LSMTREE
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00');
+EXPLAIN SELECT ts FROM t1 WHERE ts > NOW();
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index ts ts 5 NULL # Using where; Using index
+SELECT ts FROM t1 WHERE ts > NOW();
+ts
+EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW();
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
+SELECT ts FROM t1 USE INDEX () WHERE ts > NOW();
+ts
+DROP TABLE t1;
+CREATE TABLE t1 (
+d DATE,
+dt DATETIME,
+ts TIMESTAMP,
+t TIME,
+y YEAR,
+pk TIME PRIMARY KEY,
+INDEX (y,t)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 y 1 y A 250 NULL NULL YES LSMTREE
+t1 1 y 2 t A 500 NULL NULL YES LSMTREE
+SET @tm = '2012-04-09 05:27:00';
+INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18');
+EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index y y 6 NULL # Using index
+SELECT y, COUNT(*) FROM t1 GROUP BY y;
+y COUNT(*)
+1994 1
+1998 1
+1999 1
+2000 1
+2001 1
+2012 1
+EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index y y 6 NULL # Using index; Using temporary; Using filesort
+SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y;
+y COUNT(*)
+1994 1
+1998 1
+1999 1
+2000 1
+2001 1
+2012 1
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result
new file mode 100644
index 00000000000..7a7a5c7638c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result
@@ -0,0 +1,179 @@
+drop table if exists t1, t2;
+#
+# Check that a DECIMAL PK works with both the old and the new data formats
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+# First, make the server create a dataset in the old format:
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t1 (
+pk1 decimal(32,16),
+pk2 decimal(32,16),
+pk3 decimal(32,16),
+a smallint not null,
+primary key(pk1, pk2, pk3)
+);
+insert into t1
+select
+A.a, B.a, C.a, 1234
+from t0 A, t0 B, t0 C;
+#
+# Looking at the table size, one can tell that the data is stored using
+# the old format:
+#
+set global rocksdb_force_flush_memtable_now=1;
+# Check the format version:
+select table_name,index_name,kv_format_version
+from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name='t1';
+table_name index_name kv_format_version
+t1 PRIMARY 10
+flush tables;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+# Check that the new server reads the data in the old format:
+select * from t1 order by pk1,pk2,pk3 limit 5;
+pk1 pk2 pk3 a
+0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 3.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 4.0000000000000000 1234
+#
+# Ok, now, enable the new data format:
+#
+create table t2 (
+pk1 decimal(32,16),
+pk2 decimal(32,16),
+pk3 decimal(32,16),
+a smallint not null,
+primary key(pk1, pk2, pk3)
+);
+insert into t2
+select
+A.a, B.a, C.a, 1234
+from t0 A, t0 B, t0 C;
+set global rocksdb_force_flush_memtable_now=1;
+larger
+1
+# This should show the new PK data format
+select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name='t2';
+table_name index_name kv_format_version
+t2 PRIMARY 11
+#
+# Check that the server is able to read BOTH the old and the new formats:
+#
+select * from t2 limit 3;
+pk1 pk2 pk3 a
+0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
+select * from t1 limit 3;
+pk1 pk2 pk3 a
+0.0000000000000000 0.0000000000000000 0.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 1.0000000000000000 1234
+0.0000000000000000 0.0000000000000000 2.0000000000000000 1234
+drop table t1,t2;
+drop table t0;
+#
+# Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly.
+# (Decoding happens from the mem-comparable image in the index, regardless
+# of whether the value part contains the original value or not)
+#
+create table t1 (
+pk int not null primary key,
+col1 decimal (2,1) signed,
+col2 decimal (2,1) unsigned,
+filler varchar(100),
+key key1(col1, col2)
+)engine=rocksdb;
+insert into t1 values
+(1,-9.1, 0.7, 'filler'),
+(2,-8.2, 1.6, 'filler'),
+(3, 0.3, 2.5, 'filler'),
+(4, 1.4, 3.4, 'filler'),
+(5, 2.5, 4.3, 'filler'),
+(6, 3.3, 5.3, 'filler');
+insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+# The following can't use index-only:
+explain select * from t1 where col1 between -8 and 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range key1 key1 3 NULL # Using index condition
+# This will use index-only:
+explain
+select col1, col2 from t1 where col1 between -8 and 8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index
+select col1, col2 from t1 where col1 between -8 and 8;
+col1 col2
+0.3 2.5
+1.4 3.4
+2.5 4.3
+3.3 5.3
+insert into t1 values (11, NULL, 0.9, 'row1-with-null');
+insert into t1 values (10, -8.4, NULL, 'row2-with-null');
+explain
+select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index
+select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7;
+col1 col2
+NULL 0.9
+-9.1 0.7
+-8.4 NULL
+-8.2 1.6
+# Try an UPDATE
+select * from t1 where pk in (3,4);
+pk col1 col2 filler
+3 0.3 2.5 filler
+4 1.4 3.4 filler
+update t1 set col2= col2+0.2 where pk in (3,4);
+select * from t1 where pk in (3,4);
+pk col1 col2 filler
+3 0.3 2.7 filler
+4 1.4 3.6 filler
+drop table t1;
+#
+# Try another DECIMAL-based type that takes more space
+#
+create table t1 (
+pk int not null primary key,
+col1 decimal (12,6) signed,
+col2 decimal (12,6) unsigned,
+filler varchar(100),
+key key1(col1, col2)
+)engine=rocksdb;
+insert into t1 values
+(1,-900.001, 000.007, 'filler'),
+(2,-700.002, 100.006, 'filler'),
+(3, 000.003, 200.005, 'filler'),
+(4, 100.004, 300.004, 'filler'),
+(5, 200.005, 400.003, 'filler'),
+(6, 300.003, 500.003, 'filler');
+insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+explain
+select col1, col2 from t1 force index(key1) where col1 between -800 and 800;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range key1 key1 7 NULL # Using where; Using index
+select col1, col2 from t1 force index(key1) where col1 between -800 and 800;
+col1 col2
+-700.002000 100.006000
+0.003000 200.005000
+100.004000 300.004000
+200.005000 400.003000
+300.003000 500.003000
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result
new file mode 100644
index 00000000000..009b342182d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result
@@ -0,0 +1,47 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a ENUM('') ,
+b ENUM('test1','test2','test3','test4','test5') ,
+c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') ,
+PRIMARY KEY (b)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') YES NULL
+b enum('test1','test2','test3','test4','test5') NO PRI test1
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL
+INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2);
+SELECT a,b,c FROM t1;
+a b c
+ test2 4
+ test5 2
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'a' at row 1
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+
+ test2 4
+ test5 2
+ALTER TABLE t1 ADD COLUMN e ENUM('a','A') ;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in ENUM
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a enum('') YES NULL
+b enum('test1','test2','test3','test4','test5') NO PRI test1
+c enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL
+e enum('a','A') YES NULL
+INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A');
+SELECT a,b,c,e FROM t1;
+a b c e
+ NULL
+ test2 4 NULL
+ test3 75 a
+ test5 2 NULL
+SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != '';
+a b c e
+ test2 4 NULL
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result
new file mode 100644
index 00000000000..37d005485d6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result
@@ -0,0 +1,69 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'),
+b ENUM('test1','test2','test3','test4','test5'),
+c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY
+) ENGINE=rocksdb;
+INSERT INTO t1 (a,b,c) VALUES
+('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'),
+('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d');
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE
+EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY PRIMARY 1 NULL # Using where; Using index
+SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u';
+c
+1d
+1j
+4z
+5a
+5b
+EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
+SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u';
+c
+1d
+1j
+4z
+5a
+5b
+DROP TABLE t1;
+CREATE TABLE t1 (
+a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'),
+b ENUM('test1','test2','test3','test4','test5'),
+c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'),
+pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+INDEX(b)
+) ENGINE=rocksdb;
+INSERT INTO t1 (a,b,c,pk) VALUES
+('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3),
+('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6);
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 b 1 b A 500 NULL NULL YES LSMTREE
+EXPLAIN SELECT DISTINCT b FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL b 2 NULL # NULL
+SELECT DISTINCT b FROM t1;
+b
+test1
+test2
+test3
+test4
+EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary
+SELECT DISTINCT b FROM t1 IGNORE INDEX (b);
+b
+test1
+test2
+test3
+test4
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result
new file mode 100644
index 00000000000..45fd402f9b3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result
@@ -0,0 +1,131 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d DECIMAL ,
+d0 DECIMAL(0) ,
+d1_1 DECIMAL(1,1) ,
+d10_2 DECIMAL(10,2) ,
+d60_10 DECIMAL(60,10) ,
+n NUMERIC ,
+n0_0 NUMERIC(0,0) ,
+n1 NUMERIC(1) ,
+n20_4 NUMERIC(20,4) ,
+n65_4 NUMERIC(65,4) ,
+pk NUMERIC PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+d decimal(10,0) YES NULL
+d0 decimal(10,0) YES NULL
+d1_1 decimal(1,1) YES NULL
+d10_2 decimal(10,2) YES NULL
+d60_10 decimal(60,10) YES NULL
+n decimal(10,0) YES NULL
+n0_0 decimal(10,0) YES NULL
+n1 decimal(1,0) YES NULL
+n20_4 decimal(20,4) YES NULL
+n65_4 decimal(65,4) YES NULL
+pk decimal(10,0) NO PRI NULL
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+6
+);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+Warnings:
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1264 Out of range value for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Warning 1264 Out of range value for column 'd10_2' at row 1
+Warning 1264 Out of range value for column 'd60_10' at row 1
+Warning 1264 Out of range value for column 'n' at row 1
+Warning 1264 Out of range value for column 'n0_0' at row 1
+Warning 1264 Out of range value for column 'n1' at row 1
+Warning 1264 Out of range value for column 'n20_4' at row 1
+Warning 1264 Out of range value for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+Warnings:
+Note 1265 Data truncated for column 'd' at row 1
+Note 1265 Data truncated for column 'd0' at row 1
+Warning 1264 Out of range value for column 'd1_1' at row 1
+Note 1265 Data truncated for column 'd10_2' at row 1
+Note 1265 Data truncated for column 'd60_10' at row 1
+Note 1265 Data truncated for column 'n' at row 1
+Note 1265 Data truncated for column 'n0_0' at row 1
+Note 1265 Data truncated for column 'n1' at row 1
+Note 1265 Data truncated for column 'n20_4' at row 1
+Note 1265 Data truncated for column 'n65_4' at row 1
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4
+-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999
+0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000
+100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000
+9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999
+ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ;
+ERROR 42000: Too big precision 66 specified for column 'n66'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ;
+ERROR 42000: Too big precision 66 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ;
+ERROR 42000: Too big scale 66 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result
new file mode 100644
index 00000000000..8aa80244908
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result
@@ -0,0 +1,129 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+d1 DECIMAL(10,2) PRIMARY KEY,
+d2 DECIMAL(60,10),
+n1 NUMERIC,
+n2 NUMERIC(65,4)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 d1 A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (d1,d2,n1,n2) VALUES
+(10.22,60.12345,123456,14.3456),
+(10.0,60.12345,123456,14),
+(11.14,15,123456,13),
+(100,100,1,2),
+(0,0,0,0),
+(4540424564.23,3343303441.0,12,13),
+(15,17,23,100000);
+Warnings:
+Warning 1264 Out of range value for column 'd1' at row 6
+EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index
+SELECT d1 FROM t1 ORDER BY d1 DESC;
+d1
+99999999.99
+100.00
+15.00
+11.14
+10.22
+10.00
+0.00
+EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index; Using filesort
+SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC;
+d1
+99999999.99
+100.00
+15.00
+11.14
+10.22
+10.00
+0.00
+DROP TABLE t1;
+CREATE TABLE t1 (
+d1 DECIMAL(10,2),
+d2 DECIMAL(60,10),
+n1 NUMERIC,
+n2 NUMERIC(65,4),
+pk NUMERIC PRIMARY KEY,
+UNIQUE INDEX n1_n2 (n1,n2)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 0 n1_n2 1 n1 A 500 NULL NULL YES LSMTREE
+t1 0 n1_n2 2 n2 A 1000 NULL NULL YES LSMTREE
+INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES
+(10.22,60.12345,123456,14.3456,1),
+(10.0,60.12345,123456,14,2),
+(11.14,15,123456,13,3),
+(100,100,1,2,4),
+(0,0,0,0,5),
+(4540424564.23,3343303441.0,12,13,6),
+(15,17,23,100000,7);
+Warnings:
+Warning 1264 Out of range value for column 'd1' at row 6
+EXPLAIN SELECT DISTINCT n1+n2 FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index n1_n2 n1_n2 37 NULL # Using index; Using temporary
+SELECT DISTINCT n1+n2 FROM t1;
+n1+n2
+0.0000
+100023.0000
+123469.0000
+123470.0000
+123470.3456
+25.0000
+3.0000
+DROP TABLE t1;
+CREATE TABLE t1 (
+d1 DECIMAL(10,2),
+d2 DECIMAL(60,10),
+n1 NUMERIC,
+n2 NUMERIC(65,4),
+pk DECIMAL(20,10) PRIMARY KEY,
+INDEX (d2)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 d2 1 d2 A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES
+(10.22,60.12345,123456,14.3456,1),
+(10.0,60.12345,123456,14,2),
+(11.14,15,123456,13,3),
+(100,100,1,2,4),
+(0,0,0,0,5),
+(4540424564.23,3343303441.0,12,13,6),
+(15,17,23,100000,7);
+Warnings:
+Warning 1264 Out of range value for column 'd1' at row 6
+EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index d2 d2 29 NULL # Using index
+SELECT d2, COUNT(*) FROM t1 GROUP BY d2;
+d2 COUNT(*)
+0.0000000000 1
+100.0000000000 1
+15.0000000000 1
+17.0000000000 1
+3343303441.0000000000 1
+60.1234500000 2
+EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index d2 d2 29 NULL # Using index; Using temporary; Using filesort
+SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2;
+d2 COUNT(*)
+0.0000000000 1
+100.0000000000 1
+15.0000000000 1
+17.0000000000 1
+3343303441.0000000000 1
+60.1234500000 2
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result
new file mode 100644
index 00000000000..0f78926c89a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result
@@ -0,0 +1,306 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT ,
+f0 FLOAT(0) ,
+r1_1 REAL(1,1) ,
+f23_0 FLOAT(23) ,
+f20_3 FLOAT(20,3) ,
+d DOUBLE ,
+d1_0 DOUBLE(1,0) ,
+d10_10 DOUBLE PRECISION (10,10) ,
+d53 DOUBLE(53,0) ,
+d53_10 DOUBLE(53,10) ,
+pk DOUBLE PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+f float YES NULL
+f0 float YES NULL
+r1_1 double(1,1) YES NULL
+f23_0 float YES NULL
+f20_3 float(20,3) YES NULL
+d double YES NULL
+d1_0 double(1,0) YES NULL
+d10_10 double(10,10) YES NULL
+d53 double(53,0) YES NULL
+d53_10 double(53,10) YES NULL
+pk double NO PRI NULL
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 11111111.111
+d10_10 0.0123456789
+d1_0 8
+d53 1234566789123456800
+d53_10 100000000000000000.0000000000
+f0 12345.1
+f20_3 56789.988
+f23_0 123457000
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+99999999999999999999999999999999999999,
+99999999999999999999999999999999999999.9999999999999999,
+0.9,
+99999999999999999999999999999999999999.9,
+99999999999999999.999,
+999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+9,
+0.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+3
+);
+Warnings:
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d 0
+d 11111111.111
+d 1e81
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 0
+d1_0 8
+d1_0 9
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f 0
+f 1e38
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1;
+MAX(f) 9.999999680285692e37
+MAX(d) 1e81
+MAX(d10_10) 0.9999999999
+MAX(d1_0) 9
+MAX(d53) 100000000000000000000000000000000000000000000000000000
+MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000
+MAX(f0) 9.999999680285692e37
+MAX(f20_3) 99999998430674940.000
+MAX(f23_0) 9.999999680285692e37
+MAX(r1_1) 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+9999999999999999999999999999999999999999999999999999999999999.9999,
+5
+);
+Warnings:
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+999999999999999999999999999999999999999,
+999999999999999999999999999999999999999.9999999999999999,
+1.9,
+999999999999999999999999999999999999999.9,
+999999999999999999.999,
+9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+99,
+1.9999999999,
+1999999999999999999999999999999999999999999999999999999,
+19999999999999999999999999999999999999999999.9999999999,
+6
+);
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: ''
+Warning 1264 Out of range value for column 'f' at row 1
+Warning 1264 Out of range value for column 'f0' at row 1
+Warning 1264 Out of range value for column 'r1_1' at row 1
+Warning 1264 Out of range value for column 'f23_0' at row 1
+Warning 1264 Out of range value for column 'f20_3' at row 1
+Warning 1264 Out of range value for column 'd1_0' at row 1
+Warning 1264 Out of range value for column 'd10_10' at row 1
+Warning 1264 Out of range value for column 'd53' at row 1
+Warning 1264 Out of range value for column 'd53_10' at row 1
+SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1;
+f 12345.1
+d -1e60
+d 0
+d 11111111.111
+d 1e61
+d 1e65
+d 1e81
+d10_10 -0.9999999999
+d10_10 0.0000000000
+d10_10 0.0123456789
+d10_10 0.9999999999
+d10_10 0.9999999999
+d10_10 0.9999999999
+d1_0 -9
+d1_0 0
+d1_0 8
+d1_0 9
+d1_0 9
+d1_0 9
+d53 -1000000000000000000000000000000
+d53 0
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 100000000000000000000000000000000000000000000000000000
+d53 1234566789123456800
+d53_10 -10000000000000000000000000000000000000000000.0000000000
+d53_10 0.0000000000
+d53_10 100000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+d53_10 10000000000000000000000000000000000000000000.0000000000
+f -1e24
+f 0
+f 1e38
+f 3.40282e38
+f 3.40282e38
+f0 -100000000000
+f0 0
+f0 12345.1
+f0 1e38
+f0 3.40282e38
+f0 3.40282e38
+f20_3 -99999998430674940.000
+f20_3 0.000
+f20_3 56789.988
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f20_3 99999998430674940.000
+f23_0 -1000
+f23_0 0
+f23_0 123457000
+f23_0 1e38
+f23_0 3.40282e38
+f23_0 3.40282e38
+r1_1 -0.9
+r1_1 0.0
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+r1_1 0.9
+ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ;
+ERROR 42000: Display width out of range for column 'd0_0' (max = 255)
+ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ;
+ERROR 42000: Too big precision 256 specified for column 'n66_6'. Maximum is 65.
+ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ;
+ERROR 42000: Too big scale 35 specified for column 'n66_66'. Maximum is 30.
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result
new file mode 100644
index 00000000000..9a50f66870c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result
@@ -0,0 +1,189 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+f FLOAT PRIMARY KEY,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 f A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (f,r,d,dp) VALUES
+(1.2345,1422.22,1.2345,1234567.89),
+(0,0,0,0),
+(-1,-1,-1,-1),
+(17.5843,4953453454.44,29229114.0,1111111.23),
+(4644,1422.22,466664.999,0.5);
+EXPLAIN SELECT f FROM t1 ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index
+SELECT f FROM t1 ORDER BY f;
+f
+-1
+0
+1.2345
+17.5843
+4644
+EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort
+SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f;
+f
+-1
+0
+1.2345
+17.5843
+4644
+DROP TABLE t1;
+CREATE TABLE t1 (
+f FLOAT,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10),
+pk DOUBLE PRIMARY KEY,
+UNIQUE KEY r_dp (r,dp)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 0 r_dp 1 r A 500 NULL NULL YES LSMTREE
+t1 0 r_dp 2 dp A 1000 NULL NULL YES LSMTREE
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index r_dp r_dp 18 NULL # Using where; Using index
+SELECT r, dp FROM t1 WHERE r > 0 or dp > 0;
+r dp
+1422.220 0.5000000000
+1422.220 1234567.8900000000
+4953453454.440 1111111.2300000000
+DROP TABLE t1;
+CREATE TABLE t1 (
+f FLOAT,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10),
+pk FLOAT PRIMARY KEY,
+UNIQUE KEY(d)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 0 d 1 d A 1000 NULL NULL YES LSMTREE
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index d d 9 NULL # Using index
+SELECT DISTINCT d FROM t1 ORDER BY d;
+d
+-1
+0
+1.2345
+466664.999
+29229114
+DROP TABLE t1;
+CREATE TABLE t1 (
+f FLOAT,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10),
+pk FLOAT PRIMARY KEY,
+KEY(d)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 d 1 d A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index d d 9 NULL # Using index
+SELECT DISTINCT d FROM t1 ORDER BY d;
+d
+-1
+0
+1.2345
+466664.999
+29229114
+DROP TABLE t1;
+CREATE TABLE t1 (
+f FLOAT,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10),
+pk FLOAT PRIMARY KEY,
+UNIQUE KEY(f)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 0 f 1 f A 1000 NULL NULL YES LSMTREE
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,0,0,0,6);
+ERROR 23000: Duplicate entry '1.2345' for key 'f'
+EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index f f 5 NULL # Using index
+SELECT DISTINCT f FROM t1 ORDER BY f;
+f
+-1
+0
+1.2345
+17.5843
+4644
+DROP TABLE t1;
+CREATE TABLE t1 (
+f FLOAT,
+r REAL(20,3),
+d DOUBLE,
+dp DOUBLE PRECISION (64,10),
+pk FLOAT PRIMARY KEY,
+KEY(f)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 f 1 f A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,0,0,0,6);
+EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index f f 5 NULL # Using index
+SELECT DISTINCT f FROM t1 ORDER BY f;
+f
+-1
+0
+1.2345
+17.5843
+4644
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result
new file mode 100644
index 00000000000..06866e9b5bb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result
@@ -0,0 +1,212 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT ,
+i0 INT(0) ,
+i1 INT(1) ,
+i20 INT(20) ,
+t TINYINT ,
+t0 TINYINT(0) ,
+t1 TINYINT(1) ,
+t20 TINYINT(20) ,
+s SMALLINT ,
+s0 SMALLINT(0) ,
+s1 SMALLINT(1) ,
+s20 SMALLINT(20) ,
+m MEDIUMINT ,
+m0 MEDIUMINT(0) ,
+m1 MEDIUMINT(1) ,
+m20 MEDIUMINT(20) ,
+b BIGINT ,
+b0 BIGINT(0) ,
+b1 BIGINT(1) ,
+b20 BIGINT(20) ,
+pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+i0 int(11) YES NULL
+i1 int(1) YES NULL
+i20 int(20) YES NULL
+t tinyint(4) YES NULL
+t0 tinyint(4) YES NULL
+t1 tinyint(1) YES NULL
+t20 tinyint(20) YES NULL
+s smallint(6) YES NULL
+s0 smallint(6) YES NULL
+s1 smallint(1) YES NULL
+s20 smallint(20) YES NULL
+m mediumint(9) YES NULL
+m0 mediumint(9) YES NULL
+m1 mediumint(1) YES NULL
+m20 mediumint(20) YES NULL
+b bigint(20) YES NULL
+b0 bigint(20) YES NULL
+b1 bigint(1) YES NULL
+b20 bigint(20) YES NULL
+pk int(11) NO PRI NULL auto_increment
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 1
+Warning 1264 Out of range value for column 'i0' at row 1
+Warning 1264 Out of range value for column 'i1' at row 1
+Warning 1264 Out of range value for column 'i20' at row 1
+Warning 1264 Out of range value for column 't' at row 1
+Warning 1264 Out of range value for column 't0' at row 1
+Warning 1264 Out of range value for column 't1' at row 1
+Warning 1264 Out of range value for column 't20' at row 1
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 's0' at row 1
+Warning 1264 Out of range value for column 's1' at row 1
+Warning 1264 Out of range value for column 's20' at row 1
+Warning 1264 Out of range value for column 'm' at row 1
+Warning 1264 Out of range value for column 'm0' at row 1
+Warning 1264 Out of range value for column 'm1' at row 1
+Warning 1264 Out of range value for column 'm20' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'b0' at row 1
+Warning 1264 Out of range value for column 'b1' at row 1
+Warning 1264 Out of range value for column 'b20' at row 1
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+Warnings:
+Warning 1264 Out of range value for column 'i' at row 8
+Warning 1264 Out of range value for column 'i0' at row 8
+Warning 1264 Out of range value for column 'i1' at row 8
+Warning 1264 Out of range value for column 'i20' at row 8
+Warning 1264 Out of range value for column 't' at row 8
+Warning 1264 Out of range value for column 't0' at row 8
+Warning 1264 Out of range value for column 't1' at row 8
+Warning 1264 Out of range value for column 't20' at row 8
+Warning 1264 Out of range value for column 's' at row 8
+Warning 1264 Out of range value for column 's0' at row 8
+Warning 1264 Out of range value for column 's1' at row 8
+Warning 1264 Out of range value for column 's20' at row 8
+Warning 1264 Out of range value for column 'm' at row 8
+Warning 1264 Out of range value for column 'm0' at row 8
+Warning 1264 Out of range value for column 'm1' at row 8
+Warning 1264 Out of range value for column 'm20' at row 8
+Warning 1264 Out of range value for column 'i' at row 9
+Warning 1264 Out of range value for column 'i0' at row 9
+Warning 1264 Out of range value for column 'i1' at row 9
+Warning 1264 Out of range value for column 'i20' at row 9
+Warning 1264 Out of range value for column 't' at row 9
+Warning 1264 Out of range value for column 't0' at row 9
+Warning 1264 Out of range value for column 't1' at row 9
+Warning 1264 Out of range value for column 't20' at row 9
+Warning 1264 Out of range value for column 's' at row 9
+Warning 1264 Out of range value for column 's0' at row 9
+Warning 1264 Out of range value for column 's1' at row 9
+Warning 1264 Out of range value for column 's20' at row 9
+Warning 1264 Out of range value for column 'm' at row 9
+Warning 1264 Out of range value for column 'm0' at row 9
+Warning 1264 Out of range value for column 'm1' at row 9
+Warning 1264 Out of range value for column 'm20' at row 9
+Warning 1264 Out of range value for column 'i' at row 10
+Warning 1264 Out of range value for column 'i0' at row 10
+Warning 1264 Out of range value for column 'i1' at row 10
+Warning 1264 Out of range value for column 'i20' at row 10
+Warning 1264 Out of range value for column 't' at row 10
+Warning 1264 Out of range value for column 't0' at row 10
+Warning 1264 Out of range value for column 't1' at row 10
+Warning 1264 Out of range value for column 't20' at row 10
+Warning 1264 Out of range value for column 's' at row 10
+Warning 1264 Out of range value for column 's0' at row 10
+Warning 1264 Out of range value for column 's1' at row 10
+Warning 1264 Out of range value for column 's20' at row 10
+Warning 1264 Out of range value for column 'm' at row 10
+Warning 1264 Out of range value for column 'm0' at row 10
+Warning 1264 Out of range value for column 'm1' at row 10
+Warning 1264 Out of range value for column 'm20' at row 10
+Warning 1264 Out of range value for column 'i' at row 11
+Warning 1264 Out of range value for column 'i0' at row 11
+Warning 1264 Out of range value for column 'i1' at row 11
+Warning 1264 Out of range value for column 'i20' at row 11
+Warning 1264 Out of range value for column 't' at row 11
+Warning 1264 Out of range value for column 't0' at row 11
+Warning 1264 Out of range value for column 't1' at row 11
+Warning 1264 Out of range value for column 't20' at row 11
+Warning 1264 Out of range value for column 's' at row 11
+Warning 1264 Out of range value for column 's0' at row 11
+Warning 1264 Out of range value for column 's1' at row 11
+Warning 1264 Out of range value for column 's20' at row 11
+Warning 1264 Out of range value for column 'm' at row 11
+Warning 1264 Out of range value for column 'm0' at row 11
+Warning 1264 Out of range value for column 'm1' at row 11
+Warning 1264 Out of range value for column 'm20' at row 11
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807
+ALTER TABLE t1 ADD COLUMN i257 INT(257) ;
+ERROR 42000: Display width out of range for column 'i257' (max = 255)
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result
new file mode 100644
index 00000000000..e4c47f04c24
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result
@@ -0,0 +1,99 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+i INT PRIMARY KEY,
+t TINYINT,
+s SMALLINT,
+m MEDIUMINT,
+b BIGINT
+) ENGINE=rocksdb;
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+EXPLAIN SELECT i FROM t1 ORDER BY i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index
+SELECT i FROM t1 ORDER BY i;
+i
+1
+2
+3
+5
+10
+11
+12
+101
+1000
+10001
+DROP TABLE t1;
+CREATE TABLE t1 (
+i INT,
+t TINYINT,
+s SMALLINT,
+m MEDIUMINT,
+b BIGINT,
+pk SMALLINT AUTO_INCREMENT PRIMARY KEY,
+INDEX s_m (s,m)
+) ENGINE=rocksdb;
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index s_m s_m 7 NULL # Using where; Using index
+SELECT s, m FROM t1 WHERE s != 10 AND m != 1;
+s m
+10000 1000000
+10000 1000000
+10002 10003
+103 104
+12 13
+13 14
+14 15
+3 4
+4 5
+5 6
+DROP TABLE t1;
+# RocksDB: unique indexes allowed
+CREATE TABLE t1 (
+i INT,
+t TINYINT,
+s SMALLINT,
+m MEDIUMINT,
+b BIGINT,
+pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY,
+UNIQUE KEY b_t (b,t)
+) ENGINE=rocksdb;
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+b+t
+9
+11
+25
+27
+29
+207
+10107
+100000000000000100
+1000000000000000100
+SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+b+t
+9
+11
+25
+27
+29
+207
+10107
+100000000000000100
+1000000000000000100
+SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+b+t
+9
+11
+25
+27
+29
+207
+10107
+100000000000000100
+1000000000000000100
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result
new file mode 100644
index 00000000000..f401af46536
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result
@@ -0,0 +1,49 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a SET('') ,
+b SET('test1','test2','test3','test4','test5') ,
+c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') ,
+PRIMARY KEY (c)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') YES NULL
+b set('test1','test2','test3','test4','test5') YES NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,,23'),
+('',5,2),
+(',','test4,test2','');
+Warnings:
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'c' at row 1
+SELECT a,b,c FROM t1;
+a b c
+ 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64
+ test1,test3 02
+ test2,test3 01,23,34,44
+ test2,test4
+ALTER TABLE t1 ADD COLUMN e SET('a','A') ;
+Warnings:
+Note 1291 Column 'e' has duplicated value 'a' in SET
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+a set('') YES NULL
+b set('test1','test2','test3','test4','test5') YES NULL
+c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI
+e set('a','A') YES NULL
+ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') ;
+ERROR HY000: Too many strings for column f and SET
+SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != '';
+a b c e
+ test2,test3 01,23,34,44 NULL
+ test2,test4 NULL
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result
new file mode 100644
index 00000000000..62a3004e584
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result
@@ -0,0 +1,80 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'),
+b SET('test1','test2','test3','test4','test5'),
+c SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,23'),
+('',5,2),
+('N.America,Asia','test4,test2',''),
+('Africa,Europe,Asia','test2,test3','01'),
+('Antarctica','test3','34,44'),
+('Asia','test5','50'),
+('Europe,S.America','test1,','39');
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 7
+EXPLAIN SELECT c FROM t1 ORDER BY c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index
+SELECT c FROM t1 ORDER BY c;
+c
+
+01
+22
+39
+34,44
+01,23,34,44
+50
+EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index; Using filesort
+SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c;
+c
+
+01
+22
+39
+34,44
+01,23,34,44
+50
+INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02');
+ERROR 23000: Duplicate entry '22' for key 'PRIMARY'
+INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44');
+ERROR 23000: Duplicate entry '34,44' for key 'PRIMARY'
+DROP TABLE t1;
+CREATE TABLE t1 (
+a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'),
+b SET('test1','test2','test3','test4','test5'),
+c SET('01','22','23','33','34','39','40','44','50','63','64'),
+pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+INDEX(a)
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE
+t1 1 a 1 a A 500 NULL NULL YES LSMTREE
+INSERT INTO t1 (a,b,c,pk) VALUES
+('','test2,test3','01,34,44,23',1),
+('',5,2,2),
+('N.America,Asia','test4,test2','',3),
+('Africa,Europe,Asia','test2,test3','01',4),
+('Antarctica','test3','34,44',5),
+('Asia','test5','50',6),
+('Europe,S.America','test1,','39',7);
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 7
+EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
+SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0;
+a
+Africa,Europe,Asia
+S.America,Europe
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result
new file mode 100644
index 00000000000..bd10e3888c9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result
@@ -0,0 +1,57 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT PRIMARY KEY,
+t TEXT ,
+t0 TEXT(0) ,
+t1 TEXT(1) ,
+t300 TEXT(300) ,
+tm TEXT(65535) ,
+t70k TEXT(70000) ,
+t17m TEXT(17000000) ,
+tt TINYTEXT ,
+m MEDIUMTEXT ,
+l LONGTEXT
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+pk int(11) NO PRI NULL auto_increment
+t text YES NULL
+t0 text YES NULL
+t1 tinytext YES NULL
+t300 text YES NULL
+tm text YES NULL
+t70k mediumtext YES NULL
+t17m longtext YES NULL
+tt tinytext YES NULL
+m mediumtext YES NULL
+l longtext YES NULL
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) );
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+Warnings:
+Warning 1265 Data truncated for column 't' at row 1
+Warning 1265 Data truncated for column 't0' at row 1
+Warning 1265 Data truncated for column 't1' at row 1
+Warning 1265 Data truncated for column 't300' at row 1
+Warning 1265 Data truncated for column 'tm' at row 1
+Warning 1265 Data truncated for column 'tt' at row 1
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l)
+0 0 0 0 0 0 0 0 0 0
+1 1 1 1 1 1 1 1 1 1
+5 5 5 5 5 5 5 5 5 6
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576
+ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) ;
+ERROR 42000: Display width out of range for column 'ttt' (max = 4294967295)
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result
new file mode 100644
index 00000000000..22318316596
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result
@@ -0,0 +1,165 @@
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (
+t TEXT,
+tt TINYTEXT,
+m MEDIUMTEXT,
+l LONGTEXT,
+PRIMARY KEY t (t(32))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 t A 1000 32 NULL LSMTREE
+INSERT INTO t1 (t,tt,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 34 NULL # Using where; Using filesort
+SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f;
+f
+
+
+EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where; Using filesort
+SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f;
+f
+
+
+DROP TABLE t1;
+CREATE TABLE t1 (
+t TEXT,
+tt TINYTEXT,
+m MEDIUMTEXT,
+l LONGTEXT,
+pk TINYTEXT PRIMARY KEY,
+UNIQUE INDEX l_tt (l(256),tt(64))
+) ENGINE=rocksdb;
+ERROR 42000: BLOB/TEXT column 'pk' used in key specification without a key length
+CREATE TABLE t1 (
+t TEXT,
+tt TINYTEXT,
+m MEDIUMTEXT,
+l LONGTEXT,
+pk MEDIUMTEXT,
+PRIMARY KEY mt (pk(1)),
+INDEX (m(128))
+) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 pk A 1000 1 NULL LSMTREE
+t1 1 m 1 m A 500 128 NULL YES LSMTREE
+INSERT INTO t1 (t,tt,m,l,pk) VALUES
+('','','','','0'),
+('a','b','c','d','1'),
+('b','d','c','b','2'),
+('test1','test2','test3','test4','3'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'),
+('abc','def','ghi','jkl','5'),
+('test2','test3','test4','test5','6'),
+('test3','test4','test5','test6','7'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9');
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref m m 131 const # Using where; Using filesort
+SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+f
+DROP TABLE t1;
+CREATE TABLE t1 (
+b TEXT,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b TINYTEXT,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b MEDIUMTEXT,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b LONGTEXT,
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+SELECT b FROM t1;
+b
+00000000000000000000000000000000
+00000000000000000000000000000001
+00000000000000000000000000000002
+DROP TABLE t1;
+CREATE TABLE t1 (
+b LONGTEXT CHARACTER SET "binary" COLLATE "binary",
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' ');
+SELECT hex(b) FROM t1;
+hex(b)
+
+00
+20
+3030303030303030303030303030303030303030303030303030303030303030
+3030303030303030303030303030303030303030303030303030303030303031
+3030303030303030303030303030303030303030303030303030303030303032
+DROP TABLE t1;
+CREATE TABLE t1 (
+b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin",
+PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' ');
+ERROR 23000: Duplicate entry ' ' for key 'PRIMARY'
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0);
+SELECT hex(b) FROM t1;
+hex(b)
+00
+
+DROP TABLE t1;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result
new file mode 100644
index 00000000000..06afbb92802
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result
@@ -0,0 +1,93 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARBINARY(0) ,
+v1 VARBINARY(1) ,
+v64 VARBINARY(64) ,
+v65000 VARBINARY(65000) ,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) YES NULL
+v1 varbinary(1) YES NULL
+v64 varbinary(64) NO PRI
+v65000 varbinary(65000) YES NULL
+CREATE TABLE t2 (v VARBINARY(65532) , PRIMARY KEY(v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varbinary(65532) NO PRI
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) HEX(v65000)
+
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+Warning 1265 Data truncated for column 'v65000' at row 6
+SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1;
+HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000))
+ 0
+ 61 0
+ 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932
+ 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000
+ 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930
+ 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000
+ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) ;
+Warnings:
+Note 1246 Converting column 'v65536' from VARBINARY to BLOB
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varbinary(0) YES NULL
+v1 varbinary(1) YES NULL
+v64 varbinary(64) NO PRI
+v65000 varbinary(65000) YES NULL
+v65536 mediumblob YES NULL
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result
new file mode 100644
index 00000000000..5c449da2b8f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result
@@ -0,0 +1,743 @@
+drop table if exists t1,t2;
+#
+# A basic test whether endspace-aware variable length encoding
+# works when in PK
+#
+create table t1 ( pk varchar(10) primary key, col1 int not null) ;
+insert into t1 values ('a', 1);
+select * from t1;
+pk col1
+a 1
+drop table t1;
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+v0 VARCHAR(0) ,
+v1 VARCHAR(1) ,
+v64 VARCHAR(64) ,
+v65000 VARCHAR(65000) ,
+PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) YES NULL
+v1 varchar(1) YES NULL
+v64 varchar(64) NO PRI
+v65000 varchar(65000) YES NULL
+CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+Field Type Null Key Default Extra
+v varchar(65532) NO PRI
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+SELECT v0,v1,v64,v65000 FROM t1;
+v0 v1 v64 v65000
+
+
+
+
+
+
+
+
+
+
+
+ y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+ o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly)
+ o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject.
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Free to read in the Knowledgebase!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+ o The 'default' book to read if you wont to learn to use MySQL / MariaDB.
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ * MariaDB Crash Course by Ben Forta
+ * MySQL (4th Edition) by Paul DuBois
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ * MySQL Admin Cookbook
+ * MySQL Cookbook by Paul DuBois
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ For MariaDB / MySQL end users
+ For developers who want to code on MariaDB or MySQL
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 1
+Warning 1265 Data truncated for column 'v1' at row 1
+Warning 1265 Data truncated for column 'v64' at row 1
+Warning 1265 Data truncated for column 'v65000' at row 1
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+Warnings:
+Warning 1265 Data truncated for column 'v0' at row 5
+Warning 1265 Data truncated for column 'v1' at row 5
+Warning 1265 Data truncated for column 'v64' at row 5
+Warning 1265 Data truncated for column 'v65000' at row 5
+Warning 1265 Data truncated for column 'v0' at row 6
+Warning 1265 Data truncated for column 'v1' at row 6
+Warning 1265 Data truncated for column 'v64' at row 6
+SELECT v0, v1, v64, LENGTH(v65000) FROM t1;
+v0 v1 v64 LENGTH(v65000)
+ 0
+ a 0
+ H aHere is a list of recommended books on MariaDB and MySQL. We've 2966
+ a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000
+ y Once there, double check that an article doesn't already exist 2965
+ y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000
+ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) ;
+Warnings:
+Note 1246 Converting column 'v65536' from VARCHAR to TEXT
+SHOW COLUMNS IN t1;
+Field Type Null Key Default Extra
+v0 varchar(0) YES NULL
+v1 varchar(1) YES NULL
+v64 varchar(64) NO PRI
+v65000 varchar(65000) YES NULL
+v65536 mediumtext YES NULL
+DROP TABLE t1, t2;
+#
+# Endspace-comparison tests:
+#
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET latin1 COLLATE latin1_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+b 622020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 6109 a-tab
+a 612009 a-space-tab
+a 61 a
+b 622020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET latin1 COLLATE latin1_bin,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL col1 67 NULL # Using index
+select col1, hex(col1) from t1;
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 67 NULL # Using where; Using index
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL col1 67 NULL # Using index
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 2020202020202020 8
+12 20202020202020202020202020202020 16
+13 202020202020202020202020202020202020202020202020 24
+21 202020202020202020 9
+22 2020202020202020202020202020202020 17
+23 202020202020202020202020202020202020 18
+14 a 2020202020202020202020202020202061 17
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET utf8 COLLATE utf8_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+b 622020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 6109 a-tab
+a 612009 a-space-tab
+a 61 a
+b 622020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_bin,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL col1 195 NULL # Using index
+select col1, hex(col1) from t1;
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL col1 195 NULL # Using index
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 2020202020202020 8
+12 20202020202020202020202020202020 16
+13 202020202020202020202020202020202020202020202020 24
+21 202020202020202020 9
+22 2020202020202020202020202020202020 17
+23 202020202020202020202020202020202020 18
+14 a 2020202020202020202020202020202061 17
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 0061 a
+b 006200200020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 00610009 a-tab
+a 006100200009 a-space-tab
+a 0061 a
+b 006200200020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL
+select col1, hex(col1) from t1;
+col1 hex(col1)
+ab 00610062
+a 00610020
+a 0061
+a 0061002000200009
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 131 NULL # Using where
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 0061002000200009
+a 00610020
+a 0061
+ab 00610062
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL NULL NULL NULL # NULL
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 00200020002000200020002000200020 16
+12 0020002000200020002000200020002000200020002000200020002000200020 32
+13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48
+14 a 00200020002000200020002000200020002000200020002000200020002000200061 34
+21 002000200020002000200020002000200020 18
+22 00200020002000200020002000200020002000200020002000200020002000200020 34
+23 002000200020002000200020002000200020002000200020002000200020002000200020 36
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+b 622020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 6109 a-tab
+a 612009 a-space-tab
+a 61 a
+b 622020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL
+select col1, hex(col1) from t1;
+col1 hex(col1)
+ab 6162
+a 6120
+a 61
+a 61202009
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 259 NULL # Using where
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL NULL NULL NULL # NULL
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 2020202020202020 8
+12 20202020202020202020202020202020 16
+13 202020202020202020202020202020202020202020202020 24
+14 a 2020202020202020202020202020202061 17
+21 202020202020202020 9
+22 2020202020202020202020202020202020 17
+23 202020202020202020202020202020202020 18
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET utf16 COLLATE utf16_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 0061 a
+b 006200200020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 00610009 a-tab
+a 006100200009 a-space-tab
+a 0061 a
+b 006200200020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET utf16 COLLATE utf16_bin,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL # NULL
+select col1, hex(col1) from t1;
+col1 hex(col1)
+ab 00610062
+a 00610020
+a 0061
+a 0061002000200009
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 259 NULL # Using where
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 0061002000200009
+a 00610020
+a 0061
+ab 00610062
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL NULL NULL NULL # NULL
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 00200020002000200020002000200020 16
+12 0020002000200020002000200020002000200020002000200020002000200020 32
+13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48
+14 a 00200020002000200020002000200020002000200020002000200020002000200061 34
+21 002000200020002000200020002000200020 18
+22 00200020002000200020002000200020002000200020002000200020002000200020 34
+23 002000200020002000200020002000200020002000200020002000200020002000200020 36
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+create table t1 (
+pk int primary key,
+col1 varchar(10) collate utf8mb4_bin not null,
+col2 varchar(20),
+key(col1)
+) engine=rocksdb;
+insert into t1 values (1, 'ab','ab');
+insert into t1 values (2, 'ab\0','ab0');
+select pk, hex(col1), col2 from t1 force index(col1) order by col1;
+pk hex(col1) col2
+2 616200 ab0
+1 6162 ab
+select pk, hex(col1), col2 from t1 ignore index(col1) order by col1;
+pk hex(col1) col2
+2 616200 ab0
+1 6162 ab
+drop table t1;
+create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
+insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc ');
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL email_i 33 NULL # Using index
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+index_name count
+email_i 1
+drop table t;
+set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct;
+set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums;
+set global rocksdb_checksums_pct = 100;
+set session rocksdb_verify_checksums = on;
+create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
+insert into t values (1, 'a');
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t index NULL email_i 33 NULL # Using index
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+index_name count
+email_i 1
+drop table t;
+set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
+set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums;
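The result file above documents MyRocks' handling of trailing spaces under binary (PAD SPACE) collations: values that differ only in trailing spaces collide on unique keys, the stored bytes (including trailing spaces and tabs) come back unchanged on read, and latin1_bin/utf8_bin indexes still allow index-only scans. A minimal standalone sketch of that behavior, assuming a server with the RocksDB engine available:

create table pad_demo (
  pk varchar(64) character set utf8 collate utf8_bin,
  note varchar(64),
  primary key (pk)
) engine=rocksdb;
insert into pad_demo values ('a', 'plain');
# Rejected: 'a ' compares equal to 'a' for key purposes
insert into pad_demo values ('a ', 'trailing-space');
# The original bytes of the accepted row are preserved:
select pk, hex(pk), note from pad_demo;
drop table pad_demo;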
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result
new file mode 100644
index 00000000000..de7608ebb1c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar_debug.result
@@ -0,0 +1,254 @@
+drop table if exists t1,t2;
+set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
+#
+# Issue 257: Sort order for varchars is different between
+# MyISAM/InnoDB vs MyRocks
+#
+create table t1 (
+pk varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+insert into t1 values ('a ', 'a-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values('b ', 'b-2x-space');
+insert into t1 values ('b', 'b');
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY'
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+b 622020 b-2x-space
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+pk hex(pk) col1
+a 6109 a-tab
+a 612009 a-space-tab
+a 61 a
+b 622020 b-2x-space
+# Try longer values
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY'
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+pk col1
+a a-tab
+a a-space-tab
+a a
+b b-2x-space
+c c-10-x-space
+drop table t1;
+# Secondary index
+create table t1 (
+pk int not null primary key,
+col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci,
+col2 varchar(64),
+key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL col1 195 NULL # Using index
+select col1, hex(col1) from t1;
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+# Must show 'using index' for latin1_bin and utf8_bin:
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index
+select col1, hex(col1) from t1 where col1 < 'b';
+col1 hex(col1)
+a 61202009
+a 6120
+a 61
+ab 6162
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 # NULL col1 195 NULL # Using index
+select pk, col1, hex(col1), length(col1) from t1;
+pk col1 hex(col1) length(col1)
+10 0
+11 2020202020202020 8
+12 20202020202020202020202020202020 16
+13 202020202020202020202020202020202020202020202020 24
+21 202020202020202020 9
+22 2020202020202020202020202020202020 17
+23 202020202020202020202020202020202020 18
+14 a 2020202020202020202020202020202061 17
+drop table t1;
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+select pk,length(a) from t1 force index(a) where a < 'zz';
+pk length(a)
+1 301
+2 301
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+pk length(a) rtrim(a)
+1 301 a
+2 301 b
+drop table t1;
+set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans";
+#
+# Check backwards compatibility:
+#
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+# Create the tables in the old format
+create table t1 (
+pk varchar(64) collate latin1_bin,
+col1 varchar(64),
+primary key (pk)
+);
+insert into t1 values ('a','a');
+# The following will not produce an error:
+insert into t1 values ('a ', 'a-space');
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+a 6120 a-space
+create table t2 (
+pk int not null primary key,
+col1 varchar(64) collate latin1_bin,
+col2 varchar(64),
+unique key (col1)
+);
+insert into t2 values (0, 'ab', 'a-b');
+# The following will not produce an error:
+insert into t2 values (1, 'a ', 'a-space');
+insert into t2 values (2, 'a', 'a');
+select pk, col1, hex(col1), col2 from t2;
+pk col1 hex(col1) col2
+0 ab 6162 a-b
+1 a 6120 a-space
+2 a 61 a
+# Check the format version:
+select table_name,index_name,kv_format_version
+from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name in ('t1','t2');
+table_name index_name kv_format_version
+t1 PRIMARY 10
+t2 PRIMARY 10
+t2 col1 10
+flush tables;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+a 6120 a-space
+select pk, col1, hex(col1), col2 from t2;
+pk col1 hex(col1) col2
+0 ab 6162 a-b
+1 a 6120 a-space
+2 a 61 a
+select pk, hex(pk), col1 from t1;
+pk hex(pk) col1
+a 61 a
+a 6120 a-space
+select pk, col1, hex(col1), col2 from t2;
+pk col1 hex(col1) col2
+0 ab 6162 a-b
+1 a 6120 a-space
+2 a 61 a
+drop table t1,t2;
+#
+# General upgrade tests to see that they work.
+#
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+id int primary key,
+col1 varchar(64) collate latin1_swedish_ci,
+unique key (col1)
+) engine=rocksdb;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+insert into t2 values (1, 'a');
+insert into t2 values (2, 'b');
+insert into t2 values (3, 'c');
+insert into t2 values (4, 'c ');
+select col1 from t2;
+col1
+a
+b
+c
+c
+delete from t2 where id = 4;
+alter table t2 engine=rocksdb;
+select col1 from t2;
+col1
+a
+b
+c
+insert into t2 values (4, 'c ');
+ERROR 23000: Duplicate entry 'c ' for key 'col1'
+drop table t2;
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+id int primary key,
+col1 varchar(64) collate latin1_bin,
+unique key (col1)
+) engine=rocksdb;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+insert into t2 values (1, 'a');
+insert into t2 values (2, 'b');
+insert into t2 values (3, 'c');
+insert into t2 values (4, 'c ');
+select col1 from t2;
+col1
+a
+b
+c
+c
+delete from t2 where id = 4;
+alter table t2 engine=rocksdb;
+select col1 from t2;
+col1
+a
+b
+c
+insert into t2 values (4, 'c ');
+ERROR 23000: Duplicate entry 'c ' for key 'col1'
+drop table t2;
+#
+# Check what happens when one tries to 'upgrade' to the new data format
+# and causes a unique key violation:
+#
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+pk int not null primary key,
+col1 varchar(64) collate latin1_bin,
+col2 varchar(64),
+unique key (col1)
+);
+insert into t2 values (1, 'a ', 'a-space');
+insert into t2 values (2, 'a', 'a');
+select * from t2;
+pk col1 col2
+1 a a-space
+2 a a
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+alter table t2 engine=rocksdb;
+ERROR 23000: Duplicate entry 'a' for key 'col1'
+drop table t2;
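The debug-only results above rely on "set session debug", so a debug build is assumed; they show that tables written under the initial on-disk format keep kv_format_version 10, may contain keys that differ only in trailing spaces, and can hit a duplicate-key error when rebuilt into the newer space-insensitive format. A hedged sketch of inspecting the format version with the information_schema.ROCKSDB_DDL table queried above:

select table_name, index_name, kv_format_version
from information_schema.ROCKSDB_DDL
where table_schema = database();
# Rebuilding a table re-encodes its keys in the current format
# (and, as shown above, can fail with a duplicate-key error):
# alter table <old_format_table> engine=rocksdb;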
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result
new file mode 100644
index 00000000000..0f3e7200d8a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result
@@ -0,0 +1,72 @@
+set debug_sync='RESET';
+drop table if exists t1;
+create table t1 (id int, value int, primary key (id)) engine=rocksdb;
+create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
+begin;
+insert into t1 values (1,1);
+set session rocksdb_lock_wait_timeout=50;
+begin;
+insert into t1 values (1,2);
+commit;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+commit;
+select * from t1;
+id value
+1 1
+truncate table t1;
+begin;
+insert into t2 values (1,1,1);
+begin;
+insert into t2 values (2,1,2);
+commit;
+ERROR 23000: Duplicate entry '1' for key 'id2'
+commit;
+select * from t2;
+id id2 value
+1 1 1
+truncate table t2;
+begin;
+insert into t1 values (1,1);
+begin;
+insert into t1 values (1,2);
+rollback;
+commit;
+select * from t1;
+id value
+1 2
+truncate table t1;
+begin;
+insert into t2 values (1,1,1);
+begin;
+insert into t2 values (2,1,2);
+rollback;
+commit;
+select * from t2;
+id id2 value
+2 1 2
+truncate table t2;
+set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1';
+insert into t1 values (1,1);
+set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2';
+insert into t2 values (1,1,1);
+set debug_sync='now WAIT_FOR parked1';
+set debug_sync='now WAIT_FOR parked2';
+set session rocksdb_lock_wait_timeout=1;
+insert into t1 values (1,2);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+insert into t2 values (2,1,2);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.id2
+set debug_sync='now SIGNAL go1';
+set debug_sync='now SIGNAL go2';
+insert into t1 values (1,2);
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+insert into t2 values (2,1,2);
+ERROR 23000: Duplicate entry '1' for key 'id2'
+select * from t1;
+id value
+1 1
+select * from t2;
+id id2 value
+1 1 1
+set debug_sync='RESET';
+drop table t1, t2;
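The result above exercises unique-key checking across concurrent transactions: a second transaction inserting the same key waits on the first, times out with an error naming the contended index if rocksdb_lock_wait_timeout expires, gets a duplicate-key error once the first transaction commits, and succeeds if it rolls back. A rough two-session sketch of that flow, reusing the t1 created at the top of this result (the session markers are comments, not a single runnable script):

# session A
begin;
insert into t1 values (1, 1);
# session B
set session rocksdb_lock_wait_timeout = 1;
insert into t1 values (1, 2);
# -> while A is open: lock wait timeout, "Timeout on index: test.t1.PRIMARY"
# -> after A commits: ERROR 23000 Duplicate entry '1' for key 'PRIMARY'
# -> after A rolls back: the insert succeeds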
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result
new file mode 100644
index 00000000000..59ad709a595
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result
@@ -0,0 +1,185 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32),
+id4 INT, id5 VARCHAR(32),
+value1 INT, value2 INT, value3 VARCHAR(32),
+PRIMARY KEY (id1, id2) ,
+UNIQUE INDEX (id2, id1) ,
+UNIQUE INDEX (id2, id3, id4) ,
+INDEX (id1) ,
+INDEX (id3, id1) ,
+UNIQUE INDEX(id5) ,
+INDEX (id2, id5)) ENGINE=ROCKSDB;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test inserting a key that returns duplicate error
+INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY'
+INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY'
+INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY'
+INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11);
+ERROR 23000: Duplicate entry '1' for key 'id5'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11);
+ERROR 23000: Duplicate entry '5' for key 'id5'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11);
+ERROR 23000: Duplicate entry '10' for key 'id5'
+# Test updating a key that returns duplicate error
+UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2;
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+UPDATE t1 SET id2=1, id3=1, id4=1;
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test updating a key to itself
+UPDATE t1 set id2=id4;
+UPDATE t1 set id5=id3, value1=value2;
+UPDATE t1 set value3=value1;
+# Test modifying values should not cause duplicates
+UPDATE t1 SET value1=value3+1;
+UPDATE t1 SET value3=value3 div 2;
+UPDATE t1 SET value2=value3;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test NULL values are considered unique
+INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+# Adding multiple rows where one of the rows fail the duplicate
+# check should fail the whole statement
+INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23),
+(24, 24, 24, 24, 24, 24, 24, 24),
+(25, 10, 10, 10, 25, 25, 25, 25),
+(26, 26, 26, 26, 26, 26, 26, 26);
+ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2'
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+BEGIN;
+INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30);
+BEGIN;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+# Primary key should prevent duplicate on insert
+INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+# Primary key should prevent duplicate on update
+UPDATE t1 SET id1=30, id2=31 WHERE id2=10;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+# Unique secondary key should prevent duplicate on insert
+INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# Unique secondary key should prevent duplicate on update
+UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+UPDATE t1 SET id5=34 WHERE id2=8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# Adding multiple rows where one of the rows fail the duplicate
+# check should fail the whole statement
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+(36, 36, 36, 36, 36, 36, 36, 36),
+(37, 31, 32, 33, 37, 37, 37, 37),
+(38, 38, 38, 38, 38, 38, 38, 38);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+(36, 36, 36, 36, 36, 36, 36, 36),
+(37, 37, 37, 37, 34, 37, 37, 37),
+(38, 38, 38, 38, 38, 38, 38, 38);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# NULL values are unique and duplicates in value fields are ignored
+INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37),
+(38, 31, 32, NULL, 38, 37, 37, 37),
+(39, 31, 32, NULL, 39, 37, 37, 37);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+# Fail on duplicate key update for row added in our transaction
+UPDATE t1 SET id5=37 WHERE id1=38;
+ERROR 23000: Duplicate entry '37' for key 'id5'
+# Fail on lock timeout for row modified in another transaction
+UPDATE t1 SET id5=34 WHERE id1=38;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# NULL values are unique
+UPDATE t1 SET id5=NULL WHERE value1 > 37;
+COMMIT;
+COMMIT;
+BEGIN;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+17
+BEGIN;
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+# When transaction is pending, fail on lock acquisition
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+17
+COMMIT;
+# When transaction is committed, fail on duplicate key
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+Got one of the listed errors
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2'
+ROLLBACK;
+SELECT * FROM t1;
+id1 id2 id3 id4 id5 value1 value2 value3
+1 1 1 1 1 2 0 0
+2 2 2 2 2 3 1 1
+3 3 3 3 3 4 1 1
+4 4 4 4 4 5 2 2
+5 5 5 5 5 6 2 2
+6 6 6 6 6 7 3 3
+7 7 7 7 7 8 3 3
+8 8 8 8 8 9 4 4
+9 9 9 9 9 10 4 4
+10 10 10 10 10 11 5 5
+20 20 20 NULL NULL 20 20 20
+21 20 20 NULL NULL 20 20 20
+22 20 20 NULL NULL 20 20 20
+30 31 32 33 34 30 30 30
+37 31 32 NULL 37 37 37 37
+38 31 32 NULL 38 37 37 37
+39 31 32 NULL 39 37 37 37
+40 40 40 40 40 40 40 40
+DROP TABLE t1;
+#
+# Issue #88: Creating unique index over column with duplicate values succeeds
+#
+create table t1 (pk int primary key, a int) engine=rocksdb;
+insert into t1 values
+(1, 1),
+(2, 2),
+(3, 3),
+(4, 1),
+(5, 5);
+alter table t1 add unique(a);
+ERROR 23000: Duplicate entry '1' for key 'a'
+drop table t1;
+#
+# Issue #111
+#
+CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16;
+INSERT INTO t2 VALUES (1,1);
+INSERT INTO t2 VALUES (1,1);
+ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY'
+INSERT INTO t2 VALUES (2,1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+DROP TABLE t2;
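The result above checks that NULLs never conflict in a unique secondary index and that a multi-row INSERT fails as a whole when any row violates a unique key. A minimal standalone sketch:

create table uniq_demo (pk int primary key, a int, unique key (a)) engine=rocksdb;
# All accepted: NULL is never equal to NULL for uniqueness purposes
insert into uniq_demo values (1, NULL), (2, NULL), (3, NULL);
# The whole statement is rejected because the second row duplicates a=10
insert into uniq_demo values (4, 10), (5, 10);
select count(*) from uniq_demo;   # still 3
drop table uniq_demo;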
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result
new file mode 100644
index 00000000000..0ff55ac8d10
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result
@@ -0,0 +1,162 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32),
+id4 INT, id5 VARCHAR(32),
+value1 INT, value2 INT, value3 VARCHAR(32),
+PRIMARY KEY (id1, id2) COMMENT 'rev:cf',
+UNIQUE INDEX (id2, id1) COMMENT 'rev:cf',
+UNIQUE INDEX (id2, id3, id4) COMMENT 'rev:cf',
+INDEX (id1) COMMENT 'rev:cf',
+INDEX (id3, id1) COMMENT 'rev:cf',
+UNIQUE INDEX(id5) COMMENT 'rev:cf',
+INDEX (id2, id5)) ENGINE=ROCKSDB;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test inserting a key that returns duplicate error
+INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY'
+INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY'
+INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY'
+INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11);
+ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11);
+ERROR 23000: Duplicate entry '1' for key 'id5'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11);
+ERROR 23000: Duplicate entry '5' for key 'id5'
+INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11);
+ERROR 23000: Duplicate entry '10' for key 'id5'
+# Test updating a key that returns duplicate error
+UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2;
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+UPDATE t1 SET id2=1, id3=1, id4=1;
+ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2'
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test updating a key to itself
+UPDATE t1 set id2=id4;
+UPDATE t1 set id5=id3, value1=value2;
+UPDATE t1 set value3=value1;
+# Test modifying values should not cause duplicates
+UPDATE t1 SET value1=value3+1;
+UPDATE t1 SET value3=value3 div 2;
+UPDATE t1 SET value2=value3;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+# Test NULL values are considered unique
+INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+# Adding multiple rows where one of the rows fail the duplicate
+# check should fail the whole statement
+INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23),
+(24, 24, 24, 24, 24, 24, 24, 24),
+(25, 10, 10, 10, 25, 25, 25, 25),
+(26, 26, 26, 26, 26, 26, 26, 26);
+ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2'
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+BEGIN;
+INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30);
+BEGIN;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+# Primary key should prevent duplicate on insert
+INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+# Primary key should prevent duplicate on update
+UPDATE t1 SET id1=30, id2=31 WHERE id2=10;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+# Unique secondary key should prevent duplicate on insert
+INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# Unique secondary key should prevent duplicate on update
+UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+UPDATE t1 SET id5=34 WHERE id2=8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# Adding multiple rows where one of the rows fail the duplicate
+# check should fail the whole statement
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+(36, 36, 36, 36, 36, 36, 36, 36),
+(37, 31, 32, 33, 37, 37, 37, 37),
+(38, 38, 38, 38, 38, 38, 38, 38);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+(36, 36, 36, 36, 36, 36, 36, 36),
+(37, 37, 37, 37, 34, 37, 37, 37),
+(38, 38, 38, 38, 38, 38, 38, 38);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# NULL values are unique and duplicates in value fields are ignored
+INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37),
+(38, 31, 32, NULL, 38, 37, 37, 37),
+(39, 31, 32, NULL, 39, 37, 37, 37);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+# Fail on duplicate key update for row added in our transaction
+UPDATE t1 SET id5=37 WHERE id1=38;
+ERROR 23000: Duplicate entry '37' for key 'id5'
+# Fail on lock timeout for row modified in another transaction
+UPDATE t1 SET id5=34 WHERE id1=38;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id5
+# NULL values are unique
+UPDATE t1 SET id5=NULL WHERE value1 > 37;
+COMMIT;
+COMMIT;
+BEGIN;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+17
+BEGIN;
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+# When transaction is pending, fail on lock acquisition
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.id2_2
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+17
+COMMIT;
+# When transaction is committed, fail on duplicate key
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+Got one of the listed errors
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2'
+ROLLBACK;
+SELECT * FROM t1;
+id1 id2 id3 id4 id5 value1 value2 value3
+40 40 40 40 40 40 40 40
+39 31 32 NULL 39 37 37 37
+38 31 32 NULL 38 37 37 37
+37 31 32 NULL 37 37 37 37
+30 31 32 33 34 30 30 30
+22 20 20 NULL NULL 20 20 20
+21 20 20 NULL NULL 20 20 20
+20 20 20 NULL NULL 20 20 20
+10 10 10 10 10 11 5 5
+9 9 9 9 9 10 4 4
+8 8 8 8 8 9 4 4
+7 7 7 7 7 8 3 3
+6 6 6 6 6 7 3 3
+5 5 5 5 5 6 2 2
+4 4 4 4 4 5 2 2
+3 3 3 3 3 4 1 1
+2 2 2 2 2 3 1 1
+1 1 1 1 1 2 0 0
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result
new file mode 100644
index 00000000000..32776e19767
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t1;
+create table t1 (id int primary key, value int) engine=rocksdb;
+insert into t1 values (1,1);
+SET session transaction isolation level read uncommitted;
+begin;
+insert into t1 values (2,1);
+ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED
+select * from t1 where id=1;
+ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED
+rollback;
+SET session transaction isolation level serializable;
+begin;
+insert into t1 values (2,1);
+ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level SERIALIZABLE
+select * from t1 where id=1;
+ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level SERIALIZABLE
+rollback;
+DROP TABLE t1;
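As recorded above, MyRocks accepts only READ COMMITTED and REPEATABLE READ; any statement touching a RocksDB table under READ UNCOMMITTED or SERIALIZABLE fails with the error shown. A minimal sketch of switching to a supported level before using such a table (t1 as defined at the top of the result):

set session transaction isolation level read committed;
begin;
select * from t1 where id = 1;   # allowed under READ COMMITTED
commit;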
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update.result b/storage/rocksdb/mysql-test/rocksdb/r/update.result
new file mode 100644
index 00000000000..33bf8d09405
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/update.result
@@ -0,0 +1,113 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+UPDATE t1 SET a=a+100;
+SELECT a,b FROM t1;
+a b
+101 a
+101 a
+10100 foobar
+10100 foobar
+102 b
+102 b
+103 c
+103 c
+104 d
+104 d
+105 e
+105 e
+UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100;
+SELECT a,b FROM t1;
+a b
+1 NULL
+1 NULL
+10000 NULL
+10000 NULL
+2 NULL
+2 NULL
+3 NULL
+3 NULL
+4 NULL
+4 NULL
+5 NULL
+5 NULL
+UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1;
+SELECT a,b FROM t1;
+a b
+1 NULL
+1 update
+10000 NULL
+10000 NULL
+2 NULL
+2 NULL
+3 NULL
+3 NULL
+4 NULL
+4 NULL
+5 NULL
+5 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+BEGIN;
+UPDATE t1 SET a=a+100;
+UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100;
+COMMIT;
+SELECT * FROM t1 ORDER BY pk;
+a b pk
+10050 NULL 12
+10050 NULL 6
+51 NULL 1
+51 NULL 7
+52 NULL 2
+52 NULL 8
+53 NULL 3
+53 NULL 9
+54 NULL 10
+54 NULL 4
+55 NULL 11
+55 NULL 5
+BEGIN;
+UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3;
+UPDATE t1 SET b = '';
+ROLLBACK;
+SELECT * FROM t1 ORDER BY pk;
+a b pk
+51 NULL 1
+52 NULL 2
+53 NULL 3
+54 NULL 4
+55 NULL 5
+10050 NULL 6
+51 NULL 7
+52 NULL 8
+53 NULL 9
+54 NULL 10
+55 NULL 11
+10050 NULL 12
+BEGIN;
+UPDATE t1 SET b = 'update2' WHERE a <= 100;
+SAVEPOINT spt1;
+UPDATE t1 SET b = '';
+ROLLBACK TO SAVEPOINT spt1;
+ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows.
+UPDATE t1 SET b = 'upd' WHERE a = 10050;
+COMMIT;
+ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction.
+SELECT * FROM t1 ORDER BY pk;
+a b pk
+51 NULL 1
+52 NULL 2
+53 NULL 3
+54 NULL 4
+55 NULL 5
+10050 NULL 6
+51 NULL 7
+52 NULL 8
+53 NULL 9
+54 NULL 10
+55 NULL 11
+10050 NULL 12
+DROP TABLE t1;
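Besides plain and transactional UPDATEs, the result above records two transaction-control limits: ROLLBACK TO SAVEPOINT is rejected once the transaction has modified rows, and after that error the transaction can only be rolled back (the later COMMIT fails). A short sketch of the resulting pattern, reusing the t1 layout from above:

begin;
update t1 set b = 'x' where pk = 1;
savepoint s1;
# A "rollback to savepoint s1" here would be rejected because rows were modified;
# roll back (or commit) the whole transaction instead:
rollback;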
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result
new file mode 100644
index 00000000000..9dda807edea
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result
@@ -0,0 +1,57 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1;
+SELECT a,b FROM t1 ORDER BY pk;
+a b
+1 upd1
+2 b
+3 c
+4 d
+5 e
+10000 foobar
+1 a
+2 b
+3 c
+4 d
+5 e
+10000 foobar
+UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b'
+WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+ERROR 21000: Subquery returns more than 1 row
+UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b'
+WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+Warnings:
+Error 1242 Subquery returns more than 1 row
+SELECT a,b FROM t1 ORDER BY pk;
+a b
+1 upd2a
+2 upd2a
+3 upd2a
+4 upd2a
+5 upd2a
+10000 upd2a
+1 a
+2 upd2a
+3 upd2a
+4 upd2a
+5 upd2a
+10000 upd2a
+SELECT c,d FROM t2 ORDER BY pk;
+c d
+upd2b 1
+upd2b 2
+upd2b 3
+upd2b 4
+upd2b 5
+upd2b 10000
+upd2b 1
+upd2b 2
+upd2b 3
+upd2b 4
+upd2b 5
+upd2b 10000
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result
new file mode 100644
index 00000000000..294c07b2a79
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result
@@ -0,0 +1,691 @@
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar 10000
+foobar 10000
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+SELECT a,b FROM t1;
+a b
+1 a
+1 a
+101 b
+101 b
+101 c
+101 c
+101 d
+101 d
+101 e
+101 e
+101 foobar
+101 foobar
+SELECT c,d FROM t2;
+c d
+foobar NULL
+foobar NULL
+multi 1
+multi 1
+multi 2
+multi 2
+multi 3
+multi 3
+multi 4
+multi 4
+multi 5
+multi 5
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result
new file mode 100644
index 00000000000..576fe6f6754
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+UPDATE t1 SET a=100, b='f' WHERE b IN ('b','c');
+UPDATE t1 SET b='m' WHERE b = 'f';
+UPDATE t1 SET b='z' WHERE a < 2;
+UPDATE t1 SET b='';
+SELECT a,b FROM t1;
+a b
+1
+100
+100
+4
+5
+6
+7
+8
+DROP TABLE t1;
+# RocksDB: skip the test for secondary UNIQUE keys.
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a');
+UPDATE t1 SET a=a+200;
+UPDATE t1 SET a=0 WHERE a > 250;
+UPDATE t1 SET a=205 WHERE a=200;
+ERROR 23000: Duplicate entry '205' for key 'PRIMARY'
+UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1;
+SELECT a,b FROM t1;
+a b
+0 a
+12345 e
+200 f
+201 a
+202 b
+203 c
+204 d
+UPDATE t1 SET a=80 WHERE a IN (202,203);
+ERROR 23000: Duplicate entry '80' for key 'PRIMARY'
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result
new file mode 100644
index 00000000000..92c5207046a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
+CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4;
+Expect errors that we are missing two .frm files
+ RocksDB: Schema mismatch - Table test.t1 is registered in RocksDB but does not have a .frm file
+ RocksDB: Schema mismatch - Table test.t2 is registered in RocksDB but does not have a .frm file
+Expect an error that we have an extra .frm file
+ RocksDB: Schema mismatch - A .frm file exists for table test.t1_dummy, but that table is not registered in RocksDB
+DROP TABLE t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result
new file mode 100644
index 00000000000..433c00c9fc8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result
@@ -0,0 +1,39 @@
+SET GLOBAL rocksdb_write_disable_wal=false;
+SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
+create table aaa (id int primary key, i int) engine rocksdb;
+SET LOCAL rocksdb_write_sync=off;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(1,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+0
+insert aaa(id, i) values(2,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+0
+insert aaa(id, i) values(3,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+0
+SET LOCAL rocksdb_write_sync=1;
+insert aaa(id, i) values(4,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+1
+insert aaa(id, i) values(5,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+2
+insert aaa(id, i) values(6,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+variable_value-@a
+3
+SET GLOBAL rocksdb_background_sync=on;
+SET LOCAL rocksdb_write_sync=off;
+insert aaa(id, i) values(7,1);
+truncate table aaa;
+drop table aaa;
+SET GLOBAL rocksdb_write_sync=off;
+SET GLOBAL rocksdb_write_disable_wal=false;
+SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
+SET GLOBAL rocksdb_background_sync=off;
diff --git a/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk
new file mode 100644
index 00000000000..4617b6d9fc3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk
@@ -0,0 +1,27 @@
+#!/bin/awk
+
+/Query_time:/ {
+ results["Rows_examined:"] = "uninit";
+ results["RocksDB_key_skipped:"] = "uninit";
+ results["RocksDB_del_skipped:"] = "uninit";
+
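+ # The matched slow-log line consists of "Name: value" pairs after the first
+ # field, so record every pair into the results array.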
+ for (i = 2; i <= NF; i = i+2) {
+ results[$i] = $(i+1);
+ }
+
+ # If the output format has changed and we don't find these keys,
+ # error out.
+ if (results["Rows_examined:"] == "uninit" ||
+ results["RocksDB_key_skipped:"] == "uninit" ||
+ results["RocksDB_del_skipped:"] == "uninit") {
+ exit(-2);
+ }
+
+ if (results["Rows_examined:"] == 0) {
+ next
+ }
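+ # Rows were examined, so both RocksDB skip counters are expected to be
+ # non-zero; fail if either of them is still zero.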
+ if (results["RocksDB_key_skipped:"] == 0 ||
+ results["RocksDB_del_skipped:"] == 0) {
+ exit(-1);
+ }
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/1st.test b/storage/rocksdb/mysql-test/rocksdb/t/1st.test
new file mode 100644
index 00000000000..cecef8b7537
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/1st.test
@@ -0,0 +1,36 @@
+--source include/have_rocksdb.inc
+
+#
+# This test checks some very basic capabilities
+# which will be used in almost every other test,
+# and will not be checked through support* variables.
+# If this test does not pass, there is no point
+# in executing other ones.
+#
+# Minimal requirements:
+# - supported column types: INT, CHAR (default CHAR(8), INT(11));
+# - column attributes as declared in define_engine.inc ($default_col_opts)
+# (by default empty, which means no additional attributes apart from the type);
+# - table attributes as declared in define_engine.inc ($default_tbl_opts)
+# (by default empty, which means no additional attributes apart from ENGINE);
+# - CREATE TABLE .. (column1 <column options>, column2 <column options>) ENGINE=<storage_engine>;
+# - INSERT INTO .. VALUES (val1,val2);
+# - DROP TABLE ..
+# - SELECT a,b FROM ..
+# - SHOW CREATE TABLE ..
+# - SHOW COLUMNS IN ...
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+INSERT INTO t1 VALUES (1, 1,'a');
+INSERT INTO t1 (a,b) VALUES (2,'b');
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf
new file mode 100644
index 00000000000..45ec29033c6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.cnf
@@ -0,0 +1,5 @@
+[mysql]
+no-defaults
+
+[mysqld.1]
+rocksdb_strict_collation_check=1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test
new file mode 100644
index 00000000000..e0d7a4465c8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test
@@ -0,0 +1,290 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+##
+## test adding index inplace
+##
+
+# test basic add
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+DROP TABLE t1;
+
+# test add + drop (simultaneous)
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+DROP TABLE t1;
+
+## test multi-drop + multi-add
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+DROP TABLE t1;
+
+# test multi add + drop (simultaneous)
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2;
+DROP TABLE t1;
+
+# test dropping and adding a key simultaneously w/ same name but different col
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2;
+DROP TABLE t1;
+
+##
+## test adding index inplace w/ various column types
+##
+
+# test basic add
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3;
+DROP TABLE t1;
+
+## test add + drop (simultaneous)
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3;
+DROP TABLE t1;
+
+### test multi-drop + multi-add
+CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+INSERT INTO t1 VALUES ('aaa', '1111', 1);
+INSERT INTO t1 VALUES ('bbb', '2222', 2);
+INSERT INTO t1 VALUES ('ccc', '3333', 3);
+ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE;
+ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3;
+DROP TABLE t1;
+
+##
+## test adding via CREATE/DROP index syntax
+##
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+CREATE INDEX kb on t1 (b);
+CREATE INDEX kba on t1 (b,a);
+DROP INDEX ka on t1;
+DROP INDEX kab on t1;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5;
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2;
+DROP TABLE t1;
+
+#
+# Create tables with partitions and try to update/select from them.
+#
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+DROP INDEX kij ON t1;
+SHOW CREATE TABLE t1;
+
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+SELECT COUNT(*) FROM t1;
+
+DROP TABLE t1;
+
+#
+# test crash recovery
+#
+
+CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+
+--echo # crash_during_online_index_creation
+flush logs;
+
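+# Tell mysql-test-run.pl that the upcoming crash is expected and that it should
+# restart mysqld afterwards.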
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="+d,crash_during_online_index_creation";
+--error 2013
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+SET SESSION debug="-d,crash_during_online_index_creation";
+
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+
+DROP TABLE t1;
+
+#
+# Test crash recovery with partitioned tables
+#
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+--echo # crash_during_index_creation_partition
+flush logs;
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="+d,crash_during_index_creation_partition";
+--error 2013
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+SET SESSION debug="-d,crash_during_index_creation_partition";
+
+SHOW CREATE TABLE t1;
+
+# here, the index numbers should be higher because previously 4 index numbers
+# were allocated for the partitioned table
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+SELECT COUNT(*) FROM t1;
+
+DROP TABLE t1;
+
+#
+# Test rollback on partitioned tables for inplace alter
+#
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+--echo # crash_during_index_creation_partition
+flush logs;
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="+d,myrocks_simulate_index_create_rollback";
+
+--echo # expected assertion failure from sql layer here for alter rollback
+call mtr.add_suppression("Assertion `0' failed.");
+call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
+
+--error 2013
+
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+SET SESSION debug="-d,myrocks_simulate_index_create_rollback";
+
+SHOW CREATE TABLE t1;
+
+# here, the index numbers should be higher because previously 4 index numbers
+# were allocated for the partitioned table
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+
+SHOW CREATE TABLE t1;
+SELECT COUNT(*) FROM t1;
+
+DROP TABLE t1;
+
+# test failure in prepare phase (due to collation)
+CREATE TABLE t1 (a INT, b TEXT);
+
+--error 1105
+ALTER TABLE t1 ADD KEY kb(b(10));
+ALTER TABLE t1 ADD PRIMARY KEY(a);
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test
new file mode 100644
index 00000000000..2ad2c390d59
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test
@@ -0,0 +1,102 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+# Create a table with a primary key and one secondary key as well as one
+# more column
+CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin';
+
+--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")`
+
+# Create a text file with data to import into the table.
+# The primary key is in sorted order and the secondary keys are randomly generated
+--let ROCKSDB_INFILE = $file
+perl;
+my $fn = $ENV{'ROCKSDB_INFILE'};
+open(my $fh, '>>', $fn) || die "perl open($fn): $!";
+my $max = 3000000;
+my @chars = ("A".."Z", "a".."z", "0".."9");
+my @lowerchars = ("a".."z");
+my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1);
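+# Encode the row number as a fixed-width 5-character base-26 string
+# ('aaaaa', 'aaaab', ...) so the primary keys are generated in sorted order.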
+for (my $ii = 0; $ii < $max; $ii++)
+{
+ my $pk;
+ my $tmp = $ii;
+ foreach (@powers_of_26)
+ {
+ $pk .= $lowerchars[$tmp / $_];
+ $tmp = $tmp % $_;
+ }
+
+ my $num = int(rand(25)) + 6;
+ my $a;
+ $a .= $chars[rand(@chars)] for 1..$num;
+
+ $num = int(rand(25)) + 6;
+ my $b;
+ $b .= $chars[rand(@chars)] for 1..$num;
+ print $fh "$pk\t$a\t$b\n";
+}
+close($fh);
+EOF
+
+--file_exists $file
+
+set rocksdb_bulk_load=1;
+set rocksdb_bulk_load_size=100000;
+--disable_query_log
+--echo LOAD DATA INFILE <input_file> INTO TABLE t1;
+eval LOAD DATA INFILE '$file' INTO TABLE t1;
+--enable_query_log
+set rocksdb_bulk_load=0;
+
+# Make sure all the data is there.
+select count(pk) from t1;
+select count(a) from t1;
+select count(b) from t1;
+
+# now do fast secondary index creation
+ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
+# disable duplicate index warning
+--disable_warnings
+# now do same index using copy algorithm
+ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
+--enable_warnings
+
+# checksum testing
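+# Each subquery below reduces column b to a single CRC32-based checksum via the
+# given index; UNION DISTINCT collapses equal checksums, so c=1 means kb and
+# kb_copy returned identical data.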
+SELECT COUNT(*) as c FROM
+(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`)
+UNION DISTINCT
+SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#',
+`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE
+INDEX(`kb_copy`)) as temp;
+
+select count(*) from t1 FORCE INDEX(kb);
+select count(*) from t1 FORCE INDEX(kb_copy);
+select count(*) from t1 FORCE INDEX(PRIMARY);
+
+# drop the index
+ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE;
+ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE;
+
+# add two indexes simultaneously
+ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE;
+SELECT COUNT(*) FROM t1 FORCE INDEX(kab);
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+# Reverse CF testing, needs to be added to SSTFileWriter in reverse order
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB;
+INSERT INTO t1 (a, b) VALUES (1, 5);
+INSERT INTO t1 (a, b) VALUES (2, 6);
+INSERT INTO t1 (a, b) VALUES (3, 7);
+ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test
new file mode 100644
index 00000000000..033b1325151
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test
@@ -0,0 +1,22 @@
+
+#
+# Tests concurrent inserts for tables with no primary key.
+#
+
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+--echo # Binary must be compiled with debug for this test
+--source include/have_debug.inc
+
+# create the actual table
+CREATE TABLE t1 (a INT) ENGINE=rocksdb;
+
+let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py root 127.0.0.1 $MASTER_MYPORT test t1 100 4;
+exec $exec;
+
+SELECT COUNT(*) from t1;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test
new file mode 100644
index 00000000000..d1fe15b98fe
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test
@@ -0,0 +1,91 @@
+--source include/have_rocksdb.inc
+
+#
+# This test checks some very basic capabilities
+# for tables without primary keys. A hidden pk will be generated under the hood
+# in myrocks. Everything should work here as normal.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# test CREATE
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+## test ALTER
+CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,'a'),(5,'z');
+ALTER TABLE t1 ADD COLUMN b INT;
+SHOW CREATE TABLE t1;
+
+--sorted_result
+SELECT * FROM t1;
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+--sorted_result
+SELECT * FROM t1;
+DROP TABLE t1;
+
+## test creating a table with primary and then dropping that key
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+ALTER TABLE t1 DROP COLUMN pk;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+# test CHECK TABLE
+# CHECK TABLE statements
+#
+# Note: the output is likely to be different for the engine under test,
+# in which case rdiff will be needed. Or, the output might say that
+# the storage engine does not support CHECK.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+
+CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb;
+
+CHECK TABLE t1;
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+CHECK TABLE t1, t2 FOR UPGRADE;
+INSERT INTO t2 (a,b) VALUES (5,'e');
+CHECK TABLE t2 QUICK;
+INSERT INTO t1 (a,b) VALUES (6,'f');
+CHECK TABLE t1 FAST;
+INSERT INTO t1 (a,b) VALUES (7,'g');
+INSERT INTO t2 (a,b) VALUES (8,'h');
+CHECK TABLE t2, t1 MEDIUM;
+INSERT INTO t1 (a,b) VALUES (9,'i');
+INSERT INTO t2 (a,b) VALUES (10,'j');
+CHECK TABLE t1, t2 EXTENDED;
+INSERT INTO t1 (a,b) VALUES (11,'k');
+CHECK TABLE t1 CHANGED;
+
+DROP TABLE t1, t2;
+
+# test disabling unique keys
+--error 1105
+CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb;
+
+## test restarting a table that has no data
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+--source include/restart_mysqld.inc
+
+## a single DELETE statement should remove MULTIPLE rows (aka duplicate rows)
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (36,'foo');
+DELETE FROM t1 WHERE a = 35 AND b = 'foo';
+--sorted_result
+SELECT * FROM t1;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test
new file mode 100644
index 00000000000..1f3ef49e534
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test
@@ -0,0 +1,137 @@
+--source include/have_rocksdb.inc
+
+#
+# This test checks some very basic capabilities
+# for tables without primary keys. A hidden pk will be generated under the hood
+# in myrocks. Everything should work here as normal.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+## test CREATE with SK
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+## test adding/dropping sk w/no pk
+CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+ALTER TABLE t1 ADD INDEX (b);
+--source no_primary_key_basic_ops.inc
+
+ALTER TABLE t1 DROP INDEX b;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+# test dropping pk w/ sk
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+ALTER TABLE t1 DROP COLUMN pk;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB;
+ALTER TABLE t1 DROP COLUMN `pk`;
+DROP TABLE t1;
+
+# create table with multiple sk, make sure it still works
+# test CREATE with SK
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+# test CREATE table with multi-part sk
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+# test CREATE table with more than one sk
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+# test check table with sk
+CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (1),(2),(5);
+CHECK TABLE t1;
+INSERT INTO t1 (a) VALUES (6),(8),(12);
+CHECK TABLE t1 FOR UPGRADE;
+INSERT INTO t1 (a) VALUES (13),(15),(16);
+CHECK TABLE t1 QUICK;
+INSERT INTO t1 (a) VALUES (17),(120),(132);
+CHECK TABLE t1 FAST;
+INSERT INTO t1 (a) VALUES (801),(900),(7714);
+CHECK TABLE t1 MEDIUM;
+INSERT INTO t1 (a) VALUES (8760),(10023),(12000);
+CHECK TABLE t1 EXTENDED;
+INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028);
+CHECK TABLE t1 CHANGED;
+DROP TABLE t1;
+
+## tables with multi-part secondary indexes + columns that don't belong to any
+## secondary indexes
+CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4);
+INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8);
+INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13);
+INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17);
+
+--sorted_result
+SELECT * FROM t1;
+--sorted_result
+SELECT * FROM t1 WHERE a = 1 OR a = 10;
+--sorted_result
+SELECT * FROM t1 WHERE c = 3 OR d = 17;
+--sorted_result
+SELECT * FROM t1 WHERE a > 5 OR d > 5;
+
+# force some of these selects to use different indexes and/or have the columns
+# being selected also not contain column d
+--sorted_result
+SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11;
+--sorted_result
+SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12;
+
+UPDATE t1 SET a=a+100;
+UPDATE t1 SET a=a-100, b=99 WHERE a>100;
+--sorted_result
+SELECT * FROM t1;
+
+DELETE FROM t1 WHERE a>5;
+DELETE FROM t1 WHERE b=99 AND d>4;
+--sorted_result
+SELECT * FROM t1;
+
+TRUNCATE TABLE t1;
+DROP TABLE t1;
+
+## secondary indexes live in reverse column families
+CREATE TABLE t1 (a INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b)
+comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb;
+--source no_primary_key_basic_ops.inc
+DROP TABLE t1;
+
+## https://github.com/facebook/mysql-5.6/issues/209
+## Accidental single delete caused data inconsistency
+CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB;
+INSERT INTO t1 (col1, col2) values (2,2);
+ALTER TABLE t1 ADD COLUMN extra INT;
+UPDATE t1 SET col2 = 1;
+select * from t1;
+DELETE FROM t1 WHERE col1 = 2;
+
+# flush memtable to cause compaction to occur.
+# During compaction, if a SingleDelete occurs then the delete marker and the
+# key it is deleting are both removed. This will cause data inconsistency if
+# SingleDelete is called on PK, since we do multiple Put() operations to update
+# primary keys.
+set global rocksdb_force_flush_memtable_now = true;
+
+select * from t1;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test
new file mode 100644
index 00000000000..e3ac4307c54
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_os_buffer.test
@@ -0,0 +1,30 @@
+--source include/have_rocksdb.inc
+
+# Issue221
+# Turning on --rocksdb-allow-mmap-reads while having --rocksdb-allow-os-buffer
+# off caused an assertion in RocksDB. Now it should not be allowed and the
+# server will not start with that configuration
+
+# Write file to make mysql-test-run.pl expect the "crash", but don't restart
+# the server until it is told to
+--let $_server_id= `SELECT @@server_id`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--exec echo "wait" >$_expect_file_name
+shutdown_server 10;
+
+# Clear the log
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+
+# Attempt to restart the server with invalid options
+--exec echo "restart:--rocksdb_allow_os_buffer=0 --rocksdb_allow_mmap_reads=1" >$_expect_file_name
+--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart
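+# The server refuses to start with this combination, so after the wait restart
+# it with the default options.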
+--exec echo "restart:" >$_expect_file_name
+
+# Cleanup
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
+
+# We should now have an error message
+--exec grep "disable allow_os_buffer" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test
new file mode 100644
index 00000000000..2603311da55
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test
@@ -0,0 +1,94 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic ALTER TABLE statements.
+#
+# USAGE of table options in ALTER statements
+# is covered in tbl_standard_opts and tbl_opt*.tests.
+#
+# Index operations are covered in index* tests.
+#
+# ALTER OFFLINE is not covered as it is not supported, as of 5.5.23
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z');
+
+# Column operations
+
+ALTER TABLE t1 ADD COLUMN b INT;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0';
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 ALTER a DROP DEFAULT;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 CHANGE b1 b INT AFTER c;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 CHANGE b b CHAR(8);
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 MODIFY COLUMN b INT;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 MODIFY COLUMN b INT AFTER a;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+
+
+# Rename table
+
+ALTER TABLE t1 RENAME TO t2;
+--error ER_NO_SUCH_TABLE
+SHOW CREATE TABLE t1;
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+
+
+# ORDER BY
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3);
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC;
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+
+# Character set, collate
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs;
+INSERT INTO t1 VALUES (1,5,'z','t');
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 CONVERT TO CHARACTER SET utf8;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci;
+SHOW CREATE TABLE t1;
+
+
+# A 'null' ALTER operation
+
+ALTER TABLE t1 FORCE;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test
new file mode 100644
index 00000000000..10722194121
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test
@@ -0,0 +1,31 @@
+--source include/have_rocksdb.inc
+
+#
+# ANALYZE TABLE statements
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b');
+CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (3,3,'c');
+ANALYZE TABLE t1;
+INSERT INTO t2 VALUES (1,4,'d');
+ANALYZE NO_WRITE_TO_BINLOG TABLE t2;
+INSERT INTO t1 VALUES (4,5,'e');
+INSERT INTO t2 VALUES (2,6,'f');
+ANALYZE LOCAL TABLE t1, t2;
+
+DROP TABLE t1, t2;
+
+ --let $create_definition = a $int_indexed_col, $default_index(a)
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7);
+ANALYZE TABLE t1;
+INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12);
+ANALYZE TABLE t1;
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test
new file mode 100644
index 00000000000..4f759a8ec60
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test
@@ -0,0 +1,44 @@
+--source include/have_rocksdb.inc
+
+# Tests the Apply_changes_iter class for walking forward and backwards
+# with data in both the transaction class and in the rocksdb storage layer
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY,
+ key1 INT NOT NULL,
+ KEY (key1)
+) ENGINE=ROCKSDB;
+
+INSERT INTO t1 VALUES (12,12);
+INSERT INTO t1 VALUES (6,6);
+BEGIN;
+INSERT INTO t1 VALUES (8,8), (10,10);
+SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC;
+SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC;
+SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC;
+SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC;
+ROLLBACK;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY,
+ key1 INT NOT NULL,
+ KEY (key1) COMMENT 'rev:cf'
+) ENGINE=ROCKSDB;
+
+INSERT INTO t2 VALUES (12,12);
+INSERT INTO t2 VALUES (6,6);
+BEGIN;
+INSERT INTO t2 VALUES (8,8), (10,10);
+SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC;
+SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC;
+SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC;
+SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC;
+ROLLBACK;
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test
new file mode 100644
index 00000000000..68ad21bea1c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (pk) VALUES (3), (2), (1);
+SELECT * FROM t1;
+
+--source include/restart_mysqld.inc
+
+INSERT INTO t1 (pk) VALUES (4);
+SELECT * FROM t1;
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test
new file mode 100644
index 00000000000..c3f3550e303
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test
@@ -0,0 +1,65 @@
+--source include/have_rocksdb.inc
+
+#
+# auto-increment-offset and auto-increment-increment
+#
+
+############################################
+# TODO:
+# This test currently produces a wrong result
+# from line 36 of the result file onwards
+# due to bug MySQL:47118.
+# When/if the bug is fixed,
+# the result will need to be updated
+############################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo #---------------------------
+--echo # auto_increment_offset
+--echo #---------------------------
+SET auto_increment_offset = 200;
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+
+# If auto_increment_offset is greater than auto_increment_increment,
+# the offset is ignored
+
+INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c');
+SELECT LAST_INSERT_ID();
+SELECT a,b FROM t1 ORDER BY a;
+
+--echo #---------------------------
+--echo # auto_increment_increment
+--echo #---------------------------
+
+SET auto_increment_increment = 300;
+# offset should not be ignored anymore
+
+INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f');
+SELECT LAST_INSERT_ID();
+SELECT a,b FROM t1 ORDER BY a;
+
+SET auto_increment_increment = 50;
+INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i');
+SELECT LAST_INSERT_ID();
+SELECT a,b FROM t1 ORDER BY a;
+DROP TABLE t1;
+
+
+--echo #---------------------------
+--echo # offset is greater than the max value
+--echo #---------------------------
+
+SET auto_increment_increment = 500;
+SET auto_increment_offset = 300;
+
+CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (NULL);
+SELECT LAST_INSERT_ID();
+SELECT a FROM t1 ORDER BY a;
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test
new file mode 100644
index 00000000000..375571f705d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test
@@ -0,0 +1,3 @@
+--source include/have_rocksdb.inc
+
+--echo # The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE.
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt
new file mode 100644
index 00000000000..8600e9e415c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20
+--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc
new file mode 100644
index 00000000000..acc1a9f2365
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc
@@ -0,0 +1,63 @@
+--source include/have_rocksdb.inc
+
+let tmpl_ddl= suite/rocksdb/t/bloomfilter_table_def.tmpl;
+let ddl= $MYSQL_TMP_DIR/bloomfilter_create.sql;
+
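+# bloom_start() snapshots the bloom filter counters; bloom_end() reports
+# whether any prefix bloom filter checks happened since the last bloom_start().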
+DELIMITER //;
+CREATE PROCEDURE bloom_start()
+BEGIN
+ select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+ select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+END//
+CREATE PROCEDURE bloom_end()
+BEGIN
+select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+END//
+DELIMITER ;//
+
+
+#BF is sometimes invoked and useful
+--exec sed s/##CF##//g $tmpl_ddl > $ddl
+--source $ddl
+--source suite/rocksdb/t/bloomfilter_load_select.inc
+
+#BF is always invoked but not useful at all
+--exec sed s/##CF##/" COMMENT 'cf_short_prefix'"/g $tmpl_ddl > $ddl
+--source $ddl
+--source suite/rocksdb/t/bloomfilter_load_select.inc
+
+
+#BF is most of the time invoked and useful
+--exec sed s/##CF##/" COMMENT 'cf_long_prefix'"/g $tmpl_ddl > $ddl
+--source $ddl
+--source suite/rocksdb/t/bloomfilter_load_select.inc
+
+# BUG: Prev() with prefix lookup should not use prefix bloom filter
+create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin;
+--disable_query_log
+let $max = 100000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO r1 VALUES ($i,$i,$i,$i,$i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 asc;
+call bloom_end();
+call bloom_start();
+select * from r1 where id1=1 and id2 in (1) order by id3 desc;
+call bloom_end();
+
+# cleanup
+DROP PROCEDURE bloom_start;
+DROP PROCEDURE bloom_end;
+truncate table t1;
+optimize table t1;
+truncate table t2;
+optimize table t2;
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists r1;
+--remove_file $ddl
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test
new file mode 100644
index 00000000000..efcf9ee1f73
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test
@@ -0,0 +1 @@
+--source bloomfilter.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt
new file mode 100644
index 00000000000..f3824106b25
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:24
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test
new file mode 100644
index 00000000000..c4f1570ec41
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test
@@ -0,0 +1,103 @@
+--source include/have_rocksdb.inc
+
+## Test 0: Eq cond len includes VARCHAR, and real cond len < prefix bloom len < VARCHAR definition len
+CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin;
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $insert = INSERT INTO t0 VALUES('X', $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+# BF not used
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1;
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+DROP TABLE t0;
+
+
+## Test 1: Eq cond len is shorter than prefix bloom len
+CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb;
+
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $insert = INSERT INTO t1 VALUES(1, 1, $i, $i);
+ eval $insert;
+ inc $i;
+}
+--enable_query_log
+
+# BF not used (4+8+4=16)
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2;
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+# BF not used (4+8=12)
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2;
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+DROP TABLE t1;
+
+
+## Test 2: Long IN and short IN (varchar) -- can_use_bloom_filter changes within the same query
+CREATE TABLE t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin;
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $insert = INSERT INTO t2 VALUES($i, $i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+# BF used for large cond, not used for short cond
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100');
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200');
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+# BF not used because cond length is too small in all cases
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200');
+select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+DROP TABLE t2;
+
+
+## Test 3: Eq cond len is longer than prefix bloom len
+CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin;
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ if ($i != 5000) {
+ let $insert = INSERT INTO t3 VALUES(1, $i, $i, $i);
+ eval $insert;
+ }
+ inc $i;
+}
+--enable_query_log
+
+# Full BF works with Get(), Block based does not.
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1;
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful';
+
+# BF used (4+8+8+8)
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1;
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500;
+select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful';
+
+DROP TABLE t3;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt
new file mode 100644
index 00000000000..ef6d0fd554a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt
@@ -0,0 +1,3 @@
+--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20
+--rocksdb_debug_optimizer_n_rows=1000
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test
new file mode 100644
index 00000000000..a15e2a89693
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test
@@ -0,0 +1,118 @@
+--source include/have_rocksdb.inc
+
+--source include/restart_mysqld.inc
+CREATE TABLE `linktable` (
+ `id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `visibility` tinyint(3) NOT NULL DEFAULT '0',
+ `data` varchar(255) NOT NULL DEFAULT '',
+ `time` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `version` int(11) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk',
+ KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type',
+ KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2',
+ KEY `id1_type3` (`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $insert = INSERT INTO linktable VALUES($i, $i, $i, $i, 1, 1, $i, $i, $i);
+ eval $insert;
+ inc $i;
+}
+--enable_query_log
+
+## HA_READ_PREFIX_LAST_OR_PREV
+# BF len 21
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc;
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+# BF len 20
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc;
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+# BF len 13
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc;
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+## HA_READ_PREFIX_LAST_OR_PREV (no end range)
+# BF len 20
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc;
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+# BF len 19
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc;
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+--echo ## HA_READ_PREFIX_LAST
+--echo # BF len 20
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 order by time desc;
+select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+--echo # BF len 19
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc;
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+--echo # BF len 12
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc;
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+
+DROP TABLE linktable;
+--source include/restart_mysqld.inc
+
+--echo #
+--echo # bloom filter prefix is 20 byte
+--echo # Create a key which is longer than that, so that we see that
+--echo # eq_cond_len= slice.size() - 1;
+--echo # doesnt work.
+--echo #
+--echo # indexnr 4
+--echo # kp0 + 4 = 8
+--echo # kp1 + 8 = 16
+--echo # kp2 + 8 = 24 24>20 byte length prefix
+--echo # kp3 + 8 = 28
+
+create table t1 (
+ pk int primary key,
+ kp0 int not null,
+ kp1 bigint not null,
+ kp2 bigint not null,
+ kp3 bigint not null,
+ key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1'
+) engine=rocksdb;
+
+insert into t1 values (1, 1,1, 1,1);
+insert into t1 values (10,1,1,0x12FFFFFFFFFF,1);
+insert into t1 values (11,1,1,0x12FFFFFFFFFF,1);
+insert into t1 values (20,2,2,0x12FFFFFFFFFF,1);
+insert into t1 values (21,2,2,0x12FFFFFFFFFF,1);
+
+--source include/restart_mysqld.inc
+
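+# Column 9 of the EXPLAIN output (the rows estimate) is not deterministic, so mask it.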
+--replace_column 9 #
+explain
+select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
+show status like '%rocksdb_bloom_filter_prefix%';
+
+select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
+show status like '%rocksdb_bloom_filter_prefix%';
+--echo # The following MUST show TRUE:
+select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
+
+drop table t1;
+# Key length is 4 + 8 + 8 = 20
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt
new file mode 100644
index 00000000000..0a325757962
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=write_buffer_size=16k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:12
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test
new file mode 100644
index 00000000000..76ec6ca101f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test
@@ -0,0 +1,52 @@
+--source include/have_rocksdb.inc
+
+# Fixing issue#230 -- Prefix bloom filter + reverse column family misses some rows
+# This test inserts 20,000 rows into t1, then selects them one by one from a stored procedure.
+# If the select does not return any row, it is wrong.
+
+CREATE TABLE t1 (
+ `id1` int unsigned NOT NULL DEFAULT '0',
+ `id2` int unsigned NOT NULL DEFAULT '0',
+ `link_type` int unsigned NOT NULL DEFAULT '0',
+ `visibility` tinyint NOT NULL DEFAULT '0',
+ `data` varchar(255) NOT NULL DEFAULT '',
+ `time` int unsigned NOT NULL DEFAULT '0',
+ `version` int unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+
+DELIMITER //;
+CREATE PROCEDURE select_test()
+BEGIN
+ DECLARE id1_cond INT;
+ SET id1_cond = 1;
+ WHILE id1_cond <= 20000 DO
+ SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt;
+ IF @cnt < 1 THEN
+ SELECT id1_cond, @cnt;
+ END IF;
+ SET id1_cond = id1_cond + 1;
+ END WHILE;
+END//
+DELIMITER ;//
+
+--disable_query_log
+let $i = 1;
+while ($i <= 20000) {
+ let $insert = INSERT INTO t1 VALUES($i, $i, 1, 1, $i, $i, $i);
+ eval $insert;
+ inc $i;
+}
+--enable_query_log
+
+--echo "Skipping bloom filter"
+SET session rocksdb_skip_bloom_filter_on_read=1;
+CALL select_test();
+
+--echo "Using bloom filter"
+SET session rocksdb_skip_bloom_filter_on_read=0;
+CALL select_test();
+
+DROP PROCEDURE select_test;
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc
new file mode 100644
index 00000000000..1f1a4b9810f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc
@@ -0,0 +1,189 @@
+# loading some data (larger than write buf size) to cause compaction
+--exec perl suite/rocksdb/t/gen_insert.pl t1 > $MYSQL_TMP_DIR/insert_t1.sql
+--exec perl suite/rocksdb/t/gen_insert.pl t2 > $MYSQL_TMP_DIR/insert_t2.sql
+--disable_query_log
+--source $MYSQL_TMP_DIR/insert_t1.sql
+--source $MYSQL_TMP_DIR/insert_t2.sql
+--enable_query_log
+
+# BF conditions (prefix short(4B)|medium(20B)|long(240B))
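+# In the triples below, 'o'/'x' marks whether the bloom filter is expected to be
+# checked for the short, medium and long prefix settings, respectively.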
+#0 no eq condition (o, x, x)
+## cond length 4, key length > 4
+call bloom_start();
+select count(*) from t1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(PRIMARY) where id1 >= 1;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2 >= 1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index(id3_id4) where id3 >= '1';
+call bloom_end();
+
+#1 cond length == prefix length (o, o, x)
+## cond length 4+8+8=20, key length > 20
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=2 and id1=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=24 and id1=12;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=88 and id1=44;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=100 and id1=50;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index(id2_id1) where id2=428 and id1=214;
+call bloom_end();
+## (cond_length == extended_key_length(4+8+4+4=20) == prefix_length)
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005;
+call bloom_end();
+
+#2 cond length < actual key length and cond_length < prefix length (o, x, x)
+## With a long prefix key, most cases fall into this category unless all key columns are used.
+## cond length 4+8=12, key length > 12
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=23;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=345;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=456;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=23;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=345;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id4) where id2=456;
+call bloom_end();
+
+#3 both actual key length and cond length >= prefix length (o, o, o/x)
+## cond length 4+8+9+8+4=33
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124';
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60;
+call bloom_end();
+## 4+8+9=25
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=1 and id3='1';
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3) where id2=23 and id3='23';
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=1 and id3='1';
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id3_id2) where id2=23 and id3='23';
+call bloom_end();
+
+#4 actual key length > prefix length and cond length < prefix length (o, x, x)
+## cond length 4+8=12
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=12;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=23;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=100;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (PRIMARY) where id1=234;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=36;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2_id3_id1_id4) where id2=234;
+call bloom_end();
+
+#5 cond length == extended key length < prefix length (o, o, o)
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=1 and id4=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=23 and id4=115;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=500 and id4=2500;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id2) where id2=601 and id4=3005;
+call bloom_end();
+## 4+9+4=17
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='1' and id4=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id3_id4) where id3='12' and id4=60;
+call bloom_end();
+
+#6 cond length == non-extended key length < prefix length, actual key length > prefix length (o, x, x)
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=1;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=23;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=345;
+call bloom_end();
+call bloom_start();
+select count(*) from t1 force index (id2) where id2=456;
+call bloom_end();
+## 4+9+4=17
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='100' and id5=500;
+call bloom_end();
+call bloom_start();
+select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200;
+call bloom_end();
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt
new file mode 100644
index 00000000000..5c62c7cf986
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt
@@ -0,0 +1,3 @@
+--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20
+--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240}
+--rocksdb_skip_bloom_filter_on_read=1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test
new file mode 100644
index 00000000000..efcf9ee1f73
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test
@@ -0,0 +1 @@
+--source bloomfilter.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl
new file mode 100644
index 00000000000..5cf033d4726
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.tmpl
@@ -0,0 +1,36 @@
+drop table if exists t1;
+drop table if exists t2;
+create table t1 (
+ id1 bigint not null,
+ id2 bigint not null,
+ id3 varchar(100) not null,
+ id4 int not null,
+ id5 int not null,
+ value bigint,
+ value2 varchar(100),
+ primary key (id1, id2, id3, id4)##CF##,
+ index id2 (id2)##CF##,
+ index id2_id1 (id2, id1)##CF##,
+ index id2_id3 (id2, id3)##CF##,
+ index id2_id4 (id2, id4)##CF##,
+ index id2_id3_id1_id4 (id2, id3, id1, id4)##CF##,
+ index id3_id2 (id3, id2)##CF##
+) engine=ROCKSDB;
+
+create table t2 (
+ id1 bigint not null,
+ id2 bigint not null,
+ id3 varchar(100) not null,
+ id4 int not null,
+ id5 int not null,
+ value bigint,
+ value2 varchar(100),
+ primary key (id4)##CF##,
+ index id2 (id2)##CF##,
+ index id2_id3 (id2, id3)##CF##,
+ index id2_id4 (id2, id4)##CF##,
+ index id2_id4_id5 (id2, id4, id5)##CF##,
+ index id3_id4 (id3, id4)##CF##,
+ index id3_id5 (id3, id5)##CF##
+) engine=ROCKSDB;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test
new file mode 100644
index 00000000000..cfa2f6ff747
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test
@@ -0,0 +1,110 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+# Create a table with a primary key and one secondary key as well as one
+# more column
+CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
+
+# Create a second identical table to validate that bulk loading different
+# tables in the same session works
+CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
+
+# Create a third table using partitions to validate that bulk loading works
+# across a partitioned table
+CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'
+ PARTITION BY KEY() PARTITIONS 4;
+
+--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")`
+
+# Create a text file with data to import into the table.
+# The primary key is in sorted order and the secondary keys are randomly generated
+--let ROCKSDB_INFILE = $file
+perl;
+my $fn = $ENV{'ROCKSDB_INFILE'};
+open(my $fh, '>>', $fn) || die "perl open($fn): $!";
+my $max = 10000000;
+my @chars = ("A".."Z", "a".."z", "0".."9");
+my @lowerchars = ("a".."z");
+my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1);
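+# Encode the row counter as a fixed-width base-26 string ("aaaaa".."zzzzz") so
+# that primary keys are generated in ascending (sorted) order.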
+for (my $ii = 0; $ii < $max; $ii++)
+{
+ my $pk;
+ my $tmp = $ii;
+ foreach (@powers_of_26)
+ {
+ $pk .= $lowerchars[$tmp / $_];
+ $tmp = $tmp % $_;
+ }
+
+ my $num = int(rand(25)) + 6;
+ my $a;
+ $a .= $chars[rand(@chars)] for 1..$num;
+
+ $num = int(rand(25)) + 6;
+ my $b;
+ $b .= $chars[rand(@chars)] for 1..$num;
+ print $fh "$pk\t$a\t$b\n";
+}
+close($fh);
+EOF
+
+--file_exists $file
+
+# Make sure a snapshot held by another user doesn't block the bulk load
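+# (Bulk load is supposed to bypass the usual snapshot handling, so the consistent
+# snapshot opened by the 'other' connection must not stall the LOAD DATA below.)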
+connect (other,localhost,root,,);
+set session transaction isolation level repeatable read;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+start transaction with consistent snapshot;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+
+connection default;
+set rocksdb_bulk_load=1;
+set rocksdb_bulk_load_size=100000;
+--disable_query_log
+--echo LOAD DATA INFILE <input_file> INTO TABLE t1;
+eval LOAD DATA INFILE '$file' INTO TABLE t1;
+--echo LOAD DATA INFILE <input_file> INTO TABLE t2;
+eval LOAD DATA INFILE '$file' INTO TABLE t2;
+--echo LOAD DATA INFILE <input_file> INTO TABLE t3;
+eval LOAD DATA INFILE '$file' INTO TABLE t3;
+--enable_query_log
+set rocksdb_bulk_load=0;
+
+# Make sure all the data is there.
+select count(pk) from t1;
+select count(a) from t1;
+select count(b) from t1;
+select count(pk) from t2;
+select count(a) from t2;
+select count(b) from t2;
+select count(pk) from t3;
+select count(a) from t3;
+select count(b) from t3;
+
+# Create dummy files with the bulk load extension. They should be removed when
+# the server starts.
+--let $tmpext = .bulk_load.tmp
+--let $MYSQLD_DATADIR= `SELECT @@datadir`
+--let $datadir = $MYSQLD_DATADIR/.rocksdb
+--write_file $datadir/test$tmpext
+dummy data
+EOF
+--write_file $datadir/longfilenamethatvalidatesthatthiswillgetdeleted$tmpext
+dummy data
+EOF
+
+# Show that the files exist
+--list_files $datadir *$tmpext
+
+# Now restart the server and make sure it automatically removes this test file
+--source include/restart_mysqld.inc
+
+# Show that the files no longer exist
+--list_files $datadir *$tmpext
+
+# Cleanup
+disconnect other;
+DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt
new file mode 100644
index 00000000000..ed6029f9a27
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt
@@ -0,0 +1,4 @@
+--skip-rocksdb_debug_optimizer_no_zero_cardinality
+--rocksdb_compaction_sequential_deletes=0
+--force-restart
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test
new file mode 100644
index 00000000000..df2b0673315
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test
@@ -0,0 +1,41 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+create table t1(
+ id bigint not null primary key,
+ i1 bigint, #unique
+ i2 bigint, #repeating
+ c1 varchar(20), #unique
+ c2 varchar(20), #repeating
+ index t1_1(id, i1),
+ index t1_2(i1, i2),
+ index t1_3(i2, i1),
+ index t1_4(c1, c2),
+ index t1_5(c2, c1)
+) engine=rocksdb;
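+# i1/c1 take a distinct value per row while i2/c2 repeat every 10 rows
+# ($i div 10 below), so the reported index cardinalities should differ roughly
+# by a factor of ten.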
+--disable_query_log
+let $i=0;
+while ($i<100000)
+{
+ inc $i;
+ eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10);
+}
+--enable_query_log
+
+# Flush memtable out to SST and display index cardinalities
+optimize table t1;
+show index in t1;
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+
+--echo restarting...
+--source include/restart_mysqld.inc
+
+# display index cardinalities after the restart
+show index in t1;
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py
new file mode 100644
index 00000000000..a3d50f305a4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py
@@ -0,0 +1,31 @@
+import sys
+import re
+
+"""
+Example usage:
+ python check_log_for_xa.py path/to/log/mysqld.2.err rollback,commit,prepare
+"""
+
+log_path = sys.argv[1]
+desired_filters = sys.argv[2]
+
+all_filters = [
+ ('rollback', re.compile('(\[Note\] rollback xid .+)')),
+ ('commit', re.compile('(\[Note\] commit xid .+)')),
+ ('prepare',
+ re.compile('(\[Note\] Found \d+ prepared transaction\(s\) in \w+)')),
+]
+
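+# Keep only the filters whose name appears in the comma-separated list given on
+# the command line; a substring check is enough since the names do not overlap.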
+active_filters = filter(lambda f: f[0] in desired_filters, all_filters)
+
+results = set()
+with open(log_path) as log:
+ for line in log:
+ line = line.strip()
+ for f in active_filters:
+ match = f[1].search(line)
+ if match:
+ results.add("**found '%s' log entry**" % f[0])
+
+for res in results:
+ print res
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc
new file mode 100644
index 00000000000..c108a97362d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc
@@ -0,0 +1,54 @@
+#
+# CHECK TABLE statements
+#
+# Note: the output is likely to be different for the engine under test,
+# in which case rdiff will be needed. Or, the output might say that
+# the storage engine does not support CHECK.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+CHECK TABLE t1;
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+CHECK TABLE t1, t2 FOR UPGRADE;
+INSERT INTO t2 (a,b) VALUES (5,'e');
+CHECK TABLE t2 QUICK;
+INSERT INTO t1 (a,b) VALUES (6,'f');
+CHECK TABLE t1 FAST;
+INSERT INTO t1 (a,b) VALUES (7,'g');
+INSERT INTO t2 (a,b) VALUES (8,'h');
+CHECK TABLE t2, t1 MEDIUM;
+INSERT INTO t1 (a,b) VALUES (9,'i');
+INSERT INTO t2 (a,b) VALUES (10,'j');
+CHECK TABLE t1, t2 EXTENDED;
+INSERT INTO t1 (a,b) VALUES (11,'k');
+CHECK TABLE t1 CHANGED;
+
+DROP TABLE t1, t2;
+
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb;
+INSERT INTO t1 (a) VALUES (1),(2),(5);
+CHECK TABLE t1;
+INSERT INTO t1 (a) VALUES (6),(8),(12);
+CHECK TABLE t1 FOR UPGRADE;
+INSERT INTO t1 (a) VALUES (13),(15),(16);
+CHECK TABLE t1 QUICK;
+INSERT INTO t1 (a) VALUES (17),(120),(132);
+CHECK TABLE t1 FAST;
+INSERT INTO t1 (a) VALUES (801),(900),(7714);
+CHECK TABLE t1 MEDIUM;
+INSERT INTO t1 (a) VALUES (8760),(10023),(12000);
+CHECK TABLE t1 EXTENDED;
+INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028);
+CHECK TABLE t1 CHANGED;
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.test b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test
new file mode 100644
index 00000000000..4d349f7a167
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test
@@ -0,0 +1,12 @@
+--source include/have_rocksdb.inc
+
+#
+# CHECK TABLE statements
+#
+# Note: the output is likely to be different for the engine under test,
+# in which case rdiff will be needed. Or, the output might say that
+# the storage engine does not support CHECK.
+#
+
+--source check_table.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test
new file mode 100644
index 00000000000..e5de6246f60
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test
@@ -0,0 +1,107 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+--enable_warnings
+
+# Start from clean slate
+#--source include/restart_mysqld.inc
+
+CREATE TABLE t1 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t2 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t3 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t4 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+# Populate tables
+let $max = 1000;
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t2;
+--source drop_table_repopulate_table.inc
+let $table = t3;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+# Make sure new table gets unique indices
+CREATE TABLE t5 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+let $max = 1000;
+let $table = t5;
+--source drop_table_repopulate_table.inc
+
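+# set_checkpoint.inc is expected to request a checkpoint at $checkpoint
+# (presumably via SET GLOBAL rocksdb_create_checkpoint) and to verify that the
+# attempt succeeds or fails according to $succeeds.
+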
+# Create checkpoint without trailing '/'
+let $checkpoint = $MYSQL_TMP_DIR/checkpoint;
+let $succeeds = 1;
+--source set_checkpoint.inc
+
+# Create checkpoint with a trailing '/'
+let $checkpoint = $MYSQL_TMP_DIR/checkpoint/;
+let $succeeds = 1;
+--source set_checkpoint.inc
+
+# Set checkpoint dir as empty string, which fails
+let $checkpoint = ;
+let $succeeds = 0;
+--source set_checkpoint.inc
+
+# Set checkpoint as a directory that does not exist, which fails
+let $checkpoint = /does/not/exist;
+let $succeeds = 0;
+--source set_checkpoint.inc
+
+# Set checkpoint as a directory that already exists, which fails
+let $checkpoint = $MYSQL_TMP_DIR/already-existing-directory;
+--mkdir $checkpoint
+let $succeeds = 0;
+--source set_checkpoint.inc
+--exec rm -rf $checkpoint
+
+--disable_result_log
+truncate table t1;
+optimize table t1;
+truncate table t2;
+optimize table t2;
+truncate table t3;
+optimize table t3;
+truncate table t4;
+optimize table t4;
+truncate table t5;
+optimize table t5;
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists t3;
+drop table if exists t4;
+drop table if exists t5;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test
new file mode 100644
index 00000000000..fbe8028f6d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test
@@ -0,0 +1,76 @@
+--source include/have_rocksdb.inc
+
+#
+# CHECKSUM TABLE statements for standard CHECKSUM properties.
+# Live checksums are covered in checksum_table_live.test
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0;
+
+CHECKSUM TABLE t1;
+CHECKSUM TABLE t2, t1;
+CHECKSUM TABLE t1, t2 QUICK;
+CHECKSUM TABLE t1, t2 EXTENDED;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Issue #110: SQL command checksum returns inconsistent result
+--echo #
+create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb;
+insert into t1 values (2,'fooo');
+insert into t1 values (1,NULL);
+checksum table t1;
+checksum table t1;
+select * from t1 where pk=2;
+checksum table t1;
+checksum table t1;
+flush tables;
+checksum table t1;
+checksum table t1;
+
+drop table t1;
+
+--echo #
+--echo # The following test is about making sure MyRocks CHECKSUM TABLE
+--echo # values are the same as with InnoDB.
+--echo # If you see checksum values changed, make sure their counterparts
+--echo # in suite/innodb/r/checksum-matches-myrocks.result match.
+--echo #
+
+create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb;
+insert into t1 values (2,'fooo');
+insert into t1 values (1,NULL);
+checksum table t1;
+drop table t1;
+
+create table t1 (
+ pk bigint unsigned primary key,
+ col1 varchar(10),
+ col2 tinyint,
+ col3 double
+) engine=rocksdb;
+
+checksum table t1;
+
+insert into t1 values (1, NULL, NULL, NULL);
+insert into t1 values (2, 'foo', NULL, NULL);
+checksum table t1;
+
+insert into t1 values (3, NULL, 123, NULL);
+insert into t1 values (4, NULL, NULL, 2.78);
+checksum table t1;
+
+insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78);
+insert into t1 values (6, '', NULL, 2.78);
+checksum table t1;
+
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test
new file mode 100644
index 00000000000..da278ed7f9b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test
@@ -0,0 +1,24 @@
+--source include/have_rocksdb.inc
+
+#
+# CHECKSUM TABLE statements for live CHECKSUM.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+# For most engines CHECKSUM=1 option will be ignored,
+# and the results will be different
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+
+CHECKSUM TABLE t1;
+CHECKSUM TABLE t2, t1;
+CHECKSUM TABLE t1, t2 QUICK;
+CHECKSUM TABLE t1, t2 EXTENDED;
+
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc
new file mode 100644
index 00000000000..2d3c9292441
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc
@@ -0,0 +1,55 @@
+#
+# NOT NULL attribute in columns
+#
+# Usage:
+# let $col_type = <column type>;
+# let $col_default = <default value for a column>;
+# --source col_not_null.inc
+#
+# We will add NOT NULL to the column options.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo #----------------------------------
+--echo # $col_type NOT NULL columns without a default
+--echo #----------------------------------
+
+eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+
+--error ER_BAD_NULL_ERROR
+INSERT INTO t1 (c) VALUES (NULL);
+eval INSERT INTO t1 (c) VALUES ($col_default);
+SELECT HEX(c) FROM t1;
+
+DROP TABLE t1;
+
+--echo #----------------------------------
+--echo # $col_type NOT NULL columns with a default
+--echo #----------------------------------
+
+eval CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ c $col_type NOT NULL DEFAULT $col_default
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+--error ER_INVALID_DEFAULT
+eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL;
+
+--error ER_BAD_NULL_ERROR
+INSERT INTO t1 (c) VALUES (NULL);
+
+eval INSERT INTO t1 (c) VALUES ($col_default);
+eval INSERT INTO t1 () VALUES ();
+
+# HEX should be universal for all column types
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc
new file mode 100644
index 00000000000..4c5b89c9d00
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc
@@ -0,0 +1,61 @@
+#
+# NOT NULL attribute in TIMESTAMP columns
+#
+# This is a copy of col_not_null.inc, except that
+# instead of getting an error on inserting NULL into a non-NULL column,
+# we are getting the current timestamp (see MySQL:68472).
+# If the bug is ever fixed, this include file won't be needed anymore.
+
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo #----------------------------------
+--echo # $col_type NOT NULL column without a default
+--echo #----------------------------------
+
+eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+
+# Here is where the non-standard behavior strikes:
+# instead of an error, the current timestamp used to be inserted.
+
+# As of mysql-5.6.11, this no longer works, and we get an error:
+--error ER_BAD_NULL_ERROR
+INSERT INTO t1 (c) VALUES (NULL);
+eval INSERT INTO t1 (c) VALUES ($col_default);
+SELECT HEX(c) FROM t1;
+
+DROP TABLE t1;
+
+--echo #----------------------------------
+--echo # $col_type NOT NULL columns with a default
+--echo #----------------------------------
+
+eval CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ c $col_type NOT NULL DEFAULT $col_default
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+--error ER_INVALID_DEFAULT
+eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL;
+
+# Here is where the non-standard behavior strikes:
+# instead of an error, the current timestamp used to be inserted.
+
+# As of mysql-5.6.11, this no longer works, and we get an error:
+--error ER_BAD_NULL_ERROR
+INSERT INTO t1 (c) VALUES (NULL);
+
+eval INSERT INTO t1 (c) VALUES ($col_default);
+eval INSERT INTO t1 () VALUES ();
+
+# HEX should be universal for all column types
+SELECT pk, HEX(c) FROM t1 ORDER BY pk;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc
new file mode 100644
index 00000000000..7ebfee0b114
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc
@@ -0,0 +1,34 @@
+#
+# NULL attribute and DEFAULT NULL in columns
+#
+# Usage:
+# let $col_type = <column type>;
+# let $col_default = <default non-null value for a column>;
+# --source col_null.inc
+#
+# We will add NULL attribute to the column options.
+#
+
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ c $col_type NULL,
+ c1 $col_type NULL DEFAULT NULL,
+ c2 $col_type NULL DEFAULT $col_default,
+ pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL);
+eval INSERT INTO t1 (c,c1,c2) VALUES ($col_default,$col_default,$col_default);
+INSERT INTO t1 () VALUES ();
+
+SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk;
+SELECT pk, HEX(c2) FROM t1 ORDER BY pk;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test
new file mode 100644
index 00000000000..6f91ee7ca9a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test
@@ -0,0 +1,27 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether DEFAULT column attribute
+# is supported in CREATE and ALTER TABLE.
+# If the attribute is supported at all, it will be covered
+# in more details in col_option_null and col_option_not_null tests.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb;
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a) VALUES (1);
+SELECT a FROM t1;
+
+ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT '';
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (b) VALUES ('a');
+SELECT a,b FROM t1 ORDER BY a,b;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test
new file mode 100644
index 00000000000..fbb5a932fc0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test
@@ -0,0 +1,224 @@
+--source include/have_rocksdb.inc
+
+#
+# NOT NULL column attribute
+#
+
+#########################################
+# TODO:
+# Currently the test produces incorrect (?) result
+# due to bug MySQL:68472. If the bug is ever fixed,
+# the test and result files will need to be updated.
+#########################################
+
+
+let $extra_col_opts = NOT NULL;
+
+--echo ########################
+--echo # BINARY columns
+--echo ########################
+
+--source type_binary.inc
+--let $col_type = BINARY
+--let $col_default = 0
+--source col_not_null.inc
+
+--echo ########################
+--echo # VARBINARY columns
+--echo ########################
+
+--source type_varbinary.inc
+--let $col_type = VARBINARY(64)
+--let $col_default = 'test'
+--source col_not_null.inc
+
+--echo ########################
+--echo # BIT columns
+--echo ########################
+
+--source type_bit.inc
+--let $col_type = BIT
+--let $col_default = 1
+--source col_not_null.inc
+
+--echo ########################
+--echo # BLOB columns
+--echo ########################
+
+--source type_blob.inc
+
+--let $col_default = ''
+
+--let $col_type = BLOB
+--source col_not_null.inc
+
+--let $col_type = TINYBLOB
+--source col_not_null.inc
+
+--let $col_type = MEDIUMBLOB
+--source col_not_null.inc
+
+--let $col_type = LONGBLOB
+--source col_not_null.inc
+
+--echo ########################
+--echo # BOOL columns
+--echo ########################
+
+--source type_bool.inc
+--let $col_type = BOOL
+--let $col_default = '0'
+--source col_not_null.inc
+
+--echo ########################
+--echo # CHAR columns
+--echo ########################
+
+--source type_char.inc
+--let $col_type = CHAR
+--let $col_default = '_'
+--source col_not_null.inc
+
+--echo ########################
+--echo # VARCHAR columns
+--echo ########################
+
+--source type_varchar.inc
+--let $col_type = VARCHAR(64)
+--let $col_default = 'test default'
+--source col_not_null.inc
+
+--echo ########################
+--echo # date and time columns
+--echo ########################
+
+--source type_date_time.inc
+
+SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12');
+
+--let $col_type = DATE
+--let $col_default = '2012-12-21'
+--source col_not_null.inc
+
+--let $col_type = DATETIME
+--let $col_default = '2012-12-21 12:21:12'
+--source col_not_null.inc
+
+# Even with explicit-defaults-for-timestamps, we still can't use
+# the standard include file, due to bug MySQL:68472
+
+--let $col_type = TIMESTAMP
+--let $col_default = '2012-12-21 12:21:12'
+--source col_not_null_timestamp.inc
+
+--let $col_type = TIME
+--let $col_default = '12:21:12'
+--source col_not_null.inc
+
+--let $col_type = YEAR
+--let $col_default = '2012'
+--source col_not_null.inc
+
+--let $col_type = YEAR(2)
+--let $col_default = '12'
+--source col_not_null.inc
+
+--echo ########################
+--echo # ENUM columns
+--echo ########################
+
+--source type_enum.inc
+
+--let $col_type = ENUM('test1','test2','test3')
+--let $col_default = 'test2'
+--source col_not_null.inc
+
+--echo ########################
+--echo # Fixed point columns (NUMERIC, DECIMAL)
+--echo ########################
+
+--source type_fixed.inc
+
+--let $col_type = DECIMAL
+--let $col_default = 1.1
+--source col_not_null.inc
+
+--let $col_type = NUMERIC
+--let $col_default = 0
+--source col_not_null.inc
+
+--echo ########################
+--echo # Floating point columns (FLOAT, DOUBLE)
+--echo ########################
+
+--source type_float.inc
+
+--let $col_type = FLOAT
+--let $col_default = 1.1
+--source col_not_null.inc
+
+--let $col_type = DOUBLE
+--let $col_default = 0
+--source col_not_null.inc
+
+--echo ########################
+--echo # INT columns
+--echo ########################
+
+--source type_int.inc
+
+--let $col_type = INT
+--let $col_default = 2147483647
+--source col_not_null.inc
+
+--let $col_type = TINYINT
+--let $col_default = 127
+--source col_not_null.inc
+
+--let $col_type = SMALLINT
+--let $col_default = 0
+--source col_not_null.inc
+
+--let $col_type = MEDIUMINT
+--let $col_default = 1
+--source col_not_null.inc
+
+--let $col_type = BIGINT
+--let $col_default = 9223372036854775807
+--source col_not_null.inc
+
+--echo ########################
+--echo # SET columns
+--echo ########################
+
+--source type_set.inc
+--let $col_type = SET('test1','test2','test3')
+--let $col_default = 'test2,test3'
+--source col_not_null.inc
+
+--echo ########################
+--echo # TEXT columns
+--echo ########################
+
+--source type_text.inc
+
+--let $col_default = ''
+
+--let $col_type = TEXT
+--source col_not_null.inc
+
+--let $col_type = TINYTEXT
+--source col_not_null.inc
+
+--let $col_type = MEDIUMTEXT
+--source col_not_null.inc
+
+--let $col_type = LONGTEXT
+--source col_not_null.inc
+
+
+--let $col_type =
+--let $col_default =
+--let $extra_col_opts =
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test
new file mode 100644
index 00000000000..18f2601eb16
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test
@@ -0,0 +1,216 @@
+--source include/have_rocksdb.inc
+
+#
+# NULL column attribute
+#
+
+let $extra_col_opts = NULL;
+
+
+--echo ########################
+--echo # BINARY columns
+--echo ########################
+
+--source type_binary.inc
+--let $col_type = BINARY
+--let $col_default = 0
+--source col_null.inc
+
+--echo ########################
+--echo # VARBINARY columns
+--echo ########################
+
+--source type_varbinary.inc
+--let $col_type = VARBINARY(64)
+--let $col_default = 'test'
+--source col_null.inc
+
+--echo ########################
+--echo # BIT columns
+--echo ########################
+
+--source type_bit.inc
+--let $col_type = BIT
+--let $col_default = 1
+--source col_null.inc
+
+--echo ########################
+--echo # BLOB columns
+--echo ########################
+
+--source type_blob.inc
+
+--let $col_default = ''
+
+--let $col_type = BLOB
+--source col_null.inc
+
+--let $col_type = TINYBLOB
+--source col_null.inc
+
+--let $col_type = MEDIUMBLOB
+--source col_null.inc
+
+--let $col_type = LONGBLOB
+--source col_null.inc
+
+--echo ########################
+--echo # BOOL columns
+--echo ########################
+
+--source type_bool.inc
+--let $col_type = BOOL
+--let $col_default = '0'
+--source col_null.inc
+
+
+--echo ########################
+--echo # CHAR columns
+--echo ########################
+
+--source type_char.inc
+--let $col_type = CHAR
+--let $col_default = '_'
+--source col_null.inc
+
+--echo ########################
+--echo # VARCHAR columns
+--echo ########################
+
+
+--source type_varchar.inc
+--let $col_type = VARCHAR(64)
+--let $col_default = 'test default'
+--source col_null.inc
+
+
+--echo ########################
+--echo # date and time columns
+--echo ########################
+
+--source type_date_time.inc
+
+--let $col_type = DATE
+--let $col_default = '2012-12-21'
+--source col_null.inc
+
+--let $col_type = DATETIME
+--let $col_default = '2012-12-21 12:21:12'
+--source col_null.inc
+
+--let $col_type = TIMESTAMP
+--let $col_default = '2012-12-21 12:21:12'
+--source col_null.inc
+
+--let $col_type = TIME
+--let $col_default = '12:21:12'
+--source col_null.inc
+
+--let $col_type = YEAR
+--let $col_default = '2012'
+--source col_null.inc
+
+--let $col_type = YEAR(2)
+--let $col_default = '12'
+--source col_null.inc
+
+
+--echo ########################
+--echo # ENUM columns
+--echo ########################
+
+--source type_enum.inc
+--let $col_type = ENUM('test1','test2','test3')
+--let $col_default = 'test2'
+--source col_null.inc
+
+--echo ########################
+--echo # Fixed point columns (NUMERIC, DECIMAL)
+--echo ########################
+
+--source type_fixed.inc
+
+--let $col_type = DECIMAL
+--let $col_default = 1.1
+--source col_null.inc
+
+--let $col_type = NUMERIC
+--let $col_default = 0
+--source col_null.inc
+
+--echo ########################
+--echo # Floating point columns (FLOAT, DOUBLE)
+--echo ########################
+
+--source type_float.inc
+
+--let $col_type = FLOAT
+--let $col_default = 1.1
+--source col_null.inc
+
+--let $col_type = DOUBLE
+--let $col_default = 0
+--source col_null.inc
+
+--echo ########################
+--echo # INT columns
+--echo ########################
+
+--source type_int.inc
+
+--let $col_type = INT
+--let $col_default = 2147483647
+--source col_null.inc
+
+--let $col_type = TINYINT
+--let $col_default = 127
+--source col_null.inc
+
+--let $col_type = SMALLINT
+--let $col_default = 0
+--source col_null.inc
+
+--let $col_type = MEDIUMINT
+--let $col_default = 1
+--source col_null.inc
+
+--let $col_type = BIGINT
+--let $col_default = 9223372036854775807
+--source col_null.inc
+
+--echo ########################
+--echo # SET columns
+--echo ########################
+
+--source type_set.inc
+--let $col_type = SET('test1','test2','test3')
+--let $col_default = 'test2,test3'
+--source col_null.inc
+
+
+--echo ########################
+--echo # TEXT columns
+--echo ########################
+
+--source type_text.inc
+
+--let $col_default = ''
+
+--let $col_type = TEXT
+--source col_null.inc
+
+--let $col_type = TINYTEXT
+--source col_null.inc
+
+--let $col_type = MEDIUMTEXT
+--source col_null.inc
+
+--let $col_type = LONGTEXT
+--source col_null.inc
+
+
+--let $col_type =
+--let $col_default =
+--let $extra_col_opts =
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test
new file mode 100644
index 00000000000..25cda84ce2e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test
@@ -0,0 +1,74 @@
+--source include/have_rocksdb.inc
+
+#
+# UNSIGNED column attribute
+#
+
+--let $extra_col_opts = UNSIGNED
+
+--echo ########################
+--echo # Fixed point columns (NUMERIC, DECIMAL)
+--echo ########################
+
+--source type_fixed.inc
+
+CREATE TABLE t1 (
+ a DECIMAL UNSIGNED,
+ b NUMERIC UNSIGNED,
+ PRIMARY KEY (a)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b) VALUES (1.0,-1.0);
+INSERT INTO t1 (a,b) VALUES (-100,100);
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo ########################
+--echo # Floating point columns (FLOAT, DOUBLE)
+--echo ########################
+
+--source type_float.inc
+
+CREATE TABLE t1 (
+ a DOUBLE UNSIGNED,
+ b FLOAT UNSIGNED,
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b) VALUES (1.0,-1.0);
+INSERT INTO t1 (a,b) VALUES (-100,100);
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo ########################
+--echo # INT columns
+--echo ########################
+
+--source type_int.inc
+
+CREATE TABLE t1 (
+ t TINYINT UNSIGNED,
+ s SMALLINT UNSIGNED,
+ m MEDIUMINT UNSIGNED,
+ i INT UNSIGNED,
+ b BIGINT UNSIGNED,
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615);
+INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1);
+--sorted_result
+SELECT t,s,m,i,b FROM t1;
+
+DROP TABLE t1;
+
+--let $extra_col_opts =
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test
new file mode 100644
index 00000000000..37982ae0964
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test
@@ -0,0 +1,67 @@
+--source include/have_rocksdb.inc
+
+#
+# ZEROFILL column attribute
+#
+
+let $extra_col_opts = ZEROFILL;
+
+--echo ########################
+--echo # Fixed point columns (NUMERIC, DECIMAL)
+--echo ########################
+
+--source type_fixed.inc
+
+CREATE TABLE t1 (
+ a DECIMAL ZEROFILL,
+ b NUMERIC ZEROFILL,
+ PRIMARY KEY (a)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b) VALUES (1.1,1234);
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo ########################
+--echo # Floating point columns (FLOAT, DOUBLE)
+--echo ########################
+
+--source type_float.inc
+
+CREATE TABLE t1 (
+ a DOUBLE ZEROFILL,
+ b FLOAT ZEROFILL,
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b) VALUES (1,1234.5);
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo ########################
+--echo # INT columns
+--echo ########################
+
+--source type_int.inc
+
+CREATE TABLE t1 (
+ t TINYINT ZEROFILL,
+ s SMALLINT ZEROFILL,
+ m MEDIUMINT ZEROFILL,
+ i INT ZEROFILL,
+ b BIGINT ZEROFILL,
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0);
+SELECT t,s,m,i,b FROM t1;
+DROP TABLE t1;
+
+--let $extra_col_opts =
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt
new file mode 100644
index 00000000000..79e591636fd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt
@@ -0,0 +1 @@
+--rocksdb_strict_collation_check=ON
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test
new file mode 100644
index 00000000000..c3fcaad776c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test
@@ -0,0 +1,181 @@
+--source include/have_rocksdb.inc
+--source include/have_fullregex.inc
+
+SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# ci non-indexed column is allowed
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
+DROP TABLE t1;
+
+# ci indexed column is not allowed
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8;
+# ci indexed column with rocksdb_strict_collation_check=OFF is allowed.
+SET GLOBAL rocksdb_strict_collation_check=0;
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8;
+DROP TABLE t1;
+SET GLOBAL rocksdb_strict_collation_check=1;
+
+# cs indexed column is allowed
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+
+# cs latin1_bin is allowed
+CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
+DROP TABLE t1;
+
+# cs utf8_bin is allowed
+CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin;
+DROP TABLE t1;
+
+# cs mixed latin1_bin and utf8_bin is allowed
+CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb;
+DROP TABLE t1;
+
+# ci indexed column is not allowed unless table name is in exception list
+SET GLOBAL rocksdb_strict_collation_exceptions=t1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test regex for exception list
+SET GLOBAL rocksdb_strict_collation_exceptions="t.*";
+CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t123;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+SET GLOBAL rocksdb_strict_collation_exceptions=".t.*";
+CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE xt123;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list with commas
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list with vertical bar
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra comma at the front
+SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra vertical bar at the front
+SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra comma in the middle
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra vertical bar in the middle
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra comma at the end
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and extra vertical bar at the end
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test multiple entries in the list and tons of commas and vertical bars just for the fun of it
+SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,";
+CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE s1;
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8;
+
+# test allowing alters to create temporary tables
+SET GLOBAL rocksdb_strict_collation_exceptions='t1';
+CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb;
+ALTER TABLE t1 AUTO_INCREMENT=1;
+DROP TABLE t1;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb;
+CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb;
+--error ER_UNKNOWN_ERROR
+ALTER TABLE t2 ADD INDEX(value);
+DROP TABLE t2;
+
+
+# test invalid regex (missing end bracket)
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+SET GLOBAL rocksdb_strict_collation_exceptions="[a-b";
+--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+--error ER_UNKNOWN_ERROR
+CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]";
+CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE a, b;
+
+# test invalid regex (trailing escape)
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+SET GLOBAL rocksdb_strict_collation_exceptions="abc\\";
+--exec grep -A 1 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+--error ER_UNKNOWN_ERROR
+CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+SET GLOBAL rocksdb_strict_collation_exceptions="abc";
+CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+--error ER_UNKNOWN_ERROR
+CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
+DROP TABLE abc;
+
+# cleanup
+SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt
new file mode 100644
index 00000000000..13563edb439
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_strict_collation_check=ON
+--rocksdb_strict_collation_exceptions='r1.lol'
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test
new file mode 100644
index 00000000000..7f741e286b1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test
@@ -0,0 +1,27 @@
+CREATE TABLE `r1.lol` (
+ `c1` int(10) NOT NULL DEFAULT '0',
+ `c2` int(11) NOT NULL DEFAULT '0',
+ `c3` int(1) NOT NULL DEFAULT '0',
+ `c4` int(11) NOT NULL DEFAULT '0',
+ `c5` int(11) NOT NULL DEFAULT '0',
+ `c6` varchar(100) NOT NULL DEFAULT '',
+ `c7` varchar(100) NOT NULL DEFAULT '',
+ `c8` varchar(255) NOT NULL DEFAULT '',
+ `c9` int(10) NOT NULL DEFAULT '125',
+ `c10` int(10) NOT NULL DEFAULT '125',
+ `c11` text NOT NULL,
+ `c12` int(11) NOT NULL DEFAULT '0',
+ `c13` int(10) NOT NULL DEFAULT '0',
+ `c14` text NOT NULL,
+ `c15` blob NOT NULL,
+ `c16` int(11) NOT NULL DEFAULT '0',
+ `c17` int(11) NOT NULL DEFAULT '0',
+ `c18` int(11) NOT NULL DEFAULT '0',
+ PRIMARY KEY (`c1`),
+ KEY i1 (`c4`),
+ KEY i2 (`c7`),
+ KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1;
+
+DROP INDEX i1 ON `r1.lol`;
+DROP TABLE `r1.lol`;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt
new file mode 100644
index 00000000000..3b4871f864a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt
@@ -0,0 +1,3 @@
+--rocksdb_debug_optimizer_n_rows=1000
+--rocksdb_records_in_range=50
+--rocksdb_compaction_sequential_deletes_count_sd=1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test
new file mode 100644
index 00000000000..9cb32e8d615
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test
@@ -0,0 +1,87 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS r1;
+--enable_warnings
+
+create table r1 (
+ id1 int,
+ id2 int,
+ type int,
+ value varchar(100),
+ value2 int,
+ value3 int,
+ primary key (type, id1, id2),
+ index id1_type (id1, type, value2, value, id2)
+) engine=rocksdb collate latin1_bin;
+
+select 'loading data';
+
+--disable_query_log
+let $i=0;
+while ($i<1000)
+{
+ inc $i;
+ eval insert r1(id1, id2, type, value, value2, value3)
+ values($i,$i,$i, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',$i,$i);
+}
+--enable_query_log
+
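+# Flush the memtable and optimize (compact) the table so the updates in each
+# test case below run against data that is already in SST files.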
+set global rocksdb_force_flush_memtable_now=1;
+optimize table r1;
+
+--exec echo Test 1: Do a bunch of updates without setting the compaction sysvar
+--exec echo Expect: no compaction
+let $window = 0;
+let $deletes = 0;
+let $file_size = 0;
+let $secondary_only = 0;
+let $primary = 1;
+let $no_more_deletes = 0;
+--source compact_deletes_test.inc
+
+--exec echo Test 2: Do a bunch of updates and set the compaction sysvar
+--exec echo Expect: compaction
+let $window = 1000;
+let $deletes = 990;
+let $file_size = 0;
+let $secondary_only = 0;
+let $primary = 1;
+let $no_more_deletes = 1;
+--source compact_deletes_test.inc
+
+--exec echo Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large
+--exec echo Expect: no compaction
+let $window = 1000;
+let $deletes = 1000;
+let $file_size = 1000000;
+let $secondary_only = 0;
+let $primary = 1;
+let $no_more_deletes = 0;
+--source compact_deletes_test.inc
+
+--exec echo Test 4: Do a bunch of secondary key updates and set the compaction sysvar
+--exec echo Expect: compaction
+let $window = 1000;
+let $deletes = 50;
+let $file_size = 0;
+let $secondary_only = 1;
+let $primary = 0;
+let $no_more_deletes = 1;
+--source compact_deletes_test.inc
+
+--exec echo Test 5: Do a bunch of secondary key updates and set the compaction sysvar,
+--exec echo and rocksdb_compaction_sequential_deletes_count_sd turned on
+--exec echo Expect: compaction
+let $window = 1000;
+let $deletes = 50;
+let $file_size = 0;
+let $secondary_only = 1;
+let $primary = 0;
+let $no_more_deletes = 1;
+SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd;
+SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON;
+--source compact_deletes_test.inc
+SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd;
+
+drop table r1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc
new file mode 100644
index 00000000000..15a611c8dbb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc
@@ -0,0 +1,43 @@
+# Usage:
+# let $window = <window size>;
+# let $deletes = <number of deletes per window>;
+# let $file_size = <min size of the file>;
+# let $secondary_only = <1 to update only a secondary key column>;
+# let $primary = <1 to update a primary key column>;
+# let $no_more_deletes = <1 if no delete markers are expected to remain>;
+# --source compact_deletes_test.inc
+#
+
+let $save_rocksdb_compaction_sequential_deletes_window = `SELECT @@rocksdb_compaction_sequential_deletes_window`;
+eval set global rocksdb_compaction_sequential_deletes_window=$window;
+let $save_rocksdb_compaction_sequential_deletes = `SELECT @@rocksdb_compaction_sequential_deletes`;
+eval set global rocksdb_compaction_sequential_deletes= $deletes;
+let $save_rocksdb_compaction_sequential_deletes_file_size = `SELECT @@rocksdb_compaction_sequential_deletes_file_size`;
+eval set global rocksdb_compaction_sequential_deletes_file_size=$file_size;
+--disable_query_log
+let $i=0;
+while ($i<1000)
+{
+ inc $i;
+ if ($secondary_only)
+ {
+ eval update r1 set value2=value2+1 where id1=$i;
+ }
+ if ($primary)
+ {
+ eval update r1 set id2=id2+10000 where id1=500;
+ }
+}
+--enable_query_log
+set global rocksdb_force_flush_memtable_now=1;
+select sleep(1);
+
+--disable_query_log
+let $wait_timeout= 300; # Override default 30 seconds with 300.
+let $wait_condition = select count(*) = 0
+ as c from information_schema.rocksdb_global_info
+ where TYPE = 'DDL_DROP_INDEX_ONGOING';
+--source include/wait_condition.inc
+--enable_query_log
+
+--exec bash suite/rocksdb/t/sst_count_rows.sh $MYSQLTEST_VARDIR $MYSQL_SST_DUMP $no_more_deletes
+eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes;
+eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size;
+eval SET GLOBAL rocksdb_compaction_sequential_deletes_window= $save_rocksdb_compaction_sequential_deletes_window;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt
new file mode 100644
index 00000000000..81b5acc4e56
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test
new file mode 100644
index 00000000000..c146d43474c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test
@@ -0,0 +1,4 @@
+--source include/have_rocksdb.inc
+
+create table t (id int primary key) engine=rocksdb;
+drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test
new file mode 100644
index 00000000000..2a2896691b7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test
@@ -0,0 +1,34 @@
+--source include/have_rocksdb.inc
+
+#
+# Generate concurrent requests to alter a table using mysqlslap
+#
+
+--disable_warnings
+DROP DATABASE IF EXISTS mysqlslap;
+--enable_warnings
+
+CREATE DATABASE mysqlslap;
+
+use mysqlslap;
+
+CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB;
+INSERT INTO a1 VALUES (1, 1);
+
+--write_file $MYSQL_TMP_DIR/concurrent_alter.sh
+$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index bx(b); alter table a1 drop index bx" --concurrency=1 --iterations=25 &
+$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index ax(a); alter table a1 drop index ax" --concurrency=1 --iterations=25 &
+sleep 2
+$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 &
+$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000
+sleep 2
+$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 &
+$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000
+wait
+EOF
+
+--exec bash $MYSQL_TMP_DIR/concurrent_alter.sh
+
+SHOW CREATE TABLE a1;
+
+DROP DATABASE mysqlslap;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test
new file mode 100644
index 00000000000..4dfa5abbbbb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = READ COMMITTED;
+
+--source consistent_snapshot.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test
new file mode 100644
index 00000000000..c9f28dbcbe4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = REPEATABLE READ;
+
+--source consistent_snapshot.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test
new file mode 100644
index 00000000000..57b45050fea
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = SERIALIZABLE;
+
+--source consistent_snapshot.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc
new file mode 100644
index 00000000000..be01338cb85
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc
@@ -0,0 +1,136 @@
+#
+# TRANSACTION WITH CONSISTENT SNAPSHOT
+#
+
+--enable_connect_log
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+
+# While a consistent snapshot transaction is executed,
+# no external inserts should be visible to the transaction.
+# But it should only work this way for REPEATABLE-READ and SERIALIZABLE
+
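+# (--error 0,ER_UNKNOWN_ERROR accepts either success or failure for the next
+# statement, since its behaviour differs per isolation level; $mysql_errno is
+# echoed so the .result file records which outcome occurred.)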
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+
+connection con1;
+COMMIT;
+
+# verifying snapshot is released after finishing transaction
+connection con2;
+select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
+
+connection con1;
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+
+connection con2;
+INSERT INTO t1 (a) VALUES (1);
+
+connection con1;
+--echo # If consistent read works on this isolation level ($trx_isolation), the following SELECT should not return the value we inserted (1)
+SELECT a FROM t1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB;
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4);
+
+BEGIN;
+
+connection con2;
+INSERT INTO r1 values (5,5,5);
+
+connection con1;
+SELECT * FROM r1; # 5
+
+connection con2;
+INSERT INTO r1 values (6,6,6);
+
+connection con1;
+SELECT * FROM r1; # 5
+COMMIT;
+SELECT * FROM r1; # 6
+
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+
+connection con2;
+INSERT INTO r1 values (7,7,7);
+
+connection con1;
+SELECT * FROM r1; # 6
+
+connection con2;
+INSERT INTO r1 values (8,8,8);
+
+connection con1;
+SELECT * FROM r1; # 6
+COMMIT;
+SELECT * FROM r1; # 8
+
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+
+connection con2;
+INSERT INTO r1 values (9,9,9);
+
+connection con1;
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+
+connection con2;
+INSERT INTO r1 values (10,10,10);
+
+connection con1;
+SELECT * FROM r1; # 9
+
+--error 0,ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--echo ERROR: $mysql_errno
+# Succeeds with Read Committed, Fails with Repeatable Read
+--error 0,ER_UNKNOWN_ERROR
+INSERT INTO r1 values (11,11,11);
+--echo ERROR: $mysql_errno
+SELECT * FROM r1; # self changes should be visible
+
+
+drop table r1;
+
+connection default;
+disconnect con1;
+disconnect con2;
+
+
+--source include/wait_until_count_sessions.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test
new file mode 100644
index 00000000000..a9ee98dfda5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test
@@ -0,0 +1,80 @@
+--source include/have_rocksdb.inc
+
+--echo #
+--echo # Test how MyRocks handles reading corrupted data from disk.
+--echo # Data corruption is simulated at source-code level.
+--echo #
+
+--source include/have_debug.inc
+
+
+--echo #
+--echo # A test for case when data in the table *record* is longer
+--echo # than table DDL expects it to be
+--echo #
+create table t1 (
+ pk int not null primary key,
+ col1 varchar(10)
+) engine=rocksdb;
+
+insert into t1 values (1,1),(2,2),(3,3);
+
+select * from t1;
+
+set @tmp1=@@rocksdb_verify_checksums;
+set rocksdb_verify_checksums=1;
+set session debug= "+d,myrocks_simulate_bad_row_read1";
+--error ER_GET_ERRNO
+select * from t1 where pk=1;
+set session debug= "-d,myrocks_simulate_bad_row_read1";
+set rocksdb_verify_checksums=@tmp1;
+
+select * from t1 where pk=1;
+
+set session debug= "+d,myrocks_simulate_bad_row_read2";
+--error ER_GET_ERRNO
+select * from t1 where pk=1;
+set session debug= "-d,myrocks_simulate_bad_row_read2";
+
+set session debug= "+d,myrocks_simulate_bad_row_read3";
+--error ER_GET_ERRNO
+select * from t1 where pk=1;
+set session debug= "-d,myrocks_simulate_bad_row_read3";
+
+insert into t1 values(4,'0123456789');
+select * from t1;
+drop table t1;
+
+--echo #
+--echo # A test for case when index data is longer than table DDL
+--echo # expects it to be
+--echo #
+
+create table t2 (
+ pk varchar(4) not null primary key,
+ col1 int not null
+) engine=rocksdb collate latin1_bin;
+
+insert into t2 values ('ABCD',1);
+select * from t2;
+set session debug= "+d,myrocks_simulate_bad_pk_read1";
+--error ER_GET_ERRNO
+select * from t2;
+set session debug= "-d,myrocks_simulate_bad_pk_read1";
+
+drop table t2;
+
+create table t2 (
+ pk varchar(4) not null primary key,
+ col1 int not null
+) engine=rocksdb;
+
+insert into t2 values ('ABCD',1);
+
+select * from t2;
+set session debug= "+d,myrocks_simulate_bad_pk_read1";
+--error ER_GET_ERRNO
+select * from t2;
+set session debug= "-d,myrocks_simulate_bad_pk_read1";
+
+drop table t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/create_table.test b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test
new file mode 100644
index 00000000000..4fffe7497c4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test
@@ -0,0 +1,192 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic CREATE TABLE statements
+#
+
+#############################################
+# TODO:
+# A part of the test is currently disabled
+# because temporary tables are not supported
+#############################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+# Simple create table with minimal table options
+# which are defined in have_engine.inc
+# (default empty) plus ENGINE=
+
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+
+# IF NOT EXISTS
+CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+
+# CREATE .. LIKE
+
+CREATE TABLE t2 LIKE t1;
+SHOW CREATE TABLE t2;
+
+--error ER_ILLEGAL_HA_CREATE_OPTION
+CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb;
+
+--disable_parsing
+
+DROP TABLE t2;
+
+CREATE TEMPORARY TABLE t2 LIKE t1;
+
+SHOW CREATE TABLE t2;
+DROP TEMPORARY TABLE t2;
+
+--enable_parsing
+
+DROP TABLE t2;
+
+DROP TABLE IF EXISTS t1;
+
+# CREATE .. AS SELECT
+
+# Use the engine as default
+
+SET default_storage_engine = rocksdb;
+
+CREATE TABLE t1 (a INT PRIMARY KEY);
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a;
+SHOW CREATE TABLE t1;
+--sorted_result
+SELECT * FROM t1;
+
+# Just to add FLUSH LOGS into the mix while we are in the most common test
+FLUSH LOGS;
+
+DROP TABLE IF EXISTS t1;
+
+# CREATE TABLE with MAX_INDEXES (64) keys and no primary key
+# MyRocks adds a hidden primary key, so make sure we don't break anything
+CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT,
+ c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT,
+ c18 INT,c19 INT,c20 INT,c21 INT,c22 INT,c23 INT,c24 INT,c25 INT,
+ c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT,
+ c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT,
+ c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 INT,
+ c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT,
+ c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT,
+ c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT,
+ c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT,
+ c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT,
+ c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT,
+ c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT,
+ c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT,
+ c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT,
+ c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT,
+ c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT,
+ c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT,
+ c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT,
+ c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT,
+ c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT,
+ c161 INT,c162 INT,c163 INT,c164 INT,c165 INT,c166 INT,c167 INT,
+ c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT,
+ c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT,
+ c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT,
+ c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT,
+ c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT,
+ c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT,
+ c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT,
+ c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT,
+ c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT,
+ c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT,
+ c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT,
+ c245 INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT,
+ c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT,
+ c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT,
+ c266 INT,c267 INT,c268 INT,c269 INT,c270 INT,c271 INT,c272 INT,
+ c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT,
+ c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT,
+ c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT,
+ c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT,
+ c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT,
+ c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT,
+ c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT,
+ c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT,
+ c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT,
+ c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT,
+ c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT,
+ c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT,
+ c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT,
+ c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT,
+ c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 INT,c377 INT,
+ c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT,
+ c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT,
+ c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT,
+ c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT,
+ c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT,
+ c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT,
+ c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT,
+ c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT,
+ c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT,
+ c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT,
+ c448 INT,
+ KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14),
+ KEY (c15,c16,c17,c18,c19,c20,c21),KEY (c22,c23,c24,c25,c26,c27,c28),
+ KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42),
+ KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56),
+ KEY (c57,c58,c59,c60,c61,c62,c63),KEY (c64,c65,c66,c67,c68,c69,c70),
+ KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84),
+ KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98),
+ KEY (c99,c100,c101,c102,c103,c104,c105),
+ KEY (c106,c107,c108,c109,c110,c111,c112),
+ KEY (c113,c114,c115,c116,c117,c118,c119),
+ KEY (c120,c121,c122,c123,c124,c125,c126),
+ KEY (c127,c128,c129,c130,c131,c132,c133),
+ KEY (c134,c135,c136,c137,c138,c139,c140),
+ KEY (c141,c142,c143,c144,c145,c146,c147),
+ KEY (c148,c149,c150,c151,c152,c153,c154),
+ KEY (c155,c156,c157,c158,c159,c160,c161),
+ KEY (c162,c163,c164,c165,c166,c167,c168),
+ KEY (c169,c170,c171,c172,c173,c174,c175),
+ KEY (c176,c177,c178,c179,c180,c181,c182),
+ KEY (c183,c184,c185,c186,c187,c188,c189),
+ KEY (c190,c191,c192,c193,c194,c195,c196),
+ KEY (c197,c198,c199,c200,c201,c202,c203),
+ KEY (c204,c205,c206,c207,c208,c209,c210),
+ KEY (c211,c212,c213,c214,c215,c216,c217),
+ KEY (c218,c219,c220,c221,c222,c223,c224),
+ KEY (c225,c226,c227,c228,c229,c230,c231),
+ KEY (c232,c233,c234,c235,c236,c237,c238),
+ KEY (c239,c240,c241,c242,c243,c244,c245),
+ KEY (c246,c247,c248,c249,c250,c251,c252),
+ KEY (c253,c254,c255,c256,c257,c258,c259),
+ KEY (c260,c261,c262,c263,c264,c265,c266),
+ KEY (c267,c268,c269,c270,c271,c272,c273),
+ KEY (c274,c275,c276,c277,c278,c279,c280),
+ KEY (c281,c282,c283,c284,c285,c286,c287),
+ KEY (c288,c289,c290,c291,c292,c293,c294),
+ KEY (c295,c296,c297,c298,c299,c300,c301),
+ KEY (c302,c303,c304,c305,c306,c307,c308),
+ KEY (c309,c310,c311,c312,c313,c314,c315),
+ KEY (c316,c317,c318,c319,c320,c321,c322),
+ KEY (c323,c324,c325,c326,c327,c328,c329),
+ KEY (c330,c331,c332,c333,c334,c335,c336),
+ KEY (c337,c338,c339,c340,c341,c342,c343),
+ KEY (c344,c345,c346,c347,c348,c349,c350),
+ KEY (c351,c352,c353,c354,c355,c356,c357),
+ KEY (c358,c359,c360,c361,c362,c363,c364),
+ KEY (c365,c366,c367,c368,c369,c370,c371),
+ KEY (c372,c373,c374,c375,c376,c377,c378),
+ KEY (c379,c380,c381,c382,c383,c384,c385),
+ KEY (c386,c387,c388,c389,c390,c391,c392),
+ KEY (c393,c394,c395,c396,c397,c398,c399),
+ KEY (c400,c401,c402,c403,c404,c405,c406),
+ KEY (c407,c408,c409,c410,c411,c412,c413),
+ KEY (c414,c415,c416,c417,c418,c419,c420),
+ KEY (c421,c422,c423,c424,c425,c426,c427),
+ KEY (c428,c429,c430,c431,c432,c433,c434),
+ KEY (c435,c436,c437,c438,c439,c440,c441),
+ KEY (c442,c443,c444,c445,c446,c447,c448));
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test
new file mode 100644
index 00000000000..3be7fda9952
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test
@@ -0,0 +1,43 @@
+--echo #
+--echo # Validate that deadlock errors don't occur with a high level of concurrency
+--echo #
+
+--source include/have_rocksdb.inc
+
+--echo # Disable for valgrind because this takes too long
+--source include/not_valgrind.inc
+
+--disable_warnings
+DROP DATABASE IF EXISTS mysqlslap;
+--enable_warnings
+
+CREATE DATABASE mysqlslap;
+USE mysqlslap;
+CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb;
+CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb;
+
+SET @save = @@global.rocksdb_lock_wait_timeout;
+SET GLOBAL rocksdb_lock_wait_timeout = 60;
+
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1 VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1"
+SELECT count from t1;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id1=1 AND id2=1"
+SELECT count from t1;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id2=1"
+SELECT count from t1;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1"
+SELECT count from t1;
+
+# Same tests on a table with reverse orderings
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1rev VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1"
+SELECT count from t1rev;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id1=1 AND id2=1"
+SELECT count from t1rev;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id2=1"
+SELECT count from t1rev;
+--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1"
+SELECT count from t1rev;
+
+SET GLOBAL rocksdb_lock_wait_timeout = @save;
+
+DROP DATABASE mysqlslap;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete.test b/storage/rocksdb/mysql-test/rocksdb/t/delete.test
new file mode 100644
index 00000000000..b1654e606a5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/delete.test
@@ -0,0 +1,101 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic DELETE statements.
+# DELETE LOW_PRIORITY is covered in delete_low_prio test
+# DELETE QUICK is covered in delete_quick test (syntax only)
+# DELETE IGNORE is covered in delete_ignore test
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+# Single-table DELETE
+
+DELETE FROM t1 WHERE b IN ('c');
+--sorted_result
+SELECT a,b FROM t1;
+
+DELETE FROM t1 WHERE a < 0 OR b = 'a';
+--sorted_result
+SELECT a,b FROM t1;
+
+# ORDER BY and LIMIT
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+--sorted_result
+SELECT a,b FROM t1;
+
+# Multi-table DELETE
+
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DELETE FROM t1;
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1, t2;
+
+
+#
+# Transactional DELETE
+#
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+BEGIN;
+DELETE FROM t1 WHERE b IN ('c');
+--sorted_result
+SELECT a,b FROM t1;
+DELETE FROM t1 WHERE a < 0 OR b = 'a';
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+# Savepoints
+
+BEGIN;
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+SAVEPOINT spt1;
+
+DELETE FROM t1;
+RELEASE SAVEPOINT spt1;
+ROLLBACK;
+--sorted_result
+SELECT a,b FROM t1;
+
+BEGIN;
+DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+SAVEPOINT spt1;
+DELETE FROM t1;
+INSERT INTO t1 (a,b) VALUES (1,'a');
+--error ER_UNKNOWN_ERROR
+ROLLBACK TO SAVEPOINT spt1;
+--error ER_UNKNOWN_ERROR
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test
new file mode 100644
index 00000000000..93a9d1adaf9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test
@@ -0,0 +1,36 @@
+--source include/have_rocksdb.inc
+--source include/have_debug_sync.inc
+
+# This is a test case to reproduce https://github.com/facebook/mysql-5.6/issues/162
+# Expected output of the last select for update was (1,2,100) and (1,3,100), but
+# currently it returns (1,2,1) and (1,3,1), which must be fixed.
+
+connect (con, localhost, root,,);
+connection default;
+
+--disable_warnings
+set debug_sync='RESET';
+drop table if exists t1;
+--enable_warnings
+
+create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb;
+insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2);
+
+connection con;
+set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go';
+send update t1 set value=100 where id1=1;
+
+connection default;
+set debug_sync='now WAIT_FOR parked';
+delete from t1 where id1=1 and id2=1;
+set debug_sync='now SIGNAL go';
+
+connection con;
+reap;
+select * from t1 where id1=1 for update;
+
+# Cleanup
+connection default;
+disconnect con;
+set debug_sync='RESET';
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test
new file mode 100644
index 00000000000..d087d80f4ac
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test
@@ -0,0 +1,37 @@
+--source include/have_rocksdb.inc
+
+#
+# DELETE IGNORE
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb;
+
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1;
+--sorted_result
+SELECT a,b FROM t1;
+
+DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+# Cleanup
+DROP TABLE t1, t2;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test
new file mode 100644
index 00000000000..127ef47dfaf
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test
@@ -0,0 +1,32 @@
+--source include/have_rocksdb.inc
+
+#
+# DELETE QUICK syntax.
+# For now we only check that the keyword is accepted,
+# without actually checking whether the feature works.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+
+DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo';
+--sorted_result
+SELECT a,b FROM t1;
+
+CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test
new file mode 100644
index 00000000000..f4d890eb1b4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test
@@ -0,0 +1,39 @@
+--source include/have_rocksdb.inc
+
+#
+# DELETE statements for tables with keys
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+DELETE FROM t1 WHERE b > 'y';
+DELETE FROM t1 WHERE a=2;
+
+--sorted_result
+SELECT a,b FROM t1;
+DELETE FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+DELETE FROM t1 WHERE b > 'y';
+DELETE FROM t1 WHERE a=2;
+
+--sorted_result
+SELECT a,b FROM t1;
+DELETE FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9);
+DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1;
+--sorted_result
+SELECT a,b,c FROM t1;
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/describe.test b/storage/rocksdb/mysql-test/rocksdb/t/describe.test
new file mode 100644
index 00000000000..9bc5d299a31
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/describe.test
@@ -0,0 +1,24 @@
+--source include/have_rocksdb.inc
+
+#
+# DESCRIBE statement
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2, t3;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b');
+
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8;
+INSERT INTO t2 (a,b) VALUES (1, 'bar');
+
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8;
+
+DESCRIBE t1;
+DESC t2 a;
+DESCRIBE t3 '%';
+
+DROP TABLE t1, t2, t3;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
new file mode 100644
index 00000000000..5e459b0d471
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
@@ -0,0 +1,4 @@
+cons_snapshot_serializable : Consistent read does not work on serializable
+level_read_uncommitted : Not supported
+level_serializable : Not supported
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test
new file mode 100644
index 00000000000..174a4bbf286
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test
@@ -0,0 +1,11 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP DATABASE IF EXISTS test_drop_database;
+--enable_warnings
+
+CREATE DATABASE test_drop_database;
+CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB;
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a);
+DROP TABLE t1;
+DROP DATABASE test_drop_database;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test
new file mode 100644
index 00000000000..57d7cdf57c2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test
@@ -0,0 +1,116 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+##
+## test dropping index inplace
+##
+
+CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 (a) VALUES (1);
+INSERT INTO t1 (a) VALUES (3);
+INSERT INTO t1 (a) VALUES (5);
+
+ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+# Key ka does not exist in table t1
+--error 1176
+SELECT * FROM t1 FORCE INDEX(ka) where a > 1;
+
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kb) where a > 1;
+--sorted_result
+SELECT * FROM t1 where b > 1;
+
+DROP TABLE t1;
+
+##
+## test dropping multiple indexes at once and multi-part indexes
+##
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+
+# test restarting to make sure everything is still ok and persisted properly
+--source include/restart_mysqld.inc
+
+SHOW CREATE TABLE t1;
+
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+
+--sorted_result
+SELECT * FROM t1 FORCE INDEX(kc) where c > 3;
+--sorted_result
+SELECT * FROM t1 where b > 3;
+
+DROP TABLE t1;
+
+# test dropping the pk to see if that's still ok
+CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW INDEX IN t1;
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE;
+SHOW INDEX IN t1;
+
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW INDEX IN t1;
+# test dropping index on tables with no pk
+ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE;
+SHOW INDEX IN t1;
+
+DROP TABLE t1;
+
+# test dropping unique keys
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+ALTER TABLE t1 ADD UNIQUE INDEX kb(b);
+ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c);
+ALTER TABLE t1 ADD UNIQUE INDEX kc(c);
+SHOW INDEX IN t1;
+
+ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc;
+SHOW INDEX IN t1;
+
+# test restarting to make sure everything is still ok and persisted properly
+--source include/restart_mysqld.inc
+
+--sorted_result
+INSERT INTO t1 (b,c) VALUES (1,2);
+INSERT INTO t1 (b,c) VALUES (3,4);
+INSERT INTO t1 (b,c) VALUES (5,6);
+SELECT * FROM t1 FORCE INDEX(kc) where c > 3;
+
+# test dropping index on tables with no pk
+ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+# case where we drop a column that is part of a key; we don't want to use the
+# inplace algorithm in this scenario
+CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT);
+INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
+ALTER TABLE t1 ADD KEY idx ( col1, col2 );
+ANALYZE TABLE t1;
+ALTER TABLE t1 DROP COLUMN col2;
+ALTER TABLE t1 DROP COLUMN col3;
+DROP TABLE t1;
+
+# case where we drop and add at the same time; should not use the inplace algorithm yet
+CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT);
+INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
+ALTER TABLE t1 ADD KEY idx ( col1, col2 );
+ANALYZE TABLE t1;
+ALTER TABLE t1 DROP COLUMN col2;
+ALTER TABLE t1 DROP COLUMN col3;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc
new file mode 100644
index 00000000000..b40004402c9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc
@@ -0,0 +1,3 @@
+drop procedure save_read_stats;
+drop procedure get_read_stats;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt
new file mode 100644
index 00000000000..f53a6050e89
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt
@@ -0,0 +1,3 @@
+--rocksdb_max_background_compactions=8
+--rocksdb_max_subcompactions=1
+--rocksdb_default_cf_options=write_buffer_size=512k;target_file_size_base=512k;level0_file_num_compaction_trigger=2;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;max_bytes_for_level_base=1m
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test
new file mode 100644
index 00000000000..7b28474d9f2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test
@@ -0,0 +1,115 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+--enable_warnings
+
+# Start from clean slate
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+--source include/restart_mysqld.inc
+--exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+CREATE TABLE t1 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t2 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t3 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t4 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+# Populate tables
+let $max = 1000;
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t2;
+--source drop_table_repopulate_table.inc
+let $table = t3;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+drop table t2;
+
+# Restart the server before t2's indices are deleted
+--source include/restart_mysqld.inc
+
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+drop table t3;
+
+# Insert enough data to trigger compactions that eliminate t2 and t3
+let $max = 50000;
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+drop table t4;
+
+# Restart the server before t4's indices are deleted
+--source include/restart_mysqld.inc
+
+# Make sure new table gets unique indices
+CREATE TABLE t5 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+let $max = 1000;
+let $table = t5;
+--source drop_table_repopulate_table.inc
+
+drop table t5;
+
+# Manually compact column families, cleaning up all lingering data
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+
+# Signal thread to check for dropped indices
+set global rocksdb_signal_drop_index_thread = 1;
+
+let $show_rpl_debug_info= 1; # to force post-failure printout
+let $wait_timeout= 300; # Override default 30 seconds with 300.
+let $wait_condition = select count(*) = 0
+ as c from information_schema.rocksdb_global_info
+ where TYPE = 'DDL_DROP_INDEX_ONGOING';
+--source include/wait_condition.inc
+
+# Get list of all indices needing to be dropped
+# Check total compacted-away rows for all indices
+# Check that all indices have been successfully dropped
+--exec perl suite/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+# Cleanup
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test
new file mode 100644
index 00000000000..3742ab0e444
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test
@@ -0,0 +1,110 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t5;
+--enable_warnings
+
+# Start from clean slate
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+--source include/restart_mysqld.inc
+--exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+CREATE TABLE t1 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t2 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t3 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+CREATE TABLE t4 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+# Populate tables
+let $max = 1000;
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t2;
+--source drop_table_repopulate_table.inc
+let $table = t3;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+
+# Restart the server before t2's indices are deleted
+--source include/restart_mysqld.inc
+
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+
+# Insert enough data to trigger compactions that eliminate t2 and t3
+let $max = 50000;
+let $table = t1;
+--source drop_table_repopulate_table.inc
+let $table = t4;
+--source drop_table_repopulate_table.inc
+
+
+# Restart the server before t4's indices are deleted
+--source include/restart_mysqld.inc
+
+# Make sure new table gets unique indices
+CREATE TABLE t5 (
+ a int not null,
+ b int not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+let $max = 1000;
+let $table = t5;
+--source drop_table_repopulate_table.inc
+
+let $output= $MYSQLTEST_VARDIR/tmp/size_output;
+
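+# Record the total size of the .sst files before dropping the tables; the
+# "after" total is appended further down and the two are compared by
+# drop_table2_check.pl.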
+--exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ before/' > $output
+drop table t1;
+drop table t2;
+drop table t3;
+drop table t4;
+drop table t5;
+
+let $show_rpl_debug_info= 1; # to force post-failure printout
+let $wait_timeout= 300; # Override default 30 seconds with 300.
+let $wait_condition = select count(*) = 0
+ as c from information_schema.rocksdb_global_info
+ where TYPE = 'DDL_DROP_INDEX_ONGOING';
+--source include/wait_condition.inc
+
+# Check that space is reclaimed
+--exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ after/' >> $output
+--exec perl suite/rocksdb/t/drop_table2_check.pl $output
+
+# Cleanup
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl
new file mode 100644
index 00000000000..8f43f4725b5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2_check.pl
@@ -0,0 +1,19 @@
+#!/usr/bin/perl
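+#
+# Reads the size summary produced by drop_table2.test (lines tagged "before"
+# and "after") and prints "Compacted" when the before-size is more than twice
+# the after-size, i.e. dropping the tables reclaimed SST space.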
+
+my $a = 0;
+my $b = 0;
+die unless($ARGV[0]);
+open(my $f, "<", $ARGV[0]) or die $!;
+while(readline($f)) {
+ if (/(\d+) before/) {
+ $a = $1;
+ }
+
+ if (/(\d+) after/ ) {
+ $b = $1;
+ }
+}
+
+if ($a > $b * 2) {
+ printf("Compacted\n");
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt
new file mode 100644
index 00000000000..a9ebc4ec20b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_max_subcompactions=1
+--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc
new file mode 100644
index 00000000000..4d23f7a1c5f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc
@@ -0,0 +1,47 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Start from clean slate
+set global rocksdb_compact_cf = 'cf1';
+set global rocksdb_compact_cf = 'rev:cf2';
+set global rocksdb_signal_drop_index_thread = 1;
+--source include/restart_mysqld.inc
+--exec truncate --size=0 $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+CREATE TABLE t1 (
+ a int not null,
+ b int not null,
+ c varchar(500) not null,
+ primary key (a,b) comment 'cf1',
+ key (b) comment 'rev:cf2'
+) ENGINE=RocksDB;
+
+# Populate tables
+let $max = 50000;
+let $table = t1;
+--source drop_table3_repopulate_table.inc
+
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+if ($truncate_table)
+{
+ truncate table t1;
+}
+if ($drop_table)
+{
+ drop table t1;
+}
+
+let $show_rpl_debug_info= 1; # to force post-failure printout
+let $wait_timeout= 300; # Override default 30 seconds with 300.
+let $wait_condition = select count(*) = 0
+ as c from information_schema.rocksdb_global_info
+ where TYPE = 'DDL_DROP_INDEX_ONGOING';
+--source include/wait_condition.inc
+
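+# The delta of rocksdb_compact_read_bytes against the value saved in @a above
+# is expected to stay under 500000 bytes, i.e. removing the table's data should
+# not require compaction to read much of it back.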
+select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes';
+
+# Cleanup
+DROP TABLE IF EXISTS t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test
new file mode 100644
index 00000000000..b3a6bf9958e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+-- let $truncate_table = 0
+-- let $drop_table = 1
+-- source drop_table3.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc
new file mode 100644
index 00000000000..c34af07204f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc
@@ -0,0 +1,15 @@
+# Usage:
+# let $max = <count>;
+# let $table = <table name>;
+# --source drop_table3_repopulate_table.inc
+#
+eval DELETE FROM $table;
+
+--disable_query_log
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO $table VALUES ($i, $i, rpad('a', 499, 'b'));
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl
new file mode 100755
index 00000000000..b123ac5492f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl
@@ -0,0 +1,37 @@
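+# Parses the server error log given on the command line for MyRocks messages
+# about dropped-index compaction ("Compacting away ...", "Begin filtering ...",
+# "Finished filtering ...") and prints, for the begin/finish messages, the gap
+# between consecutive index ids rather than the ids themselves, presumably so
+# the output does not depend on absolute index numbers.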
+sub print_array {
+ $str = shift;
+ @arr = @_;
+ $prev= 0;
+ foreach (@arr) {
+ if ($prev) {
+ $dummy_idx = $_ - $prev;
+ }else {
+ $dummy_idx = 0;
+ }
+ $prev= $_;
+ print "$str $dummy_idx\n";
+ }
+}
+
+while (<>) {
+ if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) {
+ $a{$1} += $2;
+ }
+ if (/Begin filtering dropped index \(\d+,(\d+)\)/) {
+ push @b, $1;
+ }
+ if (/Finished filtering dropped index \(\d+,(\d+)\)/) {
+ push @c, $1;
+ }
+}
+$prev= 0;
+foreach (sort {$a <=> $b} keys %a){
+ if ($prev) {
+ $dummy_idx= $_ - $prev;
+ }else {
+ $dummy_idx= 0;
+ }
+ $prev= $_;
+}
+print_array("Begin filtering dropped index+", sort {$a <=> $b} @b);
+print_array("Finished filtering dropped index+", sort {$a <=> $b} @c);
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc
new file mode 100644
index 00000000000..6faf41ef7b1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc
@@ -0,0 +1,15 @@
+# Usage:
+# let $max = <count>;
+# let $table = <table name>;
+# --source drop_table_repopulate_table.inc
+#
+eval DELETE FROM $table;
+
+--disable_query_log
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO $table VALUES ($i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc
new file mode 100644
index 00000000000..c6a3ccde7a6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc
@@ -0,0 +1,6 @@
+let $show_rpl_debug_info= 1; # to force post-failure printout
+let $wait_timeout= 300; # Override default 30 seconds with 300.
+let $wait_condition = select count(*) = 0
+ as c from information_schema.rocksdb_global_info
+ where TYPE = 'DDL_DROP_INDEX_ONGOING';
+--source include/wait_condition.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test
new file mode 100644
index 00000000000..ebe18ab2e95
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test
@@ -0,0 +1,41 @@
+--source include/have_rocksdb.inc
+
+# Test insert ... on duplicate key update statements
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+CREATE TABLE t1 (id1 INT, id2 INT, id3 INT,
+ PRIMARY KEY (id1, id2, id3),
+ UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB;
+
+CREATE TABLE t2 (id1 INT, id2 INT, id3 INT,
+ PRIMARY KEY (id1, id2, id3),
+ UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB;
+
+
+--source suite/rocksdb/include/dup_key_update.inc
+
+# Cleanup
+DROP TABLE t1;
+DROP TABLE t2;
+
+CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin,
+ id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin,
+ id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ PRIMARY KEY (id1, id2, id3),
+ UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB;
+
+CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin,
+ id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin,
+ id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ PRIMARY KEY (id1, id2, id3),
+ UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB;
+
+--source suite/rocksdb/include/dup_key_update.inc
+
+# Cleanup
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test
new file mode 100644
index 00000000000..781163f34fb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+--disable_warnings
+DROP TABLE IF EXISTS t;
+--enable_warnings
+CREATE TABLE t(id int primary key) engine=rocksdb;
+INSERT INTO t values (1), (2), (3);
+--error ER_TABLE_EXISTS_ERROR
+CREATE TABLE t(id int primary key) engine=rocksdb;
+FLUSH TABLES;
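+# Hide the .frm so the table is no longer visible to the server while the
+# MyRocks data dictionary still knows about it; re-creating the table is then
+# expected to fail with an engine error.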
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp
+--error ER_UNKNOWN_ERROR
+CREATE TABLE t(id int primary key) engine=rocksdb;
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm
+FLUSH TABLES;
+SELECT * FROM t;
+DROP TABLE t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test
new file mode 100644
index 00000000000..255819704a8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test
@@ -0,0 +1,17 @@
+--source include/have_rocksdb.inc
+
+#
+# Any create table using the system column family should fail
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--error ER_WRONG_ARGUMENTS
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB;
+
+#cleanup
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test
new file mode 100644
index 00000000000..bd8071b1b5e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test
@@ -0,0 +1,45 @@
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+CREATE TABLE t1 (b INT PRIMARY KEY);
+
+# Try simple foreign key - should fail
+--error ER_NOT_SUPPORTED_YET
+CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b));
+
+# Try simple valid syntax with 'foreign' as part - should succeed
+CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL);
+DROP TABLE t2;
+
+# Try simple valid syntax with 'foreign' and 'key' as part (with no space) - should succeed
+CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL);
+DROP TABLE t2;
+
+# Try with valid id containing 'foreign' and then a foreign key - should fail
+--error ER_NOT_SUPPORTED_YET
+CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b));
+
+CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL);
+# Alter with foreign key - should fail
+--error ER_NOT_SUPPORTED_YET
+ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b);
+DROP TABLE t2;
+
+# Alter with valid syntax that contains 'foreign' - should succeed
+CREATE TABLE t2 (a INT NOT NULL);
+ALTER TABLE t2 ADD bforeign INT NOT NULL;
+DROP TABLE t2;
+
+# Alter with valid syntax that contains 'foreign' and 'key' (no space) - should succeed
+CREATE TABLE t2 (a INT NOT NULL);
+ALTER TABLE t2 ADD foreignkey INT NOT NULL;
+DROP TABLE t2;
+
+# Alter with valid syntax that contains 'foreign' and then foreign key - should fail
+CREATE TABLE t2 (a INT NOT NULL);
+--error ER_NOT_SUPPORTED_YET
+ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b);
+DROP TABLE t2;
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt
new file mode 100644
index 00000000000..f0b7f4b5ce5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt
@@ -0,0 +1 @@
+--binlog-format=row --binlog-row-image=full --gap-lock-raise-error=1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test
new file mode 100644
index 00000000000..af7c9b1ab4f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test
@@ -0,0 +1,14 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_rocksdb.inc
+
+# For issue#254
+create table t (id int primary key, value int);
+begin;
+update t set value=100 where id in (1, 2);
+commit;
+begin;
+--error ER_UNKNOWN_ERROR
+select * from t for update;
+commit;
+drop table t;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test
new file mode 100644
index 00000000000..59fe7e6f80a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test
@@ -0,0 +1,37 @@
+--source include/have_rocksdb.inc
+
+let $engine=rocksdb;
+--source include/gap_lock_raise_error_all.inc
+
+SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions;
+
+SET GLOBAL gap_lock_exceptions="t.*";
+--source include/gap_lock_raise_error_init.inc
+
+set session autocommit=0;
+--error ER_UNKNOWN_ERROR
+select * from gap1 limit 1 for update;
+--error ER_UNKNOWN_ERROR
+select * from gap1 where value != 100 limit 1 for update;
+
+--source include/gap_lock_raise_error_cleanup.inc
+
+SET GLOBAL gap_lock_exceptions="gap.*";
+--source include/gap_lock_raise_error_init.inc
+
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+select * from gap1 where value != 100 limit 1 for update;
+
+--source include/gap_lock_raise_error_cleanup.inc
+
+# This test has been temporarily removed because it fails when the server
+# is compiled using GCC 4.8 as full regular expression handling was added
+# in GCC 4.9. We need to add the ability to detect if full regex is
+# available before re-enabling this test.
+## Make sure we handle invalid regex expressions and generate a warning
+#--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+#SET GLOBAL gap_lock_exceptions="[a-b,abc\\";
+#--exec grep -A 2 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl b/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl
new file mode 100644
index 00000000000..c723ec3ca17
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/gen_insert.pl
@@ -0,0 +1,32 @@
+#!/usr/bin/perl
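+#
+# Emits INSERT statements for the table named in ARGV[0]: 10000 rows of
+# (id1, id2, id3, id4, id5, value, value2), batched 100 rows per INSERT.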
+
+my $table_name= $ARGV[0];
+my $id1= 1;
+my $id2= 1;
+my $id3= 1;
+my $id4= 1;
+my $id5= 1;
+my $value= 1000;
+my $value2= 'aaabbbccc';
+my $max_rows = 1 * 10000;
+
+for(my $row_id= 1; $row_id <= $max_rows; $row_id++) {
+ my $value_clause = "($id1, $id2, $id3, $id4, $id5, $value, \"$value2\")";
+
+ if ($row_id % 100 == 1) {
+ print "INSERT INTO $table_name VALUES";
+ }
+
+ if ($row_id % 100 == 0) {
+ print "$value_clause;\n";
+ }else {
+ print "$value_clause,";
+ }
+
+ $id4++;
+ $id5++;
+ $id3++ if($row_id % 5 == 0);
+ $id2++ if($row_id % 5 == 0);
+ $id1++ if($row_id % 10 == 0);
+}
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test
new file mode 100644
index 00000000000..4f07bf206f2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test
@@ -0,0 +1,25 @@
+--source include/have_rocksdb.inc
+
+#
+# Create tables with partitions and try to generate an error while creating
+# partitions.
+#
+--disable_warnings
+
+DROP TABLE IF EXISTS t1;
+
+--enable_warnings
+
+CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3;
+
+SHOW TABLES;
+
+--disable_query_log
+call mtr.add_suppression("Failed to execute action for entry.*");
+--enable_query_log
+
+--error ER_WRONG_TABLE_NAME
+ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index');
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test
new file mode 100644
index 00000000000..91a492ef26e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test
@@ -0,0 +1,53 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic HANDLER counts
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+FLUSH STATUS;
+CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT,
+INDEX b(b)) ENGINE=rocksdb;
+INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50);
+SHOW SESSION STATUS LIKE 'Handler_write%';
+
+UPDATE t1 SET b=1000 WHERE id=1;
+SHOW SESSION STATUS LIKE 'Handler_update%';
+
+DELETE FROM t1 WHERE id=2;
+SHOW SESSION STATUS LIKE 'Handler_delete%';
+
+INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
+SHOW SESSION STATUS LIKE 'Handler_write%';
+
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id=8;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+FLUSH STATUS;
+SELECT * FROM t1 WHERE b=6;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+FLUSH STATUS;
+--sorted_result
+SELECT * FROM t1;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+FLUSH STATUS;
+SELECT * FROM t1 WHERE b <=5 ORDER BY b;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id >=8 ORDER BY id;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+FLUSH STATUS;
+SELECT * FROM t1 WHERE id < 8 ORDER BY id;
+SHOW SESSION STATUS LIKE 'Handler_read%';
+
+# Cleanup
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc
new file mode 100644
index 00000000000..17baf5b6c57
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc
@@ -0,0 +1,257 @@
+--enable_connect_log
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+### See full test cases here:
+### https://github.com/ept/hermitage/blob/master/mysql.md
+
+--disable_warnings
+DROP TABLE IF EXISTS test;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+connect (con2,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+connect (con3,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+
+connection con1;
+
+create table test (id int primary key, value int) engine=rocksdb;
+
+### Prevents Aborted Reads (G1a)
+--source hermitage_init.inc
+
+connection con1;
+select * from test;
+update test set value = 101 where id = 1;
+connection con2;
+select * from test; # Still shows 1 => 10
+connection con1;
+rollback;
+connection con2;
+select * from test; # Still shows 1 => 10
+commit;
+
+
+### Prevents Intermediate Reads (G1b)
+--source hermitage_init.inc
+
+connection con1;
+update test set value = 101 where id = 1;
+connection con2;
+select * from test; # Still shows 1 => 10
+connection con1;
+update test set value = 11 where id = 1;
+commit;
+connection con2;
+select * from test; # Now shows 1 => 11
+commit;
+
+
+### Prevents Circular Information Flow (G1c)
+--source hermitage_init.inc
+
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 22 where id = 2;
+connection con1;
+select * from test where id = 2; # Still shows 2 => 20
+connection con2;
+select * from test where id = 1; # Still shows 1 => 10
+connection con1;
+commit;
+connection con2;
+commit;
+
+
+### Prevents Observed Transaction Vanishes (OTV)
+--source hermitage_init.inc
+
+connection con1;
+update test set value = 11 where id = 1;
+update test set value = 19 where id = 2;
+connection con2;
+send update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+reap;
+connection con3;
+select * from test; # Shows 1 => 11, 2 => 19
+connection con2;
+update test set value = 18 where id = 2;
+connection con3;
+select * from test; # Shows 1 => 11, 2 => 19
+connection con2;
+commit;
+connection con3;
+select * from test; # Shows 1 => 12, 2 => 18
+commit;
+
+
+### Predicate-Many-Preceders (PMP) -- RC does not prevent, RR prevents
+--source hermitage_init.inc
+
+connection con1;
+select * from test where value = 30;
+connection con2;
+insert into test (id, value) values(3, 30);
+commit;
+connection con1;
+# RC: Returns the newly inserted row
+# RR: Still returns nothing
+select * from test where value % 3 = 0;
+commit;
+
+--source hermitage_init.inc
+connection con1;
+update test set value = value + 10;
+connection con2;
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors';
+select * from test;
+send delete from test where value = 20;
+connection con1;
+commit;
+connection con2;
+if ($trx_isolation == "READ COMMITTED")
+{
+ reap;
+ # RC: Returns 2 => 30
+ select * from test;
+}
+if ($trx_isolation == "REPEATABLE READ")
+{
+ --error ER_LOCK_DEADLOCK
+ reap;
+ select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors';
+
+}
+commit;
+
+
+### Lost Update (P4) -- RC does not prevent, RR prevents
+--source hermitage_init.inc
+
+connection con1;
+select * from test where id = 1;
+connection con2;
+select * from test where id = 1;
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+send update test set value = 12 where id = 1;
+connection con1;
+commit;
+connection con2;
+if ($trx_isolation == "READ COMMITTED")
+{
+ reap;
+ # RC: Returns 1 => 12
+ select * from test;
+}
+if ($trx_isolation == "REPEATABLE READ")
+{
+ --error ER_LOCK_DEADLOCK
+ reap;
+}
+commit;
+
+
+### Read Skew (G-single) -- RC does not prevent, RR prevents
+--source hermitage_init.inc
+
+connection con1;
+select * from test where id = 1;
+connection con2;
+select * from test where id = 1;
+select * from test where id = 2;
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+select * from test where id = 2; # RC shows 18, RR shows 20
+commit;
+
+# test using predicate dependencies
+--source hermitage_init.inc
+
+connection con1;
+select * from test where value % 5 = 0;
+connection con2;
+update test set value = 12 where value = 10;
+commit;
+connection con1;
+# RC: returns 1 => 12, RR: returns nothing
+select * from test where value % 3 = 0;
+commit;
+
+# on a write predicate
+--source hermitage_init.inc
+
+connection con1;
+select * from test where id = 1;
+connection con2;
+select * from test;
+update test set value = 12 where id = 1;
+update test set value = 18 where id = 2;
+commit;
+connection con1;
+if ($trx_isolation == "READ COMMITTED")
+{
+ delete from test where value = 20; # doesn't delete anything
+ select * from test where id = 2; # shows 2 => 18
+}
+if ($trx_isolation == "REPEATABLE READ")
+{
+ --error ER_LOCK_DEADLOCK
+ delete from test where value = 20;
+}
+commit;
+
+
+### Write Skew (G2-item) -- Neither RC nor RR prevents
+--source hermitage_init.inc
+
+connection con1;
+select * from test where id in (1,2);
+connection con2;
+select * from test where id in (1,2);
+connection con1;
+update test set value = 11 where id = 1;
+connection con2;
+update test set value = 21 where id = 2;
+connection con1;
+commit;
+connection con2;
+commit;
+
+### Anti-Dependency Cycles (G2) -- Neither RC nor RR prevents
+--source hermitage_init.inc
+
+connection con1;
+select * from test where value % 3 = 0;
+connection con2;
+select * from test where value % 3 = 0;
+connection con1;
+insert into test (id, value) values(3, 30);
+connection con2;
+insert into test (id, value) values(4, 42);
+connection con1;
+commit;
+connection con2;
+commit;
+select * from test where value % 3 = 0; # Either isolation level: returns 3 => 30, 4 => 42
+connection con1;
+select * from test where value % 3 = 0;
+
+
+connection default;
+drop table test;
+
+disconnect con1;
+disconnect con2;
+disconnect con3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test
new file mode 100644
index 00000000000..e4138e8d89f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test
@@ -0,0 +1,10 @@
+--source include/have_rocksdb.inc
+
+# Hermitage is an attempt to test transaction isolation levels.
+# https://github.com/ept/hermitage
+
+let $trx_isolation = READ COMMITTED;
+--source hermitage.inc
+
+let $trx_isolation = REPEATABLE READ;
+--source hermitage.inc
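The driver above runs hermitage.inc once per isolation level. For a quick manual check of just the Lost Update (P4) case outside of mysqltest, the same steps can be typed into two client sessions; a minimal sketch, assuming the test table from hermitage.inc exists and currently holds the rows (1,10) and (2,20):

    -- Both sessions:
    SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
    BEGIN;
    SELECT * FROM test WHERE id = 1;          -- both still see 1 => 10

    -- Session 1:
    UPDATE test SET value = 11 WHERE id = 1;
    COMMIT;

    -- Session 2 (after session 1 commits):
    UPDATE test SET value = 12 WHERE id = 1;
    -- Under REPEATABLE READ this update fails with ER_LOCK_DEADLOCK;
    -- under READ COMMITTED it succeeds and the row ends up as 1 => 12,
    -- i.e. session 1's update is silently lost.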
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc
new file mode 100644
index 00000000000..4f3f03efab0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc
@@ -0,0 +1,8 @@
+connection con1;
+truncate table test;
+insert into test (id, value) values (1, 10), (2, 20);
+begin;
+connection con2;
+begin;
+connection con3;
+begin;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test
new file mode 100644
index 00000000000..9ee23a88bbe
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test
@@ -0,0 +1,24 @@
+--source include/have_rocksdb.inc
+
+#
+# Information Schema DDL
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS is_ddl_t1;
+DROP TABLE IF EXISTS is_ddl_t2;
+--enable_warnings
+
+CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT,
+ PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf')
+ ENGINE = ROCKSDB;
+
+CREATE TABLE is_ddl_t2 (x INT, y INT, z INT,
+ PRIMARY KEY (z, y) COMMENT 'zy_cf',
+ KEY (x)) ENGINE = ROCKSDB;
+
+SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%';
+
+# cleanup
+DROP TABLE is_ddl_t1;
+DROP TABLE is_ddl_t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.inc b/storage/rocksdb/mysql-test/rocksdb/t/index.inc
new file mode 100644
index 00000000000..6b4e4ff233b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index.inc
@@ -0,0 +1,121 @@
+#
+# Basic syntax related to indexes:
+# unique and non-unique keys,
+# single- and multi-column keys,
+# index option COMMENT.
+#
+# See other index* tests for operations
+# which are less likely to be supported
+#
+# PRIMARY KEY syntax is covered in index_primary test.
+# Index types BTREE|HASH -- in index_type_btree|hash tests.
+# SPATIAL -- in type_spatial_indexes test.
+# FULLTEXT -- in fulltext_search test.
+# KEY_BLOCK_SIZE -- in index_key_block_size test.
+#
+# Usage, when calling this test from another test:
+#
+# A calling test may define $index_type, in which case
+# a USING clause will be added to the key definitions.
+#
+
+################################################
+# TODO:
+# A part of the test is disabled because unique indexes
+# are not currently supported
+################################################
+
+
+let $using_index_type = ;
+if ($index_type)
+{
+ let $using_index_type = USING $index_type;
+}
+
+
+eval CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ KEY $using_index_type (a)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ KEY a_b $using_index_type (a,b) COMMENT 'a_b index'
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ KEY $using_index_type (a),
+ KEY $using_index_type (b)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+DROP TABLE t1;
+
+--disable_parsing
+
+eval CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ UNIQUE INDEX $using_index_type (a)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+--error ER_DUP_ENTRY,ER_DUP_KEY
+INSERT INTO t1 (a,b) VALUES (1,'c');
+
+DROP TABLE t1;
+
+--source drop_table_sync.inc
+
+--enable_parsing
+
+#
+# ALTER TABLE
+#
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'z');
+
+eval ALTER TABLE t1 ADD KEY (a) $using_index_type COMMENT 'simple index on a';
+--replace_column 7 #
+SHOW INDEX FROM t1;
+ALTER TABLE t1 DROP KEY a;
+DROP TABLE t1;
+
+--disable_parsing
+
+eval CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ UNIQUE INDEX $using_index_type (a)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+--error ER_DUP_ENTRY,ER_DUP_KEY
+INSERT INTO t1 (a,b) VALUES (1,'c');
+
+ALTER TABLE t1 DROP INDEX a;
+INSERT INTO t1 (a,b) VALUES (1,'c');
+--error ER_DUP_ENTRY
+eval ALTER TABLE t1 ADD UNIQUE INDEX a(a) $using_index_type;
+DROP TABLE t1;
+
+--enable_parsing
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.test b/storage/rocksdb/mysql-test/rocksdb/t/index.test
new file mode 100644
index 00000000000..f12e7714bc3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic syntax related to indexes:
+# unique and non-unique keys,
+# single- and multi-column keys,
+# index option COMMENT.
+#
+# See other index* tests for operations
+# which are less likely to be supported
+#
+# PRIMARY KEY syntax is covered in index_primary test.
+# Index types BTREE|HASH -- in index_type_btree|hash tests.
+# SPATIAL -- in type_spatial_indexes test.
+# FULLTEXT -- in fulltext_search test.
+# KEY_BLOCK_SIZE -- in index_key_block_size test.
+#
+
+# (Default index type)
+
+--source index.inc
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt
new file mode 100644
index 00000000000..436edf2b40c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt
@@ -0,0 +1 @@
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test
new file mode 100644
index 00000000000..1021846c508
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test
@@ -0,0 +1,51 @@
+--source include/have_rocksdb.inc
+
+#
+# Information Schema index file map
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+
+CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB;
+CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB;
+
+INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
+INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4);
+
+COMMIT;
+
+# Flush memtable out to SST
+SET GLOBAL rocksdb_force_flush_memtable_now = 1;
+
+###############################################################################
+# Test that expected index_file_map data exists
+###############################################################################
+
+# Return the data for the primary key of t1
+--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+ (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+ WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
+
+# Return the data for the secondary index of t1
+--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+ (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+ WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j");
+
+# Return the data for the primary index of t2
+--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
+WHERE INDEX_NUMBER =
+ (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
+ WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY");
+
+# cleanup
+DROP TABLE t1;
+DROP TABLE t2;
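The three lookups above can also be combined into a single join between ROCKSDB_DDL and ROCKSDB_INDEX_FILE_MAP; a sketch, selecting only columns already used by tests in this suite and assuming the memtable has been flushed first (SET GLOBAL rocksdb_force_flush_memtable_now = 1, as above) so the indexes actually appear in SST files:

    SELECT d.TABLE_NAME, d.INDEX_NAME, m.NUM_ROWS, m.ENTRY_DELETES
      FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP m
      JOIN INFORMATION_SCHEMA.ROCKSDB_DDL d
        ON m.INDEX_NUMBER = d.INDEX_NUMBER
     WHERE d.TABLE_NAME IN ('t1', 't2');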
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test
new file mode 100644
index 00000000000..f156aec0021
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test
@@ -0,0 +1,70 @@
+--source include/have_rocksdb.inc
+
+#
+# KEY_BLOCK_SIZE index option.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ KEY (a) KEY_BLOCK_SIZE=8
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT PRIMARY KEY,
+ KEY ind1(b ASC) KEY_BLOCK_SIZE=0
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value'
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ KEY a_b(a,b) KEY_BLOCK_SIZE=8192
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+
+DROP TABLE t1;
+
+--source drop_table_sync.inc
+
+#
+# ALTER TABLE
+#
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b) VALUES (100,'z');
+
+ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192;
+--replace_column 7 #
+SHOW INDEX FROM t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test
new file mode 100644
index 00000000000..3abd2dd05fe
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test
@@ -0,0 +1,64 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic syntax related to primary keys
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY,
+ b CHAR(8)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW KEYS IN t1;
+
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (1,'c');
+
+DROP TABLE t1;
+
+--error ER_MULTIPLE_PRI_KEY
+CREATE TABLE t1 (a INT PRIMARY KEY,
+ b CHAR(8) PRIMARY KEY
+) ENGINE=rocksdb;
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8),
+ PRIMARY KEY (a,b)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (1,'b');
+DROP TABLE t1;
+
+# KEY in a column definition means PK!
+
+CREATE TABLE t1 (a INT KEY,
+ b CHAR(8),
+ KEY (b)
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT,
+ b CHAR(8) PRIMARY KEY
+) ENGINE=rocksdb;
+
+--replace_column 7 #
+SHOW INDEX IN t1;
+
+--error ER_MULTIPLE_PRI_KEY
+ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a);
+--replace_column 7 #
+SHOW KEYS IN t1;
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test
new file mode 100644
index 00000000000..4adc5b55329
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test
@@ -0,0 +1,12 @@
+--source include/have_rocksdb.inc
+
+#
+# Index type BTREE
+#
+
+let $index_type = BTREE;
+
+--source index.inc
+
+let $index_type =;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test
new file mode 100644
index 00000000000..f3dc9cf5f10
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test
@@ -0,0 +1,12 @@
+--source include/have_rocksdb.inc
+
+#
+# Index type HASH
+#
+
+let $index_type = HASH;
+
+--source index.inc
+
+let $index_type =;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt
new file mode 100644
index 00000000000..a12f583ef82
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt
@@ -0,0 +1 @@
+--force-restart --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test
new file mode 100644
index 00000000000..39bae56bea6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test
@@ -0,0 +1,72 @@
+--source include/have_rocksdb.inc
+--source include/have_log_bin.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+--enable_warnings
+
+--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
+--replace_result $max_index_id max_index_id
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+
+CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
+INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
+
+--let $MASTER_UUID = query_get_value(SELECT @@SERVER_UUID, @@SERVER_UUID, 1)
+--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
+--replace_result $MASTER_UUID uuid $max_index_id max_index_id
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
+
+CREATE INDEX tindex1 on t1 (i1);
+--let $start_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
+
+CREATE INDEX tindex2 on t1 (i2);
+--let $end_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
+
+if ($end_max_index_id <= $start_max_index_id) {
+ echo Max index ID did not increase;
+}
+
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS';
+
+CREATE TABLE t2 (
+ a int,
+ b int,
+ c int,
+ d int,
+ e int,
+ PRIMARY KEY (a) COMMENT "cf_a",
+ KEY (b) COMMENT "cf_b",
+ KEY (c) COMMENT "cf_c",
+ KEY (d) COMMENT "$per_index_cf",
+ KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB;
+
+select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS';
+
+CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB;
+insert into t3 (a) values (1), (2), (3);
+SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK;
+--let $t3_index_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', INDEX_NUMBER, 1)
+--let $t3_cf_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', COLUMN_FAMILY, 1)
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+DROP TABLE t3;
+--let $result = query_get_value("SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'DDL_DROP_INDEX_ONGOING' AND NAME LIKE 'cf_id:$t3_cf_id,index_id:$t3_index_id'", NAME, 1)
+--echo $result
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+--echo next line shouldn't cause assertion to fail
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
+SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK';
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK;
+
+DROP TABLE t1;
+DROP TABLE t2;
+
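The query_get_value() calls above boil down to a plain query against ROCKSDB_GLOBAL_INFO. A sketch of verifying by hand that creating an index advances the global max index id (tindex3 is a hypothetical extra index; the numeric values are instance-specific):

    SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'MAX_INDEX_ID';
    CREATE INDEX tindex3 ON t1 (i2);   -- hypothetical additional index
    SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'MAX_INDEX_ID';
    -- the VALUE column is expected to be larger after the CREATE INDEX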
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc
new file mode 100644
index 00000000000..c798bb91cfa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc
@@ -0,0 +1,35 @@
+# This inc script creates two procedures -- save_read_stats() and
+# get_read_stats(). get_read_stats() prints the changes in t1's rows_requested
+# counter and in the global rocksdb_rows_read, rocksdb_rows_updated, and
+# rocksdb_rows_deleted status values accumulated since save_read_stats() was called.
+
+delimiter //;
+create procedure save_read_stats()
+begin
+ select rows_requested into @rq from information_schema.table_statistics
+ where table_schema=database() and table_name='t1';
+ select variable_value into @rr from information_schema.global_status
+ where variable_name='rocksdb_rows_read';
+ select variable_value into @ru from information_schema.global_status
+ where variable_name='rocksdb_rows_updated';
+ select variable_value into @rd from information_schema.global_status
+ where variable_name='rocksdb_rows_deleted';
+end//
+
+create procedure get_read_stats()
+begin
+ select rows_requested - @rq as rows_requested from
+ information_schema.table_statistics
+ where table_schema=database() and table_name='t1';
+ select variable_value - @rr as rows_read from
+ information_schema.global_status
+ where variable_name='rocksdb_rows_read';
+ select variable_value - @ru as rows_updated from
+ information_schema.global_status
+ where variable_name='rocksdb_rows_updated';
+ select variable_value - @rd as rows_deleted from
+ information_schema.global_status
+ where variable_name='rocksdb_rows_deleted';
+end//
+delimiter ;//
+
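A minimal usage sketch for these procedures (the statements in the middle are hypothetical; note that the procedures hard-code table_name='t1' in their table_statistics lookup, so the table of interest must be called t1):

    CALL save_read_stats();
    SELECT * FROM t1 WHERE pk = 1;     -- hypothetical workload of interest
    UPDATE t1 SET a = a + 1 WHERE pk = 1;
    CALL get_read_stats();             -- prints the rows_requested, rows_read,
                                       -- rows_updated and rows_deleted deltas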
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test
new file mode 100644
index 00000000000..99ad24f09fc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test
@@ -0,0 +1,35 @@
+--source include/have_rocksdb.inc
+
+# Make sure that the InnoDB information schema tables are disabled when InnoDB
+# is turned off and that attempting to access them doesn't crash.
+
+SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES;
+SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert.test b/storage/rocksdb/mysql-test/rocksdb/t/insert.test
new file mode 100644
index 00000000000..14cfe1cadb8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/insert.test
@@ -0,0 +1,99 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic INSERT statements
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+# INSERT [INTO] .. VALUES|VALUE ..
+
+INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106);
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT t1 VALUE (10,'foo',107),(11,'abc',108);
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT INTO t1 (b,a) VALUES ('test',0);
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL);
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT t1 (a) VALUE (10),(20);
+--sorted_result
+SELECT a,b FROM t1;
+
+# INSERT [INTO] .. SET
+
+INSERT INTO t1 SET a = 11, b = 'f';
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT t1 SET b = DEFAULT;
+--sorted_result
+SELECT a,b FROM t1;
+
+
+# INSERT .. SELECT
+
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+INSERT INTO t2 SELECT a,b,pk FROM t1;
+INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo';
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT t1 (a,b) SELECT a,b FROM t1;
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1, t2;
+
+#
+# Transactional INSERT
+#
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+BEGIN;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo');
+INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc');
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+BEGIN;
+INSERT INTO t1 (b,a) VALUES ('test',0);
+SAVEPOINT spt1;
+INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT);
+RELEASE SAVEPOINT spt1;
+INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT);
+ROLLBACK;
+--sorted_result
+SELECT a,b FROM t1;
+
+BEGIN;
+INSERT t1 (a) VALUE (10),(20);
+SAVEPOINT spt1;
+INSERT INTO t1 SET a = 11, b = 'f';
+INSERT t1 SET b = DEFAULT;
+--error ER_UNKNOWN_ERROR
+ROLLBACK TO SAVEPOINT spt1;
+INSERT INTO t1 (b,a) VALUES ('test1',10);
+--error ER_UNKNOWN_ERROR
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt
new file mode 100644
index 00000000000..2d5e33057f6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt
@@ -0,0 +1,6 @@
+--rocksdb_write_disable_wal=1
+--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=256;level0_stop_writes_trigger=256;max_write_buffer_number=16;compression_per_level=kNoCompression;memtable=vector:1024
+--rocksdb_override_cf_options=__system__={memtable=skip_list:16}
+--rocksdb_compaction_sequential_deletes=0
+--rocksdb_compaction_sequential_deletes_window=0
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test
new file mode 100644
index 00000000000..3e1cf7375e8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test
@@ -0,0 +1,41 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+create table t1(
+ id bigint not null primary key,
+ i1 bigint, #unique
+ i2 bigint, #repeating
+ c1 varchar(20), #unique
+ c2 varchar(20), #repeating
+ index t1_2(i1)
+) engine=rocksdb;
+
+--disable_query_log
+set rocksdb_bulk_load=1;
+let $i=0;
+while ($i<50000)
+{
+ inc $i;
+ eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10);
+}
+set rocksdb_bulk_load=0;
+--enable_query_log
+
+select count(*), sum(id), sum(i1), sum(i2) from t1;
+
+# restart the server without the load-optimized config
+let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
+--exec echo "wait" > $restart_file
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+-- exec echo "restart:--rocksdb_write_disable_wal=0 --rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+-- enable_reconnect
+-- source include/wait_until_connected_again.inc
+
+select count(*), sum(id), sum(i1), sum(i2) from t1;
+
+drop table t1;
+
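The block hidden behind --disable_query_log above reduces to the following session pattern; a sketch showing a single representative row (the test's loop generates 50,000 of them):

    SET rocksdb_bulk_load = 1;
    INSERT INTO t1 (id, i1, i2, c1, c2) VALUES (1, 1, 0, 1, 0);
    -- ... repeated with increasing ids up to 50000 ...
    SET rocksdb_bulk_load = 0;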
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test
new file mode 100644
index 00000000000..b2f37a07999
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test
@@ -0,0 +1,93 @@
+--source include/have_rocksdb.inc
+
+#
+# INSERT statements for tables with keys
+#
+
+##################################################
+# TODO:
+# A part of the test is disabled because currently
+# unique indexes are not supported
+##################################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (3,'a'),(0,'');
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo #----------------------------------------
+--echo # UNIQUE KEYS are not supported currently
+--echo #-----------------------------------------
+
+--disable_parsing
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (3,'a'),(0,'');
+INSERT INTO t1 (a,b) VALUES (0,'');
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (3,'a'),(4,'d') ON DUPLICATE KEY UPDATE a = a+10;
+
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+INSERT INTO t1 (a,b) VALUES (100,'b'), (2,'c');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE a = a+VALUES(a);
+--sorted_result
+SELECT a,b FROM t1;
+
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (101,'x'),(101,'x');
+DROP TABLE t1;
+
+--enable_parsing
+
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f');
+INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (3,'a'),(0,'');
+INSERT INTO t1 (a,b) VALUES (0,'');
+--sorted_result
+SELECT a,b FROM t1;
+
+INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z');
+INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b);
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test
new file mode 100644
index 00000000000..b0b3eb7a8fd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+create table t1 (
+ id int,
+ value int,
+ primary key (id)
+) engine=rocksdb;
+
+insert into t1 values(1,1),(2,2);
+set autocommit=0;
+
+begin;
+insert into t1 values (50,50);
+select * from t1;
+
+update t1 set id=id+100;
+
+select * from t1;
+
+rollback;
+set autocommit=1;
+
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt
new file mode 100644
index 00000000000..436edf2b40c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt
@@ -0,0 +1 @@
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test
new file mode 100644
index 00000000000..743bf7dd7a1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test
@@ -0,0 +1,19 @@
+--source include/have_rocksdb.inc
+
+create table ten(a int primary key);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+
+create table t100(pk int primary key, a int, b int, key(a));
+insert into t100 select a,a,a from test.one_k;
+
+set global rocksdb_force_flush_memtable_now=1;
+select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes;
+
+update t100 set a=a+1;
+set global rocksdb_force_flush_memtable_now=1;
+select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes;
+
+drop table ten, t100, one_k;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue111.test b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test
new file mode 100644
index 00000000000..671ea4708d6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test
@@ -0,0 +1,38 @@
+--source include/have_rocksdb.inc
+
+connect (con2,localhost,root,,);
+connection default;
+
+create table t1 (
+ pk int not null primary key,
+ col1 int not null,
+ col2 int not null,
+ key(col1)
+) engine=rocksdb;
+
+create table ten(a int primary key);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+
+insert into t1 select a,a,a from one_k;
+
+--echo # Start the transaction, get the snapshot
+begin;
+select * from t1 where col1<10;
+
+--echo # Connect with another connection and make a conflicting change
+connection con2;
+
+begin;
+update t1 set col2=123456 where pk=0;
+commit;
+
+connection default;
+
+--error ER_LOCK_DEADLOCK
+update t1 set col2=col2+1 where col1 < 10 limit 5;
+
+disconnect con2;
+drop table t1, ten, one_k;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue290.test b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test
new file mode 100644
index 00000000000..99c0479ec21
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test
@@ -0,0 +1,40 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE `linktable` (
+ `id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `visibility` tinyint(3) NOT NULL DEFAULT '0',
+ `data` varchar(255) NOT NULL DEFAULT '',
+ `time` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `version` int(11) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk',
+ KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type'
+) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin;
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $insert = INSERT INTO linktable (id1, link_type, id2) values (1, 1, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+set global rocksdb_force_flush_memtable_now=1;
+
+insert into linktable (id1, link_type, id2) values (2, 1, 1);
+insert into linktable (id1, link_type, id2) values (2, 1, 2);
+insert into linktable (id1, link_type, id2) values (2, 1, 3);
+insert into linktable (id1, link_type, id2) values (2, 1, 4);
+insert into linktable (id1, link_type, id2) values (2, 1, 5);
+insert into linktable (id1, link_type, id2) values (2, 1, 6);
+insert into linktable (id1, link_type, id2) values (2, 1, 7);
+insert into linktable (id1, link_type, id2) values (2, 1, 8);
+insert into linktable (id1, link_type, id2) values (2, 1, 9);
+insert into linktable (id1, link_type, id2) values (2, 1, 10);
+
+--replace_column 9 #
+explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
+
+drop table linktable;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue314.test b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test
new file mode 100644
index 00000000000..2059eef2195
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a int);
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+INSERT INTO t1 VALUES(1);
+--error ER_UNKNOWN_ERROR
+select * from t1;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+select * from t1;
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test
new file mode 100644
index 00000000000..bedf87a8efa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = READ COMMITTED;
+
+--source transaction_isolation.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test
new file mode 100644
index 00000000000..8ee3af60bf3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = READ UNCOMMITTED;
+--source transaction_isolation.inc
+--source consistent_snapshot.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test
new file mode 100644
index 00000000000..cf29073f69e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = REPEATABLE READ;
+--source transaction_isolation.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test
new file mode 100644
index 00000000000..9b5db0e8998
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let $trx_isolation = SERIALIZABLE;
+--source transaction_isolation.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc
new file mode 100644
index 00000000000..5d3678f5f27
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc
@@ -0,0 +1,117 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic LOAD DATA statements
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+let $datadir = `SELECT @@datadir`;
+
+--write_file $datadir/se_loaddata.dat
+1,foo,
+2,bar,
+3,,
+4,abc,
+EOF
+
+--replace_result $datadir <DATADIR>
+eval
+LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1
+ FIELDS TERMINATED BY ',' (a,b);
+--sorted_result
+SELECT a,b FROM t1;
+
+--replace_result $datadir <DATADIR>
+eval
+LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1
+ CHARACTER SET utf8 COLUMNS TERMINATED BY ','
+ ESCAPED BY '/' (a,b);
+--sorted_result
+SELECT a,b FROM t1;
+
+--remove_file $datadir/se_loaddata.dat
+--write_file $datadir/se_loaddata.dat
+5;YYY;
+102;'zzz';
+0;'test';
+EOF
+
+--replace_result $datadir <DATADIR>
+eval
+LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1
+ FIELDS TERMINATED BY ';'
+ (a) SET b='loaded';
+
+--sorted_result
+SELECT a,b FROM t1;
+
+--remove_file $datadir/se_loaddata.dat
+--write_file $datadir/se_loaddata.dat
+prefix:5;'foo';
+prefix:6;'';
+prefix:100;foo;
+prefix:7;'test';suffix
+101;abc;
+102;'z';
+prefix:0;;
+EOF
+
+--replace_result $datadir <DATADIR>
+eval
+LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1
+ FIELDS TERMINATED BY ';'
+ OPTIONALLY ENCLOSED BY ''''
+ LINES STARTING BY 'prefix:'
+ IGNORE 2 LINES (a,b);
+
+--sorted_result
+SELECT a,b FROM t1;
+
+--remove_file $datadir/se_loaddata.dat
+--write_file $datadir/se_loaddata.dat
+1 foo
+2 bar
+3
+4 abc
+EOF
+
+--replace_result $datadir <DATADIR>
+eval
+LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1;
+--sorted_result
+SELECT a,b FROM t1;
+--remove_file $datadir/se_loaddata.dat
+
+
+--write_file $datadir/se_replacedata.dat
+1 aaa
+2 bbb
+3 ccc
+4 ddd
+EOF
+
+--replace_result $datadir <DATADIR>
+if ($skip_unique_check == 1)
+{
+ --error ER_UNKNOWN_ERROR
+ eval
+ LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1;
+}
+if ($skip_unique_check == 0)
+{
+ eval
+ LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1;
+}
+--sorted_result
+SELECT a,b FROM t1;
+--remove_file $datadir/se_replacedata.dat
+
+
+# Cleanup
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test
new file mode 100644
index 00000000000..837fa746ed7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+let $skip_unique_check = 0;
+--source loaddata.inc
+let $skip_unique_check = 1;
+set session rocksdb_skip_unique_check=1;
+--source loaddata.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock.test b/storage/rocksdb/mysql-test/rocksdb/t/lock.test
new file mode 100644
index 00000000000..015a08bef57
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/lock.test
@@ -0,0 +1,202 @@
+--source include/have_rocksdb.inc
+
+# Standard table locking:
+# LOCK TABLE .. READ
+# LOCK TABLE .. [LOW_PRIORITY] WRITE
+# UNLOCK TABLES
+#
+# and global locking:
+# FLUSH TABLES [..] WITH READ LOCK
+#
+############################################
+# TODO:
+# A part of the test is currently disabled
+# because temporary tables are not supported
+############################################
+
+--enable_connect_log
+
+# Save the initial number of concurrent sessions.
+--source include/count_sessions.inc
+
+connect (con1,localhost,root,,);
+SET lock_wait_timeout=1;
+
+connection default;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2, t3;
+--enable_warnings
+
+CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3);
+
+# LOW_PRIORITY has no effect, but is still syntactically correct
+LOCK TABLE t1 LOW_PRIORITY WRITE;
+SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2;
+
+UPDATE t1 SET id=-1 WHERE id=1;
+
+connection con1;
+# With WRITE lock held by connection 'default',
+# nobody else can access the table
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT id,id2 FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLE t1 READ;
+
+connection default;
+LOCK TABLE t1 READ;
+--error ER_TABLE_NOT_LOCKED_FOR_WRITE
+UPDATE t1 SET id=1 WHERE id=1;
+
+connection con1;
+# With READ lock held by connection 'default',
+# it should be possible to read from the table
+# or acquire another READ lock,
+# but not update it or acquire WRITE lock
+SELECT COUNT(DISTINCT id) FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET id=2 WHERE id=2;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLE t1 WRITE;
+LOCK TABLE t1 READ;
+UNLOCK TABLES;
+
+
+--connection default
+
+--error ER_TABLE_NOT_LOCKED
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+
+--disable_parsing
+
+CREATE TEMPORARY TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+DROP TABLE IF EXISTS t2;
+
+--enable_parsing
+
+UNLOCK TABLES;
+
+CREATE TABLE t2 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLE t1 WRITE, t2 WRITE;
+INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1;
+UPDATE t1 SET id=1 WHERE id=-1;
+DROP TABLE t1,t2;
+
+#
+# INSERT ... SELECT with lock tables
+#
+
+CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (nr,nm) VALUES (1,3);
+INSERT INTO t2 (nr,nm) VALUES (2,4);
+
+LOCK TABLES t1 WRITE, t2 READ;
+INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3;
+INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4;
+UNLOCK TABLES;
+
+LOCK TABLES t1 WRITE;
+--error ER_TABLE_NOT_LOCKED
+INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1;
+UNLOCK TABLES;
+LOCK TABLES t1 WRITE, t1 AS t1_alias READ;
+INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias;
+--error ER_TABLE_NOT_LOCKED
+DROP TABLE t1,t2;
+UNLOCK TABLES;
+DROP TABLE t1,t2;
+
+#
+# Check that a dropped table is removed from a lock
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE;
+# This removes one table after the other from the lock.
+DROP TABLE t2, t3, t1;
+#
+# Check that a lock merge works
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb;
+LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ;
+
+ALTER TABLE t2 ADD COLUMN c2 INT;
+
+DROP TABLE t1, t2, t3;
+
+# FLUSH TABLES is not permitted while there is an active LOCK TABLES .. READ;
+# FLUSH TABLES .. WITH READ LOCK should be used instead
+# (and for other connections the table remains locked)
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+
+LOCK TABLE t1 READ, t2 READ;
+--error ER_TABLE_NOT_LOCKED_FOR_WRITE
+FLUSH TABLE t1;
+--error ER_TABLE_NOT_LOCKED_FOR_WRITE
+FLUSH TABLES;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+FLUSH TABLES t1, t2 WITH READ LOCK;
+UNLOCK TABLES;
+
+FLUSH TABLES t1, t2 WITH READ LOCK;
+
+--connection con1
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+
+--connection default
+UNLOCK TABLES;
+
+# Global lock
+
+FLUSH TABLES WITH READ LOCK;
+
+--connection con1
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+
+--connection default
+UNLOCK TABLES;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+FLUSH TABLES WITH READ LOCK;
+--error ER_CANT_UPDATE_WITH_READLOCK
+DROP TABLE t1, t2;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+
+--disconnect con1
+
+#
+# Ensure that FLUSH TABLES doesn't substitute a base locked table
+# with a temporary one.
+#
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb;
+
+LOCK TABLE t1 WRITE, t2 WRITE;
+
+--disable_parsing
+
+CREATE TEMPORARY TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb;
+FLUSH TABLE t1;
+DROP TEMPORARY TABLE t1;
+
+--enable_parsing
+
+SELECT a,b FROM t1;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+
+# Check that all connections opened by test cases in this file are really
+# gone so execution of other tests won't be affected by their presence.
+--source include/wait_until_count_sessions.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test
new file mode 100644
index 00000000000..d8a6bde45c8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test
@@ -0,0 +1,110 @@
+--source include/have_rocksdb.inc
+
+--enable_connect_log
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+#1. Using all primary key columns, with equality conditions
+connection con1;
+CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB;
+
+#1.1 SELECT FOR UPDATE
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+
+connection con2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE;
+
+#1.2 UPDATE
+connection con1;
+ROLLBACK;
+BEGIN;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+
+connection con2;
+ROLLBACK;
+BEGIN;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1;
+
+#1.3 DELETE
+connection con1;
+ROLLBACK;
+BEGIN;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+
+connection con2;
+ROLLBACK;
+BEGIN;
+--error ER_LOCK_WAIT_TIMEOUT
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0;
+
+--disable_parsing
+#
+# The following is commented out because RocksDB's Transaction API doesn't
+# "support" READ COMMITTED; in particular, it doesn't release row locks
+# after each statement. (MyRocks is able to request a new snapshot for
+# every statement, but this won't free the locks. TODO: Is the behavior
+# that is tested below really needed?)
+#
+connection con1;
+ROLLBACK;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+
+connection con2;
+ROLLBACK;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE;
+SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE;
+
+connection con1;
+ROLLBACK;
+BEGIN;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+
+connection con2;
+ROLLBACK;
+BEGIN;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1;
+UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1;
+
+connection con1;
+ROLLBACK;
+BEGIN;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+
+connection con2;
+ROLLBACK;
+BEGIN;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1;
+DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0;
+
+connection con1;
+ROLLBACK;
+connection con2;
+ROLLBACK;
+
+--enable_parsing
+connection default;
+disconnect con1;
+disconnect con2;
+
+DROP TABLE t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test
new file mode 100644
index 00000000000..035046ae368
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test
@@ -0,0 +1,67 @@
+--source include/have_rocksdb.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case1_1.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case1_1.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case1_2.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case1_2.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case2.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case2.inc
+
+# Rerun the case2 tests with rocksdb_lock_scanned_rows on
+let $lock_scanned_rows=1;
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case2.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case2.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case3.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case3.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case4.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case4.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case5.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case5.inc
+
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case6.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case6.inc
+
+let $lock_scanned_rows=0;
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case7.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case7.inc
+
+# Rerun the case7 tests with rocksdb_lock_scanned_rows on
+let $lock_scanned_rows=1;
+let $isolation_level = REPEATABLE READ;
+--source suite/rocksdb/include/locking_issues_case7.inc
+
+let $isolation_level = READ COMMITTED;
+--source suite/rocksdb/include/locking_issues_case7.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/misc.test b/storage/rocksdb/mysql-test/rocksdb/t/misc.test
new file mode 100644
index 00000000000..b8efd0389d2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/misc.test
@@ -0,0 +1,45 @@
+--source include/have_rocksdb.inc
+
+#
+# Miscellaneous statements that are not specific to any particular engine,
+# added to provide function coverage for handler.cc and handler.h.
+# The test can be disabled or removed later.
+#
+
+# hits get_error_message(int, String*)
+
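+# The event row is inserted directly into mysql.event (bypassing CREATE EVENT
+# checks) with a SECOND_MICROSECOND interval, which events do not support yet,
+# so the subsequent SHOW EVENTS should fail with ER_NOT_SUPPORTED_YET.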
+INSERT INTO mysql.event (
+ db,
+ name,
+ body,
+ definer,
+ interval_value,
+ interval_field,
+ originator,
+ character_set_client,
+ collation_connection,
+ db_collation,
+ body_utf8)
+values (
+ database(),
+ "ev1",
+ "select 1",
+ user(),
+ 100,
+ "SECOND_MICROSECOND",
+ 1,
+ 'utf8',
+ 'utf8_general_ci',
+ 'utf8_general_ci',
+ 'select 1');
+
+--error ER_NOT_SUPPORTED_YET
+SHOW EVENTS;
+DROP EVENT ev1;
+
+# hits get_foreign_key_list(THD*, List<st_foreign_key_info>*)
+--sorted_result
+SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME
+FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt
new file mode 100644
index 00000000000..53303252212
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt
@@ -0,0 +1 @@
+--binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test
new file mode 100644
index 00000000000..0c04b60427e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+################################################################################
+# Test the --skip-empty-trans option for mysqlbinlog with rocksdb
+################################################################################
+--disable_warnings
+--source include/have_gtid.inc
+--enable_warnings
+
+reset master;
+set timestamp=1000000000;
+let $storage_engine=rocksdb;
+
+# Test row based replication
+set SESSION binlog_format = 'ROW';
+--source include/mysqlbinlog_gtid_skip_empty_trans_input.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt
new file mode 100644
index 00000000000..e41620e94f6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt
@@ -0,0 +1 @@
+--force-restart --binlog_format=row
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test
new file mode 100644
index 00000000000..107790f0c9a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test
@@ -0,0 +1,65 @@
+--source include/have_rocksdb.inc
+
+--source include/have_log_bin.inc
+
+--enable_connect_log
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+drop table if exists r1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb;
+insert into r1 values (1,1,1,1,1,1,1,1);
+insert into r1 values (1,1,1,2,2,2,2,2);
+insert into r1 values (1,1,2,1,3,3,3,3);
+insert into r1 values (1,1,2,2,4,4,4,4);
+insert into r1 values (1,2,1,1,5,5,5,5);
+insert into r1 values (1,2,1,2,6,6,6,6);
+insert into r1 values (1,2,2,1,7,7,7,7);
+insert into r1 values (1,2,2,2,8,8,8,8);
+insert into r1 values (2,1,1,1,9,9,9,9);
+insert into r1 values (2,1,1,2,10,10,10,10);
+insert into r1 values (2,1,2,1,11,11,11,11);
+insert into r1 values (2,1,2,2,12,12,12,12);
+insert into r1 values (2,2,1,1,13,13,13,13);
+insert into r1 values (2,2,1,2,14,14,14,14);
+insert into r1 values (2,2,2,1,15,15,15,15);
+insert into r1 values (2,2,2,2,16,16,16,16);
+
+connection con2;
+BEGIN;
+insert into r1 values (5,5,5,5,5,5,5,5);
+update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1';
+
+--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb --order-by-primary-desc --rocksdb_bulk_load test
+
+rollback;
+
+connection con1;
+
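+# Count how many times mysqldump issued START TRANSACTION WITH CONSISTENT
+# ROCKSDB SNAPSHOT; the statement is recorded in the general query log.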
+--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l
+
+set @save_default_storage_engine=@@global.default_storage_engine;
+SET GLOBAL default_storage_engine=rocksdb;
+--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
+--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l
+
+
+# Wipe the general log so that this test case doesn't fail with --repeat
+--exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log
+
+# Test that mysqldump works with statement-based binary logging
+SET GLOBAL binlog_format=statement;
+--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null
+SET GLOBAL binlog_format=row;
+
+drop table r1;
+reset master;
+set @@global.default_storage_engine=@save_default_storage_engine;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt
new file mode 100644
index 00000000000..2672d4ff35e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt
@@ -0,0 +1 @@
+--binlog_format=row
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test
new file mode 100644
index 00000000000..3631e703de6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test
@@ -0,0 +1,43 @@
+--source include/have_rocksdb.inc
+
+--source include/have_log_bin.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+
+--disable_query_log
+let $i = 1;
+while ($i <= 50000) {
+ let $insert = INSERT INTO t1 VALUES($i, $i, REPEAT('x', 150));
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+optimize table t1;
+
+# Wipe the block cache by restarting the server
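+# Restart via the expect file: write "wait", shut the server down, then write
+# "restart:<options>" so it comes back with small SST files and auto
+# compactions disabled.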
+let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
+--exec echo "wait" > $restart_file
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+-- enable_reconnect
+-- source include/wait_until_connected_again.inc
+
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+
+--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null
+
+# verifying block cache was not filled
+select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+
+select count(*) from t1;
+
+# verifying block cache was filled
+select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add';
+
+#cleanup
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test
new file mode 100644
index 00000000000..e3e0cf898a1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test
@@ -0,0 +1,26 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
+
+--disable_query_log
+let $max = 10000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1;
+
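+# The debug flag simulates negative table stats; DATA_LENGTH should still be
+# reported as a small sane value rather than underflowing to a huge number.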
+set session debug= "+d,myrocks_simulate_negative_stats";
+SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+set session debug= "-d,myrocks_simulate_negative_stats";
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test
new file mode 100644
index 00000000000..ccef7182c11
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test
@@ -0,0 +1,32 @@
+--source include/have_rocksdb.inc
+
+# For Issue #117: MyRocks does merge sort with small data sets
+
+let $datadir = `SELECT @@datadir`;
+
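+# Repeat 30 times: build a 200-row table, dump it sorted into an OUTFILE and
+# check that Sort_merge_passes did not grow, i.e. the sort finished in memory
+# without a merge pass.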
+--disable_query_log
+let $i= 0;
+while ($i < 30) {
+ DROP TABLE IF EXISTS ti_nk;
+
+ CREATE TABLE `ti_nk` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(36) COLLATE latin1_bin NOT NULL,
+ PRIMARY KEY (`c`)
+ );
+ let $j = 0;
+ while ($j < 200) {
+ eval insert into ti_nk values ($j, $j, md5($j));
+ inc $j;
+ }
+
+ select variable_value into @s from information_schema.global_status where variable_name='Sort_merge_passes';
+ eval SELECT a, b, c FROM ti_nk ORDER BY a,b,c INTO OUTFILE '$datadir/select.out';
+ --remove_file $datadir/select.out
+ select case when variable_value-@s = 0 then 'true' else 'false' end as skip_merge_sort from information_schema.global_status where variable_name='Sort_merge_passes';
+ inc $i;
+}
+--enable_query_log
+
+DROP TABLE ti_nk;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc
new file mode 100644
index 00000000000..0dcd452194a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc
@@ -0,0 +1,65 @@
+#
+# This include file checks some very basic capabilities: restart, insert,
+# update and delete for tables with no PK.
+# NOTE: requires table with structure similar to
+# CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb;
+#
+
+SHOW CREATE TABLE t1;
+SHOW COLUMNS IN t1;
+
+### test INSERT
+INSERT INTO t1 (a,b) VALUES (76,'bar');
+INSERT INTO t1 (a,b) VALUES (35,'foo');
+INSERT INTO t1 (a,b) VALUES (77,'baz');
+
+## test SELECT w/ index scans
+--sorted_result
+SELECT * FROM t1 WHERE a = 35;
+--sorted_result
+SELECT * FROM t1 WHERE a = 35 AND b = 'foo';
+--sorted_result
+SELECT * FROM t1 WHERE a = 77 OR b = 'bar';
+--sorted_result
+SELECT * FROM t1 WHERE a > 35;
+--sorted_result
+SELECT * FROM t1;
+
+# test UPDATE
+UPDATE t1 SET a=a+100;
+--sorted_result
+SELECT * FROM t1;
+
+UPDATE t1 SET a=a-100, b='bbb' WHERE a>100;
+--sorted_result
+SELECT * FROM t1;
+UPDATE t1 SET a=300, b='ccc' WHERE a>70;
+--sorted_result
+SELECT * FROM t1;
+UPDATE t1 SET a=123 WHERE a=35;
+--sorted_result
+SELECT * FROM t1;
+UPDATE t1 SET a=321 WHERE b='ccc';
+--sorted_result
+SELECT * FROM t1;
+
+
+## test RESTART/OPEN
+--source include/restart_mysqld.inc
+## test insert after restart
+INSERT INTO t1 (a,b) VALUES (45,'bob');
+--sorted_result
+SELECT * FROM t1;
+
+# test DELETE
+DELETE FROM t1 WHERE a=123;
+--sorted_result
+SELECT * FROM t1;
+
+DELETE FROM t1 WHERE b > 'bbb' AND a > 100;
+--sorted_result
+SELECT * FROM t1;
+
+# test TRUNCATE
+TRUNCATE TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt
new file mode 100644
index 00000000000..71f74ee53ab
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc
new file mode 100644
index 00000000000..a41bd046455
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc
@@ -0,0 +1,78 @@
+let $datadir = `SELECT @@datadir`;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6;
+--enable_warnings
+create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
+create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb;
+
+--disable_query_log
+let $t = 1;
+while ($t <= 6) {
+ let $i = 1;
+ while ($i <= 10000) {
+ let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150));
+ inc $i;
+ eval $insert;
+ }
+ inc $t;
+}
+--enable_query_log
+
+# Disable auto compaction so that effects of optimize table are stable
+let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
+--exec echo "wait" > $restart_file
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+-- enable_reconnect
+-- source include/wait_until_connected_again.inc
+
+select count(*) from t1;
+select count(*) from t2;
+select count(*) from t3;
+select count(*) from t4;
+select count(*) from t5;
+select count(*) from t6;
+delete from t1 where id <= 9900;
+delete from t2 where id <= 9900;
+delete from t3 where id <= 9900;
+delete from t4 where id <= 9900;
+delete from t5 where id <= 9900;
+delete from t6 where id <= 9900;
+
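+# Record the total SST file size before and after each OPTIMIZE TABLE; the
+# perl script run below checks that each optimize reclaimed enough space from
+# the deleted rows.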
+--let $size_cmd = du -ks $datadir/.rocksdb/*.sst | awk '{t=t+\$1} END{print t}' >> $MYSQL_TMP_DIR/sst_size.dat
+--exec $size_cmd
+optimize table t1;
+--exec $size_cmd
+optimize table t3;
+--exec $size_cmd
+optimize table t4;
+--exec $size_cmd
+optimize table t6;
+--exec $size_cmd
+
+select count(*) from t1;
+select count(*) from t2;
+select count(*) from t3;
+select count(*) from t4;
+select count(*) from t5;
+select count(*) from t6;
+
+# Run a check script to verify that the sst files were reduced enough by each optimize table
+--exec perl suite/rocksdb/optimize_table_check_sst.pl $MYSQL_TMP_DIR/sst_size.dat
+
+#cleanup
+optimize table t2;
+optimize table t5;
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+DROP TABLE t5;
+DROP TABLE t6;
+--remove_file $MYSQL_TMP_DIR/sst_size.dat
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test
new file mode 100644
index 00000000000..383bae72904
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# OPTIMIZE TABLE statements
+#
+
+--source optimize_table.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test
new file mode 100644
index 00000000000..9f3ccb9dd6c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test
@@ -0,0 +1,42 @@
+--source include/have_rocksdb.inc
+
+#
+# Create tables with partitions and try to update/select from them.
+#
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS VAR_POP;
+DROP TABLE IF EXISTS TEMP0;
+DROP TABLE IF EXISTS VAR_SAMP;
+
+--enable_warnings
+
+CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
+
+--disable_query_log
+let $max = 1000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+
+# The partition string is "#P#". To verify that parsing is done correctly,
+# check that tables containing "P" somewhere in their names can be created.
+CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB;
+CREATE TABLE TEMP0 (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 3;
+CREATE TABLE VAR_SAMP (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 10;
+
+--enable_query_log
+
+SHOW TABLES;
+
+SELECT * FROM t1 ORDER BY i LIMIT 10;
+SELECT COUNT(*) FROM t1;
+
+DROP TABLE t1;
+DROP TABLE VAR_POP;
+DROP TABLE TEMP0;
+DROP TABLE VAR_SAMP;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test
new file mode 100644
index 00000000000..4290811e868
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test
@@ -0,0 +1,92 @@
+--source include/have_rocksdb.inc
+
+#
+# Information Schema perf context
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+--enable_warnings
+
+
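+# Raise the perf context level so that both counters and I/O timing stats
+# (e.g. IO_WRITE_NANOS, checked below) are collected.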
+SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level;
+SET GLOBAL rocksdb_perf_context_level=3;
+
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB;
+
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+
+###############################################################################
+# Test that expected perf context stats exists
+###############################################################################
+
+# Check per-table perf context
+--replace_column 5 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1';
+
+# Check global perf context
+--replace_column 2 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL;
+
+###############################################################################
+# Test iteration skip counters
+###############################################################################
+
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+
+SELECT * FROM t1;
+
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+
+SELECT * FROM t1 WHERE j BETWEEN 1 AND 5;
+
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't1'
+AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT');
+
+###############################################################################
+# Test write I/O stats
+###############################################################################
+
+# Statistics for multi-statement transactions cannot be attributed to
+# individual tables but should show up in global perf context stats
+
+BEGIN;
+INSERT INTO t2 VALUES (1), (2);
+INSERT INTO t2 VALUES (3), (4);
+COMMIT;
+
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't2'
+AND STAT_TYPE = 'IO_WRITE_NANOS'
+AND VALUE > 0;
+
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS' AND VALUE > 0;
+
+SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS';
+
+# Single statement writes do show up in per-table stats
+INSERT INTO t2 VALUES (5), (6), (7), (8);
+
+SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_NAME = 't2'
+AND STAT_TYPE = 'IO_WRITE_NANOS'
+AND VALUE > 0;
+
+SELECT VALUE INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL
+WHERE STAT_TYPE = 'IO_WRITE_NANOS';
+
+SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END;
+
+# cleanup
+DROP TABLE t1;
+DROP TABLE t2;
+SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt
new file mode 100644
index 00000000000..52f4895dc2f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=write_buffer_size=16k --log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test
new file mode 100644
index 00000000000..56070652618
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test
@@ -0,0 +1,70 @@
+--source include/have_log_bin.inc
+--source include/have_rocksdb.inc
+--source include/count_sessions.inc
+--disable_warnings
+--source include/have_gtid.inc
+--enable_warnings
+-- let $uuid = `select @@server_uuid;`
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1);
+
+# Read-only, long-running transaction. SingleDelete/Put shouldn't increase much.
+select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
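+# START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT returns the binlog
+# position and GTID of the snapshot, so the server UUID is masked below.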
+-- replace_result $uuid uuid
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+connection con2;
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t1 SET value=value+1 WHERE id=1;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+
+connection con1;
+select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
+select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+SELECT * FROM t1;
+--error ER_UNKNOWN_ERROR
+INSERT INTO t1 values (2, 2);
+ROLLBACK;
+SELECT * FROM t1;
+INSERT INTO t1 values (2, 2);
+SELECT * FROM t1 ORDER BY id;
+
+# Regular long-running transaction.
+# No "Transaction could not check for conflicts for opearation" error should happen.
+BEGIN;
+
+connection con2;
+--disable_query_log
+let $i = 5;
+while ($i <= 10000) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+connection con1;
+SELECT COUNT(*) FROM t1;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+OPTIMIZE TABLE t1;
+DROP TABLE t1;
+reset master;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt
new file mode 100644
index 00000000000..99929434028
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt
@@ -0,0 +1,4 @@
+--force-restart
+--rocksdb_debug_optimizer_n_rows=20000
+--rocksdb_records_in_range=1000
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test
new file mode 100644
index 00000000000..9c939ef06e4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test
@@ -0,0 +1,144 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Create the table and insert some keys
+CREATE TABLE t1 (
+ i INT,
+ a INT,
+ b INT,
+ PRIMARY KEY (i),
+ KEY ka(a),
+ KEY kb(b) comment 'rev:cf1'
+) ENGINE = rocksdb;
+
+--disable_query_log
+let $max = 20000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+# get results for records_in_range prior to memtable flush
+# normal CF
+explain extended select * from t1 where a> 500 and a< 750;
+explain extended select * from t1 where a< 750;
+explain extended select * from t1 where a> 500;
+explain extended select * from t1 where a>=0 and a<=1000;
+
+#reverse CF
+explain extended select * from t1 where b> 500 and b< 750;
+explain extended select * from t1 where b< 750;
+explain extended select * from t1 where b> 500;
+explain extended select * from t1 where b>=0 and b<=1000;
+
+## cost calculation differences between covering vs non-covering (#298)
+set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range;
+set rocksdb_records_in_range = 15000;
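+# With the estimate forced to most of the table, non-covering range reads look
+# expensive while covering index reads stay cheap; the plans below should
+# reflect that, and the lower estimate further down flips some of the choices.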
+# covering, range
+explain extended select a from t1 where a < 750;
+# non-covering, full
+explain extended select a, b from t1 where a < 750;
+# covering, ref
+explain extended select a from t1 where a = 700;
+# non-covering, ref
+explain extended select a,b from t1 where a = 700;
+# covering, full index
+explain extended select a from t1 where a in (700, 800);
+# non-covering, full
+explain extended select a,b from t1 where a in (700, 800);
+set rocksdb_records_in_range=8000;
+# covering, range
+explain extended select a from t1 where a in (700, 800);
+# non-covering, full
+explain extended select a,b from t1 where a in (700, 800);
+set rocksdb_records_in_range = @save_rocksdb_records_in_range;
+
+# flush memtable and repeat
+set global rocksdb_force_flush_memtable_now = true;
+# normal CF
+explain extended select * from t1 where a> 500 and a< 750;
+explain extended select * from t1 where a< 750;
+explain extended select * from t1 where a> 500;
+explain extended select * from t1 where a>=0 and a<=1000;
+
+#reverse CF
+explain extended select * from t1 where b> 500 and b< 750;
+explain extended select * from t1 where b< 750;
+explain extended select * from t1 where b> 500;
+explain extended select * from t1 where b>=0 and b<=1000;
+
+# a set of 1
+explain extended select * from t1 where a>= 500 and a<= 500;
+explain extended select * from t1 where b>= 500 and b<= 500;
+
+# two indexes
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+
+# composite index
+drop index ka on t1;
+drop index kb on t1;
+create index kab on t1(a,b);
+set global rocksdb_force_flush_memtable_now = true;
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+
+# override records in range
+set rocksdb_records_in_range=444;
+explain extended select * from t1 where a< 750 and b> 500 and b< 750;
+set rocksdb_records_in_range=0;
+
+# issue 82
+## forward cf
+CREATE TABLE `linktable` (
+ `id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `visibility` tinyint(3) NOT NULL DEFAULT '0',
+ `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '',
+ `time` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `version` int(11) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk',
+ KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+
+insert into linktable values (1,1,1,1,1,1,1,1,1);
+insert into linktable values (1,1,2,1,1,1,1,1,1);
+insert into linktable values (1,1,3,1,1,1,1,1,1);
+insert into linktable values (1,1,4,1,1,1,1,1,1);
+set global rocksdb_force_flush_memtable_now = true;
+explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
+drop table linktable;
+
+## rev cf
+CREATE TABLE `linktable` (
+ `id1` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id1_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `id2` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `id2_type` int(10) unsigned NOT NULL DEFAULT '0',
+ `link_type` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `visibility` tinyint(3) NOT NULL DEFAULT '0',
+ `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '',
+ `time` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `version` int(11) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk',
+ KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+
+insert into linktable values (1,1,1,1,1,1,1,1,1);
+insert into linktable values (1,1,2,1,1,1,1,1,1);
+insert into linktable values (1,1,3,1,1,1,1,1,1);
+insert into linktable values (1,1,4,1,1,1,1,1,1);
+set global rocksdb_force_flush_memtable_now = true;
+explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
+drop table linktable;
+
+#cleanup
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc
new file mode 100644
index 00000000000..47d6a299095
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc
@@ -0,0 +1,38 @@
+#
+# REPAIR TABLE statements
+#
+# Note: the output is likely to be different for the engine under test,
+# in which case rdiff will be needed. Or, the output might say that
+# the storage engine does not support REPAIR.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb;
+
+REPAIR TABLE t1;
+
+INSERT INTO t1 (a,b) VALUES (3,'c');
+INSERT INTO t2 (a,b) VALUES (4,'d');
+REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2;
+INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
+REPAIR LOCAL TABLE t2;
+INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+INSERT INTO t2 (a,b) VALUES (9,'i');
+REPAIR LOCAL TABLE t2, t1 EXTENDED;
+INSERT INTO t1 (a,b) VALUES (10,'j');
+INSERT INTO t2 (a,b) VALUES (11,'k');
+REPAIR TABLE t1, t2 QUICK USE_FRM;
+INSERT INTO t1 (a,b) VALUES (12,'l');
+INSERT INTO t2 (a,b) VALUES (13,'m');
+REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+FLUSH TABLE t1;
+
+DROP TABLE t1, t2;
+
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test
new file mode 100644
index 00000000000..5c4807ce986
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# REPAIR TABLE statements
+#
+
+--source repair_table.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/replace.test b/storage/rocksdb/mysql-test/rocksdb/t/replace.test
new file mode 100644
index 00000000000..3ac37330b2d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/replace.test
@@ -0,0 +1,54 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic REPLACE statements
+#
+
+###########################################
+# TODO:
+# A part of the test is currently disabled
+# because unique indexes are not supported
+###########################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+# When there is no duplicate key, REPLACE should work as INSERT
+
+REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e');
+--sorted_result
+SELECT a,b FROM t1;
+
+REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo');
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+--disable_parsing
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX (a)) ENGINE=rocksdb;
+REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (2,'d');
+REPLACE INTO t1 (a,b) VALUES (2,'d');
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb;
+REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b) VALUES (4,'b');
+REPLACE INTO t1 (a,b) VALUES (4,'b');
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt
new file mode 100644
index 00000000000..6ad42e58aa2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test
new file mode 100644
index 00000000000..f612cb0997b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test
@@ -0,0 +1,1925 @@
+--source include/have_rocksdb.inc
+
+#
+# RocksDB Storage Engine tests
+#
+select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb';
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20;
+drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29;
+drop table if exists t30,t31,t32,t33,t34,t35,t36,t37,t38,t39;
+drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49;
+--enable_warnings
+
+# Disable background compaction to prevent stats from affecting explain output
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--echo #
+--echo # Issue #1: Don't update indexes if index values have not changed
+--echo #
+# [Jay Edgar] I moved this test first because it uses the
+# rocksdb_number_keys_written value, but this value is affected out of band
+# by drop tables. There is a background thread that periodically processes
+# through the list of dropped keys and if any are gone from the database it
+# deletes information related to the key - and this delete causes this count
+# to be incorrect. I moved this test first and made the whole test require
+# a fresh server to hopefully avoid this.
+create table t1 (
+ pk int primary key,
+ a int,
+ b int,
+ key(a)
+) engine=rocksdb;
+
+insert into t1 values
+(1,1,1), (2,2,2), (3,3,3), (4,4,4);
+
+set @var1=(select variable_value
+ from information_schema.global_status
+ where variable_name='rocksdb_number_keys_written');
+
+--echo # Do an update that doesn't change the key 'a'.
+update t1 set b=3334341 where a=2;
+
+set @var2=(select variable_value
+ from information_schema.global_status
+ where variable_name='rocksdb_number_keys_written');
+--echo # The following should produce 1
+select @var2 - @var1;
+
+--echo # Do an update that sets the key to the same value
+update t1 set a=pk where a=3;
+set @var3=(select variable_value
+ from information_schema.global_status
+ where variable_name='rocksdb_number_keys_written');
+--echo # We have 'updated' column to the same value, so the following must return 0:
+select @var3 - @var2;
+drop table t1;
+
+create table t0 (a int primary key) engine=rocksdb;
+show create table t0;
+drop table t0;
+
+create table t1 (a int primary key, b int) engine=rocksdb;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+
+select * from t1;
+
+--echo # Check that we can create another table and insert there
+create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb;
+insert into t2 value ('abc','def');
+insert into t2 value ('hijkl','mnopq');
+select * from t2;
+
+--echo # Select again from t1 to see that records from different tables dont mix
+select * from t1;
+
+explain select * from t2 where a='no-such-key';
+--replace_column 9 #
+explain select * from t2 where a='abc';
+select * from t2 where a='abc';
+
+--echo # Try a composite PK
+create table t3 (
+ pk1 int,
+ pk2 varchar(10),
+ col1 varchar(10),
+ primary key(pk1, pk2)
+) engine=rocksdb;
+
+insert into t3 values (2,'two', 'row#2');
+insert into t3 values (3,'three', 'row#3');
+insert into t3 values (1,'one', 'row#1');
+
+select * from t3;
+select * from t3 where pk1=3 and pk2='three';
+
+drop table t1, t2, t3;
+
+--echo #
+--echo # Test blob values
+--echo #
+
+create table t4 (a int primary key, b blob) engine=rocksdb;
+insert into t4 values (1, repeat('quux-quux', 60));
+insert into t4 values (10, repeat('foo-bar', 43));
+insert into t4 values (5, repeat('foo-bar', 200));
+
+insert into t4 values (2, NULL);
+
+
+select
+ a,
+ (case a
+ when 1 then b=repeat('quux-quux', 60)
+ when 10 then b=repeat('foo-bar', 43)
+ when 5 then b=repeat('foo-bar', 200)
+ when 2 then b is null
+ else 'IMPOSSIBLE!' end) as CMP
+from t4;
+
+drop table t4;
+
+--echo #
+--echo # Test blobs of various sizes
+--echo #
+
+--echo # TINYBLOB
+create table t5 (a int primary key, b tinyblob) engine=rocksdb;
+insert into t5 values (1, repeat('quux-quux', 6));
+insert into t5 values (10, repeat('foo-bar', 4));
+insert into t5 values (5, repeat('foo-bar', 2));
+select
+ a,
+ (case a
+ when 1 then b=repeat('quux-quux', 6)
+ when 10 then b=repeat('foo-bar', 4)
+ when 5 then b=repeat('foo-bar', 2)
+ else 'IMPOSSIBLE!' end) as CMP
+from t5;
+drop table t5;
+
+--echo # MEDIUMBLOB
+create table t6 (a int primary key, b mediumblob) engine=rocksdb;
+insert into t6 values (1, repeat('AB', 65000));
+insert into t6 values (10, repeat('bbb', 40000));
+insert into t6 values (5, repeat('foo-bar', 2));
+select
+ a,
+ (case a
+ when 1 then b=repeat('AB', 65000)
+ when 10 then b=repeat('bbb', 40000)
+ when 5 then b=repeat('foo-bar', 2)
+ else 'IMPOSSIBLE!' end) as CMP
+from t6;
+drop table t6;
+
+--echo # LONGBLOB
+create table t7 (a int primary key, b longblob) engine=rocksdb;
+insert into t7 values (1, repeat('AB', 65000));
+insert into t7 values (10, repeat('bbb', 40000));
+insert into t7 values (5, repeat('foo-bar', 2));
+select
+ a,
+ (case a
+ when 1 then b=repeat('AB', 65000)
+ when 10 then b=repeat('bbb', 40000)
+ when 5 then b=repeat('foo-bar', 2)
+ else 'IMPOSSIBLE!' end) as CMP
+from t7;
+drop table t7;
+
+
+--echo #
+--echo # Check if DELETEs work
+--echo #
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+
+insert into t8 values
+ ('one', 'eins'),
+ ('two', 'zwei'),
+ ('three', 'drei'),
+ ('four', 'vier'),
+ ('five', 'funf');
+
+--echo # Delete by PK
+--replace_column 9 #
+explain delete from t8 where a='three';
+delete from t8 where a='three';
+
+select * from t8;
+
+--echo # Delete while doing a full table scan
+delete from t8 where col1='eins' or col1='vier';
+select * from t8;
+
+--echo # delete w/o WHERE:
+delete from t8;
+select * from t8;
+
+--echo #
+--echo # Test UPDATEs
+--echo #
+insert into t8 values
+ ('one', 'eins'),
+ ('two', 'zwei'),
+ ('three', 'drei'),
+ ('four', 'vier'),
+ ('five', 'funf');
+
+update t8 set col1='dva' where a='two';
+
+update t8 set a='fourAAA' where col1='vier';
+
+select * from t8;
+delete from t8;
+
+--echo #
+--echo # Basic transactions tests
+--echo #
+begin;
+insert into t8 values ('trx1-val1', 'data');
+insert into t8 values ('trx1-val2', 'data');
+rollback;
+select * from t8;
+
+begin;
+insert into t8 values ('trx1-val1', 'data');
+insert into t8 values ('trx1-val2', 'data');
+commit;
+select * from t8;
+
+drop table t8;
+
+--echo #
+--echo # Check if DROP TABLE works
+--echo #
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+select * from t8;
+insert into t8 values ('foo','foo');
+drop table t8;
+create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb;
+select * from t8;
+drop table t8;
+
+--echo #
+--echo # MDEV-3961: Assertion ... on creating a TEMPORARY RocksDB table
+--echo #
+--error ER_ILLEGAL_HA_CREATE_OPTION
+CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+
+--echo #
+--echo # MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work
+--echo #
+CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t10 VALUES (1),(3);
+CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t11 VALUES (1),(4);
+
+select * from t10;
+select * from t11;
+--replace_column 9 #
+EXPLAIN
+SELECT * FROM t10, t11 WHERE i=j;
+SELECT * FROM t10, t11 WHERE i=j;
+
+DROP TABLE t10,t11;
+
+--echo #
+--echo # MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122
+--echo #
+CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t12 VALUES (2),(1);
+SELECT * FROM t12 ORDER BY pk;
+DROP TABLE t12;
+
+--echo #
+--echo # MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ...
+--echo #
+create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2;
+#--error ER_GET_ERRNO
+#alter table t14 add partition partitions 2;
+# ^^ works, but causes weird warnings in error log.
+drop table t14;
+
+--echo #
+--echo # MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table
+--echo #
+create table t9 (i int primary key) engine=rocksdb;
+--error ER_ILLEGAL_HA
+alter table t9 discard tablespace;
+drop table t9;
+
+--echo #
+--echo # MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ...
+--echo # on accessing a table after ALTER
+--echo #
+CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t15 VALUES (1,1),(5,2);
+#--error ER_ILLEGAL_HA
+ALTER TABLE t15 DROP COLUMN a;
+DROP TABLE t15;
+
+--echo #
+--echo # MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table
+--echo #
+create table t16 (pk int primary key, a char(8)) engine=RocksDB;
+insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d');
+
+#
+# Not anymore: the following query used to eat a record because of the
+# CANT-SEE-OWN-CHANGES property; now it fails with ER_DUP_ENTRY instead.
+#
+--error ER_DUP_ENTRY
+update t16 set pk=100, a = 'updated' where a in ('b','c');
+select * from t16;
+drop table t16;
+
+--echo #
+--echo # MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table
+--echo #
+--disable_warnings
+drop table if exists t_very_long_table_name;
+--enable_warnings
+
+CREATE TABLE `t_very_long_table_name` (
+ `c` char(1) NOT NULL,
+ `c0` char(0) NOT NULL,
+ `c1` char(1) NOT NULL,
+ `c20` char(20) NOT NULL,
+ `c255` char(255) NOT NULL,
+ PRIMARY KEY (`c255`)
+ ) ENGINE=RocksDB DEFAULT CHARSET=latin1;
+INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255));
+drop table t_very_long_table_name;
+
+
+--echo #
+--echo # Test table locking and read-before-write checks.
+--echo #
+create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb;
+insert into t17 values ('row1', 'val1');
+
+--error ER_DUP_ENTRY
+insert into t17 values ('row1', 'val1-try2');
+--error ER_DUP_ENTRY
+insert into t17 values ('ROW1', 'val1-try2');
+
+insert into t17 values ('row2', 'val2');
+insert into t17 values ('row3', 'val3');
+
+--echo # This is ok
+update t17 set pk='row4' where pk='row1';
+
+--echo # This will try to overwrite another row:
+--error ER_DUP_ENTRY
+update t17 set pk='row3' where pk='row2';
+
+select * from t17;
+
+--echo #
+--echo # Locking tests
+--echo #
+
+connect (con1,localhost,root,,);
+
+--echo # First, make sure there's no locking when transactions update different rows
+connection con1;
+set autocommit=0;
+update t17 set col1='UPD1' where pk='row2';
+
+connection default;
+update t17 set col1='UPD2' where pk='row3';
+
+connection con1;
+commit;
+
+connection default;
+select * from t17;
+
+--echo # Check the variable
+show variables like 'rocksdb_lock_wait_timeout';
+set rocksdb_lock_wait_timeout=2; # seconds
+show variables like 'rocksdb_lock_wait_timeout';
+
+--echo # Try updating the same row from two transactions
+connection con1;
+begin;
+update t17 set col1='UPD2-AA' where pk='row2';
+
+connection default;
+--error ER_LOCK_WAIT_TIMEOUT
+update t17 set col1='UPD2-BB' where pk='row2';
+
+set rocksdb_lock_wait_timeout=1000; # seconds
+--send
+ update t17 set col1='UPD2-CC' where pk='row2';
+
+connection con1;
+rollback;
+
+connection default;
+reap;
+select * from t17 where pk='row2';
+
+drop table t17;
+
+disconnect con1;
+--echo #
+--echo # MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable)
+--echo #
+--enable_connect_log
+
+create table t18 (pk int primary key, i int) engine=RocksDB;
+begin;
+select * from t18;
+select * from t18 where pk = 1;
+
+--connect (con1,localhost,root,,)
+insert into t18 values (1,100);
+
+--connection default
+select * from t18;
+select * from t18 where pk = 1;
+commit;
+
+drop table t18;
+
+--echo #
+--echo # MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY
+--echo #
+create table t19 (pk int primary key, i int) engine=RocksDB;
+insert into t19 values (1,1);
+insert into t19 values (1,100) on duplicate key update i = 102;
+select * from t19;
+drop table t19;
+
+--echo # MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY
+create table t20 (pk int primary key, i int) engine=RocksDB;
+insert into t20 values (1,1);
+replace into t20 values (1,100);
+select * from t20;
+drop table t20;
+
+--echo #
+--echo # MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT
+--echo #
+create table t21 (v varbinary(16) primary key, i int) engine=RocksDB;
+insert into t21 values ('a',1);
+select * from t21;
+drop table t21;
+
+--echo #
+--echo # MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE
+--echo #
+
+CREATE TABLE t22 (a int primary key) ENGINE=RocksDB;
+INSERT INTO t22 VALUES (1),(2);
+CREATE TABLE t23 (b int primary key) ENGINE=RocksDB;
+INSERT INTO t23 SELECT * FROM t22;
+DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a;
+DROP TABLE t22,t23;
+
+--echo #
+--echo # MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT
+--echo #
+CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB;
+INSERT INTO t24 VALUES (1),(2);
+
+CREATE TABLE t25 LIKE t24;
+INSERT INTO t25 SELECT * FROM t24;
+
+DELETE t25.* FROM t24, t25;
+DROP TABLE t24,t25;
+
+--echo #
+--echo # MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself
+--echo #
+create table t26 (pk int primary key, c char(1)) engine=RocksDB;
+insert into t26 values (1,'a'),(2,'b');
+update t26 set c = 'x' order by pk limit 1;
+delete from t26 order by pk limit 1;
+select * from t26;
+drop table t26;
+
+
+--echo #
+--echo # Test whether SELECT ... FOR UPDATE puts locks
+--echo #
+create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB;
+insert into t27 values
+ ('row1', 'row1data'),
+ ('row2', 'row2data'),
+ ('row3', 'row3data');
+
+connection con1;
+begin;
+select * from t27 where pk='row3' for update;
+
+connection default;
+set rocksdb_lock_wait_timeout=1;
+--error ER_LOCK_WAIT_TIMEOUT
+update t27 set col1='row2-modified' where pk='row3';
+
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+
+drop table t27;
+
+--echo #
+--echo # MDEV-4060: RocksDB: Assertion `! trx->batch' fails in
+--echo #
+create table t28 (pk int primary key, a int) engine=RocksDB;
+insert into t28 values (1,10),(2,20);
+begin;
+update t28 set a = 100 where pk = 3;
+rollback;
+select * from t28;
+drop table t28;
+
+
+--echo #
+--echo # Secondary indexes
+--echo #
+create table t30 (
+ pk varchar(16) not null primary key,
+ key1 varchar(16) not null,
+ col1 varchar(16) not null,
+ key(key1)
+) engine=rocksdb;
+
+insert into t30 values ('row1', 'row1-key', 'row1-data');
+insert into t30 values ('row2', 'row2-key', 'row2-data');
+insert into t30 values ('row3', 'row3-key', 'row3-data');
+
+--replace_column 9 #
+explain
+select * from t30 where key1='row2-key';
+select * from t30 where key1='row2-key';
+
+--replace_column 9 #
+explain
+select * from t30 where key1='row1';
+--echo # This will produce nothing:
+select * from t30 where key1='row1';
+
+--replace_column 9 #
+explain
+select key1 from t30;
+select key1 from t30;
+
+--echo # Create a duplicate record
+insert into t30 values ('row2a', 'row2-key', 'row2a-data');
+
+--echo # Can we see it?
+select * from t30 where key1='row2-key';
+
+delete from t30 where pk='row2';
+select * from t30 where key1='row2-key';
+
+--echo #
+--echo # Range scans on secondary index
+--echo #
+delete from t30;
+insert into t30 values
+ ('row1', 'row1-key', 'row1-data'),
+ ('row2', 'row2-key', 'row2-data'),
+ ('row3', 'row3-key', 'row3-data'),
+ ('row4', 'row4-key', 'row4-data'),
+ ('row5', 'row5-key', 'row5-data');
+analyze table t30;
+
+--replace_column 9 #
+explain
+select * from t30 where key1 <='row3-key';
+select * from t30 where key1 <='row3-key';
+
+--replace_column 9 #
+explain
+select * from t30 where key1 between 'row2-key' and 'row4-key';
+select * from t30 where key1 between 'row2-key' and 'row4-key';
+
+--replace_column 9 #
+explain
+select * from t30 where key1 in ('row2-key','row4-key');
+select * from t30 where key1 in ('row2-key','row4-key');
+
+--replace_column 9 #
+explain
+select key1 from t30 where key1 in ('row2-key','row4-key');
+select key1 from t30 where key1 in ('row2-key','row4-key');
+
+--replace_column 9 #
+explain
+select * from t30 where key1 > 'row1-key' and key1 < 'row4-key';
+select * from t30 where key1 > 'row1-key' and key1 < 'row4-key';
+
+--replace_column 9 #
+explain
+select * from t30 order by key1 limit 3;
+select * from t30 order by key1 limit 3;
+
+--replace_column 9 #
+explain
+select * from t30 order by key1 desc limit 3;
+select * from t30 order by key1 desc limit 3;
+
+--echo #
+--echo # Range scans on primary key
+--echo #
+--replace_column 9 #
+explain
+select * from t30 where pk <='row3';
+select * from t30 where pk <='row3';
+
+--replace_column 9 #
+explain
+select * from t30 where pk between 'row2' and 'row4';
+select * from t30 where pk between 'row2' and 'row4';
+
+--replace_column 9 #
+explain
+select * from t30 where pk in ('row2','row4');
+select * from t30 where pk in ('row2','row4');
+
+--replace_column 9 #
+explain
+select * from t30 order by pk limit 3;
+select * from t30 order by pk limit 3;
+
+drop table t30;
+
+
+--echo #
+--echo # MDEV-3841: RocksDB: Reading by PK prefix does not work
+--echo #
+create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB;
+insert into t31 values (1,10,100),(2,20,200);
+select * from t31 where i = 1;
+select * from t31 where j = 10;
+select * from t31 where k = 100;
+select * from t31 where i = 1 and j = 10;
+select * from t31 where i = 1 and k = 100;
+select * from t31 where j = 10 and k = 100;
+select * from t31 where i = 1 and j = 10 and k = 100;
+drop table t31;
+
+--echo #
+--echo # MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work
+--echo #
+create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB;
+insert into t32 values
+ (1,10,100,''),
+ (2,20,200,'');
+select * from t32 where i = 1 and j = 10 and k = 100;
+update t32 set a = 'updated' where i = 1 and j = 10 and k = 100;
+select * from t32;
+drop table t32;
+
+--echo #
+--echo # MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. DESC
+--echo #
+CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB;
+INSERT INTO t33 VALUES (1,'a'),(2,'b');
+SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC;
+DROP TABLE t33;
+
+--echo #
+--echo # MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index
+--echo #
+#--error ER_GET_ERRMSG
+--echo # Unique indexes can be created, but uniqueness won't be enforced
+create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB;
+drop table t33;
+
+--echo #
+--echo # MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range
+--echo #
+CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t34 VALUES (10),(11);
+SELECT pk FROM t34 WHERE pk > 5 AND pk < 15;
+SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15;
+SELECT pk FROM t34 WHERE pk > 5;
+SELECT pk FROM t34 WHERE pk < 15;
+drop table t34;
+
+--echo #
+--echo # MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC
+--echo #
+create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB;
+insert into t35 values (1,1,1,1,1),(2,2,2,2,2);
+select * from t35 where a = 1 and c = 1 and d = 1 order by e desc;
+drop table t35;
+
+--echo #
+--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index
+--echo #
+CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB;
+INSERT INTO t36 VALUES (1,10),(2,20);
+SELECT 3 IN ( SELECT a FROM t36 );
+drop table t36;
+
+--echo #
+--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index
+--echo #
+CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b))
+ ENGINE=RocksDB;
+INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y');
+SELECT MAX(a) FROM t37 WHERE a < 100;
+DROP TABLE t37;
+
+--echo #
+--echo # MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC
+--echo #
+CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB;
+INSERT INTO t38 VALUES (1,10), (2,20);
+SELECT i FROM t38 WHERE i NOT IN (8) ORDER BY i DESC;
+drop table t38;
+
+--echo #
+--echo # MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys
+--echo # with a multi-part key and ORDER BY .. DESC
+--echo #
+CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB;
+INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y');
+
+CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB;
+INSERT INTO t41 VALUES (1),(2);
+
+SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC;
+DROP TABLE t40,t41;
+
+--echo #
+--echo # MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL
+--echo #
+CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB;
+INSERT INTO t42 VALUES (1, NULL),(2, 8);
+SELECT ( 3 ) NOT IN ( SELECT a FROM t42 );
+DROP TABLE t42;
+
+--echo #
+--echo # MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on
+--echo # DELETE with search by NULL-able secondary key ...
+--echo #
+CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB;
+INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x');
+UPDATE t43 SET pk = 10 WHERE a = 8;
+REPLACE INTO t43 ( a ) VALUES ( 8 );
+REPLACE INTO t43 ( b ) VALUES ( 'y' );
+SELECT * FROM t43 WHERE a = 8;
+DELETE FROM t43 WHERE a = 8;
+DROP TABLE t43;
+
+--echo #
+--echo # Basic AUTO_INCREMENT tests
+--echo #
+create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb;
+insert into t44 (col1) values ('row1');
+insert into t44 (col1) values ('row2');
+insert into t44 (col1) values ('row3');
+select * from t44;
+drop table t44;
+
+--echo #
+--echo # ALTER TABLE tests
+--echo #
+create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb;
+insert into t45 values (1, 'row1');
+insert into t45 values (2, 'row2');
+alter table t45 rename t46;
+select * from t46;
+drop table t46;
+--error ER_BAD_TABLE_ERROR
+drop table t45;
+
+
+--echo #
+--echo # Check Bulk loading
+--echo # Bulk loading used to overwrite existing data
+--echo # Now it fails if there is data overlap with what
+--echo # already exists
+--echo #
+show variables like 'rocksdb%';
+create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
+insert into t47 values (1, 'row1');
+insert into t47 values (2, 'row2');
+set rocksdb_bulk_load=1;
+insert into t47 values (3, 'row3'),(4, 'row4');
+set rocksdb_bulk_load=0;
+select * from t47;
+drop table t47;
+
+--echo #
+--echo # Fix TRUNCATE over empty table (transaction is committed when it wasn't
+--echo # started)
+--echo #
+create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb;
+set autocommit=0;
+#--error ER_ILLEGAL_HA
+truncate table t48;
+set autocommit=1;
+drop table t48;
+
+--echo #
+--echo # MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded
+--echo #
+--enable_connect_log
+
+create table t49 (pk int primary key, a int) engine=RocksDB;
+insert into t49 values (1,10),(2,20);
+begin;
+update t49 set a = 100 where pk = 1;
+
+--connect (con1,localhost,root,,)
+--let $con1_id = `SELECT CONNECTION_ID()`
+set rocksdb_lock_wait_timeout=5000;
+set @var1= to_seconds(now());
+send update t49 set a = 1000 where pk = 1;
+
+--connect (con2,localhost,root,,)
+--echo kill query \$con1_id;
+--disable_query_log
+eval kill query $con1_id;
+--enable_query_log
+--connection con1
+--error ER_QUERY_INTERRUPTED
+--reap
+set @var2= to_seconds(now());
+
+# We expect the time taken to kill the query in con1 to be below
+# rocksdb_lock_wait_timeout (5000).
+--echo "[Jay Edgar] I've updated this query to help determine why it is sometimes failing"
+--echo "(t13541934). If you get an error here (i.e. not 'passed') notify me."
+select if ((@var2 - @var1) < 1000, "passed", (@var2 - @var1)) as 'result';
+
+--connection default
+--disconnect con1
+
+commit;
+drop table t49;
+
+--echo #
+--echo # Index-only tests for INT-based columns
+--echo #
+create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,2,2);
+insert into t1 values (-5,-5,-5);
+--echo # INT column uses index-only:
+--replace_column 9 #
+explain
+select key1 from t1 where key1=2;
+select key1 from t1 where key1=2;
+select key1 from t1 where key1=-5;
+drop table t1;
+
+
+create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb;
+insert into t2 values (1,1,1), (2,2,2);
+--echo # INT UNSIGNED column uses index-only:
+--replace_column 9 #
+explain
+select key1 from t2 where key1=2;
+select key1 from t2 where key1=2;
+drop table t2;
+
+
+create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb;
+insert into t3 values (1,1,1), (2,2,2);
+--echo # BIGINT uses index-only:
+--replace_column 9 #
+explain
+select key1 from t3 where key1=2;
+select key1 from t3 where key1=2;
+drop table t3;
+
+--echo #
+--echo # Index-only reads for string columns
+--echo #
+create table t1 (
+ pk int primary key,
+ key1 char(10) character set binary,
+ col1 int,
+ key (key1)
+) engine=rocksdb;
+insert into t1 values(1, 'one',11), (2,'two',22);
+--replace_column 9 #
+explain
+select key1 from t1 where key1='one';
+--echo # The following will produce no rows. This looks like a bug,
+--echo # but it is actually correct behavior. Binary strings are end-padded
+--echo # with the \0 character (not with spaces), and comparison does not
+--echo # ignore the trailing \0 bytes.
+select key1 from t1 where key1='one';
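+# Padding the literal with explicit \0 bytes to the full CHAR(10) length makes
+# the comparison match, as the next query shows.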
+--replace_column 9 #
+explain
+select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0';
+select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0';
+drop table t1;
+
+
+create table t2 (
+ pk int primary key,
+ key1 char(10) collate latin1_bin,
+ col1 int,
+ key (key1)
+) engine=rocksdb;
+insert into t2 values(1, 'one',11), (2,'two',22);
+--replace_column 9 #
+explain
+select key1 from t2 where key1='one';
+select key1 from t2 where key1='one';
+drop table t2;
+
+
+create table t3 (
+ pk int primary key,
+ key1 char(10) collate utf8_bin,
+ col1 int,
+ key (key1)
+) engine=rocksdb;
+insert into t3 values(1, 'one',11), (2,'two',22);
+--replace_column 9 #
+explain
+select key1 from t3 where key1='one';
+select key1 from t3 where key1='one';
+drop table t3;
+
+
+--echo # a VARCHAR column
+create table t4 (
+ pk int primary key,
+ key1 varchar(10) collate latin1_bin,
+ key(key1)
+) engine=rocksdb;
+insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five');
+
+--replace_column 9 #
+explain
+select key1 from t4 where key1='two';
+select key1 from t4 where key1='two';
+
+select key1 from t4 where key1='fifty-five';
+
+--replace_column 9 #
+explain
+select key1 from t4 where key1 between 's' and 'u';
+select key1 from t4 where key1 between 's' and 'u';
+
+drop table t4;
+
+--echo #
+--echo # MDEV-4305: RocksDB: Assertion `((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len
+--echo #
+CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8);
+SELECT * FROM t1 WHERE i != 3 OR pk1 > 9;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1),(2,2);
+BEGIN;
+UPDATE t1 SET i = 100;
+
+--connect (con1,localhost,root,,test)
+--error ER_LOCK_WAIT_TIMEOUT
+DELETE IGNORE FROM t1 ORDER BY i;
+--disconnect con1
+
+--connection default
+COMMIT;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field
+--echo # (testcase only)
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'foo'), (2,'bar');
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin;
+INSERT INTO t1 VALUES (1,'h','h');
+SELECT * FROM t1;
+SELECT c1 FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. FOR UPDATE
+--echo #
+CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,4),(2,5);
+SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE;
+DROP TABLE t2;
+
+--echo #
+--echo # MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e');
+SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h';
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field
+--echo #
+create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB;
+insert into t1 values ('2',2),('6',6);
+select * from t1 where c = i;
+select * from t1 ignore index (i) where c = i;
+drop table t1;
+
+
+--echo #
+--echo # Test statement rollback inside a transaction
+--echo #
+create table t1 (pk varchar(12) primary key) engine=rocksdb;
+insert into t1 values ('old-val1'),('old-val2');
+
+create table t2 (pk varchar(12) primary key) engine=rocksdb;
+insert into t2 values ('new-val2'),('old-val1');
+
+begin;
+insert into t1 values ('new-val1');
+--error ER_DUP_ENTRY
+insert into t1 select * from t2;
+commit;
+
+select * from t1;
+drop table t1, t2;
+
+--echo #
+--echo # MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. LIMIT:
+--echo # rows that should be deleted remain in the table
+--echo #
+CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB;
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB;
+
+INSERT INTO t1 (pk) VALUES (NULL),(NULL);
+BEGIN;
+INSERT INTO t2 (pk) VALUES (NULL),(NULL);
+INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL);
+
+--enable_info
+SELECT * FROM t1 ORDER BY pk LIMIT 9;
+DELETE FROM t1 ORDER BY pk LIMIT 9;
+SELECT * FROM t1 ORDER BY pk LIMIT 9;
+--disable_info
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on
+--echo # inserting into a varchar column
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-4061: RocksDB: Changes from an interrupted query are still applied
+--echo #
+
+--enable_connect_log
+
+create table t1 (pk int primary key, a int) engine=RocksDB;
+insert into t1 values (1,10),(2,20);
+
+--let $con_id = `select connection_id()`
+
+set autocommit = 1;
+--send
+update t1 set a = sleep(100) where pk = 1;
+
+--connect (con1,localhost,root,,)
+--echo kill query \$con_id;
+--disable_query_log
+eval kill query $con_id;
+--enable_query_log
+
+--connection default
+--error ER_QUERY_INTERRUPTED
+--reap
+
+select * from t1;
+--disconnect con1
+--disable_connect_log
+drop table t1;
+
+
+--echo #
+--echo # MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB;
+INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0);
+SELECT * FROM t1;
+SELECT pk FROM t1;
+SELECT * FROM t1 WHERE a != 97;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Test @@rocksdb_max_row_locks
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB;
+set @a=-1;
+insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100;
+set @tmp1= @@rocksdb_max_row_locks;
+set rocksdb_max_row_locks= 20;
+--error ER_INTERNAL_ERROR
+update t1 set a=a+10;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Test AUTO_INCREMENT behavior problem,
+--echo # "explicit insert into an auto-inc column is not noticed by RocksDB"
+--echo #
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+
+insert into t1 values (null);
+insert into t1 values (null);
+select * from t1;
+drop table t1;
+
+create table t2 (i int primary key auto_increment) engine=RocksDB;
+
+insert into t2 values (1);
+select * from t2;
+
+--echo # this fails (i.e. used to fail): the RocksDB engine did not notice the use of '1' above
+insert into t2 values (null);
+select * from t2;
+
+--echo # but then this succeeds, so the previous statement must have incremented the next-number counter
+insert into t2 values (null);
+select * from t2;
+drop table t2;
+
+--echo #
+--echo # Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown
+--echo #
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+
+insert into t1 values (null);
+insert into t1 values (null);
+
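+# Restore the saved ROCKSDB_PAUSE_BACKGROUND_WORK value before restarting;
+# it is saved and paused again right after the restart.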
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
+
+--source include/restart_mysqld.inc
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+insert into t1 values (null);
+select * from t1;
+
+drop table t1;
+
+--echo #
+--echo # Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0
+--echo #
+create table t1 (i int primary key auto_increment) engine=RocksDB;
+
+insert into t1 values (null),(null);
+--replace_column 7 #
+show table status like 't1';
+drop table t1;
+
+--echo #
+--echo # Fix Issue #4: Crash when using pseudo-unique keys
+--echo #
+CREATE TABLE t1 (
+ i INT,
+ t TINYINT,
+ s SMALLINT,
+ m MEDIUMINT,
+ b BIGINT,
+ pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY,
+ UNIQUE KEY b_t (b,t)
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+
+SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+DROP TABLE t1;
+
+--echo #
+--echo # Fix issue #5: Transaction rollback doesn't undo all changes.
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (id int auto_increment primary key, value int) engine=rocksdb;
+
+set autocommit=0;
+begin;
+set @a:=0;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
+insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4;
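+# Each of the three inserts above adds 10*10*10*4 = 4000 rows inside the open
+# transaction; after the rollback the row count must be back to 0.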
+rollback;
+select count(*) from t1;
+
+set autocommit=1;
+drop table t0, t1;
+
+--echo #
+--echo # Check status variables
+--echo #
+--replace_column 2 #
+show status like 'rocksdb%';
+
+select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%';
+--echo # RocksDB-SE's status variables are global internally
+--echo # but they are shown as both session and global, like InnoDB's status vars.
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%';
+
+
+--echo #
+--echo # Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+ pk int primary key,
+ col1 varchar(255),
+ key(col1)
+) engine=rocksdb;
+insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0;
+select * from t1 where pk=3;
+drop table t0, t1;
+
+--echo #
+--echo # Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+CREATE TABLE t1 (
+ id1 bigint(20) unsigned NOT NULL DEFAULT '0',
+ id2 bigint(20) unsigned NOT NULL DEFAULT '0',
+ link_type bigint(20) unsigned NOT NULL DEFAULT '0',
+ visibility tinyint(3) NOT NULL DEFAULT '0',
+ data varchar(255) NOT NULL DEFAULT '',
+ time bigint(20) unsigned NOT NULL DEFAULT '0',
+ version int(11) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (link_type,id1,id2)
+) engine=rocksdb;
+
+insert into t1 select a,a,a,1,a,a,a from t0;
+
+alter table t1 add index id1_type (id1,link_type,visibility,time,version,data);
+select * from t1 where id1 = 3;
+
+drop table t0,t1;
+
+--echo #
+--echo # Test column families
+--echo #
+
+create table t1 (
+ pk int primary key,
+ col1 int,
+ col2 int,
+ key(col1) comment 'cf3',
+ key(col2) comment 'cf4'
+) engine=rocksdb;
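+# The index comments name the column family each key is stored in;
+# the 'cf3' and 'cf4' column families are created on demand if they do not exist.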
+
+insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
+--replace_column 9 #
+explain
+select * from t1 where col1=2;
+select * from t1 where col1=2;
+
+--replace_column 9 #
+explain
+select * from t1 where col2=3;
+select * from t1 where col2=3;
+
+select * from t1 where pk=4;
+
+drop table t1;
+
+--echo #
+--echo # Try primary key in a non-default CF:
+--echo #
+create table t1 (
+ pk int,
+ col1 int,
+ col2 int,
+ key(col1) comment 'cf3',
+ key(col2) comment 'cf4',
+ primary key (pk) comment 'cf5'
+) engine=rocksdb;
+insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
+--replace_column 9 #
+explain
+select * from t1 where col1=2;
+select * from t1 where col1=2;
+
+select * from t1 where pk=4;
+
+drop table t1;
+
+--echo #
+--echo # Issue #15: SIGSEGV from reading in blob data
+--echo #
+CREATE TABLE t1 (
+ id int not null,
+ blob_col text,
+ PRIMARY KEY (id)
+) ENGINE=ROCKSDB CHARSET=latin1;
+
+INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col);
+INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col);
+DROP TABLE t1;
+
+
+--echo #
+--echo # Issue #17: Automatic per-index column families
+--echo #
+create table t1 (
+ id int not null,
+ key1 int,
+ PRIMARY KEY (id),
+ index (key1) comment '$per_index_cf'
+) engine=rocksdb;
+
+--echo # Same CF ids with different CF flags
+--error ER_UNKNOWN_ERROR
+create table t1_err (
+ id int not null,
+ key1 int,
+ PRIMARY KEY (id),
+ index (key1) comment 'test.t1.key1'
+) engine=rocksdb;
+
+create table t1_err (
+ id int not null,
+ key1 int,
+ PRIMARY KEY (id),
+ index (key1) comment 'test.t1.key2'
+) engine=rocksdb;
+drop table t1_err;
+
+--echo # Unfortunately there is no way to check which column family everything goes to
+insert into t1 values (1,1);
+select * from t1;
+--echo # Check that ALTER and RENAME are disallowed
+--error ER_NOT_SUPPORTED_YET
+alter table t1 add col2 int;
+
+--error ER_NOT_SUPPORTED_YET
+rename table t1 to t2;
+
+drop table t1;
+
+--echo # Check detection of typos in \$per_index_cf
+--error ER_NOT_SUPPORTED_YET
+create table t1 (
+ id int not null,
+ key1 int,
+ PRIMARY KEY (id),
+ index (key1) comment '$per_idnex_cf'
+)engine=rocksdb;
+
+
+--echo #
+--echo # Issue #22: SELECT ... FOR UPDATE takes a long time
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+ id1 int,
+ id2 int,
+ value1 int,
+ value2 int,
+ primary key(id1, id2) COMMENT 'new_column_family',
+ key(id2)
+) engine=rocksdb default charset=latin1 collate=latin1_bin;
+
+insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B;
+
+--replace_column 9 #
+explain
+select * from t1 where id1=30 and value1=30 for update;
+
+set @var1=(select variable_value
+ from information_schema.global_status
+ where variable_name='rocksdb_number_keys_read');
+
+select * from t1 where id1=3 and value1=3 for update;
+
+set @var2=(select variable_value
+ from information_schema.global_status
+ where variable_name='rocksdb_number_keys_read');
+--echo # The following must return true (before the fix, the difference was 70):
+select if((@var2 - @var1) < 30, 1, @var2-@var1);
+
+drop table t0,t1;
+
+--echo #
+--echo # Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting
+--echo #
+create table t1 (id int primary key, value int) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+--echo # The following must not use 'Using filesort':
+--replace_column 9 #
+explain select * from t1 ORDER BY id;
+drop table t1;
+
+--echo #
+--echo # Issue #26: Index-only scans for DATETIME and TIMESTAMP
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+--echo # Try a DATETIME column:
+create table t1 (
+ pk int auto_increment primary key,
+ kp1 datetime,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 12:34:56', interval a day), a from t0;
+
+select * from t1;
+
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+--echo # Now, the same with NOT NULL column
+create table t2 (
+ pk int auto_increment primary key,
+ kp1 datetime not null,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+drop table t1,t2;
+
+--echo # Try a DATE column:
+create table t1 (
+ pk int auto_increment primary key,
+ kp1 date,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01', interval a day), a from t0;
+
+select * from t1;
+
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01' and '2015-01-05';
+
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01' and '2015-01-05';
+
+--echo # Now, the same with NOT NULL column
+create table t2 (
+ pk int auto_increment primary key,
+ kp1 date not null,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+drop table t1,t2;
+
+--echo #
+--echo # Try a TIMESTAMP column:
+--echo #
+create table t1 (
+ pk int auto_increment primary key,
+ kp1 timestamp,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 12:34:56', interval a day), a from t0;
+
+select * from t1;
+
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+--echo # Now, the same with NOT NULL column
+create table t2 (
+ pk int auto_increment primary key,
+ kp1 timestamp not null,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59';
+drop table t1,t2;
+
+--echo #
+--echo # Try a TIME column:
+--echo #
+create table t1 (
+ pk int auto_increment primary key,
+ kp1 time,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+--disable_warnings
+insert into t1 (kp1,kp2)
+select date_add('2015-01-01 09:00:00', interval a minute), a from t0;
+--enable_warnings
+
+select * from t1;
+
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+
+--echo # Now, the same with NOT NULL column
+create table t2 (
+ pk int auto_increment primary key,
+ kp1 time not null,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '09:01:00' and '09:05:00';
+drop table t1,t2;
+
+--echo #
+--echo # Try a YEAR column:
+--echo #
+create table t1 (
+ pk int auto_increment primary key,
+ kp1 year,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+--disable_warnings
+insert into t1 (kp1,kp2) select 2015+a, a from t0;
+--enable_warnings
+
+select * from t1;
+
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2016' and '2020';
+
+select kp1,kp2 from t1 force index (kp1)
+where kp1 between '2016' and '2020';
+
+--echo # Now, the same with NOT NULL column
+create table t2 (
+ pk int auto_increment primary key,
+ kp1 year not null,
+ kp2 int,
+ col1 int,
+ key(kp1, kp2)
+) engine=rocksdb;
+insert into t2 select * from t1;
+--echo # This must show 'Using index'
+--replace_column 9 #
+explain
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2016' and '2020';
+
+select kp1,kp2 from t2 force index (kp1)
+where kp1 between '2016' and '2020';
+
+drop table t1,t2;
+
+--echo #
+--echo # Issue #57: Release row locks on statement errors
+--echo #
+create table t1 (id int primary key) engine=rocksdb;
+insert into t1 values (1), (2), (3);
+begin;
+insert into t1 values (4), (5), (6);
+--error ER_DUP_ENTRY
+insert into t1 values (7), (8), (2), (9);
+select * from t1;
+
+-- connect(con1,localhost,root,,)
+--connection con1
+begin;
+--error ER_LOCK_WAIT_TIMEOUT
+select * from t1 where id=4 for update;
+
+select * from t1 where id=7 for update;
+
+select * from t1 where id=9 for update;
+
+--connection default
+-- disconnect con1
+drop table t1;
+
+--echo # Index on blob column
+SET @old_mode = @@sql_mode;
+SET sql_mode = 'strict_all_tables';
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb;
+drop table t1;
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb;
+insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde');
+select * from t1;
+--replace_column 9 #
+explain select * from t1 where b like '1%';
+--replace_column 9 #
+explain select b, a from t1 where b like '1%';
+update t1 set b= '12345' where b = '2abcde';
+select * from t1;
+drop table t1;
+--error ER_TOO_LONG_KEY
+create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb;
+SET sql_mode = @old_mode;
+
+drop table t0;
+
+--echo #
+--echo # Fix assertion failure (attempt to overrun the key buffer) for prefix indexes
+--echo #
+
+create table t1 (
+ pk int primary key,
+ col1 varchar(100),
+ key (col1(10))
+) engine=rocksdb;
+
+insert into t1 values (1, repeat('0123456789', 9));
+
+drop table t1;
+
+--echo #
+--echo # Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*)
+--echo #
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+
+CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk;
+
+INSERT INTO t1 VALUES (1,1);
+REPLACE INTO t1 VALUES (1,2);
+
+SELECT * FROM t1;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error
+--echo #
+create table t1(a int primary key);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t2 (
+ a varchar(32) primary key,
+ col1 int
+) engine=rocksdb;
+
+insert into t2
+select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B;
+update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004';
+
+drop table t1,t2;
+
+--echo #
+--echo # Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed
+--echo #
+CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,1,1,1,1,1,1);
+SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6;
+EXPLAIN SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6;
+drop table t2;
+
+--echo #
+--echo # Issue #135: register transaction was not being called for statement
+--echo #
+--disable_warnings
+DROP DATABASE IF EXISTS test_db;
+--enable_warnings
+CREATE DATABASE test_db;
+CREATE TABLE test_db.t1(c1 INT PRIMARY KEY);
+LOCK TABLES test_db.t1 READ;
+SET AUTOCOMMIT=0;
+SELECT c1 FROM test_db.t1;
+START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;
+DROP DATABASE test_db;
+
+--echo #
+--echo # Issue #143: Split rocksdb_bulk_load option into two
+--echo #
+CREATE TABLE t1 (id int primary key, value int) engine=RocksDB;
+SET rocksdb_skip_unique_check=1;
+INSERT INTO t1 VALUES(1, 1);
+INSERT INTO t1 VALUES(1, 2);
+INSERT INTO t1 VALUES(1, 3);
+SELECT * FROM t1;
+--error ER_UNKNOWN_ERROR
+REPLACE INTO t1 VALUES(4, 4);
+--error ER_UNKNOWN_ERROR
+INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1;
+TRUNCATE TABLE t1;
+SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size;
+SET rocksdb_skip_unique_check=0;
+SET rocksdb_commit_in_the_middle=1;
+SET rocksdb_bulk_load_size=10;
+BEGIN;
+INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
+ (11),(12),(13),(14),(15),(16),(17),(18),(19);
+ROLLBACK;
+SELECT * FROM t1;
+INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15);
+BEGIN;
+UPDATE t1 SET value=100;
+ROLLBACK;
+SELECT * FROM t1;
+BEGIN;
+DELETE FROM t1;
+ROLLBACK;
+SELECT * FROM t1;
+SET rocksdb_commit_in_the_middle=0;
+SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size;
+DROP TABLE t1;
+
+--echo #
+--echo # Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance()
+--echo #
+CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY;
+INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar");
+create TABLE t1 (a int not null, b int not null, primary key(a,b));
+INSERT INTO t1 VALUES (1,1);
+SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4));
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't release
+--echo #
+--connect (con1,localhost,root,,)
+create table r1 (id int auto_increment primary key, value int);
+insert into r1 (id) values (null), (null), (null), (null), (null);
+connection con1;
+create table r2 like r1;
+show create table r2;
+connection default;
+begin;
+insert into r1 values (10, 1);
+commit;
+connection con1;
+begin;
+select * from r1;
+commit;
+connection default;
+drop table r1, r2;
+
+# Now the same scenario, but without an explicit primary key, so the table uses a hidden primary key
+create table r1 (id int auto_increment, value int, index i(id));
+insert into r1 (id) values (null), (null), (null), (null), (null);
+connection con1;
+create table r2 like r1;
+show create table r2;
+connection default;
+begin;
+insert into r1 values (10, 1);
+commit;
+connection con1;
+begin;
+select * from r1;
+commit;
+connection default;
+drop table r1, r2;
+
+disconnect con1;
+
+--echo #
+--echo # Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT
+--echo #
+CREATE TABLE t1(c1 INT);
+lock TABLE t1 read local;
+SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM'');
+set AUTOCOMMIT=0;
+start transaction with consistent snapshot;
+SELECT * FROM t1;
+COMMIT;
+UNLOCK TABLES;
+DROP TABLE t1;
+
+--echo #
+--echo # Issue#213 Crash on LOCK TABLES + partitions
+--echo #
+CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1(a)VALUES (20010101101010.999949);
+lock tables t1 write,t1 as t0 write,t1 as t2 write;
+SELECT a FROM t1 ORDER BY a;
+truncate t1;
+INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020');
+UNLOCK TABLES;
+DROP TABLE t1;
+
+--echo #
+--echo # Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type
+--echo # (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue
+--echo # needs a datatype that doesn't support index-only)
+--echo #
+
+CREATE TABLE t1(
+ c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL,
+ c2 varchar(10) character set utf8 collate utf8_general_ci,
+ c3 INT,
+ INDEX idx(c1,c2)
+);
+INSERT INTO t1 VALUES ('c1-val1','c2-val1',5);
+INSERT INTO t1 VALUES ('c1-val2','c2-val3',6);
+INSERT INTO t1 VALUES ('c1-val3','c2-val3',7);
+SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC;
+--replace_column 9 #
+explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC;
+drop table t1;
+
+--echo #
+--echo # Issue#267: MyRocks issue with no matching min/max row and count(*)
+--echo #
+CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2));
+INSERT INTO t1 VALUES(1,null);
+INSERT INTO t1 VALUES(2,null);
+SELECT count(*) as total_rows, min(c2) as min_value FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Issue#263: MyRocks auto_increment skips values if you insert a negative value
+--echo #
+# We have slightly different behavior regarding auto-increment values than
+# InnoDB, so the results of the SHOW TABLE STATUS command will be slightly
+# different. InnoDB will reserve 3 values but only use 2 of them (because
+# the user hard-coded a -1 as the second value). MyRocks will only reserve
+# the values as needed, so only 2 values will be used. This means that the
+# SHOW TABLE STATUS in InnoDB will indicate that the next auto-increment
+# value is 4 while MyRocks will show it as 3.
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
+INSERT INTO t1 VALUES(0),(-1),(0);
+SHOW TABLE STATUS LIKE 't1';
+SELECT * FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY);
+INSERT INTO t1 VALUES(0),(10),(0);
+SHOW TABLE STATUS LIKE 't1';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt
new file mode 100644
index 00000000000..95d819ee425
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options="write_buffer_size=12m;target_file_size_base=1m;max_bytes_for_level_multiplier=10"
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh
new file mode 100755
index 00000000000..9381de1fafc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+cat > $MYSQL_TMP_DIR/cf_configs.cnf <<EOL
+
+EOL
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test
new file mode 100644
index 00000000000..23f9b771d42
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options.test
@@ -0,0 +1,76 @@
+--source include/have_rocksdb.inc
+
+let MYSQLD_DATADIR= `SELECT @@datadir`;
+let file= $MYSQL_TMP_DIR/cf_configs.cnf;
+
+# create new column families with no configs in cf_configs.cnf file
+
+create table t1 (a int,
+ primary key (a) comment 'cf1') engine=rocksdb;
+create table t2 (a int,
+ primary key (a) comment 'cf2') engine=rocksdb;
+create table t3 (a int,
+ primary key (a) comment 'z') engine=rocksdb;
+
+insert into t1 values (1);
+insert into t2 values (2);
+insert into t3 values (2);
+
+# restart with new column families in DB
+
+--source include/restart_mysqld.inc
+
+# check column family options in log -- should be all default settings
+
+--echo
+--echo Default options for all column families:
+--echo
+select cf_name, option_type, value
+ from information_schema.rocksdb_cf_options
+ where option_type in ('WRITE_BUFFER_SIZE',
+ 'TARGET_FILE_SIZE_BASE',
+ 'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
+ order by cf_name, option_type;
+# restart with cf configs for cf1 and cf2
+
+--exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err
+--let $_mysqld_option=--rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m};
+--source include/restart_mysqld_with_option.inc
+
+# check column family options in log -- should reflect individual settings
+
+--echo
+--echo Individualized options for column families:
+--echo
+select cf_name, option_type, value
+ from information_schema.rocksdb_cf_options
+ where option_type in ('WRITE_BUFFER_SIZE',
+ 'TARGET_FILE_SIZE_BASE',
+ 'MAX_BYTES_FOR_LEVEL_MULTIPLIER')
+ order by cf_name, option_type;
+
+# syntax error in options (no equal sign)
+
+--exec echo "restart:--rocksdb_override_cf_options=cf1" > $_expect_file_name
+--error 1
+--source include/wait_until_connected_again.inc
+
+# invalid cf option config (no curly braces)
+
+--exec echo "restart:--rocksdb_override_cf_options=cf1=write_buffer_size=8m" > $_expect_file_name
+--error 1
+--source include/wait_until_connected_again.inc
+
+# invalid cf option config (cf listed twice)
+
+--exec echo "restart:--rocksdb_override_cf_options=cf1={write_buffer_size=8m};cf1={target_file_size_base=2m}" > $_expect_file_name
+--error 1
+--source include/wait_until_connected_again.inc
+
+# clean up
+
+--source include/restart_mysqld.inc
+
+--echo
+drop table t1,t2,t3;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test
new file mode 100644
index 00000000000..8e30332bafe
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test
@@ -0,0 +1,71 @@
+--source include/have_rocksdb.inc
+
+#
+# RocksDB-SE tests for reverse-ordered Column Families
+#
+
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+ pk int primary key,
+ a int not null,
+ b int not null,
+ key(a) comment 'rev:foo',
+ key(b) comment 'bar'
+) engine=rocksdb;
+
+insert into t1 select a,a,a from t0;
+insert into t1 select a+10,a+10,a+10 from t0;
+
+--echo # Primary key is not in a reverse-ordered CF, so full table scan
+--echo # returns rows in ascending order:
+select * from t1;
+
+--replace_column 9 #
+explain
+select a from t1 order by a limit 5;
+select a from t1 order by a limit 5;
+
+--replace_column 9 #
+explain
+select b from t1 order by b limit 5;
+select b from t1 order by b limit 5;
+
+--replace_column 9 #
+explain
+select a from t1 order by a desc limit 5;
+select a from t1 order by a desc limit 5;
+
+--replace_column 9 #
+explain
+select b from t1 order by b desc limit 5;
+select b from t1 order by b desc limit 5;
+
+drop table t1;
+
+--echo #
+--echo # Try a primary key in a reverse-ordered CF.
+--echo #
+
+create table t2 (
+ pk int,
+ a int not null,
+ primary key(pk) comment 'rev:cf1'
+) engine=rocksdb;
+
+insert into t2 select a,a from t0;
+--echo # Primary key is in a reverse-ordered CF, so full table scan
+--echo # returns rows in descending order:
+select * from t2;
+
+set autocommit=0;
+begin;
+delete from t2 where a=3 or a=7;
+select * from t2;
+rollback;
+set autocommit=1;
+
+drop table t2;
+drop table t0;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt
new file mode 100644
index 00000000000..792e3808f1e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_debug_optimizer_n_rows=1000
+--rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl
new file mode 100644
index 00000000000..322f0781719
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.pl
@@ -0,0 +1,16 @@
+$file=$ARGV[0];
+$total=$ARGV[1];
+$pct=$ARGV[2];
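+# A count is accepted if it is within +/-2 percentage points of the expected
+# sampling rate ($pct percent of the $total index entries / records).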
+
+open($fh, "<", $file) or die $!;
+while(readline($fh)) {
+ if (/(\d+) index entries checked \((\d+) had checksums/) {
+ if ($1 == $total && $2 >= $total*($pct-2)/100 && $2 <= $total*($pct+2)/100) {
+ printf("%d index entries had around %d checksums\n", $total, $total*$pct/100);
+ }
+ }elsif (/(\d+) table records had checksums/) {
+ if ($1 >= $total*($pct-2)/100 && $1 <= $total*($pct+2)/100) {
+ printf("Around %d table records had checksums\n", $total*$pct/100);
+ }
+ }
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test
new file mode 100644
index 00000000000..1a0364ebaee
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test
@@ -0,0 +1,124 @@
+--source include/have_rocksdb.inc
+
+#
+# Tests for row checksums feature
+#
+--source include/have_debug.inc
+
+set @save_rocksdb_store_checksums=@@global.rocksdb_store_checksums;
+set @save_rocksdb_verify_checksums=@@global.rocksdb_verify_checksums;
+set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct;
+
+# wiping mysql log for repeatable tests
+--exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+--disable_warnings
+drop table if exists t1,t2,t3;
+--enable_warnings
+-- exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+show variables like 'rocksdb_%checksum%';
+
+create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t1 values (1,1,1),(2,2,2),(3,3,3);
+check table t1;
+--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t1" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+drop table t1;
+
+set session rocksdb_store_checksums=on;
+create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t2 values (1,1,1),(2,2,2),(3,3,3);
+check table t2;
+--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t2" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+--echo # Now, make a table that has both rows with checksums and without
+create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+insert into t3 values (1,1,1),(2,2,2),(3,3,3);
+set session rocksdb_store_checksums=off;
+update t3 set b=3 where a=2;
+set session rocksdb_store_checksums=on;
+check table t3;
+--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t3" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+set session rocksdb_store_checksums=on;
+set session rocksdb_checksums_pct=5;
+create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
+--disable_query_log
+let $i=0;
+let $x= 100000;
+while ($i<10000)
+{
+ inc $i;
+ eval insert t4(pk,a,b) values($i, $i, $i div 10);
+ eval update t4 set a= a+$x where a=$i;
+ eval update t4 set pk=pk+$x where pk=$i;
+}
+--enable_query_log
+check table t4;
+--exec grep "^[0-9-]* [0-9:]* [0-9]* \[Note\] CHECKTABLE t4" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 > $MYSQL_TMP_DIR/rocksdb_checksums.log
+--exec perl suite/rocksdb/t/rocksdb_checksums.pl $MYSQL_TMP_DIR/rocksdb_checksums.log 10000 5
+--remove_file $MYSQL_TMP_DIR/rocksdb_checksums.log
+set session rocksdb_checksums_pct=100;
+
+--echo #
+--echo # Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches.
+--echo #
+insert into mtr.test_suppressions values
+ ('Checksum mismatch in key of key-value pair for index'),
+ ('Checksum mismatch in value of key-value pair for index'),
+ ('Data with incorrect checksum');
+
+--echo # 1. Start with mismatch in key checksum of the PK.
+set session debug= "+d,myrocks_simulate_bad_pk_checksum1";
+set session rocksdb_verify_checksums=off;
+select * from t3;
+set session rocksdb_verify_checksums=on;
+--error ER_INTERNAL_ERROR
+select * from t3;
+--error ER_INTERNAL_ERROR
+select * from t4;
+set session debug= "-d,myrocks_simulate_bad_pk_checksum1";
+
+--echo # 2. Continue with mismatch in pk value checksum.
+set session debug= "+d,myrocks_simulate_bad_pk_checksum2";
+set session rocksdb_verify_checksums=off;
+select * from t3;
+set session rocksdb_verify_checksums=on;
+--error ER_INTERNAL_ERROR
+select * from t3;
+--error ER_INTERNAL_ERROR
+select * from t4;
+set session debug= "-d,myrocks_simulate_bad_pk_checksum2";
+
+--echo # 3. Check if we catch checksum mismatches for secondary indexes
+--replace_column 9 #
+explain
+select * from t3 force index(a) where a<4;
+select * from t3 force index(a) where a<4;
+
+set session debug= "+d,myrocks_simulate_bad_key_checksum1";
+--error ER_INTERNAL_ERROR
+select * from t3 force index(a) where a<4;
+--error ER_INTERNAL_ERROR
+select * from t4 force index(a) where a<1000000;
+set session debug= "-d,myrocks_simulate_bad_key_checksum1";
+
+--echo # 4. The same for index-only reads?
+--replace_column 9 #
+explain
+select a from t3 force index(a) where a<4;
+select a from t3 force index(a) where a<4;
+
+set session debug= "+d,myrocks_simulate_bad_key_checksum1";
+--error ER_INTERNAL_ERROR
+select a from t3 force index(a) where a<4;
+--error ER_INTERNAL_ERROR
+select a from t4 force index(a) where a<1000000;
+set session debug= "-d,myrocks_simulate_bad_key_checksum1";
+
+set @@global.rocksdb_store_checksums=@save_rocksdb_store_checksums;
+set @@global.rocksdb_verify_checksums=@save_rocksdb_verify_checksums;
+set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct;
+
+drop table t2,t3,t4;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test
new file mode 100644
index 00000000000..ecb4b2a3609
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test
@@ -0,0 +1,24 @@
+--source include/have_rocksdb.inc
+--source include/have_debug_sync.inc
+
+# This validates the fix for Issue #144. The problem was that with more
+# than one client accessing/deleting the same row there was a possibility
+# of client A finding a row (through Next() or Prev()) but the row being
+# deleted before the GetForUpdate() call could occur. When this happened
+# a nearly useless error was being returned.
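+# The include file below is sourced four times: ascending and descending scan
+# order, each with the default column family and a reverse-ordered one (rev:cf2).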
+
+let $order=ASC;
+let $comment="";
+--source suite/rocksdb/include/rocksdb_concurrent_delete.inc
+
+let $order=DESC;
+let $comment="";
+--source suite/rocksdb/include/rocksdb_concurrent_delete.inc
+
+let $order=ASC;
+let $comment="rev:cf2";
+--source suite/rocksdb/include/rocksdb_concurrent_delete.inc
+
+let $order=DESC;
+let $comment="rev:cf2";
+--source suite/rocksdb/include/rocksdb_concurrent_delete.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py
new file mode 100644
index 00000000000..37b118d525a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py
@@ -0,0 +1,95 @@
+"""
+This script tests concurrent inserts on a given table.
+Example Usage (in Mysql Test Framework):
+
+ CREATE TABLE t1 (a INT) ENGINE=rocksdb;
+
+ let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py \
+ root 127.0.0.1 $MASTER_MYPORT test t1 100 4;
+ exec $exec;
+
+"""
+import cStringIO
+import hashlib
+import MySQLdb
+import os
+import random
+import signal
+import sys
+import threading
+import time
+import string
+import traceback  # used by the exception handlers below
+
+def get_insert(table_name, idx):
+ return """INSERT INTO %s (a) VALUES (%d)""" % (table_name, idx)
+
+class Inserter(threading.Thread):
+ Instance = None
+ def __init__(self, con, table_name, num_inserts):
+ threading.Thread.__init__(self)
+ self.finished = False
+ self.num_inserts = num_inserts
+ con.autocommit(False)
+ self.con = con
+ self.rand = random.Random()
+ self.exception = None
+ self.table_name = table_name
+ Inserter.Instance = self
+ self.start()
+ def run(self):
+ try:
+ self.runme()
+ except Exception, e:
+ self.exception = traceback.format_exc()
+ print "caught (%s)" % e
+ finally:
+ self.finish()
+ def runme(self):
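+        # Insert rows one at a time, committing after roughly 30% of the
+        # inserts (rand 1..10 < 4) so transactions from different threads interleave.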
+ cur = self.con.cursor()
+ for i in xrange(self.num_inserts):
+ try:
+ cur.execute(get_insert(self.table_name, i))
+ r = self.rand.randint(1,10)
+ if r < 4:
+ self.con.commit()
+ except:
+ cur = self.con.cursor()
+ try:
+ self.con.commit()
+ except Exception, e:
+ self.exception = traceback.format_exc()
+ print "caught (%s)" % e
+ pass
+ def finish(self):
+ self.finished = True
+
+if __name__ == '__main__':
+ if len(sys.argv) != 8:
+ print "Usage: rocksdb_concurrent_insert.py user host port db_name " \
+ "table_name num_inserts num_threads"
+ sys.exit(1)
+
+ user = sys.argv[1]
+ host = sys.argv[2]
+ port = int(sys.argv[3])
+ db = sys.argv[4]
+ table_name = sys.argv[5]
+ num_inserts = int(sys.argv[6])
+ num_workers = int(sys.argv[7])
+
+ worker_failed = False
+ workers = []
+ for i in xrange(num_workers):
+ inserter = Inserter(
+ MySQLdb.connect(user=user, host=host, port=port, db=db), table_name,
+ num_inserts)
+ workers.append(inserter)
+
+ for w in workers:
+ w.join()
+ if w.exception:
+ print "Worker hit an exception:\n%s\n" % w.exception
+ worker_failed = True
+
+ if worker_failed:
+ sys.exit(1)
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test
new file mode 100644
index 00000000000..6dd4dd11748
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test
@@ -0,0 +1,30 @@
+--source include/have_rocksdb.inc
+
+let $ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test.install.db;
+let $rdb_ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test;
+let $sql_file = $MYSQL_TMP_DIR/rocksdb_datadir.sql;
+
+--write_file $sql_file
+DROP DATABASE IF EXISTS mysqltest;
+CREATE DATABASE mysqltest;
+USE mysqltest;
+CREATE TABLE t1 (a INT PRIMARY KEY);
+INSERT INTO t1 VALUES(42);
+SET GLOBAL rocksdb_force_flush_memtable_now = 1;
+SELECT sleep(1);
+DROP TABLE t1;
+EOF
+
+# Must ensure this directory exists before launching mysqld
+mkdir $ddir;
+
+# Launch mysqld with non-standard rocksdb_datadir
+exec $MYSQLD_BOOTSTRAP_CMD --datadir=$ddir --rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < $sql_file;
+
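+# RocksDB creates MANIFEST-0000* files in its data directory, so finding one
+# under $rdb_ddir confirms the non-default rocksdb_datadir was actually used.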
+--echo Check for the number of MANIFEST files
+exec ls $rdb_ddir/MANIFEST-0000* | wc -l;
+
+# Clean up
+exec rm -rf $ddir;
+remove_files_wildcard $rdb_ddir *;
+remove_file $sql_file;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt
new file mode 100644
index 00000000000..885b15e36e3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_perf_context_level=2
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc
new file mode 100644
index 00000000000..5728e49b5e0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.inc
@@ -0,0 +1,154 @@
+#
+# Testing Index Condition Pushdown for MyRocks
+# Test file parameter: $cf_name specifies the CF to store test data in
+# It can be forward or reverse-ordered CF
+#
+select * from information_schema.engines where engine = 'rocksdb';
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+
+eval
+create table t2 (
+ pk int primary key,
+ kp1 int,
+ kp2 int,
+ col1 int,
+ key (kp1,kp2) comment '$cf_name'
+) engine=rocksdb;
+
+insert into t2 select a,a,a,a from t1;
+
+--echo # Try a basic case:
+--replace_column 9 #
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+
+--echo # Check that ICP doesn't work for columns whose value
+--echo # can't be restored from the mem-comparable form:
+
+eval
+create table t3 (
+ pk int primary key,
+ kp1 int,
+ kp2 varchar(10) collate utf8_general_ci,
+ col1 int,
+ key (kp1,kp2) comment '$cf_name'
+) engine=rocksdb;
+
+insert into t3 select a,a/10,a,a from t1;
+--echo # This must not use ICP:
+--replace_column 9 #
+explain
+select * from t3 where kp1=3 and kp2 like '%foo%';
+
+--replace_column 9 #
+explain format=json
+select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
+
+--echo # Check that we handle the case where an out-of-range key is encountered
+--echo # sooner than a match for the index condition
+--replace_column 9 #
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+select * from t2 where kp1< 3 and kp2+1>50000;
+
+--replace_column 9 #
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+select * from t2 where kp1< 3 and kp2+1>50000;
+
+--echo # Try doing backwards scans
+--replace_column 9 #
+explain
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+
+--replace_column 9 #
+explain
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+
+--replace_column 9 #
+explain
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+
+drop table t0,t1,t2,t3;
+
+--echo #
+--echo # Check how ICP affects counters
+--echo #
+--echo # First, some preparations
+--echo #
+create procedure save_read_stats()
+ select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT
+ into @rr, @rq, @rif, @rin
+ from information_schema.table_statistics
+ where table_name='t4' and table_schema=database();
+
+create procedure get_read_stats()
+ select
+ ROWS_READ-@rr, ROWS_REQUESTED-@rq, ROWS_INDEX_FIRST-@rif, ROWS_INDEX_NEXT-@rin
+ from information_schema.table_statistics
+ where table_name='t4' and table_schema=database();
+
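+# The procedures above snapshot and diff per-table counters from
+# information_schema.table_statistics; comparing the ICP-on and ICP-off runs
+# below makes the rows-read savings from index condition pushdown visible.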
+eval
+create table t4 (
+ id int,
+ id1 int,
+ id2 int,
+ value int,
+ value2 varchar(100),
+ primary key (id),
+ key id1_id2 (id1, id2) comment '$cf_name'
+) engine=rocksdb charset=latin1 collate latin1_bin;
+
+insert into t4 values
+(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5),
+(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10);
+
+--echo #
+--echo # Now, the test itself
+--echo #
+call save_read_stats();
+call get_read_stats();
+
+
+--echo # ============== index-only query ==============
+--replace_column 9 #
+explain
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+call save_read_stats();
+select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+query_vertical call get_read_stats();
+
+--echo # ============== Query without ICP ==============
+set optimizer_switch='index_condition_pushdown=off';
+--replace_column 9 #
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+query_vertical call get_read_stats();
+
+--echo # ============== Query with ICP ==============
+set optimizer_switch='index_condition_pushdown=on';
+--replace_column 9 #
+explain
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+call save_read_stats();
+select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1;
+query_vertical call get_read_stats();
+
+drop table t4;
+drop procedure save_read_stats;
+drop procedure get_read_stats;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test
new file mode 100644
index 00000000000..175476974df
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test
@@ -0,0 +1,44 @@
+--source include/have_rocksdb.inc
+
+
+let $cf_name=cf1;
+
+--source suite/rocksdb/t/rocksdb_icp.inc
+
+--echo #
+--echo # Issue #67: Inefficient index condition pushdown
+--echo #
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+ pk int not null primary key,
+ key1 bigint(20) unsigned,
+ col1 int,
+ key (key1)
+) engine=rocksdb;
+
+insert into t1
+select
+ A.a+10*B.a+100*C.a,
+ A.a+10*B.a+100*C.a,
+ 1234
+from t0 A, t0 B, t0 C;
+
+set @count=0;
+let $save_query=
+set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context
+ where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT');
+
+--replace_column 9 #
+explain
+select * from t1 where key1=1;
+
+eval $save_query;
+select * from t1 where key1=1;
+eval $save_query;
+--echo # The following must be =1, or in any case not 999:
+select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment";
+
+drop table t0,t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt
new file mode 100644
index 00000000000..6ae466bcc09
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test
new file mode 100644
index 00000000000..c3fd43e5b7e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+
+let $cf_name=rev:cf1;
+
+--source suite/rocksdb/t/rocksdb_icp.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test
new file mode 100644
index 00000000000..3b28df0d63b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test
@@ -0,0 +1,92 @@
+--source include/have_rocksdb.inc
+
+#
+# MyRocks-specific tests for locking
+#
+--source include/have_debug.inc
+
+--enable_connect_log
+create table t1 (pk int not null primary key) engine=rocksdb;
+
+insert into t1 values (1),(2),(3);
+
+set autocommit=0;
+begin;
+select * from t1 where pk=1 for update;
+
+--connect (con1,localhost,root,,)
+--connection con1
+--echo ### Connection con1
+let $ID= `select connection_id()`;
+set @@rocksdb_lock_wait_timeout=500;
+set autocommit=0;
+begin;
+--send select * from t1 where pk=1 for update;
+
+--connection default
+--echo ### Connection default
+
+let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST
+ where ID = $ID and STATE = "Waiting for row lock";
+--source include/wait_condition.inc
+## Waiting for row lock
+## select connection_id();
+## select state='Waiting for row lock' from information_schema.processlist where id=2;
+
+rollback;
+
+connection con1;
+reap;
+rollback;
+connection default;
+
+##
+## Now, repeat the same test but let the wait time out.
+##
+begin;
+select * from t1 where pk=1 for update;
+
+--connection con1
+--echo ### Connection con1
+set @@rocksdb_lock_wait_timeout=2;
+set autocommit=0;
+begin;
+--error ER_LOCK_WAIT_TIMEOUT
+select * from t1 where pk=1 for update;
+
+--connection default
+
+rollback;
+set autocommit=1;
+
+--connection con1
+drop table t1;
+--connection default
+
+--echo #
+--echo # Now, test what happens if another transaction modified the record and committed
+--echo #
+
+CREATE TABLE t1 (
+ id int primary key,
+ value int
+) engine=rocksdb collate latin1_bin;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10);
+
+--connection con1
+BEGIN;
+SELECT * FROM t1 WHERE id=3;
+
+--connection default
+BEGIN;
+UPDATE t1 SET value=30 WHERE id=3;
+COMMIT;
+
+--connection con1
+--error ER_LOCK_DEADLOCK
+SELECT * FROM t1 WHERE id=3 FOR UPDATE;
+
+ROLLBACK;
+--disconnect con1
+--connection default
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test
new file mode 100644
index 00000000000..82fb70b0596
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test
@@ -0,0 +1,121 @@
+--source include/have_rocksdb.inc
+
+--source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1,t2;
+--enable_warnings
+
+--echo # Tests for MyRocks + partitioning
+
+--echo #
+--echo # MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB
+PARTITION BY HASH(pk) PARTITIONS 2;
+INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 1);
+
+CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1, 1), (2, 1);
+
+SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 );
+
+drop table t1,t2;
+
+--echo #
+--echo # Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables
+--echo #
+CREATE TABLE t1 (
+ id INT PRIMARY KEY,
+ a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8,
+ b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null,
+ c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null,
+ INDEX (a),
+ INDEX (b),
+ INDEX (c)
+) ENGINE=RocksDB PARTITION BY key (id) partitions 2;
+
+INSERT INTO t1 (id, b) VALUES (28, 3);
+UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1;
+check table t1;
+drop table t1;
+
+--echo #
+--echo # Issue #105, another testcase
+--echo #
+create table t1 (
+ pk int primary key,
+ col1 int,
+ col2 int,
+ key (col1) comment 'rev:cf_issue105'
+) engine=rocksdb partition by hash(pk) partitions 2;
+
+insert into t1 values (1,10,10);
+insert into t1 values (2,10,10);
+
+insert into t1 values (11,20,20);
+insert into t1 values (12,20,20);
+explain select * from t1 force index(col1) where col1=10;
+select * from t1 force index(col1) where col1=10;
+select * from t1 use index () where col1=10;
+drop table t1;
+
+--echo #
+--echo # Issue #108: Index-only scans do not work for partitioned tables and extended keys
+--echo #
+create table t1 (
+ pk int primary key,
+ col1 int,
+ col2 int,
+ key (col1)
+) engine=rocksdb partition by hash(pk) partitions 2;
+
+insert into t1 values (1,10,10);
+insert into t1 values (2,10,10);
+
+insert into t1 values (11,20,20);
+insert into t1 values (12,20,20);
+--echo # The following must use "Using index"
+explain select pk from t1 force index(col1) where col1=10;
+
+drop table t1;
+
+--echo #
+--echo # Issue #214: subqueries cause crash
+--echo #
+create TABLE t1(a int,b int,c int,primary key(a,b))
+ partition by list (b*a) (partition x1 values in (1) tablespace ts1,
+ partition x2 values in (3,11,5,7) tablespace ts2,
+ partition x3 values in (16,8,5+19,70-43) tablespace ts3);
+create table t2(b binary(2));
+set session optimizer_switch=5;
+insert into t1(a,b) values(1,7);
+select a from t1 where a in (select a from t1 where a in (select b from t2));
+
+drop table t1, t2;
+
+--echo #
+--echo # Issue #260: altering name to invalid value leaves table inaccessible
+--echo #
+CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4;
+INSERT INTO t1 VALUES(1,'a');
+--error ER_ERROR_ON_RENAME
+RENAME TABLE t1 TO db3.t3;
+SELECT * FROM t1;
+SHOW TABLES;
+# try the rename again, this time within the same database
+RENAME TABLE t1 TO test.t3;
+SELECT * FROM t3;
+SHOW TABLES;
+# now try it again but with another existing database
+CREATE DATABASE db3;
+USE test;
+RENAME TABLE t3 to db3.t2;
+USE db3;
+SELECT * FROM t2;
+SHOW TABLES;
+# cleanup
+DROP TABLE t2;
+use test;
+DROP DATABASE db3;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt
new file mode 100644
index 00000000000..a00258bc48c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt
@@ -0,0 +1 @@
+--query_cache_type=1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test
new file mode 100644
index 00000000000..5cfbe3fbd39
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test
@@ -0,0 +1,30 @@
+--source include/have_rocksdb.inc
+
+# Important:
+# The test needs to be run with --mysqld=--query-cache-type=1
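+# (the rocksdb_qcache-master.opt file above provides --query_cache_type=1)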
+
+-- source include/have_query_cache.inc
+--enable_connect_log
+
+create table t1 (pk int primary key, c char(8)) engine=RocksDB;
+insert into t1 values (1,'new'),(2,'new');
+
+select * from t1;
+
+--connect (con1,localhost,root,,)
+
+update t1 set c = 'updated';
+#select * from t1;
+
+--connection default
+flush status;
+show status like 'Qcache_hits';
+show global status like 'Qcache_hits';
+select * from t1;
+select sql_no_cache * from t1;
+select * from t1 where pk = 1;
+show status like 'Qcache_hits';
+show status like 'Qcache_not_cached';
+show global status like 'Qcache_hits';
+
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt
new file mode 100644
index 00000000000..6ad42e58aa2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test
new file mode 100644
index 00000000000..c6f1ecc8424
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test
@@ -0,0 +1,193 @@
+--source include/have_rocksdb.inc
+
+#
+# Range access test for RocksDB storage engine
+#
+select * from information_schema.engines where engine = 'rocksdb';
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3,t4,t5;
+--enable_warnings
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+
+create table t2 (
+ pk int not null,
+ a int not null,
+ b int not null,
+ primary key(pk),
+ key(a) comment 'rev:cf1'
+) engine=rocksdb;
+
+# 10 pk values for each value of a...
+insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A;
+
+--echo #
+--echo # HA_READ_KEY_EXACT tests
+--echo #
+
+--echo # Original failure was here:
+--replace_column 9 #
+explain
+select * from t2 force index (a) where a=0;
+select * from t2 force index (a) where a=0;
+
+--echo # The rest are for code coverage:
+--replace_column 9 #
+explain
+select * from t2 force index (a) where a=2;
+select * from t2 force index (a) where a=2;
+
+--replace_column 9 #
+explain
+select * from t2 force index (a) where a=3 and pk=33;
+select * from t2 force index (a) where a=3 and pk=33;
+
+select * from t2 force index (a) where a=99 and pk=99;
+select * from t2 force index (a) where a=0 and pk=0;
+select * from t2 force index (a) where a=-1;
+select * from t2 force index (a) where a=-1 and pk in (101,102);
+select * from t2 force index (a) where a=100 and pk in (101,102);
+
+
+--echo #
+--echo # #36: Range in form tbl.key >= const doesn't work in reverse column family
+--echo #
+--replace_column 9 #
+explain
+select count(*) from t2 force index (a) where a>=0 and a <=1;
+select count(*) from t2 force index (a) where a>=0 and a <=1;
+
+--replace_column 9 #
+explain
+select count(*) from t2 force index (a) where a>=-1 and a <=1;
+select count(*) from t2 force index (a) where a>=-1 and a <=1;
+
+--replace_column 9 #
+explain
+select * from t2 force index (a) where a=0 and pk>=3;
+select * from t2 force index (a) where a=0 and pk>=3;
+
+--echo # Try edge cases where we fall over the end of the table
+create table t3 like t2;
+insert into t3 select * from t2;
+
+select * from t3 where pk>=1000000;
+select * from t2 where pk>=1000000;
+
+--echo #
+--echo # #42: Range in form tbl.key > const doesn't work in reverse column family
+--echo #
+--replace_column 9 #
+explain
+select count(*) from t2 force index (a) where a>0;
+select count(*) from t2 force index (a) where a>0;
+
+--replace_column 9 #
+explain
+select count(*) from t2 force index (a) where a>99;
+select count(*) from t2 force index (a) where a>99;
+
+select * from t2 where pk>1000000;
+select * from t3 where pk>1000000;
+
+--replace_column 9 #
+explain
+select count(*) from t2 force index (a) where a=2 and pk>25;
+select count(*) from t2 force index (a) where a=2 and pk>25;
+
+
+select * from t2 force index (a) where a>-10 and a < 1;
+select * from t3 force index (a) where a>-10 and a < 1;
+
+
+--echo #
+--echo # #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family
+--echo #
+select max(a) from t2 where a < 2;
+select max(a) from t2 where a < -1;
+
+select max(pk) from t2 where a=3 and pk < 6;
+
+select max(pk) from t2 where pk < 200000;
+select max(pk) from t2 where pk < 20;
+
+select max(a) from t3 where a < 2;
+select max(a) from t3 where a < -1;
+select max(pk) from t3 where pk < 200000;
+select max(pk) from t3 where pk < 20;
+
+select max(pk) from t2 where a=3 and pk < 33;
+select max(pk) from t3 where a=3 and pk < 33;
+
+--echo #
+--echo # #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF
+--echo #
+
+--echo # Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV
+--replace_column 9 #
+explain
+select * from t2 where a between 99 and 2000 order by a desc;
+select * from t2 where a between 99 and 2000 order by a desc;
+
+select max(a) from t2 where a <=10;
+select max(a) from t2 where a <=-4;
+
+select max(pk) from t2 where a=5 and pk <=55;
+select max(pk) from t2 where a=5 and pk <=55555;
+select max(pk) from t2 where a=5 and pk <=0;
+
+select max(pk) from t2 where pk <=-1;
+select max(pk) from t2 where pk <=999999;
+select max(pk) from t3 where pk <=-1;
+select max(pk) from t3 where pk <=999999;
+
+--echo #
+--echo # Tests for search_flag=HA_READ_PREFIX_LAST
+--echo #
+
+create table t4 (
+ pk int primary key,
+ a int,
+ b int,
+ c int,
+ key(a,b,c)
+) engine=rocksdb;
+
+insert into t4 select pk,pk,pk,pk from t2 where pk < 100;
+
+--replace_column 9 #
+explain
+select * from t4 where a=1 and b in (1) order by c desc;
+select * from t4 where a=1 and b in (1) order by c desc;
+
+--replace_column 9 #
+explain
+select * from t4 where a=5 and b in (4) order by c desc;
+select * from t4 where a=5 and b in (4) order by c desc;
+
+--echo # HA_READ_PREFIX_LAST for reverse-ordered CF
+create table t5 (
+ pk int primary key,
+ a int,
+ b int,
+ c int,
+ key(a,b,c) comment 'rev:cf2'
+) engine=rocksdb;
+
+insert into t5 select pk,pk,pk,pk from t2 where pk < 100;
+
+--replace_column 9 #
+explain
+select * from t5 where a=1 and b in (1) order by c desc;
+select * from t5 where a=1 and b in (1) order by c desc;
+
+--replace_column 9 #
+explain
+select * from t5 where a=5 and b in (4) order by c desc;
+select * from t5 where a=5 and b in (4) order by c desc;
+
+drop table t0,t1,t2,t3,t4,t5;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test
new file mode 100644
index 00000000000..6b8d0b90e90
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test
@@ -0,0 +1,20 @@
+--source include/have_rocksdb.inc
+
+# Issue#212 MyRocks chooses full index scan even if range scan is more efficient
+# rocksdb_debug_optimizer_n_rows must not be set.
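+# (presumably because that debug option fakes the optimizer's row estimates and
+#  would hide the cost-based range-vs-full-scan decision this test exercises)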
+
+create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2));
+--disable_query_log
+let $i=0;
+while ($i<10000)
+{
+ inc $i;
+ eval insert t1(id1, id2, c1, c2, c3, c4, c5, c6, c7)
+ values($i, 0, $i, 0, 0, 0, 0, 0, 0);
+}
+--enable_query_log
+analyze table t1;
+select count(*) from t1;
+explain select c1 from t1 where c1 > 5 limit 10;
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test
new file mode 100644
index 00000000000..ebcc741fc17
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test
@@ -0,0 +1,57 @@
+source include/have_rocksdb.inc;
+create table t1 (a int primary key) engine=rocksdb;
+
+-- echo Verify rocksdb_rows_inserted
+select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted';
+insert into t1 values(1);
+select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted';
+select @new_rows_inserted - @old_rows_inserted;
+
+-- echo Verify rocksdb_rows_updated
+select variable_value into @old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated';
+update t1 set a=2 where a=1;
+select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated';
+select @new_rows_updated - @old_rows_updated;
+
+-- echo Verify rocksdb_rows_read
+select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read';
+select * from t1;
+select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read';
+select @new_rows_read - @old_rows_read;
+
+-- echo Verify rocksdb_rows_deleted
+select variable_value into @old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted';
+delete from t1;
+select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted';
+select @new_rows_deleted - @old_rows_deleted;
+
+use mysql;
+create table t1(a int primary key) engine=rocksdb;
+
+-- echo Verify rocksdb_system_rows_inserted
+select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted';
+insert into t1 values(1);
+select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted';
+select @new_system_rows_inserted - @old_system_rows_inserted;
+
+-- echo Verify rocksdb_system_rows_updated
+select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated';
+update t1 set a=2 where a=1;
+select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated';
+select @new_system_rows_updated - @old_system_rows_updated;
+
+-- echo Verify rocksdb_system_rows_read
+select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read';
+select * from t1;
+select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read';
+select @new_system_rows_read - @old_system_rows_read;
+
+-- echo Verify rocksdb_system_rows_deleted
+select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted';
+delete from t1;
+select variable_value into @new_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted';
+select @new_system_rows_deleted - @old_system_rows_deleted;
+
+drop table t1;
+use test;
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test
new file mode 100644
index 00000000000..5eaeff5cdbd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test
@@ -0,0 +1,80 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# First set sampling rate to 100% and make sure that the baseline is
+# correct and we get the correct number of rows as a result.
+#
+SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100;
+
+create table t1 (pk int primary key) engine=rocksdb;
+
+--disable_query_log
+let $i = 0;
+let $n = 10000;
+
+while ($i < $n)
+{
+ inc $i;
+ eval insert t1(pk) values($i);
+}
+--enable_query_log
+
+set global rocksdb_force_flush_memtable_now = true;
+
+# This should return 10K rows.
+select table_rows from information_schema.tables
+where table_schema = database() and table_name = 't1';
+
+let $t1_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't1'`;
+
+drop table t1;
+
+--disable_warnings
+drop table if exists t2;
+--enable_warnings
+
+#
+# Now, set the sampling rate to 10% and expect to see the same amount of
+# rows.
+#
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10;
+
+create table t2 (pk int primary key) engine=rocksdb;
+
+--disable_query_log
+let $i = 0;
+let $n = 10000;
+
+while ($i < $n)
+{
+ inc $i;
+ eval insert t2(pk) values($i);
+}
+--enable_query_log
+
+set global rocksdb_force_flush_memtable_now = true;
+
+# This should return 10K rows as well.
+select table_rows from information_schema.tables
+where table_schema = database() and table_name = 't2';
+
+let $t2_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't2'`;
+let $diff = `select abs($t1_len - $t2_len)`;
+
+#
+# Table sizes are approximations and for this particular case we allow about
+# 10% deviation.
+#
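+# (the SELECT below runs only when the two sizes differ by less than 6000 bytes,
+#  so its output in the result file is what asserts the sizes stayed close)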
+if ($diff < 6000) {
+ select table_name from information_schema.tables where table_schema = database() and table_name = 't2';
+}
+
+drop table t2;
+
+SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf
new file mode 100644
index 00000000000..13dea1236d8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf
@@ -0,0 +1,14 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+sync_binlog=0
+binlog_format=row
+rocksdb_read_free_rpl_tables="t.*"
+slave-exec-mode=strict
+
+[mysqld.2]
+sync_binlog=0
+binlog_format=row
+rocksdb_read_free_rpl_tables="t.*"
+slave-exec-mode=strict
+rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test
new file mode 100644
index 00000000000..38fb3c32149
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test
@@ -0,0 +1,302 @@
+--source include/have_rocksdb.inc
+
+source include/master-slave.inc;
+
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+# initialization/insert
+connection master;
+--source init_stats_procedure.inc
+
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3), (4,4);
+--source include/sync_slave_sql_with_master.inc
+
+--let $diff_tables= master:t1, slave:t1
+
+--echo
+--echo # regular update/delete. With rocksdb_read_free_rpl_tables="t.*", rocksdb_rows_read does not increase on slaves
+--echo
+connection slave;
+call save_read_stats();
+connection master;
+update t1 set value=value+1 where id=1;
+delete from t1 where id=4;
+select * from t1;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+
+
+--echo
+--echo # With rocksdb_read_free_rpl_tables="t.*", the "row not found" error does not happen anymore
+--echo
+connection slave;
+--source include/stop_slave.inc
+delete from t1 where id in (2, 3);
+--source include/start_slave.inc
+call save_read_stats();
+
+connection master;
+update t1 set value=value+1 where id=3;
+delete from t1 where id=2;
+select * from t1;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+
+
+--echo
+--echo ## tables without primary key -- read free replication should be disabled
+--echo
+--echo
+--echo #no index
+--echo
+connection master;
+drop table t1;
+create table t1 (c1 int, c2 int);
+insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5);
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call save_read_stats();
+connection master;
+update t1 set c2=100 where c1=3;
+delete from t1 where c1 <= 2;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+
+--echo
+--echo #secondary index only
+--echo
+connection master;
+drop table t1;
+create table t1 (c1 int, c2 int, index i(c1));
+insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5);
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call save_read_stats();
+connection master;
+update t1 set c2=100 where c1=3;
+delete from t1 where c1 <= 2;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+
+
+
+--echo
+--echo ## large row operations -- primary key modification, secondary key modification
+--echo
+connection master;
+drop table t1;
+create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2));
+
+--disable_query_log
+let $i=1;
+while ($i<=10000)
+{
+ eval insert t1(id1,id2,c1,c2,c3,c4,c5,c6,c7)
+ values($i,0,$i,0,0,0,0,0,0);
+ inc $i;
+}
+--enable_query_log
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call save_read_stats();
+connection master;
+
+--echo
+--echo #updating all secondary keys by 1
+--echo
+--disable_query_log
+let $i=1;
+while ($i<=10000)
+{
+ eval update t1 set c2=c2+1 where id1=$i and id2=0;
+ inc $i;
+}
+--enable_query_log
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+connection master;
+--source include/diff_tables.inc
+
+--echo
+--echo #updating all primary keys by 2
+--echo
+connection slave;
+call save_read_stats();
+connection master;
+--disable_query_log
+let $i=1;
+while ($i<=10000)
+{
+ eval update t1 set id2=id2+2 where id1=$i and id2=0;
+ inc $i;
+}
+--enable_query_log
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+connection master;
+--source include/diff_tables.inc
+
+--echo
+--echo #updating secondary keys after truncating t1 on slave
+--echo
+connection slave;
+truncate table t1;
+call save_read_stats();
+connection master;
+update t1 set c2=c2+10;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+connection master;
+--source include/diff_tables.inc
+
+--echo
+--echo #updating primary keys after truncating t1 on slave
+--echo
+connection slave;
+truncate table t1;
+call save_read_stats();
+connection master;
+update t1 set id2=id2+10;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+connection master;
+--source include/diff_tables.inc
+
+--echo
+--echo #deleting half rows
+--echo
+connection slave;
+call save_read_stats();
+connection master;
+delete from t1 where id1 <= 5000;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+connection master;
+--source include/diff_tables.inc
+
+#--echo
+#--echo # some tables with read-free replication on and some with it off
+#--echo # secondary keys lose rows
+#--echo
+# The configuration is set up so the slave will do read-free replication on
+# all tables starting with 't'
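+# (rpl_read_free.cnf above sets rocksdb_read_free_rpl_tables="t.*" on both servers,
+#  so t2/t3 below get read-free replication while u2/u3 do not)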
+connection master;
+--echo [on master]
+create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+--source include/sync_slave_sql_with_master.inc
+
+# make a mismatch between the slave and the master
+connection slave;
+--echo [on slave]
+delete from t2 where id <= 2;
+delete from u2 where id <= 2;
+
+# make changes on the master
+connection master;
+--echo [on master]
+update t2 set i2=100, value=100 where id=1;
+update u2 set i2=100, value=100 where id=1;
+
+connection slave;
+--echo [on slave]
+call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*");
+call mtr.add_suppression("Slave: Can't find record in 'u2'.*");
+# wait until we have the expected error
+--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND)
+--source include/wait_for_slave_sql_error.inc
+
+# query the t2 table on the slave
+connection slave;
+select count(*) from t2 force index(primary);
+select count(*) from t2 force index(i1);
+select count(*) from t2 force index(i2);
+select * from t2 where id=1;
+select i1 from t2 where i1=1;
+select i2 from t2 where i2=100;
+
+# query the u2 table on the slave
+select count(*) from u2 force index(primary);
+select count(*) from u2 force index(i1);
+select count(*) from u2 force index(i2);
+select * from u2 where id=1;
+select i1 from u2 where i1=1;
+select i2 from u2 where i2=100;
+
+# the slave replication thread stopped because of the errors;
+# clean up the problem and restart it
+--disable_query_log
+insert into u2 values(1,1,1,1), (2,2,2,2);
+start slave sql_thread;
+--source include/wait_for_slave_sql_to_start.inc
+--enable_query_log
+
+--echo
+--echo # some tables with read-free replication on and some with it off
+--echo # secondary keys have extra rows
+--echo
+connection master;
+--echo [on master]
+create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2));
+insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
+--source include/sync_slave_sql_with_master.inc
+
+# make a mismatch between the slave and the master
+connection slave;
+--echo [on slave]
+update t3 set i1=100 where id=1;
+update u3 set i1=100 where id=1;
+
+# make changes on the master
+connection master;
+--echo [on master]
+delete from t3 where id=1;
+delete from u3 where id=1;
+
+# make sure the slave is caught up
+--source include/sync_slave_sql_with_master.inc
+
+# query the t3 table on the slave
+connection slave;
+--echo [on slave]
+select count(*) from t3 force index(primary);
+select count(*) from t3 force index(i1);
+select count(*) from t3 force index(i2);
+select i1 from t3 where i1=100;
+
+# query the u3 table on the slave
+select count(*) from u3 force index(primary);
+select count(*) from u3 force index(i1);
+select count(*) from u3 force index(i2);
+select i1 from u3 where i1=100;
+
+# cleanup
+connection master;
+drop table t1, t2, t3, u2, u3;
+--source drop_stats_procedure.inc
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf
new file mode 100644
index 00000000000..44100e59cc2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf
@@ -0,0 +1,9 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+[mysqld.2]
+binlog_format=row
+slave_parallel_workers=4
+slave_exec_mode=SEMI_STRICT
+rocksdb_lock_wait_timeout=5
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc
new file mode 100644
index 00000000000..5a78979f048
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc
@@ -0,0 +1,92 @@
+--source include/have_rocksdb.inc
+--source include/master-slave.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+connection master;
+
+create table t0 (a int) engine=myisam;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int) engine=myisam;
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+ pk int primary key,
+ kp1 int,
+ kp2 int,
+ col1 int,
+ key (kp1,kp2)
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+create table t3 like t2;
+insert into t3 select * from t2;
+
+
+# For GitHub issue#166
+# The slave is suspended at ha_rocksdb::read_range_first() -> index_read_map_impl()
+# -> ha_rocksdb::get_row_by_rowid() -- after creating an iterator and calling
+# Seek() and Next() (reaching pk=1), but before GetForUpdate() and before
+# creating a snapshot.
+# While it is suspended, the deletes below remove pk=2 and pk=3; the update then
+# resumes on the slave with GetForUpdate(pk=1), and
+# index_next() -> secondary_index_read() -> get_row_by_rowid(pk=2)
+# does not find a row.
+# The slave should not stop with an error (Can't find a record).
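+# (below, the 'd,dbug.rocksdb.get_row_by_rowid' debug flag appears to make the slave
+#  signal 'Reached' and then wait for 'signal.rocksdb.get_row_by_rowid_let_running',
+#  which gives this test its window to delete pk=2 and pk=3 on the slave)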
+
+--source include/sync_slave_sql_with_master.inc
+
+connection slave;
+let $old_debug = `select @@global.debug`;
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+--source include/stop_slave.inc
+--source include/start_slave.inc
+
+connection master;
+update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0;
+
+connection slave;
+set debug_sync= 'now WAIT_FOR Reached';
+eval set global debug = '$old_debug';
+set sql_log_bin=0;
+delete from t2 where pk=2;
+delete from t2 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+
+connection master;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t2 where pk < 5;
+
+# For GitHub issue#162 (result file must be updated after fixing #162)
+connection slave;
+set global debug= 'd,dbug.rocksdb.get_row_by_rowid';
+--source include/stop_slave.inc
+--source include/start_slave.inc
+
+connection master;
+update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0;
+
+connection slave;
+call mtr.add_suppression("Deadlock found when trying to get lock");
+set debug_sync= 'now WAIT_FOR Reached';
+eval set global debug = '$old_debug';
+set sql_log_bin=0;
+delete from t3 where pk=2;
+delete from t3 where pk=3;
+set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running';
+
+connection master;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+# col1 for pk=4 should be 100
+select * from t3 where pk < 5;
+
+# Cleanup
+connection master;
+drop table t0, t1, t2, t3;
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test
new file mode 100644
index 00000000000..36188427585
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test
@@ -0,0 +1,4 @@
+--source include/have_binlog_format_row.inc
+
+--source rpl_row_not_found.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf
new file mode 100644
index 00000000000..b46b417c257
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf
@@ -0,0 +1,7 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+[mysqld.2]
+binlog_format=row
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test
new file mode 100644
index 00000000000..2f00741afbb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test
@@ -0,0 +1,47 @@
+--source include/have_rocksdb.inc
+
+source include/master-slave.inc;
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+connection master;
+
+select @@binlog_format;
+create table t1 (pk int primary key) engine=rocksdb;
+insert into t1 values (1),(2),(3);
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+
+select * from t1;
+
+connection master;
+drop table t1;
+
+--echo #
+--echo # Issue #18: slave crash on update with row based binary logging
+--echo #
+create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+update t1 set value2=100 where id=1;
+update t1 set value2=200 where id=2;
+update t1 set value2=300 where id=3;
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t1 where id=1;
+select * from t1 where id=2;
+select * from t1 where id=3;
+
+connection master;
+drop table t1;
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf
new file mode 100644
index 00000000000..b46b417c257
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf
@@ -0,0 +1,7 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+[mysqld.2]
+binlog_format=row
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test
new file mode 100644
index 00000000000..c0b0122cbc0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test
@@ -0,0 +1,46 @@
+--source include/have_rocksdb.inc
+
+source include/master-slave.inc;
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+# initialization/insert
+connection master;
+# creating save_read_stats() and get_read_stats() procedures
+--source init_stats_procedure.inc
+
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5);
+--source include/sync_slave_sql_with_master.inc
+
+connection slave;
+call save_read_stats();
+connection master;
+update t1 set value=value+1 where id=1;
+update t1 set value=value+1 where id=3;
+select * from t1;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+call save_read_stats();
+
+connection master;
+delete from t1 where id in (4,5);
+select * from t1;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+call get_read_stats();
+select * from t1;
+
+
+# cleanup
+connection master;
+drop table t1;
+--source drop_stats_procedure.inc
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf
new file mode 100644
index 00000000000..d20d3396f0a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf
@@ -0,0 +1,19 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+gtid_mode=ON
+enforce_gtid_consistency
+log_slave_updates
+binlog_row_image=FULL
+rocksdb_read_free_rpl_tables=.*
+rocksdb_strict_collation_check=0
+[mysqld.2]
+binlog_format=row
+gtid_mode=ON
+enforce_gtid_consistency
+log_slave_updates
+binlog_row_image=FULL
+rocksdb_read_free_rpl_tables=.*
+rocksdb_strict_collation_check=0
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test
new file mode 100644
index 00000000000..4490353b749
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test
@@ -0,0 +1,262 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_rbr_triggers.inc
+-- source include/have_rocksdb.inc
+-- source include/master-slave.inc
+
+-- echo # Test of row replication with triggers on the slave side
+connection master;
+CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1));
+SELECT * FROM t1;
+
+sync_slave_with_master;
+
+connection slave;
+SET @old_slave_exec_mode= @@global.slave_exec_mode;
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET @@global.slave_exec_mode= IDEMPOTENT;
+SET @@global.slave_run_triggers_for_rbr= YES;
+SELECT * FROM t1;
+create table t2 (id char(2) primary key, cnt int, o char(1), n char(1));
+insert into t2 values
+ ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '),
+ ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '),
+ ('i0', 0, ' ', ' '),('i1', 0, ' ', ' ');
+create trigger t1_cnt_b before update on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0';
+create trigger t1_cnt_db before delete on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0';
+create trigger t1_cnt_ib before insert on t1 for each row
+ update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0';
+create trigger t1_cnt_a after update on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1';
+create trigger t1_cnt_da after delete on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1';
+create trigger t1_cnt_ia after insert on t1 for each row
+ update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1';
+SELECT * FROM t2 order by id;
+
+connection master;
+--echo # INSERT triggers test
+insert into t1 values ('a','b');
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+
+connection master;
+--echo # UPDATE triggers test
+update t1 set C1= 'd';
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+
+connection master;
+--echo # DELETE triggers test
+delete from t1 where C1='d';
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+--echo # INSERT triggers that also cause an UPDATE (insert duplicate row)
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+
+connection master;
+insert into t1 values ('0','1');
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+--echo # INSERT triggers that would also cause a DELETE
+--echo # (insert duplicate row in table referenced by foreign key)
+insert into t1 values ('1','1');
+
+connection master;
+# Foreign keys are not supported in MyRocks
+#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) );
+#insert into t1 values ('1','1');
+
+#sync_slave_with_master;
+
+#connection slave;
+#SELECT * FROM t2 order by id;
+
+#connection master;
+#drop table t3,t1;
+drop table if exists t1;
+
+sync_slave_with_master;
+
+connection slave;
+SET @@global.slave_exec_mode= @old_slave_exec_mode;
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+drop table t2;
+
+--connection master
+CREATE TABLE t1 (i INT);
+CREATE TABLE t2 (i INT);
+
+--sync_slave_with_master
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr=YES;
+CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW
+ INSERT INTO t2 VALUES (new.i);
+
+--connection master
+BEGIN;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+COMMIT;
+--sync_slave_with_master
+select * from t2;
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+--connection master
+drop tables t2,t1;
+
+--sync_slave_with_master
+
+-- echo # Triggers on the slave are not run if the master has triggers of its own
+
+connection master;
+CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1));
+SELECT * FROM t1;
+create trigger t1_dummy before delete on t1 for each row
+ set @dummy= 1;
+
+sync_slave_with_master;
+
+connection slave;
+SET @old_slave_exec_mode= @@global.slave_exec_mode;
+SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr;
+SET @@global.slave_exec_mode= IDEMPOTENT;
+SET @@global.slave_run_triggers_for_rbr= YES;
+SELECT * FROM t1;
+create table t2 (id char(2) primary key, cnt int, o char(1), n char(1));
+insert into t2 values
+ ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '),
+ ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '),
+ ('i0', 0, ' ', ' '),('i1', 0, ' ', ' ');
+create trigger t1_cnt_b before update on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0';
+create trigger t1_cnt_ib before insert on t1 for each row
+ update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0';
+create trigger t1_cnt_a after update on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1';
+create trigger t1_cnt_da after delete on t1 for each row
+ update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1';
+create trigger t1_cnt_ia after insert on t1 for each row
+ update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1';
+SELECT * FROM t2 order by id;
+
+connection master;
+--echo # INSERT triggers test
+insert into t1 values ('a','b');
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+connection master;
+--echo # UPDATE triggers test
+update t1 set C1= 'd';
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+
+connection master;
+--echo # DELETE triggers test
+delete from t1 where C1='d';
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+--echo # INSERT triggers that also cause an UPDATE (insert duplicate row)
+insert into t1 values ('0','1');
+SELECT * FROM t2 order by id;
+
+
+connection master;
+insert into t1 values ('0','1');
+
+sync_slave_with_master;
+
+connection slave;
+SELECT * FROM t2 order by id;
+--echo # INSERT triggers that would also cause a DELETE
+--echo # (insert duplicate row in table referenced by foreign key)
+insert into t1 values ('1','1');
+
+connection master;
+
+# Foreign keys are not supported in MyRocks
+#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) );
+#insert into t1 values ('1','1');
+
+#sync_slave_with_master;
+
+#connection slave;
+#SELECT * FROM t2 order by id;
+
+#connection master;
+#drop table t3,t1;
+drop table if exists t1;
+
+sync_slave_with_master;
+
+connection slave;
+SET @@global.slave_exec_mode= @old_slave_exec_mode;
+SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr;
+drop table t2;
+
+--echo #
+--echo # MDEV-5513: Trigger is applied to the rows after first one
+--echo #
+
+--connection master
+create table t1 (a int, b int);
+create table tlog (a int auto_increment primary key);
+set sql_log_bin=0;
+create trigger tr1 after insert on t1 for each row insert into tlog values (null);
+set sql_log_bin=1;
+
+sync_slave_with_master;
+--connection slave
+
+set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr;
+set global slave_run_triggers_for_rbr=1;
+create trigger tr2 before insert on t1 for each row set new.b = new.a;
+
+--connection master
+insert into t1 values (1,10),(2,20),(3,30);
+
+--sync_slave_with_master
+select * from t1;
+
+--echo #
+--echo # Verify slave skips running triggers if master ran and logged the row events for triggers
+--echo #
+--connection master
+create table t4(a int, b int);
+delete from tlog;
+create trigger tr4 before insert on t4 for each row insert into tlog values (null);
+insert into t4 values (1, 10),(2, 20);
+select * from tlog;
+
+--sync_slave_with_master
+select * from t4;
+select * from tlog;
+
+# Cleanup
+set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved;
+--connection master
+drop table t1, tlog, t4;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf
new file mode 100644
index 00000000000..b46b417c257
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf
@@ -0,0 +1,7 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+[mysqld.2]
+binlog_format=row
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test
new file mode 100644
index 00000000000..0f26c24c27d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test
@@ -0,0 +1,90 @@
+--source include/have_rocksdb.inc
+
+source include/master-slave.inc;
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+connection master;
+
+create table t1 (id int primary key, value int);
+insert into t1 values (1,1), (2,2), (3,3);
+
+begin;
+insert into t1 values (11, 1);
+savepoint a;
+insert into t1 values (12, 1);
+--error ER_UNKNOWN_ERROR
+rollback to savepoint a;
+--error ER_UNKNOWN_ERROR
+commit;
+commit;
+select * from t1;
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+
+select * from t1;
+
+connection master;
+begin;
+insert into t1 values (21, 1);
+savepoint a;
+insert into t1 values (22, 1);
+--error ER_UNKNOWN_ERROR
+rollback to savepoint a;
+--error ER_UNKNOWN_ERROR
+insert into t1 values (23, 1);
+--error ER_UNKNOWN_ERROR
+commit;
+commit;
+select * from t1;
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t1;
+
+
+connection master;
+begin;
+insert into t1 values (31, 1);
+savepoint a;
+insert into t1 values (32, 1);
+savepoint b;
+insert into t1 values (33, 1);
+--error ER_UNKNOWN_ERROR
+rollback to savepoint a;
+--error ER_UNKNOWN_ERROR
+insert into t1 values (34, 1);
+rollback;
+select * from t1;
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t1;
+
+### GitHub Issue#195
+connection master;
+SET autocommit=off;
+select * from t1;
+SAVEPOINT A;
+select * from t1;
+SAVEPOINT A;
+insert into t1 values (35, 35);
+--error ER_UNKNOWN_ERROR
+ROLLBACK TO SAVEPOINT A;
+--error ER_UNKNOWN_ERROR
+START TRANSACTION;
+select * from t1;
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t1;
+
+
+connection master;
+drop table t1;
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf
new file mode 100644
index 00000000000..6e5130c1f01
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf
@@ -0,0 +1,7 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=statement
+[mysqld.2]
+binlog_format=mixed
+rocksdb_lock_wait_timeout=5
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test
new file mode 100644
index 00000000000..b4126266956
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test
@@ -0,0 +1,57 @@
+--source include/have_rocksdb.inc
+source include/master-slave.inc;
+
+connection master;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+connection master;
+
+select @@binlog_format;
+create table t1 (pk int primary key) engine=rocksdb;
+--error ER_UNKNOWN_ERROR
+insert into t1 values (1),(2),(3);
+
+set session rocksdb_unsafe_for_binlog=on;
+insert into t1 values (1),(2),(3);
+select * from t1;
+delete from t1;
+set session rocksdb_unsafe_for_binlog=off;
+
+--error ER_UNKNOWN_ERROR
+insert into t1 values (1),(2),(3);
+
+set binlog_format=row;
+insert into t1 values (1),(2),(3);
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+
+select * from t1;
+
+connection master;
+drop table t1;
+
+create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,1,1);
+insert into t1 values (3,1,1);
+insert into t1 values (4,1,1);
+insert into t1 values (5,1,1);
+update t1 set value2=100 where id=1;
+update t1 set value2=200 where id=2;
+update t1 set value2=300 where id=3;
+
+--source include/sync_slave_sql_with_master.inc
+connection slave;
+select * from t1 where id=1;
+select * from t1 where id=2;
+select * from t1 where id=3;
+
+connection master;
+drop table t1;
+set binlog_format=row;
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf
new file mode 100644
index 00000000000..470b073d185
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf
@@ -0,0 +1,9 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=statement
+rocksdb_unsafe_for_binlog=1
+[mysqld.2]
+binlog_format=row
+slave_parallel_workers=4
+rocksdb_lock_wait_timeout=5
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test
new file mode 100644
index 00000000000..d85fb0a1772
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test
@@ -0,0 +1,2 @@
+--source rpl_row_not_found.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc
new file mode 100644
index 00000000000..9a6bf73d6a0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc
@@ -0,0 +1,43 @@
+#
+# Random Query Generator tests
+#
+# Arguments needed to be set by the test when including this one:
+# $TESTDIR : name of sub-directory in conf containing the data/grammar files
+# $GRAMMAR_FILES: space separated list of grammar files
+# $DATA_FILE: name of the data file
+#
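+# A typical caller (see rqg_examples.test) looks like:
+#   let $TESTDIR = examples;
+#   let $GRAMMAR_FILES = example.yy;
+#   let $DATA_FILE = example.zz;
+#   --source rqg.inc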
+
+let $MYSQL_BASEDIR = `SELECT @@BASEDIR`;
+let RQG_BASE = $MYSQL_BASEDIR/rqg/rqg/common/mariadb-patches;
+let MYSQL_SOCKET = `SELECT @@SOCKET`;
+let GRAMMAR_FILES = $GRAMMAR_FILES;
+let DATA_FILE = $DATA_FILE;
+let TESTDIR = $TESTDIR;
+let $TESTDB = rqg_$TESTDIR;
+let TESTDB = $TESTDB;
+
+--eval CREATE DATABASE IF NOT EXISTS $TESTDB
+
+--perl
+
+foreach $grammar_file (split(/ /, $ENV{'GRAMMAR_FILES'})) {
+
+ # Errors from the gentest.pl file will be captured in the results file
+ my $cmd = "RQG_HOME=$ENV{'RQG_BASE'} perl $ENV{'RQG_BASE'}/gentest.pl " .
+ "--dsn=dbi:mysql:host=:port=:user=root:database=$ENV{'TESTDB'}" .
+ ":mysql_socket=$ENV{'MYSQL_SOCKET'} " .
+ "--gendata=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$ENV{'DATA_FILE'} " .
+ "--grammar=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$grammar_file " .
+ "--threads=5 --queries=10000 --duration=60 --sqltrace 2>&1 >> " .
+ "$ENV{'MYSQLTEST_VARDIR'}/tmp/$ENV{'TESTDB'}.log";
+
+ print "Running test with grammar file $grammar_file\n";
+ system($cmd);
+ if ($? != 0) {
+ print ("Failure running test! Command executed: $cmd\n");
+ }
+}
+
+EOF
+
+--eval DROP DATABASE $TESTDB
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt
new file mode 100644
index 00000000000..5b714857e13
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt
@@ -0,0 +1 @@
+--rocksdb_strict_collation_check=0
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test
new file mode 100644
index 00000000000..4eb02ac648a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+# RQG's examples test
+let $TESTDIR = examples;
+let $GRAMMAR_FILES = example.yy;
+let $DATA_FILE = example.zz;
+
+--source rqg.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt
new file mode 100644
index 00000000000..f494273892c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt
@@ -0,0 +1 @@
+--rocksdb_strict_collation_check=0 --secure-file-priv=/tmp
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test
new file mode 100644
index 00000000000..d5914745219
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test
@@ -0,0 +1,53 @@
+--source include/have_rocksdb.inc
+
+call mtr.add_suppression("Did not write failed ");
+call mtr.add_suppression("Can't open and lock privilege tables");
+
+SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER;
+
+# mysql.user and mysql.tables_priv are modified by the
+# tests, so they need to be restored to the original
+# state.
+--disable_warnings
+CREATE TABLE mysql.user_temp LIKE mysql.user;
+INSERT mysql.user_temp SELECT * FROM mysql.user;
+CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv;
+INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv;
+--enable_warnings
+
+# RQG's runtime test
+let $TESTDIR = runtime;
+
+let $GRAMMAR_FILES = alter_online.yy;
+let $DATA_FILE = alter_online.zz;
+
+--source rqg.inc
+
+let $GRAMMAR_FILES = concurrency_1.yy;
+let $DATA_FILE = concurrency_1.zz;
+
+--source rqg.inc
+
+let $GRAMMAR_FILES = connect_kill_sql.yy;
+let $DATA_FILE = connect_kill_data.zz;
+
+--source rqg.inc
+
+let $GRAMMAR_FILES = metadata_stability.yy;
+let $DATA_FILE = metadata_stability.zz;
+
+--source rqg.inc
+
+--disable_warnings
+DELETE FROM mysql.tables_priv;
+DELETE FROM mysql.user;
+INSERT mysql.user SELECT * FROM mysql.user_temp;
+INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp;
+DROP TABLE mysql.user_temp;
+DROP TABLE mysql.tables_priv_temp;
+DROP TABLE IF EXISTS test.executors;
+DROP DATABASE IF EXISTS testdb_N;
+DROP DATABASE IF EXISTS testdb_S;
+--enable_warnings
+
+SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt
new file mode 100644
index 00000000000..5b714857e13
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt
@@ -0,0 +1 @@
+--rocksdb_strict_collation_check=0
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test
new file mode 100644
index 00000000000..f29ddcb8c81
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test
@@ -0,0 +1,10 @@
+--source include/have_rocksdb.inc
+
+call mtr.add_suppression("Deadlock found when trying to get lock");
+
+# RQG's transactions test
+let $TESTDIR = transactions;
+let $GRAMMAR_FILES = transactions.yy repeatable_read.yy transaction_durability.yy transactions-flat.yy combinations.yy repeatable_read.yy transaction_durability.yy transactions-flat.yy;
+let $DATA_FILE = transactions.zz;
+
+--source rqg.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out
new file mode 100644
index 00000000000..406e5066132
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out
@@ -0,0 +1 @@
+Can't open perl script "./mtr": No such file or directory
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select.test b/storage/rocksdb/mysql-test/rocksdb/t/select.test
new file mode 100644
index 00000000000..c4e1ad464a3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/select.test
@@ -0,0 +1,202 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic SELECT statements
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar');
+
+CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (a,b) SELECT a, b FROM t1;
+INSERT INTO t1 (a,b) SELECT a, b FROM t2;
+
+--sorted_result
+SELECT * FROM t1;
+
+# Modifiers
+
+--sorted_result
+SELECT DISTINCT a FROM t1;
+
+--sorted_result
+SELECT ALL b, a FROM t1;
+
+# Optimizer and cache directives should not have any visible effect here,
+# but we will add them for completeness
+
+--sorted_result
+SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a;
+
+--sorted_result
+SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2;
+
+--sorted_result
+SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a)
+ FROM t1 t1_1, t2, t1 t1_2;
+SELECT FOUND_ROWS();
+
+let $query_cache = `SELECT @@query_cache_size`;
+SET GLOBAL query_cache_size = 1024*1024;
+--sorted_result
+SELECT SQL_CACHE * FROM t1, t2;
+eval SET GLOBAL query_cache_size = $query_cache;
+
+# Combination of main clauses
+
+--sorted_result
+SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1
+WHERE b > 'b' AND a IS NOT NULL
+GROUP BY 2 DESC, field1 ASC
+HAVING field1 < 1000
+ORDER BY field2, 1 DESC, field1*2
+LIMIT 5 OFFSET 1;
+
+# ROLLUP
+--sorted_result
+SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP;
+
+# Procedure
+
+--sorted_result
+SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE();
+
+# SELECT INTO
+let $datadir = `SELECT @@datadir`;
+
+--replace_result $datadir <DATADIR>
+eval
+SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
+ INTO OUTFILE '$datadir/select.out'
+ CHARACTER SET utf8
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '''';
+--cat_file $datadir/select.out
+--remove_file $datadir/select.out
+
+--replace_result $datadir <DATADIR>
+--error ER_TOO_MANY_ROWS
+eval
+SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
+ INTO DUMPFILE '$datadir/select.dump';
+--remove_file $datadir/select.dump
+--replace_result $datadir <DATADIR>
+eval
+SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1
+ INTO DUMPFILE '$datadir/select.dump';
+
+--cat_file $datadir/select.dump
+--echo
+--remove_file $datadir/select.dump
+
+SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max;
+SELECT @min, @max;
+
+# Joins
+
+--sorted_result
+SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2
+ WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a;
+
+--sorted_result
+SELECT alias1.* FROM ( SELECT a,b FROM t1 ) alias1, t2 WHERE t2.a IN (100,200);
+
+--sorted_result
+SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 };
+
+--sorted_result
+SELECT t1.* FROM t2 INNER JOIN t1;
+
+--sorted_result
+SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b;
+
+--sorted_result
+SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b;
+
+SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b;
+
+SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1;
+
+--sorted_result
+SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = t2.a WHERE t1.a IS NOT NULL;
+
+SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b;
+
+SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a);
+
+--sorted_result
+SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > '';
+
+--sorted_result
+SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL;
+
+--sorted_result
+SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2;
+
+--sorted_result
+SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 );
+
+--sorted_result
+SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER JOIN t2;
+
+# Subquery as scalar operand, subquery in the FROM clause
+
+--sorted_result
+SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2;
+
+# Comparison using subqueries
+
+--sorted_result
+SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 );
+
+--sorted_result
+SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 );
+
+# Subquery with IN, correlated subquery
+
+--sorted_result
+SELECT t2.* FROM t1 t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a );
+
+# Subquery with ANY, ALL
+
+--sorted_result
+SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 );
+
+--sorted_result
+SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' );
+
+# Row subqueries
+
+--sorted_result
+SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 );
+
+# Subquery with EXISTS
+
+--sorted_result
+SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b );
+
+# Subquery in ORDER BY
+
+--sorted_result
+SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC;
+
+# Subquery in HAVING
+
+--sorted_result
+SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b );
+
+# Union
+
+--sorted_result
+SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1;
+
+--sorted_result
+SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1;
+
+
+# Cleanup
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test
new file mode 100644
index 00000000000..14fdfb7896c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test
@@ -0,0 +1,55 @@
+--source include/have_rocksdb.inc
+
+#
+# SELECT .. FOR UPDATE
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--enable_connect_log
+
+--source include/count_sessions.inc
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+
+--connect (con1,localhost,root,,)
+BEGIN;
+--sorted_result
+SELECT a,b FROM t1 WHERE b='a' FOR UPDATE;
+
+--connection default
+SET lock_wait_timeout = 1;
+
+# Should still be able to select
+
+--sorted_result
+SELECT a,b FROM t1 WHERE b='a';
+
+# ... but not with LOCK IN SHARE MODE
+
+--sorted_result
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET b='c' WHERE b='a';
+
+--connection con1
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+--disconnect con1
+--connection default
+# Now it can be updated all right
+UPDATE t1 SET b='c' WHERE b='a';
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+--source include/wait_until_count_sessions.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test
new file mode 100644
index 00000000000..c8548d96888
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test
@@ -0,0 +1,46 @@
+##############################################################################
+## SKIP LOCKED | NOWAIT are *not* supported for SELECT...FOR UPDATE in RocksDB
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+create table t1 (a int primary key) engine=rocksdb;
+
+insert into t1 values (1), (2), (3);
+
+### SKIP LOCKED
+
+--echo Should succeed since no table gets involved
+select 1 for update skip locked;
+
+--error ER_NO_SUCH_TABLE
+select * from nonexistence for update skip locked;
+
+--error ER_ILLEGAL_HA
+select * from t1 for update skip locked;
+
+--error ER_ILLEGAL_HA
+select * from t1 where a > 1 and a < 3 for update skip locked;
+
+--error ER_ILLEGAL_HA
+insert into t1 select * from t1 for update skip locked;
+
+### NOWAIT
+
+--echo Should succeed since no table gets involved
+select 1 for update nowait;
+
+--error ER_NO_SUCH_TABLE
+select * from nonexistence for update nowait;
+
+--error ER_ILLEGAL_HA
+select * from t1 for update nowait;
+
+--error ER_ILLEGAL_HA
+select * from t1 where a > 1 and a < 3 for update nowait;
+
+--error ER_ILLEGAL_HA
+insert into t1 select * from t1 for update nowait;
+
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test
new file mode 100644
index 00000000000..d1d289bcb74
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test
@@ -0,0 +1,58 @@
+--source include/have_rocksdb.inc
+
+#
+# SELECT .. LOCK IN SHARE MODE
+#
+# If the engine has its own lock timeouts,
+# it makes sense to set them to the minimum to decrease
+# the duration of the test.
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--enable_connect_log
+
+--source include/count_sessions.inc
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+
+--connect (con1,localhost,root,,)
+BEGIN;
+--sorted_result
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+
+--connection default
+SET lock_wait_timeout = 1;
+
+# Should still be able to select
+
+--sorted_result
+SELECT a,b FROM t1 WHERE b='a';
+--sorted_result
+--echo #
+--echo # Currently, SELECT ... LOCK IN SHARE MODE works like
+--echo # SELECT FOR UPDATE
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET b='c' WHERE b='a';
+
+--connection con1
+COMMIT;
+--sorted_result
+SELECT a,b FROM t1;
+
+--disconnect con1
+--connection default
+# Now it can be updated all right
+UPDATE t1 SET b='c' WHERE b='a';
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+--source include/wait_until_count_sessions.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc
new file mode 100644
index 00000000000..a8d8ed53cba
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc
@@ -0,0 +1,27 @@
+# Usage:
+# let $checkpoint = <directory where the checkpoint will be created>;
+# let $succeeds = <1 if checkpoint creation should succeed, 0 otherwise>;
+# --source set_checkpoint.inc
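+#
+# Example (the checkpoint path below is only illustrative):
+#   let $checkpoint = $MYSQLTEST_VARDIR/tmp/checkpoint1;
+#   let $succeeds = 1;
+#   --source set_checkpoint.inc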
+
+
+if ($succeeds)
+{
+ # Create checkpoint
+ --replace_result '$checkpoint' [CHECKPOINT]
+ eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint';
+
+ # Check checkpoint
+ --exec ls $checkpoint/CURRENT | sed s/.*CURRENT/CURRENT/g
+
+ # Cleanup
+ --exec rm -rf $checkpoint
+}
+if (!$succeeds)
+{
+ --disable_result_log
+ --disable_query_log
+ --error ER_UNKNOWN_ERROR
+ eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint';
+ --enable_query_log
+ --enable_result_log
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt
new file mode 100644
index 00000000000..cef79bc8585
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine-master.opt
@@ -0,0 +1 @@
+--force-restart
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test
new file mode 100644
index 00000000000..0cb32d95d8a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test
@@ -0,0 +1,75 @@
+--source include/have_rocksdb.inc
+
+#
+# SHOW ENGINE STATUS command
+# Checking that the command doesn't produce an error.
+# If it starts producing an actual result, the result file
+# will need to be updated, and possibly masked.
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+--enable_warnings
+
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB;
+CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB
+ PARTITION BY KEY(l) PARTITIONS 4;
+
+--replace_column 3 #
+SHOW ENGINE rocksdb STATUS;
+
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT COUNT(*) FROM t1;
+
+INSERT INTO t2 VALUES (1), (2), (3), (4);
+SELECT COUNT(*) FROM t2;
+
+INSERT INTO t4 VALUES (1), (2), (3), (4), (5);
+SELECT COUNT(*) FROM t4;
+
+# Fetch data from information schema as well
+--replace_column 3 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS;
+
+--replace_column 2 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS;
+
+SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE)
+FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT
+WHERE TABLE_SCHEMA = 'test'
+GROUP BY TABLE_NAME, PARTITION_NAME;
+
+--replace_column 3 #
+SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS;
+
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+
+SHOW ENGINE rocksdb MUTEX;
+# For SHOW ALL MUTEX even the number of lines is volatile, so the result logging is disabled
+--disable_result_log
+SHOW ENGINE ALL MUTEX;
+--enable_result_log
+
+# The output from SHOW ENGINE ROCKSDB TRANSACTION STATUS has some
+# non-deterministic results. Replace the timestamp with 'TIMESTAMP', the
+# number of seconds active with 'NUM', the thread id with 'TID' and the thread
+# pointer with 'PTR'. This test may fail in the future if it is being run in
+# parallel with other tests as the number of snapshots would then be greater
+# than expected. We may need to turn off the result log if that is the case.
+--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/
+SHOW ENGINE rocksdb TRANSACTION STATUS;
+
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/
+SHOW ENGINE rocksdb TRANSACTION STATUS;
+
+ROLLBACK;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt
new file mode 100644
index 00000000000..83bb6823ee3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_debug_optimizer_n_rows=1000
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test
new file mode 100644
index 00000000000..29cc2ccfb5e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test
@@ -0,0 +1,64 @@
+--source include/have_rocksdb.inc
+
+#
+# SHOW TABLE STATUS statement
+#
+
+###################################
+# TODO:
+# The result file is likely to change
+# if MDEV-4197 is fixed
+###################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2, t3;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo');
+
+CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t2 (a,b) VALUES (1,'bar');
+
+set global rocksdb_force_flush_memtable_now = true;
+
+CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8;
+
+--replace_column 6 # 7 #
+SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' );
+
+# Some statistics don't get updated as quickly. The Data_length and
+# Avg_row_length are trailing statistics, meaning they don't get updated
+# for the current SST until the next SST is written. Insert a bunch of data,
+# then flush, then insert a bit more and do another flush to get them to show
+# up.
+
+--disable_query_log
+let $count = 2;
+let $max = 10000;
+while ($count < $max) {
+ eval INSERT INTO t2 (a) VALUES ($count);
+ inc $count;
+}
+
+set global rocksdb_force_flush_memtable_now = true;
+eval INSERT INTO t2 (a) VALUES ($max);
+set global rocksdb_force_flush_memtable_now = true;
+--enable_query_log
+
+# We expect the number of rows to be 10000. Data_len and Avg_row_len
+# may vary, depending on built-in compression library.
+--replace_column 6 # 7 #
+SHOW TABLE STATUS WHERE name LIKE 't2';
+DROP TABLE t1, t2, t3;
+
+#
+# Confirm that long db and table names work.
+#
+
+CREATE DATABASE `db_new..............................................end`;
+USE `db_new..............................................end`;
+CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb;
+INSERT INTO `t1_new..............................................end` VALUES (1);
+--query_vertical SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end'
+DROP DATABASE `db_new..............................................end`;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt
new file mode 100644
index 00000000000..d6c7939eae6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt
@@ -0,0 +1 @@
+--log-bin --binlog_format=row --rocksdb_default_cf_options=write_buffer_size=64k
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test
new file mode 100644
index 00000000000..ba625deb514
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test
@@ -0,0 +1,36 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Ensure bin log is enabled.
+SHOW GLOBAL VARIABLES LIKE "log_bin";
+
+# Create the table and insert some keys
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+
+--disable_query_log
+let $max = 1000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+# Restart the server
+let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
+--exec echo "wait" > $restart_file
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+-- enable_reconnect
+-- source include/wait_until_connected_again.inc
+
+# Verify table has correct rows
+SELECT COUNT(*) FROM t1;
+
+#cleanup
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt
new file mode 100644
index 00000000000..72b3af6bcf7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt
@@ -0,0 +1 @@
+--rocksdb_default_cf_options=write_buffer_size=16k
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test
new file mode 100644
index 00000000000..718f6b7202e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test
@@ -0,0 +1,89 @@
+--source include/have_rocksdb.inc
+
+# only SingleDelete increases
+CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t1 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t1 SET value=value+1 WHERE value=$i;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+optimize table t1;
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+
+
+# both SingleDelete and Delete increases
+CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
+INSERT INTO t2 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t2 SET id=id+1 WHERE id=$i;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+optimize table t2;
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+
+# only Delete increases
+CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB;
+INSERT INTO t3 VALUES (1,1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t3 SET id=id+1 WHERE id=$i;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+optimize table t3;
+select case when variable_value-@s = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+
+# only SingleDelete increases
+CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB;
+INSERT INTO t4 VALUES (1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t4 SET id=id+1 WHERE id=$i;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+optimize table t4;
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+
+# only SingleDelete increases
+CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB;
+INSERT INTO t5 VALUES (1, 1);
+select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+--disable_query_log
+let $i = 1;
+while ($i <= 10000) {
+ let $update = UPDATE t5 SET id1=id1+1 WHERE id1=$i;
+ inc $i;
+ eval $update;
+}
+--enable_query_log
+optimize table t5;
+select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
+select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete';
+
+DROP TABLE t1, t2, t3, t4, t5;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt
new file mode 100644
index 00000000000..fc5c3ed4c7a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt
@@ -0,0 +1 @@
+--log-slow-extra --rocksdb-perf-context-level=2
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test
new file mode 100644
index 00000000000..9f36a7fb958
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test
@@ -0,0 +1,34 @@
+--source include/have_rocksdb.inc
+SET @cur_long_query_time = @@long_query_time;
+# Set the long query time to a large value so that nothing unexpected gets into the slow query log
+SET @@long_query_time = 600;
+# Test the slow query log feature
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB;
+
+--disable_query_log
+let $max = 10000;
+let $i = 1;
+while ($i < $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i);
+ inc $i;
+ eval $insert;
+}
+
+DELETE FROM t1 WHERE id < 2500;
+--enable_query_log
+
+SET @@long_query_time = 0;
+# we expect this query to be reflected in the slow query log
+SELECT COUNT(*) FROM t1;
+
+SET @@long_query_time = @cur_long_query_time;
+
+# Verify the output of the slow query log contains counts for the skipped keys
+--exec awk -f suite/rocksdb/slow_query_log.awk $MYSQLTEST_VARDIR/mysqld.1/mysqld-slow.log
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh
new file mode 100755
index 00000000000..72442fa1e3e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+sst_dump=$2
+wait_for_no_more_deletes=$3
+num_retries=240
+retry=0
+
+echo "wait_for_delete: $wait_for_no_more_deletes"
+
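+# The loop below scans every SST file and classifies the entries printed by
+# sst_dump according to their value type: ": 0" and ": 7" presumably
+# correspond to Delete/SingleDelete markers (summed into TOTAL_D), while
+# ": 1" corresponds to Put records that still exist (summed into TOTAL_E).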
+while : ; do
+ TOTAL_D=0
+ TOTAL_E=0
+ for f in `ls $1/mysqld.1/data/.rocksdb/*.sst`
+ do
+ # excluding system cf
+ DELETED=`$sst_dump --command=scan --output_hex --file=$f | \
+ perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \
+ grep -e ": 0" -e ": 7" | wc -l`
+ EXISTS=`$sst_dump --command=scan --output_hex --file=$f | \
+ perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \
+ grep ": 1" | wc -l`
+ TOTAL_D=$(($TOTAL_D+$DELETED))
+ TOTAL_E=$(($TOTAL_E+$EXISTS))
+ # echo "${f##*/} $DELETED $EXISTS"
+ done
+ if [ $TOTAL_E != "0" ]
+ then
+ if [ $TOTAL_D = "0" ] || [ $wait_for_no_more_deletes = "0" ]
+ then
+ break
+ fi
+ fi
+ if [ $retry -ge $num_retries ]
+ then
+ break
+ fi
+ sleep 1
+ retry=$(($retry + 1))
+done
+
+if [ "$TOTAL_E" = "0" ]
+then
+ echo "No records in the database"
+ exit
+fi
+
+if [ "$TOTAL_D" = "0" ]
+then
+ echo "No more deletes left"
+else
+ echo "There are deletes left"
+fi
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt
new file mode 100644
index 00000000000..8a56deb0299
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt
@@ -0,0 +1,3 @@
+--rocksdb_default_cf_options=max_write_buffer_number_to_maintain=10
+--rocksdb_debug_optimizer_n_rows=1000
+--rocksdb_table_stats_sampling_pct=100
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics.test b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test
new file mode 100644
index 00000000000..3971fd18ecd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test
@@ -0,0 +1,74 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+--enable_warnings
+
+# table with index in default CF
+create table t1(
+ id bigint not null primary key auto_increment,
+ a varchar(255) not null,
+ b bigint,
+ index t1_1(b)
+) engine=rocksdb;
+
+# a table with index in a different CF
+create table t2(
+ id bigint not null primary key auto_increment,
+ a varchar(255) not null,
+ b bigint,
+ index t2_1(b) comment 'cf_t3'
+) engine=rocksdb;
+
+# a table with index in a reverse CF
+create table t3(
+ id bigint not null primary key auto_increment,
+ a varchar(255) not null,
+ b bigint,
+ index t3_1(b) comment 'rev:cf_t4'
+) engine=rocksdb;
+
+--disable_query_log
+let $i=0;
+while ($i<100000)
+{
+ inc $i;
+ eval insert t1(a,b) values(concat('a',$i,'b',$i,'c',$i), $i);
+ if ($i<5000)
+ {
+ eval insert t2(a,b) values(concat('a',$i,'b',$i,'c',$i), $i);
+ eval insert t3(a,b) values(concat('a',$i,'b',$i,'c',$i), $i);
+ }
+}
+--enable_query_log
+
+# should have some statistics before the memtable flush
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1';
+
+# due to inconsistencies in when the memtable is flushed, just verify t1 has fewer
+# than the expected number of rows.
+SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1';
+
+# flush the memtable and get more accurate statistics
+set global rocksdb_force_flush_memtable_now = true;
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+
+# restart the server, check the stats
+--source include/restart_mysqld.inc
+
+# give the server a chance to load in statistics
+--exec sleep 5
+
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+
+analyze table t1,t2,t3,t4,t5;
+
+# make sure that stats do not change after calling analyze table
+SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
+SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE();
+
+drop table t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test
new file mode 100644
index 00000000000..734a5169608
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test
@@ -0,0 +1,27 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Create the table and insert some keys
+CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
+
+--disable_query_log
+let $max = 1000;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+# Verify table has correct rows
+SELECT COUNT(*) FROM t1;
+
+# Verify the table stats are returned
+SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1";
+
+#cleanup
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test
new file mode 100644
index 00000000000..8fb4539b401
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test
@@ -0,0 +1,29 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether AUTO_INCREMENT option
+# is supported in CREATE and ALTER TABLE
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1;
+
+ALTER TABLE t1 AUTO_INCREMENT=100;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1 ORDER BY a;
+
+ALTER TABLE t1 AUTO_INCREMENT=50;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES (NULL);
+SELECT * FROM t1 ORDER BY a;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test
new file mode 100644
index 00000000000..3e6797a8686
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether AVG_ROW_LENGTH option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 AVG_ROW_LENGTH=30000000;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test
new file mode 100644
index 00000000000..3b49b967937
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test
@@ -0,0 +1,19 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether CHECKSUM option is supported
+# in CREATE and ALTER TABLE.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 CHECKSUM=0;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test
new file mode 100644
index 00000000000..b97b3dd9d4c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test
@@ -0,0 +1,32 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether CONNECTION option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE DATABASE test_remote;
+CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql
+OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote');
+CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql
+OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote');
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection';
+SHOW CREATE TABLE t1;
+ALTER TABLE t1 CONNECTION='test_connection2';
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+DROP SERVER test_connection;
+DROP SERVER test_connection2;
+DROP DATABASE test_remote;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test
new file mode 100644
index 00000000000..a188d298cb0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test
@@ -0,0 +1,37 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether DATA DIRECTORY and INDEX DIRECTORY
+# are supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the options
+# have any real effect on the table, only
+# that they are accepted
+# (and apparently ignored)
+#
+
+--let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/
+--let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/
+--mkdir $data_dir
+--mkdir $index_dir
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
+eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir';
+--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
+SHOW CREATE TABLE t1;
+
+--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
+eval ALTER TABLE t1 INDEX DIRECTORY = '$data_dir';
+--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+--rmdir $data_dir
+--rmdir $index_dir
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test
new file mode 100644
index 00000000000..85cd45e969d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether DELAY_KEY_WRITE option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 DELAY_KEY_WRITE=0;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test
new file mode 100644
index 00000000000..e289827ac72
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether INSERT_METHOD option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted (and apparently ignored)
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 INSERT_METHOD=NO;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test
new file mode 100644
index 00000000000..d927c785ae9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether KEY_BLOCK_SIZE option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 KEY_BLOCK_SIZE=1;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test
new file mode 100644
index 00000000000..35aa0f4dafa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether MAX_ROWS option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 MAX_ROWS=30000000;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test
new file mode 100644
index 00000000000..d62a8771ea3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether MIN_ROWS option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 MIN_ROWS=10000;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test
new file mode 100644
index 00000000000..acdb612b4d4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether PACK_KEYS option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 PACK_KEYS=0;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test
new file mode 100644
index 00000000000..e897992e933
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test
@@ -0,0 +1,27 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether PASSWORD option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+# This option is not supported by any known engine,
+# which is why the result file does not contain it;
+# it is, however, syntactically accepted.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password';
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 PASSWORD='new_password';
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test
new file mode 100644
index 00000000000..de834d238ef
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether ROW_FORMAT option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test
new file mode 100644
index 00000000000..d3c371b18c7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test
@@ -0,0 +1,28 @@
+--source include/have_rocksdb.inc
+
+#
+# Check whether UNION option
+# is supported in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the option
+# has any real effect on the table, only
+# that it's accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, child1, child2;
+--enable_warnings
+
+--disable_query_log
+CREATE TABLE child1 (a INT PRIMARY KEY) ENGINE=MyISAM;
+CREATE TABLE child2 (a INT PRIMARY KEY) ENGINE=MyISAM;
+--enable_query_log
+
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb UNION(child1);
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 UNION = (child1,child2);
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1, child1, child2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test
new file mode 100644
index 00000000000..5d60c02a7e6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test
@@ -0,0 +1,42 @@
+--source include/have_rocksdb.inc
+
+#
+# Standard options in CREATE and ALTER TABLE
+#
+# Note: the test does not check whether the options
+# have any real effect on the table, only
+# that they are accepted
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Create table with standard options
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb
+ DEFAULT CHARACTER SET = utf8
+ COLLATE = utf8_general_ci
+ COMMENT = 'standard table options'
+;
+SHOW CREATE TABLE t1;
+
+# Alter comment
+
+ALTER TABLE t1 COMMENT = 'table altered';
+SHOW CREATE TABLE t1;
+
+# Alter ENGINE value
+
+ALTER TABLE t1 ENGINE=MEMORY;
+SHOW CREATE TABLE t1;
+ALTER TABLE t1 ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+
+# Alter character set and collation
+
+ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction.test b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test
new file mode 100644
index 00000000000..a76fa8f9871
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test
@@ -0,0 +1,105 @@
+--source include/have_rocksdb.inc
+
+create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb;
+
+insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10);
+
+# insert
+begin;
+insert into t1 values (11,11,11);
+--source transaction_select.inc
+rollback;
+
+# insert in the middle
+begin;
+insert into t1 values (7,7,7);
+--source transaction_select.inc
+rollback;
+
+# update non-index column by primary key
+begin;
+update t1 set value2=100 where id=1;
+--source transaction_select.inc
+rollback;
+
+# update secondary key by primary key
+begin;
+update t1 set value=100 where id=1;
+--source transaction_select.inc
+rollback;
+
+# update primary key by primary key
+begin;
+update t1 set id=100 where id=1;
+--source transaction_select.inc
+rollback;
+
+# update non-index column key by secondary key
+begin;
+update t1 set value2=100 where value=1;
+--source transaction_select.inc
+rollback;
+
+# update secondary key by secondary key
+begin;
+update t1 set value=100 where value=1;
+--source transaction_select.inc
+rollback;
+
+# update primary key by secondary key
+begin;
+update t1 set id=100 where value=1;
+--source transaction_select.inc
+rollback;
+
+# update non-index column by non-index column
+begin;
+update t1 set value2=100 where value2=1;
+--source transaction_select.inc
+rollback;
+
+# update secondary key by non-index column
+begin;
+update t1 set value=100 where value2=1;
+--source transaction_select.inc
+rollback;
+
+# update primary key column by non-index column
+begin;
+update t1 set id=100 where value2=1;
+--source transaction_select.inc
+rollback;
+
+
+# delete by primary key
+begin;
+delete from t1 where id=1;
+--source transaction_select.inc
+rollback;
+
+# delete by secondary key
+begin;
+delete from t1 where value=1;
+--source transaction_select.inc
+rollback;
+
+# delete by non-index column
+begin;
+delete from t1 where value2=1;
+--source transaction_select.inc
+rollback;
+
+# mixed
+begin;
+insert into t1 values (11,11,11);
+insert into t1 values (12,12,12);
+insert into t1 values (13,13,13);
+delete from t1 where id=9;
+delete from t1 where value=8;
+update t1 set id=100 where value2=5;
+update t1 set value=103 where value=4;
+update t1 set id=115 where id=3;
+--source transaction_select.inc
+rollback;
+
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc
new file mode 100644
index 00000000000..dbd1d90622f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc
@@ -0,0 +1,150 @@
+#
+# Basic check for transaction isolation.
+# The results should be different depending on the isolation level.
+# For some isolation levels, some statements will end with a timeout.
+# If the engine has its own timeout parameters, reduce them to the minimum,
+# otherwise the test will take a very long time.
+# If the timeout value is greater than the testcase-timeout the test is run with,
+# it might fail due to the testcase timeout.
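+#
+# The include expects $trx_isolation to be set by the calling test, for
+# example (illustrative):
+#   let $trx_isolation = READ COMMITTED;
+#   --source transaction_isolation.inc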
+#
+
+--enable_connect_log
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+connect (con2,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+
+connection con1;
+
+CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+START TRANSACTION;
+--sorted_result
+SELECT a FROM t1; # First snapshot
+
+connection con2;
+
+BEGIN;
+--error 0,ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a) VALUES(1);
+
+connection con1;
+--sorted_result
+SELECT a FROM t1; # Second snapshot
+
+connection con2;
+--error 0,ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a) VALUES (2);
+
+connection con1;
+--sorted_result
+SELECT a FROM t1; # Third snapshot
+
+--error 0,ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a) SELECT a+100 FROM t1;
+
+--sorted_result
+SELECT a FROM t1;
+
+connection con2;
+--sorted_result
+SELECT a FROM t1; # Inside the transaction
+COMMIT;
+--sorted_result
+SELECT a FROM t1; # Outside the transaction
+
+connection con1;
+--sorted_result
+SELECT a FROM t1; # Inside the transaction
+
+# Note: INSERT .. SELECT might be tricky, for example for InnoDB
+# even with REPEATABLE-READ it works as if it is executed with READ COMMITTED.
+# The test will have a 'logical' result for repeatable read, even though
+# we currently don't have an engine which works this way.
+
+--error 0,ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 (a) SELECT a+200 FROM t1;
+
+--sorted_result
+SELECT a FROM t1;
+COMMIT;
+--sorted_result
+SELECT a FROM t1; # Outside the transaction
+
+connection con2;
+--sorted_result
+SELECT a FROM t1; # After both transactions have committed
+
+# Now test with an error in one statement to make sure the snapshots are
+# held/released when expected
+connection default;
+CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (a) VALUES (1);
+COMMIT;
+
+connection con1;
+BEGIN;
+--sorted_result
+SELECT a from t2;
+--error ER_DUP_ENTRY
+INSERT INTO t2 (a) VALUES (1), (3); # failure
+
+connection con2;
+--error 0,ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t2 (a) VALUES (2);
+COMMIT;
+
+connection con1;
+--sorted_result
+SELECT a from t2;
+COMMIT;
+
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t1;
+DROP TABLE t2;
+
+CREATE TABLE t3 (
+ pk int unsigned PRIMARY KEY,
+ count int unsigned DEFAULT '0'
+) ENGINE=ROCKSDB;
+
+connect (con1,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+connect (con2,localhost,root,,);
+eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation;
+
+connection con1;
+BEGIN;
+SELECT * FROM t3;
+
+connection con2;
+BEGIN;
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+
+connection con1;
+--error 0,ER_LOCK_DEADLOCK
+INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1;
+COMMIT;
+
+# count will be 0 for repeatable read (because the last insert failed)
+# and 1 for read committed
+SELECT count FROM t3;
+
+connection default;
+disconnect con1;
+disconnect con2;
+DROP TABLE t3;
+
+--source include/wait_until_count_sessions.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc
new file mode 100644
index 00000000000..4feb427be49
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc
@@ -0,0 +1,15 @@
+select * from t1 where id=1;
+select * from t1 where value=1;
+select value from t1 where value=1;
+select * from t1 where value2=1;
+select * from t1 where id=5;
+select * from t1 where value=5;
+select value from t1 where value=5;
+select * from t1 where value2=5;
+select * from t1 where id < 3;
+select * from t1 where value < 3;
+select value from t1 where value < 3;
+select * from t1 where value2 < 3;
+select * from t1;
+select value from t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test
new file mode 100644
index 00000000000..a61488654a3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test
@@ -0,0 +1,74 @@
+--source include/have_rocksdb.inc
+
+#
+# TRUNCATE TABLE
+#
+
+########################################
+# TODO:
+# A part of the test is disabled because
+# HANDLER is not supported. If it ever
+# changes, the test will complain about
+# NOT producing ER_ILLEGAL_HA
+########################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+TRUNCATE TABLE t1;
+INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
+TRUNCATE TABLE t1;
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+
+# Truncate resets auto-increment value on the table
+
+CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb;
+
+#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 #
+--replace_column 5 # 6 # 7 #
+SHOW TABLE STATUS LIKE 't1';
+
+INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
+#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 #
+--replace_column 5 # 6 # 7 #
+SHOW TABLE STATUS LIKE 't1';
+
+TRUNCATE TABLE t1;
+#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 #
+--replace_column 5 # 6 # 7 #
+SHOW TABLE STATUS LIKE 't1';
+
+INSERT INTO t1 (c) VALUES ('d');
+#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 #
+--replace_column 5 # 6 # 7 #
+SHOW TABLE STATUS LIKE 't1';
+
+--sorted_result
+SELECT a,c FROM t1;
+DROP TABLE t1;
+
+# Truncate closes handlers
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+
+--error ER_ILLEGAL_HA
+HANDLER t1 OPEN AS h1;
+
+--disable_parsing
+
+HANDLER h1 READ FIRST;
+TRUNCATE TABLE t1;
+--error ER_UNKNOWN_TABLE
+HANDLER h1 READ NEXT;
+HANDLER t1 OPEN AS h2;
+HANDLER h2 READ FIRST;
+
+--enable_parsing
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt
new file mode 100644
index 00000000000..a9ebc4ec20b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt
@@ -0,0 +1,2 @@
+--rocksdb_max_subcompactions=1
+--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test
new file mode 100644
index 00000000000..b3f95f812b3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+-- let $truncate_table = 1
+-- let $drop_table = 0
+-- source drop_table3.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc
new file mode 100644
index 00000000000..8e2b0e8549b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc
@@ -0,0 +1,45 @@
+#
+# BINARY column types
+#
+
+# $extra_col_opts is set by a calling test
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ b BINARY $extra_col_opts,
+ b0 BINARY(0) $extra_col_opts,
+ b1 BINARY(1) $extra_col_opts,
+ b20 BINARY(20) $extra_col_opts,
+ b255 BINARY(255) $extra_col_opts,
+ pk BINARY PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Valid values
+
+INSERT INTO t1 VALUES ('','','','','','');
+INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a');
+
+--sorted_result
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+
+# Invalid values
+
+INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b');
+--error ER_DUP_ENTRY
+INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1;
+
+--sorted_result
+SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk;
+
+--error ER_TOO_BIG_FIELDLENGTH
+eval ALTER TABLE t1 ADD COLUMN b257 BINARY(257) $extra_col_opts;
+
+SHOW COLUMNS IN t1;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test
new file mode 100644
index 00000000000..91749e36a2e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# BINARY column types
+#
+
+--source type_binary.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt
new file mode 100644
index 00000000000..6ad42e58aa2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test
new file mode 100644
index 00000000000..f4360ed629b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test
@@ -0,0 +1,99 @@
+--source include/have_rocksdb.inc
+
+#
+# BINARY and VARBINARY columns with indexes
+#
+
+#######################################
+# TODO:
+# A part of the test is disabled
+# because unique keys are not supported
+#######################################
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (b BINARY,
+ b20 BINARY(20) PRIMARY KEY,
+ v16 VARBINARY(16),
+ v128 VARBINARY(128)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20;
+SELECT HEX(b20) FROM t1 ORDER BY b20;
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC;
+SELECT HEX(b20) FROM t1 ORDER BY b20 DESC;
+
+DROP TABLE t1;
+
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (b BINARY,
+ b20 BINARY(20),
+ v16 VARBINARY(16),
+ v128 VARBINARY(128),
+ UNIQUE INDEX b_v (b,v128),
+ pk VARBINARY(10) PRIMARY KEY
+) ENGINE=rocksdb;
+
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar';
+--sorted_result
+SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar';
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar';
+--sorted_result
+SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar';
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128);
+--sorted_result
+SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128);
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (b BINARY,
+ b20 BINARY(20),
+ v16 VARBINARY(16),
+ v128 VARBINARY(128),
+ pk VARBINARY(10) PRIMARY KEY,
+ INDEX (v16(10))
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,b20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6);
+INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1;
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%';
+--sorted_result
+SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%';
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%';
+--sorted_result
+SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%';
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc
new file mode 100644
index 00000000000..ba0c6537404
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc
@@ -0,0 +1,53 @@
+#
+# BIT column type
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Valid values
+
+eval CREATE TABLE t1 (
+ a BIT $extra_col_opts,
+ b BIT(20) $extra_col_opts,
+ c BIT(64) $extra_col_opts,
+ d BIT(1) $extra_col_opts,
+ PRIMARY KEY (c)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+ALTER TABLE t1 DROP COLUMN d;
+eval ALTER TABLE t1 ADD COLUMN d BIT(0) $extra_col_opts;
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1);
+SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0;
+
+INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0);
+--sorted_result
+SELECT a+0, b+0, c+0 FROM t1 WHERE d<100;
+
+INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0);
+--sorted_result
+SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2);
+
+DELETE FROM t1;
+
+# Out of range values
+# (should produce warnings)
+
+INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1);
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+
+INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0);
+--sorted_result
+SELECT a+0,b+0,c+0,d+0 FROM t1;
+
+DROP TABLE t1;
+
+--error ER_TOO_BIG_DISPLAYWIDTH
+eval CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) $extra_col_opts) ENGINE=rocksdb;
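+# (64 is the maximum width a BIT column accepts, so BIT(65) must fail.)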
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test
new file mode 100644
index 00000000000..8d57cabffc8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# BIT column type
+#
+
+--source type_bit.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test
new file mode 100644
index 00000000000..e4f4bb81819
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test
@@ -0,0 +1,113 @@
+--source include/have_rocksdb.inc
+
+#
+# BIT columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ a BIT,
+ b BIT(20) PRIMARY KEY,
+ c BIT(32),
+ d BIT(64)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c,d) VALUES
+(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13),
+(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF);
+
+--replace_column 9 #
+EXPLAIN SELECT b+0 FROM t1 ORDER BY b;
+SELECT b+0 FROM t1 ORDER BY b;
+
+DROP TABLE t1;
+
+--echo # TODO: Unique indexes are not enforced
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ a BIT,
+ b BIT(20),
+ c BIT(32),
+ d BIT(64),
+ pk BIT(10) PRIMARY KEY,
+UNIQUE INDEX b_c (b,c)
+) ENGINE=rocksdb;
+
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c,d,pk) VALUES
+(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4),
+(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10);
+
+--replace_column 9 #
+EXPLAIN SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF;
+--sorted_result
+SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (
+ a BIT,
+ b BIT(20),
+ c BIT(32),
+ d BIT(64),
+ pk BIT(10) PRIMARY KEY,
+ INDEX(a)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c,d,pk) VALUES
+(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4),
+(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a;
+SELECT DISTINCT a+0 FROM t1 ORDER BY a;
+
+DROP TABLE t1;
+
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ a BIT,
+ b BIT(20),
+ c BIT(32),
+ d BIT(64),
+ pk BIT(10) PRIMARY KEY,
+ UNIQUE INDEX (d)
+) ENGINE=rocksdb;
+
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c,d,pk) VALUES
+(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4),
+(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9),
+(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10);
+
+--replace_column 9 #
+EXPLAIN SELECT d FROM t1 WHERE d BETWEEN 1 AND 10000;
+--sorted_result
+SELECT d+0 FROM t1 WHERE d BETWEEN 1 AND 10000;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc
new file mode 100644
index 00000000000..723b3ee528c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc
@@ -0,0 +1,49 @@
+#
+# BLOB column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ b BLOB $extra_col_opts,
+ b0 BLOB(0) $extra_col_opts,
+ b1 BLOB(1) $extra_col_opts,
+ b300 BLOB(300) $extra_col_opts,
+ bm BLOB(65535) $extra_col_opts,
+ b70k BLOB(70000) $extra_col_opts,
+ b17m BLOB(17000000) $extra_col_opts,
+ t TINYBLOB $extra_col_opts,
+ m MEDIUMBLOB $extra_col_opts,
+ l LONGBLOB $extra_col_opts
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Valid values
+# (cannot get MAX for all columns due to max_allowed_packet limitations)
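+# (e.g. LONGBLOB can hold up to 2^32-1 bytes, far more than a single
+# INSERT statement can carry within max_allowed_packet)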
+
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) );
+
+--sorted_result
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+
+# Invalid values (produce warnings, except for mediumblob and longblob columns for which the values are within limits)
+
+INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+
+--sorted_result
+SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1;
+
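+# 4294967296 is 2^32, one past the largest length a BLOB column accepts.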
+--error ER_TOO_BIG_DISPLAYWIDTH
+ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296);
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test
new file mode 100644
index 00000000000..54859707091
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# BLOB column types
+#
+
+--source type_blob.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt
new file mode 100644
index 00000000000..6ad42e58aa2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test
new file mode 100644
index 00000000000..24c70e8e733
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test
@@ -0,0 +1,176 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+#
+# BLOB columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ b BLOB,
+ t TINYBLOB,
+ m MEDIUMBLOB,
+ l LONGBLOB,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+
+--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 #
+EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f;
+SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f;
+
+--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 #
+EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f;
+SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f;
+
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (
+ b BLOB,
+ t TINYBLOB,
+ m MEDIUMBLOB,
+ l LONGBLOB,
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ UNIQUE INDEX l_t (l(256),t(64))
+) ENGINE=rocksdb;
+
+--replace_column 6 # 7 # 10 # 11 #
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+
+# Here l_t is listed as a possible key, but it is not chosen as the final key
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC;
+
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (
+ b BLOB,
+ t TINYBLOB,
+ m MEDIUMBLOB,
+ l LONGBLOB,
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ INDEX (m(128))
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (b,t,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC;
+SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b BLOB,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b TINYBLOB,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b MEDIUMBLOB,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b LONGBLOB,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc
new file mode 100644
index 00000000000..cddc0822c44
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc
@@ -0,0 +1,64 @@
+#
+# BOOLEAN column type
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ b1 BOOL $extra_col_opts,
+ b2 BOOLEAN $extra_col_opts
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Valid values
+
+INSERT INTO t1 (b1,b2) VALUES (1,TRUE);
+SELECT b1,b2 FROM t1;
+
+INSERT INTO t1 (b1,b2) VALUES (FALSE,0);
+--sorted_result
+SELECT b1,b2 FROM t1;
+
+INSERT INTO t1 (b1,b2) VALUES (2,3);
+--sorted_result
+SELECT b1,b2 FROM t1;
+
+INSERT INTO t1 (b1,b2) VALUES (-1,-2);
+--sorted_result
+SELECT b1,b2 FROM t1;
+
+--sorted_result
+SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1;
+
+--sorted_result
+SELECT b1,b2 FROM t1 WHERE b1 = TRUE;
+
+--sorted_result
+SELECT b1,b2 FROM t1 WHERE b2 = FALSE;
+
+# Invalid values
+
+INSERT INTO t1 (b1,b2) VALUES ('a','b');
+--sorted_result
+SELECT b1,b2 FROM t1;
+
+INSERT INTO t1 (b1,b2) VALUES (128,-129);
+--sorted_result
+SELECT b1,b2 FROM t1;
+
+# This is why we don't have zerofill and unsigned tests
+# for boolean columns:
+--error ER_PARSE_ERROR
+eval ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED $extra_col_opts;
+
+--error ER_PARSE_ERROR
+eval ALTER TABLE t1 ADD COLUMN b3 BOOL ZEROFILL $extra_col_opts;
+
+DROP TABLE t1;
+
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test
new file mode 100644
index 00000000000..d5a3b9be83d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# BOOLEAN column type
+#
+
+--source type_bool.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc
new file mode 100644
index 00000000000..d770dc608fd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc
@@ -0,0 +1,45 @@
+#
+# CHAR column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ c CHAR $extra_col_opts,
+ c0 CHAR(0) $extra_col_opts,
+ c1 CHAR(1) $extra_col_opts,
+ c20 CHAR(20) $extra_col_opts,
+ c255 CHAR(255) $extra_col_opts,
+ PRIMARY KEY (c255)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Valid values
+
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','','');
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.');
+
+--sorted_result
+SELECT c,c0,c1,c20,c255 FROM t1;
+
+# Invalid values
+
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256));
+INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1;
+
+--sorted_result
+SELECT c,c0,c1,c20,c255 FROM t1;
+
+--sorted_result
+SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20;
+
+--error ER_TOO_BIG_FIELDLENGTH
+eval ALTER TABLE t1 ADD COLUMN c257 CHAR(257) $extra_col_opts;
+
+DROP TABLE t1;
+
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test
new file mode 100644
index 00000000000..5bcf23b39ee
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test
@@ -0,0 +1,19 @@
+--source include/have_rocksdb.inc
+
+#
+# CHAR column types
+#
+
+--source type_char.inc
+
+# Issue #226
+CREATE TABLE t1(c1 CHAR(0) NOT NULL);
+INSERT INTO t1 VALUES('a');
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# Issue #259
+CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key);
+INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five');
+SELECT * FROM t1 LIMIT 1 UNION SELECT * FROM t1;
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test
new file mode 100644
index 00000000000..6ee2f03e74d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test
@@ -0,0 +1,107 @@
+--source include/have_rocksdb.inc
+
+#
+# CHAR and VARCHAR columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ c CHAR,
+ c20 CHAR(20) PRIMARY KEY,
+ v16 VARCHAR(16),
+ v128 VARCHAR(128)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+
+--replace_column 9 #
+EXPLAIN SELECT c20 FROM t1 ORDER BY c20;
+SELECT c20 FROM t1 ORDER BY c20;
+
+--replace_column 9 #
+EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20;
+SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20;
+
+DROP TABLE t1;
+
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ c CHAR,
+ c20 CHAR(20),
+ v16 VARCHAR(16),
+ v128 VARCHAR(128),
+ pk CHAR(64) PRIMARY KEY,
+ UNIQUE INDEX c_v (c,v128)
+) ENGINE=rocksdb;
+
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b');
+
+--replace_column 9 #
+EXPLAIN SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar';
+--sorted_result
+SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar';
+
+--replace_column 9 #
+EXPLAIN SELECT v128, COUNT(*) FROM t1 GROUP BY v128;
+--sorted_result
+SELECT v128, COUNT(*) FROM t1 GROUP BY v128;
+
+--replace_column 9 #
+EXPLAIN SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128;
+--sorted_result
+SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128;
+
+SET SESSION optimizer_switch = 'engine_condition_pushdown=on';
+--replace_column 9 #
+EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
+--sorted_result
+SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
+SET SESSION optimizer_switch = @@global.optimizer_switch;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (
+ c CHAR,
+ c20 CHAR(20),
+ v16 VARCHAR(16),
+ v128 VARCHAR(128),
+ pk VARCHAR(64) PRIMARY KEY,
+ INDEX (v16)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4');
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%';
+--sorted_result
+SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%';
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%';
+--sorted_result
+SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%';
+
+--replace_column 9 #
+EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+--sorted_result
+SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test
new file mode 100644
index 00000000000..d51b55f07e2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test
@@ -0,0 +1,142 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+# Test to see if index-only scan fails gracefully if unpack info is not
+# available.
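+# (MyRocks stores key values in a mem-comparable form; the "unpack info" is
+# what lets the original column value be restored for an index-only read,
+# so without it the value has to come from the row itself.)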
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t (id int not null auto_increment primary key,
+ c varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ key sk (c));
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+insert into t (c) values ('A'), ('b'), ('C');
+--replace_column 9 #
+explain select c from t;
+select c from t;
+select c from t where c = 'a';
+
+drop table t;
+
+# Test index-only scans when they are force-enabled for an "unknown" (normally unsupported) collation.
+set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
+create table t (id int not null auto_increment primary key,
+ c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci,
+ key sk (c));
+insert into t (c) values ('☀'), ('ß');
+--replace_column 9 #
+explain select c from t;
+select c from t;
+drop table t;
+set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans";
+
+# Testing if all characters in latin1 charset get restored correctly. This is
+# done by comparing results from a PK scan.
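+# (The same ORDER BY is dumped to a file twice, once through the secondary
+# key and once with that key ignored, and the two files are compared with
+# --diff_files; they must be identical.)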
+create table t (id int not null auto_increment,
+ c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci,
+ primary key (id),
+ key sk1 (c1),
+ key sk2 (c2));
+
+let $i = 0;
+
+--disable_query_log
+while ($i < 256)
+{
+ --eval insert into t (c1, c2) values (CHAR('$i'), CHAR('$i'))
+ inc $i;
+}
+--enable_query_log
+
+--replace_column 9 #
+explain select hex(c1) from t order by c1;
+--replace_column 9 #
+explain select hex(c1) from t IGNORE INDEX (sk1) order by c1;
+
+--replace_column 9 #
+explain select hex(c2) from t order by c2;
+--replace_column 9 #
+explain select hex(c2) from t IGNORE INDEX (sk1) order by c2;
+
+--let $file1=$MYSQLTEST_VARDIR/tmp/filesort_order
+--let $file2=$MYSQLTEST_VARDIR/tmp/sk_order
+
+--disable_query_log
+--eval select hex(c1) INTO OUTFILE '$file1' from t order by c1
+--eval select hex(c1) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c1
+--enable_query_log
+
+--diff_files $file1 $file2
+--remove_file $file1
+--remove_file $file2
+
+--disable_query_log
+--eval select hex(c2) INTO OUTFILE '$file1' from t order by c2
+--eval select hex(c2) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c2
+--enable_query_log
+
+--diff_files $file1 $file2
+--remove_file $file1
+--remove_file $file2
+
+truncate t;
+
+# Test handling of spaces at the end of fields.
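+# (char_length() is checked so that trailing spaces are neither silently
+# stripped nor padded when the values are stored and read back.)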
+insert into t (c1, c2) values ('Asdf ', 'Asdf ');
+select char_length(c1), char_length(c2), c1, c2 from t;
+
+drop table t;
+
+create table t (id int not null auto_increment,
+ c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci,
+ primary key (id),
+ unique key sk2 (c2));
+
+insert into t (c2) values ('Asdf');
+--error ER_DUP_ENTRY
+insert into t (c2) values ('asdf ');
+
+drop table t;
+
+create table t (id int not null auto_increment,
+ c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ primary key (id),
+ unique key sk1 (c1));
+
+insert into t (c1) values ('Asdf');
+--error ER_DUP_ENTRY
+insert into t (c1) values ('asdf ');
+--error ER_DUP_ENTRY
+insert into t (c1) values ('asdf');
+
+drop table t;
+
+create table t (id int not null auto_increment,
+ c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci,
+ primary key (id),
+ unique key sk1 (c1(1)));
+
+insert into t (c1) values ('Asdf');
+insert into t (c1) values ('bbbb ');
+--error ER_DUP_ENTRY
+insert into t (c1) values ('a ');
+
+--replace_column 9 #
+explain select c1 from t;
+select c1 from t;
+
+drop table t;
+
+# Test varchar keyparts with key prefix
+set session rocksdb_verify_checksums = on;
+create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1;
+insert into t values (1, ' a');
+--replace_column 9 #
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+drop table t;
+
+# Test varchar with length greater than 255
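+# (presumably to cover the two-byte length case: a VARCHAR longer than 255 bytes needs a two-byte length prefix)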
+create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1;
+insert into t values (1, REPEAT('a', 700));
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc
new file mode 100644
index 00000000000..69d1154ea39
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc
@@ -0,0 +1,45 @@
+#
+# Date and time column types
+# (DATE, DATETIME, TIMESTAMP, TIME, YEAR)
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ d DATE $extra_col_opts,
+ dt DATETIME $extra_col_opts,
+ ts TIMESTAMP $extra_col_opts,
+ t TIME $extra_col_opts,
+ y YEAR $extra_col_opts,
+ y4 YEAR(4) $extra_col_opts,
+ y2 YEAR(2) $extra_col_opts,
+ pk DATETIME PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+SET @tm = '2012-04-09 05:27:00';
+
+# Valid values
+# '1970-01-01 00:00:01'
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'),
+('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'),
+('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15');
+
+--sorted_result
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+
+# Invalid values
+
+INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES
+('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16');
+
+SELECT d,dt,ts,t,y,y4,y2 FROM t1;
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test
new file mode 100644
index 00000000000..af4e006c900
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test
@@ -0,0 +1,9 @@
+--source include/have_rocksdb.inc
+
+#
+# Date and time column types
+# (DATE, DATETIME, TIMESTAMP, TIME, YEAR)
+#
+
+--source type_date_time.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test
new file mode 100644
index 00000000000..06cf86b7661
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test
@@ -0,0 +1,157 @@
+--source include/have_rocksdb.inc
+
+#
+# Date and time columns with indexes
+# (DATE, DATETIME, TIMESTAMP, TIME, YEAR)
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+
+CREATE TABLE t1 (
+ d DATE,
+ dt DATETIME PRIMARY KEY,
+ ts TIMESTAMP,
+ t TIME,
+ y YEAR
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+SET @tm = '2012-04-09 05:27:00';
+
+INSERT INTO t1 (d,dt,ts,t,y) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm));
+
+--replace_column 9 #
+EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3;
+SELECT dt FROM t1 ORDER BY dt LIMIT 3;
+
+--replace_column 9 #
+EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3;
+SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3;
+
+--error ER_DUP_ENTRY
+INSERT INTO t1 (d,dt,ts,t,y) VALUES
+('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000');
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ d DATE,
+ dt DATETIME,
+ ts TIMESTAMP,
+ t TIME,
+ y YEAR,
+ pk TIME PRIMARY KEY,
+ INDEX (ts)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+SET @tm = '2012-04-09 05:27:00';
+
+INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00');
+
+--replace_column 9 #
+EXPLAIN SELECT ts FROM t1 WHERE ts > NOW();
+--sorted_result
+SELECT ts FROM t1 WHERE ts > NOW();
+
+--replace_column 9 #
+EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW();
+--sorted_result
+SELECT ts FROM t1 USE INDEX () WHERE ts > NOW();
+
+DROP TABLE t1;
+
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ d DATE,
+ dt DATETIME,
+ ts TIMESTAMP,
+ t TIME,
+ y YEAR,
+ pk YEAR PRIMARY KEY,
+ UNIQUE INDEX d_t (d,t)
+) ENGINE=rocksdb;
+
+
+SHOW INDEX IN t1;
+SET @tm = '2012-04-09 05:27:00';
+
+INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','1990'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','1991'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','1992'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','1993'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','1994'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'1995');
+
+--replace_column 9 #
+EXPLAIN SELECT d, t FROM t1 WHERE CONCAT(d,' ',t) != CURRENT_DATE();
+--sorted_result
+SELECT d, t FROM t1 WHERE CONCAT(d,' ',t) != CURRENT_DATE();
+
+--replace_column 9 #
+EXPLAIN SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE();
+--sorted_result
+SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE();
+
+--error ER_DUP_ENTRY
+INSERT INTO t1 (d,dt,ts,t,y) VALUES
+('2012-01-12', '2010-11-22 12:33:53', '2011-11-14 21:45:55', '00:12:33', '2000');
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (
+ d DATE,
+ dt DATETIME,
+ ts TIMESTAMP,
+ t TIME,
+ y YEAR,
+ pk TIME PRIMARY KEY,
+ INDEX (y,t)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+SET @tm = '2012-04-09 05:27:00';
+
+INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
+('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'),
+('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'),
+('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'),
+('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'),
+('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'),
+(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18');
+
+--replace_column 9 #
+EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y;
+--sorted_result
+SELECT y, COUNT(*) FROM t1 GROUP BY y;
+
+--replace_column 9 #
+EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y;
+--sorted_result
+SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test
new file mode 100644
index 00000000000..d5ee75686df
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test
@@ -0,0 +1,163 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+--echo #
+--echo # Check DECIMAL PK handling in the old and the new data format
+--echo #
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+--echo # First, make the server create a dataset in the old format:
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t1 (
+ pk1 decimal(32,16),
+ pk2 decimal(32,16),
+ pk3 decimal(32,16),
+ a smallint not null,
+ primary key(pk1, pk2, pk3)
+);
+insert into t1
+select
+ A.a, B.a, C.a, 1234
+from t0 A, t0 B, t0 C;
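+# (t0 has 10 rows, so the cross join above inserts 10*10*10 = 1000 rows)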
+
+--echo #
+--echo # Looking at the table size, one can tell that the data is stored using
+--echo # the old format:
+--echo #
+set global rocksdb_force_flush_memtable_now=1;
+
+--let $data_length_old = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", DATA_LENGTH, 1)
+
+--echo # Check the format version:
+select table_name,index_name,kv_format_version
+from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name='t1';
+
+flush tables;
+
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+--source include/restart_mysqld.inc
+
+--echo # Check that the new server reads the data in the old format:
+select * from t1 order by pk1,pk2,pk3 limit 5;
+
+--echo #
+--echo # Ok, now, enable the new data format:
+--echo #
+create table t2 (
+ pk1 decimal(32,16),
+ pk2 decimal(32,16),
+ pk3 decimal(32,16),
+ a smallint not null,
+ primary key(pk1, pk2, pk3)
+);
+insert into t2
+select
+ A.a, B.a, C.a, 1234
+from t0 A, t0 B, t0 C;
+set global rocksdb_force_flush_memtable_now=1;
+
+--let $data_length_new = query_get_value("select DATA_LENGTH from information_schema.tables where table_schema=database() and table_name='t2'", DATA_LENGTH, 1)
+--disable_query_log
+--eval select $data_length_old > $data_length_new as "larger"
+--enable_query_log
+
+--echo # This should show the new PK data format
+select table_name,index_name,kv_format_version from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name='t2';
+
+--echo #
+--echo # Check that the server is able to read BOTH the old and the new formats:
+--echo #
+select * from t2 limit 3;
+select * from t1 limit 3;
+
+drop table t1,t2;
+drop table t0;
+
+--echo #
+--echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly.
+--echo # (Decoding happens from the mem-comparable image in the index, regardless
+--echo # of whether the value part carries the original value or not)
+--echo #
+
+create table t1 (
+ pk int not null primary key,
+ col1 decimal (2,1) signed,
+ col2 decimal (2,1) unsigned,
+ filler varchar(100),
+ key key1(col1, col2)
+)engine=rocksdb;
+insert into t1 values
+(1,-9.1, 0.7, 'filler'),
+(2,-8.2, 1.6, 'filler'),
+(3, 0.3, 2.5, 'filler'),
+(4, 1.4, 3.4, 'filler'),
+(5, 2.5, 4.3, 'filler'),
+(6, 3.3, 5.3, 'filler');
+insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1;
+insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1;
+analyze table t1;
+
+--echo # The following can't use index-only:
+--replace_column 9 #
+explain select * from t1 where col1 between -8 and 8;
+
+--echo # This will use index-only:
+--replace_column 9 #
+explain
+select col1, col2 from t1 where col1 between -8 and 8;
+select col1, col2 from t1 where col1 between -8 and 8;
+
+insert into t1 values (11, NULL, 0.9, 'row1-with-null');
+insert into t1 values (10, -8.4, NULL, 'row2-with-null');
+--replace_column 9 #
+explain
+select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7;
+select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7;
+
+--echo # Try an UPDATE
+select * from t1 where pk in (3,4);
+update t1 set col2= col2+0.2 where pk in (3,4);
+select * from t1 where pk in (3,4);
+
+drop table t1;
+
+--echo #
+--echo # Try another DECIMAL-based type that takes more space
+--echo #
+create table t1 (
+ pk int not null primary key,
+ col1 decimal (12,6) signed,
+ col2 decimal (12,6) unsigned,
+ filler varchar(100),
+ key key1(col1, col2)
+)engine=rocksdb;
+insert into t1 values
+(1,-900.001, 000.007, 'filler'),
+(2,-700.002, 100.006, 'filler'),
+(3, 000.003, 200.005, 'filler'),
+(4, 100.004, 300.004, 'filler'),
+(5, 200.005, 400.003, 'filler'),
+(6, 300.003, 500.003, 'filler');
+insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1;
+insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1;
+analyze table t1;
+
+--replace_column 9 #
+explain
+select col1, col2 from t1 force index(key1) where col1 between -800 and 800;
+select col1, col2 from t1 force index(key1) where col1 between -800 and 800;
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc
new file mode 100644
index 00000000000..8184f6261cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc
@@ -0,0 +1,50 @@
+#
+# ENUM column type
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Valid values.
+# We cannot test the maximum of 65,535 enum elements here,
+# because mysqltest has the standard limit of MAX_QUERY=256K;
+# but we can at least try 257
+
+eval CREATE TABLE t1 (
+ a ENUM('') $extra_col_opts,
+ b ENUM('test1','test2','test3','test4','test5') $extra_col_opts,
+ c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') $extra_col_opts,
+ PRIMARY KEY (b)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2);
+SELECT a,b,c FROM t1;
+
+# Out of range values
+# (should produce warnings)
+
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+--sorted_result
+SELECT a,b,c FROM t1;
+
+# Non-unique values in enum
+# (should produce a warning)
+eval ALTER TABLE t1 ADD COLUMN e ENUM('a','A') $extra_col_opts;
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A');
+--sorted_result
+SELECT a,b,c,e FROM t1;
+
+# Simple comparison
+
+--sorted_result
+SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != '';
+
+DROP TABLE t1;
+
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test
new file mode 100644
index 00000000000..d79469b2fad
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# ENUM column type
+#
+
+--source type_enum.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test
new file mode 100644
index 00000000000..d7086a45fe1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test
@@ -0,0 +1,93 @@
+--source include/have_rocksdb.inc
+
+#
+# ENUM columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'),
+ b ENUM('test1','test2','test3','test4','test5'),
+ c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'),
+ pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+ UNIQUE KEY a_b (a,b)
+) ENGINE=rocksdb;
+
+
+INSERT INTO t1 (a,b,c,pk) VALUES
+('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3),
+('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6);
+
+SHOW INDEX IN t1;
+
+--replace_column 9 #
+EXPLAIN SELECT a FROM t1 WHERE b > 'test2' ORDER BY a;
+SELECT a FROM t1 WHERE b > 'test2' ORDER BY a;
+
+--replace_column 9 #
+EXPLAIN SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a;
+SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (
+ a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'),
+ b ENUM('test1','test2','test3','test4','test5'),
+ c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b,c) VALUES
+('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'),
+('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d');
+
+SHOW INDEX IN t1;
+
+--replace_column 9 #
+EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u';
+--sorted_result
+SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u';
+
+--replace_column 9 #
+EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u';
+--sorted_result
+SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u';
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'),
+ b ENUM('test1','test2','test3','test4','test5'),
+ c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'),
+ pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+ INDEX(b)
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b,c,pk) VALUES
+('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3),
+('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6);
+
+SHOW INDEX IN t1;
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT b FROM t1;
+--sorted_result
+SELECT DISTINCT b FROM t1;
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b);
+--sorted_result
+SELECT DISTINCT b FROM t1 IGNORE INDEX (b);
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc
new file mode 100644
index 00000000000..424f7c4f4ac
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc
@@ -0,0 +1,85 @@
+#
+# Fixed point types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ d DECIMAL $extra_col_opts,
+ d0 DECIMAL(0) $extra_col_opts,
+ d1_1 DECIMAL(1,1) $extra_col_opts,
+ d10_2 DECIMAL(10,2) $extra_col_opts,
+ d60_10 DECIMAL(60,10) $extra_col_opts,
+ n NUMERIC $extra_col_opts,
+ n0_0 NUMERIC(0,0) $extra_col_opts,
+ n1 NUMERIC(1) $extra_col_opts,
+ n20_4 NUMERIC(20,4) $extra_col_opts,
+ n65_4 NUMERIC(65,4) $extra_col_opts,
+ pk NUMERIC $extra_col_opts PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Always valid values
+
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3);
+
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+
+# Values which can be valid or not,
+# depending on whether columns are SIGNED or UNSIGNED
+# (if not valid, they should produce warnings)
+
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4);
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5);
+
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100;
+
+# Invalid values
+
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 6
+);
+
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7);
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+
+INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8);
+--sorted_result
+SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1;
+
+--error ER_TOO_BIG_PRECISION
+eval ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) $extra_col_opts;
+
+--error ER_TOO_BIG_PRECISION
+eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) $extra_col_opts;
+
+--error ER_TOO_BIG_SCALE
+eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) $extra_col_opts;
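+# (DECIMAL allows at most 65 digits of precision and a scale of 30, hence the
+# errors above)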
+
+DROP TABLE t1;
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test
new file mode 100644
index 00000000000..4f2c09d17c1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# Fixed point types
+#
+
+--source type_fixed.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test
new file mode 100644
index 00000000000..e9e6df58d21
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test
@@ -0,0 +1,107 @@
+--source include/have_rocksdb.inc
+
+#
+# Fixed point columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+
+CREATE TABLE t1 (
+ d1 DECIMAL(10,2) PRIMARY KEY,
+ d2 DECIMAL(60,10),
+ n1 NUMERIC,
+ n2 NUMERIC(65,4)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (d1,d2,n1,n2) VALUES
+(10.22,60.12345,123456,14.3456),
+(10.0,60.12345,123456,14),
+(11.14,15,123456,13),
+(100,100,1,2),
+(0,0,0,0),
+(4540424564.23,3343303441.0,12,13),
+(15,17,23,100000);
+
+--replace_column 9 #
+EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC;
+SELECT d1 FROM t1 ORDER BY d1 DESC;
+
+--replace_column 9 #
+EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC;
+SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC;
+
+DROP TABLE t1;
+
+# --error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ d1 DECIMAL(10,2),
+ d2 DECIMAL(60,10),
+ n1 NUMERIC,
+ n2 NUMERIC(65,4),
+ pk NUMERIC PRIMARY KEY,
+ UNIQUE INDEX n1_n2 (n1,n2)
+) ENGINE=rocksdb;
+
+# --disable_parsing
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES
+(10.22,60.12345,123456,14.3456,1),
+(10.0,60.12345,123456,14,2),
+(11.14,15,123456,13,3),
+(100,100,1,2,4),
+(0,0,0,0,5),
+(4540424564.23,3343303441.0,12,13,6),
+(15,17,23,100000,7);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT n1+n2 FROM t1;
+--sorted_result
+SELECT DISTINCT n1+n2 FROM t1;
+
+DROP TABLE t1;
+
+#--enable_parsing
+
+CREATE TABLE t1 (
+ d1 DECIMAL(10,2),
+ d2 DECIMAL(60,10),
+ n1 NUMERIC,
+ n2 NUMERIC(65,4),
+ pk DECIMAL(20,10) PRIMARY KEY,
+ INDEX (d2)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES
+(10.22,60.12345,123456,14.3456,1),
+(10.0,60.12345,123456,14,2),
+(11.14,15,123456,13,3),
+(100,100,1,2,4),
+(0,0,0,0,5),
+(4540424564.23,3343303441.0,12,13,6),
+(15,17,23,100000,7);
+
+--replace_column 9 #
+EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2;
+--sorted_result
+SELECT d2, COUNT(*) FROM t1 GROUP BY d2;
+
+--replace_column 9 #
+EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2;
+--sorted_result
+SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc
new file mode 100644
index 00000000000..2f37e55b8d6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc
@@ -0,0 +1,108 @@
+#
+# Float types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ f FLOAT $extra_col_opts,
+ f0 FLOAT(0) $extra_col_opts,
+ r1_1 REAL(1,1) $extra_col_opts,
+ f23_0 FLOAT(23) $extra_col_opts,
+ f20_3 FLOAT(20,3) $extra_col_opts,
+ d DOUBLE $extra_col_opts,
+ d1_0 DOUBLE(1,0) $extra_col_opts,
+ d10_10 DOUBLE PRECISION (10,10) $extra_col_opts,
+ d53 DOUBLE(53,0) $extra_col_opts,
+ d53_10 DOUBLE(53,10) $extra_col_opts,
+ pk DOUBLE $extra_col_opts PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Always valid values
+
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1);
+
+--sorted_result
+--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1
+
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2);
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+ 99999999999999999999999999999999999999,
+ 99999999999999999999999999999999999999.9999999999999999,
+ 0.9,
+ 99999999999999999999999999999999999999.9,
+ 99999999999999999.999,
+ 999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+ 9,
+ 0.9999999999,
+ 1999999999999999999999999999999999999999999999999999999,
+ 19999999999999999999999999999999999999999999.9999999999,
+ 3
+);
+
+--sorted_result
+--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1
+
+# Values which can be valid or not,
+# depending on whether columns are SIGNED or UNSIGNED
+# (if not valid, they should produce warnings)
+
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4);
+
+--sorted_result
+--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1
+
+--sorted_result
+--query_vertical SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1
+
+# Invalid values
+
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 9999999999999999999999999999999999999999999999999999999999999.9999,
+ 5
+);
+
+--sorted_result
+--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1
+
+INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (
+ 999999999999999999999999999999999999999,
+ 999999999999999999999999999999999999999.9999999999999999,
+ 1.9,
+ 999999999999999999999999999999999999999.9,
+ 999999999999999999.999,
+ 9999999999999999999999999999999999999999999999999999999999999999999999999999999999,
+ 99,
+ 1.9999999999,
+ 1999999999999999999999999999999999999999999999999999999,
+ 19999999999999999999999999999999999999999999.9999999999,
+ 6
+);
+
+--sorted_result
+--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1
+
+--error ER_TOO_BIG_DISPLAYWIDTH
+eval ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) $extra_col_opts;
+
+--error ER_TOO_BIG_PRECISION
+eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) $extra_col_opts;
+
+--error ER_TOO_BIG_SCALE
+eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) $extra_col_opts;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test
new file mode 100644
index 00000000000..1f1849992ca
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# Float types
+#
+
+--source type_float.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test
new file mode 100644
index 00000000000..907bc614d49
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test
@@ -0,0 +1,175 @@
+--source include/have_rocksdb.inc
+
+#
+# Float type columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ f FLOAT PRIMARY KEY,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp) VALUES
+(1.2345,1422.22,1.2345,1234567.89),
+(0,0,0,0),
+(-1,-1,-1,-1),
+(17.5843,4953453454.44,29229114.0,1111111.23),
+(4644,1422.22,466664.999,0.5);
+
+--replace_column 9 #
+EXPLAIN SELECT f FROM t1 ORDER BY f;
+SELECT f FROM t1 ORDER BY f;
+
+--replace_column 9 #
+EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f;
+SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f;
+
+DROP TABLE t1;
+
+#--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ f FLOAT,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10),
+ pk DOUBLE PRIMARY KEY,
+ UNIQUE KEY r_dp (r,dp)
+) ENGINE=rocksdb;
+
+#--disable_parsing
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+
+--replace_column 9 #
+EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0;
+--sorted_result
+SELECT r, dp FROM t1 WHERE r > 0 or dp > 0;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ f FLOAT,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10),
+ pk FLOAT PRIMARY KEY,
+ UNIQUE KEY(d)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d;
+SELECT DISTINCT d FROM t1 ORDER BY d;
+
+DROP TABLE t1;
+
+#--enable_parsing
+
+CREATE TABLE t1 (
+ f FLOAT,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10),
+ pk FLOAT PRIMARY KEY,
+ KEY(d)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d;
+SELECT DISTINCT d FROM t1 ORDER BY d;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ f FLOAT,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10),
+ pk FLOAT PRIMARY KEY,
+ UNIQUE KEY(f)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+
+# Should fail because of the 'unique' constraint
+--error ER_DUP_ENTRY
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,0,0,0,6);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f;
+SELECT DISTINCT f FROM t1 ORDER BY f;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ f FLOAT,
+ r REAL(20,3),
+ d DOUBLE,
+ dp DOUBLE PRECISION (64,10),
+ pk FLOAT PRIMARY KEY,
+ KEY(f)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,1422.22,1.2345,1234567.89,1),
+(0,0,0,0,2),
+(-1,-1,-1,-1,3),
+(17.5843,4953453454.44,29229114.0,1111111.23,4),
+(4644,1422.22,466664.999,0.5,5);
+
+# Should succeed because there is no 'unique' constraint
+INSERT INTO t1 (f,r,d,dp,pk) VALUES
+(1.2345,0,0,0,6);
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f;
+SELECT DISTINCT f FROM t1 ORDER BY f;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc
new file mode 100644
index 00000000000..dbcdfe4fbdd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc
@@ -0,0 +1,68 @@
+#
+# INT column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ i INT $extra_col_opts,
+ i0 INT(0) $extra_col_opts,
+ i1 INT(1) $extra_col_opts,
+ i20 INT(20) $extra_col_opts,
+ t TINYINT $extra_col_opts,
+ t0 TINYINT(0) $extra_col_opts,
+ t1 TINYINT(1) $extra_col_opts,
+ t20 TINYINT(20) $extra_col_opts,
+ s SMALLINT $extra_col_opts,
+ s0 SMALLINT(0) $extra_col_opts,
+ s1 SMALLINT(1) $extra_col_opts,
+ s20 SMALLINT(20) $extra_col_opts,
+ m MEDIUMINT $extra_col_opts,
+ m0 MEDIUMINT(0) $extra_col_opts,
+ m1 MEDIUMINT(1) $extra_col_opts,
+ m20 MEDIUMINT(20) $extra_col_opts,
+ b BIGINT $extra_col_opts,
+ b0 BIGINT(0) $extra_col_opts,
+ b1 BIGINT(1) $extra_col_opts,
+ b20 BIGINT(20) $extra_col_opts,
+ pk INT AUTO_INCREMENT PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Always valid values
+
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807);
+--sorted_result
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+
+# Values which can be valid or not,
+# depending on whether columns are SIGNED or UNSIGNED
+# (if not valid, they should produce warnings)
+
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808);
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615);
+
+--sorted_result
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+
+# Invalid values
+
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809);
+
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616);
+
+INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615);
+
+--sorted_result
+SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1;
+
+--error ER_TOO_BIG_DISPLAYWIDTH
+eval ALTER TABLE t1 ADD COLUMN i257 INT(257) $extra_col_opts;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test
new file mode 100644
index 00000000000..e92f6692172
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# INT column types
+#
+
+--source type_int.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test
new file mode 100644
index 00000000000..c95c3d88b8a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test
@@ -0,0 +1,75 @@
+--source include/have_rocksdb.inc
+
+#
+# INT column types with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ i INT PRIMARY KEY,
+ t TINYINT,
+ s SMALLINT,
+ m MEDIUMINT,
+ b BIGINT
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+
+--replace_column 9 #
+EXPLAIN SELECT i FROM t1 ORDER BY i;
+SELECT i FROM t1 ORDER BY i;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ i INT,
+ t TINYINT,
+ s SMALLINT,
+ m MEDIUMINT,
+ b BIGINT,
+ pk SMALLINT AUTO_INCREMENT PRIMARY KEY,
+ INDEX s_m (s,m)
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+
+--replace_column 9 #
+EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1;
+--sorted_result
+SELECT s, m FROM t1 WHERE s != 10 AND m != 1;
+
+DROP TABLE t1;
+
+--echo # RocksDB: unique indexes allowed
+#--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ i INT,
+ t TINYINT,
+ s SMALLINT,
+ m MEDIUMINT,
+ b BIGINT,
+ pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY,
+ UNIQUE KEY b_t (b,t)
+) ENGINE=rocksdb;
+
+##--disable_parsing
+
+INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16);
+
+# This query should use the index b_t; we just don't want to run EXPLAIN
+# (to avoid mismatches due to different subquery-related plans)
+SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t;
+
+DROP TABLE t1;
+
+##--enable_parsing
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc
new file mode 100644
index 00000000000..25ae58294f7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc
@@ -0,0 +1,49 @@
+#
+# SET column type
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# Valid values.
+
+eval CREATE TABLE t1 (
+ a SET('') $extra_col_opts,
+ b SET('test1','test2','test3','test4','test5') $extra_col_opts,
+ c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') $extra_col_opts,
+ PRIMARY KEY (c)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,,23'),
+('',5,2),
+(',','test4,test2','');
+--sorted_result
+SELECT a,b,c FROM t1;
+
+# Out of range values
+# (should produce warnings)
+
+INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1);
+--sorted_result
+SELECT a,b,c FROM t1;
+
+# Non-unique values in set
+# (should produce a warning)
+eval ALTER TABLE t1 ADD COLUMN e SET('a','A') $extra_col_opts;
+
+SHOW COLUMNS IN t1;
+
+--error ER_TOO_BIG_SET
+eval ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') $extra_col_opts;
+
+# Simple comparison
+
+--sorted_result
+SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != '';
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test
new file mode 100644
index 00000000000..bd306668e6d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# SET column type
+#
+
+--source type_set.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test
new file mode 100644
index 00000000000..79232255d25
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test
@@ -0,0 +1,104 @@
+--source include/have_rocksdb.inc
+
+#
+# SET columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (
+ a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'),
+ b SET('test1','test2','test3','test4','test5'),
+ c SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,23'),
+('',5,2),
+('N.America,Asia','test4,test2',''),
+('Africa,Europe,Asia','test2,test3','01'),
+('Antarctica','test3','34,44'),
+('Asia','test5','50'),
+('Europe,S.America','test1,','39');
+
+--replace_column 9 #
+EXPLAIN SELECT c FROM t1 ORDER BY c;
+SELECT c FROM t1 ORDER BY c;
+
+--replace_column 9 #
+EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c;
+SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c;
+
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02');
+
+--error ER_DUP_ENTRY
+INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44');
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'),
+ b SET('test1','test2','test3','test4','test5'),
+ c SET('01','22','23','33','34','39','40','44','50','63','64'),
+ pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+ INDEX(a)
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c,pk) VALUES
+('','test2,test3','01,34,44,23',1),
+('',5,2,2),
+('N.America,Asia','test4,test2','',3),
+('Africa,Europe,Asia','test2,test3','01',4),
+('Antarctica','test3','34,44',5),
+('Asia','test5','50',6),
+('Europe,S.America','test1,','39',7);
+
+--replace_column 9 #
+EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0;
+--sorted_result
+SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0;
+
+DROP TABLE t1;
+
+--disable_parsing
+#--error ER_GET_ERRMSG
+CREATE TABLE t1 (
+ a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'),
+ b SET('test1','test2','test3','test4','test5'),
+ c SET('01','22','23','33','34','39','40','44','50','63','64'),
+ pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY,
+ UNIQUE INDEX b_a (b,a)
+) ENGINE=innodb;
+
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (a,b,c) VALUES
+('','test2,test3','01,34,44,23'),
+('',5,2),
+('N.America,Asia','test4,test2',''),
+('Africa,Europe,Asia','test2,test3','01'),
+('Antarctica','test3','34,44'),
+('Asia','test5','50'),
+('Europe,S.America','test1,','39');
+
+--replace_column 9 #
+EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a;
+--sorted_result
+SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a;
+
+DROP TABLE t1;
+
+--enable_parsing
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc
new file mode 100644
index 00000000000..f5c9c3b8978
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc
@@ -0,0 +1,49 @@
+#
+# TEXT column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT PRIMARY KEY,
+ t TEXT $extra_col_opts,
+ t0 TEXT(0) $extra_col_opts,
+ t1 TEXT(1) $extra_col_opts,
+ t300 TEXT(300) $extra_col_opts,
+ tm TEXT(65535) $extra_col_opts,
+ t70k TEXT(70000) $extra_col_opts,
+ t17m TEXT(17000000) $extra_col_opts,
+ tt TINYTEXT $extra_col_opts,
+ m MEDIUMTEXT $extra_col_opts,
+ l LONGTEXT $extra_col_opts
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+# Valid values
+# (cannot insert maximum-length values for all columns due to max_allowed_packet limitations)
+
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+('','','','','','','','','',''),
+('a','b','c','d','e','f','g','h','i','j'),
+('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'),
+( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) );
+
+--sorted_result
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+
+# Invalid values (produce warnings, except for the mediumtext and longtext columns, for which the values are within limits)
+
+INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES
+( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) );
+
+--sorted_result
+SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1;
+
+--error ER_TOO_BIG_DISPLAYWIDTH
+eval ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) $extra_col_opts;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test
new file mode 100644
index 00000000000..c6dd24ff42e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# TEXT column types
+#
+
+--source type_text.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt
new file mode 100644
index 00000000000..6ad42e58aa2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test
new file mode 100644
index 00000000000..5fdc4bff39d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test
@@ -0,0 +1,171 @@
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+#
+# TEXT columns with indexes
+#
+
+SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+
+CREATE TABLE t1 (
+ t TEXT,
+ tt TINYTEXT,
+ m MEDIUMTEXT,
+ l LONGTEXT,
+ PRIMARY KEY t (t(32))
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (t,tt,m,l) VALUES
+('','','',''),
+('a','b','c','d'),
+('b','d','c','b'),
+('test1','test2','test3','test4'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)),
+('abc','def','ghi','jkl'),
+('test2','test3','test4','test5'),
+('test3','test4','test5','test6'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128));
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f;
+SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f;
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f;
+SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f;
+DROP TABLE t1;
+
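+# A TEXT/BLOB column used as a key part needs an explicit prefix length; the
+# TINYTEXT primary key below has none, so this CREATE is expected to fail.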
+--error ER_BLOB_KEY_WITHOUT_LENGTH
+CREATE TABLE t1 (
+ t TEXT,
+ tt TINYTEXT,
+ m MEDIUMTEXT,
+ l LONGTEXT,
+ pk TINYTEXT PRIMARY KEY,
+ UNIQUE INDEX l_tt (l(256),tt(64))
+) ENGINE=rocksdb;
+
+CREATE TABLE t1 (
+ t TEXT,
+ tt TINYTEXT,
+ m MEDIUMTEXT,
+ l LONGTEXT,
+ pk MEDIUMTEXT,
+ PRIMARY KEY mt (pk(1)),
+ INDEX (m(128))
+) ENGINE=rocksdb;
+
+SHOW INDEX IN t1;
+
+INSERT INTO t1 (t,tt,m,l,pk) VALUES
+('','','','','0'),
+('a','b','c','d','1'),
+('b','d','c','b','2'),
+('test1','test2','test3','test4','3'),
+(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'),
+('abc','def','ghi','jkl','5'),
+('test2','test3','test4','test5','6'),
+('test3','test4','test5','test6','7'),
+(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'),
+(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9');
+
+--replace_column 9 #
+EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b TEXT,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b TINYTEXT,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b MEDIUMTEXT,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b LONGTEXT,
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+
+SELECT b FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b LONGTEXT CHARACTER SET "binary" COLLATE "binary",
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
+INSERT INTO t1 (b) VALUES
+('00000000000000000000000000000000'),
+('00000000000000000000000000000001'),
+('00000000000000000000000000000002');
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' ');
+
+SELECT hex(b) FROM t1;
+
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin",
+ PRIMARY KEY b (b(32))
+) ENGINE=rocksdb;
+
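+# latin1_bin pads with trailing spaces on comparison, so '' and ' ' presumably
+# compare as equal here and the three-value insert hits a duplicate key, unlike
+# the binary collation case above where all three values were accepted.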
+--error ER_DUP_ENTRY
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' ');
+
+INSERT INTO t1 (b) VALUES (''), (_binary 0x0);
+SELECT hex(b) FROM t1;
+
+DROP TABLE t1;
+
+SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc
new file mode 100644
index 00000000000..a9f9883198c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc
@@ -0,0 +1,75 @@
+#
+# VARBINARY column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ v0 VARBINARY(0) $extra_col_opts,
+ v1 VARBINARY(1) $extra_col_opts,
+ v64 VARBINARY(64) $extra_col_opts,
+ v65000 VARBINARY(65000) $extra_col_opts,
+ PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+eval CREATE TABLE t2 (v VARBINARY(65532) $extra_col_opts, PRIMARY KEY(v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+
+--sorted_result
+SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1;
+
+# Invalid values
+
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+
+--sorted_result
+SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1;
+
+eval ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) $extra_col_opts;
+SHOW COLUMNS IN t1;
+
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test
new file mode 100644
index 00000000000..2ef3da8a499
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+#
+# VARBINARY column types
+#
+
+--source type_varbinary.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc
new file mode 100644
index 00000000000..ce5dbb444d3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc
@@ -0,0 +1,77 @@
+#
+# VARCHAR column types
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+eval CREATE TABLE t1 (
+ v0 VARCHAR(0) $extra_col_opts,
+ v1 VARCHAR(1) $extra_col_opts,
+ v64 VARCHAR(64) $extra_col_opts,
+ v65000 VARCHAR(65000) $extra_col_opts,
+ PRIMARY KEY (v64)
+) ENGINE=rocksdb;
+
+SHOW COLUMNS IN t1;
+
+CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb;
+SHOW COLUMNS IN t2;
+
+# Valid values
+
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','','');
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+
+ If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+ For developers who want to code on MariaDB or MySQL
+
+ * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+ o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+ o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+
+ For MariaDB / MySQL end users
+
+ * MariaDB Crash Course by Ben Forta
+ o First MariaDB book!
+ o For people who want to learn SQL and the basics of MariaDB.
+ o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+
+ * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+ o Free to read in the Knowledgebase!
+
+ * MySQL (4th Edition) by Paul DuBois
+ o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+
+ * MySQL Cookbook by Paul DuBois
+ o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+
+ * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+ o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+
+ * MySQL Admin Cookbook
+ o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+
+ * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+ o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ');
+
+--sorted_result
+SELECT v0,v1,v64,v65000 FROM t1;
+
+# Invalid values
+
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501));
+INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1;
+
+--sorted_result
+SELECT v0, v1, v64, LENGTH(v65000) FROM t1;
+
+eval ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) $extra_col_opts;
+SHOW COLUMNS IN t1;
+
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test
new file mode 100644
index 00000000000..6416af7308f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test
@@ -0,0 +1,75 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1,t2;
+--enable_warnings
+
+#
+# VARCHAR column types
+#
+
+--echo #
+--echo # A basic test whether endspace-aware variable length encoding
+--echo # works when in PK
+--echo #
+create table t1 ( pk varchar(10) primary key, col1 int not null) ;
+insert into t1 values ('a', 1);
+select * from t1;
+drop table t1;
+
+--source type_varchar.inc
+
+
+--echo #
+--echo # Endspace-comparison tests:
+--echo #
+
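+# The endspace include below is sourced once per collation; it reads the
+# $character_set_collate variable set before each --source line.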
+--let $character_set_collate=CHARACTER SET latin1 COLLATE latin1_bin
+--source type_varchar_endspace.inc
+
+--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_bin
+--source type_varchar_endspace.inc
+
+--let $character_set_collate=CHARACTER SET ucs2 COLLATE ucs2_bin
+--source type_varchar_endspace.inc
+
+--let $character_set_collate=CHARACTER SET utf8mb4 COLLATE utf8mb4_bin
+--source type_varchar_endspace.inc
+
+--let $character_set_collate=CHARACTER SET utf16 COLLATE utf16_bin
+--source type_varchar_endspace.inc
+
+create table t1 (
+ pk int primary key,
+ col1 varchar(10) collate utf8mb4_bin not null,
+ col2 varchar(20),
+ key(col1)
+) engine=rocksdb;
+
+insert into t1 values (1, 'ab','ab');
+insert into t1 values (2, 'ab\0','ab0');
+
+select pk, hex(col1), col2 from t1 force index(col1) order by col1;
+select pk, hex(col1), col2 from t1 ignore index(col1) order by col1;
+drop table t1;
+
+# Issue #306 - Do not store trailing spaces for prefixed keys.
+create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
+insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc ');
+--replace_column 9 #
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+drop table t;
+
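+# Re-run a similar prefixed-key case with row checksums forced on, so that the
+# read path presumably verifies the stored checksums as well.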
+set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct;
+set @save_rocksdb_verify_checksums = @@session.rocksdb_verify_checksums;
+set global rocksdb_checksums_pct = 100;
+set session rocksdb_verify_checksums = on;
+create table t (id int primary key, email varchar(100), KEY email_i (email(30)));
+insert into t values (1, 'a');
+--replace_column 9 #
+explain select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+select 'email_i' as index_name, count(*) AS count from t force index(email_i);
+drop table t;
+set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct;
+set session rocksdb_verify_checksums = @save_rocksdb_verify_checksums;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test
new file mode 100644
index 00000000000..d61e85ed204
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_debug.test
@@ -0,0 +1,137 @@
+#
+# VARCHAR encoding tests that require debug support
+#
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1,t2;
+--enable_warnings
+
+
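+# The debug flag below appears to allow index-only scans on indexes whose
+# collation MyRocks cannot otherwise decode back to the original value
+# (here utf8_general_ci), so the endspace tests can run against it.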
+set session debug= "+d,myrocks_enable_unknown_collation_index_only_scans";
+
+--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_general_ci
+--source type_varchar_endspace.inc
+
+set session debug= "-d,myrocks_enable_unknown_collation_index_only_scans";
+
+--echo #
+--echo # Check backwards compatibility:
+--echo #
+
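+# The MYROCKS_FORMAT_VERSION_INITIAL debug flag is used here to create tables
+# in the old on-disk key format, which the compatibility checks below rely on.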
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+
+--echo # Create the tables in the old format
+
+create table t1 (
+ pk varchar(64) collate latin1_bin,
+ col1 varchar(64),
+ primary key (pk)
+);
+insert into t1 values ('a','a');
+--echo # The following will not produce an error:
+insert into t1 values ('a ', 'a-space');
+select pk, hex(pk), col1 from t1;
+
+create table t2 (
+ pk int not null primary key,
+ col1 varchar(64) collate latin1_bin,
+ col2 varchar(64),
+ unique key (col1)
+);
+
+insert into t2 values (0, 'ab', 'a-b');
+--echo # The following will not produce an error:
+insert into t2 values (1, 'a ', 'a-space');
+insert into t2 values (2, 'a', 'a');
+select pk, col1, hex(col1), col2 from t2;
+
+--echo # Check the format version:
+select table_name,index_name,kv_format_version
+from information_schema.ROCKSDB_DDL
+where TABLE_SCHEMA=database() AND table_name in ('t1','t2');
+
+flush tables;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+select pk, hex(pk), col1 from t1;
+select pk, col1, hex(col1), col2 from t2;
+
+## Check that we can still read the data when starting on the old datadir:
+--source include/restart_mysqld.inc
+
+select pk, hex(pk), col1 from t1;
+select pk, col1, hex(col1), col2 from t2;
+
+drop table t1,t2;
+
+
+--echo #
+--echo # General upgrade tests to see that they work.
+--echo #
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+ id int primary key,
+ col1 varchar(64) collate latin1_swedish_ci,
+ unique key (col1)
+) engine=rocksdb;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+
+insert into t2 values (1, 'a');
+insert into t2 values (2, 'b');
+insert into t2 values (3, 'c');
+# Check if this is indeed the old format
+insert into t2 values (4, 'c ');
+select col1 from t2;
+delete from t2 where id = 4;
+alter table t2 engine=rocksdb;
+select col1 from t2;
+# Check if this is indeed the new format
+--error ER_DUP_ENTRY
+insert into t2 values (4, 'c ');
+drop table t2;
+
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+ id int primary key,
+ col1 varchar(64) collate latin1_bin,
+ unique key (col1)
+) engine=rocksdb;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+
+insert into t2 values (1, 'a');
+insert into t2 values (2, 'b');
+insert into t2 values (3, 'c');
+# Check if this is indeed the old format
+insert into t2 values (4, 'c ');
+select col1 from t2;
+delete from t2 where id = 4;
+alter table t2 engine=rocksdb;
+select col1 from t2;
+# Check if this is indeed the new format
+--error ER_DUP_ENTRY
+insert into t2 values (4, 'c ');
+drop table t2;
+
+--echo #
+--echo # Check what happens when one tries to 'upgrade' to the new data format
+--echo # and causes a unique key violation:
+--echo #
+set session debug= '+d,MYROCKS_FORMAT_VERSION_INITIAL';
+create table t2 (
+ pk int not null primary key,
+ col1 varchar(64) collate latin1_bin,
+ col2 varchar(64),
+ unique key (col1)
+);
+
+insert into t2 values (1, 'a ', 'a-space');
+insert into t2 values (2, 'a', 'a');
+
+select * from t2;
+set session debug= '-d,MYROCKS_FORMAT_VERSION_INITIAL';
+
+--error ER_DUP_ENTRY
+alter table t2 engine=rocksdb;
+drop table t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc
new file mode 100644
index 00000000000..f813b8871b1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc
@@ -0,0 +1,84 @@
+--echo #
+--echo # Issue 257: Sort order for varchars is different between
+--echo # MyISAM/InnoDB vs MyRocks
+--echo #
+
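+# This file is sourced several times from the type_varchar tests; the caller
+# sets $character_set_collate to the character set/collation under test.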
+eval create table t1 (
+ pk varchar(64) $character_set_collate,
+ col1 varchar(64),
+ primary key (pk)
+);
+insert into t1 values ('a','a');
+--error ER_DUP_ENTRY
+insert into t1 values ('a ', 'a-space');
+
+insert into t1 values('b ', 'b-2x-space');
+--error ER_DUP_ENTRY
+insert into t1 values ('b', 'b');
+
+select pk, hex(pk), col1 from t1;
+
+insert into t1 values ('a\t', 'a-tab');
+insert into t1 values ('a \t', 'a-space-tab');
+select pk, hex(pk), col1 from t1 order by pk;
+
+--echo # Try longer values
+--error ER_DUP_ENTRY
+insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space');
+
+insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space');
+select * from t1;
+
+drop table t1;
+
+--echo # Secondary index
+eval create table t1 (
+ pk int not null primary key,
+ col1 varchar(64) $character_set_collate,
+ col2 varchar(64),
+ key (col1)
+);
+insert into t1 values (0, 'ab', 'a-b');
+insert into t1 values (1, 'a ', 'a-space');
+insert into t1 values (2, 'a', 'a');
+insert into t1 values (3, 'a \t', 'a-tab');
+
+--echo # Must show 'using index' for latin1_bin and utf8_bin:
+--replace_column 9 #
+explain
+select col1, hex(col1) from t1;
+select col1, hex(col1) from t1;
+
+--echo # Must show 'using index' for latin1_bin and utf8_bin:
+--replace_column 4 # 9 #
+explain
+select col1, hex(col1) from t1 where col1 < 'b';
+select col1, hex(col1) from t1 where col1 < 'b';
+
+delete from t1;
+insert into t1 values(10, '', 'empty');
+insert into t1 values(11, repeat(' ', 8), '8x-space');
+insert into t1 values(12, repeat(' ', 16), '16x-space');
+insert into t1 values(13, repeat(' ', 24), '24x-space');
+insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a');
+
+insert into t1 values(21, repeat(' ', 9), '9x-space');
+insert into t1 values(22, repeat(' ',17), '17x-space');
+insert into t1 values(23, repeat(' ',18), '18x-space');
+
+--replace_column 4 # 9 #
+explain
+select pk, col1, hex(col1), length(col1) from t1;
+select pk, col1, hex(col1), length(col1) from t1;
+drop table t1;
+
+create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb;
+insert into t1 values (1, concat('a', repeat(' ', 300)));
+insert into t1 values (2, concat('b', repeat(' ', 300)));
+
+select pk,length(a) from t1 force index(a) where a < 'zz';
+select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz';
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz';
+
+drop table t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test
new file mode 100644
index 00000000000..73465e7627a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test
@@ -0,0 +1,145 @@
+--source include/have_rocksdb.inc
+--source include/have_debug_sync.inc
+
+# For GitHub issue #167 -- Unique key check doesn't work
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+connect (con3, localhost, root,,);
+connection default;
+
+--disable_warnings
+set debug_sync='RESET';
+drop table if exists t1;
+--enable_warnings
+
+create table t1 (id int, value int, primary key (id)) engine=rocksdb;
+create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
+
+# 1) The second insert should be blocked at GetForUpdate(), then return a duplicate key error
+# after con1's commit
+connection con1;
+begin;
+insert into t1 values (1,1);
+
+connection con2;
+let $ID= `select connection_id()`;
+set session rocksdb_lock_wait_timeout=50;
+begin;
+send insert into t1 values (1,2);
+
+connection con1;
+let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST
+ where ID = $ID and STATE = "Waiting for row lock";
+--source include/wait_condition.inc
+commit;
+
+connection con2;
+--error ER_DUP_ENTRY
+reap;
+commit;
+select * from t1;
+truncate table t1;
+
+# 2) same as 1), but using a secondary unique key constraint
+connection con1;
+begin;
+insert into t2 values (1,1,1);
+
+connection con2;
+begin;
+send insert into t2 values (2,1,2);
+
+connection con1;
+--source include/wait_condition.inc
+commit;
+
+connection con2;
+--error ER_DUP_ENTRY
+reap;
+commit;
+select * from t2;
+truncate table t2;
+
+# 3) similar to 1) and 2), but rolled back
+connection con1;
+begin;
+insert into t1 values (1,1);
+
+connection con2;
+begin;
+send insert into t1 values (1,2);
+
+connection con1;
+--source include/wait_condition.inc
+rollback;
+
+connection con2;
+reap;
+commit;
+select * from t1;
+truncate table t1;
+
+connection con1;
+begin;
+insert into t2 values (1,1,1);
+
+connection con2;
+begin;
+send insert into t2 values (2,1,2);
+
+connection con1;
+--source include/wait_condition.inc
+rollback;
+
+connection con2;
+reap;
+commit;
+select * from t2;
+truncate table t2;
+
+
+# 4) simulating T1 GetForUpdate() -> T2 GetForUpdate(). T2 should fail with lock wait timeout.
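+# Both inserts pause at the sync point right after their unique checks pass but
+# before the rows are written, so con3's conflicting inserts below should time
+# out waiting for the row locks they hold.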
+connection con1;
+set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1';
+send insert into t1 values (1,1);
+
+connection con2;
+set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2';
+send insert into t2 values (1,1,1);
+
+connection default;
+set debug_sync='now WAIT_FOR parked1';
+set debug_sync='now WAIT_FOR parked2';
+
+connection con3;
+set session rocksdb_lock_wait_timeout=1;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values (1,2);
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t2 values (2,1,2);
+
+connection default;
+set debug_sync='now SIGNAL go1';
+set debug_sync='now SIGNAL go2';
+
+connection con1;
+reap;
+
+connection con2;
+reap;
+
+connection default;
+--error ER_DUP_ENTRY
+insert into t1 values (1,2);
+--error ER_DUP_ENTRY
+insert into t2 values (2,1,2);
+select * from t1;
+select * from t2;
+# Cleanup
+connection default;
+set debug_sync='RESET';
+disconnect con1;
+disconnect con2;
+disconnect con3;
+drop table t1, t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc
new file mode 100644
index 00000000000..2f11cd3b65a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc
@@ -0,0 +1,198 @@
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+
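+# ##CF## is a placeholder: unique_sec.test strips it with sed, while
+# unique_sec_rev_cf.test replaces it with COMMENT 'rev:cf' so that the same DDL
+# is exercised with the indexes in a reverse-ordered column family.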
+CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32),
+ id4 INT, id5 VARCHAR(32),
+ value1 INT, value2 INT, value3 VARCHAR(32),
+ PRIMARY KEY (id1, id2) ##CF##,
+ UNIQUE INDEX (id2, id1) ##CF##,
+ UNIQUE INDEX (id2, id3, id4) ##CF##,
+ INDEX (id1) ##CF##,
+ INDEX (id3, id1) ##CF##,
+ UNIQUE INDEX(id5) ##CF##,
+ INDEX (id2, id5)) ENGINE=ROCKSDB;
+
+--disable_query_log
+let $max = 10;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i, $i, $i, $i, $i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Test inserting a key that returns duplicate error
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11);
+
+--echo # Test updating a key that returns duplicate error
+--error ER_DUP_ENTRY
+UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2;
+--error ER_DUP_ENTRY
+UPDATE t1 SET id2=1, id3=1, id4=1;
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Test updating a key to itself
+UPDATE t1 set id2=id4;
+UPDATE t1 set id5=id3, value1=value2;
+UPDATE t1 set value3=value1;
+
+--echo # Test modifying values should not cause duplicates
+UPDATE t1 SET value1=value3+1;
+UPDATE t1 SET value3=value3 div 2;
+UPDATE t1 SET value2=value3;
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Test NULL values are considered unique
+INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20);
+INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20);
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Adding multiple rows where one of the rows fail the duplicate
+--echo # check should fail the whole statement
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23),
+ (24, 24, 24, 24, 24, 24, 24, 24),
+ (25, 10, 10, 10, 25, 25, 25, 25),
+ (26, 26, 26, 26, 26, 26, 26, 26);
+SELECT COUNT(*) FROM t1;
+
+# Test that open transactions can prevent duplicate keys
+connection con1;
+BEGIN;
+INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30);
+
+connection con2;
+BEGIN;
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Primary key should prevent duplicate on insert
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30);
+
+--echo # Primary key should prevent duplicate on update
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET id1=30, id2=31 WHERE id2=10;
+
+--echo # Unique secondary key should prevent duplicate on insert
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30);
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32);
+
+--echo # Unique secondary key should prevent duplicate on update
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET id5=34 WHERE id2=8;
+
+--echo # Adding multiple rows where one of the rows fail the duplicate
+--echo # check should fail the whole statement
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+ (36, 36, 36, 36, 36, 36, 36, 36),
+ (37, 31, 32, 33, 37, 37, 37, 37),
+ (38, 38, 38, 38, 38, 38, 38, 38);
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35),
+ (36, 36, 36, 36, 36, 36, 36, 36),
+ (37, 37, 37, 37, 34, 37, 37, 37),
+ (38, 38, 38, 38, 38, 38, 38, 38);
+
+--echo # NULL values are unique and duplicates in value fields are ignored
+INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37),
+ (38, 31, 32, NULL, 38, 37, 37, 37),
+ (39, 31, 32, NULL, 39, 37, 37, 37);
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Fail on duplicate key update for row added in our transaction
+--error ER_DUP_ENTRY
+UPDATE t1 SET id5=37 WHERE id1=38;
+
+--echo # Fail on lock timeout for row modified in another transaction
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t1 SET id5=34 WHERE id1=38;
+
+--echo # NULL values are unique
+UPDATE t1 SET id5=NULL WHERE value1 > 37;
+
+connection con1;
+COMMIT;
+
+connection con2;
+COMMIT;
+
+# Test that the transaction reads the latest data
+connection con2;
+BEGIN;
+SELECT COUNT(*) FROM t1;
+
+connection con1;
+BEGIN;
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+
+connection con2;
+--echo # When transaction is pending, fail on lock acquisition
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+
+SELECT COUNT(*) FROM t1;
+
+connection con1;
+COMMIT;
+
+connection con2;
+--echo # When transaction is committed, fail on duplicate key
+--error ER_DUP_ENTRY,ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40);
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40);
+
+ROLLBACK;
+
+SELECT * FROM t1;
+
+disconnect con1;
+disconnect con2;
+
+connection default;
+DROP TABLE t1;
+
+# Wait till we reach the initial number of concurrent sessions
+--source include/wait_until_count_sessions.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test
new file mode 100644
index 00000000000..28b52f262cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test
@@ -0,0 +1,33 @@
+--source include/have_rocksdb.inc
+
+let ddl= $MYSQL_TMP_DIR/unique_sec.sql;
+--exec sed s/##CF##//g suite/rocksdb/t/unique_sec.inc > $ddl
+--source $ddl
+
+--echo #
+--echo # Issue #88: Creating unique index over column with duplicate values succeeds
+--echo #
+create table t1 (pk int primary key, a int) engine=rocksdb;
+
+insert into t1 values
+(1, 1),
+(2, 2),
+(3, 3),
+(4, 1),
+(5, 5);
+
+--error ER_DUP_ENTRY
+alter table t1 add unique(a);
+drop table t1;
+
+--echo #
+--echo # Issue #111
+--echo #
+CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16;
+
+INSERT INTO t2 VALUES (1,1);
+--error ER_DUP_ENTRY
+INSERT INTO t2 VALUES (1,1);
+--error ER_DUP_ENTRY
+INSERT INTO t2 VALUES (2,1);
+DROP TABLE t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test
new file mode 100644
index 00000000000..d6a8e3d5a1b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test
@@ -0,0 +1,5 @@
+--source include/have_rocksdb.inc
+
+let ddl= $MYSQL_TMP_DIR/unique_sec_rev_cf.sql;
+--exec sed s/##CF##/" COMMENT 'rev:cf'"/g suite/rocksdb/t/unique_sec.inc > $ddl
+--source $ddl
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test
new file mode 100644
index 00000000000..0c7df020ef5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test
@@ -0,0 +1,25 @@
+--source include/have_rocksdb.inc
+
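+# READ UNCOMMITTED and SERIALIZABLE are not supported by MyRocks, so the
+# statements below are expected to fail with an error.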
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+create table t1 (id int primary key, value int) engine=rocksdb;
+insert into t1 values (1,1);
+
+SET session transaction isolation level read uncommitted;
+begin;
+--error ER_UNKNOWN_ERROR
+insert into t1 values (2,1);
+--error ER_UNKNOWN_ERROR
+select * from t1 where id=1;
+rollback;
+
+SET session transaction isolation level serializable;
+begin;
+--error ER_UNKNOWN_ERROR
+insert into t1 values (2,1);
+--error ER_UNKNOWN_ERROR
+select * from t1 where id=1;
+rollback;
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update.test b/storage/rocksdb/mysql-test/rocksdb/t/update.test
new file mode 100644
index 00000000000..e0a49ee8ca5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update.test
@@ -0,0 +1,72 @@
+--source include/have_rocksdb.inc
+
+#
+# Basic UPDATE statements.
+# UPDATE LOW_PRIORITY is covered in update_low_prio test
+# UPDATE IGNORE is covered in update_ignore test
+# Multi-table update is covered in update_multi test
+#
+
+########################################
+# TODO:
+# The results of the transactional part
+# are unusual due to "can't-see-own-changes"
+########################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+UPDATE t1 SET a=a+100;
+--sorted_result
+SELECT a,b FROM t1;
+
+UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100;
+--sorted_result
+SELECT a,b FROM t1;
+
+# ORDER BY and LIMIT
+UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1;
+--sorted_result
+SELECT a,b FROM t1;
+
+DROP TABLE t1;
+
+#
+# Transactional UPDATE
+#
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+BEGIN;
+UPDATE t1 SET a=a+100;
+UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100;
+COMMIT;
+--sorted_result
+SELECT * FROM t1 ORDER BY pk;
+
+BEGIN;
+UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3;
+UPDATE t1 SET b = '';
+ROLLBACK;
+SELECT * FROM t1 ORDER BY pk;
+
+BEGIN;
+UPDATE t1 SET b = 'update2' WHERE a <= 100;
+SAVEPOINT spt1;
+UPDATE t1 SET b = '';
+--error ER_UNKNOWN_ERROR
+ROLLBACK TO SAVEPOINT spt1;
+UPDATE t1 SET b = 'upd' WHERE a = 10050;
+--error ER_UNKNOWN_ERROR
+COMMIT;
+SELECT * FROM t1 ORDER BY pk;
+
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt
new file mode 100644
index 00000000000..ba9364e1523
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt
@@ -0,0 +1 @@
+--rocksdb_debug_optimizer_n_rows=1000
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test
new file mode 100644
index 00000000000..0208e1b3563
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test
@@ -0,0 +1,35 @@
+--source include/have_rocksdb.inc
+
+#
+# UPDATE IGNORE
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb;
+
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+
+UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1;
+SELECT a,b FROM t1 ORDER BY pk;
+
+--error ER_SUBQUERY_NO_1_ROW
+UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b'
+ WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+
+UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b'
+ WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 );
+
+SELECT a,b FROM t1 ORDER BY pk;
+SELECT c,d FROM t2 ORDER BY pk;
+
+# Cleanup
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test
new file mode 100644
index 00000000000..1cb2f12804d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test
@@ -0,0 +1,15 @@
+--source include/have_rocksdb.inc
+
+#
+# Multi-table UPDATE statements.
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1,t2;
+--enable_warnings
+
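+# Run the multi-table UPDATE scenario ten times; each iteration is defined in
+# update_multi_exec.inc.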
+let $i= 0;
+while ($i < 10) {
+ --source update_multi_exec.inc
+ inc $i;
+}
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc
new file mode 100644
index 00000000000..40d736693c6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc
@@ -0,0 +1,27 @@
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES
+(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+INSERT INTO t1 (a,b) SELECT a, b FROM t1;
+
+CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT
+PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t2 (c,d) SELECT b, a FROM t1;
+
+UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi'
+WHERE c < b AND a + d != 1;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT
+WHERE c = 'foobar' and b = c;
+--sorted_result
+SELECT a,b FROM t1;
+--sorted_result
+SELECT c,d FROM t2;
+
+# Cleanup
+DROP TABLE t1, t2;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test
new file mode 100644
index 00000000000..c53eb9be85c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test
@@ -0,0 +1,78 @@
+--source include/have_rocksdb.inc
+
+#
+# UPDATE statements for tables with keys
+#
+
+#############################################
+# TODO:
+# The test doesn't work quite as expected,
+# apparently due to "can't see own changes"
+#############################################
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z');
+UPDATE t1 SET a=100, b='f' WHERE b IN ('b','c');
+UPDATE t1 SET b='m' WHERE b = 'f';
+UPDATE t1 SET b='z' WHERE a < 2;
+UPDATE t1 SET b='';
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+--echo # RocksDB: skip the test for secondary UNIQUE keys.
+--disable_parsing
+--error ER_GET_ERRMSG
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=innodb;
+
+
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a');
+UPDATE t1 SET a=a+200;
+UPDATE t1 SET a=0 WHERE a > 250;
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=205 WHERE a=200;
+UPDATE t1 SET a=12345 ORDER BY a, b LIMIT 1;
+--sorted_result
+SELECT a,b FROM t1;
+
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=80 WHERE a IN (202,203);
+--sorted_result
+SELECT a,b FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'a'),(6,'f');
+UPDATE t1 SET a=6 WHERE a=3;
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=100 WHERE a=1;
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=4, b='d' WHERE b='f';
+UPDATE t1 SET a=a+1;
+--sorted_result
+SELECT a,b FROM t1;
+--error ER_DUP_ENTRY
+UPDATE t1 SET b='z';
+DROP TABLE t1;
+
+--enable_parsing
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
+INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a');
+UPDATE t1 SET a=a+200;
+UPDATE t1 SET a=0 WHERE a > 250;
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=205 WHERE a=200;
+UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1;
+
+--sorted_result
+SELECT a,b FROM t1;
+
+--error ER_DUP_ENTRY
+UPDATE t1 SET a=80 WHERE a IN (202,203);
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test
new file mode 100644
index 00000000000..b34c85eb4c4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test
@@ -0,0 +1,102 @@
+--source include/have_rocksdb.inc
+
+#
+# Validate that the server starts when everything is okay, but detects errors
+# if a table exists in the data dictionary but not as an .frm or vice versa.
+# The default mode causes these failures to keep the server from starting, but
+# this is problematic for the test as a server start failure is not easily
+# trappable. Instead use the mode where it will detect the problem and report
+# it in the log but still start: --rocksdb_validate_tables=2
+#
+
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
+CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4;
+
+# Write file to make mysql-test-run.pl expect the "crash", but don't restart the
+# server until it is told to
+--let $_server_id= `SELECT @@server_id`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--exec echo "wait" >$_expect_file_name
+
+# Send shutdown to the connected server and give it 10 seconds to die before
+# zapping it
+shutdown_server 10;
+
+# Write file to make mysql-test-run.pl start up the server again
+--exec echo "restart" >$_expect_file_name
+--sleep 5
+
+# Turn on reconnect
+--enable_reconnect
+
+# Call script that will poll the server waiting for it to be back online again
+--source include/wait_until_connected_again.inc
+
+# Turn off reconnect again
+--disable_reconnect
+
+# Now shut down again and rename the .frm files out of the way
+--exec echo "wait" >$_expect_file_name
+shutdown_server 10;
+
+# Clear the log
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+
+# Rename the file
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp
+
+# Attempt to restart the server
+--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name
+--sleep 5
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
+
+# We should now have an error message
+--exec echo "Expect errors that we are missing two .frm files"
+--exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+# Now shut down again, rename the .frm files back, and make an extra copy of one of them
+--exec echo "wait" >$_expect_file_name
+shutdown_server 10;
+
+# Clear the log
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+
+# Rename the file
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm
+--exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm
+--exec cp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm
+
+# Attempt to restart the server
+--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name
+--sleep 5
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
+
+# We should now have an error message for the second case
+--exec echo "Expect an error that we have an extra .frm file"
+--exec grep "Schema mismatch" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
+
+# Shut down and clean up
+--exec echo "wait" >$_expect_file_name
+shutdown_server 10;
+--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
+--exec rm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm
+--exec echo "restart" >$_expect_file_name
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
+
+--disable_warnings
+DROP TABLE t1, t2;
+--enable_warnings
+
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test
new file mode 100644
index 00000000000..84a85fab32d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test
@@ -0,0 +1,42 @@
+--source include/have_rocksdb.inc
+
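+# Observe the rocksdb_wal_synced status counter around single-row inserts with
+# rocksdb_write_sync set to off, then on, and finally with
+# rocksdb_background_sync enabled.
+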
+SET GLOBAL rocksdb_write_disable_wal=false;
+SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
+
+create table aaa (id int primary key, i int) engine rocksdb;
+
+SET LOCAL rocksdb_write_sync=off;
+--exec sleep 30
+select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(1,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(2,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(3,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+
+SET LOCAL rocksdb_write_sync=1;
+insert aaa(id, i) values(4,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(5,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+insert aaa(id, i) values(6,1);
+select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
+
+SET GLOBAL rocksdb_background_sync=on;
+SET LOCAL rocksdb_write_sync=off;
+insert aaa(id, i) values(7,1);
+
+let $status_var=rocksdb_wal_synced;
+let $status_var_value=`select @a+4`;
+source include/wait_for_status_var.inc;
+
+truncate table aaa;
+
+# Cleanup
+drop table aaa;
+SET GLOBAL rocksdb_write_sync=off;
+SET GLOBAL rocksdb_write_disable_wal=false;
+SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
+SET GLOBAL rocksdb_background_sync=off;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf
new file mode 100644
index 00000000000..101dbce2385
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf
@@ -0,0 +1,25 @@
+# Use default settings for mysqld processes
+!include include/default_mysqld.cnf
+!include include/default_client.cnf
+
+[mysqld.1]
+rocksdb
+default-storage-engine=rocksdb
+skip-innodb
+default-tmp-storage-engine=MyISAM
+binlog_format=row
+
+[mysqld.2]
+rocksdb
+default-storage-engine=rocksdb
+skip-innodb
+default-tmp-storage-engine=MyISAM
+binlog_format=row
+
+[ENV]
+MASTER_MYPORT= @mysqld.1.port
+MASTER_MYSOCK= @mysqld.1.socket
+
+SLAVE_MYPORT= @mysqld.2.port
+SLAVE_MYSOCK= @mysqld.2.socket
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc
new file mode 100644
index 00000000000..947bf0270e2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc
@@ -0,0 +1,3 @@
+
+--source include/rpl_end.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh
new file mode 100755
index 00000000000..f3836ab75e5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh
@@ -0,0 +1,43 @@
+set -e
+
+# Insert 100 batches of 100 records each into a table with the following schema:
+# create table db1.t1 (
+# `id` int(10) not null auto_increment,
+# `k` int(10),
+# `data` varchar(2048),
+# primary key (`id`),
+# key (`k`)
+# ) engine=rocksdb;
+
+MAX_INSERTS=100
+MAX_ROWS_PER_INSERT=100
+
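+# Issue $MAX_INSERTS multi-row INSERT statements against server 1, each with
+# $MAX_ROWS_PER_INSERT rows of random data.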
+insertData() {
+ for ((i=1; i<=$MAX_INSERTS; i++));
+ do
+ stmt='INSERT INTO db1.t1 values'
+ for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++));
+ do
+ k=$RANDOM
+ data=$(head -c 2048 /dev/urandom|tr -cd 'a-zA-Z0-9')
+ stmt=$stmt' (NULL, '$k', "'$data'")'
+ if [ $j -lt $MAX_ROWS_PER_INSERT ]; then
+ stmt=$stmt','
+ fi
+ done
+ stmt=$stmt';'
+ $MYSQL --defaults-group-suffix=.1 -e "$stmt"
+ done
+}
+
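+# Run the inserts from $NUM_PARALLEL_INSERTS concurrent workers and wait for
+# all of them to finish.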
+NUM_PARALLEL_INSERTS=25
+pids=()
+for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++));
+do
+ insertData &
+ pids+=($!)
+done
+for ((k=0; k<$NUM_PARALLEL_INSERTS; k++));
+do
+ wait ${pids[k]}
+done
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh
new file mode 100755
index 00000000000..a4e4afab9d4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh
@@ -0,0 +1,9 @@
+set -e
+
+# Initially loads a chunk of data, then starts loading another chunk of data
+# while simultaneously running a backup.
+
+suite/rocksdb_hotbackup/include/load_data.sh 2>&1
+suite/rocksdb_hotbackup/include/load_data.sh 2>&1 &
+suite/rocksdb_hotbackup/include/stream_run.sh 2>&1
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc
new file mode 100644
index 00000000000..26c3f2ce7f1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc
@@ -0,0 +1,16 @@
+--let $rpl_server_count= 2
+--let $rpl_topology= none
+--source include/rpl_init.inc
+--source include/rpl_default_connections.inc
+
+connection server_1;
+create database db1;
+
+create table db1.t1 (
+ `id` int(10) not null auto_increment,
+ `k` int(10),
+ `data` varchar(2048),
+ primary key (`id`),
+ key (`k`)
+) engine=rocksdb;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh
new file mode 100755
index 00000000000..18e1feeda96
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh
@@ -0,0 +1,20 @@
+set -e
+
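+# Parse the last reported binlog file/position from server 2's error log, ask
+# server 1 which GTIDs were executed up to that point, and configure server 2
+# to replicate from server 1 starting from that GTID set.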
+binlog_line=($(grep -o "Last binlog file position [0-9]*, file name .*\.[0-9]*" ${MYSQLTEST_VARDIR}/log/mysqld.2.err | tail -1))
+binlog_pos=${binlog_line[4]%?}
+binlog_file=${binlog_line[7]}
+
+sql="show gtid_executed in '$binlog_file' from $binlog_pos"
+result=($($MYSQL --defaults-group-suffix=.1 -e "$sql"))
+gtid_executed=${result[1]}
+
+sql="reset master;"
+sql="$sql reset slave;"
+sql="$sql change master to master_host='127.0.0.1', master_port=${MASTER_MYPORT}, master_user='root', master_auto_position=1, master_connect_retry=1;"
+sql="$sql set global gtid_purged='$gtid_executed';"
+sql="$sql start slave;"
+sql="$sql stop slave;"
+sql="$sql change master to master_auto_position=0;"
+sql="$sql start slave;"
+$MYSQL --defaults-group-suffix=.2 -e "$sql"
+echo "$sql" > ${MYSQL_TMP_DIR}/gtid_stmt
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc
new file mode 100644
index 00000000000..75dc31964da
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc
@@ -0,0 +1,4 @@
+--exec suite/rocksdb_hotbackup/include/setup_replication_gtid.sh
+
+let $slave_sync_timeout = 1800;
+source include/wait_for_slave_to_sync_with_master.inc;
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh
new file mode 100755
index 00000000000..b83b957cff0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh
@@ -0,0 +1,71 @@
+if [ "$STREAM_TYPE" == 'wdt' ]; then
+ which wdt >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ # change to tar if wdt is not installed
+ STREAM_TYPE='tar'
+ fi
+fi
+
+set -e
+
+# Takes a full backup from server_1 to server_2
+# using myrocks_hotbackup streaming
+
+checkpoint_dir="${MYSQLTEST_VARDIR}/checkpoint"
+backup_dir="${MYSQLTEST_VARDIR}/backup"
+dest_data_dir="${MYSQLTEST_VARDIR}/mysqld.2/data/"
+
+mysql_dir=$(echo $MYSQL | awk '{print $1}' | xargs dirname)
+PATH=$mysql_dir:$PATH
+
+mkdir -p $checkpoint_dir
+rm -rf $checkpoint_dir/*
+
+mkdir -p $backup_dir
+rm -rf $backup_dir/*
+# delete and recreate the dest dir to make sure all hidden files
+# and directories (such as .rocksdb) are blown away
+rm -rf $dest_data_dir/
+mkdir $dest_data_dir
+
+COPY_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_copy_log"
+
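+# Build the myrocks_hotbackup command for the requested stream type: tar and
+# xbstream streams are unpacked into $backup_dir, while wdt writes there
+# directly.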
+if [ "$STREAM_TYPE" == 'tar' ]; then
+ BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \
+ --stream=tar --checkpoint_dir=$checkpoint_dir 2> \
+ $COPY_LOG | tar -xi -C $backup_dir"
+elif [ "$STREAM_TYPE" == 'xbstream' ]; then
+ BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \
+ --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \
+ $COPY_LOG | xbstream -x \
+ --directory=$backup_dir"
+else
+ BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --stream=wdt \
+ --port=${MASTER_MYPORT} --destination=localhost --backup_dir=$backup_dir \
+ --avg_mbytes_per_sec=10 --interval=5 \
+ --extra_wdt_sender_options='--block_size_mbytes=1' \
+ --checkpoint_dir=$checkpoint_dir 2> \
+ $COPY_LOG"
+fi
+
+echo "myrocks_hotbackup copy phase"
+eval "$BACKUP_CMD"
+if [ $? -ne 0 ]; then
+ tail $COPY_LOG
+ exit 1
+fi
+
+mkdir ${backup_dir}/test # TODO: Fix skipping empty directories
+
+MOVEBACK_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_moveback_log"
+
+echo "myrocks_hotbackup move-back phase"
+$MYSQL_MYROCKS_HOTBACKUP --move_back --datadir=$dest_data_dir \
+ --rocksdb_datadir=$dest_data_dir/.rocksdb \
+ --rocksdb_waldir=$dest_data_dir/.rocksdb \
+ --backup_dir=$backup_dir > $MOVEBACK_LOG 2>&1
+
+if [ $? -ne 0 ]; then
+ tail $MOVEBACK_LOG
+ exit 1
+fi
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf
new file mode 100644
index 00000000000..bd9af04c813
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf
@@ -0,0 +1,2 @@
+# Use settings from base.cnf
+!include base.cnf
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result
new file mode 100644
index 00000000000..6cec6ca5d69
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result
@@ -0,0 +1,23 @@
+include/rpl_init.inc [topology=none]
+include/rpl_default_connections.inc
+create database db1;
+create table db1.t1 (
+`id` int(10) not null auto_increment,
+`k` int(10),
+`data` varchar(2048),
+primary key (`id`),
+key (`k`)
+) engine=rocksdb;
+include/rpl_stop_server.inc [server_number=2]
+myrocks_hotbackup copy phase
+myrocks_hotbackup move-back phase
+include/rpl_start_server.inc [server_number=2]
+stop slave;
+start slave;
+select count(*) from db1.t1;
+count(*)
+500000
+drop database db1;
+stop slave;
+reset slave;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result
new file mode 100644
index 00000000000..d3f2ebc4e6f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result
@@ -0,0 +1,20 @@
+include/rpl_init.inc [topology=none]
+include/rpl_default_connections.inc
+create database db1;
+create table db1.t1 (
+`id` int(10) not null auto_increment,
+`k` int(10),
+`data` varchar(2048),
+primary key (`id`),
+key (`k`)
+) engine=rocksdb;
+include/rpl_stop_server.inc [server_number=2]
+myrocks_hotbackup copy phase
+myrocks_hotbackup move-back phase
+include/rpl_start_server.inc [server_number=2]
+select count(*) from db1.t1;
+count(*)
+250000
+drop database db1;
+drop database db1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result
new file mode 100644
index 00000000000..d3f2ebc4e6f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result
@@ -0,0 +1,20 @@
+include/rpl_init.inc [topology=none]
+include/rpl_default_connections.inc
+create database db1;
+create table db1.t1 (
+`id` int(10) not null auto_increment,
+`k` int(10),
+`data` varchar(2048),
+primary key (`id`),
+key (`k`)
+) engine=rocksdb;
+include/rpl_stop_server.inc [server_number=2]
+myrocks_hotbackup copy phase
+myrocks_hotbackup move-back phase
+include/rpl_start_server.inc [server_number=2]
+select count(*) from db1.t1;
+count(*)
+250000
+drop database db1;
+drop database db1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result
new file mode 100644
index 00000000000..d3f2ebc4e6f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result
@@ -0,0 +1,20 @@
+include/rpl_init.inc [topology=none]
+include/rpl_default_connections.inc
+create database db1;
+create table db1.t1 (
+`id` int(10) not null auto_increment,
+`k` int(10),
+`data` varchar(2048),
+primary key (`id`),
+key (`k`)
+) engine=rocksdb;
+include/rpl_stop_server.inc [server_number=2]
+myrocks_hotbackup copy phase
+myrocks_hotbackup move-back phase
+include/rpl_start_server.inc [server_number=2]
+select count(*) from db1.t1;
+count(*)
+250000
+drop database db1;
+drop database db1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt
new file mode 100644
index 00000000000..9d7af67eec9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt
@@ -0,0 +1 @@
+--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt
new file mode 100644
index 00000000000..9d7af67eec9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt
@@ -0,0 +1 @@
+--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test
new file mode 100644
index 00000000000..f9d58da093e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test
@@ -0,0 +1,47 @@
+
+source suite/rocksdb_hotbackup/include/setup.inc;
+
+--let $rpl_server_number= 2
+--source include/rpl_stop_server.inc
+
+--exec suite/rocksdb_hotbackup/include/load_data_and_run.sh 2>&1
+
+--let $rpl_server_number= 2
+--source include/rpl_start_server.inc
+
+connection server_2;
+let $num_rows = `select count(*) from db1.t1`;
+let $max_id = `select id from db1.t1 order by id desc limit 1`;
+
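+# Sanity-check the restored data: the row count should equal the highest id and
+# fall between the sizes of one and two load_data.sh runs (250000 and 500000).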
+if($num_rows != $max_id) {
+  echo Number of rows($num_rows) and last_id($max_id) do not match;
+}
+if($num_rows < 250000) {
+ echo Number of rows($num_rows) is less than 250000;
+}
+if($num_rows > 500000) {
+ echo Number of rows($num_rows) is more than 500000;
+}
+
+--source suite/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc
+
+connection server_2;
+select count(*) from db1.t1;
+
+connection server_1;
+let $checksum1 = `checksum tables db1.t1`;
+connection server_2;
+let $checksum2 = `checksum tables db1.t1`;
+
+if($checksum1 != $checksum2) {
+ echo Checksums ($checksum1 and $checksum2) do not match;
+}
+
+connection server_1;
+drop database db1;
+sync_slave_with_master;
+connection server_2;
+stop slave;
+reset slave;
+
+source suite/rocksdb_hotbackup/include/cleanup.inc;
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test
new file mode 100644
index 00000000000..2b999f3fce7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test
@@ -0,0 +1,22 @@
+
+source suite/rocksdb_hotbackup/include/setup.inc;
+
+--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1
+--let $rpl_server_number= 2
+--source include/rpl_stop_server.inc
+
+--exec STREAM_TYPE=tar suite/rocksdb_hotbackup/include/stream_run.sh 2>&1
+
+--let $rpl_server_number= 2
+--source include/rpl_start_server.inc
+
+connection server_2;
+select count(*) from db1.t1;
+
+connection server_1;
+drop database db1;
+connection server_2;
+drop database db1;
+
+source suite/rocksdb_hotbackup/include/cleanup.inc;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test
new file mode 100644
index 00000000000..2d2ed89112b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test
@@ -0,0 +1,22 @@
+
+source suite/rocksdb_hotbackup/include/setup.inc;
+
+--let $rpl_server_number= 2
+--source include/rpl_stop_server.inc
+
+--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1
+--exec STREAM_TYPE=wdt suite/rocksdb_hotbackup/include/stream_run.sh 2>&1
+
+--let $rpl_server_number= 2
+--source include/rpl_start_server.inc
+
+connection server_2;
+select count(*) from db1.t1;
+
+connection server_1;
+drop database db1;
+connection server_2;
+drop database db1;
+
+source suite/rocksdb_hotbackup/include/cleanup.inc;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test
new file mode 100644
index 00000000000..9bfab4252c4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test
@@ -0,0 +1,22 @@
+
+source suite/rocksdb_hotbackup/include/setup.inc;
+
+--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1
+--let $rpl_server_number= 2
+--source include/rpl_stop_server.inc
+
+--exec STREAM_TYPE=xbstream suite/rocksdb_hotbackup/include/stream_run.sh 2>&1
+
+--let $rpl_server_number= 2
+--source include/rpl_start_server.inc
+
+connection server_2;
+select count(*) from db1.t1;
+
+connection server_1;
+drop database db1;
+connection server_2;
+drop database db1;
+
+source suite/rocksdb_hotbackup/include/cleanup.inc;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations
new file mode 100644
index 00000000000..f09d338c357
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations
@@ -0,0 +1,2 @@
+[row]
+binlog-format=row
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc
new file mode 100644
index 00000000000..8f03c16e2f1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc
@@ -0,0 +1,71 @@
+--source include/master-slave.inc
+--source include/have_binlog_format_row.inc
+--source include/not_embedded.inc
+--source include/not_valgrind.inc
+
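+# Ensure skip_unique_check kicks in once slave lag exceeds the threshold:
+# duplicates created directly on the slave otherwise fail the applier with
+# ER_DUP_ENTRY (1062).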
+call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1");
+call mtr.add_suppression(".*Worker.*failed executing transaction");
+call mtr.add_suppression(".*The slave coordinator and worker threads are stopped");
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+connection master;
+CREATE TABLE t1 (id int primary key, value int) engine=RocksDB;
+sync_slave_with_master;
+--let $rsbm = query_get_value(select @@global.reset_seconds_behind_master, @@global.reset_seconds_behind_master, 1)
+set global reset_seconds_behind_master=1;
+
+connection slave;
+INSERT INTO t1 VALUES(1, 0);
+INSERT INTO t1 VALUES(2, 0);
+INSERT INTO t1 VALUES(3, 0);
+
+connection master;
+sync_slave_with_master;
+connection master;
+INSERT INTO t1 VALUES(1, 1);
+
+connection slave;
+--let $slave_sql_errno= 1062
+--let $not_switch_connection= 0
+--source include/wait_for_slave_sql_error_and_skip.inc
+set global reset_seconds_behind_master=0;
+--source include/stop_slave_io.inc
+
+connection master;
+INSERT INTO t1 values (4,0);
+--sleep 11
+INSERT INTO t1 VALUES(2, 1);
+
+connection slave;
+--source include/start_slave_io.inc
+
+connection master;
+sync_slave_with_master;
+
+connection slave;
+set global reset_seconds_behind_master=1;
+
+connection master;
+insert into t1 values (5,0);
+--sleep 1
+sync_slave_with_master;
+
+connection master;
+INSERT INTO t1 VALUES(3, 1);
+
+connection slave;
+--let $slave_sql_errno= 1062
+--let $not_switch_connection= 0
+--source include/wait_for_slave_sql_error_and_skip.inc
+
+--echo #
+--echo # Cleanup
+--echo #
+
+connection master;
+DROP TABLE t1;
+eval set global reset_seconds_behind_master=$rsbm;
+--source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result
new file mode 100644
index 00000000000..31777c45c68
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result
@@ -0,0 +1,68 @@
+DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
+create table i1 (id int primary key , value int) engine=innodb;
+create table r1 (id int primary key , value int) engine=rocksdb;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection con2;
+insert into i1 values (1,1);
+insert into r1 values (1,1);
+connection con1;
+select * from i1;
+id value
+select * from r1;
+id value
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 1115 uuid:1-5
+connection con2;
+insert into i1 values (2,2);
+insert into r1 values (2,2);
+connection con1;
+select * from i1;
+id value
+1 1
+2 2
+select * from r1;
+id value
+1 1
+connection con2;
+insert into i1 values (3,2);
+insert into r1 values (3,2);
+connection con1;
+select * from i1;
+id value
+1 1
+2 2
+select * from r1;
+id value
+1 1
+START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 2015 uuid:1-9
+connection con2;
+insert into r1 values (4,4);
+connection con1;
+select * from r1;
+id value
+1 1
+2 2
+3 2
+4 4
+connection con2;
+insert into r1 values (5,5);
+connection con1;
+select * from r1;
+id value
+1 1
+2 2
+3 2
+4 4
+drop table i1;
+drop table r1;
+connection default;
+disconnect con1;
+disconnect con2;
+reset master;
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result
new file mode 100644
index 00000000000..71c0d6d5dbf
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result
@@ -0,0 +1,27 @@
+DROP TABLE IF EXISTS t1;
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb;
+'con1'
+SET SESSION debug="d,crash_commit_after_log";
+SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go';
+insert into t1 values (1, 1, "iamtheogthealphaandomega");;
+'con2'
+insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush");
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+SET GLOBAL ROCKSDB_WRITE_SYNC = OFF;
+SET GLOBAL SYNC_BINLOG = 0;
+SET DEBUG_SYNC='now WAIT_FOR parked';
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+SET GLOBAL ROCKSDB_WRITE_SYNC = ON;
+SET GLOBAL SYNC_BINLOG = 1;
+insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush");
+SET DEBUG_SYNC='now SIGNAL go';
+**found 'prepare' log entry**
+**found 'commit' log entry**
+select * from t1 where a=1;
+a b c
+1 1 iamtheogthealphaandomega
+select count(*) from t1;
+count(*)
+1000000
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result
new file mode 100644
index 00000000000..6d061e99846
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result
@@ -0,0 +1,135 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists x;
+select @@binlog_format;
+@@binlog_format
+ROW
+create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into x values (1,1,1);
+insert into x values (2,1,1);
+insert into x values (3,1,1);
+insert into x values (4,1,1);
+insert into x values (5,1,1);
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+
+--- slave state before crash ---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+include/rpl_start_server.inc [server_number=2]
+
+--- slave state after crash recovery, slave stop, one transaction recovered---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+
+--- slave state after restart, slave start ---
+include/start_slave.inc
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+insert into x values (6,1,1);
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+insert into x values (7,1,1);
+insert into x values (8,1,1);
+insert into x values (9,1,1);
+insert into x values (10,1,1);
+insert into x values (11,1,1);
+insert into x values (12,1,1);
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+10 1 1
+11 1 1
+12 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+include/rpl_start_server.inc [server_number=2]
+
+--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 ---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+include/start_slave.inc
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+10 1 1
+11 1 1
+12 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+drop table x;
+include/rpl_end.inc
+Binlog Info Found
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result
new file mode 100644
index 00000000000..352ceff236c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result
@@ -0,0 +1,361 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression("Recovery from master pos");
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 mtr uuid:1
+2 test uuid:4
+SET GLOBAL debug = '+d,crash_before_update_pos';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 mtr uuid:1
+2 test uuid:4
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 mtr uuid:1
+2 test uuid:7
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_after_update_pos_before_apply';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_before_writing_xid';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,half_binlogged_transaction';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_commit_before';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_commit_after_log';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_commit_after_prepare';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_reset.inc
+create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB;
+insert into t1 values(1);
+insert into t1 values(2);
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+SET GLOBAL debug = '+d,crash_commit_after';
+insert into t1 values(3);
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:3
+use test;
+select * from t1;
+a
+1
+2
+change master to master_auto_position = 1;
+include/start_slave.inc
+rename table t1 to test1;
+use test;
+select * from test1;
+a
+1
+2
+3
+use test;
+select * from test1;
+a
+1
+2
+3
+drop table test1;
+include/stop_slave.inc
+change master to master_auto_position = 0;
+include/start_slave.inc
+use mysql;
+select * from slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result
new file mode 100644
index 00000000000..e765e338cb5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result
@@ -0,0 +1,140 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+drop table if exists x;
+select @@binlog_format;
+@@binlog_format
+ROW
+create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into x values (1,1,1);
+insert into x values (2,1,1);
+insert into x values (3,1,1);
+insert into x values (4,1,1);
+insert into x values (5,1,1);
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-7
+
+--- slave state before crash ---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-7
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:7
+include/rpl_start_server.inc [server_number=2]
+
+--- slave state after crash recovery, slave stop, one transaction recovered---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-6
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:6
+
+--- slave state after restart, slave start ---
+include/start_slave.inc
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-7
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:7
+insert into x values (6,1,1);
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-8
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:8
+insert into x values (7,1,1);
+insert into x values (8,1,1);
+insert into x values (9,1,1);
+insert into x values (10,1,1);
+insert into x values (11,1,1);
+insert into x values (12,1,1);
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+10 1 1
+11 1 1
+12 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-14
+include/rpl_start_server.inc [server_number=2]
+
+--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 ---
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+include/start_slave.inc
+select * from x;
+id value value2
+1 1 1
+2 1 1
+3 1 1
+4 1 1
+5 1 1
+6 1 1
+7 1 1
+8 1 1
+9 1 1
+10 1 1
+11 1 1
+12 1 1
+select @@global.gtid_executed;
+@@global.gtid_executed
+uuid:1-14
+select * from mysql.slave_gtid_info;
+Id Database_name Last_gtid
+1 test uuid:14
+drop table x;
+include/rpl_end.inc
+Binlog Info Found
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result
new file mode 100644
index 00000000000..b2703ee0cbb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result
@@ -0,0 +1,16 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+create table t1 (a int primary key) engine=rocksdb;
+insert into t1 values(1);
+SET GLOBAL debug = '+d,crash_before_writing_xid';
+insert into t1 values(2);
+ERROR HY000: Lost connection to MySQL server during query
+include/rpl_reconnect.inc
+SET GLOBAL debug = ``;
+include/start_slave.inc
+RocksDB: Last MySQL Gtid master_uuid:2
+drop table t1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result
new file mode 100644
index 00000000000..905b56dacb5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result
@@ -0,0 +1,34 @@
+#
+# Ensure skip_unique_check is set when lag exceeds lag_threshold
+#
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1");
+call mtr.add_suppression(".*Worker.*failed executing transaction");
+call mtr.add_suppression(".*The slave coordinator and worker threads are stopped");
+drop table if exists t1;
+CREATE TABLE t1 (id int primary key, value int) engine=RocksDB;
+set global reset_seconds_behind_master=1;
+INSERT INTO t1 VALUES(1, 0);
+INSERT INTO t1 VALUES(2, 0);
+INSERT INTO t1 VALUES(3, 0);
+INSERT INTO t1 VALUES(1, 1);
+include/wait_for_slave_sql_error_and_skip.inc [errno=1062]
+set global reset_seconds_behind_master=0;
+include/stop_slave_io.inc
+INSERT INTO t1 values (4,0);
+INSERT INTO t1 VALUES(2, 1);
+include/start_slave_io.inc
+set global reset_seconds_behind_master=1;
+insert into t1 values (5,0);
+INSERT INTO t1 VALUES(3, 1);
+include/wait_for_slave_sql_error_and_skip.inc [errno=1062]
+#
+# Cleanup
+#
+DROP TABLE t1;
+set global reset_seconds_behind_master=1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result
new file mode 100644
index 00000000000..6c58cb16fed
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result
@@ -0,0 +1,31 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1");
+call mtr.add_suppression(".*Worker.*failed executing transaction");
+call mtr.add_suppression(".*The slave coordinator and worker threads are stopped");
+drop table if exists t1;
+CREATE TABLE t1 (id int primary key, value int) engine=RocksDB;
+set global reset_seconds_behind_master=1;
+INSERT INTO t1 VALUES(1, 0);
+INSERT INTO t1 VALUES(2, 0);
+INSERT INTO t1 VALUES(3, 0);
+INSERT INTO t1 VALUES(1, 1);
+include/wait_for_slave_sql_error_and_skip.inc [errno=1062]
+set global reset_seconds_behind_master=0;
+include/stop_slave_io.inc
+INSERT INTO t1 values (4,0);
+INSERT INTO t1 VALUES(2, 1);
+include/start_slave_io.inc
+set global reset_seconds_behind_master=1;
+insert into t1 values (5,0);
+INSERT INTO t1 VALUES(3, 1);
+include/wait_for_slave_sql_error_and_skip.inc [errno=1062]
+#
+# Cleanup
+#
+DROP TABLE t1;
+set global reset_seconds_behind_master=1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result
new file mode 100644
index 00000000000..325df314216
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result
@@ -0,0 +1,44 @@
+DROP TABLE IF EXISTS t1;
+create table t1 (a int primary key, msg varchar(255)) engine=rocksdb;
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+SET SESSION debug="d,crash_commit_after_prepare";
+insert into t1 values (1, 'dogz');
+select * from t1;
+a msg
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+SET SESSION debug="d,crash_commit_after_log";
+insert into t1 values (2, 'catz'), (3, 'men');
+select * from t1;
+a msg
+2 catz
+3 men
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+SET SESSION debug="d,crash_commit_after";
+insert into t1 values (4, 'cars'), (5, 'foo');
+select * from t1;
+a msg
+2 catz
+3 men
+4 cars
+5 foo
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+SET SESSION debug="d,crash_commit_after_log";
+insert into t1 values (6, 'shipz'), (7, 'tankz');
+select * from t1;
+a msg
+2 catz
+3 men
+4 cars
+5 foo
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+SET SESSION debug="d,crash_commit_after";
+insert into t1 values (8, 'space'), (9, 'time');
+select * from t1;
+a msg
+2 catz
+3 men
+4 cars
+5 foo
+8 space
+9 time
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result
new file mode 100644
index 00000000000..eb2c6cfcda3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result
@@ -0,0 +1,222 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+DROP TABLE IF EXISTS t1;
+# Establish connection con1 (user=root)
+# Establish connection con2 (user=root)
+# Establish connection con3 (user=root)
+# Establish connection con4 (user=root)
+# reset replication to guarantee that master-bin.000001 is used
+include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+RESET MASTER;
+CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root";
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+include/start_slave.inc
+# Switch to connection con1
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine.
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine.
+ROLLBACK;
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 531 UUID:1-2
+# Switch to connection con2
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+# Switch to connection con1
+SELECT * FROM t1;
+a
+1
+COMMIT;
+SELECT * FROM t1;
+a
+1
+2
+3
+DROP TABLE t1;
+# Switch to connection con1
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 1510 UUID:1-7
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 1510 UUID:1-7
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 1510 UUID:1-7
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 1510 UUID:1-7
+# Switch to connection con2
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+# Switch to connection con1
+SELECT * FROM t1;
+a
+1
+SELECT * INTO OUTFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' FROM t1;
+COMMIT;
+# Switch to slave
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+DELETE FROM t1;
+LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1;
+SELECT * FROM t1;
+a
+1
+CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos;
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+include/start_slave.inc
+SELECT * FROM t1;
+a
+1
+2
+3
+SELECT * FROM t1_backup;
+a
+1
+2
+3
+DROP TABLE t1_backup;
+DROP TABLE t1;
+# Switch to connection con1
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+# async queries from con2
+INSERT INTO t1 VALUES(2);
+# async queries from con3
+INSERT INTO t1 VALUES(21);
+# Switch to connection con1
+# Switch to connection con4
+INSERT INTO t1 VALUES(9);
+# Switch to connection con1
+SELECT * INTO OUTFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' FROM t1;
+COMMIT;
+# reap async statements
+# Switch to slave
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+DELETE FROM t1;
+LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1;
+CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos;
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+include/start_slave.inc
+# sync and then query slave
+ShouldBeZero
+0
+DROP TABLE t1_backup;
+DROP TABLE t1;
+# Switch to connection con1
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 3688 UUID:1-18
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 3688 UUID:1-18
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 3688 UUID:1-18
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000001 3688 UUID:1-18
+# Switch to connection con2
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+# Switch to connection con1
+SELECT * FROM t1;
+a
+1
+SELECT * INTO OUTFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' FROM t1;
+COMMIT;
+# Switch to slave
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+SET @@global.gtid_purged='gtid_executed_from_snapshot';
+DELETE FROM t1;
+LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1;
+SELECT * FROM t1;
+a
+1
+CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1;
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+include/start_slave.inc
+SELECT * FROM t1;
+a
+1
+2
+3
+SELECT * FROM t1_backup;
+a
+1
+2
+3
+DROP TABLE t1_backup;
+DROP TABLE t1;
+# Switch to connection con1
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+# async queries from con2
+INSERT INTO t1 VALUES(2);
+# async queries from con3
+INSERT INTO t1 VALUES(21);
+# Switch to connection con1
+# Switch to connection con4
+INSERT INTO t1 VALUES(9);
+# Switch to connection con1
+SELECT * INTO OUTFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' FROM t1;
+COMMIT;
+# reap async statements
+# Switch to slave
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+SET @@global.gtid_purged='gtid_executed_from_snapshot';
+DELETE FROM t1;
+LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1;
+CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1;
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+include/start_slave.inc
+# sync and then query slave
+ShouldBeZero
+0
+DROP TABLE t1_backup;
+DROP TABLE t1;
+# Switch to connection default + close connections con1 and con2
+include/stop_slave.inc
+CHANGE MASTER to master_auto_position=0;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result
new file mode 100644
index 00000000000..57c1d0822c9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result
@@ -0,0 +1,15 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+create table t1(a int primary key);
+FLUSH LOGS;
+insert into t1 values(1);
+insert into t1 values(2);
+FLUSH LOGS;
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+File Position Gtid_executed
+master-bin.000003 120
+drop table t1;
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result
new file mode 100644
index 00000000000..d4920b14705
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result
@@ -0,0 +1,28 @@
+include/master-slave.inc
+Warnings:
+Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+[connection master]
+call mtr.add_suppression(".*");
+include/stop_slave.inc
+change master to master_auto_position=1;
+include/start_slave.inc
+call mtr.add_suppression('Slave: Error dropping database');
+stop slave sql_thread;
+insert into test0.benchmark set state='slave is processing load';
+start slave sql_thread;
+use test0;
+insert into benchmark set state='slave ends load';
+use test;
+select * from test1.benchmark into outfile 'benchmark.out';
+select ts from test0.benchmark where state like 'master started load' into @m_0;
+select ts from test0.benchmark where state like 'master ends load' into @m_1;
+select ts from test0.benchmark where state like 'slave takes on load' into @s_m0;
+select ts from test0.benchmark where state like 'slave is supposed to finish with load' into @s_m1;
+select ts from test0.benchmark where state like 'slave ends load' into @s_1;
+select ts from test0.benchmark where state like 'slave is processing load' into @s_0;
+select time_to_sec(@m_1) - time_to_sec(@m_0) as 'delta.out';
+include/stop_slave.inc
+change master to master_auto_position=0;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf
new file mode 100644
index 00000000000..ed8c77bcc0b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf
@@ -0,0 +1,51 @@
+# Use default setting for mysqld processes
+!include include/default_mysqld.cnf
+!include include/default_client.cnf
+
+[mysqld.1]
+
+# Run the master.sh script before starting this process
+#!run-master-sh
+
+log-bin= master-bin
+
+loose-innodb
+
+[mysqld.2]
+# Run the slave.sh script before starting this process
+#!run-slave-sh
+
+# Append the <testname>-slave.opt file to the list of arguments used when
+# starting mysqld
+#!use-slave-opt
+innodb_use_native_aio = 0
+
+log-bin= slave-bin
+relay-log= slave-relay-bin
+
+log-slave-updates
+master-retry-count= 10
+
+# Values reported by the slave when it connects to the master
+# and show up in SHOW SLAVE STATUS;
+report-host= 127.0.0.1
+report-port= @mysqld.2.port
+report-user= root
+
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path needs to have a constant length, otherwise
+# test results will vary; thus a relative path is used.
+slave-load-tmpdir= ../../tmp
+
+loose-innodb
+
+
+[ENV]
+MASTER_MYPORT= @mysqld.1.port
+MASTER_MYSOCK= @mysqld.1.socket
+
+SLAVE_MYPORT= @mysqld.2.port
+SLAVE_MYSOCK= @mysqld.2.socket
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations
new file mode 100644
index 00000000000..f09d338c357
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations
@@ -0,0 +1,2 @@
+[row]
+binlog-format=row
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt
new file mode 100644
index 00000000000..c747adc94d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test
new file mode 100644
index 00000000000..acea1903c05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test
@@ -0,0 +1,81 @@
+--source include/have_log_bin.inc
+--source include/have_rocksdb.inc
+--source include/have_innodb.inc
+--enable_connect_log
+-- let $uuid = `select @@server_uuid;`
+
+# Save the initial number of concurrent sessions
+--source include/count_sessions.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+connection con1;
+create table i1 (id int primary key , value int) engine=innodb;
+create table r1 (id int primary key , value int) engine=rocksdb;
+
+
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+
+# Without setting engine, this takes both InnoDB and RocksDB snapshots
+-- replace_result $uuid uuid
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+connection con2;
+insert into i1 values (1,1);
+insert into r1 values (1,1);
+
+connection con1;
+select * from i1;
+select * from r1;
+
+# This takes a RocksDB snapshot only, but InnoDB still participates in the transaction.
+-- replace_result $uuid uuid
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+connection con2;
+insert into i1 values (2,2);
+insert into r1 values (2,2);
+
+connection con1;
+# the InnoDB snapshot is taken here, so changes made after this point are not visible
+select * from i1;
+select * from r1;
+
+connection con2;
+insert into i1 values (3,2);
+insert into r1 values (3,2);
+
+connection con1;
+select * from i1;
+select * from r1;
+
+# RocksDB also participates in the transaction
+-- replace_result $uuid uuid
+START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT;
+
+connection con2;
+insert into r1 values (4,4);
+
+connection con1;
+# the RocksDB snapshot is taken here, so changes made after this point are not visible
+select * from r1;
+
+connection con2;
+insert into r1 values (5,5);
+
+connection con1;
+select * from r1;
+
+drop table i1;
+drop table r1;
+
+connection default;
+disconnect con1;
+disconnect con2;
+reset master;
+--source include/wait_until_count_sessions.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt
new file mode 100644
index 00000000000..c747adc94d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test
new file mode 100644
index 00000000000..69d2e87e40e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test
@@ -0,0 +1,71 @@
+--source include/have_rocksdb.inc
+--source include/have_binlog_format_row.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/big_test.inc
+
+--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb;
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+
+# On connection one we insert a row and pause after the commit marker is
+# written to the WAL. Connection two then inserts many rows. After connection
+# two completes, connection one continues, only to crash after the binlog
+# write but before the commit. On crash recovery we verify that connection
+# one's value has been recovered and committed.
+connection con1;
+--echo 'con1'
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after_log";
+SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go';
+--error 0,2013
+--send insert into t1 values (1, 1, "iamtheogthealphaandomega");
+
+connection con2;
+--echo 'con2'
+insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush");
+
+# Disable 2PC and syncing so the dummy rows insert faster.
+# These rows' only purpose is to rotate the binlog.
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+SET GLOBAL ROCKSDB_WRITE_SYNC = OFF;
+SET GLOBAL SYNC_BINLOG = 0;
+
+SET DEBUG_SYNC='now WAIT_FOR parked';
+--disable_query_log
+--let $pk= 3
+while ($pk < 1000000) {
+ eval insert into t1 values ($pk, 1, "foobardatagoesheresothatmorelogsrollwhichiswhatwewant");
+ --inc $pk
+}
+--enable_query_log
+
+# Re-enable 2PC and syncing, then write to trigger a flush
+# before we trigger the crash, to simulate full durability.
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+SET GLOBAL ROCKSDB_WRITE_SYNC = ON;
+SET GLOBAL SYNC_BINLOG = 1;
+
+insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush");
+
+SET DEBUG_SYNC='now SIGNAL go';
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+--exec sleep 60
+
+--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.1.err commit,prepare,rollback
+
+select * from t1 where a=1;
+select count(*) from t1;
+
+drop table t1;
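As a rough illustration of the final verification step, the sketch below shows the kind of scan that a checker such as the check_log_for_xa.py call above might run over the server error log; the helper names here are hypothetical and the real script (under suite/rocksdb/t/) may match lines quite differently.

import sys

def count_recovery_markers(log_path, keywords):
    # Count how many error-log lines mention each recovery-related keyword
    # (e.g. commit, prepare, rollback) emitted during XA crash recovery.
    counts = dict((k, 0) for k in keywords)
    with open(log_path) as log:
        for line in log:
            for k in keywords:
                if k in line:
                    counts[k] += 1
    return counts

if __name__ == '__main__':
    # usage: python check_log_sketch.py <error_log> commit,prepare,rollback
    results = count_recovery_markers(sys.argv[1], sys.argv[2].split(','))
    for word, n in sorted(results.items()):
        print '%s: %d' % (word, n)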
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl
new file mode 100644
index 00000000000..a5e4d9d8035
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl
@@ -0,0 +1,19 @@
+my $pid_file = $ARGV[0];
+my $log_file = $ARGV[1];
+
+open(my $fh, '<', $pid_file) || die "Cannot open pid file $pid_file";
+my $slave_pid = <$fh>;
+close($fh);
+
+$slave_pid =~ s/\s//g;
+open(my $log_fh, '<', $log_file) || die "Cannot open log file $log_file";
+
+my $pid_found = 0;
+while (my $line = <$log_fh>) {
+ next unless ($pid_found || $line =~ /^[\d-]* [\d:]* $slave_pid /);
+ $pid_found = 1 unless ($pid_found);
+ if ($line =~ /^RocksDB: Last binlog file position.*slave-bin\..*\n/) {
+ print "Binlog Info Found\n";
+ }
+}
+close($log_fh);
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf
new file mode 100644
index 00000000000..454c9eb887a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf
@@ -0,0 +1,9 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+log_slave_updates
+
+[mysqld.2]
+relay_log_recovery=1
+relay_log_info_repository=TABLE
+log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test
new file mode 100644
index 00000000000..0e40e5423a2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test
@@ -0,0 +1,12 @@
+--source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc
+
+connection slave;
+--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1)
+
+# Verify the log file contains the Last binlog line, but only if the slave server's pid is found
+--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err
+
+--disable_query_log
+connection slave;
+call mtr.add_suppression("Recovery from master pos");
+--enable_query_log
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt
new file mode 100644
index 00000000000..d828b6c01f4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt
new file mode 100644
index 00000000000..aac6c6caadb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt
@@ -0,0 +1,2 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
+--sync_binlog=1000 --relay_log_recovery=1
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test
new file mode 100644
index 00000000000..949fbad666d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test
@@ -0,0 +1,41 @@
+-- source include/have_gtid.inc
+-- source include/master-slave.inc
+-- source include/have_debug.inc
+-- source include/not_valgrind.inc
+
+-- let $engine = ROCKSDB
+
+call mtr.add_suppression("Recovery from master pos");
+
+-- let $debug_option = crash_before_update_pos
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_after_update_pos_before_apply
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_before_writing_xid
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = half_binlogged_transaction
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_commit_before
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_commit_after_log
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_commit_after_prepare
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_reset.inc
+-- let $debug_option = crash_commit_after
+-- source extra/rpl_tests/rpl_gtid_crash_safe.inc
+
+-- source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf
new file mode 100644
index 00000000000..b6e8beb8fcb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf
@@ -0,0 +1,14 @@
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+log_slave_updates
+gtid_mode=ON
+enforce_gtid_consistency=ON
+
+[mysqld.2]
+sync_relay_log_info=100
+relay_log_recovery=1
+relay_log_info_repository=FILE
+log_slave_updates
+gtid_mode=ON
+enforce_gtid_consistency=ON
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc
new file mode 100644
index 00000000000..43ee7ec526c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc
@@ -0,0 +1,153 @@
+source include/master-slave.inc;
+-- let $uuid = `select @@server_uuid;`
+
+--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+connection master;
+--disable_warnings
+drop table if exists x;
+--enable_warnings
+
+connection master;
+
+select @@binlog_format;
+
+create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
+insert into x values (1,1,1);
+insert into x values (2,1,1);
+insert into x values (3,1,1);
+insert into x values (4,1,1);
+insert into x values (5,1,1);
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+
+sync_slave_with_master;
+connection slave;
+--let slave_data_dir= query_get_value(SELECT @@DATADIR, @@DATADIR, 1)
+--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1)
+--disable_query_log
+select "--- slave state before crash ---" as "";
+--enable_query_log
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+-- replace_result $uuid uuid
+select * from mysql.slave_gtid_info;
+
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+
+--write_file $MYSQL_TMP_DIR/truncate_tail_wal.sh
+#!/bin/bash
+
+F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1`
+SIZE=`stat -c %s $F`
+NEW_SIZE=`expr $SIZE - 10`
+truncate -s $NEW_SIZE $F
+rc=$?
+if [[ $rc != 0 ]]; then
+ exit 1
+fi
+
+kill -9 `head -1 $slave_pid_file`
+
+exit 0
+EOF
+--chmod 0755 $MYSQL_TMP_DIR/truncate_tail_wal.sh
+--exec $MYSQL_TMP_DIR/truncate_tail_wal.sh
+
+--let $rpl_skip_start_slave= 1
+--source include/rpl_start_server.inc
+--disable_query_log
+select "--- slave state after crash recovery, slave stop, one transaction recovered---" as "";
+--enable_query_log
+connection slave;
+--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.2.err commit,prepare,rollback
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+-- replace_result $uuid uuid
+select * from mysql.slave_gtid_info;
+
+--disable_query_log
+select "--- slave state after restart, slave start ---" as "";
+--enable_query_log
+--source include/start_slave.inc
+connection master;
+sync_slave_with_master;
+connection slave;
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+-- replace_result $uuid uuid
+select * from mysql.slave_gtid_info;
+
+connection master;
+insert into x values (6,1,1);
+
+sync_slave_with_master;
+connection slave;
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+-- replace_result $uuid uuid
+select * from mysql.slave_gtid_info;
+
+connection master;
+insert into x values (7,1,1);
+insert into x values (8,1,1);
+insert into x values (9,1,1);
+insert into x values (10,1,1);
+insert into x values (11,1,1);
+insert into x values (12,1,1);
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+sync_slave_with_master;
+
+connection slave;
+
+# Corrupt the WAL. MyRocks does point-in-time recovery with wal_recovery_mode=2.
+# It loses some data but can resync after the slave is restarted.
+
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+
+--write_file $MYSQL_TMP_DIR/corrupt_wal.sh
+#!/bin/bash
+
+# expected to be around 950 bytes
+F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1`
+SIZE=`stat -c %s $F`
+OFFSET=$(( $SIZE-500 ))
+dd bs=1 if=/dev/zero of=$F count=100 seek=$OFFSET conv=notrunc
+
+kill -9 `head -1 $slave_pid_file`
+
+exit 0
+EOF
+--chmod 0755 $MYSQL_TMP_DIR/corrupt_wal.sh
+--exec $MYSQL_TMP_DIR/corrupt_wal.sh
+
+--let $rpl_skip_start_slave= 1
+--source include/rpl_start_server.inc
+--disable_query_log
+select "--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 ---" as "";
+--enable_query_log
+select * from x;
+--source include/start_slave.inc
+connection master;
+sync_slave_with_master;
+connection slave;
+select * from x;
+-- replace_result $uuid uuid
+select @@global.gtid_executed;
+-- replace_result $uuid uuid
+select * from mysql.slave_gtid_info;
+
+connection master;
+drop table x;
+
+
+--remove_file $MYSQL_TMP_DIR/truncate_tail_wal.sh
+--remove_file $MYSQL_TMP_DIR/corrupt_wal.sh
+--source include/rpl_end.inc
+
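The corrupt_wal.sh helper above zeroes a stretch of bytes near the tail of the newest WAL file before killing the slave. A minimal Python sketch of that same step (hypothetical helper name; it assumes the .rocksdb data directory layout used in this test) could look like this:

import glob
import os

def corrupt_newest_wal(rocksdb_dir, overwrite_bytes=100, back_from_end=500):
    # Pick the most recently modified WAL, mirroring `ls -t *.log | head -n 1`.
    wal_files = glob.glob(os.path.join(rocksdb_dir, '*.log'))
    newest = max(wal_files, key=os.path.getmtime)
    size = os.path.getsize(newest)
    offset = max(size - back_from_end, 0)
    # Overwrite bytes in place (no truncation) so that point-in-time recovery
    # (wal_recovery_mode=2) stops at the last consistent record on restart.
    with open(newest, 'r+b') as f:
        f.seek(offset)
        f.write('\0' * overwrite_bytes)
    return newest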
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test
new file mode 100644
index 00000000000..3b660b2640f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test
@@ -0,0 +1,12 @@
+-- source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc
+
+connection slave;
+-- let _SLAVE_PID_FILE= query_get_value(SELECT @@pid_file, @@pid_file, 1)
+
+# Verify the log file contains the Last binlog line, but only if the slave server's pid is found
+--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err
+
+--disable_query_log
+connection slave;
+call mtr.add_suppression("Recovery from master pos");
+--enable_query_log
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt
new file mode 100644
index 00000000000..d828b6c01f4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt
new file mode 100644
index 00000000000..d828b6c01f4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test
new file mode 100644
index 00000000000..56c0eac2517
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test
@@ -0,0 +1,39 @@
+# based on rpl/rpl_gtid_innodb_sys_header.test
+source include/master-slave.inc;
+source include/have_gtid.inc;
+source include/have_debug.inc;
+source include/not_valgrind.inc;
+
+--let $old_debug = `select @@global.debug;`
+
+connection master;
+create table t1 (a int primary key) engine=rocksdb;
+insert into t1 values(1);
+--eval SET GLOBAL debug = '+d,crash_before_writing_xid'
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--error 2013
+insert into t1 values(2);
+
+--source include/wait_until_disconnected.inc
+--let $rpl_server_number = 1
+--source include/rpl_reconnect.inc
+
+--eval SET GLOBAL debug = `$old_debug`
+
+connection slave;
+disable_warnings;
+source include/start_slave.inc;
+enable_warnings;
+connection master;
+sync_slave_with_master;
+
+connection master;
+--let $master_uuid= query_get_value(select @@server_uuid, @@server_uuid, 1)
+--replace_result $master_uuid master_uuid
+--exec grep 'RocksDB: Last MySQL Gtid $master_uuid' $MYSQLTEST_VARDIR/log/mysqld.1.err
+
+drop table t1;
+source include/rpl_end.inc;
+-- move_file $MYSQLTEST_VARDIR/log/mysqld.1.err $MYSQLTEST_VARDIR/log/mysqld.1.err.orig
+-- write_file $MYSQLTEST_VARDIR/log/mysqld.1.err
+EOF
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt
new file mode 100644
index 00000000000..1c8dc1e62e9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt
@@ -0,0 +1 @@
+--unique-check-lag-threshold=5
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test
new file mode 100644
index 00000000000..8c79d2afa03
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test
@@ -0,0 +1,6 @@
+--echo #
+--echo # Ensure skip_unique_check is set when lag exceeds lag_threshold
+--echo #
+
+--source ../include/rpl_no_unique_check_on_lag.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt
new file mode 100644
index 00000000000..1c8dc1e62e9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt
@@ -0,0 +1 @@
+--unique-check-lag-threshold=5
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test
new file mode 100644
index 00000000000..c5cf1a8ae92
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test
@@ -0,0 +1,2 @@
+--source ../include/rpl_no_unique_check_on_lag.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt
new file mode 100644
index 00000000000..a990dc22129
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt
new file mode 100644
index 00000000000..c747adc94d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test
new file mode 100644
index 00000000000..5f99e1aabd1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test
@@ -0,0 +1,56 @@
+--source include/have_binlog_format_row.inc
+--source include/have_rocksdb.inc
+--source include/have_debug.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+create table t1 (a int primary key, msg varchar(255)) engine=rocksdb;
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after_prepare";
+--error 0,2013
+insert into t1 values (1, 'dogz');
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+select * from t1;
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after_log";
+--error 0,2013
+insert into t1 values (2, 'catz'), (3, 'men');
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+select * from t1;
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = OFF;
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after";
+--error 0,2013
+insert into t1 values (4, 'cars'), (5, 'foo');
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+select * from t1;
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after_log";
+--error 0,2013
+insert into t1 values (6, 'shipz'), (7, 'tankz');
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+select * from t1;
+
+SET GLOBAL ROCKSDB_DISABLE_2PC = ON;
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+SET SESSION debug="d,crash_commit_after";
+--error 0,2013
+insert into t1 values (8, 'space'), (9, 'time');
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+select * from t1;
+
+drop table t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt
new file mode 100644
index 00000000000..c747adc94d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt
new file mode 100644
index 00000000000..c747adc94d5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt
@@ -0,0 +1 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test
new file mode 100644
index 00000000000..37f80c8ace5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test
@@ -0,0 +1,373 @@
+--source include/master-slave.inc
+--source include/have_binlog_format_row.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo # Establish connection con1 (user=root)
+connect (con1,localhost,root,,);
+--echo # Establish connection con2 (user=root)
+connect (con2,localhost,root,,);
+--echo # Establish connection con3 (user=root)
+connect (con3,localhost,root,,);
+--echo # Establish connection con4 (user=root)
+connect (con4,localhost,root,,);
+
+--echo # reset replication to guarantee that master-bin.000001 is used
+connection slave;
+--source include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+
+connection master;
+RESET MASTER;
+
+connection slave;
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root";
+--source include/start_slave.inc
+
+### Test 1:
+### - While a consistent snapshot transaction is executed,
+### no external inserts should be visible to the transaction.
+
+--echo # Switch to connection con1
+connection con1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+--error ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+--error ER_UNKNOWN_ERROR
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+ROLLBACK;
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+
+
+--disable_query_log
+--disable_result_log
+let $x=1000;
+while ($x) {
+ START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+ dec $x;
+}
+--enable_query_log
+--enable_result_log
+
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+--echo # Switch to connection con2
+connection con2;
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+
+--echo # Switch to connection con1
+connection con1;
+SELECT * FROM t1; # should fetch one row
+COMMIT;
+
+SELECT * FROM t1; # should fetch three rows
+
+DROP TABLE t1;
+
+### Test 2:
+### - confirm result from snapshot select and replication replay matches original
+
+--echo # Switch to connection con1
+connection con1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1);
+
+--echo # Switch to connection con2
+connection con2;
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+
+--echo # Switch to connection con1
+connection con1;
+SELECT * FROM t1;
+
+--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file
+
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval SELECT * INTO OUTFILE '$outfile' FROM t1;
+COMMIT;
+
+--echo # Switch to slave
+sync_slave_with_master slave;
+
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+--source include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+DELETE FROM t1;
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval LOAD DATA INFILE '$outfile' INTO TABLE t1;
+SELECT * FROM t1;
+
+--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos
+eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos;
+--source include/start_slave.inc
+
+connection master;
+sync_slave_with_master slave;
+
+SELECT * FROM t1;
+SELECT * FROM t1_backup;
+DROP TABLE t1_backup;
+
+connection master;
+DROP TABLE t1;
+--remove_file $outfile
+
+### Test 3:
+### - confirm result from snapshot select and replication replay matches original
+### - use non-deterministic concurrency
+
+--echo # Switch to connection con1
+connection con1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+
+--echo # async queries from con2
+connection con2;
+send INSERT INTO t1 VALUES(2);
+
+--echo # async queries from con3
+connection con3;
+send INSERT INTO t1 VALUES(21);
+
+--echo # Switch to connection con1
+connection con1;
+
+let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1);
+
+--echo # Switch to connection con4
+connection con4;
+INSERT INTO t1 VALUES(9);
+
+--echo # Switch to connection con1
+connection con1;
+
+--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file
+
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval SELECT * INTO OUTFILE '$outfile' FROM t1;
+COMMIT;
+
+--echo # reap async statements
+connection con2;
+reap;
+
+connection con3;
+reap;
+
+--echo # Switch to slave
+sync_slave_with_master slave;
+
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+--source include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+DELETE FROM t1;
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval LOAD DATA INFILE '$outfile' INTO TABLE t1;
+
+--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos
+eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos;
+--source include/start_slave.inc
+
+--echo # sync and then query slave
+connection master;
+sync_slave_with_master slave;
+
+let $sum1 = `SELECT SUM(a) from t1`;
+let $sum2 = `SELECT SUM(a) from t1_backup`;
+--disable_query_log
+eval select $sum2 - $sum1 ShouldBeZero;
+--enable_query_log
+
+DROP TABLE t1_backup;
+
+connection master;
+DROP TABLE t1;
+--remove_file $outfile
+
+### Test 4:
+### - confirm result from snapshot select and replication replay using gtid protocol matches original
+
+--echo # Switch to connection con1
+connection con1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1);
+
+--echo # Switch to connection con2
+connection con2;
+INSERT INTO t1 VALUES(2);
+INSERT INTO t1 VALUES(3);
+
+--echo # Switch to connection con1
+connection con1;
+SELECT * FROM t1;
+
+--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file
+
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval SELECT * INTO OUTFILE '$outfile' FROM t1;
+COMMIT;
+
+--echo # Switch to slave
+sync_slave_with_master slave;
+
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+--source include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+--replace_result $gtid_executed gtid_executed_from_snapshot
+eval SET @@global.gtid_purged='$gtid_executed';
+DELETE FROM t1;
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval LOAD DATA INFILE '$outfile' INTO TABLE t1;
+SELECT * FROM t1;
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1;
+--source include/start_slave.inc
+
+connection master;
+sync_slave_with_master slave;
+
+SELECT * FROM t1;
+SELECT * FROM t1_backup;
+DROP TABLE t1_backup;
+
+connection master;
+DROP TABLE t1;
+--remove_file $outfile
+
+### Test 5:
+### - confirm result from snapshot select and replication replay using gtid_protocol matches original
+### - use non-deterministic concurrency
+
+--echo # Switch to connection con1
+connection con1;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb;
+INSERT INTO t1 VALUES(1);
+
+--echo # async queries from con2
+connection con2;
+send INSERT INTO t1 VALUES(2);
+
+--echo # async queries from con3
+connection con3;
+send INSERT INTO t1 VALUES(21);
+
+--echo # Switch to connection con1
+connection con1;
+
+let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1);
+
+--echo # Switch to connection con4
+connection con4;
+INSERT INTO t1 VALUES(9);
+
+--echo # Switch to connection con1
+connection con1;
+
+--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file
+
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval SELECT * INTO OUTFILE '$outfile' FROM t1;
+COMMIT;
+
+--echo # reap async statements
+connection con2;
+reap;
+
+connection con3;
+reap;
+
+--echo # Switch to slave
+sync_slave_with_master slave;
+
+CREATE TABLE t1_backup LIKE t1;
+INSERT INTO t1_backup SELECT * FROM t1;
+--source include/stop_slave.inc
+RESET SLAVE;
+RESET MASTER;
+--replace_result $gtid_executed gtid_executed_from_snapshot
+eval SET @@global.gtid_purged='$gtid_executed';
+DELETE FROM t1;
+
+--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
+eval LOAD DATA INFILE '$outfile' INTO TABLE t1;
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1;
+--source include/start_slave.inc
+
+--echo # sync and then query slave
+connection master;
+sync_slave_with_master slave;
+
+let $sum1 = `SELECT SUM(a) from t1`;
+let $sum2 = `SELECT SUM(a) from t1_backup`;
+--disable_query_log
+eval select $sum2 - $sum1 ShouldBeZero;
+--enable_query_log
+
+DROP TABLE t1_backup;
+
+connection master;
+DROP TABLE t1;
+--remove_file $outfile
+
+--echo # Switch to connection default + close connections con1 and con2
+connection con1;
+disconnect con1;
+--source include/wait_until_disconnected.inc
+connection con2;
+disconnect con2;
+--source include/wait_until_disconnected.inc
+connection con3;
+disconnect con3;
+--source include/wait_until_disconnected.inc
+connection con4;
+disconnect con4;
+--source include/wait_until_disconnected.inc
+
+connection default;
+sync_slave_with_master slave;
+--source include/stop_slave.inc
+CHANGE MASTER to master_auto_position=0;
+--source include/start_slave.inc
+
+--source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test
new file mode 100644
index 00000000000..2b590f84653
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test
@@ -0,0 +1,17 @@
+--source include/master-slave.inc
+--source include/have_binlog_format_row.inc
+
+--connection master
+create table t1(a int primary key);
+
+FLUSH LOGS;
+
+insert into t1 values(1);
+insert into t1 values(2);
+
+FLUSH LOGS;
+
+START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+
+drop table t1;
+-- source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt
new file mode 100644
index 00000000000..5c5a73bf2a4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt
@@ -0,0 +1,2 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
+--binlog_rows_query_log_events=TRUE --rocksdb_unsafe_for_binlog=TRUE
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt
new file mode 100644
index 00000000000..67f0fcf77f0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt
@@ -0,0 +1,2 @@
+--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --max_binlog_size=50000
+--slave_parallel_workers=30 --relay_log_recovery=1 --rocksdb_unsafe_for_binlog=TRUE
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test
new file mode 100644
index 00000000000..17b866060b7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test
@@ -0,0 +1,26 @@
+-- source include/big_test.inc
+-- source include/master-slave.inc
+-- source include/not_valgrind.inc
+-- source include/have_gtid.inc
+-- source include/have_rocksdb.inc
+
+connection master;
+call mtr.add_suppression(".*");
+sync_slave_with_master;
+-- source include/stop_slave.inc
+change master to master_auto_position=1;
+-- source include/start_slave.inc
+
+-- let $iter=100
+-- let $databases=30
+-- let $num_crashes=100
+-- let $include_silent=1
+-- let $storage_engine='rocksdb'
+-- source extra/rpl_tests/rpl_parallel_load_innodb.test
+-- let $include_silent=0
+
+-- source include/stop_slave.inc
+change master to master_auto_position=0;
+-- source include/start_slave.inc
+
+-- source include/rpl_end.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc
new file mode 100644
index 00000000000..a8ac90fcc3f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc
@@ -0,0 +1,56 @@
+# Run the load generator to populate the table and generate concurrent
+# updates. After the load generator completes, verify that the tables on the
+# master and the slave are consistent.
+
+--sync_slave_with_master
+
+--connection master
+--let $master_host = 127.0.0.1
+let $MYSQL_BASEDIR = `SELECT @@BASEDIR`;
+
+let $exec =
+ python $MYSQL_BASEDIR/mysql-test/suite/rocksdb_stress/t/load_generator.py
+ -L $MYSQL_TMP_DIR/load_generator.log -H $master_host -t $table
+ -P $MASTER_MYPORT -n $num_records -m $max_records
+ -l $num_loaders -c $num_checkers -r $num_requests
+ -E $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+ -D $reap_delay;
+
+exec $exec;
+
+enable_reconnect;
+source include/wait_until_connected_again.inc;
+
+--let $master_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1)
+
+# if sync_slave_with_master had a configurable timeout this would not be needed
+let $slave_sync_timeout = 7200;
+--source include/wait_for_slave_to_sync_with_master.inc
+
+--connection slave
+--let $slave_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1)
+
+let $not_same = `SELECT $master_checksum-$slave_checksum`;
+if ($not_same)
+{
+ --die "The checksums of table $table for the master and slave do not match!"
+}
+
+# Cleanup
+--connection master
+--let $cleanup = DROP TABLE $table
+eval $cleanup;
+
+# if sync_slave_with_master had a configurable timeout this would not be needed
+let $slave_sync_timeout = 7200;
+--source include/wait_for_slave_to_sync_with_master.inc
+
+--connection slave
+--source include/stop_slave.inc
+# For stress tests, the replication thread sometimes cannot connect to the
+# master temporarily, either because the master crashed and is recovering or
+# because it is too busy to service the slave's requests. mtr's internal
+# check requires that there be no errors in slave status; restarting
+# replication clears them.
+--source include/start_slave.inc
+--source include/stop_slave.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf
new file mode 100644
index 00000000000..fb985f5d1b4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf
@@ -0,0 +1,8 @@
+!include include/default_my.cnf
+!include suite/rocksdb/my.cnf
+!include suite/rpl/my.cnf
+
+[mysqld.1]
+binlog_format=row
+[mysqld.2]
+binlog_format=row
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result
new file mode 100644
index 00000000000..3d76e035e05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result
@@ -0,0 +1,21 @@
+include/master-slave.inc
+[connection master]
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(id INT PRIMARY KEY,
+thread_id INT NOT NULL,
+request_id BIGINT UNSIGNED NOT NULL,
+update_count INT UNSIGNED NOT NULL DEFAULT 0,
+zero_sum INT DEFAULT 0,
+msg VARCHAR(1024),
+msg_length int,
+msg_checksum varchar(128),
+KEY msg_i(msg(255), zero_sum))
+ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+stop slave;
+start slave;
+DROP TABLE t1;
+stop slave;
+start slave;
+include/stop_slave.inc
+include/start_slave.inc
+include/stop_slave.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result
new file mode 100644
index 00000000000..3d76e035e05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result
@@ -0,0 +1,21 @@
+include/master-slave.inc
+[connection master]
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(id INT PRIMARY KEY,
+thread_id INT NOT NULL,
+request_id BIGINT UNSIGNED NOT NULL,
+update_count INT UNSIGNED NOT NULL DEFAULT 0,
+zero_sum INT DEFAULT 0,
+msg VARCHAR(1024),
+msg_length int,
+msg_checksum varchar(128),
+KEY msg_i(msg(255), zero_sum))
+ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+stop slave;
+start slave;
+DROP TABLE t1;
+stop slave;
+start slave;
+include/stop_slave.inc
+include/start_slave.inc
+include/stop_slave.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py
new file mode 100644
index 00000000000..20098f49b42
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py
@@ -0,0 +1,1029 @@
+import cStringIO
+import array
+import hashlib
+import MySQLdb
+from MySQLdb.constants import CR
+from MySQLdb.constants import ER
+from collections import deque
+import os
+import random
+import signal
+import sys
+import threading
+import time
+import string
+import traceback
+import logging
+import argparse
+
+# This is a generic load generator for mysqld which persists across server
+# restarts and attempts to verify that committed transactions are persisted
+# and that changes from uncommitted transactions are not.
+#
+# The table schema used should look something like:
+#
+# CREATE TABLE t1(id INT PRIMARY KEY,
+# thread_id INT NOT NULL,
+# request_id BIGINT UNSIGNED NOT NULL,
+# update_count INT UNSIGNED NOT NULL DEFAULT 0,
+# zero_sum INT DEFAULT 0,
+# msg VARCHAR(1024),
+# msg_length int,
+# msg_checksum varchar(128),
+# KEY msg_i(msg(255), zero_sum))
+# ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+#
+# zero_sum should always sum to 0 across the table, no matter when it is
+# checked. Each transaction maintains this invariant.
+#
+# request_id should be unique across transactions. It is used during
+# transaction verification and is monotonically increasing.
+#
+# Several threads are spawned at the start of the test to populate the table.
+# Once the table is populated, both loader and checker threads are created.
+#
+# The row id space is split into two sections: exclusive and shared. Each
+# loader thread owns some part of the exclusive section, for which it maintains
+# complete information on inserts/updates/deletes. Since this section is only
+# modified by one thread, the thread can maintain an accurate picture of all
+# changes. The shared section contains rows which multiple threads can
+# update/delete/insert. For checking purposes, the request_id is used to
+# determine if a row is consistent with a committed transaction.
+#
+# Each loader thread's transaction consists of selecting some number of rows
+# randomly. The thread can choose to delete the row, update the row or insert
+# the row if it doesn't exist. The state of rows that are owned by the loader
+# thread is tracked within the thread's id_map. This map contains the row id
+# and the request_id of the latest update. For indicating deleted rows, the
+# -request_id marker is used. Thus, at any point in time, the thread's id_map
+# should reflect the exact state of the rows it owns.
+#
+# The loader thread also maintains the state of older transactions that were
+# successfully processed in addition to the current transaction, which may or
+# may not be committed. Each transaction state consists of the row id, and the
+# request_id. Again, -request_id is used to indicate a delete. For committed
+# transactions, the thread can verify the request_id of the row is larger than
+# what the thread has recorded. For uncommitted transactions, the thread would
+# verify the request_id of the row does not match that of the transaction. To
+# determine whether or not a transaction succeeded in case of a crash right at
+# commit, each thread always includes a particular row in the transaction which
+# it could use to check the request id against.
+#
+# Checker threads run continuously to verify the checksums on the rows and to
+# verify the zero_sum column sums up to zero at any point in time. The checker
+# threads run both point lookups and range scans for selecting the rows.
+
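+# A minimal illustration (hypothetical names, simplified) of the id_map
+# convention described above: a positive request_id records the latest
+# insert/update of an owned row, and a negated request_id records its delete:
+#
+#   id_map[row_id] = request_id     # row inserted or updated by request_id
+#   id_map[row_id] = -request_id    # row deleted by request_id
+#   row_exists = id_map.get(row_id, 0) > 0
+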
+class ValidateError(Exception):
+ """Raised when validation fails"""
+ pass
+
+class TestError(Exception):
+ """Raised when the test cannot make forward progress"""
+ pass
+
+CHARS = string.letters + string.digits
+OPTIONS = {}
+
+# max number of rows per transaction
+MAX_ROWS_PER_REQ = 10
+
+# global variable checked by threads to determine if the test is stopping
+TEST_STOP = False
+LOADERS_READY = 0
+
+# global monotonically increasing request id counter
+REQUEST_ID = 1
+REQUEST_ID_LOCK = threading.Lock()
+
+def get_next_request_id():
+ global REQUEST_ID
+ with REQUEST_ID_LOCK:
+ REQUEST_ID += 1
+ return REQUEST_ID
+
+# given a percentage value, rolls a 100-sided die and returns whether the
+# given value is greater than or equal to the die roll
+#
+# passing 0 should always return false and 100 should always return true
+def roll_d100(p):
+ assert p >= 0 and p <= 100
+ return p >= random.randint(1, 100)
+
+def sha1(x):
+ return hashlib.sha1(str(x)).hexdigest()
+
+def is_connection_error(exc):
+ error_code = exc.args[0]
+ return (error_code == MySQLdb.constants.CR.CONNECTION_ERROR or
+ error_code == MySQLdb.constants.CR.CONN_HOST_ERROR or
+ error_code == MySQLdb.constants.CR.SERVER_LOST or
+ error_code == MySQLdb.constants.CR.SERVER_GONE_ERROR or
+ error_code == MySQLdb.constants.ER.QUERY_INTERRUPTED or
+ error_code == MySQLdb.constants.ER.SERVER_SHUTDOWN)
+
+def is_deadlock_error(exc):
+ error_code = exc.args[0]
+ return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK or
+ error_code == MySQLdb.constants.ER.LOCK_WAIT_TIMEOUT)
+
+# should be deterministic given an idx
+def gen_msg(idx, thread_id, request_id):
+  random.seed(idx)
+ # field length is 1024 bytes, but 32 are reserved for the tid and req tag
+ blob_length = random.randint(1, 1024 - 32)
+
+ if roll_d100(50):
+ # blob that cannot be compressed (well, compresses to 85% of original size)
+ msg = ''.join([random.choice(CHARS) for x in xrange(blob_length)])
+ else:
+ # blob that can be compressed
+ msg = random.choice(CHARS) * blob_length
+
+ # append the thread_id and request_id to the end of the msg
+ return ''.join([msg, ' tid: %d req: %d' % (thread_id, request_id)])
+
+def execute(cur, stmt):
+ ROW_COUNT_ERROR = 18446744073709551615L
+ logging.debug("Executing %s" % stmt)
+ cur.execute(stmt)
+ if cur.rowcount < 0 or cur.rowcount == ROW_COUNT_ERROR:
+ raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR,
+ "Possible connection error, rowcount is %d"
+ % cur.rowcount)
+
+def wait_for_workers(workers, min_active = 0):
+ logging.info("Waiting for %d workers", len(workers))
+ # min_active needs to include the current waiting thread
+ min_active += 1
+
+ # polling here allows this thread to be responsive to keyboard interrupt
+  # exceptions; otherwise a user hitting ctrl-c would see the load_generator as
+ # hanging and unresponsive
+ try:
+ while threading.active_count() > min_active:
+ time.sleep(1)
+ except KeyboardInterrupt, e:
+ os._exit(1)
+
+ num_failures = 0
+ for w in workers:
+ w.join()
+ if w.exception:
+ logging.error(w.exception)
+ num_failures += 1
+
+ return num_failures
+
+# base class for worker threads; contains the logic for reconnecting to the
+# mysqld server after a connection failure
+class WorkerThread(threading.Thread):
+ def __init__(self, name):
+ threading.Thread.__init__(self)
+ self.name = name
+ self.exception = None
+ self.con = None
+ self.cur = None
+ self.isolation_level = None
+ self.start_time = time.time()
+ self.total_time = 0
+
+ def run(self):
+ global TEST_STOP
+
+ try:
+ logging.info("Started")
+ self.runme()
+ logging.info("Completed successfully")
+ except Exception, e:
+ self.exception = traceback.format_exc()
+ logging.error(self.exception)
+ TEST_STOP = True
+ finally:
+ self.total_time = time.time() - self.start_time
+ logging.info("Total run time: %.2f s" % self.total_time)
+ self.finish()
+
+ def reconnect(self, timeout=900):
+ global TEST_STOP
+
+ self.con = None
+ SECONDS_BETWEEN_RETRY = 10
+ attempts = 1
+ logging.info("Attempting to connect to MySQL Server")
+ while not self.con and timeout > 0 and not TEST_STOP:
+ try:
+ self.con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host,
+ port=OPTIONS.port, db=OPTIONS.db)
+ if self.con:
+ self.con.autocommit(False)
+ self.cur = self.con.cursor()
+ self.set_isolation_level(self.isolation_level)
+ logging.info("Connection successful after attempt %d" % attempts)
+ break
+ except MySQLdb.Error, e:
+ logging.debug(traceback.format_exc())
+ time.sleep(SECONDS_BETWEEN_RETRY)
+ timeout -= SECONDS_BETWEEN_RETRY
+ attempts += 1
+ return self.con is None
+
+ def get_isolation_level(self):
+ execute(self.cur, "SELECT @@SESSION.tx_isolation")
+ if self.cur.rowcount != 1:
+ raise TestError("Unable to retrieve tx_isolation")
+ return self.cur.fetchone()[0]
+
+ def set_isolation_level(self, isolation_level, persist = False):
+ if isolation_level is not None:
+ execute(self.cur, "SET @@SESSION.tx_isolation = '%s'" % isolation_level)
+ if self.cur.rowcount != 0:
+ raise TestError("Unable to set the isolation level to %s")
+
+ if isolation_level is None or persist:
+ self.isolation_level = isolation_level
+
+# periodically kills the server
+class ReaperWorker(WorkerThread):
+ def __init__(self):
+ WorkerThread.__init__(self, 'reaper')
+ self.start()
+ self.kills = 0
+
+ def finish(self):
+ logging.info('complete with %d kills' % self.kills)
+ if self.con:
+ self.con.close()
+
+ def get_server_pid(self):
+ execute(self.cur, "SELECT @@pid_file")
+ if self.cur.rowcount != 1:
+ raise TestError("Unable to retrieve pid_file")
+ return int(open(self.cur.fetchone()[0]).read())
+
+ def runme(self):
+ global TEST_STOP
+ time_remain = random.randint(10, 30)
+ while not TEST_STOP:
+ if time_remain > 0:
+ time_remain -= 1
+ time.sleep(1)
+ continue
+ if self.reconnect():
+ raise Exception("Unable to connect to MySQL server")
+ logging.info('killing server...')
+ with open(OPTIONS.expect_file, 'w+') as expect_file:
+ expect_file.write('restart')
+ os.kill(self.get_server_pid(), signal.SIGTERM)
+ self.kills += 1
+      time_remain = random.randint(0, 30) + OPTIONS.reap_delay
+
+# runs initially to populate the table with the given number of rows
+class PopulateWorker(WorkerThread):
+ def __init__(self, thread_id, start_id, num_to_add):
+ WorkerThread.__init__(self, 'populate-%d' % thread_id)
+ self.thread_id = thread_id
+ self.start_id = start_id
+ self.num_to_add = num_to_add
+ self.table = OPTIONS.table
+ self.start()
+
+ def finish(self):
+ if self.con:
+ self.con.commit()
+ self.con.close()
+
+ def runme(self):
+ if self.reconnect():
+ raise Exception("Unable to connect to MySQL server")
+
+ stmt = None
+ for i in xrange(self.start_id, self.start_id + self.num_to_add):
+ stmt = gen_insert(self.table, i, 0, 0, 0)
+ execute(self.cur, stmt)
+ if i % 101 == 0:
+ self.con.commit()
+ self.con.commit()
+ logging.info("Inserted %d rows starting at id %d" %
+ (self.num_to_add, self.start_id))
+
+def populate_table(num_records):
+
+ logging.info("Populate_table started for %d records" % num_records)
+ if num_records == 0:
+ return False
+
+  num_workers = min(10, max(num_records / 100, 1))
+ workers = []
+
+ N = num_records / num_workers
+ start_id = 0
+ for i in xrange(num_workers):
+ workers.append(PopulateWorker(i, start_id, N))
+ start_id += N
+ if num_records > start_id:
+ workers.append(PopulateWorker(num_workers, start_id,
+ num_records - start_id))
+
+ # Wait for the populate threads to complete
+ return wait_for_workers(workers) > 0
+
+def gen_insert(table, idx, thread_id, request_id, zero_sum):
+ msg = gen_msg(idx, thread_id, request_id)
+ return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, "
+ "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s')"
+ % (table, idx, thread_id, request_id,
+ zero_sum, msg, len(msg), sha1(msg)))
+
+def gen_update(table, idx, thread_id, request_id, zero_sum):
+ msg = gen_msg(idx, thread_id, request_id)
+ return ("UPDATE %s SET thread_id = %d, request_id = %d, "
+ "update_count = update_count + 1, zero_sum = zero_sum + (%d), "
+ "msg = '%s', msg_length = %d, msg_checksum = '%s' WHERE id = %d "
+ % (table, thread_id, request_id, zero_sum, msg, len(msg),
+ sha1(msg), idx))
+
+def gen_delete(table, idx):
+ return "DELETE FROM %s WHERE id = %d" % (table, idx)
+
+def gen_insert_on_dup(table, idx, thread_id, request_id, zero_sum):
+ msg = gen_msg(idx, thread_id, request_id)
+ msg_checksum = sha1(msg)
+ return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, "
+ "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s') "
+ "ON DUPLICATE KEY UPDATE "
+ "thread_id=%d, request_id=%d, "
+ "update_count=update_count+1, "
+ "zero_sum=zero_sum + (%d), msg='%s', msg_length=%d, "
+ "msg_checksum='%s'" %
+ (table, idx, thread_id, request_id,
+ zero_sum, msg, len(msg), msg_checksum, thread_id, request_id,
+ zero_sum, msg, len(msg), msg_checksum))
+
+# Each loader thread owns a part of the id space which it maintains inventory
+# for. The loader thread generates inserts, updates and deletes for the table.
+# The latest successful transaction and the latest open transaction are kept to
+# verify after a disconnect that the rows were recovered properly.
+class LoadGenWorker(WorkerThread):
+ TXN_UNCOMMITTED = 0
+ TXN_COMMIT_STARTED = 1
+ TXN_COMMITTED = 2
+
+ def __init__(self, thread_id):
+ WorkerThread.__init__(self, 'loader-%02d' % thread_id)
+ self.thread_id = thread_id
+ self.rand = random.Random()
+ self.rand.seed(thread_id)
+ self.loop_num = 0
+
+ # id_map contains the array of id's owned by this worker thread. It needs
+ # to be offset by start_id for the actual id
+ self.id_map = array.array('l')
+ self.start_id = thread_id * OPTIONS.ids_per_loader
+ self.num_id = OPTIONS.ids_per_loader
+ self.start_share_id = OPTIONS.num_loaders * OPTIONS.ids_per_loader
+ self.max_id = OPTIONS.max_id
+ self.table = OPTIONS.table
+ self.num_requests = OPTIONS.num_requests
+
+ # stores information about the latest series of successful transactions
+ #
+ # each transaction is simply a map of id -> request_id
+ # deleted rows are indicated by -request_id
+ self.prev_txn = deque()
+ self.cur_txn = None
+ self.cur_txn_state = None
+
+ self.start()
+
+ def finish(self):
+ if self.total_time:
+ req_per_sec = self.loop_num / self.total_time
+ else:
+ req_per_sec = -1
+ logging.info("total txns: %d, txn/s: %.2f rps" %
+ (self.loop_num, req_per_sec))
+
+ # constructs the internal hash map of the ids owned by this thread and
+ # the request id of each id
+ def populate_id_map(self):
+ logging.info("Populating id map")
+
+ REQ_ID_COL = 0
+ stmt = "SELECT request_id FROM %s WHERE id = %d"
+
+ # the start_id is used for tracking active transactions, so the row needs
+ # to exist
+ idx = self.start_id
+ execute(self.cur, stmt % (self.table, idx))
+ if self.cur.rowcount > 0:
+ request_id = self.cur.fetchone()[REQ_ID_COL]
+ else:
+ request_id = get_next_request_id()
+ execute(self.cur, gen_insert(self.table, idx, self.thread_id,
+ request_id, 0))
+ self.con.commit()
+
+ self.id_map.append(request_id)
+
+ self.cur_txn = {idx:request_id}
+ self.cur_txn_state = self.TXN_COMMITTED
+ for i in xrange(OPTIONS.committed_txns):
+ self.prev_txn.append(self.cur_txn)
+
+    # fetch the rest of the rows for the id space owned by this thread
+ for idx in xrange(self.start_id + 1, self.start_id + self.num_id):
+ execute(self.cur, stmt % (self.table, idx))
+ if self.cur.rowcount == 0:
+        # A negative number is used to indicate a missing row
+ self.id_map.append(-1)
+ else:
+ res = self.cur.fetchone()
+ self.id_map.append(res[REQ_ID_COL])
+
+ self.con.commit()
+
+ def apply_cur_txn_changes(self):
+ # apply the changes to the id_map
+ for idx in self.cur_txn:
+ if idx < self.start_id + self.num_id:
+ assert idx >= self.start_id
+ self.id_map[idx - self.start_id] = self.cur_txn[idx]
+ self.cur_txn_state = self.TXN_COMMITTED
+
+ self.prev_txn.append(self.cur_txn)
+ self.prev_txn.popleft()
+
+ def verify_txn(self, txn, committed):
+ request_id = txn[self.start_id]
+ if not committed:
+ # if the transaction was not committed, then there should be no rows
+ # in the table that have this request_id
+ cond = '='
+ # it is possible the start_id used to track this transaction is in
+ # the process of being deleted
+ if request_id < 0:
+ request_id = -request_id
+ else:
+ # if the transaction was committed, then no rows modified by this
+ # transaction should have a request_id less than this transaction's id
+ cond = '<'
+ stmt = ("SELECT COUNT(*) FROM %s WHERE id IN (%s) AND request_id %s %d" %
+ (self.table, ','.join(str(x) for x in txn), cond, request_id))
+ execute(self.cur, stmt)
+ if (self.cur.rowcount != 1):
+ raise TestError("Unable to retrieve results for query '%s'" % stmt)
+ count = self.cur.fetchone()[0]
+ if (count > 0):
+ raise TestError("Expected '%s' to return 0 rows, but %d returned "
+ "instead" % (stmt, count))
+ self.con.commit()
+
+ def verify_data(self):
+    # if the state of the current transaction is unknown (i.e. a commit was
+    # issued, but the connection failed before the result was seen), check the
+    # start_id row to determine if it was committed
+ request_id = self.cur_txn[self.start_id]
+ if self.cur_txn_state == self.TXN_COMMIT_STARTED:
+ assert request_id >= 0
+ idx = self.start_id
+ stmt = "SELECT id, request_id FROM %s where id = %d" % (self.table, idx)
+ execute(self.cur, stmt)
+ if (self.cur.rowcount == 0):
+ raise TestError("Fetching start_id %d via '%s' returned no data! "
+ "This row should never be deleted!" % (idx, stmt))
+ REQUEST_ID_COL = 1
+ res = self.cur.fetchone()
+ if res[REQUEST_ID_COL] == self.cur_txn[idx]:
+ self.apply_cur_txn_changes()
+ else:
+ self.cur_txn_state = self.TXN_UNCOMMITTED
+ self.con.commit()
+
+ # if the transaction was not committed, verify there are no rows at this
+ # request id
+ #
+ # however, if the transaction was committed, then verify none of the rows
+ # have a request_id below the request_id recorded by the start_id row.
+ if self.cur_txn_state == self.TXN_UNCOMMITTED:
+ self.verify_txn(self.cur_txn, False)
+
+ # verify all committed transactions
+ for txn in self.prev_txn:
+ self.verify_txn(txn, True)
+
+    # verify that the rows owned by this worker match the request_id at which
+    # they were last set.
+ idx = self.start_id
+ max_map_id = self.start_id + self.num_id
+ row_count = 0
+ ID_COL = 0
+ REQ_ID_COL = ID_COL + 1
+
+ while idx < max_map_id:
+ if (row_count == 0):
+ num_rows_to_check = random.randint(50, 100)
+ execute(self.cur,
+ "SELECT id, request_id FROM %s where id >= %d and id < %d "
+ "ORDER BY id LIMIT %d"
+ % (self.table, idx, max_map_id, num_rows_to_check))
+
+ # prevent future queries from being issued since we've hit the end of
+ # the rows that exist in the table
+ row_count = self.cur.rowcount if self.cur.rowcount != 0 else -1
+
+ # determine the id of the next available row in the table
+ if (row_count > 0):
+ res = self.cur.fetchone()
+ assert idx <= res[ID_COL]
+ next_id = res[ID_COL]
+ row_count -= 1
+ else:
+ next_id = max_map_id
+
+ # rows up to the next id don't exist within the table, verify our
+ # map has them as removed
+ while idx < next_id:
+        # the row is missing from the table, so the id_map must record it as
+        # deleted (negative request_id); anything else is an inconsistency
+ if self.id_map[idx - self.start_id] >= 0:
+ raise ValidateError("Row id %d was not found in table, but "
+ "id_map has it at request_id %d" %
+ (idx, self.id_map[idx - self.start_id]))
+ idx += 1
+
+ if idx == max_map_id:
+ break
+
+ if (self.id_map[idx - self.start_id] != res[REQ_ID_COL]):
+ raise ValidateError("Row id %d has req id %d, but %d is the "
+ "expected value!" %
+ (idx, res[REQ_ID_COL],
+ self.id_map[idx - self.start_id]))
+ idx += 1
+
+ self.con.commit()
+ logging.debug("Verified data successfully")
+
+ def execute_one(self):
+    # select a number of rows and perform an insert, update or delete
+    # operation on each of them
+ num_rows = random.randint(1, MAX_ROWS_PER_REQ)
+ ids = array.array('L')
+
+ # allocate at least one row in the id space owned by this worker
+ idx = random.randint(self.start_id, self.start_id + self.num_id - 1)
+ ids.append(idx)
+
+ for i in xrange(1, num_rows):
+      # The valid ranges for ids are from start_id to start_id + num_id and from
+ # start_share_id to max_id. The randint() uses the range from
+ # start_share_id to max_id + num_id - 1. start_share_id to max_id covers
+ # the shared range. The exclusive range is covered by max_id to max_id +
+ # num_id - 1. If any number lands in this >= max_id section, it is
+ # remapped to start_id and used for selecting a row in the exclusive
+ # section.
+ idx = random.randint(self.start_share_id, self.max_id + self.num_id - 1)
+ if idx >= self.max_id:
+ idx -= self.max_id - self.start_id
+ if ids.count(idx) == 0:
+ ids.append(idx)
+
+ # perform a read of these rows
+ ID_COL = 0
+ ZERO_SUM_COL = ID_COL + 1
+
+ # For repeatable-read isolation levels on MyRocks, during the lock
+ # acquisition part of this transaction, it is possible the selected rows
+ # conflict with another thread's transaction. This results in a deadlock
+ # error that requires the whole transaction to be rolled back because the
+ # transaction's current snapshot will always be reading an older version of
+ # the row. MyRocks will prevent any updates to this row until the
+ # snapshot is released and re-acquired.
+ NUM_RETRIES = 100
+ for i in xrange(NUM_RETRIES):
+ ids_found = {}
+ try:
+ for idx in ids:
+ stmt = ("SELECT id, zero_sum FROM %s WHERE id = %d "
+ "FOR UPDATE" % (self.table, idx))
+ execute(self.cur, stmt)
+ if self.cur.rowcount > 0:
+ res = self.cur.fetchone()
+ ids_found[res[ID_COL]] = res[ZERO_SUM_COL]
+ break
+ except MySQLdb.OperationalError, e:
+ if not is_deadlock_error(e):
+ raise e
+
+ # if a deadlock occurred, rollback the transaction and wait a short time
+ # before retrying.
+ logging.debug("%s generated deadlock, retry %d of %d" %
+ (stmt, i, NUM_RETRIES))
+ self.con.rollback()
+ time.sleep(0.2)
+
+ if i == NUM_RETRIES - 1:
+ raise TestError("Unable to acquire locks after a number of retries "
+ "for query '%s'" % stmt)
+
+    # track the net change so that the zero_sum column still sums to zero at
+    # the end of this operation
+ current_sum = 0
+
+ # all row locks acquired at this point, so allocate a request_id
+ request_id = get_next_request_id()
+ self.cur_txn = {self.start_id:request_id}
+ self.cur_txn_state = self.TXN_UNCOMMITTED
+
+ for idx in ids:
+ stmt = None
+ zero_sum = self.rand.randint(-1000, 1000)
+ action = self.rand.randint(0, 3)
+ is_delete = False
+
+ if idx in ids_found:
+ # for each row found, determine if it should be updated or deleted
+ if action == 0:
+ stmt = gen_delete(self.table, idx)
+ is_delete = True
+ current_sum -= ids_found[idx]
+ else:
+ stmt = gen_update(self.table, idx, self.thread_id, request_id,
+ zero_sum)
+ current_sum += zero_sum
+ else:
+ # if it does not exist, then determine if an insert should happen
+ if action <= 1:
+ stmt = gen_insert(self.table, idx, self.thread_id, request_id,
+ zero_sum)
+ current_sum += zero_sum
+
+ if stmt is not None:
+ # mark in self.cur_txn what these new changes will be
+ if is_delete:
+ self.cur_txn[idx] = -request_id
+ else:
+ self.cur_txn[idx] = request_id
+ execute(self.cur, stmt)
+ if self.cur.rowcount == 0:
+ raise TestError("Executing %s returned row count of 0!" % stmt)
+
+    # the start_id row is used to determine whether this transaction committed
+    # if the connection fails, and it is also used to bring the net zero_sum
+    # change of this transaction back to zero
+ idx = self.start_id
+ ids.append(idx)
+ self.cur_txn[idx] = request_id
+ stmt = gen_insert_on_dup(self.table, idx, self.thread_id, request_id,
+ -current_sum)
+ execute(self.cur, stmt)
+ if self.cur.rowcount == 0:
+ raise TestError("Executing '%s' returned row count of 0!" % stmt)
+
+ # 90% commit, 10% rollback
+    if roll_d100(10):
+ self.con.rollback()
+ logging.debug("request %s was rolled back" % request_id)
+ else:
+ self.cur_txn_state = self.TXN_COMMIT_STARTED
+ self.con.commit()
+ if not self.con.get_server_info():
+ raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR,
+ "Possible connection error on commit")
+ self.apply_cur_txn_changes()
+
+ self.loop_num += 1
+ if self.loop_num % 1000 == 0:
+ logging.info("Processed %d transactions so far" % self.loop_num)
+
+ def runme(self):
+ global TEST_STOP, LOADERS_READY
+
+ self.start_time = time.time()
+ if self.reconnect():
+ raise Exception("Unable to connect to MySQL server")
+
+ self.populate_id_map()
+ self.verify_data()
+
+ logging.info("Starting load generator")
+ reconnected = False
+ LOADERS_READY += 1
+
+ while self.loop_num < self.num_requests and not TEST_STOP:
+ try:
+        # verify our data on each reconnect and also on occasion
+ if reconnected or random.randint(1, 500) == 1:
+ self.verify_data()
+ reconnected = False
+
+ self.execute_one()
+ self.loop_num += 1
+ except MySQLdb.OperationalError, e:
+ if not is_connection_error(e):
+ raise e
+ if self.reconnect():
+ raise Exception("Unable to connect to MySQL server")
+ reconnected = True
+ return
+
+# the checker thread runs read-only transactions to verify that the row
+# checksums match the msg contents.
+class CheckerWorker(WorkerThread):
+ def __init__(self, thread_id):
+ WorkerThread.__init__(self, 'checker-%02d' % thread_id)
+ self.thread_id = thread_id
+ self.rand = random.Random()
+ self.rand.seed(thread_id)
+ self.max_id = OPTIONS.max_id
+ self.table = OPTIONS.table
+ self.loop_num = 0
+ self.start()
+
+ def finish(self):
+ logging.info("total loops: %d" % self.loop_num)
+
+ def check_zerosum(self):
+ # two methods for checking zero sum
+ # 1. request the server to do it (90% of the time for now)
+ # 2. read all rows and calculate directly
+ if roll_d100(90):
+ stmt = "SELECT SUM(zero_sum) FROM %s" % self.table
+ if roll_d100(50):
+ stmt += " FORCE INDEX(msg_i)"
+ execute(self.cur, stmt)
+
+ if self.cur.rowcount != 1:
+ raise ValidateError("Error with query '%s'" % stmt)
+ res = self.cur.fetchone()[0]
+ if res != 0:
+ raise ValidateError("Expected zero_sum to be 0, but %d returned "
+ "instead" % res)
+ else:
+ cur_isolation_level = self.get_isolation_level()
+ self.set_isolation_level('REPEATABLE-READ')
+ num_rows_to_check = random.randint(500, 1000)
+ idx = 0
+ sum = 0
+
+ stmt = "SELECT id, zero_sum FROM %s where id >= %d ORDER BY id LIMIT %d"
+ ID_COL = 0
+ ZERO_SUM_COL = 1
+
+ while idx < self.max_id:
+ execute(self.cur, stmt % (self.table, idx, num_rows_to_check))
+ if self.cur.rowcount == 0:
+ break
+
+ for i in xrange(self.cur.rowcount - 1):
+ sum += self.cur.fetchone()[ZERO_SUM_COL]
+
+ last_row = self.cur.fetchone()
+ idx = last_row[ID_COL] + 1
+ sum += last_row[ZERO_SUM_COL]
+
+ if sum != 0:
+ raise TestError("Zero sum column expected to total 0, but sum is %d "
+ "instead!" % sum)
+ self.set_isolation_level(cur_isolation_level)
+
+ def check_rows(self):
+ class id_range():
+ def __init__(self, min_id, min_inclusive, max_id, max_inclusive):
+ self.min_id = min_id if min_inclusive else min_id + 1
+ self.max_id = max_id if max_inclusive else max_id - 1
+ def count(self, idx):
+ return idx >= self.min_id and idx <= self.max_id
+
+ stmt = ("SELECT id, msg, msg_length, msg_checksum FROM %s WHERE " %
+ self.table)
+
+ # two methods for checking rows
+ # 1. pick a number of rows at random
+ # 2. range scan
+ if roll_d100(90):
+ ids = []
+ for i in xrange(random.randint(1, MAX_ROWS_PER_REQ)):
+ ids.append(random.randint(0, self.max_id - 1))
+ stmt += "id in (%s)" % ','.join(str(x) for x in ids)
+ else:
+ id1 = random.randint(0, self.max_id - 1)
+ id2 = random.randint(0, self.max_id - 1)
+ min_inclusive = random.randint(0, 1)
+ cond1 = '>=' if min_inclusive else '>'
+ max_inclusive = random.randint(0, 1)
+ cond2 = '<=' if max_inclusive else '<'
+ stmt += ("id %s %d AND id %s %d" %
+ (cond1, min(id1, id2), cond2, max(id1, id2)))
+ ids = id_range(min(id1, id2), min_inclusive, max(id1, id2), max_inclusive)
+
+ execute(self.cur, stmt)
+
+ ID_COL = 0
+ MSG_COL = ID_COL + 1
+ MSG_LENGTH_COL = MSG_COL + 1
+ MSG_CHECKSUM_COL = MSG_LENGTH_COL + 1
+
+ for row in self.cur.fetchall():
+ idx = row[ID_COL]
+ msg = row[MSG_COL]
+ msg_length = row[MSG_LENGTH_COL]
+ msg_checksum = row[MSG_CHECKSUM_COL]
+ if ids.count(idx) < 1:
+ raise ValidateError(
+ "id %d returned from database, but query was '%s'" % (idx, stmt))
+ if (len(msg) != msg_length):
+ raise ValidateError(
+ "id %d contains msg_length %d, but msg '%s' is only %d "
+ "characters long" % (idx, msg_length, msg, len(msg)))
+ if (sha1(msg) != msg_checksum):
+ raise ValidateError("id %d has checksum '%s', but expected checksum "
+ "is '%s'" % (idx, msg_checksum, sha1(msg)))
+
+ def runme(self):
+ global TEST_STOP
+
+ self.start_time = time.time()
+ if self.reconnect():
+ raise Exception("Unable to connect to MySQL server")
+ logging.info("Starting checker")
+
+ while not TEST_STOP:
+ try:
+        # choose one of two options:
+        # 1. verify that the zero_sum column across all rows totals 0
+        # 2. read a number of rows and verify their checksums
+ if roll_d100(25):
+ self.check_zerosum()
+ else:
+ self.check_rows()
+
+ self.con.commit()
+ self.loop_num += 1
+ if self.loop_num % 10000 == 0:
+ logging.info("Processed %d transactions so far" % self.loop_num)
+ except MySQLdb.OperationalError, e:
+ if not is_connection_error(e):
+ raise e
+ if self.reconnect():
+ raise Exception("Unable to reconnect to MySQL server")
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Concurrent load generator.')
+
+ parser.add_argument('-C, --committed-txns', dest='committed_txns',
+ default=3, type=int,
+ help="number of committed txns to verify")
+
+ parser.add_argument('-c, --num-checkers', dest='num_checkers', type=int,
+ default=4,
+ help="number of reader/checker threads to test with")
+
+ parser.add_argument('-d, --db', dest='db', default='test',
+ help="mysqld server database to test with")
+
+ parser.add_argument('-H, --host', dest='host', default='127.0.0.1',
+ help="mysqld server host ip address")
+
+ parser.add_argument('-i, --ids-per-loader', dest='ids_per_loader',
+ type=int, default=100,
+ help="number of records which each loader owns "
+ "exclusively, up to max-id / 2 / num-loaders")
+
+ parser.add_argument('-L, --log-file', dest='log_file', default=None,
+ help="log file for output")
+
+ parser.add_argument('-l, --num-loaders', dest='num_loaders', type=int,
+ default=16,
+ help="number of loader threads to test with")
+
+ parser.add_argument('-m, --max-id', dest='max_id', type=int, default=1000,
+ help="maximum number of records which the table "
+ "extends to, must be larger than ids_per_loader * "
+ "num_loaders")
+
+ parser.add_argument('-n, --num-records', dest='num_records', type=int,
+ default=0,
+ help="number of records to populate the table with")
+
+ parser.add_argument('-P, --port', dest='port', default=3307, type=int,
+ help='mysqld server host port')
+
+ parser.add_argument('-r, --num-requests', dest='num_requests', type=int,
+ default=100000000,
+ help="number of requests issued per worker thread")
+
+ parser.add_argument('-T, --truncate', dest='truncate', action='store_true',
+ help="truncates or creates the table before the test")
+
+ parser.add_argument('-t, --table', dest='table', default='t1',
+ help="mysqld server table to test with")
+
+ parser.add_argument('-u, --user', dest='user', default='root',
+ help="user to log into the mysql server")
+
+ parser.add_argument('-v, --verbose', dest='verbose', action='store_true',
+ help="enable debug logging")
+
+ parser.add_argument('-E, --expect-file', dest='expect_file', default=None,
+ help="expect file for server restart")
+
+ parser.add_argument('-D, --reap-delay', dest='reap_delay', type=int,
+ default=0,
+ help="seconds to sleep after each server reap")
+
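+  # A rough usage sketch: the actual invocation used by the MTR tests lives in
+  # the sourced rocksdb_stress.inc file (not shown here), but a manual run
+  # might look something like:
+  #   python load_generator.py -u root -H 127.0.0.1 -P 3306 -d test -t t1 \
+  #     -l 8 -c 2 -n 200 -m 100000 -r 10000 -T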
+ OPTIONS = parser.parse_args()
+
+ if OPTIONS.verbose:
+ log_level = logging.DEBUG
+ else:
+ log_level = logging.INFO
+
+ logging.basicConfig(level=log_level,
+ format='%(asctime)s %(threadName)s [%(levelname)s] '
+ '%(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ filename=OPTIONS.log_file)
+
+ logging.info("Command line given: %s" % ' '.join(sys.argv))
+
+ if (OPTIONS.max_id < 0 or OPTIONS.ids_per_loader <= 0 or
+ OPTIONS.max_id < OPTIONS.ids_per_loader * OPTIONS.num_loaders):
+ logging.error("ids-per-loader must be larger tha 0 and max-id must be "
+ "larger than ids_per_loader * num_loaders")
+ exit(1)
+
+ logging.info("Using table %s.%s for test" % (OPTIONS.db, OPTIONS.table))
+
+ if OPTIONS.truncate:
+ logging.info("Truncating table")
+ con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host,
+ port=OPTIONS.port, db=OPTIONS.db)
+ if not con:
+ raise TestError("Unable to connect to mysqld server to create/truncate "
+ "table")
+ cur = con.cursor()
+ cur.execute("SELECT COUNT(*) FROM INFORMATION_SCHEMA.tables WHERE "
+ "table_schema = '%s' AND table_name = '%s'" %
+ (OPTIONS.db, OPTIONS.table))
+ if cur.rowcount != 1:
+ logging.error("Unable to retrieve information about table %s "
+ "from information_schema!" % OPTIONS.table)
+ exit(1)
+
+ if cur.fetchone()[0] == 0:
+ logging.info("Table %s not found, creating a new one" % OPTIONS.table)
+ cur.execute("CREATE TABLE %s (id INT PRIMARY KEY, "
+ "thread_id INT NOT NULL, "
+ "request_id BIGINT UNSIGNED NOT NULL, "
+ "update_count INT UNSIGNED NOT NULL DEFAULT 0, "
+ "zero_sum INT DEFAULT 0, "
+ "msg VARCHAR(1024), "
+ "msg_length int, "
+ "msg_checksum varchar(128), "
+ "KEY msg_i(msg(255), zero_sum)) "
+ "ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin" %
+ OPTIONS.table)
+ else:
+ logging.info("Table %s found, truncating" % OPTIONS.table)
+ cur.execute("TRUNCATE TABLE %s" % OPTIONS.table)
+ con.commit()
+
+ if populate_table(OPTIONS.num_records):
+ logging.error("Populate table returned an error")
+ exit(1)
+
+ logging.info("Starting %d loaders" % OPTIONS.num_loaders)
+ loaders = []
+ for i in xrange(OPTIONS.num_loaders):
+ loaders.append(LoadGenWorker(i))
+
+ logging.info("Starting %d checkers" % OPTIONS.num_checkers)
+ checkers = []
+ for i in xrange(OPTIONS.num_checkers):
+ checkers.append(CheckerWorker(i))
+
+ while LOADERS_READY < OPTIONS.num_loaders:
+ time.sleep(0.5)
+
+ if OPTIONS.expect_file and OPTIONS.reap_delay > 0:
+ logging.info('Starting reaper')
+ checkers.append(ReaperWorker())
+
+ workers_failed = 0
+ workers_failed += wait_for_workers(loaders, len(checkers))
+
+ if TEST_STOP:
+ logging.error("Detected test failure, aborting")
+ os._exit(1)
+
+ TEST_STOP = True
+
+ workers_failed += wait_for_workers(checkers)
+
+ if workers_failed > 0:
+ logging.error("Test detected %d failures, aborting" % workers_failed)
+ sys.exit(1)
+
+ logging.info("Test completed successfully")
+ sys.exit(0)
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test
new file mode 100644
index 00000000000..7d92bb3f83a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test
@@ -0,0 +1,31 @@
+# basic stress test for myrocks; runs the load generator without any crashes
+
+# Don't test this under valgrind, memory leaks will occur
+--disable_warnings
+--source include/not_valgrind.inc
+--source include/have_rocksdb.inc
+--source include/master-slave.inc
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# create the actual table
+CREATE TABLE t1(id INT PRIMARY KEY,
+ thread_id INT NOT NULL,
+ request_id BIGINT UNSIGNED NOT NULL,
+ update_count INT UNSIGNED NOT NULL DEFAULT 0,
+ zero_sum INT DEFAULT 0,
+ msg VARCHAR(1024),
+ msg_length int,
+ msg_checksum varchar(128),
+ KEY msg_i(msg(255), zero_sum))
+ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+
+--let $table = t1
+--let $num_loaders = 8
+--let $num_checkers = 2
+--let $num_records = 200
+--let $max_records = 100000
+--let $num_requests = 10000
+--let $reap_delay = 0
+
+--source suite/rocksdb_stress/include/rocksdb_stress.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test
new file mode 100644
index 00000000000..6f6128579b5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test
@@ -0,0 +1,32 @@
+# basic stress test for myrocks; runs the load generator with periodic crashes
+
+# Don't test this under valgrind, memory leaks will occur
+--disable_warnings
+--source include/not_valgrind.inc
+--source include/have_rocksdb.inc
+--source include/master-slave.inc
+--source include/have_binlog_format_row.inc
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+# create the actual table
+CREATE TABLE t1(id INT PRIMARY KEY,
+ thread_id INT NOT NULL,
+ request_id BIGINT UNSIGNED NOT NULL,
+ update_count INT UNSIGNED NOT NULL DEFAULT 0,
+ zero_sum INT DEFAULT 0,
+ msg VARCHAR(1024),
+ msg_length int,
+ msg_checksum varchar(128),
+ KEY msg_i(msg(255), zero_sum))
+ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
+
+--let $table = t1
+--let $num_loaders = 8
+--let $num_checkers = 2
+--let $num_records = 200
+--let $max_records = 100000
+--let $num_requests = 10000
+--let $reap_delay = 180
+
+--source suite/rocksdb_stress/include/rocksdb_stress.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result
new file mode 100644
index 00000000000..159d6a983c8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result
@@ -0,0 +1,13 @@
+create table t1 (test_name text) engine=MyISAM;
+create table t2 (variable_name text) engine=MyISAM;
+load data infile "MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1;
+insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%";
+insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%";
+select variable_name as `There should be *no* long test name listed below:` from t2
+where length(variable_name) > 50;
+There should be *no* long test name listed below:
+select variable_name as `There should be *no* variables listed below:` from t2
+left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name;
+There should be *no* variables listed below:
+drop table t1;
+drop table t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result
new file mode 100644
index 00000000000..4398563d064
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START = 444;
+ERROR HY000: Variable 'rocksdb_access_hint_on_compaction_start' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result
new file mode 100644
index 00000000000..f7175fd91a3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN = 444;
+ERROR HY000: Variable 'rocksdb_advise_random_on_open' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result
new file mode 100644
index 00000000000..93ec1aec407
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 1"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 1;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 0"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 0;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to on"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = on;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+"Trying to set variable @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 444;
+ERROR HY000: Variable 'rocksdb_allow_concurrent_memtable_write' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'aaa'"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'bbb'"
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = @start_global_value;
+SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE;
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result
new file mode 100644
index 00000000000..f0f1b077ae0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_READS;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_READS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ALLOW_MMAP_READS = 444;
+ERROR HY000: Variable 'rocksdb_allow_mmap_reads' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result
new file mode 100644
index 00000000000..3fa1f14e1df
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_WRITES;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_WRITES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ALLOW_MMAP_WRITES = 444;
+ERROR HY000: Variable 'rocksdb_allow_mmap_writes' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result
new file mode 100644
index 00000000000..6099c3af344
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_os_buffer_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ALLOW_OS_BUFFER;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_ALLOW_OS_BUFFER to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ALLOW_OS_BUFFER = 444;
+ERROR HY000: Variable 'rocksdb_allow_os_buffer' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result
new file mode 100644
index 00000000000..8998bfee64d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result
@@ -0,0 +1,68 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_BACKGROUND_SYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 1"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = 1;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 0"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = 0;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to on"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = on;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to off"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = off;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+"Trying to set variable @@session.ROCKSDB_BACKGROUND_SYNC to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_BACKGROUND_SYNC = 444;
+ERROR HY000: Variable 'rocksdb_background_sync' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 'aaa'"
+SET @@global.ROCKSDB_BACKGROUND_SYNC = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+SET @@global.ROCKSDB_BACKGROUND_SYNC = @start_global_value;
+SELECT @@global.ROCKSDB_BACKGROUND_SYNC;
+@@global.ROCKSDB_BACKGROUND_SYNC
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result
new file mode 100644
index 00000000000..09acaada0c6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS = 444;
+ERROR HY000: Variable 'rocksdb_base_background_compactions' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result
new file mode 100644
index 00000000000..fbd9d97e994
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE;
+SELECT @start_global_value;
+@start_global_value
+8388608
+"Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result
new file mode 100644
index 00000000000..4d02e197a67
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BLOCK_RESTART_INTERVAL;
+SELECT @start_global_value;
+@start_global_value
+16
+"Trying to set variable @@global.ROCKSDB_BLOCK_RESTART_INTERVAL to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BLOCK_RESTART_INTERVAL = 444;
+ERROR HY000: Variable 'rocksdb_block_restart_interval' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result
new file mode 100644
index 00000000000..0382184f2a0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE;
+SELECT @start_global_value;
+@start_global_value
+4096
+"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BLOCK_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_block_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result
new file mode 100644
index 00000000000..83513f814ed
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE_DEVIATION;
+SELECT @start_global_value;
+@start_global_value
+10
+"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE_DEVIATION to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BLOCK_SIZE_DEVIATION = 444;
+ERROR HY000: Variable 'rocksdb_block_size_deviation' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result
new file mode 100644
index 00000000000..96b78cf669e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_BULK_LOAD;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_BULK_LOAD;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 1"
+SET @@global.ROCKSDB_BULK_LOAD = 1;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 0"
+SET @@global.ROCKSDB_BULK_LOAD = 0;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD to on"
+SET @@global.ROCKSDB_BULK_LOAD = on;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 1"
+SET @@session.ROCKSDB_BULK_LOAD = 1;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+0
+"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 0"
+SET @@session.ROCKSDB_BULK_LOAD = 0;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+0
+"Trying to set variable @@session.ROCKSDB_BULK_LOAD to on"
+SET @@session.ROCKSDB_BULK_LOAD = on;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_BULK_LOAD = DEFAULT;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'aaa'"
+SET @@global.ROCKSDB_BULK_LOAD = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'bbb'"
+SET @@global.ROCKSDB_BULK_LOAD = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+SET @@global.ROCKSDB_BULK_LOAD = @start_global_value;
+SELECT @@global.ROCKSDB_BULK_LOAD;
+@@global.ROCKSDB_BULK_LOAD
+0
+SET @@session.ROCKSDB_BULK_LOAD = @start_session_value;
+SELECT @@session.ROCKSDB_BULK_LOAD;
+@@session.ROCKSDB_BULK_LOAD
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result
new file mode 100644
index 00000000000..40404d2fab5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result
@@ -0,0 +1,72 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_BULK_LOAD_SIZE;
+SELECT @start_global_value;
+@start_global_value
+1000
+SET @start_session_value = @@session.ROCKSDB_BULK_LOAD_SIZE;
+SELECT @start_session_value;
+@start_session_value
+1000
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1"
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1;
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1000
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1024"
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1024;
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1000
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE to 1"
+SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1;
+SELECT @@session.ROCKSDB_BULK_LOAD_SIZE;
+@@session.ROCKSDB_BULK_LOAD_SIZE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT;
+SELECT @@session.ROCKSDB_BULK_LOAD_SIZE;
+@@session.ROCKSDB_BULK_LOAD_SIZE
+1000
+"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE to 1024"
+SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1024;
+SELECT @@session.ROCKSDB_BULK_LOAD_SIZE;
+@@session.ROCKSDB_BULK_LOAD_SIZE
+1024
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT;
+SELECT @@session.ROCKSDB_BULK_LOAD_SIZE;
+@@session.ROCKSDB_BULK_LOAD_SIZE
+1000
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 'aaa'"
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1000
+SET @@global.ROCKSDB_BULK_LOAD_SIZE = @start_global_value;
+SELECT @@global.ROCKSDB_BULK_LOAD_SIZE;
+@@global.ROCKSDB_BULK_LOAD_SIZE
+1000
+SET @@session.ROCKSDB_BULK_LOAD_SIZE = @start_session_value;
+SELECT @@session.ROCKSDB_BULK_LOAD_SIZE;
+@@session.ROCKSDB_BULK_LOAD_SIZE
+1000
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result
new file mode 100644
index 00000000000..ede02afcb60
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_BYTES_PER_SYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_BYTES_PER_SYNC = 444;
+ERROR HY000: Variable 'rocksdb_bytes_per_sync' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result
new file mode 100644
index 00000000000..12c25ad63dc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS = 444;
+ERROR HY000: Variable 'rocksdb_cache_index_and_filter_blocks' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result
new file mode 100644
index 00000000000..694c9a4f1dc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result
@@ -0,0 +1,93 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(99);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_CHECKSUMS_PCT;
+SELECT @start_global_value;
+@start_global_value
+100
+SET @start_session_value = @@session.ROCKSDB_CHECKSUMS_PCT;
+SELECT @start_session_value;
+@start_session_value
+100
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 0"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = 0;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+100
+"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 1"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = 1;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+100
+"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 99"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = 99;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+99
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+100
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 0"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = 0;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+100
+"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 1"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = 1;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+100
+"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 99"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = 99;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+99
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+100
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 'aaa'"
+SET @@global.ROCKSDB_CHECKSUMS_PCT = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+100
+SET @@global.ROCKSDB_CHECKSUMS_PCT = @start_global_value;
+SELECT @@global.ROCKSDB_CHECKSUMS_PCT;
+@@global.ROCKSDB_CHECKSUMS_PCT
+100
+SET @@session.ROCKSDB_CHECKSUMS_PCT = @start_session_value;
+SELECT @@session.ROCKSDB_CHECKSUMS_PCT;
+@@session.ROCKSDB_CHECKSUMS_PCT
+100
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result
new file mode 100644
index 00000000000..2f101987332
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_COLLECT_SST_PROPERTIES;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_COLLECT_SST_PROPERTIES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_COLLECT_SST_PROPERTIES = 444;
+ERROR HY000: Variable 'rocksdb_collect_sst_properties' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result
new file mode 100644
index 00000000000..4664ccb2b1e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to on"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = on;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to on"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = on;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'aaa'"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'bbb'"
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_global_value;
+SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_session_value;
+SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE;
+@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result
new file mode 100644
index 00000000000..85517df6ce6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result
@@ -0,0 +1,39 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('abc');
+INSERT INTO valid_values VALUES('def');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+SET @start_global_value = @@global.ROCKSDB_COMPACT_CF;
+SELECT @start_global_value;
+@start_global_value
+
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACT_CF to abc"
+SET @@global.ROCKSDB_COMPACT_CF = abc;
+SELECT @@global.ROCKSDB_COMPACT_CF;
+@@global.ROCKSDB_COMPACT_CF
+
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACT_CF = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACT_CF;
+@@global.ROCKSDB_COMPACT_CF
+
+"Trying to set variable @@global.ROCKSDB_COMPACT_CF to def"
+SET @@global.ROCKSDB_COMPACT_CF = def;
+SELECT @@global.ROCKSDB_COMPACT_CF;
+@@global.ROCKSDB_COMPACT_CF
+
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACT_CF = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACT_CF;
+@@global.ROCKSDB_COMPACT_CF
+
+"Trying to set variable @@session.ROCKSDB_COMPACT_CF to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACT_CF = 444;
+ERROR HY000: Variable 'rocksdb_compact_cf' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+SET @@global.ROCKSDB_COMPACT_CF = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACT_CF;
+@@global.ROCKSDB_COMPACT_CF
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result
new file mode 100644
index 00000000000..d971396f9e8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result
@@ -0,0 +1,70 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @@global.rocksdb_compaction_readahead_size = -1;
+Warnings:
+Warning 1292 Truncated incorrect rocksdb_compaction_readahead_siz value: '-1'
+SELECT @@global.rocksdb_compaction_readahead_size;
+@@global.rocksdb_compaction_readahead_size
+0
+SET @start_global_value = @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 1"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 1;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 0"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 0;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 222333"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 222333;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+222333
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+"Trying to set variable @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_compaction_readahead_size' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'aaa'"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'bbb'"
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE;
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result
new file mode 100644
index 00000000000..311184a17d4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+INSERT INTO valid_values VALUES(2000000);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'2000001\'');
+SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1024"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1024;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 2000000"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 2000000;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+2000000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 444;
+ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 'aaa'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to '2000001'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = '2000001';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result
new file mode 100644
index 00000000000..d4e7e28bebc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 1"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 1;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 0"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 0;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to on"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = on;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 444;
+ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_count_sd' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'aaa'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'bbb'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result
new file mode 100644
index 00000000000..703e235ed18
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result
@@ -0,0 +1,46 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1024"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1024;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+0
+"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_file_size' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 'aaa'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+0
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result
new file mode 100644
index 00000000000..84436b65795
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+INSERT INTO valid_values VALUES(2000000);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'2000001\'');
+SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1024"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1024;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 2000000"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 2000000;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+2000000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 444;
+ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_window' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 'aaa'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to '2000001'"
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = '2000001';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = @start_global_value;
+SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW;
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result
new file mode 100644
index 00000000000..35e4d252e11
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result
@@ -0,0 +1,15 @@
+SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT;
+SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/abc';
+SELECT @@global.ROCKSDB_CREATE_CHECKPOINT;
+@@global.ROCKSDB_CREATE_CHECKPOINT
+
+SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT;
+SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/def';
+SELECT @@global.ROCKSDB_CREATE_CHECKPOINT;
+@@global.ROCKSDB_CREATE_CHECKPOINT
+
+SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT;
+SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444;
+ERROR HY000: Variable 'rocksdb_create_checkpoint' is a GLOBAL variable and should be set with SET GLOBAL
+SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value;
+ERROR HY000: RocksDB: Failed to create checkpoint directory. status 5 IO error: .tmp: No such file or directory
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result
new file mode 100644
index 00000000000..26dd14fbb68
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result
@@ -0,0 +1,14 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_CREATE_IF_MISSING;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_CREATE_IF_MISSING to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_CREATE_IF_MISSING = 444;
+ERROR HY000: Variable 'rocksdb_create_if_missing' is a read only variable
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result
new file mode 100644
index 00000000000..7debadc2bb1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result
@@ -0,0 +1,14 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES = 444;
+ERROR HY000: Variable 'rocksdb_create_missing_column_families' is a read only variable
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result
new file mode 100644
index 00000000000..a3f9eff6c1f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_DATADIR;
+SELECT @start_global_value;
+@start_global_value
+./.rocksdb
+"Trying to set variable @@global.ROCKSDB_DATADIR to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_DATADIR = 444;
+ERROR HY000: Variable 'rocksdb_datadir' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result
new file mode 100644
index 00000000000..6c588b7e060
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_db_write_buffer_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result
new file mode 100644
index 00000000000..5e64ccc69c3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+SELECT @start_global_value;
+@start_global_value
+1
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 1"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 1;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 0"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 0;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to on"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = on;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Trying to set variable @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 444;
+ERROR HY000: Variable 'rocksdb_debug_optimizer_no_zero_cardinality' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'aaa'"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'bbb'"
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = @start_global_value;
+SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY;
+@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+1
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result
new file mode 100644
index 00000000000..b2b1c0e4c97
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_DEFAULT_CF_OPTIONS;
+SELECT @start_global_value;
+@start_global_value
+
+"Trying to set variable @@global.ROCKSDB_DEFAULT_CF_OPTIONS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_DEFAULT_CF_OPTIONS = 444;
+ERROR HY000: Variable 'rocksdb_default_cf_options' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result
new file mode 100644
index 00000000000..2dc220fbe20
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS;
+SELECT @start_global_value;
+@start_global_value
+21600000000
+"Trying to set variable @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS = 444;
+ERROR HY000: Variable 'rocksdb_delete_obsolete_files_period_micros' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result
new file mode 100644
index 00000000000..708dd462dfe
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result
@@ -0,0 +1,75 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_DISABLE_2PC;
+SELECT @start_global_value;
+@start_global_value
+1
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 1"
+SET @@global.ROCKSDB_DISABLE_2PC = 1;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 0"
+SET @@global.ROCKSDB_DISABLE_2PC = 0;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to on"
+SET @@global.ROCKSDB_DISABLE_2PC = on;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to off"
+SET @@global.ROCKSDB_DISABLE_2PC = off;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_DISABLE_2PC = DEFAULT;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Trying to set variable @@session.ROCKSDB_DISABLE_2PC to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_DISABLE_2PC = 444;
+ERROR HY000: Variable 'rocksdb_disable_2pc' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'aaa'"
+SET @@global.ROCKSDB_DISABLE_2PC = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+"Trying to set variable @@global.ROCKSDB_DISABLE_2PC to 'bbb'"
+SET @@global.ROCKSDB_DISABLE_2PC = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+SET @@global.ROCKSDB_DISABLE_2PC = @start_global_value;
+SELECT @@global.ROCKSDB_DISABLE_2PC;
+@@global.ROCKSDB_DISABLE_2PC
+1
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result
new file mode 100644
index 00000000000..9b3000f8f3c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disabledatasync_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_DISABLEDATASYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_DISABLEDATASYNC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_DISABLEDATASYNC = 444;
+ERROR HY000: Variable 'rocksdb_disabledatasync' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result
new file mode 100644
index 00000000000..2c0ff289d8a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result
@@ -0,0 +1,14 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_ENABLE_BULK_LOAD_API;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_ENABLE_BULK_LOAD_API to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ENABLE_BULK_LOAD_API = 444;
+ERROR HY000: Variable 'rocksdb_enable_bulk_load_api' is a read only variable
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result
new file mode 100644
index 00000000000..f12e39fff93
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_ENABLE_THREAD_TRACKING;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_ENABLE_THREAD_TRACKING to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ENABLE_THREAD_TRACKING = 444;
+ERROR HY000: Variable 'rocksdb_enable_thread_tracking' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result
new file mode 100644
index 00000000000..c93152c4756
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 1"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 1;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 0"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 0;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to on"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = on;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+"Trying to set variable @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 444;
+ERROR HY000: Variable 'rocksdb_enable_write_thread_adaptive_yield' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'aaa'"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'bbb'"
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = @start_global_value;
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD;
+@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result
new file mode 100644
index 00000000000..650e2956e23
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result
@@ -0,0 +1,14 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_ERROR_IF_EXISTS;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_ERROR_IF_EXISTS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_ERROR_IF_EXISTS = 444;
+ERROR HY000: Variable 'rocksdb_error_if_exists' is a read only variable
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result
new file mode 100644
index 00000000000..ae4b0ac05a1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result
@@ -0,0 +1,58 @@
+drop table if exists t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+INSERT INTO t1 (b) VALUES (1);
+INSERT INTO t1 (b) VALUES (2);
+INSERT INTO t1 (b) VALUES (3);
+SELECT * FROM t1;
+a b
+1 1
+2 2
+3 3
+set session rocksdb_flush_memtable_on_analyze=off;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SHOW INDEXES FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
+set session rocksdb_flush_memtable_on_analyze=on;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SHOW INDEXES FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+INSERT INTO t1 (b) VALUES (1);
+INSERT INTO t1 (b) VALUES (2);
+INSERT INTO t1 (b) VALUES (3);
+SELECT * FROM t1;
+a b
+1 1
+2 2
+3 3
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 0 0 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SHOW TABLE STATUS LIKE 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 ROCKSDB 10 Fixed 3 8 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result
new file mode 100644
index 00000000000..30444e26d98
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result
@@ -0,0 +1,50 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+SET @start_global_value = @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 1"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 1;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 0"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 0;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to on"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = on;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+"Trying to set variable @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 444;
+ERROR HY000: Variable 'rocksdb_force_flush_memtable_now' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = @start_global_value;
+SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW;
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result
new file mode 100644
index 00000000000..1a7a21c3a9f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result
@@ -0,0 +1,106 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @@session.rocksdb_force_index_records_in_range = -1;
+Warnings:
+Warning 1292 Truncated incorrect rocksdb_force_index_records_in_r value: '-1'
+SELECT @@session.rocksdb_force_index_records_in_range;
+@@session.rocksdb_force_index_records_in_range
+0
+SET @start_global_value = @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+222333
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+222333
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'aaa'"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'bbb'"
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_global_value;
+SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_session_value;
+SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE;
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result
new file mode 100644
index 00000000000..34deca6ce85
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION = 444;
+ERROR HY000: Variable 'rocksdb_hash_index_allow_collision' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result
new file mode 100644
index 00000000000..97c6ed84de7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_INDEX_TYPE;
+SELECT @start_global_value;
+@start_global_value
+kBinarySearch
+"Trying to set variable @@global.ROCKSDB_INDEX_TYPE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_INDEX_TYPE = 444;
+ERROR HY000: Variable 'rocksdb_index_type' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result
new file mode 100644
index 00000000000..1509f9ae95d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result
@@ -0,0 +1,93 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('fatal_level');
+INSERT INTO valid_values VALUES('error_level');
+INSERT INTO valid_values VALUES('warn_level');
+INSERT INTO valid_values VALUES('info_level');
+INSERT INTO valid_values VALUES('debug_level');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES(5);
+INSERT INTO invalid_values VALUES(6);
+INSERT INTO invalid_values VALUES('foo');
+SET @start_global_value = @@global.ROCKSDB_INFO_LOG_LEVEL;
+SELECT @start_global_value;
+@start_global_value
+error_level
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to fatal_level"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = fatal_level;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+fatal_level
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to error_level"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = error_level;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to warn_level"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = warn_level;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+warn_level
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to info_level"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = info_level;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+info_level
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to debug_level"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = debug_level;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+debug_level
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@session.ROCKSDB_INFO_LOG_LEVEL to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_INFO_LOG_LEVEL = 444;
+ERROR HY000: Variable 'rocksdb_info_log_level' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 5"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = 5;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 6"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = 6;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to foo"
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = foo;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+SET @@global.ROCKSDB_INFO_LOG_LEVEL = @start_global_value;
+SELECT @@global.ROCKSDB_INFO_LOG_LEVEL;
+@@global.ROCKSDB_INFO_LOG_LEVEL
+error_level
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result
new file mode 100644
index 00000000000..87dd0e90511
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC = 444;
+ERROR HY000: Variable 'rocksdb_is_fd_close_on_exec' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result
new file mode 100644
index 00000000000..3a0c5060d00
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_KEEP_LOG_FILE_NUM;
+SELECT @start_global_value;
+@start_global_value
+1000
+"Trying to set variable @@global.ROCKSDB_KEEP_LOG_FILE_NUM to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_KEEP_LOG_FILE_NUM = 444;
+ERROR HY000: Variable 'rocksdb_keep_log_file_num' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result
new file mode 100644
index 00000000000..eff9e619967
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result
@@ -0,0 +1,170 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+INSERT INTO valid_values VALUES('true');
+INSERT INTO valid_values VALUES('false');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES(2);
+INSERT INTO invalid_values VALUES(1000);
+SET @start_global_value = @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 1"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 0"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 0;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to on"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = on;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to off"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = off;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to true"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = true;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to false"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = false;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 1"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 1;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 0"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 0;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to on"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = on;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to off"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = off;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to true"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = true;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to false"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = false;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 'aaa'"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 2"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 2;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 1000"
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1000;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = @start_global_value;
+SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS;
+@@global.ROCKSDB_LOCK_SCANNED_ROWS
+0
+SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = @start_session_value;
+SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS;
+@@session.ROCKSDB_LOCK_SCANNED_ROWS
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result
new file mode 100644
index 00000000000..38df5820298
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result
@@ -0,0 +1,72 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+SELECT @start_global_value;
+@start_global_value
+1
+SET @start_session_value = @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+SELECT @start_session_value;
+@start_session_value
+1
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1"
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1;
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024"
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024;
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT;
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1"
+SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1;
+SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@session.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@session.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024"
+SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024;
+SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@session.ROCKSDB_LOCK_WAIT_TIMEOUT
+1024
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT;
+SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@session.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 'aaa'"
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_global_value;
+SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@global.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_session_value;
+SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT;
+@@session.ROCKSDB_LOCK_WAIT_TIMEOUT
+1
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result
new file mode 100644
index 00000000000..24cff58426a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL = 444;
+ERROR HY000: Variable 'rocksdb_log_file_time_to_roll' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result
new file mode 100644
index 00000000000..dbb331d235d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE;
+SELECT @start_global_value;
+@start_global_value
+4194304
+"Trying to set variable @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_manifest_preallocation_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result
new file mode 100644
index 00000000000..903e393d5ea
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444;
+ERROR HY000: Variable 'rocksdb_max_background_compactions' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result
new file mode 100644
index 00000000000..ff8f2b5997b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES = 444;
+ERROR HY000: Variable 'rocksdb_max_background_flushes' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result
new file mode 100644
index 00000000000..4359ee725d4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_LOG_FILE_SIZE;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_MAX_LOG_FILE_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_LOG_FILE_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_max_log_file_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result
new file mode 100644
index 00000000000..27cddc9f60a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE;
+SELECT @start_global_value;
+@start_global_value
+18446744073709551615
+"Trying to set variable @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_max_manifest_file_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result
new file mode 100644
index 00000000000..b058ebf05f8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_OPEN_FILES;
+SELECT @start_global_value;
+@start_global_value
+-1
+"Trying to set variable @@global.ROCKSDB_MAX_OPEN_FILES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_OPEN_FILES = 444;
+ERROR HY000: Variable 'rocksdb_max_open_files' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result
new file mode 100644
index 00000000000..e417e4d5c4e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result
@@ -0,0 +1,72 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_MAX_ROW_LOCKS;
+SELECT @start_global_value;
+@start_global_value
+1073741824
+SET @start_session_value = @@session.ROCKSDB_MAX_ROW_LOCKS;
+SELECT @start_session_value;
+@start_session_value
+1073741824
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1"
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1;
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT;
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1024"
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1024;
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT;
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1"
+SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1;
+SELECT @@session.ROCKSDB_MAX_ROW_LOCKS;
+@@session.ROCKSDB_MAX_ROW_LOCKS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT;
+SELECT @@session.ROCKSDB_MAX_ROW_LOCKS;
+@@session.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1024"
+SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1024;
+SELECT @@session.ROCKSDB_MAX_ROW_LOCKS;
+@@session.ROCKSDB_MAX_ROW_LOCKS
+1024
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT;
+SELECT @@session.ROCKSDB_MAX_ROW_LOCKS;
+@@session.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 'aaa'"
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+SET @@global.ROCKSDB_MAX_ROW_LOCKS = @start_global_value;
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS;
+@@global.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+SET @@session.ROCKSDB_MAX_ROW_LOCKS = @start_session_value;
+SELECT @@session.ROCKSDB_MAX_ROW_LOCKS;
+@@session.ROCKSDB_MAX_ROW_LOCKS
+1073741824
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result
new file mode 100644
index 00000000000..58452f580f2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_SUBCOMPACTIONS;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_MAX_SUBCOMPACTIONS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_SUBCOMPACTIONS = 444;
+ERROR HY000: Variable 'rocksdb_max_subcompactions' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result
new file mode 100644
index 00000000000..22c17c24e19
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE = 444;
+ERROR HY000: Variable 'rocksdb_max_total_wal_size' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result
new file mode 100644
index 00000000000..e82e987bf96
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result
@@ -0,0 +1,43 @@
+drop table if exists t1;
+set session rocksdb_merge_buf_size=250;
+set session rocksdb_merge_combine_read_size=1000;
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `kj` (`j`),
+ KEY `kij` (`i`,`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP INDEX kj on t1;
+DROP INDEX kij ON t1;
+ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `kj` (`j`),
+ KEY `kij` (`i`,`j`),
+ KEY `kji` (`j`,`i`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
+ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `kb` (`b`) COMMENT 'rev:cf1'
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+COUNT(*)
+100
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result
new file mode 100644
index 00000000000..122e2451f39
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result
@@ -0,0 +1,29 @@
+drop table if exists t1;
+set session rocksdb_merge_buf_size=250;
+set session rocksdb_merge_combine_read_size=1000;
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `kj` (`j`),
+ KEY `kij` (`i`,`j`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP INDEX kj on t1;
+DROP INDEX kij ON t1;
+ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) NOT NULL DEFAULT '0',
+ `j` int(11) DEFAULT NULL,
+ PRIMARY KEY (`i`),
+ KEY `kj` (`j`),
+ KEY `kij` (`i`,`j`),
+ KEY `kji` (`j`,`i`)
+) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result
new file mode 100644
index 00000000000..c2daec327a2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS = 444;
+ERROR HY000: Variable 'rocksdb_new_table_reader_for_compaction_inputs' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result
new file mode 100644
index 00000000000..7bd32950303
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_NO_BLOCK_CACHE;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_NO_BLOCK_CACHE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_NO_BLOCK_CACHE = 444;
+ERROR HY000: Variable 'rocksdb_no_block_cache' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result
new file mode 100644
index 00000000000..59042124dc8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_OVERRIDE_CF_OPTIONS;
+SELECT @start_global_value;
+@start_global_value
+
+"Trying to set variable @@global.ROCKSDB_OVERRIDE_CF_OPTIONS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_OVERRIDE_CF_OPTIONS = 444;
+ERROR HY000: Variable 'rocksdb_override_cf_options' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result
new file mode 100644
index 00000000000..102d4926e65
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_PARANOID_CHECKS;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_PARANOID_CHECKS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_PARANOID_CHECKS = 444;
+ERROR HY000: Variable 'rocksdb_paranoid_checks' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result
new file mode 100644
index 00000000000..5849fe09a20
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result
@@ -0,0 +1,75 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 1"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 0"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 0;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to on"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = on;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to off"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = off;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Trying to set variable @@session.ROCKSDB_PAUSE_BACKGROUND_WORK to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_PAUSE_BACKGROUND_WORK = 444;
+ERROR HY000: Variable 'rocksdb_pause_background_work' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'aaa'"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'bbb'"
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = @start_global_value;
+SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK;
+@@global.ROCKSDB_PAUSE_BACKGROUND_WORK
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result
new file mode 100644
index 00000000000..292ba58a3a3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result
@@ -0,0 +1,114 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(2);
+INSERT INTO valid_values VALUES(3);
+INSERT INTO valid_values VALUES(4);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 1"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 1;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 2"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 2;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+2
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 3"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 3;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+3
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 4"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 4;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+4
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 1"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 1;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 2"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 2;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+2
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 3"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 3;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+3
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 4"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 4;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+4
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 'aaa'"
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = @start_global_value;
+SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@global.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = @start_session_value;
+SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL;
+@@session.ROCKSDB_PERF_CONTEXT_LEVEL
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result
new file mode 100644
index 00000000000..c152ecf1e5a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE = 444;
+ERROR HY000: Variable 'rocksdb_pin_l0_filter_and_index_blocks_in_cache' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result
new file mode 100644
index 00000000000..94eb9e34057
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result
@@ -0,0 +1,101 @@
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\''), (3.14);
+SET @start_global_value = @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+SELECT @start_global_value;
+@start_global_value
+10000
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000000"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000000;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000000
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000000
+"Trying to set variable @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 444;
+ERROR HY000: Variable 'rocksdb_rate_limiter_bytes_per_sec' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 'aaa'"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000000
+"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 3.14"
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 3.14;
+Got one of the listed errors
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+1000000000000
+SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = @start_global_value;
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC;
+@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+10000
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0;
+Warnings:
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1;
+Warnings:
+Warning 1292 Truncated incorrect rocksdb_rate_limiter_bytes_per_s value: '-1'
+Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0.
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result
new file mode 100644
index 00000000000..b218fe034aa
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result
@@ -0,0 +1,65 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('a');
+INSERT INTO valid_values VALUES('b');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+SET @start_global_value = @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+SELECT @start_global_value;
+@start_global_value
+
+SET @start_session_value = @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+SELECT @start_session_value;
+@start_session_value
+
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to a"
+SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = a;
+SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+@@global.ROCKSDB_READ_FREE_RPL_TABLES
+a
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT;
+SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+@@global.ROCKSDB_READ_FREE_RPL_TABLES
+
+"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to b"
+SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = b;
+SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+@@global.ROCKSDB_READ_FREE_RPL_TABLES
+b
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT;
+SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+@@global.ROCKSDB_READ_FREE_RPL_TABLES
+
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to a"
+SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = a;
+SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+@@session.ROCKSDB_READ_FREE_RPL_TABLES
+a
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT;
+SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+@@session.ROCKSDB_READ_FREE_RPL_TABLES
+
+"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to b"
+SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = b;
+SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+@@session.ROCKSDB_READ_FREE_RPL_TABLES
+b
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT;
+SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+@@session.ROCKSDB_READ_FREE_RPL_TABLES
+
+'# Testing with invalid values in global scope #'
+SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = @start_global_value;
+SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES;
+@@global.ROCKSDB_READ_FREE_RPL_TABLES
+
+SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = @start_session_value;
+SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES;
+@@session.ROCKSDB_READ_FREE_RPL_TABLES
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result
new file mode 100644
index 00000000000..e866787efe0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_RECORDS_IN_RANGE;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_RECORDS_IN_RANGE;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 1"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = 1;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 0"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = 0;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 222333"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = 222333;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+222333
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 1"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = 1;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+0
+"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 0"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = 0;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+0
+"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 222333"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = 222333;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+222333
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'aaa'"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'bbb'"
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+SET @@global.ROCKSDB_RECORDS_IN_RANGE = @start_global_value;
+SELECT @@global.ROCKSDB_RECORDS_IN_RANGE;
+@@global.ROCKSDB_RECORDS_IN_RANGE
+0
+SET @@session.ROCKSDB_RECORDS_IN_RANGE = @start_session_value;
+SELECT @@session.ROCKSDB_RECORDS_IN_RANGE;
+@@session.ROCKSDB_RECORDS_IN_RANGE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result
new file mode 100644
index 00000000000..5f6522e4488
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rpl_skip_tx_api_basic.result
@@ -0,0 +1,68 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_RPL_SKIP_TX_API;
+SELECT @start_global_value;
+@start_global_value
+1
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 1"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = 1;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 0"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = 0;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to on"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = on;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to off"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = off;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = DEFAULT;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+"Trying to set variable @@session.ROCKSDB_RPL_SKIP_TX_API to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_RPL_SKIP_TX_API = 444;
+ERROR HY000: Variable 'rocksdb_rpl_skip_tx_api' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_RPL_SKIP_TX_API to 'aaa'"
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+SET @@global.ROCKSDB_RPL_SKIP_TX_API = @start_global_value;
+SELECT @@global.ROCKSDB_RPL_SKIP_TX_API;
+@@global.ROCKSDB_RPL_SKIP_TX_API
+1
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
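rocksdb_rpl_skip_tx_api is GLOBAL-only, so the expected output above records a single "is a GLOBAL variable" failure for the @@session assignment and a "Got one of the listed errors" line for each invalid value. A minimal mysqltest sketch that would produce output of this shape is given below; the statements and error symbols are illustrative assumptions on my part, not copied from the .test files in this commit:

    # hypothetical driver sketch for a GLOBAL-only boolean variable
    SET @start_global_value = @@global.ROCKSDB_RPL_SKIP_TX_API;
    --echo "Trying to set variable @@session.ROCKSDB_RPL_SKIP_TX_API to 444. It should fail because it is not session."
    --error ER_GLOBAL_VARIABLE
    SET @@session.ROCKSDB_RPL_SKIP_TX_API = 444;
    # listing two error codes makes mtr print "Got one of the listed errors"
    --error ER_WRONG_VALUE_FOR_VAR,ER_WRONG_TYPE_FOR_VAR
    SET @@global.ROCKSDB_RPL_SKIP_TX_API = 'aaa';
    SET @@global.ROCKSDB_RPL_SKIP_TX_API = @start_global_value;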
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result
new file mode 100644
index 00000000000..ea80d88f653
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1024);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+SELECT @start_global_value;
+@start_global_value
+3600
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 0"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 0;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1024"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1024;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+1024
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+"Trying to set variable @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 444;
+ERROR HY000: Variable 'rocksdb_seconds_between_stat_computes' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'aaa'"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'bbb'"
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = @start_global_value;
+SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES;
+@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+3600
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result
new file mode 100644
index 00000000000..94a15275900
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result
@@ -0,0 +1,64 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+SELECT @start_global_value;
+@start_global_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 1"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 1;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 0"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 0;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to on"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = on;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Trying to set variable @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 444;
+ERROR HY000: Variable 'rocksdb_signal_drop_index_thread' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'aaa'"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'bbb'"
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = @start_global_value;
+SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD;
+@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result
new file mode 100644
index 00000000000..201bc5009ce
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'aaa'"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'bbb'"
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_global_value;
+SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_session_value;
+SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ;
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result
new file mode 100644
index 00000000000..a843851cf26
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_SKIP_FILL_CACHE;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_SKIP_FILL_CACHE;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 1"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = 1;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 0"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = 0;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to on"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = on;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 1"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = 1;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 0"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = 0;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to on"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = on;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'aaa'"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'bbb'"
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+SET @@global.ROCKSDB_SKIP_FILL_CACHE = @start_global_value;
+SELECT @@global.ROCKSDB_SKIP_FILL_CACHE;
+@@global.ROCKSDB_SKIP_FILL_CACHE
+0
+SET @@session.ROCKSDB_SKIP_FILL_CACHE = @start_session_value;
+SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
+@@session.ROCKSDB_SKIP_FILL_CACHE
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
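rocksdb_skip_fill_cache is settable at both scopes, so the expected output above walks the same set, check, restore-to-DEFAULT cycle twice, once for @@global and once for @@session, and finishes by restoring the saved start values. Roughly, each cycle is driven by statements like the following (an illustrative sketch under that assumption, not the literal .test source):

    # boolean literals such as on/off are reported back as 1/0
    SET @@session.ROCKSDB_SKIP_FILL_CACHE = on;
    SELECT @@session.ROCKSDB_SKIP_FILL_CACHE;
    SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT;
    SET @@session.ROCKSDB_SKIP_FILL_CACHE = @start_session_value;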
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result
new file mode 100644
index 00000000000..a1244723b05
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_basic.result
@@ -0,0 +1,163 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+INSERT INTO valid_values VALUES('true');
+INSERT INTO valid_values VALUES('false');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 0"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 0;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 1"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 1;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to on"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = on;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to off"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = off;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to true"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = true;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to false"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = false;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 0"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 0;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to 1"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = 1;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to on"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = on;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to off"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = off;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to true"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = true;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK to false"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = false;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'aaa'"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK to 'bbb'"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK = @start_global_value;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK = @start_session_value;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result
new file mode 100644
index 00000000000..3e169671cc0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result
@@ -0,0 +1,65 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES("aaa");
+INSERT INTO valid_values VALUES("bbb");
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+SELECT @start_global_value;
+@start_global_value
+.*
+SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+SELECT @start_session_value;
+@start_session_value
+.*
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+aaa
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+bbb
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+aaa
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+bbb
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+'# Testing with invalid values in global scope #'
+SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_global_value;
+SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_session_value;
+SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
+@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+.*
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
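rocksdb_skip_unique_check_tables holds a table-name regex as a string, and the ".*" rows above appear to be its default value echoed back rather than a placeholder. Bare identifiers such as aaa are accepted as string values, which is consistent with the empty invalid_values table for this file. For illustration only (not part of the commit), a typical assignment looks like:

    SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = "t1,nocheck.*";
    SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES;
    SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT;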
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result
new file mode 100644
index 00000000000..2dbf5a55b87
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC;
+SELECT @start_global_value;
+@start_global_value
+600
+"Trying to set variable @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC = 444;
+ERROR HY000: Variable 'rocksdb_stats_dump_period_sec' is a read only variable
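Read-only variables such as rocksdb_stats_dump_period_sec get a much shorter result file: the test only records the start value and verifies that any SET fails. A sketch of the corresponding check follows; the error symbol for the "read only variable" message is my assumption, not taken from this commit:

    SET @start_global_value = @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC;
    SELECT @start_global_value;
    --echo "Trying to set variable @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC to 444. It should fail because it is readonly."
    --error ER_INCORRECT_GLOBAL_LOCAL_VAR
    SET @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC = 444;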
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result
new file mode 100644
index 00000000000..904a0bc536e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_checksums_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_STORE_CHECKSUMS;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_STORE_CHECKSUMS;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 1"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = 1;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 0"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = 0;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to on"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = on;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 1"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = 1;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+0
+"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to 0"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = 0;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+0
+"Trying to set variable @@session.ROCKSDB_STORE_CHECKSUMS to on"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = on;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_STORE_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'aaa'"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_STORE_CHECKSUMS to 'bbb'"
+SET @@global.ROCKSDB_STORE_CHECKSUMS = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+SET @@global.ROCKSDB_STORE_CHECKSUMS = @start_global_value;
+SELECT @@global.ROCKSDB_STORE_CHECKSUMS;
+@@global.ROCKSDB_STORE_CHECKSUMS
+0
+SET @@session.ROCKSDB_STORE_CHECKSUMS = @start_session_value;
+SELECT @@session.ROCKSDB_STORE_CHECKSUMS;
+@@session.ROCKSDB_STORE_CHECKSUMS
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result
new file mode 100644
index 00000000000..46d238d1fa3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result
@@ -0,0 +1,75 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+SELECT @start_global_value;
+@start_global_value
+1
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 1"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 1;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 0"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 0;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to on"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = on;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to off"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = off;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Trying to set variable @@session.ROCKSDB_STRICT_COLLATION_CHECK to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_STRICT_COLLATION_CHECK = 444;
+ERROR HY000: Variable 'rocksdb_strict_collation_check' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'aaa'"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'bbb'"
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = @start_global_value;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK;
+@@global.ROCKSDB_STRICT_COLLATION_CHECK
+1
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result
new file mode 100644
index 00000000000..5f748621d25
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result
@@ -0,0 +1,36 @@
+SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+SELECT @start_global_value;
+@start_global_value
+
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+mytable
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+t.*
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+s.*,t.*
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+
+"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444;
+ERROR HY000: Variable 'rocksdb_strict_collation_exceptions' is a GLOBAL variable and should be set with SET GLOBAL
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result
new file mode 100644
index 00000000000..0161a339082
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS;
+SELECT @start_global_value;
+@start_global_value
+6
+"Trying to set variable @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS = 444;
+ERROR HY000: Variable 'rocksdb_table_cache_numshardbits' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result
new file mode 100644
index 00000000000..6ff47ab9569
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result
@@ -0,0 +1,85 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(100);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+INSERT INTO invalid_values VALUES('\'-1\'');
+INSERT INTO invalid_values VALUES('\'101\'');
+INSERT INTO invalid_values VALUES('\'484436\'');
+SET @start_global_value = @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+SELECT @start_global_value;
+@start_global_value
+10
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 100"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+100
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 1"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 1;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 0"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 0;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 444;
+ERROR HY000: Variable 'rocksdb_table_stats_sampling_pct' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'aaa'"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'bbb'"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '-1'"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '-1';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '101'"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '101';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '484436'"
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '484436';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = @start_global_value;
+SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
+@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT
+10
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
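rocksdb_table_stats_sampling_pct is a GLOBAL-only integer with a bounded range, and the invalid-value section above rejects quoted out-of-range strings such as '-1', '101', and '484436' while leaving the current value untouched. A sketch of that part of the check (error symbols assumed, as above):

    --error ER_WRONG_VALUE_FOR_VAR,ER_WRONG_TYPE_FOR_VAR
    SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '101';
    SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT;
    SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = @start_global_value;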
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result
new file mode 100644
index 00000000000..c9748cc6306
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 1"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 1;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 0"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 0;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to on"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = on;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 1"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 1;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 0"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 0;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to on"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = on;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'aaa'"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'bbb'"
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = @start_global_value;
+SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@global.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = @start_session_value;
+SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG;
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result
new file mode 100644
index 00000000000..ef4007c7549
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_USE_ADAPTIVE_MUTEX;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_USE_ADAPTIVE_MUTEX to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_USE_ADAPTIVE_MUTEX = 444;
+ERROR HY000: Variable 'rocksdb_use_adaptive_mutex' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result
new file mode 100644
index 00000000000..254cc2ceb5d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_USE_FSYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_USE_FSYNC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_USE_FSYNC = 444;
+ERROR HY000: Variable 'rocksdb_use_fsync' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result
new file mode 100644
index 00000000000..c7b874877f8
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_VALIDATE_TABLES;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_VALIDATE_TABLES to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_VALIDATE_TABLES = 444;
+ERROR HY000: Variable 'rocksdb_validate_tables' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result
new file mode 100644
index 00000000000..da4cae7a151
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_checksums_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_VERIFY_CHECKSUMS;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_VERIFY_CHECKSUMS;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 1"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 1;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 0"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 0;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to on"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = on;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 1"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 1;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to 0"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = 0;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Trying to set variable @@session.ROCKSDB_VERIFY_CHECKSUMS to on"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = on;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = DEFAULT;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'aaa'"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+"Trying to set variable @@global.ROCKSDB_VERIFY_CHECKSUMS to 'bbb'"
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+SET @@global.ROCKSDB_VERIFY_CHECKSUMS = @start_global_value;
+SELECT @@global.ROCKSDB_VERIFY_CHECKSUMS;
+@@global.ROCKSDB_VERIFY_CHECKSUMS
+0
+SET @@session.ROCKSDB_VERIFY_CHECKSUMS = @start_session_value;
+SELECT @@session.ROCKSDB_VERIFY_CHECKSUMS;
+@@session.ROCKSDB_VERIFY_CHECKSUMS
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result
new file mode 100644
index 00000000000..7da628b73fd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_WAL_BYTES_PER_SYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 444;
+ERROR HY000: Variable 'rocksdb_wal_bytes_per_sync' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result
new file mode 100644
index 00000000000..fd76a5ec00f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_WAL_DIR;
+SELECT @start_global_value;
+@start_global_value
+
+"Trying to set variable @@global.ROCKSDB_WAL_DIR to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_WAL_DIR = 444;
+ERROR HY000: Variable 'rocksdb_wal_dir' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result
new file mode 100644
index 00000000000..cf11f295c29
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result
@@ -0,0 +1,46 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE;
+SELECT @start_global_value;
+@start_global_value
+2
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1"
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1;
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+2
+"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0"
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 0;
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+2
+"Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session."
+SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444;
+ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 'aaa'"
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+2
+SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value;
+SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
+@@global.ROCKSDB_WAL_RECOVERY_MODE
+2
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result
new file mode 100644
index 00000000000..5f03597df3a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_WAL_SIZE_LIMIT_MB;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_WAL_SIZE_LIMIT_MB to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_WAL_SIZE_LIMIT_MB = 444;
+ERROR HY000: Variable 'rocksdb_wal_size_limit_mb' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result
new file mode 100644
index 00000000000..23f7fc81e7f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_WAL_TTL_SECONDS;
+SELECT @start_global_value;
+@start_global_value
+0
+"Trying to set variable @@global.ROCKSDB_WAL_TTL_SECONDS to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_WAL_TTL_SECONDS = 444;
+ERROR HY000: Variable 'rocksdb_wal_ttl_seconds' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result
new file mode 100644
index 00000000000..0d6f7216e9a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result
@@ -0,0 +1,7 @@
+SET @start_global_value = @@global.ROCKSDB_WHOLE_KEY_FILTERING;
+SELECT @start_global_value;
+@start_global_value
+1
+"Trying to set variable @@global.ROCKSDB_WHOLE_KEY_FILTERING to 444. It should fail because it is readonly."
+SET @@global.ROCKSDB_WHOLE_KEY_FILTERING = 444;
+ERROR HY000: Variable 'rocksdb_whole_key_filtering' is a read only variable
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result
new file mode 100644
index 00000000000..b71ee7f91cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result
@@ -0,0 +1,114 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_WRITE_DISABLE_WAL;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_WRITE_DISABLE_WAL;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 1"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 1;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 0"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 0;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to on"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = on;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to off"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = off;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 1"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 1;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 0"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 0;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to on"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = on;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to off"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = off;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 'aaa'"
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+SET @@global.ROCKSDB_WRITE_DISABLE_WAL = @start_global_value;
+SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL;
+@@global.ROCKSDB_WRITE_DISABLE_WAL
+0
+SET @@session.ROCKSDB_WRITE_DISABLE_WAL = @start_session_value;
+SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL;
+@@session.ROCKSDB_WRITE_DISABLE_WAL
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result
new file mode 100644
index 00000000000..dbe46858c94
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result
@@ -0,0 +1,100 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+SET @start_global_value = @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'aaa'"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'bbb'"
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'bbb';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_global_value;
+SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_session_value;
+SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES;
+@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result
new file mode 100644
index 00000000000..9848e491b80
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_sync_basic.result
@@ -0,0 +1,114 @@
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+SET @start_global_value = @@global.ROCKSDB_WRITE_SYNC;
+SELECT @start_global_value;
+@start_global_value
+0
+SET @start_session_value = @@session.ROCKSDB_WRITE_SYNC;
+SELECT @start_session_value;
+@start_session_value
+0
+'# Setting to valid values in global scope#'
+"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 1"
+SET @@global.ROCKSDB_WRITE_SYNC = 1;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 0"
+SET @@global.ROCKSDB_WRITE_SYNC = 0;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to on"
+SET @@global.ROCKSDB_WRITE_SYNC = on;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+1
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to off"
+SET @@global.ROCKSDB_WRITE_SYNC = off;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+"Setting the global scope variable back to default"
+SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+'# Setting to valid values in session scope#'
+"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 1"
+SET @@session.ROCKSDB_WRITE_SYNC = 1;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 0"
+SET @@session.ROCKSDB_WRITE_SYNC = 0;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to on"
+SET @@session.ROCKSDB_WRITE_SYNC = on;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+1
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to off"
+SET @@session.ROCKSDB_WRITE_SYNC = off;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+"Setting the session scope variable back to default"
+SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+'# Testing with invalid values in global scope #'
+"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 'aaa'"
+SET @@global.ROCKSDB_WRITE_SYNC = 'aaa';
+Got one of the listed errors
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+SET @@global.ROCKSDB_WRITE_SYNC = @start_global_value;
+SELECT @@global.ROCKSDB_WRITE_SYNC;
+@@global.ROCKSDB_WRITE_SYNC
+0
+SET @@session.ROCKSDB_WRITE_SYNC = @start_session_value;
+SELECT @@session.ROCKSDB_WRITE_SYNC;
+@@session.ROCKSDB_WRITE_SYNC
+0
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test
new file mode 100644
index 00000000000..fefd9e39af2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test
@@ -0,0 +1,39 @@
+--source include/not_embedded.inc
+--source include/not_threadpool.inc
+
+# This test verifies that *all* MyRocks system variables are tested by the
+# rocksdb_sys_vars suite. For every MyRocks system variable there must be a
+# <variable_name>_basic.test file.
+#
+
+#
+# We could compute the difference in Perl or in SQL; since this is a MySQL test suite, do it in SQL.
+#
+
+perl;
+ use File::Basename;
+ my $dirname=dirname($ENV{MYSQLTEST_FILE});
+ my @all_tests=<$dirname/*_basic{,_32,_64}.test>;
+ open(F, '>', "$ENV{MYSQLTEST_VARDIR}/tmp/rocksdb_sys_vars.all_vars.txt") or die;
+ binmode F;
+ print F join "\n", sort map { s/_basic(_32|_64)?\.test$//; basename $_ } @all_tests;
+EOF
+
+create table t1 (test_name text) engine=MyISAM;
+create table t2 (variable_name text) engine=MyISAM;
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval load data infile "$MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1;
+
+insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%";
+insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%";
+
+--sorted_result
+select variable_name as `There should be *no* long test name listed below:` from t2
+ where length(variable_name) > 50;
+
+--sorted_result
+select variable_name as `There should be *no* variables listed below:` from t2
+ left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name;
+
+drop table t1;
+drop table t2;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test
new file mode 100644
index 00000000000..a6b753ba87a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ACCESS_HINT_ON_COMPACTION_START
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test
new file mode 100644
index 00000000000..b6ccea0f882
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ADVISE_RANDOM_ON_OPEN
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test
new file mode 100644
index 00000000000..b250aa5eb7f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test
new file mode 100644
index 00000000000..067f5820045
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ALLOW_MMAP_READS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test
new file mode 100644
index 00000000000..51fbf62d5a9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ALLOW_MMAP_WRITES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test
new file mode 100644
index 00000000000..c38d0c7b210
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_os_buffer_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ALLOW_OS_BUFFER
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test
new file mode 100644
index 00000000000..e0c2bd366cc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_BACKGROUND_SYNC
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test
new file mode 100644
index 00000000000..8e49110513a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BASE_BACKGROUND_COMPACTIONS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test
new file mode 100644
index 00000000000..68715796a04
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BLOCK_CACHE_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test
new file mode 100644
index 00000000000..2b14e1fb654
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BLOCK_RESTART_INTERVAL
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test
new file mode 100644
index 00000000000..11d18e3223f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BLOCK_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test
new file mode 100644
index 00000000000..a54700aae4d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BLOCK_SIZE_DEVIATION
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test
new file mode 100644
index 00000000000..6cd9e0e1560
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_BULK_LOAD
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test
new file mode 100644
index 00000000000..1b57255202b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_BULK_LOAD_SIZE
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test
new file mode 100644
index 00000000000..2958273695d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_BYTES_PER_SYNC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test
new file mode 100644
index 00000000000..db1f5936812
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test
new file mode 100644
index 00000000000..44126e35f57
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test
@@ -0,0 +1,17 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(99);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_CHECKSUMS_PCT
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test
new file mode 100644
index 00000000000..c47c62e41b4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test
@@ -0,0 +1,8 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_COLLECT_SST_PROPERTIES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test
new file mode 100644
index 00000000000..62c8e680aab
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_COMMIT_IN_THE_MIDDLE
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test
new file mode 100644
index 00000000000..c65f722fe6e
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('abc');
+INSERT INTO valid_values VALUES('def');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+
+--let $sys_var=ROCKSDB_COMPACT_CF
+--let $read_only=0
+--let $session=0
+--let $sticky=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test
new file mode 100644
index 00000000000..ba45defb7a1
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+# Attempt to set the value to -1. The value should first be truncated to 0 and a warning
+# should then be generated, because the variable cannot be set to or from 0.
+SET @@global.rocksdb_compaction_readahead_size = -1;
+SELECT @@global.rocksdb_compaction_readahead_size;
+
+--let $sys_var=ROCKSDB_COMPACTION_READAHEAD_SIZE
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test
new file mode 100644
index 00000000000..5ec719baeb6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+INSERT INTO valid_values VALUES(2000000);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'2000001\'');
+
+--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test
new file mode 100644
index 00000000000..6c35ed634f7
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test
new file mode 100644
index 00000000000..ff132f7049c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test
new file mode 100644
index 00000000000..b38c79b5ef0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+INSERT INTO valid_values VALUES(2000000);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'2000001\'');
+
+--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test
new file mode 100644
index 00000000000..2850c7a1a38
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test
@@ -0,0 +1,29 @@
+--source include/have_rocksdb.inc
+
+--eval SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT
+
+# Test using tmp/abc
+--replace_result $MYSQL_TMP_DIR TMP
+--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/abc'
+--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT
+--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT
+
+# Test using tmp/def
+--replace_result $MYSQL_TMP_DIR TMP
+--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/def'
+--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT
+--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT
+
+# Should fail because it is not a session variable
+--Error ER_GLOBAL_VARIABLE
+--eval SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444
+
+# Set back to the original value.
+# Validate that this fails to create a checkpoint, since the original value
+# is DEFAULT, i.e. the empty string ''.
+--error ER_UNKNOWN_ERROR
+--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value
+
+# clean up
+--exec rm -r $MYSQL_TMP_DIR/abc
+--exec rm -r $MYSQL_TMP_DIR/def
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test
new file mode 100644
index 00000000000..77422aa164c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_CREATE_IF_MISSING
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test
new file mode 100644
index 00000000000..b8aeb6c9b19
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test
new file mode 100644
index 00000000000..20f33d6bdfd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_DATADIR
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test
new file mode 100644
index 00000000000..7ef5422dcd3
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_DB_WRITE_BUFFER_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test
new file mode 100644
index 00000000000..52e25ab358f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test
new file mode 100644
index 00000000000..f756d1eb2f5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_DEFAULT_CF_OPTIONS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test
new file mode 100644
index 00000000000..744bd946d9a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test
new file mode 100644
index 00000000000..061a4c902b5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test
@@ -0,0 +1,20 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_DISABLE_2PC
+--let $read_only=0
+--let $session=0
+--let $sticky=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test
new file mode 100644
index 00000000000..b365370f214
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disabledatasync_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_DISABLEDATASYNC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test
new file mode 100644
index 00000000000..407093acbea
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_ENABLE_BULK_LOAD_API
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test
new file mode 100644
index 00000000000..251d7d5803d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_ENABLE_THREAD_TRACKING
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test
new file mode 100644
index 00000000000..9d6502598b0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test
new file mode 100644
index 00000000000..495770e8efb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_ERROR_IF_EXISTS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test
new file mode 100644
index 00000000000..7fc4c3a77f9
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test
@@ -0,0 +1,44 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+##
+## Test cardinality from ANALYZE TABLE statements with and without flushing the memtable
+##
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 (b) VALUES (1);
+INSERT INTO t1 (b) VALUES (2);
+INSERT INTO t1 (b) VALUES (3);
+--sorted_result
+SELECT * FROM t1;
+
+set session rocksdb_flush_memtable_on_analyze=off;
+ANALYZE TABLE t1;
+SHOW INDEXES FROM t1;
+
+set session rocksdb_flush_memtable_on_analyze=on;
+ANALYZE TABLE t1;
+SHOW INDEXES FROM t1;
+DROP TABLE t1;
+
+##
+## Test the data length reported by SHOW TABLE STATUS for tables with few rows
+##
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 (b) VALUES (1);
+INSERT INTO t1 (b) VALUES (2);
+INSERT INTO t1 (b) VALUES (3);
+--sorted_result
+SELECT * FROM t1;
+
+SHOW TABLE STATUS LIKE 't1';
+ANALYZE TABLE t1;
+SHOW TABLE STATUS LIKE 't1';
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test
new file mode 100644
index 00000000000..9529fae7516
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test
@@ -0,0 +1,17 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+
+--let $sys_var=ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW
+--let $read_only=0
+--let $session=0
+--let $sticky=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test
new file mode 100644
index 00000000000..08e8d0c16de
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test
@@ -0,0 +1,23 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+# Attempt to set the value to -1. The value should first be truncated to 0 and a warning
+# should then be generated, because the variable cannot be set to or from 0.
+SET @@session.rocksdb_force_index_records_in_range = -1;
+SELECT @@session.rocksdb_force_index_records_in_range;
+
+--let $sys_var=ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test
new file mode 100644
index 00000000000..5899f7b67d0
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_HASH_INDEX_ALLOW_COLLISION
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test
new file mode 100644
index 00000000000..711703c2148
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_INDEX_TYPE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test
new file mode 100644
index 00000000000..990a9a62148
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test
@@ -0,0 +1,21 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('fatal_level');
+INSERT INTO valid_values VALUES('error_level');
+INSERT INTO valid_values VALUES('warn_level');
+INSERT INTO valid_values VALUES('info_level');
+INSERT INTO valid_values VALUES('debug_level');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES(5);
+INSERT INTO invalid_values VALUES(6);
+INSERT INTO invalid_values VALUES('foo');
+
+--let $sys_var=ROCKSDB_INFO_LOG_LEVEL
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test
new file mode 100644
index 00000000000..741e20fac9f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_IS_FD_CLOSE_ON_EXEC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test
new file mode 100644
index 00000000000..511f9f8a06d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_KEEP_LOG_FILE_NUM
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test
new file mode 100644
index 00000000000..52f7f502d96
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test
@@ -0,0 +1,22 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+INSERT INTO valid_values VALUES('true');
+INSERT INTO valid_values VALUES('false');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES(2);
+INSERT INTO invalid_values VALUES(1000);
+
+--let $sys_var=ROCKSDB_LOCK_SCANNED_ROWS
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test
new file mode 100644
index 00000000000..0c524db9cbd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_LOCK_WAIT_TIMEOUT
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test
new file mode 100644
index 00000000000..76aee161efc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_LOG_FILE_TIME_TO_ROLL
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test
new file mode 100644
index 00000000000..48d14fbf9f6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MANIFEST_PREALLOCATION_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test
new file mode 100644
index 00000000000..441c0577c10
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test
new file mode 100644
index 00000000000..de3ab148ec6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_BACKGROUND_FLUSHES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test
new file mode 100644
index 00000000000..b0dca55e18b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_LOG_FILE_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test
new file mode 100644
index 00000000000..9464f0aa1ad
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_MANIFEST_FILE_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test
new file mode 100644
index 00000000000..c82af39f7b5
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_OPEN_FILES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test
new file mode 100644
index 00000000000..a9e440d4b98
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test
@@ -0,0 +1,16 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_MAX_ROW_LOCKS
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test
new file mode 100644
index 00000000000..0ebc9c204fb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_SUBCOMPACTIONS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test
new file mode 100644
index 00000000000..0f881868ae2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_MAX_TOTAL_WAL_SIZE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test
new file mode 100644
index 00000000000..8e2dda64d4a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test
@@ -0,0 +1,50 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+set session rocksdb_merge_buf_size=250;
+set session rocksdb_merge_combine_read_size=1000;
+
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100));
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+DROP INDEX kj on t1;
+DROP INDEX kij ON t1;
+
+ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+# Reverse CF testing: data needs to be added to the SSTFileWriter in reverse order
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB;
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100));
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+SELECT COUNT(*) FROM t1 FORCE INDEX(kb);
+DROP TABLE t1;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test
new file mode 100644
index 00000000000..48e89137344
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test
@@ -0,0 +1,32 @@
+--source include/have_rocksdb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+set session rocksdb_merge_buf_size=250;
+set session rocksdb_merge_combine_read_size=1000;
+
+CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
+
+--disable_query_log
+let $max = 100;
+let $i = 1;
+while ($i <= $max) {
+ let $insert = INSERT INTO t1 VALUES ($i, $i);
+ inc $i;
+ eval $insert;
+}
+--enable_query_log
+
+ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
+ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+DROP INDEX kj on t1;
+DROP INDEX kij ON t1;
+
+ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test
new file mode 100644
index 00000000000..cc84a2c60be
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test
new file mode 100644
index 00000000000..39c84fb2c2d
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_NO_BLOCK_CACHE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test
new file mode 100644
index 00000000000..bc680c0772a
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_OVERRIDE_CF_OPTIONS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test
new file mode 100644
index 00000000000..5b0e4798678
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test
@@ -0,0 +1,7 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_PARANOID_CHECKS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test
new file mode 100644
index 00000000000..fd2f3098840
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test
@@ -0,0 +1,20 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_PAUSE_BACKGROUND_WORK
+--let $read_only=0
+--let $session=0
+--let $sticky=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test
new file mode 100644
index 00000000000..1fd61a80955
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(2);
+INSERT INTO valid_values VALUES(3);
+INSERT INTO valid_values VALUES(4);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_PERF_CONTEXT_LEVEL
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test
new file mode 100644
index 00000000000..af095097909
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test
new file mode 100644
index 00000000000..d683e8045da
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test
@@ -0,0 +1,63 @@
+--source include/have_rocksdb.inc
+
+# Attempt to set the value - this should generate a warning as we can't set it to or from 0
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000;
+
+# Now shut down and come back up with the rate limiter enabled and retest setting the variable
+
+# Write file to make mysql-test-run.pl expect the "crash", but don't restart the
+# server until it is told to
+--let $_server_id= `SELECT @@server_id`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--exec echo "wait" >$_expect_file_name
+
+# Send shutdown to the connected server and give it 10 seconds to die before
+# zapping it
+shutdown_server 10;
+
+# Attempt to restart the server with the rate limiter on
+--exec echo "restart:--rocksdb_rate_limiter_bytes_per_sec=10000" >$_expect_file_name
+--sleep 5
+
+# Wait for reconnect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
+
+# The valid_values table lists the values that we want to make sure that the system will allow
+# us to set for rocksdb_rate_limiter_bytes_per_sec
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000);
+
+# The invalid_values table lists the values that we don't want to allow for the variable
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\''), (3.14);
+
+# Test all the valid and invalid values
+--let $sys_var=ROCKSDB_RATE_LIMITER_BYTES_PER_SEC
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
+
+# Zero is an invalid value if the rate limiter is turned on; it won't be rejected by the
+# SET command, but it will generate a warning.
+
+# Attempt to set the value to 0 - this should generate a warning as we can't set it to or from 0
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0;
+
+# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as
+# we can't set it to or from 0
+SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1;
+
+# Restart the server without the rate limiter
+--exec echo "wait" >$_expect_file_name
+shutdown_server 10;
+--exec echo "restart" >$_expect_file_name
+--sleep 5
+
+# Wait for reconnect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--disable_reconnect
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test
new file mode 100644
index 00000000000..9ff20edcfb2
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test
@@ -0,0 +1,15 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES('a');
+INSERT INTO valid_values VALUES('b');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+
+--let $sys_var=ROCKSDB_READ_FREE_RPL_TABLES
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test
new file mode 100644
index 00000000000..4fab0b3123c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(222333);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_RECORDS_IN_RANGE
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test
new file mode 100644
index 00000000000..f6c0a219a9f
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rpl_skip_tx_api_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_RPL_SKIP_TX_API
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test
new file mode 100644
index 00000000000..a71df41affc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1024);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test
new file mode 100644
index 00000000000..b33f444199b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test
@@ -0,0 +1,19 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_SIGNAL_DROP_INDEX_THREAD
+--let $read_only=0
+--let $session=0
+--let $sticky=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test
new file mode 100644
index 00000000000..80a9c4b3c43
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_SKIP_BLOOM_FILTER_ON_READ
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test
new file mode 100644
index 00000000000..2465e569f79
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_SKIP_FILL_CACHE
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test
new file mode 100644
index 00000000000..fe90a49365b
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_basic.test
@@ -0,0 +1,21 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+INSERT INTO valid_values VALUES('true');
+INSERT INTO valid_values VALUES('false');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test
new file mode 100644
index 00000000000..c64eeedb594
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test
@@ -0,0 +1,15 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES("aaa");
+INSERT INTO valid_values VALUES("bbb");
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+
+--let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK_TABLES
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test
new file mode 100644
index 00000000000..7854faa8ddf
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_STATS_DUMP_PERIOD_SEC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test
new file mode 100644
index 00000000000..023b6420b96
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_checksums_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_STORE_CHECKSUMS
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test
new file mode 100644
index 00000000000..eabc45ef6be
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test
@@ -0,0 +1,19 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_STRICT_COLLATION_CHECK
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test
new file mode 100644
index 00000000000..4eb96488840
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test
@@ -0,0 +1,35 @@
+--source include/have_rocksdb.inc
+
+# We cannot use the rocksdb_sys_var.inc script here because some of the strings
+# we set need to be quoted and that doesn't work with that script. Run through
+# the valid options by hand instead.
+
+SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+SELECT @start_global_value;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "";
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default."
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
+
+--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session."
+--Error ER_GLOBAL_VARIABLE
+SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444;
+
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value;
+SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test
new file mode 100644
index 00000000000..77da9df9acd
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_TABLE_CACHE_NUMSHARDBITS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test
new file mode 100644
index 00000000000..c3016742042
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test
@@ -0,0 +1,22 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(100);
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+INSERT INTO invalid_values VALUES('\'-1\'');
+INSERT INTO invalid_values VALUES('\'101\'');
+INSERT INTO invalid_values VALUES('\'484436\'');
+
+--let $sys_var=ROCKSDB_TABLE_STATS_SAMPLING_PCT
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test
new file mode 100644
index 00000000000..302a4173efc
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_UNSAFE_FOR_BINLOG
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test
new file mode 100644
index 00000000000..a0f0a212987
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_USE_ADAPTIVE_MUTEX
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test
new file mode 100644
index 00000000000..0d8e35d03cb
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_USE_FSYNC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test
new file mode 100644
index 00000000000..6eb965c5863
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_VALIDATE_TABLES
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test
new file mode 100644
index 00000000000..d8c9c559703
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_checksums_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_VERIFY_CHECKSUMS
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test
new file mode 100644
index 00000000000..eeeeed8f767
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test
new file mode 100644
index 00000000000..0d667d8de2c
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_WAL_DIR
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test
new file mode 100644
index 00000000000..e0d345b4268
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test
@@ -0,0 +1,17 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_WAL_RECOVERY_MODE
+--let $read_only=0
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
+
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test
new file mode 100644
index 00000000000..d597b5d8016
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_WAL_SIZE_LIMIT_MB
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test
new file mode 100644
index 00000000000..9342c861168
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_WAL_TTL_SECONDS
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test
new file mode 100644
index 00000000000..ff332a6fbe4
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test
@@ -0,0 +1,6 @@
+--source include/have_rocksdb.inc
+
+--let $sys_var=ROCKSDB_WHOLE_KEY_FILTERING
+--let $read_only=1
+--let $session=0
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test
new file mode 100644
index 00000000000..f7fa33e03e6
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_WRITE_DISABLE_WAL
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test
new file mode 100644
index 00000000000..1c9d35f4a57
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+INSERT INTO invalid_values VALUES('\'bbb\'');
+
+--let $sys_var=ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test
new file mode 100644
index 00000000000..3655ddb8c69
--- /dev/null
+++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_sync_basic.test
@@ -0,0 +1,18 @@
+--source include/have_rocksdb.inc
+
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO valid_values VALUES(1);
+INSERT INTO valid_values VALUES(0);
+INSERT INTO valid_values VALUES('on');
+INSERT INTO valid_values VALUES('off');
+
+CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
+INSERT INTO invalid_values VALUES('\'aaa\'');
+
+--let $sys_var=ROCKSDB_WRITE_SYNC
+--let $read_only=0
+--let $session=1
+--source suite/sys_vars/inc/rocksdb_sys_var.inc
+
+DROP TABLE valid_values;
+DROP TABLE invalid_values;
diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc
new file mode 100644
index 00000000000..fc8cd97fc72
--- /dev/null
+++ b/storage/rocksdb/properties_collector.cc
@@ -0,0 +1,555 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header file */
+#include "./properties_collector.h"
+
+/* Standard C++ header files */
+#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
+
+/* MySQL header files */
+#include "./log.h"
+#include "./my_stacktrace.h"
+#include "./sql_array.h"
+
+/* MyRocks header files */
+#include "./rdb_datadic.h"
+#include "./rdb_utils.h"
+
+namespace myrocks {
+
+std::atomic<uint64_t> rocksdb_num_sst_entry_put(0);
+std::atomic<uint64_t> rocksdb_num_sst_entry_delete(0);
+std::atomic<uint64_t> rocksdb_num_sst_entry_singledelete(0);
+std::atomic<uint64_t> rocksdb_num_sst_entry_merge(0);
+std::atomic<uint64_t> rocksdb_num_sst_entry_other(0);
+my_bool rocksdb_compaction_sequential_deletes_count_sd = false;
+
+Rdb_tbl_prop_coll::Rdb_tbl_prop_coll(
+ Rdb_ddl_manager* ddl_manager,
+ Rdb_compact_params params,
+ uint32_t cf_id,
+ const uint8_t table_stats_sampling_pct
+) :
+ m_cf_id(cf_id),
+ m_ddl_manager(ddl_manager),
+ m_last_stats(nullptr),
+ m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l),
+ m_file_size(0), m_params(params),
+ m_table_stats_sampling_pct(table_stats_sampling_pct),
+ m_seed(time(nullptr)),
+ m_card_adj_extra(1.)
+{
+  // We need to adjust the index cardinality numbers based on the sampling
+  // rate so that the output of the "SHOW INDEX" command reflects reality
+  // more closely. It will still be an approximation, just a better one.
+ if (m_table_stats_sampling_pct > 0) {
+ m_card_adj_extra = 100. / m_table_stats_sampling_pct;
+ }
+
+ m_deleted_rows_window.resize(m_params.m_window, false);
+}
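The constructor above scales the sampled distinct-key counts back up by 100 / sampling_pct before they reach SHOW INDEX. A minimal standalone sketch of that arithmetic, assuming a hypothetical card_adjustment() helper (not a MyRocks API):

// Standalone sketch (not part of the patch) of the sampling-based
// cardinality adjustment. All names here are illustrative.
#include <cassert>
#include <cstdint>

// With a 10% sampling rate, only ~1 in 10 keys contributes to the
// distinct-key counters, so the raw counts are scaled back up by 100/pct.
static double card_adjustment(uint8_t sampling_pct) {
  return sampling_pct > 0 ? 100.0 / sampling_pct : 1.0;
}

int main() {
  const uint8_t pct = 10;                  // rocksdb_table_stats_sampling_pct
  const uint64_t observed_distinct = 500;  // counted from sampled keys only
  const auto estimated =
      static_cast<uint64_t>(observed_distinct * card_adjustment(pct));
  assert(estimated == 5000);               // the value SHOW INDEX would see
  return 0;
}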
+
+/*
+ This function is called by RocksDB for every key in the SST file
+*/
+rocksdb::Status
+Rdb_tbl_prop_coll::AddUserKey(
+ const rocksdb::Slice& key, const rocksdb::Slice& value,
+ rocksdb::EntryType type, rocksdb::SequenceNumber seq,
+ uint64_t file_size
+) {
+ if (key.size() >= 4) {
+ AdjustDeletedRows(type);
+
+ m_rows++;
+
+ CollectStatsForRow(key, value, type, file_size);
+ }
+
+ return rocksdb::Status::OK();
+}
+
+void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type)
+{
+ if (m_params.m_window > 0)
+ {
+    // Record the "is deleted" flag in the sliding window. The sliding
+    // window is implemented as a circular buffer in the
+    // m_deleted_rows_window vector; the current position in the buffer is
+    // m_rows % m_deleted_rows_window.size(), and m_deleted_rows is the
+    // current number of 1's in the vector.
+    // Update the counter for the element that is about to be overwritten.
+ bool is_delete= (type == rocksdb::kEntryDelete ||
+ (type == rocksdb::kEntrySingleDelete &&
+ rocksdb_compaction_sequential_deletes_count_sd));
+
+ // Only make changes if the value at the current position needs to change
+ if (is_delete != m_deleted_rows_window[m_window_pos])
+ {
+ // Set or clear the flag at the current position as appropriate
+ m_deleted_rows_window[m_window_pos]= is_delete;
+ if (!is_delete)
+ {
+ m_deleted_rows--;
+ }
+ else if (++m_deleted_rows > m_max_deleted_rows)
+ {
+ m_max_deleted_rows = m_deleted_rows;
+ }
+ }
+
+ if (++m_window_pos == m_params.m_window)
+ {
+ m_window_pos = 0;
+ }
+ }
+}
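AdjustDeletedRows() above keeps a fixed-size circular buffer of "was this entry a delete" flags and tracks the peak number of deletes seen inside any window. A standalone sketch of that data structure with the MyRocks-specific types removed; the DeletionWindow class is illustrative only:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class DeletionWindow {
 public:
  explicit DeletionWindow(size_t window) : m_flags(window, false) {}

  // Record whether the next entry is a delete; keep a running count of
  // deletes inside the window and remember the peak.
  void add(bool is_delete) {
    if (is_delete != m_flags[m_pos]) {
      m_flags[m_pos] = is_delete;
      if (!is_delete)
        --m_deleted;
      else if (++m_deleted > m_max_deleted)
        m_max_deleted = m_deleted;
    }
    if (++m_pos == m_flags.size()) m_pos = 0;  // wrap around
  }

  uint64_t max_deleted() const { return m_max_deleted; }

 private:
  std::vector<bool> m_flags;
  size_t m_pos = 0;
  uint64_t m_deleted = 0, m_max_deleted = 0;
};

int main() {
  DeletionWindow w(4);
  // Three deletes followed by a put: the peak within the 4-entry window is 3.
  w.add(true); w.add(true); w.add(true); w.add(false);
  assert(w.max_deleted() == 3);
  return 0;
}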
+
+Rdb_index_stats* Rdb_tbl_prop_coll::AccessStats(
+ const rocksdb::Slice& key)
+{
+ GL_INDEX_ID gl_index_id = {
+ .cf_id = m_cf_id,
+ .index_id = rdb_netbuf_to_uint32(reinterpret_cast<const uchar*>(key.data()))
+ };
+
+ if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id)
+ {
+ m_keydef = nullptr;
+
+ // starting a new table
+ // add the new element into m_stats
+ m_stats.emplace_back(gl_index_id);
+ m_last_stats = &m_stats.back();
+
+ if (m_ddl_manager)
+ {
+ // safe_find() returns a std::shared_ptr<Rdb_key_def> with the count
+ // incremented (so it can't be deleted out from under us) and with
+ // the mutex locked (if setup has not occurred yet). We must make
+ // sure to free the mutex (via unblock_setup()) when we are done
+ // with this object. Currently this happens earlier in this function
+ // when we are switching to a new Rdb_key_def and when this object
+ // is destructed.
+ m_keydef = m_ddl_manager->safe_find(gl_index_id);
+ if (m_keydef != nullptr)
+ {
+ // resize the array to the number of columns.
+ // It will be initialized with zeroes
+ m_last_stats->m_distinct_keys_per_prefix.resize(
+ m_keydef->get_key_parts());
+ m_last_stats->m_name = m_keydef->get_name();
+ }
+ }
+ m_last_key.clear();
+ }
+
+ return m_last_stats;
+}
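AccessStats() above groups statistics by GL_INDEX_ID, taking the index id from the first four bytes of every key via rdb_netbuf_to_uint32(). A standalone sketch of that decode, assuming (as the netbuf naming suggests) that the prefix is a big-endian 32-bit index number; decode_index_id() is illustrative, not a MyRocks function:

#include <cassert>
#include <cstddef>
#include <cstdint>

static uint32_t decode_index_id(const unsigned char *key, size_t len) {
  assert(len >= 4);  // AddUserKey() above skips keys shorter than 4 bytes
  return (static_cast<uint32_t>(key[0]) << 24) |
         (static_cast<uint32_t>(key[1]) << 16) |
         (static_cast<uint32_t>(key[2]) << 8)  |
          static_cast<uint32_t>(key[3]);
}

int main() {
  const unsigned char key[] = {0x00, 0x00, 0x01, 0x2c, /* pk bytes... */ 0x05};
  assert(decode_index_id(key, sizeof(key)) == 300);  // index id 0x012c
  return 0;
}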
+
+void Rdb_tbl_prop_coll::CollectStatsForRow(
+ const rocksdb::Slice& key, const rocksdb::Slice& value,
+ rocksdb::EntryType type, uint64_t file_size)
+{
+ auto stats = AccessStats(key);
+
+ stats->m_data_size += key.size()+value.size();
+
+ // Incrementing per-index entry-type statistics
+ switch (type) {
+ case rocksdb::kEntryPut:
+ stats->m_rows++;
+ break;
+ case rocksdb::kEntryDelete:
+ stats->m_entry_deletes++;
+ break;
+ case rocksdb::kEntrySingleDelete:
+ stats->m_entry_single_deletes++;
+ break;
+ case rocksdb::kEntryMerge:
+ stats->m_entry_merges++;
+ break;
+ case rocksdb::kEntryOther:
+ stats->m_entry_others++;
+ break;
+ default:
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Unexpected entry type found: %u. "
+ "This should not happen so aborting the system.", type);
+ abort_with_stack_traces();
+ break;
+ }
+
+ stats->m_actual_disk_size += file_size - m_file_size;
+ m_file_size = file_size;
+
+ if (m_keydef != nullptr && ShouldCollectStats())
+ {
+ std::size_t column = 0;
+ bool new_key = true;
+
+ if (!m_last_key.empty())
+ {
+ rocksdb::Slice last(m_last_key.data(), m_last_key.size());
+ new_key = (m_keydef->compare_keys(&last, &key, &column) == 0);
+ }
+
+ if (new_key)
+ {
+ DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size());
+
+ for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++)
+ {
+ stats->m_distinct_keys_per_prefix[i]++;
+ }
+
+      // Assign a new last_key for the next call. We only need to update
+      // the last key if one of the first n-1 columns differs; if the n-1
+      // prefix is the same, there is no point in storing the new key.
+ if (column < stats->m_distinct_keys_per_prefix.size())
+ {
+ m_last_key.assign(key.data(), key.size());
+ }
+ }
+ }
+}
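CollectStatsForRow() above derives per-prefix cardinality by comparing each key against the previous one: when the keys first differ at column c, every prefix that includes column c gains a new distinct value. A standalone sketch of that counting over already-decoded column values; the string columns and the helper below are illustrative, while the real code compares encoded keys with Rdb_key_def::compare_keys():

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Returns the index of the first column where a and b differ
// (a.size() if they are identical).
static size_t first_diff_column(const std::vector<std::string> &a,
                                const std::vector<std::string> &b) {
  size_t c = 0;
  while (c < a.size() && a[c] == b[c]) ++c;
  return c;
}

int main() {
  // Keys over a 2-part index, already in sorted order as in an SST file.
  const std::vector<std::vector<std::string>> keys = {
      {"1", "a"}, {"1", "b"}, {"2", "a"}};
  std::vector<uint64_t> distinct_per_prefix(2, 0);

  std::vector<std::string> last;
  for (const auto &key : keys) {
    const size_t col = last.empty() ? 0 : first_diff_column(last, key);
    for (size_t i = col; i < distinct_per_prefix.size(); ++i)
      ++distinct_per_prefix[i];
    if (col < distinct_per_prefix.size()) last = key;
  }
  assert(distinct_per_prefix[0] == 2);  // distinct values of (i)
  assert(distinct_per_prefix[1] == 3);  // distinct values of (i, j)
  return 0;
}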
+
+const char* Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__";
+
+/*
+  This function is called by RocksDB to compute the properties to store
+  in the SST file
+*/
+rocksdb::Status
+Rdb_tbl_prop_coll::Finish(
+ rocksdb::UserCollectedProperties* properties
+) {
+ uint64_t num_sst_entry_put = 0;
+ uint64_t num_sst_entry_delete = 0;
+ uint64_t num_sst_entry_singledelete = 0;
+ uint64_t num_sst_entry_merge = 0;
+ uint64_t num_sst_entry_other = 0;
+
+ for (auto it = m_stats.begin(); it != m_stats.end(); it++)
+ {
+ num_sst_entry_put += it->m_rows;
+ num_sst_entry_delete += it->m_entry_deletes;
+ num_sst_entry_singledelete += it->m_entry_single_deletes;
+ num_sst_entry_merge += it->m_entry_merges;
+ num_sst_entry_other += it->m_entry_others;
+ }
+
+ if (num_sst_entry_put > 0)
+ {
+ rocksdb_num_sst_entry_put += num_sst_entry_put;
+ }
+
+ if (num_sst_entry_delete > 0)
+ {
+ rocksdb_num_sst_entry_delete += num_sst_entry_delete;
+ }
+
+ if (num_sst_entry_singledelete > 0)
+ {
+ rocksdb_num_sst_entry_singledelete += num_sst_entry_singledelete;
+ }
+
+ if (num_sst_entry_merge > 0)
+ {
+ rocksdb_num_sst_entry_merge += num_sst_entry_merge;
+ }
+
+ if (num_sst_entry_other > 0)
+ {
+ rocksdb_num_sst_entry_other += num_sst_entry_other;
+ }
+
+ properties->insert({INDEXSTATS_KEY,
+ Rdb_index_stats::materialize(m_stats, m_card_adj_extra)});
+ return rocksdb::Status::OK();
+}
+
+bool Rdb_tbl_prop_coll::NeedCompact() const {
+ return
+ m_params.m_deletes &&
+ (m_params.m_window > 0) &&
+ (m_file_size > m_params.m_file_size) &&
+ (m_max_deleted_rows > m_params.m_deletes);
+}
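NeedCompact() above is how the collector asks RocksDB to schedule this file for compaction: only when deletion tracking is enabled, the file exceeds the configured size, and some window held more deletes than allowed. A small worked sketch with concrete numbers; the Params struct mirrors Rdb_compact_params but is re-declared so the snippet stands alone:

#include <cassert>
#include <cstdint>

struct Params { uint64_t deletes, window, file_size; };

static bool need_compact(const Params &p, uint64_t file_size,
                         uint64_t max_deleted_rows) {
  return p.deletes && p.window > 0 && file_size > p.file_size &&
         max_deleted_rows > p.deletes;
}

int main() {
  // "If any 10000-entry window holds more than 5000 deletes and the file is
  // larger than 1 MB, mark it for compaction."
  const Params p = {5000, 10000, 1 << 20};
  assert(need_compact(p, 2 << 20, 6000));
  assert(!need_compact(p, 2 << 20, 4000));  // not enough deletes in a window
  return 0;
}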
+
+bool Rdb_tbl_prop_coll::ShouldCollectStats() {
+ // Zero means that we'll use all the keys to update statistics.
+ if (!m_table_stats_sampling_pct ||
+ RDB_TBL_STATS_SAMPLE_PCT_MAX == m_table_stats_sampling_pct) {
+ return true;
+ }
+
+ int val = rand_r(&m_seed) %
+ (RDB_TBL_STATS_SAMPLE_PCT_MAX - RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) +
+ RDB_TBL_STATS_SAMPLE_PCT_MIN;
+
+ DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN);
+ DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX);
+
+ return val <= m_table_stats_sampling_pct;
+}
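ShouldCollectStats() above thins the statistics workload by sampling: draw a value uniformly in the allowed percentage range and collect the row only if it falls at or below the configured percentage. A standalone sketch of the same decision using <random> instead of rand_r(); the 1..100 bounds mirror RDB_TBL_STATS_SAMPLE_PCT_MIN/MAX but are an assumption of this sketch:

#include <cstdint>
#include <cstdio>
#include <random>

constexpr int PCT_MIN = 1;
constexpr int PCT_MAX = 100;

static bool should_collect(uint8_t sampling_pct, std::mt19937 &rng) {
  if (sampling_pct == 0 || sampling_pct == PCT_MAX)
    return true;  // zero (and 100) mean "use every key"
  std::uniform_int_distribution<int> dist(PCT_MIN, PCT_MAX);
  return dist(rng) <= sampling_pct;
}

int main() {
  std::mt19937 rng(42);
  int collected = 0;
  for (int i = 0; i < 100000; ++i)
    collected += should_collect(10, rng);
  // Expect roughly 10% of rows to be sampled.
  std::printf("sampled %d of 100000 rows\n", collected);
  return 0;
}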
+
+/*
+  Returns the same information as above, but in a human-readable form for logging
+*/
+rocksdb::UserCollectedProperties
+Rdb_tbl_prop_coll::GetReadableProperties() const {
+ std::string s;
+#ifdef DBUG_OFF
+ s.append("[...");
+ s.append(std::to_string(m_stats.size()));
+ s.append(" records...]");
+#else
+ bool first = true;
+ for (auto it : m_stats) {
+ if (first) {
+ first = false;
+ } else {
+ s.append(",");
+ }
+ s.append(GetReadableStats(it));
+ }
+#endif
+ return rocksdb::UserCollectedProperties{{INDEXSTATS_KEY, s}};
+}
+
+std::string
+Rdb_tbl_prop_coll::GetReadableStats(
+ const Rdb_index_stats& it
+) {
+ std::string s;
+ s.append("(");
+ s.append(std::to_string(it.m_gl_index_id.cf_id));
+ s.append(", ");
+ s.append(std::to_string(it.m_gl_index_id.index_id));
+ s.append("):{name:");
+ s.append(it.m_name);
+ s.append(", size:");
+ s.append(std::to_string(it.m_data_size));
+ s.append(", m_rows:");
+ s.append(std::to_string(it.m_rows));
+ s.append(", m_actual_disk_size:");
+ s.append(std::to_string(it.m_actual_disk_size));
+ s.append(", deletes:");
+ s.append(std::to_string(it.m_entry_deletes));
+ s.append(", single_deletes:");
+ s.append(std::to_string(it.m_entry_single_deletes));
+ s.append(", merges:");
+ s.append(std::to_string(it.m_entry_merges));
+ s.append(", others:");
+ s.append(std::to_string(it.m_entry_others));
+ s.append(", distincts per prefix: [");
+ for (auto num : it.m_distinct_keys_per_prefix) {
+ s.append(std::to_string(num));
+ s.append(" ");
+ }
+ s.append("]}");
+ return s;
+}
+
+/*
+  Given the properties of an SST file, reads the index stats from them and
+  returns them through out_stats_vector.
+*/
+
+void Rdb_tbl_prop_coll::read_stats_from_tbl_props(
+ const std::shared_ptr<const rocksdb::TableProperties>& table_props,
+ std::vector<Rdb_index_stats>* out_stats_vector)
+{
+ DBUG_ASSERT(out_stats_vector != nullptr);
+ const auto& user_properties = table_props->user_collected_properties;
+ auto it2 = user_properties.find(std::string(INDEXSTATS_KEY));
+ if (it2 != user_properties.end())
+ {
+ auto result __attribute__((__unused__)) =
+ Rdb_index_stats::unmaterialize(it2->second, out_stats_vector);
+ DBUG_ASSERT(result == 0);
+ }
+}
+
+
+/*
+ Serializes an array of Rdb_index_stats into a network string.
+*/
+std::string Rdb_index_stats::materialize(
+ const std::vector<Rdb_index_stats>& stats,
+ const float card_adj_extra)
+{
+ String ret;
+ rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES);
+ for (auto i : stats) {
+ rdb_netstr_append_uint32(&ret, i.m_gl_index_id.cf_id);
+ rdb_netstr_append_uint32(&ret, i.m_gl_index_id.index_id);
+ DBUG_ASSERT(sizeof i.m_data_size <= 8);
+ rdb_netstr_append_uint64(&ret, i.m_data_size);
+ rdb_netstr_append_uint64(&ret, i.m_rows);
+ rdb_netstr_append_uint64(&ret, i.m_actual_disk_size);
+ rdb_netstr_append_uint64(&ret, i.m_distinct_keys_per_prefix.size());
+ rdb_netstr_append_uint64(&ret, i.m_entry_deletes);
+ rdb_netstr_append_uint64(&ret, i.m_entry_single_deletes);
+ rdb_netstr_append_uint64(&ret, i.m_entry_merges);
+ rdb_netstr_append_uint64(&ret, i.m_entry_others);
+ for (auto num_keys : i.m_distinct_keys_per_prefix) {
+ float upd_num_keys = num_keys * card_adj_extra;
+ rdb_netstr_append_uint64(&ret, static_cast<int64_t>(upd_num_keys));
+ }
+ }
+
+ return std::string((char*) ret.ptr(), ret.length());
+}
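materialize() above packs the version number followed by one fixed-width record per index into a string via the rdb_netstr_append_uint*() helpers. A standalone sketch of that byte layout, assuming those helpers write big-endian ("network order") integers as their names suggest; append_be() is illustrative, not the MyRocks helper:

#include <cassert>
#include <cstdint>
#include <string>

// Append the low `bytes` bytes of v in big-endian order.
static void append_be(std::string *out, uint64_t v, int bytes) {
  for (int shift = (bytes - 1) * 8; shift >= 0; shift -= 8)
    out->push_back(static_cast<char>((v >> shift) & 0xff));
}

int main() {
  std::string buf;
  append_be(&buf, 2, 2);     // version = INDEX_STATS_VERSION_ENTRY_TYPES
  append_be(&buf, 7, 4);     // cf_id
  append_be(&buf, 300, 4);   // index_id
  append_be(&buf, 4096, 8);  // data size, then rows, disk size, ...
  assert(buf.size() == 2 + 4 + 4 + 8);
  assert(static_cast<unsigned char>(buf[1]) == 2);     // version 0x0002
  assert(static_cast<unsigned char>(buf[9]) == 0x2c);  // 300 == 0x012c
  return 0;
}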
+
+/**
+ @brief
+ Reads an array of Rdb_index_stats from a string.
+ @return 1 if it detects any inconsistency in the input
+ @return 0 if completes successfully
+*/
+int Rdb_index_stats::unmaterialize(
+ const std::string& s, std::vector<Rdb_index_stats>* ret)
+{
+ const uchar* p= rdb_std_str_to_uchar_ptr(s);
+ const uchar* p2= p + s.size();
+
+ DBUG_ASSERT(ret != nullptr);
+
+ if (p+2 > p2)
+ {
+ return 1;
+ }
+
+ int version= rdb_netbuf_read_uint16(&p);
+ Rdb_index_stats stats;
+ // Make sure version is within supported range.
+ if (version < INDEX_STATS_VERSION_INITIAL ||
+ version > INDEX_STATS_VERSION_ENTRY_TYPES)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Index stats version %d was outside of supported range. "
+ "This should not happen so aborting the system.", version);
+ abort_with_stack_traces();
+ }
+
+ size_t needed = sizeof(stats.m_gl_index_id.cf_id)+
+ sizeof(stats.m_gl_index_id.index_id)+
+ sizeof(stats.m_data_size)+
+ sizeof(stats.m_rows)+
+ sizeof(stats.m_actual_disk_size)+
+ sizeof(uint64);
+ if (version >= INDEX_STATS_VERSION_ENTRY_TYPES)
+ {
+ needed += sizeof(stats.m_entry_deletes)+
+ sizeof(stats.m_entry_single_deletes)+
+ sizeof(stats.m_entry_merges)+
+ sizeof(stats.m_entry_others);
+ }
+
+ while (p < p2)
+ {
+ if (p+needed > p2)
+ {
+ return 1;
+ }
+ rdb_netbuf_read_gl_index(&p, &stats.m_gl_index_id);
+ stats.m_data_size= rdb_netbuf_read_uint64(&p);
+ stats.m_rows= rdb_netbuf_read_uint64(&p);
+ stats.m_actual_disk_size= rdb_netbuf_read_uint64(&p);
+ stats.m_distinct_keys_per_prefix.resize(rdb_netbuf_read_uint64(&p));
+ if (version >= INDEX_STATS_VERSION_ENTRY_TYPES)
+ {
+ stats.m_entry_deletes= rdb_netbuf_read_uint64(&p);
+ stats.m_entry_single_deletes= rdb_netbuf_read_uint64(&p);
+ stats.m_entry_merges= rdb_netbuf_read_uint64(&p);
+ stats.m_entry_others= rdb_netbuf_read_uint64(&p);
+ }
+ if (p+stats.m_distinct_keys_per_prefix.size()
+ *sizeof(stats.m_distinct_keys_per_prefix[0]) > p2)
+ {
+ return 1;
+ }
+ for (std::size_t i= 0; i < stats.m_distinct_keys_per_prefix.size(); i++)
+ {
+ stats.m_distinct_keys_per_prefix[i]= rdb_netbuf_read_uint64(&p);
+ }
+ ret->push_back(stats);
+ }
+ return 0;
+}
+
+/*
+  Merges one Rdb_index_stats into another. Can be used to compute the stats
+  for an index from the stats of each SST file.
+*/
+void Rdb_index_stats::merge(
+ const Rdb_index_stats& s, bool increment, int64_t estimated_data_len)
+{
+ std::size_t i;
+
+ m_gl_index_id = s.m_gl_index_id;
+ if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size())
+ {
+ m_distinct_keys_per_prefix.resize(s.m_distinct_keys_per_prefix.size());
+ }
+ if (increment)
+ {
+ m_rows += s.m_rows;
+ m_data_size += s.m_data_size;
+
+ /*
+ The Data_length and Avg_row_length are trailing statistics, meaning
+ they don't get updated for the current SST until the next SST is
+ written. So, if rocksdb reports the data_length as 0,
+ we make a reasoned estimate for the data_file_length for the
+ index in the current SST.
+ */
+ m_actual_disk_size += s.m_actual_disk_size ? s.m_actual_disk_size :
+ estimated_data_len * s.m_rows;
+ m_entry_deletes += s.m_entry_deletes;
+ m_entry_single_deletes += s.m_entry_single_deletes;
+ m_entry_merges += s.m_entry_merges;
+ m_entry_others += s.m_entry_others;
+ for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++)
+ {
+ m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i];
+ }
+ }
+ else
+ {
+ m_rows -= s.m_rows;
+ m_data_size -= s.m_data_size;
+ m_actual_disk_size -= s.m_actual_disk_size ? s.m_actual_disk_size :
+ estimated_data_len * s.m_rows;
+ m_entry_deletes -= s.m_entry_deletes;
+ m_entry_single_deletes -= s.m_entry_single_deletes;
+ m_entry_merges -= s.m_entry_merges;
+ m_entry_others -= s.m_entry_others;
+ for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++)
+ {
+ m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i];
+ }
+ }
+}
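merge() above has one subtle branch: because the per-SST disk size is a trailing statistic, an SST can report 0, in which case the merged total falls back to estimated_data_len * rows. A small standalone sketch of just that fallback; the Stats struct and merge_into() are illustrative stand-ins for Rdb_index_stats::merge():

#include <cassert>
#include <cstdint>

struct Stats { int64_t rows, actual_disk_size; };

static void merge_into(Stats *dst, const Stats &src,
                       int64_t estimated_data_len) {
  dst->rows += src.rows;
  // Fall back to an estimate when the SST has not reported its size yet.
  dst->actual_disk_size += src.actual_disk_size
                               ? src.actual_disk_size
                               : estimated_data_len * src.rows;
}

int main() {
  Stats total{0, 0};
  merge_into(&total, Stats{1000, 65536}, /*estimated_data_len=*/100);
  merge_into(&total, Stats{500, 0}, /*estimated_data_len=*/100);  // trailing
  assert(total.rows == 1500);
  assert(total.actual_disk_size == 65536 + 500 * 100);
  return 0;
}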
+
+} // namespace myrocks
diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h
new file mode 100644
index 00000000000..1781d8a26c6
--- /dev/null
+++ b/storage/rocksdb/properties_collector.h
@@ -0,0 +1,190 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ system header files */
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+/* RocksDB header files */
+#include "rocksdb/db.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+
+namespace myrocks {
+
+class Rdb_ddl_manager;
+class Rdb_key_def;
+
+extern std::atomic<uint64_t> rocksdb_num_sst_entry_put;
+extern std::atomic<uint64_t> rocksdb_num_sst_entry_delete;
+extern std::atomic<uint64_t> rocksdb_num_sst_entry_singledelete;
+extern std::atomic<uint64_t> rocksdb_num_sst_entry_merge;
+extern std::atomic<uint64_t> rocksdb_num_sst_entry_other;
+extern my_bool rocksdb_compaction_sequential_deletes_count_sd;
+
+
+struct Rdb_compact_params
+{
+ uint64_t m_deletes, m_window, m_file_size;
+};
+
+
+struct Rdb_index_stats
+{
+ enum {
+ INDEX_STATS_VERSION_INITIAL= 1,
+ INDEX_STATS_VERSION_ENTRY_TYPES= 2,
+ };
+ GL_INDEX_ID m_gl_index_id;
+ int64_t m_data_size, m_rows, m_actual_disk_size;
+ int64_t m_entry_deletes, m_entry_single_deletes;
+ int64_t m_entry_merges, m_entry_others;
+ std::vector<int64_t> m_distinct_keys_per_prefix;
+ std::string m_name; // name is not persisted
+
+ static std::string materialize(const std::vector<Rdb_index_stats>& stats,
+ const float card_adj_extra);
+ static int unmaterialize(const std::string& s,
+ std::vector<Rdb_index_stats>* ret);
+
+ Rdb_index_stats() : Rdb_index_stats({0, 0}) {}
+ explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) :
+ m_gl_index_id(gl_index_id),
+ m_data_size(0),
+ m_rows(0),
+ m_actual_disk_size(0),
+ m_entry_deletes(0),
+ m_entry_single_deletes(0),
+ m_entry_merges(0),
+ m_entry_others(0) {}
+
+ void merge(const Rdb_index_stats& s, bool increment = true,
+ int64_t estimated_data_len = 0);
+};
+
+
+class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector
+{
+ public:
+ Rdb_tbl_prop_coll(
+ Rdb_ddl_manager* ddl_manager,
+ Rdb_compact_params params,
+ uint32_t cf_id,
+ const uint8_t table_stats_sampling_pct
+ );
+
+ /*
+ Override parent class's virtual methods of interest.
+ */
+
+ virtual rocksdb::Status AddUserKey(
+ const rocksdb::Slice& key, const rocksdb::Slice& value,
+ rocksdb::EntryType type, rocksdb::SequenceNumber seq,
+ uint64_t file_size);
+
+ virtual rocksdb::Status Finish(rocksdb::UserCollectedProperties* properties) override;
+
+ virtual const char* Name() const override {
+ return "Rdb_tbl_prop_coll";
+ }
+
+ rocksdb::UserCollectedProperties GetReadableProperties() const override;
+
+ bool NeedCompact() const override;
+
+ public:
+ uint64_t GetMaxDeletedRows() const {
+ return m_max_deleted_rows;
+ }
+
+ static void read_stats_from_tbl_props(
+ const std::shared_ptr<const rocksdb::TableProperties>& table_props,
+ std::vector<Rdb_index_stats>* out_stats_vector);
+
+ private:
+ static std::string GetReadableStats(const Rdb_index_stats& it);
+
+ bool ShouldCollectStats();
+ void CollectStatsForRow(const rocksdb::Slice& key,
+ const rocksdb::Slice& value, rocksdb::EntryType type, uint64_t file_size);
+ Rdb_index_stats* AccessStats(const rocksdb::Slice& key);
+ void AdjustDeletedRows(rocksdb::EntryType type);
+
+ private:
+ uint32_t m_cf_id;
+ std::shared_ptr<Rdb_key_def> m_keydef;
+ Rdb_ddl_manager* m_ddl_manager;
+ std::vector<Rdb_index_stats> m_stats;
+ Rdb_index_stats* m_last_stats;
+ static const char* INDEXSTATS_KEY;
+
+ // last added key
+ std::string m_last_key;
+
+ // floating window to count deleted rows
+ std::vector<bool> m_deleted_rows_window;
+ uint64_t m_rows, m_window_pos, m_deleted_rows, m_max_deleted_rows;
+ uint64_t m_file_size;
+ Rdb_compact_params m_params;
+ uint8_t m_table_stats_sampling_pct;
+ unsigned int m_seed;
+ float m_card_adj_extra;
+};
+
+
+class Rdb_tbl_prop_coll_factory
+ : public rocksdb::TablePropertiesCollectorFactory {
+ public:
+ explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager* ddl_manager)
+ : m_ddl_manager(ddl_manager) {
+ }
+
+ /*
+ Override parent class's virtual methods of interest.
+ */
+
+ virtual rocksdb::TablePropertiesCollector* CreateTablePropertiesCollector(
+ rocksdb::TablePropertiesCollectorFactory::Context context) override {
+ return new Rdb_tbl_prop_coll(
+ m_ddl_manager, m_params, context.column_family_id,
+ m_table_stats_sampling_pct);
+ }
+
+ virtual const char* Name() const override {
+ return "Rdb_tbl_prop_coll_factory";
+ }
+
+ public:
+ void SetCompactionParams(const Rdb_compact_params& params) {
+ m_params = params;
+ }
+
+ void SetTableStatsSamplingPct(const uint8_t table_stats_sampling_pct) {
+ m_table_stats_sampling_pct = table_stats_sampling_pct;
+ }
+
+ private:
+ Rdb_ddl_manager* m_ddl_manager;
+ Rdb_compact_params m_params;
+ uint8_t m_table_stats_sampling_pct;
+};
+
+} // namespace myrocks
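A minimal sketch of how the factory above is plugged into RocksDB (the ddl_manager pointer and the parameter values are assumptions; Rdb_cf_options::init() later in this commit does the same thing with the factory it receives):

    Rdb_compact_params params= { 10000, 100000, 0 };   // hypothetical deletes/window/file_size
    auto factory= std::make_shared<Rdb_tbl_prop_coll_factory>(ddl_manager);
    factory->SetCompactionParams(params);
    factory->SetTableStatsSamplingPct(10);

    rocksdb::ColumnFamilyOptions opts;
    opts.table_properties_collector_factories.push_back(factory);
    // RocksDB now creates one Rdb_tbl_prop_coll per SST file it writes and
    // feeds every key to AddUserKey(), so per-index stats are gathered during
    // flush and compaction.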
diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h
new file mode 100644
index 00000000000..d29f365ba31
--- /dev/null
+++ b/storage/rocksdb/rdb_buff.h
@@ -0,0 +1,452 @@
+/*
+ Portions Copyright (c) 2016-Present, Facebook, Inc.
+ Portions Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+namespace myrocks {
+
+/*
+ Basic composition functions for a network buffer presented as a MySQL String
+ ("netstr") which stores data in Network Byte Order (Big Endian).
+*/
+
+inline void rdb_netstr_append_uint64(my_core::String *out_netstr, uint64 val)
+{
+ DBUG_ASSERT(out_netstr != nullptr);
+
+ // Convert from host machine byte order (usually Little Endian) to network
+ // byte order (Big Endian).
+ uint64 net_val= htobe64(val);
+ out_netstr->append(reinterpret_cast<char*>(&net_val), sizeof(net_val));
+}
+
+inline void rdb_netstr_append_uint32(my_core::String *out_netstr, uint32 val)
+{
+ DBUG_ASSERT(out_netstr != nullptr);
+
+ // Convert from host machine byte order (usually Little Endian) to network
+ // byte order (Big Endian).
+ uint32 net_val= htobe32(val);
+ out_netstr->append(reinterpret_cast<char*>(&net_val), sizeof(net_val));
+}
+
+inline void rdb_netstr_append_uint16(my_core::String *out_netstr, uint16 val)
+{
+ DBUG_ASSERT(out_netstr != nullptr);
+
+ // Convert from host machine byte order (usually Little Endian) to network
+ // byte order (Big Endian).
+ uint16 net_val= htobe16(val);
+ out_netstr->append(reinterpret_cast<char*>(&net_val), sizeof(net_val));
+}
+
+
+/*
+ Basic network buffer ("netbuf") write helper functions.
+*/
+
+inline void rdb_netbuf_store_uint64(uchar *dst_netbuf, uint64 n)
+{
+ // Convert from host byte order (usually Little Endian) to network byte order
+ // (Big Endian).
+ uint64 net_val= htobe64(n);
+ memcpy(dst_netbuf, &net_val, sizeof(net_val));
+}
+
+inline void rdb_netbuf_store_uint32(uchar *dst_netbuf, uint32 n)
+{
+ DBUG_ASSERT(dst_netbuf != nullptr);
+
+ // Convert from host byte order (usually Little Endian) to network byte order
+ // (Big Endian).
+ uint32 net_val= htobe32(n);
+ memcpy(dst_netbuf, &net_val, sizeof(net_val));
+}
+
+inline void rdb_netbuf_store_uint16(uchar *dst_netbuf, uint16 n)
+{
+ DBUG_ASSERT(dst_netbuf != nullptr);
+
+ // Convert from host byte order (usually Little Endian) to network byte order
+ // (Big Endian).
+ uint16 net_val= htobe16(n);
+ memcpy(dst_netbuf, &net_val, sizeof(net_val));
+}
+
+inline void rdb_netbuf_store_byte(uchar *dst_netbuf, uchar c)
+{
+ DBUG_ASSERT(dst_netbuf != nullptr);
+
+ *dst_netbuf= c;
+}
+
+inline void rdb_netbuf_store_index(uchar *dst_netbuf, uint32 number)
+{
+ DBUG_ASSERT(dst_netbuf != nullptr);
+
+ rdb_netbuf_store_uint32(dst_netbuf, number);
+}
+
+
+/*
+ Basic conversion helper functions from network byte order (Big Endian) to host
+ machine byte order (usually Little Endian).
+*/
+
+inline uint64 rdb_netbuf_to_uint64(const uchar *netbuf)
+{
+ DBUG_ASSERT(netbuf != nullptr);
+
+ uint64 net_val;
+ memcpy(&net_val, netbuf, sizeof(net_val));
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ return be64toh(net_val);
+}
+
+inline uint32 rdb_netbuf_to_uint32(const uchar *netbuf)
+{
+ DBUG_ASSERT(netbuf != nullptr);
+
+ uint32 net_val;
+ memcpy(&net_val, netbuf, sizeof(net_val));
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ return be32toh(net_val);
+}
+
+inline uint16 rdb_netbuf_to_uint16(const uchar *netbuf)
+{
+ DBUG_ASSERT(netbuf != nullptr);
+
+ uint16 net_val;
+ memcpy(&net_val, netbuf, sizeof(net_val));
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ return be16toh(net_val);
+}
+
+inline uchar rdb_netbuf_to_byte(const uchar* netbuf)
+{
+ DBUG_ASSERT(netbuf != nullptr);
+
+ return(uchar)netbuf[0];
+}
+
+
+/*
+ Basic network buffer ("netbuf") read helper functions.
+ Network buffer stores data in Network Byte Order (Big Endian).
+ NB: The netbuf is passed as an input/output param, hence after reading,
+ the netbuf pointer is advanced past the bytes that were read.
+*/
+
+inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr)
+{
+ DBUG_ASSERT(netbuf_ptr != nullptr);
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ uint64 host_val= rdb_netbuf_to_uint64(*netbuf_ptr);
+
+ // Advance pointer.
+ *netbuf_ptr += sizeof(host_val);
+
+ return host_val;
+}
+
+inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr)
+{
+ DBUG_ASSERT(netbuf_ptr != nullptr);
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ uint32 host_val= rdb_netbuf_to_uint32(*netbuf_ptr);
+
+ // Advance pointer.
+ *netbuf_ptr += sizeof(host_val);
+
+ return host_val;
+}
+
+inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr)
+{
+ DBUG_ASSERT(netbuf_ptr != nullptr);
+
+ // Convert from network byte order (Big Endian) to host machine byte order
+ // (usually Little Endian).
+ uint16 host_val= rdb_netbuf_to_uint16(*netbuf_ptr);
+
+ // Advance pointer.
+ *netbuf_ptr += sizeof(host_val);
+
+ return host_val;
+}
+
+inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr,
+ GL_INDEX_ID *gl_index_id)
+{
+ DBUG_ASSERT(gl_index_id != nullptr);
+ DBUG_ASSERT(netbuf_ptr != nullptr);
+
+ gl_index_id->cf_id= rdb_netbuf_read_uint32(netbuf_ptr);
+ gl_index_id->index_id= rdb_netbuf_read_uint32(netbuf_ptr);
+}
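A minimal round-trip sketch for the store/read helpers above (uchar, uint32 and uint16 are the MySQL typedefs this header is compiled with):

    uchar buf[sizeof(uint32) + sizeof(uint16)];
    rdb_netbuf_store_uint32(buf, 0x11223344);              // written big-endian
    rdb_netbuf_store_uint16(buf + sizeof(uint32), 0xABCD);

    const uchar *pos= buf;
    uint32 a= rdb_netbuf_read_uint32(&pos);  // a == 0x11223344, pos advanced by 4
    uint16 b= rdb_netbuf_read_uint16(&pos);  // b == 0xABCD,     pos advanced by 2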
+
+/*
+ A simple string reader:
+ - it keeps position within the string that we read from
+ - it prevents one from reading beyond the end of the string.
+*/
+
+class Rdb_string_reader
+{
+ const char* m_ptr;
+ uint m_len;
+ public:
+ explicit Rdb_string_reader(const std::string &str)
+ {
+ m_len= str.length();
+ if (m_len)
+ {
+ m_ptr= &str.at(0);
+ }
+ else
+ {
+ /*
+ One can create a Rdb_string_reader for reading from an empty string
+ (although attempts to read anything will fail).
+ We must not access str.at(0) in that case; since m_len==0 the pointer
+ is never dereferenced, so nullptr is as good a value as any.
+ */
+ m_ptr= nullptr;
+ }
+ }
+
+ explicit Rdb_string_reader(const rocksdb::Slice *slice)
+ {
+ m_ptr= slice->data();
+ m_len= slice->size();
+ }
+
+ /*
+ Read the next @param size bytes. Returns pointer to the bytes read, or
+ nullptr if the remaining string doesn't have that many bytes.
+ */
+ const char *read(uint size)
+ {
+ const char *res;
+ if (m_len < size)
+ {
+ res= nullptr;
+ }
+ else
+ {
+ res= m_ptr;
+ m_ptr += size;
+ m_len -= size;
+ }
+ return res;
+ }
+
+ bool read_uint8(uint* res)
+ {
+ const uchar *p;
+ if (!(p= reinterpret_cast<const uchar*>(read(1))))
+ return true; // error
+ else
+ {
+ *res= *p;
+ return false; // Ok
+ }
+ }
+
+ bool read_uint16(uint* res)
+ {
+ const uchar *p;
+ if (!(p= reinterpret_cast<const uchar*>(read(2))))
+ return true; // error
+ else
+ {
+ *res= rdb_netbuf_to_uint16(p);
+ return false; // Ok
+ }
+ }
+
+ uint remaining_bytes() const { return m_len; }
+
+ /*
+ Return pointer to data that will be read by next read() call (if there is
+ nothing left to read, returns pointer to beyond the end of previous read()
+ call)
+ */
+ const char *get_current_ptr() const { return m_ptr; }
+};
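A minimal sketch of the intended usage, assuming a hypothetical value layout of <uint16 length><payload>; the bool true-on-error convention mirrors read_uint16() above:

    bool parse_value(const rocksdb::Slice &value, std::string *out)
    {
      Rdb_string_reader reader(&value);

      uint len;
      if (reader.read_uint16(&len))
        return true;                      // error: fewer than 2 bytes available

      const char *payload= reader.read(len);
      if (payload == nullptr)
        return true;                      // error: value shorter than it claims

      out->assign(payload, len);
      return false;                       // Ok; reader.remaining_bytes() is what's left
    }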
+
+
+/*
+ @brief
+ A buffer one can write the data to.
+
+ @detail
+ Suggested usage pattern:
+
+ writer->clear();
+ writer->write_XXX(...);
+ ...
+ // Ok, writer->ptr() points to the data written so far,
+ // and writer->get_current_pos() is the length of the data
+
+*/
+
+class Rdb_string_writer
+{
+ std::vector<uchar> m_data;
+ public:
+ void clear() { m_data.clear(); }
+ void write_uint8(uint val)
+ {
+ m_data.push_back(static_cast<uchar>(val));
+ }
+
+ void write_uint16(uint val)
+ {
+ auto size= m_data.size();
+ m_data.resize(size + 2);
+ rdb_netbuf_store_uint16(m_data.data() + size, val);
+ }
+
+ void write_uint32(uint val)
+ {
+ auto size= m_data.size();
+ m_data.resize(size + 4);
+ rdb_netbuf_store_uint32(m_data.data() + size, val);
+ }
+
+ void write(uchar *new_data, size_t len)
+ {
+ m_data.insert(m_data.end(), new_data, new_data + len);
+ }
+
+ uchar* ptr() { return m_data.data(); }
+ size_t get_current_pos() const { return m_data.size(); }
+
+ void write_uint8_at(size_t pos, uint new_val)
+ {
+ // This function will only overwrite what was written
+ DBUG_ASSERT(pos < get_current_pos());
+ m_data.data()[pos]= new_val;
+ }
+
+ void write_uint16_at(size_t pos, uint new_val)
+ {
+ // This function will only overwrite what was written
+ DBUG_ASSERT(pos < get_current_pos() && (pos + 1) < get_current_pos());
+ rdb_netbuf_store_uint16(m_data.data() + pos, new_val);
+ }
+};
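A minimal sketch of the pattern described above, including the write_uint16_at() back-patching that pack_record() uses later for the unpack_info length (the 0x01 tag and the payload are hypothetical):

    Rdb_string_writer writer;
    writer.clear();
    writer.write_uint8(0x01);                  // hypothetical tag byte
    size_t len_pos= writer.get_current_pos();
    writer.write_uint16(0);                    // length unknown yet, patch below
    writer.write_uint32(0xCAFEBABE);           // payload
    writer.write_uint16_at(len_pos, writer.get_current_pos() - len_pos - 2);

    // Hand the accumulated bytes to RocksDB as a value:
    rocksdb::Slice value(reinterpret_cast<const char*>(writer.ptr()),
                         writer.get_current_pos());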
+
+
+/*
+ A helper class for writing bits into Rdb_string_writer.
+
+ The class assumes (but doesn't check) that nobody else writes to the
+ underlying Rdb_string_writer while this Rdb_bit_writer is using it.
+*/
+class Rdb_bit_writer
+{
+ Rdb_string_writer *m_writer;
+ uchar m_offset;
+ public:
+ explicit Rdb_bit_writer(Rdb_string_writer* writer_arg)
+ : m_writer(writer_arg),
+ m_offset(0)
+ {
+ }
+
+ void write(uint size, uint value)
+ {
+ DBUG_ASSERT((value & ((1 << size) - 1)) == value);
+
+ while (size > 0)
+ {
+ if (m_offset == 0)
+ {
+ m_writer->write_uint8(0);
+ }
+ // number of bits to put in this byte
+ uint bits = std::min(size, (uint)(8 - m_offset));
+ uchar *last_byte= m_writer->ptr() + m_writer->get_current_pos() - 1;
+ *last_byte |=
+ (uchar) ((value >> (size - bits)) & ((1 << bits) - 1)) << m_offset;
+ size -= bits;
+ m_offset = (m_offset + bits) & 0x7;
+ }
+ }
+};
+
+class Rdb_bit_reader
+{
+ const uchar *m_cur;
+ uchar m_offset;
+ uint m_ret;
+ Rdb_string_reader *m_reader;
+ public:
+ explicit Rdb_bit_reader(Rdb_string_reader *reader)
+ : m_cur(nullptr),
+ m_offset(0),
+ m_reader(reader)
+ {
+ }
+
+ // Returns a pointer to a uint containing the bits read. On subsequent
+ // reads, the value being pointed to will be overwritten. Returns nullptr
+ // on failure.
+ uint *read(uint size)
+ {
+ m_ret= 0;
+ DBUG_ASSERT(size <= 32);
+
+ while (size > 0)
+ {
+ if (m_offset == 0)
+ {
+ m_cur= (const uchar *)m_reader->read(1);
+ if (m_cur == nullptr)
+ {
+ return nullptr;
+ }
+ }
+ // how many bits from the current byte?
+ uint bits = std::min((uint)(8 - m_offset), size);
+ m_ret <<= bits;
+ m_ret |= (*m_cur >> m_offset) & ((1 << bits) - 1);
+ size -= bits;
+ m_offset = (m_offset + bits) & 0x7;
+ }
+
+ return &m_ret;
+ }
+};
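A minimal round-trip sketch for the two classes above: pack a 3-bit and a 7-bit value, then read them back (read() returns nullptr only if the underlying reader runs out of bytes, which cannot happen here):

    Rdb_string_writer out;
    Rdb_bit_writer bit_writer(&out);
    bit_writer.write(3, 0x5);                  // bits 101
    bit_writer.write(7, 0x41);                 // bits 1000001

    std::string packed(reinterpret_cast<const char*>(out.ptr()),
                       out.get_current_pos());
    Rdb_string_reader in(packed);
    Rdb_bit_reader bit_reader(&in);

    uint first=  *bit_reader.read(3);          // == 0x5
    uint second= *bit_reader.read(7);          // == 0x41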
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc
new file mode 100644
index 00000000000..3ee28b49cb6
--- /dev/null
+++ b/storage/rocksdb/rdb_cf_manager.cc
@@ -0,0 +1,236 @@
+/*
+ Copyright (c) 2014, SkySQL Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+/* This C++ file's header file */
+#include "./rdb_cf_manager.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./ha_rocksdb_proto.h"
+
+namespace myrocks {
+
+/* Check if ColumnFamily name says it's a reverse-ordered CF */
+bool Rdb_cf_manager::is_cf_name_reverse(const char *name)
+{
+ /* nullptr means the default CF is used. (TODO: can the default CF be
+ * reverse?) */
+ if (name && !strncmp(name, "rev:", 4))
+ return true;
+ else
+ return false;
+}
+
+#ifdef HAVE_PSI_INTERFACE
+static PSI_mutex_key ex_key_cfm;
+#endif
+
+void Rdb_cf_manager::init(
+ Rdb_cf_options* cf_options,
+ std::vector<rocksdb::ColumnFamilyHandle*> *handles)
+{
+ mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST);
+
+ DBUG_ASSERT(cf_options != nullptr);
+ DBUG_ASSERT(handles != nullptr);
+ DBUG_ASSERT(handles->size() > 0);
+
+ m_cf_options = cf_options;
+
+ for (auto cfh : *handles) {
+ DBUG_ASSERT(cfh != nullptr);
+ m_cf_name_map[cfh->GetName()] = cfh;
+ m_cf_id_map[cfh->GetID()] = cfh;
+ }
+}
+
+
+void Rdb_cf_manager::cleanup()
+{
+ for (auto it : m_cf_name_map) {
+ delete it.second;
+ }
+ mysql_mutex_destroy(&m_mutex);
+}
+
+
+/**
+ Generate Column Family name for per-index column families
+
+ @param res OUT Column Family name
+*/
+
+void Rdb_cf_manager::get_per_index_cf_name(const std::string& db_table_name,
+ const char *index_name,
+ std::string *res)
+{
+ DBUG_ASSERT(index_name != nullptr);
+ DBUG_ASSERT(res != nullptr);
+
+ *res = db_table_name + "." + index_name;
+}
+
+
+/*
+ @brief
+ Find column family by name. If it doesn't exist, create it
+
+ @detail
+ See Rdb_cf_manager::get_cf
+*/
+rocksdb::ColumnFamilyHandle*
+Rdb_cf_manager::get_or_create_cf(rocksdb::DB *rdb,
+ const char *cf_name,
+ const std::string& db_table_name,
+ const char *index_name,
+ bool *is_automatic)
+{
+ DBUG_ASSERT(rdb != nullptr);
+ DBUG_ASSERT(is_automatic != nullptr);
+
+ rocksdb::ColumnFamilyHandle* cf_handle;
+
+ mysql_mutex_lock(&m_mutex);
+ *is_automatic= false;
+ if (cf_name == nullptr)
+ cf_name= DEFAULT_CF_NAME;
+
+ std::string per_index_name;
+ if (!strcmp(cf_name, PER_INDEX_CF_NAME))
+ {
+ get_per_index_cf_name(db_table_name, index_name, &per_index_name);
+ cf_name= per_index_name.c_str();
+ *is_automatic= true;
+ }
+
+ auto it = m_cf_name_map.find(cf_name);
+ if (it != m_cf_name_map.end())
+ cf_handle= it->second;
+ else
+ {
+ /* Create a Column Family. */
+ std::string cf_name_str(cf_name);
+ rocksdb::ColumnFamilyOptions opts;
+ m_cf_options->get_cf_options(cf_name_str, &opts);
+
+ sql_print_information("RocksDB: creating column family %s", cf_name_str.c_str());
+ sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size);
+ sql_print_information(" target_file_size_base=%" PRIu64,
+ opts.target_file_size_base);
+
+ rocksdb::Status s= rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle);
+ if (s.ok()) {
+ m_cf_name_map[cf_handle->GetName()] = cf_handle;
+ m_cf_id_map[cf_handle->GetID()] = cf_handle;
+ } else {
+ cf_handle= nullptr;
+ }
+ }
+ mysql_mutex_unlock(&m_mutex);
+
+ return cf_handle;
+}
+
+
+/*
+ Find column family by its cf_name.
+
+ @detail
+ dbname.tablename and index_name are also parameters, because
+ cf_name=PER_INDEX_CF_NAME means that column family name is a function
+ of table/index name.
+
+ @param out is_automatic TRUE<=> column family name is auto-assigned based on
+ db_table_name and index_name.
+*/
+
+rocksdb::ColumnFamilyHandle*
+Rdb_cf_manager::get_cf(const char *cf_name,
+ const std::string& db_table_name,
+ const char *index_name,
+ bool *is_automatic) const
+{
+ DBUG_ASSERT(cf_name != nullptr);
+ DBUG_ASSERT(is_automatic != nullptr);
+
+ rocksdb::ColumnFamilyHandle* cf_handle;
+
+ *is_automatic= false;
+ mysql_mutex_lock(&m_mutex);
+ if (cf_name == nullptr)
+ cf_name= DEFAULT_CF_NAME;
+
+ std::string per_index_name;
+ if (!strcmp(cf_name, PER_INDEX_CF_NAME))
+ {
+ get_per_index_cf_name(db_table_name, index_name, &per_index_name);
+ cf_name= per_index_name.c_str();
+ *is_automatic= true;
+ }
+
+ auto it = m_cf_name_map.find(cf_name);
+ cf_handle = (it != m_cf_name_map.end()) ? it->second : nullptr;
+
+ mysql_mutex_unlock(&m_mutex);
+
+ return cf_handle;
+}
+
+rocksdb::ColumnFamilyHandle* Rdb_cf_manager::get_cf(const uint32_t id) const
+{
+ rocksdb::ColumnFamilyHandle* cf_handle = nullptr;
+
+ mysql_mutex_lock(&m_mutex);
+ auto it = m_cf_id_map.find(id);
+ if (it != m_cf_id_map.end())
+ cf_handle = it->second;
+ mysql_mutex_unlock(&m_mutex);
+
+ return cf_handle;
+}
+
+std::vector<std::string>
+Rdb_cf_manager::get_cf_names(void) const
+{
+ std::vector<std::string> names;
+
+ mysql_mutex_lock(&m_mutex);
+ for (auto it : m_cf_name_map) {
+ names.push_back(it.first);
+ }
+ mysql_mutex_unlock(&m_mutex);
+ return names;
+}
+
+std::vector<rocksdb::ColumnFamilyHandle*>
+Rdb_cf_manager::get_all_cf(void) const
+{
+ std::vector<rocksdb::ColumnFamilyHandle*> list;
+
+ mysql_mutex_lock(&m_mutex);
+ for (auto it : m_cf_id_map) {
+ list.push_back(it.second);
+ }
+ mysql_mutex_unlock(&m_mutex);
+
+ return list;
+}
+
+} // namespace myrocks
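A minimal usage sketch (the rocksdb::DB handle, the cf_manager instance and the PER_INDEX_CF_NAME constant come from the surrounding engine code and are assumed here):

    bool is_automatic;

    // Explicit column family: created on first use, reused afterwards.
    rocksdb::ColumnFamilyHandle *cfh=
      cf_manager.get_or_create_cf(rdb, "rev:idx_cf", "test.t1", "idx1",
                                  &is_automatic);      // is_automatic == false

    // Per-index column family: the name is derived as "<db.table>.<index>",
    // here "test.t1.idx1".
    rocksdb::ColumnFamilyHandle *auto_cfh=
      cf_manager.get_or_create_cf(rdb, PER_INDEX_CF_NAME, "test.t1", "idx1",
                                  &is_automatic);      // is_automatic == true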
diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h
new file mode 100644
index 00000000000..5a43b533c6d
--- /dev/null
+++ b/storage/rocksdb/rdb_cf_manager.h
@@ -0,0 +1,106 @@
+/*
+ Copyright (c) 2014, SkySQL Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#pragma once
+
+/* C++ system header files */
+#include <map>
+#include <string>
+#include <vector>
+
+/* MySQL header files */
+#include "./sql_class.h"
+
+/* RocksDB header files */
+#include "rocksdb/db.h"
+
+/* MyRocks header files */
+#include "./rdb_cf_options.h"
+
+namespace myrocks {
+
+/*
+ We need a Column Family (CF) manager. Its functions:
+ - create column families (synchronized, don't create the same twice)
+ - keep count in each column family.
+ = the count is kept on-disk.
+ = there are no empty CFs. initially count=1.
+ = then, when doing DDL, we increase or decrease it.
+ (atomicity is maintained by being in the same WriteBatch with DDLs)
+ = if DROP discovers that now count=0, it removes the CF.
+
+ Current state is:
+ - CFs are created in a synchronized way. We can't remove them, yet.
+*/
+
+class Rdb_cf_manager
+{
+ std::map<std::string, rocksdb::ColumnFamilyHandle*> m_cf_name_map;
+ std::map<uint32_t, rocksdb::ColumnFamilyHandle*> m_cf_id_map;
+
+ mutable mysql_mutex_t m_mutex;
+
+ static
+ void get_per_index_cf_name(const std::string& db_table_name,
+ const char *index_name, std::string *res);
+
+ Rdb_cf_options* m_cf_options= nullptr;
+
+public:
+ static bool is_cf_name_reverse(const char *name);
+
+ /*
+ This is called right after the DB::Open() call. The parameters describe column
+ families that are present in the database. The first CF is the default CF.
+ */
+ void init(Rdb_cf_options* cf_options,
+ std::vector<rocksdb::ColumnFamilyHandle*> *handles);
+ void cleanup();
+
+ /*
+ Used by CREATE TABLE.
+ - cf_name=nullptr means use default column family
+ - cf_name=_auto_ means use 'dbname.tablename.indexname'
+ */
+ rocksdb::ColumnFamilyHandle* get_or_create_cf(
+ rocksdb::DB *rdb, const char *cf_name, const std::string& db_table_name,
+ const char *index_name, bool *is_automatic);
+
+ /* Used by table open */
+ rocksdb::ColumnFamilyHandle* get_cf(const char *cf_name,
+ const std::string& db_table_name,
+ const char *index_name,
+ bool *is_automatic) const;
+
+ /* Look up cf by id; used by datadic */
+ rocksdb::ColumnFamilyHandle* get_cf(const uint32_t id) const;
+
+ /* Used to iterate over column families for show status */
+ std::vector<std::string> get_cf_names(void) const;
+
+ /* Used to iterate over column families */
+ std::vector<rocksdb::ColumnFamilyHandle*> get_all_cf(void) const;
+
+ // void drop_cf(); -- not implemented so far.
+
+ void get_cf_options(
+ const std::string &cf_name,
+ rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__)) {
+ m_cf_options->get_cf_options(cf_name, opts);
+ }
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc
new file mode 100644
index 00000000000..ccdb46a654d
--- /dev/null
+++ b/storage/rocksdb/rdb_cf_options.cc
@@ -0,0 +1,340 @@
+/*
+ Copyright (c) 2014, SkySQL Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+/* This C++ file's header file */
+#include "./rdb_cf_options.h"
+
+/* C++ system header files */
+#include <string>
+
+/* MySQL header files */
+#include "./log.h"
+
+/* RocksDB header files */
+#include "rocksdb/utilities/convenience.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./rdb_cf_manager.h"
+#include "./rdb_compact_filter.h"
+
+namespace myrocks {
+
+Rdb_pk_comparator Rdb_cf_options::s_pk_comparator;
+Rdb_rev_comparator Rdb_cf_options::s_rev_pk_comparator;
+
+bool Rdb_cf_options::init(
+ size_t default_write_buffer_size,
+ const rocksdb::BlockBasedTableOptions& table_options,
+ std::shared_ptr<rocksdb::TablePropertiesCollectorFactory> prop_coll_factory,
+ const char * default_cf_options,
+ const char * override_cf_options)
+{
+ m_default_cf_opts.comparator = &s_pk_comparator;
+ m_default_cf_opts.compaction_filter_factory.reset(
+ new Rdb_compact_filter_factory);
+ m_default_cf_opts.write_buffer_size = default_write_buffer_size;
+
+ m_default_cf_opts.table_factory.reset(
+ rocksdb::NewBlockBasedTableFactory(table_options));
+
+ if (prop_coll_factory) {
+ m_default_cf_opts.table_properties_collector_factories.push_back(
+ prop_coll_factory);
+ }
+
+ if (!set_default(std::string(default_cf_options)) ||
+ !set_override(std::string(override_cf_options))) {
+ return false;
+ }
+
+ return true;
+}
+
+void Rdb_cf_options::get(const std::string &cf_name,
+ rocksdb::ColumnFamilyOptions *opts)
+{
+ DBUG_ASSERT(opts != nullptr);
+
+ // set defaults
+ rocksdb::GetColumnFamilyOptionsFromString(*opts,
+ m_default_config,
+ opts);
+
+ // set per-cf config if we have one
+ Name_to_config_t::iterator it = m_name_map.find(cf_name);
+ if (it != m_name_map.end()) {
+ rocksdb::GetColumnFamilyOptionsFromString(*opts,
+ it->second,
+ opts);
+ }
+}
+
+bool Rdb_cf_options::set_default(const std::string &default_config)
+{
+ rocksdb::ColumnFamilyOptions options;
+
+ if (!default_config.empty() &&
+ !rocksdb::GetColumnFamilyOptionsFromString(options,
+ default_config,
+ &options).ok()) {
+ fprintf(stderr,
+ "Invalid default column family config: %s\n",
+ default_config.c_str());
+ return false;
+ }
+
+ m_default_config = default_config;
+ return true;
+}
+
+// Skip over any spaces in the input string.
+void Rdb_cf_options::skip_spaces(const std::string& input, size_t* pos)
+{
+ DBUG_ASSERT(pos != nullptr);
+
+ while (*pos < input.size() && isspace(input[*pos]))
+ ++(*pos);
+}
+
+// Find a valid column family name. Note that all characters except a
+// semicolon are valid (should this change?) and all spaces are trimmed from
+// the beginning and end but are not removed between other characters.
+bool Rdb_cf_options::find_column_family(const std::string& input, size_t* pos,
+ std::string* key)
+{
+ DBUG_ASSERT(pos != nullptr);
+ DBUG_ASSERT(key != nullptr);
+
+ size_t beg_pos = *pos;
+ size_t end_pos = *pos - 1;
+
+ // Loop through the characters in the string until we see a '='.
+ for ( ; *pos < input.size() && input[*pos] != '='; ++(*pos))
+ {
+ // If this is not a space, move the end position to the current position.
+ if (input[*pos] != ' ')
+ end_pos = *pos;
+ }
+
+ if (end_pos == beg_pos - 1)
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("No column family found (options: %s)", input.c_str());
+ return false;
+ }
+
+ *key = input.substr(beg_pos, end_pos - beg_pos + 1);
+ return true;
+}
+
+// Find a valid options portion. Everything is deemed valid within the options
+// portion until we hit as many close curly braces as we have seen open curly
+// braces.
+bool Rdb_cf_options::find_options(const std::string& input, size_t* pos,
+ std::string* options)
+{
+ DBUG_ASSERT(pos != nullptr);
+ DBUG_ASSERT(options != nullptr);
+
+ // Make sure we have an open curly brace at the current position.
+ if (*pos < input.size() && input[*pos] != '{')
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("Invalid cf options, '{' expected (options: %s)",
+ input.c_str());
+ return false;
+ }
+
+ // Skip the open curly brace and any spaces.
+ ++(*pos);
+ skip_spaces(input, pos);
+
+ // Set up our brace_count, the begin position and current end position.
+ size_t brace_count = 1;
+ size_t beg_pos = *pos;
+
+ // Loop through the characters in the string until we find the appropriate
+ // number of closing curly braces.
+ while (*pos < input.size())
+ {
+ switch (input[*pos])
+ {
+ case '}':
+ // If this is a closing curly brace and we bring the count down to zero
+ // we can exit the loop with a valid options string.
+ if (--brace_count == 0)
+ {
+ *options = input.substr(beg_pos, *pos - beg_pos);
+ ++(*pos); // Move past the last closing curly brace
+ return true;
+ }
+
+ break;
+
+ case '{':
+ // If this is an open curly brace increment the count.
+ ++brace_count;
+ break;
+
+ default:
+ break;
+ }
+
+ // Move to the next character.
+ ++(*pos);
+ }
+
+ // We never found the correct number of closing curly braces.
+ // Generate an error.
+ // NO_LINT_DEBUG
+ sql_print_warning("Mismatched cf options, '}' expected (options: %s)",
+ input.c_str());
+ return false;
+}
+
+bool Rdb_cf_options::find_cf_options_pair(const std::string& input,
+ size_t* pos,
+ std::string* cf,
+ std::string* opt_str)
+{
+ DBUG_ASSERT(pos != nullptr);
+ DBUG_ASSERT(cf != nullptr);
+ DBUG_ASSERT(opt_str != nullptr);
+
+ // Skip any spaces.
+ skip_spaces(input, pos);
+
+ // We should now have a column family name.
+ if (!find_column_family(input, pos, cf))
+ return false;
+
+ // If we are at the end of the input then we generate an error.
+ if (*pos == input.size())
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("Invalid cf options, '=' expected (options: %s)",
+ input.c_str());
+ return false;
+ }
+
+ // Skip equal sign and any spaces after it
+ ++(*pos);
+ skip_spaces(input, pos);
+
+ // Find the options for this column family. This should be in the format
+ // {<options>} where <options> may contain embedded pairs of curly braces.
+ if (!find_options(input, pos, opt_str))
+ return false;
+
+ // Skip any trailing spaces after the option string.
+ skip_spaces(input, pos);
+
+ // We should either be at the end of the input string or at a semicolon.
+ if (*pos < input.size())
+ {
+ if (input[*pos] != ';')
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("Invalid cf options, ';' expected (options: %s)",
+ input.c_str());
+ return false;
+ }
+
+ ++(*pos);
+ }
+
+ return true;
+}
+
+bool Rdb_cf_options::set_override(const std::string &override_config)
+{
+ // TODO(???): support updates?
+
+ std::string cf;
+ std::string opt_str;
+ rocksdb::ColumnFamilyOptions options;
+ Name_to_config_t configs;
+
+ // Loop through the characters of the string until we reach the end.
+ size_t pos = 0;
+ while (pos < override_config.size())
+ {
+ // Attempt to find <cf>={<opt_str>}.
+ if (!find_cf_options_pair(override_config, &pos, &cf, &opt_str))
+ return false;
+
+ // Generate an error if we have already seen this column family.
+ if (configs.find(cf) != configs.end())
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning(
+ "Duplicate entry for %s in override options (options: %s)",
+ cf.c_str(), override_config.c_str());
+ return false;
+ }
+
+ // Generate an error if the <opt_str> is not valid according to RocksDB.
+ if (!rocksdb::GetColumnFamilyOptionsFromString(
+ options, opt_str, &options).ok())
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning(
+ "Invalid cf config for %s in override options (options: %s)",
+ cf.c_str(), override_config.c_str());
+ return false;
+ }
+
+ // If everything is good, add this cf/opt_str pair to the map.
+ configs[cf] = opt_str;
+ }
+
+ // Everything checked out - make the map live
+ m_name_map = configs;
+
+ return true;
+}
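For illustration, a sketch of the strings that set_default()/set_override() accept, fed in through init(); the option names and sizes are hypothetical. Each {...} body is handed to rocksdb::GetColumnFamilyOptionsFromString, and a duplicate CF name, a missing '=' or unbalanced braces make init() return false:

    rocksdb::BlockBasedTableOptions table_options;
    Rdb_cf_options cf_options;

    bool ok= cf_options.init(
      /* default_write_buffer_size= */ 64 * 1024 * 1024,
      table_options,
      /* prop_coll_factory= */ nullptr,
      /* default_cf_options= */ "write_buffer_size=64m",
      /* override_cf_options= */
      "cf1={write_buffer_size=8m;target_file_size_base=2m};"
      "rev:cf2={write_buffer_size=16m}");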
+
+const rocksdb::Comparator* Rdb_cf_options::get_cf_comparator(
+ const std::string& cf_name)
+{
+ if (Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str()))
+ {
+ return &s_rev_pk_comparator;
+ }
+ else
+ {
+ return &s_pk_comparator;
+ }
+}
+
+void Rdb_cf_options::get_cf_options(const std::string &cf_name,
+ rocksdb::ColumnFamilyOptions *opts)
+{
+ DBUG_ASSERT(opts != nullptr);
+
+ *opts = m_default_cf_opts;
+ get(cf_name, opts);
+
+ // Set the comparator according to 'rev:'
+ opts->comparator= get_cf_comparator(cf_name);
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h
new file mode 100644
index 00000000000..e709e42e8b5
--- /dev/null
+++ b/storage/rocksdb/rdb_cf_options.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2014, SkySQL Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#pragma once
+
+/* C++ system header files */
+#include <string>
+#include <unordered_map>
+
+/* RocksDB header files */
+#include "rocksdb/table.h"
+#include "rocksdb/utilities/options_util.h"
+
+/* MyRocks header files */
+#include "./rdb_comparator.h"
+
+namespace myrocks {
+
+/*
+ Per-column family options configs.
+
+ Per-column family option can be set
+ - Globally (the same value applies to all column families)
+ - Per column family: there is a {cf_name -> value} map,
+ and also there is a default value which applies to column
+ families not found in the map.
+*/
+class Rdb_cf_options
+{
+ public:
+ void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions *opts);
+
+ bool init(
+ size_t default_write_buffer_size,
+ const rocksdb::BlockBasedTableOptions& table_options,
+ std::shared_ptr<rocksdb::TablePropertiesCollectorFactory> prop_coll_factory,
+ const char * default_cf_options,
+ const char * override_cf_options);
+
+ const rocksdb::ColumnFamilyOptions& get_defaults() const {
+ return m_default_cf_opts;
+ }
+
+ static const rocksdb::Comparator* get_cf_comparator(
+ const std::string& cf_name);
+
+ void get_cf_options(
+ const std::string &cf_name,
+ rocksdb::ColumnFamilyOptions *opts) __attribute__((__nonnull__));
+
+ private:
+ bool set_default(const std::string &default_config);
+ bool set_override(const std::string &override_config);
+
+ /* Helper string manipulation functions */
+ static void skip_spaces(const std::string& input, size_t* pos);
+ static bool find_column_family(const std::string& input, size_t* pos,
+ std::string* key);
+ static bool find_options(const std::string& input, size_t* pos,
+ std::string* options);
+ static bool find_cf_options_pair(const std::string& input, size_t* pos,
+ std::string* cf, std::string* opt_str);
+
+ private:
+ static Rdb_pk_comparator s_pk_comparator;
+ static Rdb_rev_comparator s_rev_pk_comparator;
+
+ typedef std::unordered_map<std::string, std::string> Name_to_config_t;
+
+ /* CF name -> value map */
+ Name_to_config_t m_name_map;
+
+ /* The default value (if there is only one value, it is stored here) */
+ std::string m_default_config;
+
+ rocksdb::ColumnFamilyOptions m_default_cf_opts;
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h
new file mode 100644
index 00000000000..db2011721b7
--- /dev/null
+++ b/storage/rocksdb/rdb_compact_filter.h
@@ -0,0 +1,115 @@
+/*
+ Portions Copyright (c) 2016-Present, Facebook, Inc.
+ Portions Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+/* C++ system header files */
+#include <string>
+
+/* RocksDB includes */
+#include "rocksdb/compaction_filter.h"
+
+/* MyRocks includes */
+#include "./ha_rocksdb_proto.h"
+#include "./rdb_datadic.h"
+
+namespace myrocks {
+
+class Rdb_compact_filter : public rocksdb::CompactionFilter
+{
+ public:
+ explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {}
+ ~Rdb_compact_filter() {}
+
+ // Keys are passed in sorted order within the same SST.
+ // The V1 Filter interface is thread safe for our usage because each
+ // compaction gets its own filter instance from the factory. Protect the
+ // mutable instance variables if that ever changes.
+ virtual bool Filter(int level,
+ const rocksdb::Slice& key,
+ const rocksdb::Slice& existing_value,
+ std::string* new_value,
+ bool* value_changed) const override
+ {
+ DBUG_ASSERT(key.size() >= sizeof(uint32));
+
+ GL_INDEX_ID gl_index_id;
+ gl_index_id.cf_id= m_cf_id;
+ gl_index_id.index_id= rdb_netbuf_to_uint32((const uchar*)key.data());
+ DBUG_ASSERT(gl_index_id.index_id >= 1);
+
+ if (gl_index_id != m_prev_index) // processing new index id
+ {
+ if (m_num_deleted > 0)
+ {
+ m_num_deleted= 0;
+ }
+ m_should_delete=
+ rdb_get_dict_manager()->is_drop_index_ongoing(gl_index_id);
+ m_prev_index= gl_index_id;
+ }
+
+ if (m_should_delete)
+ {
+ m_num_deleted++;
+ }
+
+ return m_should_delete;
+ }
+
+ virtual bool IgnoreSnapshots() const override { return true; }
+
+ virtual const char* Name() const override
+ {
+ return "Rdb_compact_filter";
+ }
+
+ private:
+ // Column family for this compaction filter
+ const uint32_t m_cf_id;
+ // Index id of the previous record
+ mutable GL_INDEX_ID m_prev_index= {0, 0};
+ // Number of rows deleted for the same index id
+ mutable uint64 m_num_deleted= 0;
+ // Current index id should be deleted or not (should be deleted if true)
+ mutable bool m_should_delete= false;
+};
+
+class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory
+{
+ public:
+ Rdb_compact_filter_factory() {}
+
+ ~Rdb_compact_filter_factory() {}
+
+ const char* Name() const override
+ {
+ return "Rdb_compact_filter_factory";
+ }
+
+ std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
+ const rocksdb::CompactionFilter::Context& context) override
+ {
+ return std::unique_ptr<rocksdb::CompactionFilter>(
+ new Rdb_compact_filter(context.column_family_id));
+ }
+};
+
+} // namespace myrocks
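A minimal sketch of how the factory is attached to a column family; this mirrors what Rdb_cf_options::init() does for the default CF options earlier in this commit:

    rocksdb::ColumnFamilyOptions opts;
    opts.compaction_filter_factory.reset(new Rdb_compact_filter_factory);
    // During compaction RocksDB calls Filter() for every key in that CF; keys
    // whose leading 4-byte index id belongs to an index with a pending DROP
    // are removed (Filter() returns true).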
diff --git a/storage/rocksdb/rdb_comparator.h b/storage/rocksdb/rdb_comparator.h
new file mode 100644
index 00000000000..7e56c5ab00a
--- /dev/null
+++ b/storage/rocksdb/rdb_comparator.h
@@ -0,0 +1,93 @@
+/*
+ Copyright (c) 2012,2015 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ system header files */
+#include <string>
+
+/* MySQL includes */
+#include "./m_ctype.h"
+
+/* RocksDB header files */
+#include "rocksdb/comparator.h"
+
+namespace myrocks {
+
+/*
+ The keys are in form: {index_number} {mem-comparable-key}
+
+ (todo: knowledge about this format is shared between this class and
+ Rdb_key_def)
+*/
+class Rdb_pk_comparator : public rocksdb::Comparator
+{
+ public:
+ static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b)
+ {
+ size_t a_size= a.size();
+ size_t b_size= b.size();
+ size_t len= (a_size < b_size) ? a_size : b_size;
+ int res;
+
+ if ((res= memcmp(a.data(), b.data(), len)))
+ return res;
+
+ /* Ok, res== 0 */
+ if (a_size != b_size)
+ {
+ return a_size < b_size? -1 : 1;
+ }
+ return 0;
+ }
+
+ /* Override virtual methods of interest */
+
+ int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override
+ {
+ return bytewise_compare(a,b);
+ }
+
+ const char* Name() const override { return "RocksDB_SE_v3.10"; }
+
+ //TODO: advanced funcs:
+ // - FindShortestSeparator
+ // - FindShortSuccessor
+
+ // for now, do-nothing implementations:
+ void FindShortestSeparator(std::string* start,
+ const rocksdb::Slice& limit) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
+};
+
+class Rdb_rev_comparator : public rocksdb::Comparator
+{
+ public:
+ static int bytewise_compare(const rocksdb::Slice& a, const rocksdb::Slice& b)
+ {
+ return -Rdb_pk_comparator::bytewise_compare(a, b);
+ }
+
+ int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override
+ {
+ return -Rdb_pk_comparator::bytewise_compare(a, b);
+ }
+ const char* Name() const override { return "rev:RocksDB_SE_v3.10"; }
+ void FindShortestSeparator(std::string* start,
+ const rocksdb::Slice& limit) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
+};
+
+} // namespace myrocks
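A small sketch of the resulting ordering (the key bytes are hypothetical {index_number}{mem-comparable-key} values as described above):

    rocksdb::Slice shorter("\x00\x00\x01\x00" "ab",  6);
    rocksdb::Slice longer ("\x00\x00\x01\x00" "abc", 7);

    Rdb_pk_comparator fwd;
    Rdb_rev_comparator rev;
    // fwd.Compare(shorter, longer) < 0 : plain memcmp order, the prefix sorts first
    // rev.Compare(shorter, longer) > 0 : a "rev:" column family simply negates it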
diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc
new file mode 100644
index 00000000000..d80c81a3d9a
--- /dev/null
+++ b/storage/rocksdb/rdb_datadic.cc
@@ -0,0 +1,4584 @@
+/*
+ Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+/* This C++ file's header file */
+#include "./rdb_datadic.h"
+
+/* C++ standard header files */
+#include <algorithm>
+#include <array>
+#include <limits>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+/* MySQL header files */
+#include "./key.h"
+#include "./m_ctype.h"
+#include "./my_bit.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb_proto.h"
+#include "./my_stacktrace.h"
+#include "./rdb_cf_manager.h"
+#include "./rdb_utils.h"
+
+namespace myrocks {
+
+void get_mem_comparable_space(const CHARSET_INFO *cs,
+ const std::vector<uchar> **xfrm,
+ size_t *xfrm_len,
+ size_t *mb_len);
+
+/*
+ Rdb_key_def class implementation
+*/
+
+Rdb_key_def::Rdb_key_def(
+ uint indexnr_arg, uint keyno_arg,
+ rocksdb::ColumnFamilyHandle* cf_handle_arg,
+ uint16_t index_dict_version_arg,
+ uchar index_type_arg,
+ uint16_t kv_format_version_arg,
+ bool is_reverse_cf_arg, bool is_auto_cf_arg,
+ const char* _name,
+ Rdb_index_stats _stats
+) :
+ m_index_number(indexnr_arg),
+ m_cf_handle(cf_handle_arg),
+ m_index_dict_version(index_dict_version_arg),
+ m_index_type(index_type_arg),
+ m_kv_format_version(kv_format_version_arg),
+ m_is_reverse_cf(is_reverse_cf_arg),
+ m_is_auto_cf(is_auto_cf_arg),
+ m_name(_name),
+ m_stats(_stats),
+ m_pk_part_no(nullptr),
+ m_pack_info(nullptr),
+ m_keyno(keyno_arg),
+ m_key_parts(0),
+ m_maxlength(0) // means 'not initialized'
+{
+ mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST);
+ rdb_netbuf_store_index(m_index_number_storage_form, m_index_number);
+ DBUG_ASSERT(m_cf_handle != nullptr);
+}
+
+Rdb_key_def::Rdb_key_def(const Rdb_key_def& k) :
+ m_index_number(k.m_index_number),
+ m_cf_handle(k.m_cf_handle),
+ m_is_reverse_cf(k.m_is_reverse_cf),
+ m_is_auto_cf(k.m_is_auto_cf),
+ m_name(k.m_name),
+ m_stats(k.m_stats),
+ m_pk_part_no(k.m_pk_part_no),
+ m_pack_info(k.m_pack_info),
+ m_keyno(k.m_keyno),
+ m_key_parts(k.m_key_parts),
+ m_maxlength(k.m_maxlength)
+{
+ mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST);
+ rdb_netbuf_store_index(m_index_number_storage_form, m_index_number);
+ if (k.m_pack_info)
+ {
+ size_t size= sizeof(Rdb_field_packing) * k.m_key_parts;
+ m_pack_info= reinterpret_cast<Rdb_field_packing*>(my_malloc(size, MYF(0)));
+ memcpy(m_pack_info, k.m_pack_info, size);
+ }
+
+ if (k.m_pk_part_no)
+ {
+ size_t size = sizeof(uint)*m_key_parts;
+ m_pk_part_no= reinterpret_cast<uint*>(my_malloc(size, MYF(0)));
+ memcpy(m_pk_part_no, k.m_pk_part_no, size);
+ }
+}
+
+Rdb_key_def::~Rdb_key_def()
+{
+ mysql_mutex_destroy(&m_mutex);
+
+ my_free(m_pk_part_no);
+ m_pk_part_no = nullptr;
+
+ my_free(m_pack_info);
+ m_pack_info = nullptr;
+}
+
+void Rdb_key_def::setup(const TABLE *tbl, const Rdb_tbl_def *tbl_def)
+{
+ DBUG_ASSERT(tbl != nullptr);
+ DBUG_ASSERT(tbl_def != nullptr);
+
+ /*
+ Set max_length based on the table. This can be called concurrently from
+ multiple threads, so there is a mutex to protect this code.
+ */
+ const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY);
+ const bool hidden_pk_exists= table_has_hidden_pk(tbl);
+ const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY);
+ if (!m_maxlength)
+ {
+ mysql_mutex_lock(&m_mutex);
+ if (m_maxlength != 0)
+ {
+ mysql_mutex_unlock(&m_mutex);
+ return;
+ }
+
+ KEY *key_info= nullptr;
+ KEY *pk_info= nullptr;
+ if (!is_hidden_pk)
+ {
+ key_info= &tbl->key_info[m_keyno];
+ if (!hidden_pk_exists)
+ pk_info= &tbl->key_info[tbl->s->primary_key];
+ m_name= std::string(key_info->name);
+ }
+ else
+ {
+ m_name= HIDDEN_PK_NAME;
+ }
+
+ if (secondary_key)
+ m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->actual_key_parts;
+ else
+ {
+ pk_info= nullptr;
+ m_pk_key_parts= 0;
+ }
+
+ // "unique" secondary keys support:
+ m_key_parts= is_hidden_pk ? 1 : key_info->actual_key_parts;
+
+ if (secondary_key)
+ {
+ /*
+ In most cases, SQL layer puts PK columns as invisible suffix at the
+ end of secondary key. There are cases where this doesn't happen:
+ - unique secondary indexes.
+ - partitioned tables.
+
+ Internally, we always need PK columns as suffix (and InnoDB does,
+ too, if you were wondering).
+
+ The loop below will attempt to put all PK columns at the end of key
+ definition. Columns that are already included in the index (either
+ by the user or by "extended keys" feature) are not included for the
+ second time.
+ */
+ m_key_parts += m_pk_key_parts;
+ }
+
+ if (secondary_key)
+ m_pk_part_no= reinterpret_cast<uint*>(my_malloc(sizeof(uint)*m_key_parts,
+ MYF(0)));
+ else
+ m_pk_part_no= nullptr;
+
+ size_t size= sizeof(Rdb_field_packing) * m_key_parts;
+ m_pack_info= reinterpret_cast<Rdb_field_packing*>(my_malloc(size, MYF(0)));
+
+ size_t max_len= INDEX_NUMBER_SIZE;
+ int unpack_len= 0;
+ int max_part_len= 0;
+ bool simulating_extkey= false;
+ uint dst_i= 0;
+
+ uint keyno_to_set= m_keyno;
+ uint keypart_to_set= 0;
+
+ if (is_hidden_pk)
+ {
+ Field *field= nullptr;
+ m_pack_info[dst_i].setup(this, field, keyno_to_set, 0, 0);
+ m_pack_info[dst_i].m_unpack_data_offset= unpack_len;
+ max_len += m_pack_info[dst_i].m_max_image_len;
+ max_part_len= std::max(max_part_len, m_pack_info[dst_i].m_max_image_len);
+ dst_i++;
+ }
+ else
+ {
+ KEY_PART_INFO *key_part= key_info->key_part;
+
+ /* this loop also loops over the 'extended key' tail */
+ for (uint src_i= 0; src_i < m_key_parts; src_i++, keypart_to_set++)
+ {
+ Field *field= key_part ? key_part->field : nullptr;
+
+ if (simulating_extkey && !hidden_pk_exists)
+ {
+ /* Check if this field is already present in the key definition */
+ bool found= false;
+ for (uint j= 0; j < key_info->actual_key_parts; j++)
+ {
+ if (field->field_index == key_info->key_part[j].field->field_index)
+ {
+ found= true;
+ break;
+ }
+ }
+
+ if (found)
+ {
+ key_part++;
+ continue;
+ }
+ }
+
+ if (field && field->real_maybe_null())
+ max_len +=1; // NULL-byte
+
+ m_pack_info[dst_i].setup(this, field, keyno_to_set, keypart_to_set,
+ key_part ? key_part->length : 0);
+ m_pack_info[dst_i].m_unpack_data_offset= unpack_len;
+
+ if (pk_info)
+ {
+ m_pk_part_no[dst_i]= -1;
+ for (uint j= 0; j < m_pk_key_parts; j++)
+ {
+ if (field->field_index == pk_info->key_part[j].field->field_index)
+ {
+ m_pk_part_no[dst_i]= j;
+ break;
+ }
+ }
+ }
+ else if (secondary_key && hidden_pk_exists)
+ {
+ /*
+ The hidden pk can never be part of the sk. So it is always
+ appended to the end of the sk.
+ */
+ m_pk_part_no[dst_i]= -1;
+ if (simulating_extkey)
+ m_pk_part_no[dst_i]= 0;
+ }
+
+ max_len += m_pack_info[dst_i].m_max_image_len;
+
+ max_part_len= std::max(max_part_len,
+ m_pack_info[dst_i].m_max_image_len);
+
+ key_part++;
+ /*
+ For "unique" secondary indexes, pretend they have
+ "index extensions"
+ */
+ if (secondary_key && src_i+1 == key_info->actual_key_parts)
+ {
+ simulating_extkey= true;
+ if (!hidden_pk_exists)
+ {
+ keyno_to_set= tbl->s->primary_key;
+ key_part= pk_info->key_part;
+ keypart_to_set= (uint)-1;
+ }
+ else
+ {
+ keyno_to_set= tbl_def->m_key_count - 1;
+ key_part= nullptr;
+ keypart_to_set= 0;
+ }
+ }
+
+ dst_i++;
+ }
+ }
+
+ m_key_parts= dst_i;
+
+ /* Initialize the memory needed by the stats structure */
+ m_stats.m_distinct_keys_per_prefix.resize(get_key_parts());
+
+ /*
+ This should be the last member variable set before releasing the mutex
+ so that other threads can't see the object partially set up.
+ */
+ m_maxlength= max_len;
+
+ mysql_mutex_unlock(&m_mutex);
+ }
+}
+
+
+/**
+ Get a mem-comparable form of Primary Key from mem-comparable form of this key
+
+ @param
+ pk_descr Primary Key descriptor
+ key Index tuple from this key in mem-comparable form
+ pk_buffer OUT Put here mem-comparable form of the Primary Key.
+
+ @note
+ It may or may not be possible to restore primary key columns to their
+ mem-comparable form. To handle all cases, this function copies mem-
+ comparable forms directly.
+
+ RocksDB SE supports "Extended keys". This means that PK columns are present
+ at the end of every key. If the key already includes PK columns, then
+ these columns are not present at the end of the key.
+
+ Because of the above, we copy each primary key column.
+
+ @todo
+ If we checked crc32 checksums in this function, we would catch some CRC
+ violations that we currently don't. On the other hand, there is a broader
+ set of queries for which we would check the checksum twice.
+*/
+
+uint Rdb_key_def::get_primary_key_tuple(TABLE *table,
+ const std::shared_ptr<const Rdb_key_def>& pk_descr,
+ const rocksdb::Slice *key,
+ uchar *pk_buffer) const
+{
+ DBUG_ASSERT(table != nullptr);
+ DBUG_ASSERT(pk_descr != nullptr);
+ DBUG_ASSERT(key != nullptr);
+ DBUG_ASSERT(pk_buffer);
+
+ uint size= 0;
+ uchar *buf= pk_buffer;
+ DBUG_ASSERT(m_pk_key_parts);
+
+ /* Put the PK number */
+ rdb_netbuf_store_index(buf, pk_descr->m_index_number);
+ buf += INDEX_NUMBER_SIZE;
+ size += INDEX_NUMBER_SIZE;
+
+ const char* start_offs[MAX_REF_PARTS];
+ const char* end_offs[MAX_REF_PARTS];
+ int pk_key_part;
+ uint i;
+ Rdb_string_reader reader(key);
+
+ // Skip the index number
+ if ((!reader.read(INDEX_NUMBER_SIZE)))
+ return RDB_INVALID_KEY_LEN;
+
+ for (i= 0; i < m_key_parts; i++)
+ {
+ if ((pk_key_part= m_pk_part_no[i]) != -1)
+ {
+ start_offs[pk_key_part]= reader.get_current_ptr();
+ }
+
+ bool have_value= true;
+ /* It is impossible to unpack the column. Skip it. */
+ if (m_pack_info[i].m_maybe_null)
+ {
+ const char* nullp;
+ if (!(nullp= reader.read(1)))
+ return RDB_INVALID_KEY_LEN;
+ if (*nullp == 0)
+ {
+ /* This is a NULL value */
+ have_value= false;
+ }
+ else
+ {
+ /* If NULL marker is not '0', it can be only '1' */
+ if (*nullp != 1)
+ return RDB_INVALID_KEY_LEN;
+ }
+ }
+
+ if (have_value)
+ {
+ Rdb_field_packing *fpi= &m_pack_info[i];
+
+ DBUG_ASSERT(table->s != nullptr);
+ bool is_hidden_pk_part= (i + 1 == m_key_parts) &&
+ (table->s->primary_key == MAX_INDEXES);
+ Field *field= nullptr;
+ if (!is_hidden_pk_part)
+ field= fpi->get_field_in_table(table);
+ if (fpi->m_skip_func(fpi, field, &reader))
+ return RDB_INVALID_KEY_LEN;
+ }
+
+ if (pk_key_part != -1)
+ {
+ end_offs[pk_key_part]= reader.get_current_ptr();
+ }
+ }
+
+ for (i= 0; i < m_pk_key_parts; i++)
+ {
+ uint part_size= end_offs[i] - start_offs[i];
+ memcpy(buf, start_offs[i], end_offs[i] - start_offs[i]);
+ buf += part_size;
+ size += part_size;
+ }
+
+ return size;
+}
+
+
+/**
+ Convert index tuple into storage (i.e. mem-comparable) format
+
+ @detail
+ Currently this is done by unpacking into table->record[0] and then
+ packing index columns into storage format.
+
+ @param pack_buffer Temporary area for packing varchar columns. Its
+ size is at least max_storage_fmt_length() bytes.
+*/
+
+uint Rdb_key_def::pack_index_tuple(TABLE *tbl, uchar *pack_buffer,
+ uchar *packed_tuple, const uchar *key_tuple,
+ key_part_map keypart_map) const
+{
+ DBUG_ASSERT(tbl != nullptr);
+ DBUG_ASSERT(pack_buffer != nullptr);
+ DBUG_ASSERT(packed_tuple != nullptr);
+ DBUG_ASSERT(key_tuple != nullptr);
+
+ /* We were given a record in KeyTupleFormat. First, save it to record */
+ uint key_len= calculate_key_len(tbl, m_keyno, key_tuple, keypart_map);
+ key_restore(tbl->record[0], key_tuple, &tbl->key_info[m_keyno], key_len);
+
+ uint n_used_parts= my_count_bits(keypart_map);
+ if (keypart_map == HA_WHOLE_KEY)
+ n_used_parts= 0; // Full key is used
+
+ /* Then, convert the record into a mem-comparable form */
+ return pack_record(tbl, pack_buffer, tbl->record[0], packed_tuple, nullptr,
+ false, 0, n_used_parts);
+}
+
+
+/**
+ @brief
+ Check if "unpack info" data includes checksum.
+
+ @detail
+ This is used only by CHECK TABLE to count the number of rows that have
+ checksums.
+*/
+
+bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info)
+{
+ const uchar* ptr= (const uchar*)unpack_info.data();
+ size_t size= unpack_info.size();
+
+ // Skip unpack info if present.
+ if (size >= RDB_UNPACK_HEADER_SIZE &&
+ ptr[0] == RDB_UNPACK_DATA_TAG)
+ {
+ uint16 skip_len= rdb_netbuf_to_uint16(ptr + 1);
+ SHIP_ASSERT(size >= skip_len);
+
+ size -= skip_len;
+ ptr += skip_len;
+ }
+
+ return (size == RDB_CHECKSUM_CHUNK_SIZE && ptr[0] == RDB_CHECKSUM_DATA_TAG);
+}
+
+/*
+ @return Number of bytes that were changed
+*/
+int Rdb_key_def::successor(uchar *packed_tuple, uint len)
+{
+ DBUG_ASSERT(packed_tuple != nullptr);
+
+ int changed= 0;
+ uchar *p= packed_tuple + len - 1;
+ for (; p > packed_tuple; p--)
+ {
+ changed++;
+ if (*p != uchar(0xFF))
+ {
+ *p= *p + 1;
+ break;
+ }
+ *p='\0';
+ }
+ return changed;
+}
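A small worked example (kd stands for any non-const Rdb_key_def; successor() touches no member state, only the buffer passed in): bytes are bumped from the end, rolling 0xFF over to 0x00, and the first byte is never incremented.

    uchar key[]= { 0x01, 0x02, 0xFF, 0xFF };
    int changed= kd.successor(key, sizeof(key));
    // key is now { 0x01, 0x03, 0x00, 0x00 } and changed == 3:
    // the two trailing 0xFF bytes rolled over and 0x02 was bumped to 0x03.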
+
+
+/**
+ Get index columns from the record and pack them into mem-comparable form.
+
+ @param
+ tbl Table we're working on
+ record IN Record buffer with fields in table->record format
+ pack_buffer IN Temporary area for packing varchars. The size is
+ at least max_storage_fmt_length() bytes.
+ packed_tuple OUT Key in the mem-comparable form
+ unpack_info OUT Unpack data
+ unpack_info_len OUT Unpack data length
+ n_key_parts Number of keyparts to process. 0 means all of them.
+ n_null_fields OUT Number of key fields with NULL value.
+
+ @detail
+ Some callers do not need the unpack information; they can pass
+ unpack_info=nullptr.
+
+ @return
+ Length of the packed tuple
+*/
+
+uint Rdb_key_def::pack_record(const TABLE *tbl, uchar *pack_buffer,
+ const uchar *record, uchar *packed_tuple,
+ Rdb_string_writer *unpack_info,
+ bool should_store_checksums,
+ longlong hidden_pk_id, uint n_key_parts,
+ uint *n_null_fields) const
+{
+ DBUG_ASSERT(tbl != nullptr);
+ DBUG_ASSERT(pack_buffer != nullptr);
+ DBUG_ASSERT(record != nullptr);
+ DBUG_ASSERT(packed_tuple != nullptr);
+ // Checksums for PKs are made when record is packed.
+ // We should never attempt to make checksum just from PK values
+ DBUG_ASSERT_IMP(should_store_checksums,
+ (m_index_type == INDEX_TYPE_SECONDARY));
+
+ uchar *tuple= packed_tuple;
+ size_t unpack_len_pos= size_t(-1);
+ const bool hidden_pk_exists= table_has_hidden_pk(tbl);
+
+ rdb_netbuf_store_index(tuple, m_index_number);
+ tuple += INDEX_NUMBER_SIZE;
+
+ // If n_key_parts is 0, it means all columns.
+ // The following includes the 'extended key' tail.
+ // The 'extended key' includes primary key. This is done to 'uniqify'
+ // non-unique indexes
+ bool use_all_columns = n_key_parts == 0 || n_key_parts == MAX_REF_PARTS;
+
+ // If a hidden pk exists but its value wasn't passed in, we can't pack the
+ // hidden key part, so we skip it (it's always 1 part).
+ if (hidden_pk_exists && !hidden_pk_id && use_all_columns)
+ n_key_parts= m_key_parts - 1;
+ else if (use_all_columns)
+ n_key_parts= m_key_parts;
+
+ if (n_null_fields)
+ *n_null_fields = 0;
+
+ if (unpack_info)
+ {
+ unpack_info->clear();
+ unpack_info->write_uint8(RDB_UNPACK_DATA_TAG);
+ unpack_len_pos= unpack_info->get_current_pos();
+ // we don't know the total length yet, so write a zero
+ unpack_info->write_uint16(0);
+ }
+
+ for (uint i=0; i < n_key_parts; i++)
+ {
+ // Fill hidden pk id into the last key part for secondary keys for tables
+ // with no pk
+ if (hidden_pk_exists && hidden_pk_id && i + 1 == n_key_parts)
+ {
+ m_pack_info[i].fill_hidden_pk_val(&tuple, hidden_pk_id);
+ break;
+ }
+
+ Field *field= m_pack_info[i].get_field_in_table(tbl);
+ DBUG_ASSERT(field != nullptr);
+
+ // Old Field methods expected the record pointer to be at tbl->record[0].
+ // The quick and easy way to fix this was to pass along the offset
+ // for the pointer.
+ my_ptrdiff_t ptr_diff= record - tbl->record[0];
+
+ if (field->real_maybe_null())
+ {
+ DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1));
+ if (field->is_real_null(ptr_diff))
+ {
+ /* NULL value. store '\0' so that it sorts before non-NULL values */
+ *tuple++ = 0;
+ /* That's it, don't store anything else */
+ if (n_null_fields)
+ (*n_null_fields)++;
+ continue;
+ }
+ else
+ {
+ /* Not a NULL value. Store '1' */
+ *tuple++ = 1;
+ }
+ }
+
+ bool create_unpack_info=
+ (unpack_info && // we were requested to generate unpack_info
+ m_pack_info[i].uses_unpack_info() && // and this keypart uses it
+ index_format_min_check(PRIMARY_FORMAT_VERSION_UPDATE1,
+ SECONDARY_FORMAT_VERSION_UPDATE1));
+ Rdb_pack_field_context pack_ctx(unpack_info);
+
+ // Set the offset for methods which do not take an offset as an argument
+ DBUG_ASSERT(is_storage_available(tuple - packed_tuple,
+ m_pack_info[i].m_max_image_len));
+ field->move_field_offset(ptr_diff);
+
+ m_pack_info[i].m_pack_func(&m_pack_info[i], field,
+ pack_buffer, &tuple, &pack_ctx);
+
+ /* Make "unpack info" to be stored in the value */
+ if (create_unpack_info)
+ {
+ m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec,
+ field, &pack_ctx);
+ }
+ field->move_field_offset(-ptr_diff);
+ }
+
+ if (unpack_info)
+ {
+ size_t len= unpack_info->get_current_pos();
+ DBUG_ASSERT(len <= std::numeric_limits<uint16_t>::max());
+
+ // Don't store the unpack_info if it has only the header (that is, there's
+ // no meaningful content).
+ // Primary Keys are special: for them, store the unpack_info even if it's
+ // empty (provided m_maybe_unpack_info==true, see
+ // ha_rocksdb::convert_record_to_storage_format)
+ if (len == RDB_UNPACK_HEADER_SIZE &&
+ m_index_type != Rdb_key_def::INDEX_TYPE_PRIMARY)
+ {
+ unpack_info->clear();
+ }
+ else
+ {
+ unpack_info->write_uint16_at(unpack_len_pos, len);
+ }
+
+ //
+ // Secondary keys have key and value checksums in the value part
+ // Primary key is a special case (the value part has non-indexed columns),
+ // so the checksums are computed and stored by
+ // ha_rocksdb::convert_record_to_storage_format
+ //
+ if (should_store_checksums)
+ {
+ uint32_t key_crc32= crc32(0, packed_tuple, tuple - packed_tuple);
+ uint32_t val_crc32= crc32(0, unpack_info->ptr(),
+ unpack_info->get_current_pos());
+
+ unpack_info->write_uint8(RDB_CHECKSUM_DATA_TAG);
+ unpack_info->write_uint32(key_crc32);
+ unpack_info->write_uint32(val_crc32);
+ }
+ }
+
+ DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0));
+
+ return tuple - packed_tuple;
+}
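+
+/*
+  Illustrative sketch (not an authoritative format definition) of what
+  pack_record() produces for a secondary key when checksums are requested:
+
+    key:   [index number][keypart 1]...[keypart n]  (n includes the
+                                                     extended-key PK tail)
+    value: [RDB_UNPACK_DATA_TAG][uint16 len][unpack data]
+           [RDB_CHECKSUM_DATA_TAG][uint32 key_crc32][uint32 val_crc32]
+
+  A nullable keypart is preceded by a one-byte NULL marker: 0 for NULL,
+  1 for not-NULL.
+*/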
+
+/**
+ Pack the hidden primary key into mem-comparable form.
+
+ @param
+ tbl Table we're working on
+ hidden_pk_id IN New value to be packed into key
+ packed_tuple OUT Key in the mem-comparable form
+
+ @return
+ Length of the packed tuple
+*/
+
+uint Rdb_key_def::pack_hidden_pk(longlong hidden_pk_id,
+ uchar *packed_tuple) const
+{
+ DBUG_ASSERT(packed_tuple != nullptr);
+
+ uchar *tuple= packed_tuple;
+ rdb_netbuf_store_index(tuple, m_index_number);
+ tuple += INDEX_NUMBER_SIZE;
+ DBUG_ASSERT(m_key_parts == 1);
+ DBUG_ASSERT(is_storage_available(tuple - packed_tuple,
+ m_pack_info[0].m_max_image_len));
+
+ m_pack_info[0].fill_hidden_pk_val(&tuple, hidden_pk_id);
+
+ DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0));
+ return tuple - packed_tuple;
+}
+
+
+/*
+ Function of type rdb_index_field_pack_t
+*/
+
+void rdb_pack_with_make_sort_key(Rdb_field_packing *fpi, Field *field,
+ uchar *buf __attribute__((__unused__)),
+ uchar **dst,
+ Rdb_pack_field_context *pack_ctx
+ __attribute__((__unused__)))
+{
+ DBUG_ASSERT(fpi != nullptr);
+ DBUG_ASSERT(field != nullptr);
+ DBUG_ASSERT(dst != nullptr);
+ DBUG_ASSERT(*dst != nullptr);
+
+ const int max_len= fpi->m_max_image_len;
+ field->make_sort_key(*dst, max_len);
+ *dst += max_len;
+}
+
+/*
+ Compares two keys without unpacking
+
+ @return
+ 0 - Ok. *column_index is set to the index of the first column which
+ differs, or to m_key_parts if the two keys are equal.
+ 1 - Data format error.
+*/
+int Rdb_key_def::compare_keys(
+ const rocksdb::Slice *key1,
+ const rocksdb::Slice *key2,
+ std::size_t* column_index
+) const
+{
+ DBUG_ASSERT(key1 != nullptr);
+ DBUG_ASSERT(key2 != nullptr);
+ DBUG_ASSERT(column_index != nullptr);
+
+ // the caller should check the return value and
+ // not rely on column_index being valid
+ *column_index = 0xbadf00d;
+
+ Rdb_string_reader reader1(key1);
+ Rdb_string_reader reader2(key2);
+
+ // Skip the index number
+ if ((!reader1.read(INDEX_NUMBER_SIZE)))
+ return 1;
+
+ if ((!reader2.read(INDEX_NUMBER_SIZE)))
+ return 1;
+
+ for (uint i= 0; i < m_key_parts ; i++)
+ {
+ Rdb_field_packing *fpi= &m_pack_info[i];
+ if (fpi->m_maybe_null)
+ {
+ auto nullp1= reader1.read(1);
+ auto nullp2= reader2.read(1);
+ if (nullp1 == nullptr || nullp2 == nullptr)
+ return 1; //error
+
+ if (*nullp1 != *nullp2)
+ {
+ *column_index = i;
+ return 0;
+ }
+
+ if (*nullp1 == 0)
+ {
+ /* This is a NULL value */
+ continue;
+ }
+ }
+
+ auto before_skip1 = reader1.get_current_ptr();
+ auto before_skip2 = reader2.get_current_ptr();
+ DBUG_ASSERT(fpi->m_skip_func);
+ if (fpi->m_skip_func(fpi, nullptr, &reader1))
+ return 1;
+ if (fpi->m_skip_func(fpi, nullptr, &reader2))
+ return 1;
+ auto size1 = reader1.get_current_ptr() - before_skip1;
+ auto size2 = reader2.get_current_ptr() - before_skip2;
+ if (size1 != size2)
+ {
+ *column_index = i;
+ return 0;
+ }
+
+ if (memcmp(before_skip1, before_skip2, size1) != 0) {
+ *column_index = i;
+ return 0;
+ }
+ }
+
+ *column_index = m_key_parts;
+ return 0;
+
+}
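+
+/*
+  Usage sketch (illustrative, not taken from a real caller): after a
+  successful call,
+
+    std::size_t first_diff;
+    if (kd->compare_keys(&key1, &key2, &first_diff) == 0 &&
+        first_diff == kd->get_key_parts())
+      ; // the two mem-comparable keys are identical
+
+  where kd is a hypothetical Rdb_key_def pointer and get_key_parts() is
+  assumed to return m_key_parts.
+*/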
+
+
+/*
+ @brief
+ Given a zero-padded key, determine its real key length
+
+ @detail
+ Walks the key with each keypart's skip function and returns the number of
+ bytes actually consumed (fixed-size skip functions just read their fixed
+ length).
+*/
+
+size_t Rdb_key_def::key_length(TABLE *table, const rocksdb::Slice &key) const
+{
+ DBUG_ASSERT(table != nullptr);
+
+ Rdb_string_reader reader(&key);
+
+ if ((!reader.read(INDEX_NUMBER_SIZE)))
+ return size_t(-1);
+
+ for (uint i= 0; i < m_key_parts ; i++)
+ {
+ Rdb_field_packing *fpi= &m_pack_info[i];
+ Field *field= nullptr;
+ if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY)
+ field= fpi->get_field_in_table(table);
+ if (fpi->m_skip_func(fpi, field, &reader))
+ return size_t(-1);
+ }
+ return key.size() - reader.remaining_bytes();
+}
+
+
+/*
+ Take the mem-comparable form and unpack_info and unpack it into the record
+ buffer (table->record[0] format)
+
+ @detail
+ Not all indexes support this
+
+ @return
+ UNPACK_SUCCESS - Ok
+ UNPACK_FAILURE - Data format error.
+ UNPACK_INFO_MISSING - Unpack info was unavailable and was required for
+ unpacking.
+*/
+
+int Rdb_key_def::unpack_record(TABLE *table, uchar *buf,
+ const rocksdb::Slice *packed_key,
+ const rocksdb::Slice *unpack_info,
+ bool verify_checksums) const
+{
+ Rdb_string_reader reader(packed_key);
+ Rdb_string_reader unp_reader("");
+ const bool is_hidden_pk= (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY);
+ const bool hidden_pk_exists= table_has_hidden_pk(table);
+ const bool secondary_key= (m_index_type == INDEX_TYPE_SECONDARY);
+ // There is no checksum data after unpack_info for primary keys, because
+ // the layout there is different. The checksum is verified in
+ // ha_rocksdb::convert_record_from_storage_format instead.
+ DBUG_ASSERT_IMP(!secondary_key, !verify_checksums);
+
+ if (unpack_info)
+ {
+ unp_reader= Rdb_string_reader(unpack_info);
+ }
+
+ // Old Field methods expected the record pointer to be at tbl->record[0].
+ // The quick and easy way to fix this was to pass along the offset
+ // for the pointer.
+ my_ptrdiff_t ptr_diff= buf - table->record[0];
+
+ // Skip the index number
+ if ((!reader.read(INDEX_NUMBER_SIZE)))
+ {
+ return 1;
+ }
+
+ // For secondary keys, we expect the value field to contain unpack data and
+ // checksum data in that order. One or both can be missing, but they cannot
+ // be reordered.
+ bool has_unpack_info= unp_reader.remaining_bytes() &&
+ *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG;
+ if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE))
+ {
+ return 1;
+ }
+
+ for (uint i= 0; i < m_key_parts ; i++)
+ {
+ Rdb_field_packing *fpi= &m_pack_info[i];
+
+ /*
+ Hidden pk field is packed at the end of the secondary keys, but the SQL
+ layer does not know about it. Skip retrieving field if hidden pk.
+ */
+ if ((secondary_key && hidden_pk_exists && i + 1 == m_key_parts) ||
+ is_hidden_pk)
+ {
+ DBUG_ASSERT(fpi->m_unpack_func);
+ if (fpi->m_skip_func(fpi, nullptr, &reader))
+ {
+ return 1;
+ }
+ continue;
+ }
+
+ Field *field= fpi->get_field_in_table(table);
+
+ bool do_unpack= secondary_key ||
+ !fpi->uses_unpack_info() ||
+ (m_kv_format_version >= Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1);
+ if (fpi->m_unpack_func && do_unpack)
+ {
+ /* It is possible to unpack this column. Do it. */
+
+ if (fpi->m_maybe_null)
+ {
+ const char* nullp;
+ if (!(nullp= reader.read(1)))
+ return 1;
+ if (*nullp == 0)
+ {
+ /* Set the NULL-bit of this field */
+ field->set_null(ptr_diff);
+ /* Also set the field to its default value */
+ uint field_offset= field->ptr - table->record[0];
+ memcpy(buf + field_offset,
+ table->s->default_values + field_offset,
+ field->pack_length());
+ continue;
+ }
+ else if (*nullp == 1)
+ field->set_notnull(ptr_diff);
+ else
+ return 1;
+ }
+
+ // If we need unpack info, but there is none, tell the unpack function
+ // this by passing unp_reader as nullptr. If we never read unpack_info
+ // during unpacking anyway, then there won't be an error.
+ int res;
+ bool maybe_missing_unpack= !has_unpack_info && fpi->uses_unpack_info();
+ res= fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff,
+ &reader,
+ maybe_missing_unpack ? nullptr : &unp_reader);
+
+ if (res)
+ return res;
+ }
+ else
+ {
+ /* It is impossible to unpack the column. Skip it. */
+ if (fpi->m_maybe_null)
+ {
+ const char* nullp;
+ if (!(nullp= reader.read(1)))
+ return 1;
+ if (*nullp == 0)
+ {
+ /* This is a NULL value */
+ continue;
+ }
+ /* If NULL marker is not '0', it can be only '1' */
+ if (*nullp != 1)
+ return 1;
+ }
+ if (fpi->m_skip_func(fpi, field, &reader))
+ return 1;
+ }
+ }
+
+ /*
+ Check checksum values if present
+ */
+ const char* ptr;
+ if ((ptr= unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG)
+ {
+ if (verify_checksums)
+ {
+ uint32_t stored_key_chksum= rdb_netbuf_to_uint32(
+ (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE));
+ uint32_t stored_val_chksum= rdb_netbuf_to_uint32(
+ (const uchar*)unp_reader.read(RDB_CHECKSUM_SIZE));
+
+ uint32_t computed_key_chksum=
+ crc32(0, (const uchar*)packed_key->data(), packed_key->size());
+ uint32_t computed_val_chksum=
+ crc32(0, (const uchar*) unpack_info->data(),
+ unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE);
+
+ DBUG_EXECUTE_IF("myrocks_simulate_bad_key_checksum1",
+ stored_key_chksum++;);
+
+ if (stored_key_chksum != computed_key_chksum)
+ {
+ report_checksum_mismatch(true, packed_key->data(),
+ packed_key->size());
+ return 1;
+ }
+
+ if (stored_val_chksum != computed_val_chksum)
+ {
+ report_checksum_mismatch(
+ false, unpack_info->data(),
+ unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE);
+ return 1;
+ }
+ }
+ else
+ {
+ /* The checksums are present but we are not checking checksums */
+ }
+ }
+
+ if (reader.remaining_bytes())
+ return 1;
+
+ return 0;
+}
+
+bool Rdb_key_def::table_has_hidden_pk(const TABLE* table)
+{
+ return table->s->primary_key == MAX_INDEXES;
+}
+
+void Rdb_key_def::report_checksum_mismatch(bool is_key, const char *data,
+ size_t data_size) const
+{
+ std::string buf;
+ // NO_LINT_DEBUG
+ sql_print_error("Checksum mismatch in %s of key-value pair for index 0x%x",
+ is_key? "key" : "value", get_index_number());
+
+ buf = rdb_hexdump(data, data_size, 1000);
+ // NO_LINT_DEBUG
+ sql_print_error("Data with incorrect checksum (%" PRIu64 " bytes): %s",
+ (uint64_t)data_size, buf.c_str());
+
+ my_error(ER_INTERNAL_ERROR, MYF(0), "Record checksum mismatch");
+}
+
+bool Rdb_key_def::index_format_min_check(int pk_min, int sk_min) const
+{
+ switch (m_index_type)
+ {
+ case INDEX_TYPE_PRIMARY:
+ case INDEX_TYPE_HIDDEN_PRIMARY:
+ return (m_kv_format_version >= pk_min);
+ case INDEX_TYPE_SECONDARY:
+ return (m_kv_format_version >= sk_min);
+ default:
+ DBUG_ASSERT(0);
+ return false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+// Rdb_field_packing
+///////////////////////////////////////////////////////////////////////////////////////////
+
+/*
+ Function of type rdb_index_field_skip_t
+*/
+
+int rdb_skip_max_length(const Rdb_field_packing *fpi,
+ const Field *field __attribute__((__unused__)),
+ Rdb_string_reader *reader)
+{
+ if (!reader->read(fpi->m_max_image_len))
+ return 1;
+ return 0;
+}
+
+/*
+ (RDB_ESCAPE_LENGTH-1) must be an even number so that a chunk's payload never
+ splits a UTF-8 character (encoded as a two-byte entity) in the middle. See
+ the implementation of rdb_unpack_binary_or_utf8_varchar.
+*/
+
+const uint RDB_ESCAPE_LENGTH= 9;
+static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0,
+ "RDB_ESCAPE_LENGTH-1 must be even.");
+
+/*
+ Function of type rdb_index_field_skip_t
+*/
+
+static int rdb_skip_variable_length(
+ const Rdb_field_packing *fpi __attribute__((__unused__)),
+ const Field *field, Rdb_string_reader *reader)
+{
+ const uchar *ptr;
+ bool finished= false;
+
+ size_t dst_len; /* How much data can be there */
+ if (field)
+ {
+ const Field_varstring* field_var=
+ static_cast<const Field_varstring*>(field);
+ dst_len= field_var->pack_length() - field_var->length_bytes;
+ }
+ else
+ {
+ dst_len= UINT_MAX;
+ }
+
+ /* Decode the length-emitted encoding here */
+ while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH)))
+ {
+ /* See rdb_pack_with_varchar_encoding. */
+ uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes
+ uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad;
+
+ if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len)
+ {
+ return 1; /* cannot store that much, invalid data */
+ }
+
+ if (used_bytes < RDB_ESCAPE_LENGTH - 1)
+ {
+ finished= true;
+ break;
+ }
+ dst_len -= used_bytes;
+ }
+
+ if (!finished)
+ {
+ return 1;
+ }
+
+ return 0;
+}
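+
+/*
+  Illustrative example: with RDB_ESCAPE_LENGTH=9 each chunk carries up to 8
+  payload bytes plus one flag byte.  A 3-byte value {1,2,3} arrives as the
+  chunk {1,2,3,0,0,0,0,0,250}: the flag 250 = 255-5 means 5 padding bytes,
+  so used_bytes = 8-5 = 3, and because used_bytes < 8 this is also the last
+  chunk.
+*/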
+
+const int VARCHAR_CMP_LESS_THAN_SPACES = 1;
+const int VARCHAR_CMP_EQUAL_TO_SPACES = 2;
+const int VARCHAR_CMP_GREATER_THAN_SPACES = 3;
+
+/*
+ Skip a keypart that uses Variable-Length Space-Padded encoding
+*/
+
+static int rdb_skip_variable_space_pad(
+ const Rdb_field_packing *fpi,
+ const Field *field, Rdb_string_reader *reader)
+{
+ const uchar *ptr;
+ bool finished= false;
+
+ size_t dst_len= UINT_MAX; /* How much data can be there */
+
+ if (field)
+ {
+ const Field_varstring* field_var=
+ static_cast<const Field_varstring*>(field);
+ dst_len= field_var->pack_length() - field_var->length_bytes;
+ }
+
+ /* Decode the length-emitted encoding here */
+ while ((ptr= (const uchar*)reader->read(fpi->m_segment_size)))
+ {
+ // See rdb_pack_with_varchar_space_pad
+ uchar c= ptr[fpi->m_segment_size-1];
+ if (c == VARCHAR_CMP_EQUAL_TO_SPACES)
+ {
+ // This is the last segment
+ finished= true;
+ break;
+ }
+ else if (c == VARCHAR_CMP_LESS_THAN_SPACES ||
+ c == VARCHAR_CMP_GREATER_THAN_SPACES)
+ {
+ // This is not the last segment
+ if ((fpi->m_segment_size-1) > dst_len)
+ {
+ // The segment is full of data but the table field can't hold that
+ // much! This must be data corruption.
+ return 1;
+ }
+ dst_len -= (fpi->m_segment_size-1);
+ }
+ else
+ {
+ // Encountered a value that's none of the VARCHAR_CMP* constants
+ // It's data corruption.
+ return 1;
+ }
+ }
+ return finished? 0: 1;
+}
+
+
+/*
+ Function of type rdb_index_field_unpack_t
+*/
+
+int rdb_unpack_integer(
+ Rdb_field_packing *fpi, Field *field, uchar *to,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ const int length= fpi->m_max_image_len;
+
+ const uchar *from;
+ if (!(from= (const uchar*)reader->read(length)))
+ return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+#ifdef WORDS_BIGENDIAN
+ {
+ if (((Field_num*)field)->unsigned_flag)
+ to[0]= from[0];
+ else
+ to[0]= (char)(from[0] ^ 128); // Reverse the sign bit.
+ memcpy(to + 1, from + 1, length - 1);
+ }
+#else
+ {
+ const int sign_byte= from[0];
+ if (((Field_num*)field)->unsigned_flag)
+ to[length - 1]= sign_byte;
+ else
+ to[length - 1]= static_cast<char>(sign_byte ^ 128); // Reverse the sign bit.
+ for (int i= 0, j= length - 1; i < length-1; ++i, --j)
+ to[i]= from[j];
+ }
+#endif
+ return UNPACK_SUCCESS;
+}
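+
+/*
+  Illustrative example (little-endian build, signed 4-byte INT): the value 1
+  is stored in the key as the big-endian image {0x80, 0x00, 0x00, 0x01} with
+  the sign bit flipped, and rdb_unpack_integer() restores the native-order
+  image {0x01, 0x00, 0x00, 0x00}.
+*/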
+
+#if !defined(WORDS_BIGENDIAN)
+static void rdb_swap_double_bytes(uchar *dst, const uchar *src)
+{
+#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN)
+ // A few systems store the most-significant _word_ first on little-endian
+ dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0];
+ dst[4] = src[7]; dst[5] = src[6]; dst[6] = src[5]; dst[7] = src[4];
+#else
+ dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4];
+ dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0];
+#endif
+}
+
+static void rdb_swap_float_bytes(uchar *dst, const uchar *src)
+{
+ dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0];
+}
+#else
+#define rdb_swap_double_bytes nullptr
+#define rdb_swap_float_bytes nullptr
+#endif
+
+static int rdb_unpack_floating_point(
+ uchar *dst, Rdb_string_reader *reader,
+ size_t size, int exp_digit,
+ const uchar *zero_pattern,
+ const uchar *zero_val,
+ void (*swap_func)(uchar *, const uchar *))
+{
+ const uchar* from;
+
+ from= (const uchar*) reader->read(size);
+ if (from == nullptr)
+ return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+ /* Check to see if the value is zero */
+ if (memcmp(from, zero_pattern, size) == 0)
+ {
+ memcpy(dst, zero_val, size);
+ return UNPACK_SUCCESS;
+ }
+
+#if defined(WORDS_BIGENDIAN)
+ // On big-endian, output can go directly into result
+ uchar *tmp = dst;
+#else
+ // Otherwise use a temporary buffer to make byte-swapping easier later
+ uchar tmp[8];
+#endif
+
+ memcpy(tmp, from, size);
+
+ if (tmp[0] & 0x80)
+ {
+ // If the high bit is set the original value was positive so
+ // remove the high bit and subtract one from the exponent.
+ ushort exp_part= ((ushort) tmp[0] << 8) | (ushort) tmp[1];
+ exp_part &= 0x7FFF; // clear high bit;
+ exp_part -= (ushort) 1 << (16 - 1 - exp_digit); // subtract from exponent
+ tmp[0] = (uchar) (exp_part >> 8);
+ tmp[1] = (uchar) exp_part;
+ }
+ else
+ {
+ // Otherwise the original value was negative and all bytes have been
+ // negated.
+ for (size_t ii = 0; ii < size; ii++)
+ tmp[ii] ^= 0xFF;
+ }
+
+#if !defined(WORDS_BIGENDIAN)
+ // On little-endian, swap the bytes around
+ swap_func(dst, tmp);
+#else
+ static_assert(swap_func == nullptr, "Assuming that no swapping is needed.");
+#endif
+
+ return UNPACK_SUCCESS;
+}
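+
+/*
+  Summary of the reverse transform implemented above: a stored image equal to
+  zero_pattern decodes to zero; if the high bit is set, the original value
+  was positive, so the high bit is cleared and the exponent decremented;
+  otherwise the original was negative and every byte is inverted.  On
+  little-endian builds the bytes are then swapped back into native order.
+*/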
+
+#if !defined(DBL_EXP_DIG)
+#define DBL_EXP_DIG (sizeof(double) * 8 - DBL_MANT_DIG)
+#endif
+
+
+/*
+ Function of type rdb_index_field_unpack_t
+
+ Unpack a double by doing the reverse action of change_double_for_sort
+ (sql/filesort.cc). Note that this only works on IEEE values.
+ Note also that this code assumes that NaN and +/-Infinity are never
+ allowed in the database.
+*/
+static int rdb_unpack_double(
+ Rdb_field_packing *fpi __attribute__((__unused__)),
+ Field *field __attribute__((__unused__)),
+ uchar *field_ptr,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ static double zero_val = 0.0;
+ static const uchar zero_pattern[8] = { 128, 0, 0, 0, 0, 0, 0, 0 };
+
+ return rdb_unpack_floating_point(field_ptr, reader,
+ sizeof(double),
+ DBL_EXP_DIG, zero_pattern, (const uchar *) &zero_val,
+ rdb_swap_double_bytes);
+}
+
+#if !defined(FLT_EXP_DIG)
+#define FLT_EXP_DIG (sizeof(float) * 8 - FLT_MANT_DIG)
+#endif
+
+/*
+ Function of type rdb_index_field_unpack_t
+
+ Unpack a float by doing the reverse action of Field_float::make_sort_key
+ (sql/field.cc). Note that this only works on IEEE values.
+ Note also that this code assumes that NaN and +/-Infinity are never
+ allowed in the database.
+*/
+static int rdb_unpack_float(
+ Rdb_field_packing *, Field *field __attribute__((__unused__)),
+ uchar *field_ptr,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ static float zero_val = 0.0;
+ static const uchar zero_pattern[4] = { 128, 0, 0, 0 };
+
+ return rdb_unpack_floating_point(field_ptr, reader,
+ sizeof(float),
+ FLT_EXP_DIG, zero_pattern, (const uchar *) &zero_val,
+ rdb_swap_float_bytes);
+}
+
+/*
+ Function of type rdb_index_field_unpack_t, used to unpack by doing the
+ reverse action of Field_newdate::make_sort_key.
+*/
+
+int rdb_unpack_newdate(
+ Rdb_field_packing *fpi, Field *field,
+ uchar *field_ptr,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ const char* from;
+ DBUG_ASSERT(fpi->m_max_image_len == 3);
+
+ if (!(from= reader->read(3)))
+ return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+ field_ptr[0]= from[2];
+ field_ptr[1]= from[1];
+ field_ptr[2]= from[0];
+ return UNPACK_SUCCESS;
+}
+
+
+/*
+ Function of type rdb_index_field_unpack_t, used to unpack the string by
+ copying it over.
+ This is for BINARY(n) where the value occupies the whole length.
+*/
+
+static int rdb_unpack_binary_str(
+ Rdb_field_packing *fpi, Field *field,
+ uchar *to,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ const char* from;
+ if (!(from= reader->read(fpi->m_max_image_len)))
+ return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+ memcpy(to, from, fpi->m_max_image_len);
+ return UNPACK_SUCCESS;
+}
+
+
+/*
+ Function of type rdb_index_field_unpack_t.
+ For UTF-8, we need to convert 2-byte wide-character entities back into
+ UTF8 sequences.
+*/
+
+static int rdb_unpack_utf8_str(
+ Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ my_core::CHARSET_INFO *cset= (my_core::CHARSET_INFO*)field->charset();
+ const uchar *src;
+ if (!(src= (const uchar*)reader->read(fpi->m_max_image_len)))
+ return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+ const uchar *src_end= src + fpi->m_max_image_len;
+ uchar *dst_end= dst + field->pack_length();
+
+ while (src < src_end)
+ {
+ my_wc_t wc= (src[0] <<8) | src[1];
+ src += 2;
+ int res= cset->cset->wc_mb(cset, wc, dst, dst_end);
+ DBUG_ASSERT(res > 0 && res <=3);
+ if (res < 0)
+ return UNPACK_FAILURE;
+ dst += res;
+ }
+
+ cset->cset->fill(cset, reinterpret_cast<char *>(dst),
+ dst_end - dst, cset->pad_char);
+ return UNPACK_SUCCESS;
+}
+
+
+/*
+ Function of type rdb_index_field_pack_t
+*/
+
+static void rdb_pack_with_varchar_encoding(
+ Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst,
+ Rdb_pack_field_context *pack_ctx __attribute__((__unused__)))
+{
+ /*
+ Use a flag byte every Nth byte. Set it to (255 - #pad), where #pad is the
+ number of padding bytes used in the preceding N-1 bytes (0 when the
+ var-length field filled all of them).
+
+ If N=8 and the field is:
+ * 3 bytes (1, 2, 3) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 251
+ * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252
+ And the 4 byte string compares as greater than the 3 byte string
+ */
+ const CHARSET_INFO *charset= field->charset();
+ Field_varstring *field_var= (Field_varstring*)field;
+
+ size_t value_length= (field_var->length_bytes == 1) ?
+ (uint) *field->ptr :
+ uint2korr(field->ptr);
+ size_t xfrm_len;
+ xfrm_len= charset->coll->strnxfrm(charset,
+ buf, fpi->m_max_image_len,
+ field_var->char_length(),
+ field_var->ptr + field_var->length_bytes,
+ value_length,
+ 0);
+
+ /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */
+
+ size_t encoded_size= 0;
+ uchar *ptr= *dst;
+ while (1)
+ {
+ size_t copy_len= std::min((size_t)RDB_ESCAPE_LENGTH-1, xfrm_len);
+ size_t padding_bytes= RDB_ESCAPE_LENGTH - 1 - copy_len;
+ memcpy(ptr, buf, copy_len);
+ ptr += copy_len;
+ buf += copy_len;
+ // pad with zeros if necessary;
+ for (size_t idx= 0; idx < padding_bytes; idx++)
+ *(ptr++)= 0;
+ *(ptr++) = 255 - padding_bytes;
+
+ xfrm_len -= copy_len;
+ encoded_size += RDB_ESCAPE_LENGTH;
+ if (padding_bytes !=0)
+ break;
+ }
+ *dst += encoded_size;
+}
+
+
+/*
+ Compare the string in [buf..buf_end) with a string that is an infinite
+ sequence of strings in space_xfrm
+*/
+
+static
+int rdb_compare_string_with_spaces(const uchar *buf, const uchar *buf_end,
+ const std::vector<uchar> *space_xfrm)
+{
+ int cmp= 0;
+ while (buf < buf_end)
+ {
+ size_t bytes = std::min((size_t) (buf_end - buf), space_xfrm->size());
+ if ((cmp= memcmp(buf, space_xfrm->data(), bytes)) != 0)
+ break;
+ buf += bytes;
+ }
+ return cmp;
+}
+
+static const int RDB_TRIMMED_CHARS_OFFSET= 8;
+/*
+ Pack the data with Variable-Length Space-Padded Encoding.
+
+ The encoding is there to meet two goals:
+
+ Goal#1. Comparison. The SQL standard says
+
+ " If the collation for the comparison has the PAD SPACE characteristic,
+ for the purposes of the comparison, the shorter value is effectively
+ extended to the length of the longer by concatenation of <space>s on the
+ right.
+
+ At the moment, all MySQL collations except one have the PAD SPACE
+ characteristic. The exception is the "binary" collation that is used by
+ [VAR]BINARY columns. (Note that binary collations for specific charsets,
+ like utf8_bin or latin1_bin are not the same as "binary" collation, they have
+ the PAD SPACE characteristic).
+
+ Goal#2 is to preserve the number of trailing spaces in the original value.
+
+ This is achieved by using the following encoding:
+ The key part:
+ - Stores mem-comparable image of the column
+ - It is stored in chunks of fpi->m_segment_size bytes (*)
+ = If the remainder of the chunk is not occupied, it is padded with mem-
+ comparable image of the space character (cs->pad_char to be precise).
+ - The last byte of the chunk shows how the rest of column's mem-comparable
+ image would compare to mem-comparable image of the column extended with
+ spaces. There are three possible values.
+ - VARCHAR_CMP_LESS_THAN_SPACES,
+ - VARCHAR_CMP_EQUAL_TO_SPACES
+ - VARCHAR_CMP_GREATER_THAN_SPACES
+
+ VARCHAR_CMP_EQUAL_TO_SPACES means that this chunk is the last one (the rest
+ is spaces, or something that sorts as spaces, so there is no reason to store
+ it).
+
+ Example: if fpi->m_segment_size=5, and the collation is latin1_bin:
+
+ 'abcd\0' => [ 'abcd' <VARCHAR_CMP_LESS> ]['\0 ' <VARCHAR_CMP_EQUAL> ]
+ 'abcd' => [ 'abcd' <VARCHAR_CMP_EQUAL>]
+ 'abcd ' => [ 'abcd' <VARCHAR_CMP_EQUAL>]
+ 'abcdZZZZ' => [ 'abcd' <VARCHAR_CMP_GREATER>][ 'ZZZZ' <VARCHAR_CMP_EQUAL>]
+
+ As mentioned above, the last chunk is padded with mem-comparable images of
+ cs->pad_char. It can be 1-byte long (latin1), 2 (utf8_bin), 3 (utf8mb4), etc.
+
+ fpi->m_segment_size depends on the used collation. It is chosen to be such
+ that no mem-comparable image of space will ever stretch across the segments
+ (see get_segment_size_from_collation).
+
+ == The value part (aka unpack_info) ==
+ The value part stores the number of space characters that one needs to add
+ when unpacking the string.
+ - If the number is positive, it means add this many spaces at the end
+ - If the number is negative, it means padding has added extra spaces which
+ must be removed.
+
+ Storage considerations
+ - depending on column's max size, the number may occupy 1 or 2 bytes
+ - the number of spaces that need to be removed is not more than
+ RDB_TRIMMED_CHARS_OFFSET=8, so we offset the number by that value and
+ then store it as unsigned.
+
+ @seealso
+ rdb_unpack_binary_or_utf8_varchar_space_pad
+ rdb_unpack_simple_varchar_space_pad
+ rdb_dummy_make_unpack_info
+ rdb_skip_variable_space_pad
+*/
+
+static void rdb_pack_with_varchar_space_pad(
+ Rdb_field_packing *fpi, Field *field, uchar *buf, uchar **dst,
+ Rdb_pack_field_context *pack_ctx)
+{
+ Rdb_string_writer *unpack_info= pack_ctx->writer;
+ const CHARSET_INFO *charset= field->charset();
+ auto field_var= static_cast<Field_varstring *>(field);
+
+ size_t value_length= (field_var->length_bytes == 1) ?
+ (uint) *field->ptr :
+ uint2korr(field->ptr);
+
+ size_t trimmed_len=
+ charset->cset->lengthsp(charset,
+ (const char*)field_var->ptr +
+ field_var->length_bytes,
+ value_length);
+ size_t xfrm_len;
+ xfrm_len= charset->coll->strnxfrm(charset,
+ buf, fpi->m_max_image_len,
+ field_var->char_length(),
+ field_var->ptr + field_var->length_bytes,
+ trimmed_len,
+ 0);
+
+ /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */
+ uchar *buf_end= buf + xfrm_len;
+
+ size_t encoded_size= 0;
+ uchar *ptr= *dst;
+ size_t padding_bytes;
+ while (true)
+ {
+ size_t copy_len= std::min<size_t>(fpi->m_segment_size-1, buf_end - buf);
+ padding_bytes= fpi->m_segment_size - 1 - copy_len;
+ memcpy(ptr, buf, copy_len);
+ ptr += copy_len;
+ buf += copy_len;
+
+ if (padding_bytes)
+ {
+ memcpy(ptr, fpi->space_xfrm->data(), padding_bytes);
+ ptr+= padding_bytes;
+ *ptr= VARCHAR_CMP_EQUAL_TO_SPACES; // last segment
+ }
+ else
+ {
+ // Compare the string suffix with a hypothetical infinite string of
+ // spaces. It could be that the first difference is beyond the end of
+ // current chunk.
+ int cmp= rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm);
+
+ if (cmp < 0)
+ *ptr= VARCHAR_CMP_LESS_THAN_SPACES;
+ else if (cmp > 0)
+ *ptr= VARCHAR_CMP_GREATER_THAN_SPACES;
+ else
+ {
+ // It turns out all the rest are spaces.
+ *ptr= VARCHAR_CMP_EQUAL_TO_SPACES;
+ }
+ }
+ encoded_size += fpi->m_segment_size;
+
+ if (*(ptr++) == VARCHAR_CMP_EQUAL_TO_SPACES)
+ break;
+ }
+
+ // m_unpack_info_stores_value means unpack_info stores the whole original
+ // value. There is no need to store the number of trimmed/padded endspaces
+ // in that case.
+ if (unpack_info && !fpi->m_unpack_info_stores_value)
+ {
+ // (value_length - trimmed_len) is the number of trimmed space *characters*
+ // then, padding_bytes is the number of *bytes* added as padding
+ // then, we add 8, because we don't store negative values.
+ DBUG_ASSERT(padding_bytes % fpi->space_xfrm_len == 0);
+ DBUG_ASSERT((value_length - trimmed_len)% fpi->space_mb_len == 0);
+ size_t removed_chars= RDB_TRIMMED_CHARS_OFFSET +
+ (value_length - trimmed_len) / fpi->space_mb_len -
+ padding_bytes/fpi->space_xfrm_len;
+
+ if (fpi->m_unpack_info_uses_two_bytes)
+ {
+ unpack_info->write_uint16(removed_chars);
+ }
+ else
+ {
+ DBUG_ASSERT(removed_chars < 0x100);
+ unpack_info->write_uint8(removed_chars);
+ }
+ }
+
+ *dst += encoded_size;
+}
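+
+/*
+  Worked example (illustrative; latin1_bin, m_segment_size=9): the value
+  'ab   ' (two letters, three trailing spaces) is trimmed to 'ab' and packed
+  as a single segment [ 'ab', 6 space bytes, VARCHAR_CMP_EQUAL_TO_SPACES ].
+  unpack_info stores removed_chars = 8 + 3 - 6 = 5; since 5 <=
+  RDB_TRIMMED_CHARS_OFFSET, the unpacker drops 8-5=3 padding bytes from the
+  last segment and keeps the remaining 5 payload bytes, restoring 'ab   '.
+*/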
+
+/*
+ Function of type rdb_index_field_unpack_t
+*/
+
+static int rdb_unpack_binary_or_utf8_varchar(
+ Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader __attribute__((__unused__)))
+{
+ const uchar *ptr;
+ size_t len= 0;
+ bool finished= false;
+ uchar *d0= dst;
+ Field_varstring* field_var= (Field_varstring*)field;
+ dst += field_var->length_bytes;
+ // How much we can unpack
+ size_t dst_len= field_var->pack_length() - field_var->length_bytes;
+ uchar *dst_end= dst + dst_len;
+
+ /* Decode the length-emitted encoding here */
+ while ((ptr= (const uchar*)reader->read(RDB_ESCAPE_LENGTH)))
+ {
+ /* See rdb_pack_with_varchar_encoding. */
+ uchar pad= 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes
+ uchar used_bytes= RDB_ESCAPE_LENGTH - 1 - pad;
+
+ if (used_bytes > RDB_ESCAPE_LENGTH - 1)
+ {
+ return UNPACK_FAILURE; /* cannot store that much, invalid data */
+ }
+
+ if (dst_len < used_bytes)
+ {
+ /* Encoded index tuple is longer than the size in the record buffer? */
+ return UNPACK_FAILURE;
+ }
+
+ /*
+ Now, we need to decode used_bytes of data and append them to the value.
+ */
+ if (fpi->m_varchar_charset == &my_charset_utf8_bin)
+ {
+ if (used_bytes & 1)
+ {
+ /*
+ UTF-8 characters are encoded into two-byte entities. There is no way
+ we can have an odd number of bytes after encoding.
+ */
+ return UNPACK_FAILURE;
+ }
+
+ const uchar *src= ptr;
+ const uchar *src_end= ptr + used_bytes;
+ while (src < src_end)
+ {
+ my_wc_t wc= (src[0] <<8) | src[1];
+ src += 2;
+ const CHARSET_INFO *cset= fpi->m_varchar_charset;
+ int res= cset->cset->wc_mb(cset, wc, dst, dst_end);
+ DBUG_ASSERT(res > 0 && res <=3);
+ if (res < 0)
+ return UNPACK_FAILURE;
+ dst += res;
+ len += res;
+ dst_len -= res;
+ }
+ }
+ else
+ {
+ memcpy(dst, ptr, used_bytes);
+ dst += used_bytes;
+ dst_len -= used_bytes;
+ len += used_bytes;
+ }
+
+ if (used_bytes < RDB_ESCAPE_LENGTH - 1)
+ {
+ finished= true;
+ break;
+ }
+ }
+
+ if (!finished)
+ return UNPACK_FAILURE;
+
+ /* Save the length */
+ if (field_var->length_bytes == 1)
+ {
+ d0[0]= len;
+ }
+ else
+ {
+ DBUG_ASSERT(field_var->length_bytes == 2);
+ int2store(d0, len);
+ }
+ return UNPACK_SUCCESS;
+}
+
+/*
+ @seealso
+ rdb_pack_with_varchar_space_pad - packing function
+ rdb_unpack_simple_varchar_space_pad - unpacking function for 'simple'
+ charsets.
+ rdb_skip_variable_space_pad - skip function
+*/
+static int rdb_unpack_binary_or_utf8_varchar_space_pad(
+ Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader)
+{
+ const uchar *ptr;
+ size_t len= 0;
+ bool finished= false;
+ Field_varstring* field_var= static_cast<Field_varstring *>(field);
+ uchar *d0= dst;
+ uchar *dst_end= dst + field_var->pack_length();
+ dst += field_var->length_bytes;
+
+ uint space_padding_bytes= 0;
+ uint extra_spaces;
+ if ((fpi->m_unpack_info_uses_two_bytes?
+ unp_reader->read_uint16(&extra_spaces):
+ unp_reader->read_uint8(&extra_spaces)))
+ {
+ return UNPACK_FAILURE;
+ }
+
+ if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET)
+ {
+ space_padding_bytes= -(static_cast<int>(extra_spaces) -
+ RDB_TRIMMED_CHARS_OFFSET);
+ extra_spaces= 0;
+ }
+ else
+ extra_spaces -= RDB_TRIMMED_CHARS_OFFSET;
+
+ space_padding_bytes *= fpi->space_xfrm_len;
+
+ /* Decode the length-emitted encoding here */
+ while ((ptr= (const uchar*)reader->read(fpi->m_segment_size)))
+ {
+ char last_byte= ptr[fpi->m_segment_size - 1];
+ size_t used_bytes;
+ if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment
+ {
+ if (space_padding_bytes > (fpi->m_segment_size-1))
+ return UNPACK_FAILURE; // Cannot happen, corrupted data
+ used_bytes= (fpi->m_segment_size-1) - space_padding_bytes;
+ finished= true;
+ }
+ else
+ {
+ if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES &&
+ last_byte != VARCHAR_CMP_GREATER_THAN_SPACES)
+ {
+ return UNPACK_FAILURE; // Invalid value
+ }
+ used_bytes= fpi->m_segment_size-1;
+ }
+
+ // Now, need to decode used_bytes of data and append them to the value.
+ if (fpi->m_varchar_charset == &my_charset_utf8_bin)
+ {
+ if (used_bytes & 1)
+ {
+ /*
+ UTF-8 characters are encoded into two-byte entities. There is no way
+ we can have an odd number of bytes after encoding.
+ */
+ return UNPACK_FAILURE;
+ }
+
+ const uchar *src= ptr;
+ const uchar *src_end= ptr + used_bytes;
+ while (src < src_end)
+ {
+ my_wc_t wc= (src[0] <<8) | src[1];
+ src += 2;
+ const CHARSET_INFO *cset= fpi->m_varchar_charset;
+ int res= cset->cset->wc_mb(cset, wc, dst, dst_end);
+ DBUG_ASSERT(res <=3);
+ if (res <= 0)
+ return UNPACK_FAILURE;
+ dst += res;
+ len += res;
+ }
+ }
+ else
+ {
+ if (dst + used_bytes > dst_end)
+ return UNPACK_FAILURE;
+ memcpy(dst, ptr, used_bytes);
+ dst += used_bytes;
+ len += used_bytes;
+ }
+
+ if (finished)
+ {
+ if (extra_spaces)
+ {
+ // Both binary and UTF-8 charset store space as ' ',
+ // so the following is ok:
+ if (dst + extra_spaces > dst_end)
+ return UNPACK_FAILURE;
+ memset(dst, fpi->m_varchar_charset->pad_char, extra_spaces);
+ len += extra_spaces;
+ }
+ break;
+ }
+ }
+
+ if (!finished)
+ return UNPACK_FAILURE;
+
+ /* Save the length */
+ if (field_var->length_bytes == 1)
+ {
+ d0[0]= len;
+ }
+ else
+ {
+ DBUG_ASSERT(field_var->length_bytes == 2);
+ int2store(d0, len);
+ }
+ return UNPACK_SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+/*
+ Function of type rdb_make_unpack_info_t
+*/
+
+static void rdb_make_unpack_unknown(
+ const Rdb_collation_codec *codec __attribute__((__unused__)),
+ const Field *field, Rdb_pack_field_context *pack_ctx)
+{
+ pack_ctx->writer->write(field->ptr, field->pack_length());
+}
+
+
+/*
+ The point of this function is only to indicate that unpack_info is
+ available.
+
+ The actual unpack_info data is produced by the function that packs the key,
+ that is, rdb_pack_with_varchar_space_pad.
+*/
+
+static void rdb_dummy_make_unpack_info(
+ const Rdb_collation_codec *codec __attribute__((__unused__)),
+ const Field *field __attribute__((__unused__)),
+ Rdb_pack_field_context *pack_ctx __attribute__((__unused__)))
+{
+}
+
+/*
+ Function of type rdb_index_field_unpack_t
+*/
+
+static int rdb_unpack_unknown(Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader)
+{
+ const uchar *ptr;
+ uint len = fpi->m_unpack_data_len;
+ // We don't use anything from the key, so skip over it.
+ if (rdb_skip_max_length(fpi, field, reader))
+ {
+ return UNPACK_FAILURE;
+ }
+ // Unpack info is needed but none available.
+ if (len > 0 && unp_reader == nullptr)
+ {
+ return UNPACK_INFO_MISSING;
+ }
+ if ((ptr= (const uchar*)unp_reader->read(len)))
+ {
+ memcpy(dst, ptr, len);
+ return UNPACK_SUCCESS;
+ }
+ return UNPACK_FAILURE;
+}
+
+/*
+ Function of type rdb_make_unpack_info_t
+*/
+
+static void rdb_make_unpack_unknown_varchar(
+ const Rdb_collation_codec *codec __attribute__((__unused__)),
+ const Field *field, Rdb_pack_field_context *pack_ctx)
+{
+ auto f= static_cast<const Field_varstring *>(field);
+ uint len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr);
+ len+= f->length_bytes;
+ pack_ctx->writer->write(field->ptr, len);
+}
+
+
+/*
+ Function of type rdb_index_field_unpack_t
+
+ @detail
+ Unpack a key part in an "unknown" collation from its
+ (mem_comparable_form, unpack_info) form.
+
+ "Unknown" means we have no clue about how mem_comparable_form is made from
+ the original string, so we keep the whole original string in the unpack_info.
+
+ @seealso
+ rdb_make_unpack_unknown, rdb_unpack_unknown
+*/
+
+static int rdb_unpack_unknown_varchar(Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader)
+{
+ const uchar *ptr;
+ uchar *d0= dst;
+ auto f= static_cast<Field_varstring *>(field);
+ dst += f->length_bytes;
+ uint len_bytes= f->length_bytes;
+ // We don't use anything from the key, so skip over it.
+ if (fpi->m_skip_func(fpi, field, reader))
+ {
+ return UNPACK_FAILURE;
+ }
+ // Unpack info is needed but none available.
+ DBUG_ASSERT(len_bytes > 0);
+ if (unp_reader == nullptr)
+ {
+ return UNPACK_INFO_MISSING;
+ }
+ if ((ptr= (const uchar*)unp_reader->read(len_bytes)))
+ {
+ memcpy(d0, ptr, len_bytes);
+ uint len= len_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
+ if ((ptr= (const uchar*)unp_reader->read(len)))
+ {
+ memcpy(dst, ptr, len);
+ return UNPACK_SUCCESS;
+ }
+ }
+ return UNPACK_FAILURE;
+}
+
+
+/*
+ Write unpack_data for a "simple" collation
+*/
+static void rdb_write_unpack_simple(Rdb_bit_writer *writer,
+ const Rdb_collation_codec *codec,
+ const uchar *src, size_t src_len)
+{
+ for (uint i= 0; i < src_len; i++)
+ {
+ writer->write(codec->m_enc_size[src[i]], codec->m_enc_idx[src[i]]);
+ }
+}
+
+
+static uint rdb_read_unpack_simple(Rdb_bit_reader *reader,
+ const Rdb_collation_codec *codec,
+ const uchar *src, size_t src_len,
+ uchar *dst)
+{
+ for (uint i= 0; i < src_len; i++)
+ {
+ if (codec->m_dec_size[src[i]] > 0)
+ {
+ uint *ret;
+ // Unpack info is needed but none available.
+ if (reader == nullptr)
+ {
+ return UNPACK_INFO_MISSING;
+ }
+
+ if ((ret= reader->read(codec->m_dec_size[src[i]])) == nullptr)
+ {
+ return UNPACK_FAILURE;
+ }
+ dst[i]= codec->m_dec_idx[*ret][src[i]];
+ }
+ else
+ {
+ dst[i]= codec->m_dec_idx[0][src[i]];
+ }
+ }
+
+ return UNPACK_SUCCESS;
+}
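+
+/*
+  Illustrative example: in a case-insensitive 8-bit collation, 'a' and 'A'
+  map to the same weight byte in cs->sort_order, so the weight alone cannot
+  be unpacked.  rdb_write_unpack_simple() therefore stores a small index
+  (one bit for a two-way conflict) in unpack_info, and
+  rdb_read_unpack_simple() combines that index with m_dec_idx[] to restore
+  the original byte.
+*/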
+
+/*
+ Function of type rdb_make_unpack_info_t
+
+ @detail
+ Make unpack_data for VARCHAR(n) in a "simple" charset.
+*/
+
+static void
+rdb_make_unpack_simple_varchar(const Rdb_collation_codec* codec,
+ const Field *field,
+ Rdb_pack_field_context *pack_ctx)
+{
+ auto f= static_cast<const Field_varstring *>(field);
+ uchar *src= f->ptr + f->length_bytes;
+ size_t src_len= f->length_bytes == 1 ? (uint) *f->ptr : uint2korr(f->ptr);
+ Rdb_bit_writer bit_writer(pack_ctx->writer);
+ // The std::min compares characters with bytes, but for simple collations,
+ // mbmaxlen = 1.
+ rdb_write_unpack_simple(&bit_writer, codec, src,
+ std::min((size_t)f->char_length(), src_len));
+}
+
+/*
+ Function of type rdb_index_field_unpack_t
+
+ @seealso
+ rdb_pack_with_varchar_space_pad - packing function
+ rdb_unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function
+*/
+
+int
+rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *fpi, Field *field,
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader)
+{
+ const uchar *ptr;
+ size_t len= 0;
+ bool finished= false;
+ uchar *d0= dst;
+ Field_varstring* field_var= static_cast<Field_varstring*>(field);
+ // For simple collations, char_length is also number of bytes.
+ DBUG_ASSERT((size_t)fpi->m_max_image_len >= field_var->char_length());
+ uchar *dst_end= dst + field_var->pack_length();
+ dst += field_var->length_bytes;
+ Rdb_bit_reader bit_reader(unp_reader);
+
+ uint space_padding_bytes= 0;
+ uint extra_spaces;
+ if (!unp_reader)
+ {
+ return UNPACK_INFO_MISSING;
+ }
+
+ if ((fpi->m_unpack_info_uses_two_bytes?
+ unp_reader->read_uint16(&extra_spaces):
+ unp_reader->read_uint8(&extra_spaces)))
+ {
+ return UNPACK_FAILURE;
+ }
+
+ if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET)
+ {
+ space_padding_bytes= -(static_cast<int>(extra_spaces) -
+ RDB_TRIMMED_CHARS_OFFSET);
+ extra_spaces= 0;
+ }
+ else
+ extra_spaces -= RDB_TRIMMED_CHARS_OFFSET;
+
+ space_padding_bytes *= fpi->space_xfrm_len;
+
+ /* Decode the length-emitted encoding here */
+ while ((ptr= (const uchar*)reader->read(fpi->m_segment_size)))
+ {
+ char last_byte= ptr[fpi->m_segment_size - 1]; // VARCHAR_CMP_* marker
+ size_t used_bytes;
+ if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES)
+ {
+ // this is the last one
+ if (space_padding_bytes > (fpi->m_segment_size-1))
+ return UNPACK_FAILURE; // Cannot happen, corrupted data
+ used_bytes= (fpi->m_segment_size-1) - space_padding_bytes;
+ finished= true;
+ }
+ else
+ {
+ if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES &&
+ last_byte != VARCHAR_CMP_GREATER_THAN_SPACES)
+ {
+ return UNPACK_FAILURE;
+ }
+ used_bytes= fpi->m_segment_size-1;
+ }
+
+ if (dst + used_bytes > dst_end)
+ {
+ // The value on disk is longer than the field definition allows?
+ return UNPACK_FAILURE;
+ }
+
+ uint ret;
+ if ((ret= rdb_read_unpack_simple(&bit_reader,
+ fpi->m_charset_codec, ptr, used_bytes,
+ dst)) != UNPACK_SUCCESS)
+ {
+ return ret;
+ }
+
+ dst += used_bytes;
+ len += used_bytes;
+
+ if (finished)
+ {
+ if (extra_spaces)
+ {
+ if (dst + extra_spaces > dst_end)
+ return UNPACK_FAILURE;
+ // pad_char has a 1-byte form in all charsets that
+ // are handled by rdb_init_collation_mapping.
+ memset(dst, field_var->charset()->pad_char, extra_spaces);
+ len += extra_spaces;
+ }
+ break;
+ }
+ }
+
+ if (!finished)
+ return UNPACK_FAILURE;
+
+ /* Save the length */
+ if (field_var->length_bytes == 1)
+ {
+ d0[0]= len;
+ }
+ else
+ {
+ DBUG_ASSERT(field_var->length_bytes == 2);
+ int2store(d0, len);
+ }
+ return UNPACK_SUCCESS;
+}
+
+
+/*
+ Function of type rdb_make_unpack_info_t
+
+ @detail
+ Make unpack_data for CHAR(n) value in a "simple" charset.
+ It is CHAR(N), so SQL layer has padded the value with spaces up to N chars.
+
+ @seealso
+ The VARCHAR variant is in rdb_make_unpack_simple_varchar
+*/
+
+static void rdb_make_unpack_simple(const Rdb_collation_codec *codec,
+ const Field *field,
+ Rdb_pack_field_context *pack_ctx)
+{
+ uchar *src= field->ptr;
+ Rdb_bit_writer bit_writer(pack_ctx->writer);
+ rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length());
+}
+
+/*
+ Function of type rdb_index_field_unpack_t
+*/
+
+static int rdb_unpack_simple(Rdb_field_packing *fpi,
+ Field *field __attribute__((__unused__)),
+ uchar *dst,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unp_reader)
+{
+ const uchar *ptr;
+ uint len = fpi->m_max_image_len;
+ Rdb_bit_reader bit_reader(unp_reader);
+
+ if (!(ptr= (const uchar*)reader->read(len)))
+ {
+ return UNPACK_FAILURE;
+ }
+
+ return rdb_read_unpack_simple(unp_reader ? &bit_reader : nullptr,
+ fpi->m_charset_codec, ptr, len, dst);
+}
+
+
+// See Rdb_charset_space_info::spaces_xfrm
+const int RDB_SPACE_XFRM_SIZE= 32;
+
+// A class holding information about how space character is represented in a
+// charset.
+class Rdb_charset_space_info
+{
+ public:
+ // A few strxfrm'ed space characters, at least RDB_SPACE_XFRM_SIZE bytes
+ std::vector<uchar> spaces_xfrm;
+
+ // length(strxfrm(' '))
+ size_t space_xfrm_len;
+
+ // length of the space character itself
+ // Typically space is just 0x20 (length=1) but in ucs2 it is 0x00 0x20
+ // (length=2)
+ size_t space_mb_len;
+};
+
+static std::array<std::unique_ptr<Rdb_charset_space_info>, MY_ALL_CHARSETS_SIZE>
+rdb_mem_comparable_space;
+
+
+/*
+ @brief
+ For a given charset, get
+ - strxfrm(' '), a sample that is at least RDB_SPACE_XFRM_SIZE bytes long.
+ - length of strxfrm(charset, ' ')
+ - length of the space character in the charset
+
+ @param cs IN Charset to get the space for
+ @param xfrm OUT A few mem-comparable space characters
+ @param xfrm_len OUT Length of the mem-comparable image of one space (bytes)
+ @param mb_len OUT Length of the space character itself (bytes)
+
+ @detail
+ It is tempting to pre-generate mem-comparable form of space character for
+ every charset on server startup.
+ One can't do that: some charsets are not initialized until somebody
+ attempts to use them (e.g. create or open a table that has a field that
+ uses the charset).
+*/
+
+static
+void rdb_get_mem_comparable_space(const CHARSET_INFO *cs,
+ const std::vector<uchar> **xfrm,
+ size_t *xfrm_len,
+ size_t *mb_len)
+{
+ DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE);
+ if (!rdb_mem_comparable_space[cs->number].get())
+ {
+ mysql_mutex_lock(&rdb_mem_cmp_space_mutex);
+ if (!rdb_mem_comparable_space[cs->number].get())
+ {
+ // Upper bound of how many bytes can be occupied by multi-byte form of a
+ // character in any charset.
+ const int MAX_MULTI_BYTE_CHAR_SIZE= 4;
+ DBUG_ASSERT(cs->mbmaxlen <= MAX_MULTI_BYTE_CHAR_SIZE);
+
+ // multi-byte form of the ' ' (space) character
+ uchar space_mb[MAX_MULTI_BYTE_CHAR_SIZE];
+
+ size_t space_mb_len= cs->cset->wc_mb(cs, (my_wc_t) cs->pad_char,
+ space_mb,
+ space_mb + sizeof(space_mb));
+
+ uchar space[20]; // mem-comparable image of the space character
+
+ size_t space_len= cs->coll->strnxfrm(cs,
+ space, sizeof(space),
+ 1,
+ space_mb,
+ space_mb_len,
+ 0);
+ Rdb_charset_space_info *info= new Rdb_charset_space_info;
+ info->space_xfrm_len= space_len;
+ info->space_mb_len= space_mb_len;
+ while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE)
+ {
+ info->spaces_xfrm.insert(info->spaces_xfrm.end(), space,
+ space + space_len);
+ }
+ rdb_mem_comparable_space[cs->number].reset(info);
+ }
+ mysql_mutex_unlock(&rdb_mem_cmp_space_mutex);
+ }
+
+ *xfrm= &rdb_mem_comparable_space[cs->number]->spaces_xfrm;
+ *xfrm_len= rdb_mem_comparable_space[cs->number]->space_xfrm_len;
+ *mb_len= rdb_mem_comparable_space[cs->number]->space_mb_len;
+}
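+
+/*
+  Illustrative values (an assumption about the usual weight layouts, not a
+  definition): for latin1_bin the space weight is the single byte 0x20
+  (space_xfrm_len=1, space_mb_len=1); for utf8_bin the weight is the two-byte
+  sequence {0x00, 0x20} while the character itself is one byte
+  (space_xfrm_len=2, space_mb_len=1), matching the two-byte entities that
+  rdb_unpack_utf8_str() decodes.
+*/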
+
+mysql_mutex_t rdb_mem_cmp_space_mutex;
+
+std::array<const Rdb_collation_codec*, MY_ALL_CHARSETS_SIZE>
+ rdb_collation_data;
+mysql_mutex_t rdb_collation_data_mutex;
+
+static bool rdb_is_collation_supported(const my_core::CHARSET_INFO * cs)
+{
+ return (cs->coll == &my_collation_8bit_simple_ci_handler);
+}
+
+static const Rdb_collation_codec *rdb_init_collation_mapping(
+ const my_core::CHARSET_INFO *cs)
+{
+ DBUG_ASSERT(cs && cs->state & MY_CS_AVAILABLE);
+ const Rdb_collation_codec *codec= rdb_collation_data[cs->number];
+
+ if (codec == nullptr && rdb_is_collation_supported(cs))
+ {
+ mysql_mutex_lock(&rdb_collation_data_mutex);
+ codec= rdb_collation_data[cs->number];
+ if (codec == nullptr)
+ {
+ Rdb_collation_codec *cur= nullptr;
+
+ // Compute reverse mapping for simple collations.
+ if (cs->coll == &my_collation_8bit_simple_ci_handler)
+ {
+ cur= new Rdb_collation_codec;
+ std::map<uchar, std::vector<uchar>> rev_map;
+ size_t max_conflict_size= 0;
+ for (int src = 0; src < 256; src++)
+ {
+ uchar dst= cs->sort_order[src];
+ rev_map[dst].push_back(src);
+ max_conflict_size= std::max(max_conflict_size, rev_map[dst].size());
+ }
+ cur->m_dec_idx.resize(max_conflict_size);
+
+ for (auto const &p : rev_map)
+ {
+ uchar dst= p.first;
+ for (uint idx = 0; idx < p.second.size(); idx++)
+ {
+ uchar src= p.second[idx];
+ uchar bits= my_bit_log2(my_round_up_to_next_power(p.second.size()));
+ cur->m_enc_idx[src]= idx;
+ cur->m_enc_size[src]= bits;
+ cur->m_dec_size[dst]= bits;
+ cur->m_dec_idx[idx][dst]= src;
+ }
+ }
+
+ cur->m_make_unpack_info_func=
+ {{ rdb_make_unpack_simple_varchar, rdb_make_unpack_simple }};
+ cur->m_unpack_func=
+ {{ rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple }};
+ }
+ else
+ {
+ // Out of luck for now.
+ }
+
+ if (cur != nullptr)
+ {
+ codec= cur;
+ cur->m_cs= cs;
+ rdb_collation_data[cs->number]= cur;
+ }
+ }
+ mysql_mutex_unlock(&rdb_collation_data_mutex);
+ }
+
+ return codec;
+}
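+
+/*
+  Illustrative example of the bit-width computation above: if three source
+  bytes share one weight, my_round_up_to_next_power(3) = 4 and
+  my_bit_log2(4) = 2, so two bits of unpack data are stored for each
+  character that has that weight.
+*/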
+
+
+static int get_segment_size_from_collation(const CHARSET_INFO* cs)
+{
+ int ret;
+ if (cs == &my_charset_utf8mb4_bin ||
+ cs == &my_charset_utf16_bin ||
+ cs == &my_charset_utf16le_bin ||
+ cs == &my_charset_utf32_bin)
+ {
+ /*
+ In these collations, a character produces one weight, which is 3 bytes.
+ Segment has 3 characters, add one byte for VARCHAR_CMP_* marker, and we
+ get 3*3+1=10
+ */
+ ret= 10;
+ }
+ else
+ {
+ /*
+ All other collations. There are two classes:
+ - Unicode-based, except for collations mentioned in the if-condition.
+ For these, all weights are 2 bytes long, and a character may produce 0..8
+ weights. In any case, 8 bytes of payload in the segment guarantee that the
+ last space character won't span across segments.
+
+ - Collations not based on Unicode. These have length(strxfrm(' '))=1, so
+ there is nothing to worry about.
+
+ In both cases, take 8 bytes payload + 1 byte for VARCHAR_CMP* marker.
+ */
+ ret= 9;
+ }
+ DBUG_ASSERT(ret < RDB_SPACE_XFRM_SIZE);
+ return ret;
+}
+
+
+/*
+ @brief
+ Setup packing of index field into its mem-comparable form
+
+ @detail
+ - It is possible to produce a mem-comparable form for any datatype.
+ - Some datatypes also allow unpacking the original value from its
+ mem-comparable form.
+ = Some of these require extra information to be stored in "unpack_info".
+ unpack_info is not a part of mem-comparable form, it is only used to
+ restore the original value
+
+ @param
+ field IN field to be packed/un-packed
+
+ @return
+ TRUE - Field can be read with index-only reads
+ FALSE - Otherwise
+*/
+
+bool Rdb_field_packing::setup(const Rdb_key_def *key_descr, const Field *field,
+ uint keynr_arg, uint key_part_arg,
+ uint16 key_length)
+{
+ int res= false;
+ enum_field_types type= field ? field->real_type() : MYSQL_TYPE_LONGLONG;
+
+ m_keynr= keynr_arg;
+ m_key_part= key_part_arg;
+
+ m_maybe_null= field ? field->real_maybe_null() : false;
+ m_unpack_func= nullptr;
+ m_make_unpack_info_func= nullptr;
+ m_unpack_data_len= 0;
+ space_xfrm= nullptr; // safety
+
+ /* Calculate image length. By default, it is pack_length() */
+ m_max_image_len= field ? field->pack_length() :
+ ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN;
+ m_skip_func= rdb_skip_max_length;
+ m_pack_func= rdb_pack_with_make_sort_key;
+
+ switch (type) {
+ case MYSQL_TYPE_LONGLONG:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_INT24:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_TINY:
+ m_unpack_func= rdb_unpack_integer;
+ return true;
+
+ case MYSQL_TYPE_DOUBLE:
+ m_unpack_func= rdb_unpack_double;
+ return true;
+
+ case MYSQL_TYPE_FLOAT:
+ m_unpack_func= rdb_unpack_float;
+ return true;
+
+ case MYSQL_TYPE_NEWDECIMAL:
+ /*
+ Decimal is packed with Field_new_decimal::make_sort_key, which just
+ does memcpy.
+ Unpacking decimal values was supported only after fix for issue#253,
+ because of that ha_rocksdb::get_storage_type() handles decimal values
+ in a special way.
+ */
+ case MYSQL_TYPE_DATETIME2:
+ case MYSQL_TYPE_TIMESTAMP2:
+ /* These are packed with Field_temporal_with_date_and_timef::make_sort_key */
+ case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */
+ case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */
+ /* Everything that comes here is packed with just a memcpy(). */
+ m_unpack_func= rdb_unpack_binary_str;
+ return true;
+
+ case MYSQL_TYPE_NEWDATE:
+ /*
+ This is packed by Field_newdate::make_sort_key. It assumes the data is
+ 3 bytes, and packing is done by swapping the byte order (for both big-
+ and little-endian)
+ */
+ m_unpack_func= rdb_unpack_newdate;
+ return true;
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
+ {
+ if (key_descr &&
+ key_descr->index_format_min_check(
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1,
+ Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1))
+ {
+ // The my_charset_bin collation is special in that it will consider
+ // shorter strings sorting as less than longer strings.
+ //
+ // See Field_blob::make_sort_key for details.
+ m_max_image_len= key_length +
+ (field->charset() == &my_charset_bin
+ ? reinterpret_cast<const Field_blob*>(field)->pack_length_no_ptr()
+ : 0);
+ // Return false because indexes on text/blob will always require
+ // a prefix. With a prefix, the optimizer will not be able to do an
+ // index-only scan since there may be content occurring after the prefix
+ // length.
+ return false;
+ }
+ }
+ default:
+ break;
+ }
+
+ m_unpack_info_stores_value= false;
+ /* Handle [VAR](CHAR|BINARY) */
+
+ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING)
+ {
+ /*
+ For CHAR-based columns, check how much space the strxfrm image will take.
+ field->field_length = field->char_length() * cs->mbmaxlen.
+ */
+ const CHARSET_INFO *cs= field->charset();
+ m_max_image_len= cs->coll->strnxfrmlen(cs, field->field_length);
+ }
+ const bool is_varchar= (type == MYSQL_TYPE_VARCHAR);
+ const CHARSET_INFO *cs= field->charset();
+ // max_image_len before chunking is taken into account
+ int max_image_len_before_chunks= m_max_image_len;
+
+ if (is_varchar)
+ {
+ // The default for varchar is variable-length, without space-padding for
+ // comparisons
+ m_varchar_charset= cs;
+ m_skip_func= rdb_skip_variable_length;
+ m_pack_func= rdb_pack_with_varchar_encoding;
+ m_max_image_len=
+ (m_max_image_len/(RDB_ESCAPE_LENGTH-1) + 1) * RDB_ESCAPE_LENGTH;
+
+ auto field_var= static_cast<const Field_varstring*>(field);
+ m_unpack_info_uses_two_bytes= (field_var->field_length + 8 >= 0x100);
+ }
+
+ if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING)
+ {
+ // See http://dev.mysql.com/doc/refman/5.7/en/string-types.html for
+ // information about how character-based datatypes are compared.
+ bool use_unknown_collation= false;
+ DBUG_EXECUTE_IF("myrocks_enable_unknown_collation_index_only_scans",
+ use_unknown_collation= true;);
+
+ if (cs == &my_charset_bin)
+ {
+ // - SQL layer pads BINARY(N) so that it always is N bytes long.
+ // - For VARBINARY(N), values may have different lengths, so we're using
+ // variable-length encoding. This is also the only charset where the
+ // values are not space-padded for comparison.
+ m_unpack_func= is_varchar? rdb_unpack_binary_or_utf8_varchar :
+ rdb_unpack_binary_str;
+ res= true;
+ }
+ else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin)
+ {
+ // For _bin collations, mem-comparable form of the string is the string
+ // itself.
+
+ if (is_varchar)
+ {
+ if (!key_descr ||
+ key_descr->index_format_min_check(
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1,
+ Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1))
+ {
+ // VARCHARs
+ // - are compared as if they were space-padded
+ // - but are not actually space-padded (reading the value back
+ // produces the original value, without the padding)
+ m_unpack_func= rdb_unpack_binary_or_utf8_varchar_space_pad;
+ m_skip_func= rdb_skip_variable_space_pad;
+ m_pack_func= rdb_pack_with_varchar_space_pad;
+ m_make_unpack_info_func= rdb_dummy_make_unpack_info;
+ m_segment_size= get_segment_size_from_collation(cs);
+ m_max_image_len=
+ (max_image_len_before_chunks/(m_segment_size-1) + 1) *
+ m_segment_size;
+ rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len,
+ &space_mb_len);
+ }
+ else
+ {
+ // Older variant where VARCHARs were not compared as space-padded:
+ m_unpack_func= rdb_unpack_binary_or_utf8_varchar;
+ m_skip_func= rdb_skip_variable_length;
+ m_pack_func= rdb_pack_with_varchar_encoding;
+ }
+ }
+ else
+ {
+ // SQL layer pads CHAR(N) values to their maximum length.
+ // We just store that and restore it back.
+ m_unpack_func= (cs == &my_charset_latin1_bin)? rdb_unpack_binary_str:
+ rdb_unpack_utf8_str;
+ }
+ res= true;
+ }
+ else
+ {
+ // This is [VAR]CHAR(n) and the collation is not $(charset_name)_bin
+
+ res= true; // index-only scans are possible
+ m_unpack_data_len= is_varchar ? 0 : field->field_length;
+ uint idx= is_varchar ? 0 : 1;
+ const Rdb_collation_codec *codec= nullptr;
+
+ if (is_varchar)
+ {
+ if (cs->levels_for_order != 1)
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("RocksDB: you're trying to create an index "
+ "with a multi-level collation %s", cs->name);
+ // NO_LINT_DEBUG
+ sql_print_warning("MyRocks will handle this collation internally "
+ " as if it had a NO_PAD attribute.");
+ }
+ // VARCHAR requires space-padding for doing comparisons
+ //
+ // The check for cs->levels_for_order is to catch
+ // latin2_czech_cs and cp1250_czech_cs - multi-level collations
+ // that Variable-Length Space Padded Encoding can't handle.
+ // It is not expected to work for any other multi-level collations,
+ // either.
+ // Currently we handle these collations as NO_PAD, even if they have
+ // PAD_SPACE attribute.
+ if ((!key_descr ||
+ key_descr->index_format_min_check(
+ Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1,
+ Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE1)) &&
+ cs->levels_for_order == 1)
+ {
+ m_pack_func= rdb_pack_with_varchar_space_pad;
+ m_skip_func= rdb_skip_variable_space_pad;
+ m_segment_size= get_segment_size_from_collation(cs);
+ m_max_image_len=
+ (max_image_len_before_chunks/(m_segment_size-1) + 1) *
+ m_segment_size;
+ rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len,
+ &space_mb_len);
+ }
+ else
+ {
+ m_pack_func= rdb_pack_with_varchar_encoding;
+ m_skip_func= rdb_skip_variable_length;
+ }
+ }
+
+ if ((codec= rdb_init_collation_mapping(cs)) != nullptr)
+ {
+ // The collation allows to store extra information in the unpack_info
+ // which can be used to restore the original value from the
+ // mem-comparable form.
+ m_make_unpack_info_func= codec->m_make_unpack_info_func[idx];
+ m_unpack_func= codec->m_unpack_func[idx];
+ m_charset_codec= codec;
+ }
+ else if (use_unknown_collation)
+ {
+ // We have no clue about how this collation produces mem-comparable
+ // form. Our way of restoring the original value is to keep a copy of
+ // the original value in unpack_info.
+ m_unpack_info_stores_value= true;
+ m_make_unpack_info_func= is_varchar ? rdb_make_unpack_unknown_varchar
+ : rdb_make_unpack_unknown;
+ m_unpack_func= is_varchar ? rdb_unpack_unknown_varchar
+ : rdb_unpack_unknown;
+ }
+ else
+ {
+ // Same as above: we don't know how to restore the value from its
+ // mem-comparable form.
+ // Here, we just indicate to the SQL layer we can't do it.
+ DBUG_ASSERT(m_unpack_func == nullptr);
+ m_unpack_info_stores_value= false;
+ res= false; // Indicate that index-only reads are not possible
+ }
+ }
+
+ // Make an adjustment: unpacking partially covered columns is not
+ // possible. field->table is populated when called through
+ // Rdb_key_def::setup, but not during ha_rocksdb::index_flags.
+ if (field->table)
+ {
+ // Get the original Field object and compare lengths. If this key part is
+ // a prefix of a column, then we can't do index-only scans.
+ if (field->table->field[field->field_index]->field_length != key_length)
+ {
+ m_unpack_func= nullptr;
+ m_make_unpack_info_func= nullptr;
+ m_unpack_info_stores_value= true;
+ res= false;
+ }
+ }
+ else
+ {
+ if (field->field_length != key_length)
+ {
+ m_unpack_func= nullptr;
+ m_make_unpack_info_func= nullptr;
+ m_unpack_info_stores_value= true;
+ res= false;
+ }
+ }
+ }
+ return res;
+}
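+
+/*
+ Illustrative sketch, not part of the original change: the worst-case size of
+ the chunked ("escape") varchar encoding that setup() reserves above. Each
+ chunk stores RDB_ESCAPE_LENGTH-1 data bytes plus one trailing marker byte,
+ which is where the (len/(RDB_ESCAPE_LENGTH-1) + 1) * RDB_ESCAPE_LENGTH
+ formula comes from. The chunk size of 9 below is an assumption made only
+ for this example.
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static size_t rdb_example_varchar_encoded_bound(size_t len)
+{
+ const size_t kEscapeLength= 9; // assumed: 8 data bytes + 1 marker byte
+ return (len / (kEscapeLength - 1) + 1) * kEscapeLength;
+ // e.g. len=16 -> 3 chunks -> 27 bytes reserved in the mem-comparable key
+}
+#endif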
+
+
+Field *Rdb_field_packing::get_field_in_table(const TABLE *tbl) const
+{
+ return tbl->key_info[m_keynr].key_part[m_key_part].field;
+}
+
+
+void Rdb_field_packing::fill_hidden_pk_val(uchar **dst,
+ longlong hidden_pk_id) const
+{
+ DBUG_ASSERT(m_max_image_len == 8);
+
+ String to;
+ rdb_netstr_append_uint64(&to, hidden_pk_id);
+ memcpy(*dst, to.ptr(), m_max_image_len);
+
+ *dst += m_max_image_len;
+}
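+
+/*
+ Illustrative sketch, not part of the original change: the hidden PK id above
+ is appended as an 8-byte big-endian integer (network byte order, as the
+ rdb_netstr/rdb_netbuf naming suggests), which is what makes plain memcmp()
+ order the keys numerically. A stand-alone equivalent of that byte layout:
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static void rdb_example_store_hidden_pk(uchar *dst, ulonglong id)
+{
+ for (int i= 7; i >= 0; i--) // least significant byte ends up last
+ {
+ dst[i]= static_cast<uchar>(id & 0xff);
+ id>>= 8;
+ }
+}
+#endif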
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+// Rdb_ddl_manager
+///////////////////////////////////////////////////////////////////////////////////////////
+
+Rdb_tbl_def::~Rdb_tbl_def()
+{
+ auto ddl_manager= rdb_get_ddl_manager();
+ /* Don't free key definitions */
+ if (m_key_descr_arr)
+ {
+ for (uint i= 0; i < m_key_count; i++) {
+ if (ddl_manager && m_key_descr_arr[i]) {
+ ddl_manager->erase_index_num(m_key_descr_arr[i]->get_gl_index_id());
+ }
+
+ m_key_descr_arr[i]= nullptr;
+ }
+
+ delete[] m_key_descr_arr;
+ m_key_descr_arr= nullptr;
+ }
+}
+
+/*
+ Put table definition DDL entry. Actual write is done at
+ Rdb_dict_manager::commit.
+
+ We write
+ dbname.tablename -> version + {key_entry, key_entry, key_entry, ... }
+
+ Where key entries are a tuple of
+ ( cf_id, index_nr )
+*/
+
+bool Rdb_tbl_def::put_dict(Rdb_dict_manager* dict, rocksdb::WriteBatch *batch,
+ uchar *key, size_t keylen)
+{
+ StringBuffer<8 * Rdb_key_def::PACKED_SIZE> indexes;
+ indexes.alloc(Rdb_key_def::VERSION_SIZE +
+ m_key_count * Rdb_key_def::PACKED_SIZE * 2);
+ rdb_netstr_append_uint16(&indexes, Rdb_key_def::DDL_ENTRY_INDEX_VERSION);
+
+ for (uint i = 0; i < m_key_count; i++)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= m_key_descr_arr[i];
+
+ uchar flags =
+ (kd->m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) |
+ (kd->m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0);
+
+ uint cf_id= kd->get_cf()->GetID();
+ /*
+ If cf_id already exists, cf_flags must be the same.
+ To prevent race condition, reading/modifying/committing CF flags
+ need to be protected by mutex (dict_manager->lock()).
+ When RocksDB supports transactions with pessimistic concurrency
+ control, we can switch to using them and remove the mutex.
+ */
+ uint existing_cf_flags;
+ if (dict->get_cf_flags(cf_id, &existing_cf_flags))
+ {
+ if (existing_cf_flags != flags)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Column Family Flag is different from existing flag. "
+ "Assign a new CF flag, or do not change existing "
+ "CF flag.", MYF(0));
+ return true;
+ }
+ }
+ else
+ {
+ dict->add_cf_flags(batch, cf_id, flags);
+ }
+
+ rdb_netstr_append_uint32(&indexes, cf_id);
+ rdb_netstr_append_uint32(&indexes, kd->m_index_number);
+ dict->add_or_update_index_cf_mapping(batch, kd->m_index_type,
+ kd->m_kv_format_version,
+ kd->m_index_number, cf_id);
+ }
+
+ rocksdb::Slice skey((char*)key, keylen);
+ rocksdb::Slice svalue(indexes.c_ptr(), indexes.length());
+
+ dict->put_key(batch, skey, svalue);
+ return false;
+}
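+
+/*
+ Illustrative sketch, not part of the original change: the value written by
+ put_dict() above is laid out as
+
+ uint16 DDL_ENTRY_INDEX_VERSION | { uint32 cf_id, uint32 index_nr } * m_key_count
+
+ with all integers in network byte order. A minimal walk over that layout
+ (Rdb_ddl_manager::init() further down does the real decoding):
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static void rdb_example_walk_ddl_entry(const uchar *val, size_t len)
+{
+ const uchar *ptr= val;
+ uint16 version= rdb_netbuf_to_uint16(ptr); // DDL_ENTRY_INDEX_VERSION
+ ptr+= Rdb_key_def::VERSION_SIZE;
+ size_t n_pairs= (len - Rdb_key_def::VERSION_SIZE) /
+ (2 * Rdb_key_def::PACKED_SIZE);
+ for (size_t i= 0; i < n_pairs; i++)
+ {
+ uint cf_id= rdb_netbuf_to_uint32(ptr);
+ ptr+= Rdb_key_def::PACKED_SIZE;
+ uint index_nr= rdb_netbuf_to_uint32(ptr);
+ ptr+= Rdb_key_def::PACKED_SIZE;
+ (void)version; (void)cf_id; (void)index_nr; // a real caller uses these
+ }
+}
+#endif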
+
+void Rdb_tbl_def::check_if_is_mysql_system_table()
+{
+ static const char *const system_dbs[] = {
+ "mysql",
+ "performance_schema",
+ "information_schema",
+ };
+
+ m_is_mysql_system_table= false;
+ for (uint ii = 0; ii < array_elements(system_dbs); ii++) {
+ if (strcmp(m_dbname.c_str(), system_dbs[ii]) == 0) {
+ m_is_mysql_system_table= true;
+ break;
+ }
+ }
+}
+
+void Rdb_tbl_def::set_name(const std::string& name)
+{
+ int err __attribute__((__unused__));
+
+ m_dbname_tablename= name;
+ err= rdb_split_normalized_tablename(name, &m_dbname, &m_tablename,
+ &m_partition);
+ DBUG_ASSERT(err == 0);
+
+ check_if_is_mysql_system_table();
+}
+
+/*
+ Static function of type my_hash_get_key that gets invoked by
+ the m_ddl_hash object of type my_core::HASH.
+ It manufactures a key (db+table name in our case) from a record
+ (Rdb_tbl_def in our case).
+*/
+const uchar* Rdb_ddl_manager::get_hash_key(
+ Rdb_tbl_def *rec, size_t *length,
+ my_bool not_used __attribute__((__unused__)))
+{
+ const std::string& dbname_tablename= rec->full_tablename();
+ *length= dbname_tablename.size();
+ return reinterpret_cast<const uchar*>(dbname_tablename.c_str());
+}
+
+
+/*
+ Static function of type void (*my_hash_free_element_func_t)(void*) that gets
+ invoked by the m_ddl_hash object of type my_core::HASH.
+ It deletes a record (Rdb_tbl_def in our case).
+*/
+void Rdb_ddl_manager::free_hash_elem(void* data)
+{
+ Rdb_tbl_def* elem= reinterpret_cast<Rdb_tbl_def*>(data);
+ delete elem;
+}
+
+void Rdb_ddl_manager::erase_index_num(GL_INDEX_ID gl_index_id)
+{
+ m_index_num_to_keydef.erase(gl_index_id);
+}
+
+
+namespace // anonymous namespace = not visible outside this source file
+{
+struct Rdb_validate_tbls : public Rdb_tables_scanner
+{
+ using tbl_info_t= std::pair<std::string, bool>;
+ using tbl_list_t= std::map<std::string, std::set<tbl_info_t>>;
+
+ tbl_list_t m_list;
+
+ int add_table(Rdb_tbl_def* tdef) override;
+
+ bool compare_to_actual_tables(
+ const std::string& datadir,
+ bool* has_errors);
+
+ bool scan_for_frms(
+ const std::string& datadir,
+ const std::string& dbname,
+ bool* has_errors);
+
+ bool check_frm_file(
+ const std::string& fullpath,
+ const std::string& dbname,
+ const std::string& tablename,
+ bool* has_errors);
+};
+} // anonymous namespace
+
+
+/*
+ Get a list of tables that we expect to have .frm files for. This will use the
+ information just read from the RocksDB data dictionary.
+*/
+int Rdb_validate_tbls::add_table(Rdb_tbl_def* tdef)
+{
+ DBUG_ASSERT(tdef != nullptr);
+
+ /* Add the database/table into the list */
+ bool is_partition = tdef->base_partition().size() != 0;
+ m_list[tdef->base_dbname()].insert(
+ tbl_info_t(tdef->base_tablename(), is_partition));
+
+ return 0;
+}
+
+/*
+ Access the .frm file for this dbname/tablename and see if it is a RocksDB
+ table (or partition table).
+*/
+bool Rdb_validate_tbls::check_frm_file(
+ const std::string& fullpath,
+ const std::string& dbname,
+ const std::string& tablename,
+ bool* has_errors)
+{
+ /* Check this .frm file to see what engine it uses */
+ String fullfilename(fullpath.c_str(), &my_charset_bin);
+ fullfilename.append(FN_DIRSEP);
+ fullfilename.append(tablename.c_str());
+ fullfilename.append(".frm");
+
+ /*
+ This function will return the legacy_db_type of the table. Currently
+ it does not reference the first parameter (THD* thd), but if it ever
+ did in the future we would need to make a version that does it without
+ the connection handle as we don't have one here.
+ */
+ enum legacy_db_type eng_type;
+ frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type);
+ if (type == FRMTYPE_ERROR)
+ {
+ sql_print_warning("RocksDB: Failed to open/read .from file: %s",
+ fullfilename.ptr());
+ return false;
+ }
+
+ if (type == FRMTYPE_TABLE)
+ {
+ /* For a RocksDB table do we have a reference in the data dictionary? */
+ if (eng_type == DB_TYPE_ROCKSDB)
+ {
+ /*
+ Attempt to remove the table entry from the list of tables. If this
+ fails then we know we had a .frm file that wasn't registered in RocksDB.
+ */
+ tbl_info_t element(tablename, false);
+ if (m_list.count(dbname) == 0 ||
+ m_list[dbname].erase(element) == 0)
+ {
+ sql_print_warning("RocksDB: Schema mismatch - "
+ "A .frm file exists for table %s.%s, "
+ "but that table is not registered in RocksDB",
+ dbname.c_str(), tablename.c_str());
+ *has_errors = true;
+ }
+ }
+ else if (eng_type == DB_TYPE_PARTITION_DB)
+ {
+ /*
+ For partition tables, see if it is in the m_list as a partition,
+ but don't generate an error if it isn't there - we don't know that the
+ .frm is for RocksDB.
+ */
+ if (m_list.count(dbname) > 0)
+ {
+ m_list[dbname].erase(tbl_info_t(tablename, true));
+ }
+ }
+ }
+
+ return true;
+}
+
+/* Scan the database subdirectory for .frm files */
+bool Rdb_validate_tbls::scan_for_frms(
+ const std::string& datadir,
+ const std::string& dbname,
+ bool* has_errors)
+{
+ bool result = true;
+ std::string fullpath = datadir + dbname;
+ struct st_my_dir* dir_info = my_dir(fullpath.c_str(), MYF(MY_DONT_SORT));
+
+ /* Access the directory */
+ if (dir_info == nullptr)
+ {
+ sql_print_warning("RocksDB: Could not open database directory: %s",
+ fullpath.c_str());
+ return false;
+ }
+
+ /* Scan through the files in the directory */
+ struct fileinfo* file_info = dir_info->dir_entry;
+ for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++)
+ {
+ /* Find .frm files that are not temp files (those that start with '#') */
+ const char* ext = strrchr(file_info->name, '.');
+ if (ext != nullptr && !is_prefix(file_info->name, tmp_file_prefix) &&
+ strcmp(ext, ".frm") == 0)
+ {
+ std::string tablename = std::string(file_info->name,
+ ext - file_info->name);
+
+ /* Check to see if the .frm file is from RocksDB */
+ if (!check_frm_file(fullpath, dbname, tablename, has_errors))
+ {
+ result = false;
+ break;
+ }
+ }
+ }
+
+ /* Remove any databases who have no more tables listed */
+ if (m_list.count(dbname) == 1 && m_list[dbname].size() == 0)
+ {
+ m_list.erase(dbname);
+ }
+
+ /* Release the directory entry */
+ my_dirend(dir_info);
+
+ return result;
+}
+
+/*
+ Scan the datadir for all databases (subdirectories) and get a list of .frm
+ files they contain
+*/
+bool Rdb_validate_tbls::compare_to_actual_tables(
+ const std::string& datadir,
+ bool* has_errors)
+{
+ bool result = true;
+ struct st_my_dir* dir_info;
+ struct fileinfo* file_info;
+
+ dir_info = my_dir(datadir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT));
+ if (dir_info == nullptr)
+ {
+ sql_print_warning("RocksDB: could not open datadir: %s", datadir.c_str());
+ return false;
+ }
+
+ file_info = dir_info->dir_entry;
+ for (uint ii = 0; ii < dir_info->number_off_files; ii++, file_info++)
+ {
+ /* Ignore files/dirs starting with '.' */
+ if (file_info->name[0] == '.')
+ continue;
+
+ /* Ignore all non-directory files */
+ if (!MY_S_ISDIR(file_info->mystat->st_mode))
+ continue;
+
+ /* Scan all the .frm files in the directory */
+ if (!scan_for_frms(datadir, file_info->name, has_errors))
+ {
+ result = false;
+ break;
+ }
+ }
+
+ /* Release the directory info */
+ my_dirend(dir_info);
+
+ return result;
+}
+
+/*
+ Validate that all the tables in the RocksDB database dictionary match the .frm
+ files in the datadir
+*/
+bool Rdb_ddl_manager::validate_schemas(void)
+{
+ bool has_errors= false;
+ std::string datadir= std::string(mysql_real_data_home);
+ Rdb_validate_tbls table_list;
+
+ /* Get the list of tables from the database dictionary */
+ if (scan_for_tables(&table_list) != 0)
+ {
+ return false;
+ }
+
+ /* Compare that to the list of actual .frm files */
+ if (!table_list.compare_to_actual_tables(datadir, &has_errors))
+ {
+ return false;
+ }
+
+ /*
+ Any tables left in the tables list are ones that are registered in RocksDB
+ but don't have .frm files.
+ */
+ for (const auto& db : table_list.m_list)
+ {
+ for (const auto& table : db.second)
+ {
+ sql_print_warning("RocksDB: Schema mismatch - "
+ "Table %s.%s is registered in RocksDB "
+ "but does not have a .frm file", db.first.c_str(),
+ table.first.c_str());
+ has_errors = true;
+ }
+ }
+
+ return !has_errors;
+}
+
+bool Rdb_ddl_manager::init(Rdb_dict_manager *dict_arg,
+ Rdb_cf_manager *cf_manager,
+ uint32_t validate_tables)
+{
+ m_dict= dict_arg;
+ mysql_rwlock_init(0, &m_rwlock);
+ (void) my_hash_init(&m_ddl_hash,
+ /*system_charset_info*/ &my_charset_bin,
+ 32, 0, 0,
+ (my_hash_get_key) Rdb_ddl_manager::get_hash_key,
+ Rdb_ddl_manager::free_hash_elem,
+ 0);
+
+ /* Read the data dictionary and populate the hash */
+ uchar ddl_entry[Rdb_key_def::INDEX_NUMBER_SIZE];
+ rdb_netbuf_store_index(ddl_entry, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER);
+ rocksdb::Slice ddl_entry_slice((char*)ddl_entry,
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+
+ /* Reading data dictionary should always skip bloom filter */
+ rocksdb::Iterator* it= m_dict->new_iterator();
+ int i= 0;
+
+ uint max_index_id_in_dict= 0;
+ m_dict->get_max_index_id(&max_index_id_in_dict);
+
+ for (it->Seek(ddl_entry_slice); it->Valid(); it->Next())
+ {
+ const uchar *ptr;
+ const uchar *ptr_end;
+ rocksdb::Slice key= it->key();
+ rocksdb::Slice val= it->value();
+
+ if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE &&
+ memcmp(key.data(), ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE))
+ break;
+
+ if (key.size() <= Rdb_key_def::INDEX_NUMBER_SIZE)
+ {
+ sql_print_error("RocksDB: Table_store: key has length %d (corruption?)",
+ (int)key.size());
+ return true;
+ }
+
+ Rdb_tbl_def *tdef= new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE);
+
+ // Now, read the DDLs.
+ int real_val_size= val.size() - Rdb_key_def::VERSION_SIZE;
+ if (real_val_size % (Rdb_key_def::PACKED_SIZE*2))
+ {
+ sql_print_error("RocksDB: Table_store: invalid keylist for table %s",
+ tdef->full_tablename().c_str());
+ return true;
+ }
+ tdef->m_key_count= real_val_size / (Rdb_key_def::PACKED_SIZE*2);
+ tdef->m_key_descr_arr= new std::shared_ptr<Rdb_key_def>[tdef->m_key_count];
+
+ ptr= reinterpret_cast<const uchar*>(val.data());
+ int version= rdb_netbuf_read_uint16(&ptr);
+ if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION)
+ {
+ sql_print_error("RocksDB: DDL ENTRY Version was not expected."
+ "Expected: %d, Actual: %d",
+ Rdb_key_def::DDL_ENTRY_INDEX_VERSION, version);
+ return true;
+ }
+ ptr_end= ptr + real_val_size;
+ for (uint keyno= 0; ptr < ptr_end; keyno++)
+ {
+ GL_INDEX_ID gl_index_id;
+ rdb_netbuf_read_gl_index(&ptr, &gl_index_id);
+ uint16 m_index_dict_version= 0;
+ uchar m_index_type= 0;
+ uint16 kv_version= 0;
+ uint flags= 0;
+ if (!m_dict->get_index_info(gl_index_id, &m_index_dict_version,
+ &m_index_type, &kv_version))
+ {
+ sql_print_error("RocksDB: Could not get index information "
+ "for Index Number (%u,%u), table %s",
+ gl_index_id.cf_id, gl_index_id.index_id,
+ tdef->full_tablename().c_str());
+ return true;
+ }
+ if (max_index_id_in_dict < gl_index_id.index_id)
+ {
+ sql_print_error("RocksDB: Found max index id %u from data dictionary "
+ "but also found larger index id %u from dictionary. "
+ "This should never happen and possibly a bug.",
+ max_index_id_in_dict, gl_index_id.index_id);
+ return true;
+ }
+ if (!m_dict->get_cf_flags(gl_index_id.cf_id, &flags))
+ {
+ sql_print_error("RocksDB: Could not get Column Family Flags "
+ "for CF Number %d, table %s",
+ gl_index_id.cf_id,
+ tdef->full_tablename().c_str());
+ return true;
+ }
+
+ rocksdb::ColumnFamilyHandle* cfh = cf_manager->get_cf(gl_index_id.cf_id);
+ DBUG_ASSERT(cfh != nullptr);
+
+ /*
+ We can't fully initialize Rdb_key_def object here, because full
+ initialization requires that there is an open TABLE* where we could
+ look at Field* objects and set max_length and other attributes
+ */
+ tdef->m_key_descr_arr[keyno]=
+ std::make_shared<Rdb_key_def>(gl_index_id.index_id, keyno, cfh,
+ m_index_dict_version,
+ m_index_type, kv_version,
+ flags & Rdb_key_def::REVERSE_CF_FLAG,
+ flags & Rdb_key_def::AUTO_CF_FLAG, "",
+ m_dict->get_stats(gl_index_id));
+ }
+ put(tdef);
+ i++;
+ }
+
+ /*
+ If validate_tables is greater than 0 run the validation. Only fail the
+ initialization if the setting is 1. If the setting is 2 we continue.
+ */
+ if (validate_tables > 0 && !validate_schemas()) {
+ if (validate_tables == 1) {
+ sql_print_error("RocksDB: Problems validating data dictionary "
+ "against .frm files, exiting");
+ return true;
+ }
+ }
+
+ // index ids used by applications should not conflict with
+ // data dictionary index ids
+ if (max_index_id_in_dict < Rdb_key_def::END_DICT_INDEX_ID)
+ {
+ max_index_id_in_dict= Rdb_key_def::END_DICT_INDEX_ID;
+ }
+
+ m_sequence.init(max_index_id_in_dict+1);
+
+ if (!it->status().ok())
+ {
+ std::string s= it->status().ToString();
+ sql_print_error("RocksDB: Table_store: load error: %s", s.c_str());
+ return true;
+ }
+ delete it;
+ sql_print_information("RocksDB: Table_store: loaded DDL data for %d tables", i);
+ return false;
+}
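+
+/*
+ Illustrative sketch, not part of the original change: the loop in init()
+ above is a plain prefix scan over the system column family - seek to the
+ 4-byte index number and stop as soon as a key no longer starts with it.
+ The same pattern is used by get_ongoing_index_operation() below.
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static void rdb_example_prefix_scan(Rdb_dict_manager *dict, uint32_t number)
+{
+ uchar prefix[Rdb_key_def::INDEX_NUMBER_SIZE];
+ rdb_netbuf_store_index(prefix, number);
+ rocksdb::Slice prefix_slice(reinterpret_cast<char*>(prefix), sizeof(prefix));
+
+ std::unique_ptr<rocksdb::Iterator> it(dict->new_iterator());
+ for (it->Seek(prefix_slice); it->Valid(); it->Next())
+ {
+ if (it->key().size() < sizeof(prefix) ||
+ memcmp(it->key().data(), prefix, sizeof(prefix)) != 0)
+ break;
+ // process it->key() / it->value() here
+ }
+}
+#endif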
+
+
+Rdb_tbl_def* Rdb_ddl_manager::find(const std::string& table_name, bool lock)
+{
+ if (lock)
+ {
+ mysql_rwlock_rdlock(&m_rwlock);
+ }
+
+ Rdb_tbl_def* rec= reinterpret_cast<Rdb_tbl_def*>(
+ my_hash_search(&m_ddl_hash,
+ reinterpret_cast<const uchar*>(table_name.c_str()),
+ table_name.size()));
+
+ if (lock)
+ {
+ mysql_rwlock_unlock(&m_rwlock);
+ }
+
+ return rec;
+}
+
+// this is a safe version of the find() function below. It acquires a read
+// lock on m_rwlock to make sure the Rdb_key_def is not discarded while we
+// are finding it. Copying it into 'ret' increments the reference count, ensuring
+// that the object will not be discarded until we are finished with it.
+std::shared_ptr<Rdb_key_def> Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id)
+{
+ std::shared_ptr<Rdb_key_def> ret(nullptr);
+
+ mysql_rwlock_rdlock(&m_rwlock);
+
+ auto it= m_index_num_to_keydef.find(gl_index_id);
+ if (it != m_index_num_to_keydef.end())
+ {
+ auto table_def = find(it->second.first, false);
+ if (table_def && it->second.second < table_def->m_key_count)
+ {
+ auto& kd= table_def->m_key_descr_arr[it->second.second];
+ if (kd->max_storage_fmt_length() != 0)
+ {
+ ret = kd;
+ }
+ }
+ }
+
+ mysql_rwlock_unlock(&m_rwlock);
+
+ return ret;
+}
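+
+/*
+ Illustrative usage, not part of the original change: callers that may race
+ with concurrent DDL hold on to the returned shared_ptr for as long as they
+ use the definition; the Rdb_key_def is only freed once the last reference
+ goes away.
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static bool rdb_example_index_is_known(Rdb_ddl_manager *ddl_manager,
+ GL_INDEX_ID gl_index_id)
+{
+ std::shared_ptr<Rdb_key_def> kd= ddl_manager->safe_find(gl_index_id);
+ return kd != nullptr; // kd keeps the definition alive within this scope
+}
+#endif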
+
+// this method assumes at least read-only lock on m_rwlock
+const std::shared_ptr<Rdb_key_def>& Rdb_ddl_manager::find(
+ GL_INDEX_ID gl_index_id)
+{
+ auto it= m_index_num_to_keydef.find(gl_index_id);
+ if (it != m_index_num_to_keydef.end()) {
+ auto table_def = find(it->second.first, false);
+ if (table_def) {
+ if (it->second.second < table_def->m_key_count) {
+ return table_def->m_key_descr_arr[it->second.second];
+ }
+ }
+ }
+
+ static std::shared_ptr<Rdb_key_def> empty = nullptr;
+
+ return empty;
+}
+
+void Rdb_ddl_manager::set_stats(
+ const std::unordered_map<GL_INDEX_ID, Rdb_index_stats>& stats)
+{
+ mysql_rwlock_wrlock(&m_rwlock);
+ for (auto src : stats) {
+ auto keydef = find(src.second.m_gl_index_id);
+ if (keydef) {
+ keydef->m_stats = src.second;
+ }
+ }
+ mysql_rwlock_unlock(&m_rwlock);
+}
+
+void Rdb_ddl_manager::adjust_stats(
+ const std::vector<Rdb_index_stats>& new_data,
+ const std::vector<Rdb_index_stats>& deleted_data)
+{
+ mysql_rwlock_wrlock(&m_rwlock);
+ int i = 0;
+ for (const auto& data : {new_data, deleted_data})
+ {
+ for (const auto& src : data)
+ {
+ auto keydef= find(src.m_gl_index_id);
+ if (keydef)
+ {
+ keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length());
+ m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats;
+ }
+ }
+ i++;
+ }
+ bool should_save_stats= !m_stats2store.empty();
+ mysql_rwlock_unlock(&m_rwlock);
+ if (should_save_stats)
+ {
+ // Queue an async persist_stats(false) call to the background thread.
+ rdb_queue_save_stats_request();
+ }
+}
+
+void Rdb_ddl_manager::persist_stats(bool sync)
+{
+ mysql_rwlock_wrlock(&m_rwlock);
+ auto local_stats2store = std::move(m_stats2store);
+ m_stats2store.clear();
+ mysql_rwlock_unlock(&m_rwlock);
+
+ // Persist stats
+ std::unique_ptr<rocksdb::WriteBatch> wb = m_dict->begin();
+ std::vector<Rdb_index_stats> stats;
+ std::transform(
+ local_stats2store.begin(), local_stats2store.end(),
+ std::back_inserter(stats),
+ [](
+ const std::pair<GL_INDEX_ID, Rdb_index_stats>& s
+ ) {return s.second;});
+ m_dict->add_stats(wb.get(), stats);
+ m_dict->commit(wb.get(), sync);
+}
+
+/*
+ Put table definition of `tbl` into the mapping, and also write it to the
+ on-disk data dictionary.
+*/
+
+int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *tbl,
+ rocksdb::WriteBatch *batch)
+{
+ uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE];
+ uint pos= 0;
+
+ rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER);
+ pos+= Rdb_key_def::INDEX_NUMBER_SIZE;
+
+ const std::string& dbname_tablename= tbl->full_tablename();
+ memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size());
+ pos += dbname_tablename.size();
+
+ int res;
+ if ((res= tbl->put_dict(m_dict, batch, buf, pos)))
+ {
+ return res;
+ }
+ if ((res= put(tbl)))
+ {
+ return res;
+ }
+ return 0;
+}
+
+
+/* Return 0 - ok, other value - error */
+/* TODO:
+ This function modifies m_ddl_hash and m_index_num_to_keydef.
+ However, these changes need to be reversed if dict_manager.commit fails
+ See the discussion here: https://reviews.facebook.net/D35925#inline-259167
+ Tracked by https://github.com/facebook/mysql-5.6/issues/33
+*/
+int Rdb_ddl_manager::put(Rdb_tbl_def *tbl, bool lock)
+{
+ Rdb_tbl_def *rec;
+ my_bool result;
+ const std::string& dbname_tablename= tbl->full_tablename();
+
+ if (lock)
+ mysql_rwlock_wrlock(&m_rwlock);
+
+ // We have to do this find because 'tbl' is not yet in the list. We need
+ // to find the one we are replacing ('rec')
+ rec= reinterpret_cast<Rdb_tbl_def*>(find(dbname_tablename, false));
+ if (rec)
+ {
+ // this will free the old record.
+ my_hash_delete(&m_ddl_hash, reinterpret_cast<uchar*>(rec));
+ }
+ result= my_hash_insert(&m_ddl_hash, reinterpret_cast<uchar*>(tbl));
+
+ for (uint keyno= 0; keyno < tbl->m_key_count; keyno++) {
+ m_index_num_to_keydef[tbl->m_key_descr_arr[keyno]->get_gl_index_id()]=
+ std::make_pair(dbname_tablename, keyno);
+ }
+
+ if (lock)
+ mysql_rwlock_unlock(&m_rwlock);
+ return result;
+}
+
+
+void Rdb_ddl_manager::remove(Rdb_tbl_def *tbl,
+ rocksdb::WriteBatch *batch, bool lock)
+{
+ if (lock)
+ mysql_rwlock_wrlock(&m_rwlock);
+
+ uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE];
+ uint pos= 0;
+
+ rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER);
+ pos+= Rdb_key_def::INDEX_NUMBER_SIZE;
+
+ const std::string& dbname_tablename= tbl->full_tablename();
+ memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size());
+ pos += dbname_tablename.size();
+
+ rocksdb::Slice tkey((char*)buf, pos);
+ m_dict->delete_key(batch, tkey);
+
+ /* The following will also delete the object: */
+ my_hash_delete(&m_ddl_hash, reinterpret_cast<uchar*>(tbl));
+
+ if (lock)
+ mysql_rwlock_unlock(&m_rwlock);
+}
+
+
+bool Rdb_ddl_manager::rename(const std::string& from, const std::string& to,
+ rocksdb::WriteBatch *batch)
+{
+ Rdb_tbl_def *rec;
+ Rdb_tbl_def *new_rec;
+ bool res= true;
+ uchar new_buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE];
+ uint new_pos= 0;
+
+ mysql_rwlock_wrlock(&m_rwlock);
+ if (!(rec= find(from, false)))
+ {
+ mysql_rwlock_unlock(&m_rwlock);
+ return true;
+ }
+
+ new_rec= new Rdb_tbl_def(to);
+
+ new_rec->m_key_count= rec->m_key_count;
+ new_rec->m_auto_incr_val=
+ rec->m_auto_incr_val.load(std::memory_order_relaxed);
+ new_rec->m_key_descr_arr= rec->m_key_descr_arr;
+ // so that it's not free'd when deleting the old rec
+ rec->m_key_descr_arr= nullptr;
+
+ // Create a new key
+ rdb_netbuf_store_index(new_buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER);
+ new_pos+= Rdb_key_def::INDEX_NUMBER_SIZE;
+
+ const std::string& dbname_tablename= new_rec->full_tablename();
+ memcpy(new_buf + new_pos, dbname_tablename.c_str(), dbname_tablename.size());
+ new_pos += dbname_tablename.size();
+
+ // Create a key to add
+ if (!new_rec->put_dict(m_dict, batch, new_buf, new_pos))
+ {
+ remove(rec, batch, false);
+ put(new_rec, false);
+ res= false; // ok
+ }
+
+ mysql_rwlock_unlock(&m_rwlock);
+ return res;
+}
+
+
+void Rdb_ddl_manager::cleanup()
+{
+ my_hash_free(&m_ddl_hash);
+ mysql_rwlock_destroy(&m_rwlock);
+ m_sequence.cleanup();
+}
+
+
+int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner* tables_scanner)
+{
+ int i, ret;
+ Rdb_tbl_def *rec;
+
+ DBUG_ASSERT(tables_scanner != nullptr);
+
+ mysql_rwlock_rdlock(&m_rwlock);
+
+ ret= 0;
+ i= 0;
+
+ while ((rec = reinterpret_cast<Rdb_tbl_def*>(my_hash_element(&m_ddl_hash,
+ i))))
+ {
+ ret = tables_scanner->add_table(rec);
+ if (ret)
+ break;
+ i++;
+ }
+
+ mysql_rwlock_unlock(&m_rwlock);
+ return ret;
+}
+
+
+/*
+ Rdb_binlog_manager class implementation
+*/
+
+bool Rdb_binlog_manager::init(Rdb_dict_manager *dict_arg)
+{
+ DBUG_ASSERT(dict_arg != nullptr);
+ m_dict= dict_arg;
+
+ rdb_netbuf_store_index(m_key_buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER);
+ m_key_slice = rocksdb::Slice(reinterpret_cast<char*>(m_key_buf),
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+ return false;
+}
+
+void Rdb_binlog_manager::cleanup()
+{
+}
+
+/**
+ Set binlog name, pos and optionally gtid into WriteBatch.
+ This function should be called as part of transaction commit,
+ since binlog info is set only at transaction commit.
+ Actual write into RocksDB is not done here, so checking if
+ write succeeded or not is not possible here.
+ @param binlog_name Binlog name
+ @param binlog_pos Binlog pos
+ @param binlog_gtid Binlog GTID
+ @param batch WriteBatch
+*/
+void Rdb_binlog_manager::update(const char* binlog_name,
+ const my_off_t binlog_pos,
+ const char* binlog_gtid,
+ rocksdb::WriteBatchBase* batch)
+{
+ if (binlog_name && binlog_pos)
+ {
+ // max binlog length (512) + binlog pos (4) + binlog gtid (57) < 1024
+ uchar value_buf[1024];
+ m_dict->put_key(batch, m_key_slice,
+ pack_value(value_buf, binlog_name,
+ binlog_pos, binlog_gtid));
+ }
+}
+
+/**
+ Read binlog committed entry stored in RocksDB, then unpack
+ @param[OUT] binlog_name Binlog name
+ @param[OUT] binlog_pos Binlog pos
+ @param[OUT] binlog_gtid Binlog GTID
+ @return
+ true if binlog info was found (valid behavior)
+ false otherwise
+*/
+bool Rdb_binlog_manager::read(char *binlog_name, my_off_t *binlog_pos,
+ char *binlog_gtid)
+{
+ bool ret= false;
+ if (binlog_name)
+ {
+ std::string value;
+ rocksdb::Status status= m_dict->get_value(m_key_slice, &value);
+ if(status.ok())
+ {
+ if (!unpack_value((const uchar*)value.c_str(),
+ binlog_name, binlog_pos, binlog_gtid))
+ ret= true;
+ }
+ }
+ return ret;
+}
+
+/**
+ Pack binlog_name, binlog_pos, binlog_gtid into preallocated
+ buffer, then convert and return a RocksDB Slice
+ @param buf Preallocated buffer to set binlog info.
+ @param binlog_name Binlog name
+ @param binlog_pos Binlog pos
+ @param binlog_gtid Binlog GTID
+ @return rocksdb::Slice converted from buf and its length
+*/
+rocksdb::Slice Rdb_binlog_manager::pack_value(uchar *buf,
+ const char* binlog_name,
+ const my_off_t binlog_pos,
+ const char* binlog_gtid)
+{
+ uint pack_len= 0;
+
+ // store version
+ rdb_netbuf_store_uint16(buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION);
+ pack_len += Rdb_key_def::VERSION_SIZE;
+
+ // store binlog file name length
+ DBUG_ASSERT(strlen(binlog_name) <= 65535);
+ uint16_t binlog_name_len = strlen(binlog_name);
+ rdb_netbuf_store_uint16(buf+pack_len, binlog_name_len);
+ pack_len += 2;
+
+ // store binlog file name
+ memcpy(buf+pack_len, binlog_name, binlog_name_len);
+ pack_len += binlog_name_len;
+
+ // store binlog pos
+ rdb_netbuf_store_uint32(buf+pack_len, binlog_pos);
+ pack_len += 4;
+
+ // store binlog gtid length.
+ // If gtid was not set, store 0 instead
+ uint16_t binlog_gtid_len = binlog_gtid? strlen(binlog_gtid) : 0;
+ rdb_netbuf_store_uint16(buf+pack_len, binlog_gtid_len);
+ pack_len += 2;
+
+ if (binlog_gtid_len > 0)
+ {
+ // store binlog gtid
+ memcpy(buf+pack_len, binlog_gtid, binlog_gtid_len);
+ pack_len += binlog_gtid_len;
+ }
+
+ return rocksdb::Slice((char*)buf, pack_len);
+}
+
+/**
+ Unpack value then split into binlog_name, binlog_pos (and binlog_gtid)
+ @param[IN] value Binlog state info fetched from RocksDB
+ @param[OUT] binlog_name Binlog name
+ @param[OUT] binlog_pos Binlog pos
+ @param[OUT] binlog_gtid Binlog GTID
+ @return true on error
+*/
+bool Rdb_binlog_manager::unpack_value(const uchar *value, char *binlog_name,
+ my_off_t *binlog_pos,
+ char *binlog_gtid)
+{
+ uint pack_len= 0;
+
+ DBUG_ASSERT(binlog_pos != nullptr);
+
+ // read version
+ uint16_t version= rdb_netbuf_to_uint16(value);
+ pack_len += Rdb_key_def::VERSION_SIZE;
+ if (version != Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION)
+ return true;
+
+ // read binlog file name length
+ uint16_t binlog_name_len= rdb_netbuf_to_uint16(value+pack_len);
+ pack_len += 2;
+ if (binlog_name_len)
+ {
+ // read and set binlog name
+ memcpy(binlog_name, value+pack_len, binlog_name_len);
+ binlog_name[binlog_name_len]= '\0';
+ pack_len += binlog_name_len;
+
+ // read and set binlog pos
+ *binlog_pos= rdb_netbuf_to_uint32(value+pack_len);
+ pack_len += 4;
+
+ // read gtid length
+ uint16_t binlog_gtid_len= rdb_netbuf_to_uint16(value+pack_len);
+ pack_len += 2;
+ if (binlog_gtid && binlog_gtid_len > 0)
+ {
+ // read and set gtid
+ memcpy(binlog_gtid, value+pack_len, binlog_gtid_len);
+ binlog_gtid[binlog_gtid_len]= '\0';
+ pack_len += binlog_gtid_len;
+ }
+ }
+ return false;
+}
+
+/**
+ Inserts a row into mysql.slave_gtid_info table. Doing this inside
+ the storage engine is more efficient than inserting/updating through MySQL.
+
+ @param[IN] id Primary key of the table.
+ @param[IN] db Database name. This is column 2 of the table.
+ @param[IN] gtid Gtid in human readable form. This is column 3 of the table.
+ @param[IN] write_batch Handle to storage engine writer.
+*/
+void Rdb_binlog_manager::update_slave_gtid_info(
+ uint id, const char* db, const char* gtid,
+ rocksdb::WriteBatchBase* write_batch)
+{
+ if (id && db && gtid) {
+ // Make sure that if the slave_gtid_info table exists we have a
+ // pointer to it via m_slave_gtid_info_tbl.
+ if (!m_slave_gtid_info_tbl.load()) {
+ m_slave_gtid_info_tbl.store(
+ rdb_get_ddl_manager()->find("mysql.slave_gtid_info"));
+ }
+ if (!m_slave_gtid_info_tbl.load()) {
+ // slave_gtid_info table is not present. Simply return.
+ return;
+ }
+ DBUG_ASSERT(m_slave_gtid_info_tbl.load()->m_key_count == 1);
+
+ const std::shared_ptr<const Rdb_key_def>& kd=
+ m_slave_gtid_info_tbl.load()->m_key_descr_arr[0];
+ String value;
+
+ // Build key
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE + 4]= {0};
+ uchar* buf= key_buf;
+ rdb_netbuf_store_index(buf, kd->get_index_number());
+ buf += Rdb_key_def::INDEX_NUMBER_SIZE;
+ rdb_netbuf_store_uint32(buf, id);
+ buf += 4;
+ rocksdb::Slice key_slice =
+ rocksdb::Slice((const char*)key_buf, buf-key_buf);
+
+ // Build value
+ uchar value_buf[128]= {0};
+ DBUG_ASSERT(gtid);
+ uint db_len= strlen(db);
+ uint gtid_len= strlen(gtid);
+ buf= value_buf;
+ // 1 byte used for flags. Empty here.
+ buf++;
+
+ // Write column 1.
+ DBUG_ASSERT(strlen(db) <= 64);
+ rdb_netbuf_store_byte(buf, db_len);
+ buf++;
+ memcpy(buf, db, db_len);
+ buf += db_len;
+
+ // Write column 2.
+ DBUG_ASSERT(gtid_len <= 56);
+ rdb_netbuf_store_byte(buf, gtid_len);
+ buf++;
+ memcpy(buf, gtid, gtid_len);
+ buf += gtid_len;
+ rocksdb::Slice value_slice =
+ rocksdb::Slice((const char*)value_buf, buf-value_buf);
+
+ write_batch->Put(kd->get_cf(), key_slice, value_slice);
+ }
+}
+
+bool Rdb_dict_manager::init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager)
+{
+ mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST);
+ m_db= rdb_dict;
+ bool is_automatic;
+ m_system_cfh= cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME,
+ "", nullptr, &is_automatic);
+ rdb_netbuf_store_index(m_key_buf_max_index_id,
+ Rdb_key_def::MAX_INDEX_ID);
+ m_key_slice_max_index_id= rocksdb::Slice(
+ reinterpret_cast<char*>(m_key_buf_max_index_id),
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+ resume_drop_indexes();
+ rollback_ongoing_index_creation();
+
+ return (m_system_cfh == nullptr);
+}
+
+std::unique_ptr<rocksdb::WriteBatch> Rdb_dict_manager::begin()
+{
+ return std::unique_ptr<rocksdb::WriteBatch>(new rocksdb::WriteBatch);
+}
+
+void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase *batch,
+ const rocksdb::Slice &key,
+ const rocksdb::Slice &value)
+{
+ batch->Put(m_system_cfh, key, value);
+}
+
+rocksdb::Status Rdb_dict_manager::get_value(const rocksdb::Slice &key,
+ std::string *value) const
+{
+ rocksdb::ReadOptions options;
+ options.total_order_seek= true;
+ return m_db->Get(options, m_system_cfh, key, value);
+}
+
+void Rdb_dict_manager::delete_key(rocksdb::WriteBatchBase *batch,
+ const rocksdb::Slice &key) const
+{
+ batch->Delete(m_system_cfh, key);
+}
+
+rocksdb::Iterator* Rdb_dict_manager::new_iterator()
+{
+ /* Reading data dictionary should always skip bloom filter */
+ rocksdb::ReadOptions read_options;
+ read_options.total_order_seek= true;
+ return m_db->NewIterator(read_options, m_system_cfh);
+}
+
+int Rdb_dict_manager::commit(rocksdb::WriteBatch *batch, bool sync)
+{
+ if (!batch)
+ return 1;
+ int res= 0;
+ rocksdb::WriteOptions options;
+ options.sync= sync;
+ rocksdb::Status s= m_db->Write(options, batch);
+ res= !s.ok(); // we return true when something failed
+ if (res)
+ {
+ rdb_handle_io_error(s, RDB_IO_ERROR_DICT_COMMIT);
+ }
+ batch->Clear();
+ return res;
+}
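+
+/*
+ Illustrative usage, not part of the original change: dictionary writers
+ batch all mutations between begin() and commit(), which is also the pattern
+ Rdb_tbl_def::put_dict() and the index-operation helpers below rely on.
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static int rdb_example_dict_write(Rdb_dict_manager *dict,
+ const rocksdb::Slice &key, const rocksdb::Slice &value)
+{
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict->begin();
+ dict->put_key(wb.get(), key, value);
+ return dict->commit(wb.get(), true /* sync */); // 0 on success
+}
+#endif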
+
+void Rdb_dict_manager::dump_index_id(uchar *netbuf,
+ Rdb_key_def::DATA_DICT_TYPE dict_type,
+ const GL_INDEX_ID &gl_index_id)
+{
+ rdb_netbuf_store_uint32(netbuf, dict_type);
+ rdb_netbuf_store_uint32(netbuf + Rdb_key_def::INDEX_NUMBER_SIZE,
+ gl_index_id.cf_id);
+ rdb_netbuf_store_uint32(netbuf + 2 * Rdb_key_def::INDEX_NUMBER_SIZE,
+ gl_index_id.index_id);
+}
+
+void Rdb_dict_manager::delete_with_prefix(rocksdb::WriteBatch* batch,
+ Rdb_key_def::DATA_DICT_TYPE dict_type,
+ const GL_INDEX_ID &gl_index_id) const
+{
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ dump_index_id(key_buf, dict_type, gl_index_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ delete_key(batch, key);
+}
+
+void Rdb_dict_manager::add_or_update_index_cf_mapping(
+ rocksdb::WriteBatch* batch,
+ const uchar m_index_type,
+ const uint16_t kv_version,
+ const uint32_t index_id,
+ const uint32_t cf_id)
+{
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ uchar value_buf[256]= {0};
+ GL_INDEX_ID gl_index_id= {cf_id, index_id};
+ dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ uchar* ptr= value_buf;
+ rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST);
+ ptr+= 2;
+ rdb_netbuf_store_byte(ptr, m_index_type);
+ ptr+= 1;
+ rdb_netbuf_store_uint16(ptr, kv_version);
+ ptr+= 2;
+
+ rocksdb::Slice value= rocksdb::Slice((char*)value_buf, ptr-value_buf);
+ batch->Put(m_system_cfh, key, value);
+}
+
+void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch* batch,
+ const uint32_t cf_id,
+ const uint32_t cf_flags)
+{
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0};
+ uchar value_buf[Rdb_key_def::VERSION_SIZE+
+ Rdb_key_def::INDEX_NUMBER_SIZE]= {0};
+ rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION);
+ rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ rdb_netbuf_store_uint16(value_buf, Rdb_key_def::CF_DEFINITION_VERSION);
+ rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, cf_flags);
+ rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf));
+ batch->Put(m_system_cfh, key, value);
+}
+
+void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID &gl_index_id) const
+{
+ delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id);
+}
+
+
+bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id,
+ uint16_t *m_index_dict_version,
+ uchar *m_index_type,
+ uint16_t *kv_version)
+{
+ bool found= false;
+ bool error= false;
+ std::string value;
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ rocksdb::Status status= get_value(key, &value);
+ if (status.ok())
+ {
+ const uchar* val= (const uchar*)value.c_str();
+ const uchar* ptr= val;
+ *m_index_dict_version= rdb_netbuf_to_uint16(val);
+ *kv_version= 0;
+ *m_index_type= 0;
+ ptr+= 2;
+ switch (*m_index_dict_version) {
+
+ case Rdb_key_def::INDEX_INFO_VERSION_VERIFY_KV_FORMAT:
+ case Rdb_key_def::INDEX_INFO_VERSION_GLOBAL_ID:
+ *m_index_type= rdb_netbuf_to_byte(ptr);
+ ptr+= 1;
+ *kv_version= rdb_netbuf_to_uint16(ptr);
+ found= true;
+ break;
+
+ default:
+ error= true;
+ break;
+ }
+
+ switch (*m_index_type)
+ {
+ case Rdb_key_def::INDEX_TYPE_PRIMARY:
+ case Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY:
+ {
+ error= *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST;
+ break;
+ }
+ case Rdb_key_def::INDEX_TYPE_SECONDARY:
+ error= *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST;
+ break;
+ default:
+ error= true;
+ break;
+ }
+ }
+
+ if (error)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("RocksDB: Found invalid key version number (%u, %u, %u) "
+ "from data dictionary. This should never happen "
+ "and it may be a bug.", *m_index_dict_version,
+ *m_index_type, *kv_version);
+ abort_with_stack_traces();
+ }
+
+ return found;
+}
+
+bool Rdb_dict_manager::get_cf_flags(const uint32_t cf_id, uint32_t *cf_flags)
+{
+ bool found= false;
+ std::string value;
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*2]= {0};
+ rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION);
+ rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ rocksdb::Status status= get_value(key, &value);
+ if (status.ok())
+ {
+ const uchar* val= (const uchar*)value.c_str();
+ uint16_t version= rdb_netbuf_to_uint16(val);
+ if (version == Rdb_key_def::CF_DEFINITION_VERSION)
+ {
+ *cf_flags= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE);
+ found= true;
+ }
+ }
+ return found;
+}
+
+/*
+ Returns index ids that were marked as deleted (via DROP TABLE) but have
+ not yet been removed by the drop_index_thread, as well as indexes whose
+ creation is still ongoing.
+ */
+void Rdb_dict_manager::get_ongoing_index_operation(
+ std::vector<GL_INDEX_ID>* gl_index_ids,
+ Rdb_key_def::DATA_DICT_TYPE dd_type)
+{
+ DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ||
+ dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+
+ uchar index_buf[Rdb_key_def::INDEX_NUMBER_SIZE];
+ rdb_netbuf_store_uint32(index_buf, dd_type);
+ rocksdb::Slice index_slice(reinterpret_cast<char*>(index_buf),
+ Rdb_key_def::INDEX_NUMBER_SIZE);
+
+ rocksdb::Iterator* it= new_iterator();
+ for (it->Seek(index_slice); it->Valid(); it->Next())
+ {
+ rocksdb::Slice key= it->key();
+ const uchar* ptr= (const uchar*)key.data();
+
+ /*
+ Ongoing drop/create index operations require key to be of the form:
+ dd_type + cf_id + index_id (== INDEX_NUMBER_SIZE * 3)
+
+ This may need to be changed in the future if we want to process a new
+ ddl_type with different format.
+ */
+ if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3 ||
+ rdb_netbuf_to_uint32(ptr) != dd_type)
+ {
+ break;
+ }
+
+ // We don't check version right now since currently we always store only
+ // Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION = 1 as a value.
+ // If increasing version number, we need to add version check logic here.
+ GL_INDEX_ID gl_index_id;
+ gl_index_id.cf_id= rdb_netbuf_to_uint32(ptr+Rdb_key_def::INDEX_NUMBER_SIZE);
+ gl_index_id.index_id= rdb_netbuf_to_uint32(
+ ptr + 2 * Rdb_key_def::INDEX_NUMBER_SIZE);
+ gl_index_ids->push_back(gl_index_id);
+ }
+ delete it;
+}
+
+/*
+ Returns true if the given index_id has an ongoing create/drop operation,
+ i.e. it is undergoing creation, or was marked as deleted via DROP TABLE
+ but the drop_index_thread has not wiped it yet.
+ */
+bool Rdb_dict_manager::is_index_operation_ongoing(
+ const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type)
+{
+ DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ||
+ dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+
+ bool found= false;
+ std::string value;
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ dump_index_id(key_buf, dd_type, gl_index_id);
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+
+ rocksdb::Status status= get_value(key, &value);
+ if (status.ok())
+ {
+ found= true;
+ }
+ return found;
+}
+
+/*
+ Adds index_id to the data dictionary so that it is eventually removed
+ by the drop_index_thread, or to track online index creation.
+ */
+void Rdb_dict_manager::start_ongoing_index_operation(
+ rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type)
+{
+ DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ||
+ dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ uchar value_buf[Rdb_key_def::VERSION_SIZE]= {0};
+ dump_index_id(key_buf, dd_type, gl_index_id);
+
+ // version as needed
+ if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING)
+ {
+ rdb_netbuf_store_uint16(value_buf,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION);
+ }
+ else
+ {
+ rdb_netbuf_store_uint16(value_buf,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING_VERSION);
+ }
+
+ rocksdb::Slice key= rocksdb::Slice((char*)key_buf, sizeof(key_buf));
+ rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf));
+ batch->Put(m_system_cfh, key, value);
+}
+
+/*
+ Removing index_id from data dictionary to confirm drop_index_thread
+ completed dropping entire key/values of the index_id
+ */
+void Rdb_dict_manager::end_ongoing_index_operation(rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type)
+{
+ DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ||
+ dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+
+ delete_with_prefix(batch, dd_type, gl_index_id);
+}
+
+/*
+ Returns true if there are no index ids pending removal
+ by the drop_index_thread
+ */
+bool Rdb_dict_manager::is_drop_index_empty()
+{
+ std::vector<GL_INDEX_ID> gl_index_ids;
+ get_ongoing_drop_indexes(&gl_index_ids);
+ return gl_index_ids.empty();
+}
+
+/*
+ This function is supposed to be called by DROP TABLE. It logs that
+ dropping the indexes has started, and adds data dictionary entries so
+ that all associated indexes are removed.
+ */
+void Rdb_dict_manager::add_drop_table(std::shared_ptr<Rdb_key_def>* key_descr,
+ uint32 n_keys,
+ rocksdb::WriteBatch *batch)
+{
+ std::unordered_set<GL_INDEX_ID> dropped_index_ids;
+ for (uint32 i = 0; i < n_keys; i++)
+ {
+ dropped_index_ids.insert(key_descr[i]->get_gl_index_id());
+ }
+
+ add_drop_index(dropped_index_ids, batch);
+}
+
+/*
+ Called during inplace index drop operations. It logs that dropping
+ the indexes has started, and adds data dictionary entries so that
+ all associated indexes are removed.
+ */
+void Rdb_dict_manager::add_drop_index(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ rocksdb::WriteBatch *batch)
+{
+ for (const auto& gl_index_id : gl_index_ids)
+ {
+ log_start_drop_index(gl_index_id, "Begin");
+ start_drop_index(batch, gl_index_id);
+ }
+}
+
+/*
+ Called during inplace index creation operations. It logs that index
+ creation has started, and updates the data dictionary with all associated
+ indexes to be added.
+ */
+void Rdb_dict_manager::add_create_index(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ rocksdb::WriteBatch *batch)
+{
+ for (const auto& gl_index_id : gl_index_ids)
+ {
+ // NO_LINT_DEBUG
+ sql_print_information("RocksDB: Begin index creation (%u,%u)",
+ gl_index_id.cf_id, gl_index_id.index_id);
+ start_create_index(batch, gl_index_id);
+ }
+}
+
+/*
+ This function is supposed to be called by the drop_index_thread when it
+ has finished dropping an index, or at the completion of online index creation.
+ */
+void Rdb_dict_manager::finish_indexes_operation(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ Rdb_key_def::DATA_DICT_TYPE dd_type)
+{
+ DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ||
+ dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+
+ std::unique_ptr<rocksdb::WriteBatch> wb= begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ for (const auto& gl_index_id : gl_index_ids)
+ {
+ if (is_index_operation_ongoing(gl_index_id, dd_type))
+ {
+ // NO_LINT_DEBUG
+ sql_print_information("RocksDB: Finished %s (%u,%u)",
+ dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING ?
+ "filtering dropped index" : "index creation",
+ gl_index_id.cf_id, gl_index_id.index_id);
+
+ end_ongoing_index_operation(batch, gl_index_id, dd_type);
+ }
+
+ if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING)
+ {
+ delete_index_info(batch, gl_index_id);
+ }
+ }
+ commit(batch);
+}
+
+/*
+ This function is supposed to be called when initializing
+ Rdb_dict_manager (at startup). If there are any index ids whose drop is
+ still ongoing, it prints out messages for diagnostic purposes.
+ */
+void Rdb_dict_manager::resume_drop_indexes()
+{
+ std::vector<GL_INDEX_ID> gl_index_ids;
+ get_ongoing_drop_indexes(&gl_index_ids);
+
+ uint max_index_id_in_dict= 0;
+ get_max_index_id(&max_index_id_in_dict);
+
+ for (const auto& gl_index_id : gl_index_ids)
+ {
+ log_start_drop_index(gl_index_id, "Resume");
+ if (max_index_id_in_dict < gl_index_id.index_id)
+ {
+ sql_print_error("RocksDB: Found max index id %u from data dictionary "
+ "but also found dropped index id (%u,%u) from drop_index "
+ "dictionary. This should never happen and is possibly a "
+ "bug.", max_index_id_in_dict, gl_index_id.cf_id,
+ gl_index_id.index_id);
+ abort_with_stack_traces();
+ }
+ }
+}
+
+void Rdb_dict_manager::rollback_ongoing_index_creation()
+{
+ std::unique_ptr<rocksdb::WriteBatch> wb= begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ std::vector<GL_INDEX_ID> gl_index_ids;
+ get_ongoing_create_indexes(&gl_index_ids);
+
+ for (const auto& gl_index_id : gl_index_ids)
+ {
+ // NO_LINT_DEBUG
+ sql_print_information("RocksDB: Removing incomplete create index (%u,%u)",
+ gl_index_id.cf_id, gl_index_id.index_id);
+
+ start_drop_index(batch, gl_index_id);
+ end_ongoing_index_operation(batch, gl_index_id,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ }
+
+ commit(batch);
+}
+
+void Rdb_dict_manager::log_start_drop_table(
+ const std::shared_ptr<Rdb_key_def>* key_descr,
+ uint32 n_keys,
+ const char* log_action)
+{
+ for (uint32 i = 0; i < n_keys; i++) {
+ log_start_drop_index(key_descr[i]->get_gl_index_id(), log_action);
+ }
+}
+
+void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id,
+ const char* log_action)
+{
+ uint16 m_index_dict_version= 0;
+ uchar m_index_type= 0;
+ uint16 kv_version= 0;
+ if (!get_index_info(gl_index_id, &m_index_dict_version,
+ &m_index_type, &kv_version))
+ {
+ sql_print_error("RocksDB: Failed to get column family info "
+ "from index id (%u,%u). MyRocks data dictionary may "
+ "get corrupted.", gl_index_id.cf_id, gl_index_id.index_id);
+ abort_with_stack_traces();
+ }
+ sql_print_information("RocksDB: %s filtering dropped index (%u,%u)",
+ log_action, gl_index_id.cf_id, gl_index_id.index_id);
+}
+
+bool Rdb_dict_manager::get_max_index_id(uint32_t *index_id)
+{
+ bool found= false;
+ std::string value;
+
+ rocksdb::Status status= get_value(m_key_slice_max_index_id, &value);
+ if (status.ok())
+ {
+ const uchar* val= (const uchar*)value.c_str();
+ uint16_t version= rdb_netbuf_to_uint16(val);
+ if (version == Rdb_key_def::MAX_INDEX_ID_VERSION)
+ {
+ *index_id= rdb_netbuf_to_uint32(val+Rdb_key_def::VERSION_SIZE);
+ found= true;
+ }
+ }
+ return found;
+}
+
+bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch* batch,
+ const uint32_t index_id)
+{
+ DBUG_ASSERT(batch != nullptr);
+
+ uint32_t old_index_id= -1;
+ if (get_max_index_id(&old_index_id))
+ {
+ if (old_index_id > index_id)
+ {
+ sql_print_error("RocksDB: Found max index id %u from data dictionary "
+ "but trying to update to older value %u. This should "
+ "never happen and possibly a bug.", old_index_id,
+ index_id);
+ return true;
+ }
+ }
+
+ uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE]=
+ {0};
+ rdb_netbuf_store_uint16(value_buf, Rdb_key_def::MAX_INDEX_ID_VERSION);
+ rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, index_id);
+ rocksdb::Slice value= rocksdb::Slice((char*)value_buf, sizeof(value_buf));
+ batch->Put(m_system_cfh, m_key_slice_max_index_id, value);
+ return false;
+}
+
+void Rdb_dict_manager::add_stats(
+ rocksdb::WriteBatch* batch,
+ const std::vector<Rdb_index_stats>& stats
+)
+{
+ DBUG_ASSERT(batch != nullptr);
+
+ for (const auto& it : stats) {
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, it.m_gl_index_id);
+
+ // IndexStats::materialize takes complete care of serialization including
+ // storing the version
+ auto value = Rdb_index_stats::materialize(
+ std::vector<Rdb_index_stats>{it}, 1.);
+
+ batch->Put(
+ m_system_cfh,
+ rocksdb::Slice((char*)key_buf, sizeof(key_buf)),
+ value
+ );
+ }
+}
+
+Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id)
+{
+ uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE*3]= {0};
+ dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, gl_index_id);
+
+ std::string value;
+ rocksdb::Status status= get_value(
+ rocksdb::Slice(reinterpret_cast<char*>(key_buf), sizeof(key_buf)),
+ &value);
+ if (status.ok())
+ {
+ std::vector<Rdb_index_stats> v;
+ // unmaterialize checks if the version matches
+ if (Rdb_index_stats::unmaterialize(value, &v)== 0 && v.size() == 1)
+ {
+ return v[0];
+ }
+ }
+
+ return Rdb_index_stats();
+}
+
+uint Rdb_seq_generator::get_and_update_next_number(Rdb_dict_manager *dict)
+{
+ DBUG_ASSERT(dict != nullptr);
+
+ uint res;
+ mysql_mutex_lock(&m_mutex);
+
+ res= m_next_number++;
+
+ std::unique_ptr<rocksdb::WriteBatch> wb= dict->begin();
+ rocksdb::WriteBatch *batch= wb.get();
+
+ DBUG_ASSERT(batch != nullptr);
+ dict->update_max_index_id(batch, res);
+ dict->commit(batch);
+
+ mysql_mutex_unlock(&m_mutex);
+
+ return res;
+}
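+
+/*
+ Illustrative usage, not part of the original change: new indexes obtain
+ their number from this sequence, and the new maximum is persisted in the
+ same call so that index ids are not reused after a restart.
+*/
+#ifdef RDB_DATADIC_DOC_EXAMPLES
+static uint rdb_example_new_index_number(Rdb_seq_generator *seq,
+ Rdb_dict_manager *dict)
+{
+ return seq->get_and_update_next_number(dict);
+}
+#endif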
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h
new file mode 100644
index 00000000000..7c277ddad7d
--- /dev/null
+++ b/storage/rocksdb/rdb_datadic.h
@@ -0,0 +1,1054 @@
+/*
+ Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ standard header files */
+#include <algorithm>
+#include <atomic>
+#include <map>
+#include <mutex>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+/* C standard header files */
+#include <arpa/inet.h>
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./properties_collector.h"
+#include "./rdb_buff.h"
+#include "./rdb_utils.h"
+
+namespace myrocks {
+
+class Rdb_dict_manager;
+class Rdb_key_def;
+class Rdb_field_packing;
+class Rdb_cf_manager;
+class Rdb_ddl_manager;
+
+/*
+ @brief
+ Field packing context.
+  The idea is to ensure that a call to an rdb_index_field_pack_t function
+  is followed by a call to an rdb_make_unpack_info_t function (an
+  illustrative call sequence is sketched after this class).
+
+  @detail
+  For some datatypes, unpack_info is produced as a side effect of the
+  rdb_index_field_pack_t function call.
+  For other datatypes, packing is just a call to make_sort_key(), while
+  rdb_make_unpack_info_t is a custom function.
+  In order to accommodate both cases, we require both calls to be made, and
+  unpack_info is passed as context data between the two.
+*/
+class Rdb_pack_field_context
+{
+ public:
+ explicit Rdb_pack_field_context(Rdb_string_writer *writer_arg) :
+ writer(writer_arg)
+ {}
+
+ // NULL means we're not producing unpack_info.
+ Rdb_string_writer *writer;
+};
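+
+/*
+  Illustrative call sequence for the pack / make-unpack-info contract
+  described above Rdb_pack_field_context (a sketch only; fpi, field, buf and
+  dst are hypothetical locals, and the member names come from
+  Rdb_field_packing declared later in this file):
+
+    Rdb_string_writer unpack_writer;
+    Rdb_pack_field_context pack_ctx(&unpack_writer);
+    fpi->m_pack_func(fpi, field, buf, &dst, &pack_ctx);
+    if (fpi->m_make_unpack_info_func != nullptr)
+      fpi->m_make_unpack_info_func(fpi->m_charset_codec, field, &pack_ctx);
+*/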
+
+struct Rdb_collation_codec;
+
+/*
+ C-style "virtual table" allowing different handling of packing logic based
+ on the field type. See Rdb_field_packing::setup() implementation.
+ */
+using rdb_make_unpack_info_t= void (*)(const Rdb_collation_codec *codec,
+ const Field *field,
+ Rdb_pack_field_context *pack_ctx);
+using rdb_index_field_unpack_t= int (*)(Rdb_field_packing *fpi, Field *field,
+ uchar *field_ptr,
+ Rdb_string_reader *reader,
+ Rdb_string_reader *unpack_reader);
+using rdb_index_field_skip_t= int (*)(const Rdb_field_packing *fpi,
+ const Field *field,
+ Rdb_string_reader *reader);
+using rdb_index_field_pack_t= void (*)(Rdb_field_packing *fpi, Field *field,
+ uchar* buf, uchar **dst,
+ Rdb_pack_field_context *pack_ctx);
+
+const uint RDB_INVALID_KEY_LEN= uint(-1);
+
+/* How much one checksum occupies when stored in the record */
+const size_t RDB_CHECKSUM_SIZE= sizeof(uint32_t);
+
+/*
+  How much the checksum data occupies in the record, in total:
+  two checksums plus a one-byte tag.
+*/
+const size_t RDB_CHECKSUM_CHUNK_SIZE= 2 * RDB_CHECKSUM_SIZE + 1;
+
+/*
+ Checksum data starts from CHECKSUM_DATA_TAG which is followed by two CRC32
+ checksums.
+*/
+const char RDB_CHECKSUM_DATA_TAG= 0x01;
+
+/*
+  Unpack data is variable-length. It consists of a one-byte tag plus a
+  two-byte length field. The length field includes the header as well.
+*/
+const char RDB_UNPACK_DATA_TAG= 0x02;
+const size_t RDB_UNPACK_DATA_LEN_SIZE= sizeof(uint16_t);
+const size_t RDB_UNPACK_HEADER_SIZE= sizeof(RDB_UNPACK_DATA_TAG) +
+ RDB_UNPACK_DATA_LEN_SIZE;
+
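+/*
+  Illustrative byte layout (a sketch derived from the constants above, not a
+  normative spec):
+
+    unpack_info:    [tag 0x02][length: 2 bytes, includes this 3-byte header]
+                    [unpack data ...]
+    checksum chunk: [tag 0x01][CRC32 #1: 4 bytes][CRC32 #2: 4 bytes]
+*/
+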
+// Possible return values for rdb_index_field_unpack_t functions.
+enum {
+ UNPACK_SUCCESS= 0,
+ UNPACK_FAILURE= 1,
+ UNPACK_INFO_MISSING= 2,
+};
+
+/*
+ An object of this class represents information about an index in an SQL
+ table. It provides services to encode and decode index tuples.
+
+  Note: an on-disk table has a single Rdb_key_def object per index, which
+  is shared across multiple TABLE* objects and may be used simultaneously from
+  different threads.
+
+ There are several data encodings:
+
+ === SQL LAYER ===
+ SQL layer uses two encodings:
+
+ - "Table->record format". This is the format that is used for the data in
+ the record buffers, table->record[i]
+
+ - KeyTupleFormat (see opt_range.cc) - this is used in parameters to index
+ lookup functions, like handler::index_read_map().
+
+ === Inside RocksDB ===
+ Primary Key is stored as a mapping:
+
+ index_tuple -> StoredRecord
+
+ StoredRecord is in Table->record format, except for blobs, which are stored
+ in-place. See ha_rocksdb::convert_record_to_storage_format for details.
+
+ Secondary indexes are stored as one of two variants:
+
+ index_tuple -> unpack_info
+ index_tuple -> empty_string
+
+ index_tuple here is the form of key that can be compared with memcmp(), aka
+ "mem-comparable form".
+
+  unpack_info is extra data that makes it possible to restore the original
+  value from its mem-comparable form. It is present only if the index
+  supports index-only reads.
+*/
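+
+/*
+  Rough sketch of the two mappings above (illustrative only; every key starts
+  with the 4-byte index number, and secondary keys are internally extended
+  with PK columns as described further below):
+
+    PK:  [index_no][PK columns, mem-comparable]        -> StoredRecord
+    SK:  [index_no][SK columns][PK columns, mem-cmp]   -> unpack_info or ""
+*/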
+
+class Rdb_key_def
+{
+public:
+ /* Convert a key from KeyTupleFormat to mem-comparable form */
+ uint pack_index_tuple(TABLE *tbl, uchar *pack_buffer, uchar *packed_tuple,
+ const uchar *key_tuple, key_part_map keypart_map) const;
+
+ /* Convert a key from Table->record format to mem-comparable form */
+ uint pack_record(const TABLE *tbl, uchar *pack_buffer, const uchar *record,
+ uchar *packed_tuple, Rdb_string_writer *unpack_info,
+ bool should_store_checksums,
+ longlong hidden_pk_id= 0, uint n_key_parts= 0,
+ uint *n_null_fields= nullptr) const;
+ /* Pack the hidden primary key into mem-comparable form. */
+ uint pack_hidden_pk(longlong hidden_pk_id,
+ uchar *packed_tuple) const;
+ int unpack_record(TABLE *table, uchar *buf, const rocksdb::Slice *packed_key,
+ const rocksdb::Slice *unpack_info, bool verify_checksums)
+ const;
+
+ static bool unpack_info_has_checksum(const rocksdb::Slice& unpack_info);
+ int compare_keys(const rocksdb::Slice *key1, const rocksdb::Slice *key2,
+ std::size_t* column_index) const;
+
+ size_t key_length(TABLE *table, const rocksdb::Slice &key) const;
+
+ /* Get the key that is the "infimum" for this index */
+ inline void get_infimum_key(uchar *key, uint *size) const
+ {
+ rdb_netbuf_store_index(key, m_index_number);
+ *size= INDEX_NUMBER_SIZE;
+ }
+
+ /* Get the key that is a "supremum" for this index */
+ inline void get_supremum_key(uchar *key, uint *size) const
+ {
+ rdb_netbuf_store_index(key, m_index_number+1);
+ *size= INDEX_NUMBER_SIZE;
+ }
+
+ /* Make a key that is right after the given key. */
+ static int successor(uchar *packed_tuple, uint len);
+
+  /*
+    This can be used to compare prefixes.
+    If X is a prefix of Y, then we consider that X = Y.
+    b describes the lookup key, which can be a prefix of a.
+  */
+ int cmp_full_keys(const rocksdb::Slice& a, const rocksdb::Slice& b) const
+ {
+ DBUG_ASSERT(covers_key(a));
+ DBUG_ASSERT(covers_key(b));
+
+ return memcmp(a.data(), b.data(), std::min(a.size(), b.size()));
+ }
+
+ /* Check if given mem-comparable key belongs to this index */
+ bool covers_key(const rocksdb::Slice &slice) const
+ {
+ if (slice.size() < INDEX_NUMBER_SIZE)
+ return false;
+
+ if (memcmp(slice.data(), m_index_number_storage_form, INDEX_NUMBER_SIZE))
+ return false;
+
+ return true;
+ }
+
+ /*
+ Return true if the passed mem-comparable key
+ - is from this index, and
+ - it matches the passed key prefix (the prefix is also in mem-comparable
+ form)
+ */
+ bool value_matches_prefix(const rocksdb::Slice &value,
+ const rocksdb::Slice &prefix) const
+ {
+ return covers_key(value) && !cmp_full_keys(value, prefix);
+ }
+
+ uint32 get_keyno() const
+ {
+ return m_keyno;
+ }
+
+ uint32 get_index_number() const
+ {
+ return m_index_number;
+ }
+
+ GL_INDEX_ID get_gl_index_id() const
+ {
+ GL_INDEX_ID gl_index_id = { m_cf_handle->GetID(), m_index_number };
+ return gl_index_id;
+ }
+
+ /* Must only be called for secondary keys: */
+ uint get_primary_key_tuple(TABLE *tbl,
+ const std::shared_ptr<const Rdb_key_def>& pk_descr,
+ const rocksdb::Slice *key, uchar *pk_buffer) const;
+
+ /* Return max length of mem-comparable form */
+ uint max_storage_fmt_length() const
+ {
+ return m_maxlength;
+ }
+
+ uint get_key_parts() const
+ {
+ return m_key_parts;
+ }
+
+ /*
+ Get a field object for key part #part_no
+
+ @detail
+    The SQL layer assumes unique secondary indexes and indexes in partitioned
+ tables are not "Extended" with Primary Key columns.
+
+ Internally, we always extend all indexes with PK columns. This function
+ uses our definition of how the index is Extended.
+ */
+ inline Field* get_table_field_for_part_no(TABLE *table, uint part_no) const;
+
+ const std::string& get_name() const {
+ return m_name;
+ }
+
+ Rdb_key_def(const Rdb_key_def& k);
+ Rdb_key_def(uint indexnr_arg, uint keyno_arg,
+ rocksdb::ColumnFamilyHandle* cf_handle_arg,
+ uint16_t index_dict_version_arg,
+ uchar index_type_arg,
+ uint16_t kv_format_version_arg,
+ bool is_reverse_cf_arg, bool is_auto_cf_arg,
+ const char* name,
+ Rdb_index_stats stats= Rdb_index_stats());
+ ~Rdb_key_def();
+
+ enum {
+ INDEX_NUMBER_SIZE= 4,
+ VERSION_SIZE= 2,
+ CF_NUMBER_SIZE= 4,
+ CF_FLAG_SIZE= 4,
+ PACKED_SIZE= 4, // one int
+ };
+
+ // bit flags for combining bools when writing to disk
+ enum {
+ REVERSE_CF_FLAG= 1,
+ AUTO_CF_FLAG= 2,
+ };
+
+ // Data dictionary types
+ enum DATA_DICT_TYPE {
+ DDL_ENTRY_INDEX_START_NUMBER= 1,
+ INDEX_INFO= 2,
+ CF_DEFINITION= 3,
+ BINLOG_INFO_INDEX_NUMBER= 4,
+ DDL_DROP_INDEX_ONGOING= 5,
+ INDEX_STATISTICS= 6,
+ MAX_INDEX_ID= 7,
+ DDL_CREATE_INDEX_ONGOING= 8,
+ END_DICT_INDEX_ID= 255
+ };
+
+ // Data dictionary schema version. Introduce newer versions
+ // if changing schema layout
+ enum {
+ DDL_ENTRY_INDEX_VERSION= 1,
+ CF_DEFINITION_VERSION= 1,
+ BINLOG_INFO_INDEX_NUMBER_VERSION= 1,
+ DDL_DROP_INDEX_ONGOING_VERSION= 1,
+ MAX_INDEX_ID_VERSION= 1,
+ DDL_CREATE_INDEX_ONGOING_VERSION= 1,
+ // Version for index stats is stored in IndexStats struct
+ };
+
+ // Index info version. Introduce newer versions when changing the
+ // INDEX_INFO layout. Update INDEX_INFO_VERSION_LATEST to point to the
+ // latest version number.
+ enum {
+ INDEX_INFO_VERSION_INITIAL= 1, // Obsolete
+ INDEX_INFO_VERSION_KV_FORMAT,
+ INDEX_INFO_VERSION_GLOBAL_ID,
+ // There is no change to data format in this version, but this version
+ // verifies KV format version, whereas previous versions do not. A version
+ // bump is needed to prevent older binaries from skipping the KV version
+ // check inadvertently.
+ INDEX_INFO_VERSION_VERIFY_KV_FORMAT,
+    // This normally points to the latest version (currently it does).
+ INDEX_INFO_VERSION_LATEST= INDEX_INFO_VERSION_VERIFY_KV_FORMAT,
+ };
+
+ // MyRocks index types
+ enum {
+ INDEX_TYPE_PRIMARY= 1,
+ INDEX_TYPE_SECONDARY= 2,
+ INDEX_TYPE_HIDDEN_PRIMARY= 3,
+ };
+
+ // Key/Value format version for each index type
+ enum {
+ PRIMARY_FORMAT_VERSION_INITIAL= 10,
+ // This change includes:
+ // - For columns that can be unpacked with unpack_info, PK
+ // stores the unpack_info.
+ // - DECIMAL datatype is no longer stored in the row (because
+ // it can be decoded from its mem-comparable form)
+    //  - VARCHAR columns use end-space padding.
+ PRIMARY_FORMAT_VERSION_UPDATE1= 11,
+ PRIMARY_FORMAT_VERSION_LATEST= PRIMARY_FORMAT_VERSION_UPDATE1,
+
+ SECONDARY_FORMAT_VERSION_INITIAL= 10,
+    // This changes the SK format to include unpack_info.
+ SECONDARY_FORMAT_VERSION_UPDATE1= 11,
+ SECONDARY_FORMAT_VERSION_LATEST= SECONDARY_FORMAT_VERSION_UPDATE1,
+ };
+
+ void setup(const TABLE *table, const Rdb_tbl_def *tbl_def);
+
+ rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; }
+
+ /* Check if keypart #kp can be unpacked from index tuple */
+ inline bool can_unpack(uint kp) const;
+ /* Check if keypart #kp needs unpack info */
+ inline bool has_unpack_info(uint kp) const;
+
+ /* Check if given table has a primary key */
+  /* Check if the given table uses a hidden primary key (no user-defined PK) */
+
+ void report_checksum_mismatch(bool is_key, const char *data,
+ size_t data_size) const;
+
+  /* Check if the index format version is at least pk_min if it is a PK,
+     or at least sk_min if it is an SK. */
+ bool index_format_min_check(int pk_min, int sk_min) const;
+
+private:
+
+#ifndef DBUG_OFF
+ inline bool is_storage_available(int offset, int needed) const
+ {
+ int storage_length= static_cast<int>(max_storage_fmt_length());
+ return (storage_length - offset) >= needed;
+ }
+#endif // DBUG_OFF
+
+ /* Global number of this index (used as prefix in StorageFormat) */
+ const uint32 m_index_number;
+
+ uchar m_index_number_storage_form[INDEX_NUMBER_SIZE];
+
+ rocksdb::ColumnFamilyHandle* m_cf_handle;
+
+public:
+ uint16_t m_index_dict_version;
+ uchar m_index_type;
+ /* KV format version for the index id */
+ uint16_t m_kv_format_version;
+ /* If true, the column family stores data in the reverse order */
+ bool m_is_reverse_cf;
+
+ bool m_is_auto_cf;
+ std::string m_name;
+ mutable Rdb_index_stats m_stats;
+private:
+
+ friend class Rdb_tbl_def; // for m_index_number above
+
+  /* Number of key parts in the primary key */
+ uint m_pk_key_parts;
+
+ /*
+ pk_part_no[X]=Y means that keypart #X of this key is key part #Y of the
+ primary key. Y==-1 means this column is not present in the primary key.
+ */
+ uint *m_pk_part_no;
+
+ /* Array of index-part descriptors. */
+ Rdb_field_packing *m_pack_info;
+
+ uint m_keyno; /* number of this index in the table */
+
+ /*
+ Number of key parts in the index (including "index extension"). This is how
+ many elements are in the m_pack_info array.
+ */
+ uint m_key_parts;
+
+ /* Maximum length of the mem-comparable form. */
+ uint m_maxlength;
+
+ /* mutex to protect setup */
+ mysql_mutex_t m_mutex;
+};
+
+// "Simple" collations (those specified in strings/ctype-simple.c) are simple
+// because their strnxfrm function maps one byte to one byte. However, the
+// mapping is not injective, so the inverse function will take in an extra
+// index parameter containing information to disambiguate what the original
+// character was.
+//
+// The m_enc* members are for encoding. Generally, we want encoding to be:
+// src -> (dst, idx)
+//
+// Since strnxfrm already gives us dst, we just need m_enc_idx[src] to give us
+// idx.
+//
+// For the inverse, we have:
+// (dst, idx) -> src
+//
+// We have m_dec_idx[idx][dst] = src to get our original character back.
+//
+struct Rdb_collation_codec
+{
+ const my_core::CHARSET_INFO *m_cs;
+  // The first element unpacks VARCHAR(n), the second one unpacks CHAR(n).
+ std::array<rdb_make_unpack_info_t, 2> m_make_unpack_info_func;
+ std::array<rdb_index_field_unpack_t, 2> m_unpack_func;
+
+ std::array<uchar, 256> m_enc_idx;
+ std::array<uchar, 256> m_enc_size;
+
+ std::array<uchar, 256> m_dec_size;
+ std::vector<std::array<uchar, 256>> m_dec_idx;
+};
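+
+/*
+  Worked example of the scheme above (a sketch; the concrete byte values are
+  made up): for a "simple" collation, strnxfrm maps, say, both 'a' and 'A' to
+  the same dst byte. Encoding additionally stores idx= m_enc_idx['a'] in the
+  unpack_info; decoding then recovers the original byte as
+  src= m_dec_idx[idx][dst].
+*/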
+
+extern mysql_mutex_t rdb_collation_data_mutex;
+extern mysql_mutex_t rdb_mem_cmp_space_mutex;
+extern std::array<const Rdb_collation_codec*, MY_ALL_CHARSETS_SIZE>
+ rdb_collation_data;
+
+
+class Rdb_field_packing
+{
+public:
+ /* Length of mem-comparable image of the field, in bytes */
+ int m_max_image_len;
+
+ /* Length of image in the unpack data */
+ int m_unpack_data_len;
+ int m_unpack_data_offset;
+
+ bool m_maybe_null; /* TRUE <=> NULL-byte is stored */
+
+ /*
+ Valid only for VARCHAR fields.
+ */
+ const CHARSET_INFO *m_varchar_charset;
+
+ // (Valid when Variable Length Space Padded Encoding is used):
+ uint m_segment_size; // size of segment used
+
+  // Number of bytes used to store the number of trimmed (or added)
+  // spaces in the unpack_info
+ bool m_unpack_info_uses_two_bytes;
+
+ const std::vector<uchar>* space_xfrm;
+ size_t space_xfrm_len;
+ size_t space_mb_len;
+
+ const Rdb_collation_codec* m_charset_codec;
+
+ /*
+ @return TRUE: this field makes use of unpack_info.
+ */
+ bool uses_unpack_info() const
+ {
+ return (m_make_unpack_info_func != nullptr);
+ }
+
+ /* TRUE means unpack_info stores the original field value */
+ bool m_unpack_info_stores_value;
+
+ rdb_index_field_pack_t m_pack_func;
+ rdb_make_unpack_info_t m_make_unpack_info_func;
+
+ /*
+ This function takes
+ - mem-comparable form
+ - unpack_info data
+ and restores the original value.
+ */
+ rdb_index_field_unpack_t m_unpack_func;
+
+ /*
+ This function skips over mem-comparable form.
+ */
+ rdb_index_field_skip_t m_skip_func;
+
+private:
+ /*
+ Location of the field in the table (key number and key part number).
+
+    Note that this describes not the field itself, but rather the position of
+    the field in the index. Consider an example:
+
+ col1 VARCHAR (100),
+      INDEX idx1 (col1),
+ INDEX idx2 (col1(10)),
+
+ Here, idx2 has a special Field object that is set to describe a 10-char
+ prefix of col1.
+
+ We must also store the keynr. It is needed for implicit "extended keys".
+ Every key in MyRocks needs to include PK columns. Generally, SQL layer
+ includes PK columns as part of its "Extended Keys" feature, but sometimes
+ it does not (known examples are unique secondary indexes and partitioned
+ tables).
+ In that case, MyRocks's index descriptor has invisible suffix of PK
+ columns (and the point is that these columns are parts of PK, not parts
+ of the current index).
+ */
+ uint m_keynr;
+ uint m_key_part;
+public:
+ bool setup(const Rdb_key_def *key_descr, const Field *field,
+ uint keynr_arg, uint key_part_arg, uint16 key_length);
+ Field *get_field_in_table(const TABLE *tbl) const;
+ void fill_hidden_pk_val(uchar **dst, longlong hidden_pk_id) const;
+};
+
+/*
+ Descriptor telling how to decode/encode a field to on-disk record storage
+ format. Not all information is in the structure yet, but eventually we
+ want to have as much as possible there to avoid virtual calls.
+
+ For encoding/decoding of index tuples, see Rdb_key_def.
+ */
+class Rdb_field_encoder
+{
+ public:
+ /*
+    STORE_NONE is set when a column can be decoded solely from its
+    mem-comparable form.
+    STORE_SOME is set when a column can be decoded from its mem-comparable
+    form plus unpack_info.
+ STORE_ALL is set when a column cannot be decoded, so its original value
+ must be stored in the PK records.
+ */
+ enum STORAGE_TYPE {
+ STORE_NONE,
+ STORE_SOME,
+ STORE_ALL,
+ };
+ STORAGE_TYPE m_storage_type;
+
+ uint m_null_offset;
+ uint16 m_field_index;
+
+ uchar m_null_mask; // 0 means the field cannot be null
+
+ my_core::enum_field_types m_field_type;
+
+ uint m_pack_length_in_rec;
+
+ bool maybe_null() const { return m_null_mask != 0; }
+
+ bool uses_variable_len_encoding() const
+ {
+ return (m_field_type == MYSQL_TYPE_BLOB ||
+ m_field_type == MYSQL_TYPE_VARCHAR);
+ }
+};
+
+inline Field* Rdb_key_def::get_table_field_for_part_no(TABLE *table,
+ uint part_no) const
+{
+ DBUG_ASSERT(part_no < get_key_parts());
+ return m_pack_info[part_no].get_field_in_table(table);
+}
+
+inline bool Rdb_key_def::can_unpack(uint kp) const
+{
+ DBUG_ASSERT(kp < m_key_parts);
+ return (m_pack_info[kp].m_unpack_func != nullptr);
+}
+
+inline bool Rdb_key_def::has_unpack_info(uint kp) const
+{
+ DBUG_ASSERT(kp < m_key_parts);
+ return m_pack_info[kp].uses_unpack_info();
+}
+
+
+/*
+ A table definition. This is an entry in the mapping
+
+ dbname.tablename -> {index_nr, index_nr, ... }
+
+ There is only one Rdb_tbl_def object for a given table.
+ That's why we keep auto_increment value here, too.
+*/
+
+class Rdb_tbl_def
+{
+ private:
+ void check_if_is_mysql_system_table();
+
+ /* Stores 'dbname.tablename' */
+ std::string m_dbname_tablename;
+
+ /* Store the db name, table name, and partition name */
+ std::string m_dbname;
+ std::string m_tablename;
+ std::string m_partition;
+
+ void set_name(const std::string& name);
+
+ public:
+ explicit Rdb_tbl_def(const std::string& name) :
+ m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1)
+ {
+ set_name(name);
+ }
+
+ Rdb_tbl_def(const char* name, size_t len) :
+ m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1)
+ {
+ set_name(std::string(name, len));
+ }
+
+ explicit Rdb_tbl_def(const rocksdb::Slice& slice, size_t pos= 0) :
+ m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1)
+ {
+ set_name(std::string(slice.data() + pos, slice.size() - pos));
+ }
+
+ ~Rdb_tbl_def();
+
+ /* Number of indexes */
+ uint m_key_count;
+
+ /* Array of index descriptors */
+ std::shared_ptr<Rdb_key_def>* m_key_descr_arr;
+
+ std::atomic<longlong> m_hidden_pk_val;
+ std::atomic<longlong> m_auto_incr_val;
+
+ /* Is this a system table */
+ bool m_is_mysql_system_table;
+
+ bool put_dict(Rdb_dict_manager *dict, rocksdb::WriteBatch *batch,
+ uchar *key, size_t keylen);
+
+ const std::string& full_tablename() const { return m_dbname_tablename; }
+ const std::string& base_dbname() const { return m_dbname; }
+ const std::string& base_tablename() const { return m_tablename; }
+ const std::string& base_partition() const { return m_partition; }
+};
+
+
+/*
+  A thread-safe sequential number generator. Its performance is not a concern,
+  so it is OK to protect it with a mutex.
+*/
+
+class Rdb_seq_generator
+{
+ uint m_next_number= 0;
+
+ mysql_mutex_t m_mutex;
+public:
+ void init(uint initial_number)
+ {
+    mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST);
+ m_next_number= initial_number;
+ }
+
+ uint get_and_update_next_number(Rdb_dict_manager *dict);
+
+ void cleanup()
+ {
+ mysql_mutex_destroy(&m_mutex);
+ }
+};
+
+
+interface Rdb_tables_scanner
+{
+ virtual int add_table(Rdb_tbl_def* tdef) =0;
+};
+
+
+/*
+ This contains a mapping of
+
+ dbname.table_name -> array{Rdb_key_def}.
+
+  Objects are shared among all threads.
+*/
+
+class Rdb_ddl_manager
+{
+ Rdb_dict_manager *m_dict= nullptr;
+ my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements
+ // maps index id to <table_name, index number>
+ std::map<GL_INDEX_ID, std::pair<std::string, uint>> m_index_num_to_keydef;
+ mysql_rwlock_t m_rwlock;
+
+ Rdb_seq_generator m_sequence;
+  // A queue of table stats to write into the data dictionary.
+  // It is produced by the event listener (i.e. compaction and flush threads)
+  // and consumed by the RocksDB background thread.
+ std::map<GL_INDEX_ID, Rdb_index_stats> m_stats2store;
+public:
+ /* Load the data dictionary from on-disk storage */
+ bool init(Rdb_dict_manager *dict_arg, Rdb_cf_manager *cf_manager,
+ uint32_t validate_tables);
+
+ void cleanup();
+
+ Rdb_tbl_def* find(const std::string& table_name, bool lock= true);
+ const std::shared_ptr<Rdb_key_def>& find(GL_INDEX_ID gl_index_id);
+ std::shared_ptr<Rdb_key_def> safe_find(GL_INDEX_ID gl_index_id);
+ void set_stats(
+ const std::unordered_map<GL_INDEX_ID, Rdb_index_stats>& stats);
+ void adjust_stats(
+ const std::vector<Rdb_index_stats>& new_data,
+ const std::vector<Rdb_index_stats>& deleted_data
+ =std::vector<Rdb_index_stats>());
+ void persist_stats(bool sync = false);
+
+ /* Modify the mapping and write it to on-disk storage */
+ int put_and_write(Rdb_tbl_def *key_descr, rocksdb::WriteBatch *batch);
+ void remove(Rdb_tbl_def *rec, rocksdb::WriteBatch *batch, bool lock= true);
+ bool rename(const std::string& from, const std::string& to,
+ rocksdb::WriteBatch *batch);
+
+ uint get_and_update_next_number(Rdb_dict_manager *dict)
+ { return m_sequence.get_and_update_next_number(dict); }
+
+ /* Walk the data dictionary */
+ int scan_for_tables(Rdb_tables_scanner* tables_scanner);
+
+ void erase_index_num(GL_INDEX_ID gl_index_id);
+
+private:
+ /* Put the data into in-memory table (only) */
+ int put(Rdb_tbl_def *key_descr, bool lock= true);
+
+ /* Helper functions to be passed to my_core::HASH object */
+ static const uchar* get_hash_key(Rdb_tbl_def *rec, size_t *length,
+ my_bool not_used __attribute__((unused)));
+ static void free_hash_elem(void* data);
+
+ bool validate_schemas();
+};
+
+
+/*
+ Writing binlog information into RocksDB at commit(),
+ and retrieving binlog information at crash recovery.
+  commit() and recovery are always executed by at most a single client
+  at a time, so concurrency control is not needed.
+
+  Binlog info is stored in RocksDB as follows:
+ key: BINLOG_INFO_INDEX_NUMBER
+ value: packed single row:
+ binlog_name_length (2 byte form)
+ binlog_name
+ binlog_position (4 byte form)
+ binlog_gtid_length (2 byte form)
+ binlog_gtid
+*/
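+/*
+  Example of one packed value per the layout above (a sketch with made-up
+  numbers; the exact byte order is whatever pack_value() writes):
+
+    [0x000D]["binlog.000123"][0x000004D2][0x0000]
+     len=13   binlog_name      pos=1234   empty gtid
+*/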
+class Rdb_binlog_manager
+{
+public:
+ bool init(Rdb_dict_manager *dict);
+ void cleanup();
+ void update(const char* binlog_name, const my_off_t binlog_pos,
+ const char* binlog_gtid, rocksdb::WriteBatchBase* batch);
+ bool read(char* binlog_name, my_off_t* binlog_pos, char* binlog_gtid);
+ void update_slave_gtid_info(uint id, const char* db, const char* gtid,
+ rocksdb::WriteBatchBase *write_batch);
+
+private:
+ Rdb_dict_manager *m_dict= nullptr;
+ uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE]= {0};
+ rocksdb::Slice m_key_slice;
+
+ rocksdb::Slice pack_value(uchar *buf,
+ const char *binlog_name,
+ const my_off_t binlog_pos,
+ const char *binlog_gtid);
+ bool unpack_value(const uchar *value, char *binlog_name,
+ my_off_t *binlog_pos, char *binlog_gtid);
+
+ std::atomic<Rdb_tbl_def*> m_slave_gtid_info_tbl;
+};
+
+
+/*
+ Rdb_dict_manager manages how MySQL on RocksDB (MyRocks) stores its
+ internal data dictionary.
+  MyRocks stores the data dictionary in a dedicated system column family
+  named __system__. The system column family is used by MyRocks
+  internally only, and is not used by applications.
+
+  Currently MyRocks uses the following data dictionary models.
+
+ 1. Table Name => internal index id mappings
+ key: Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER(0x1) + dbname.tablename
+ value: version, {cf_id, index_id}*n_indexes_of_the_table
+ version is 2 bytes. cf_id and index_id are 4 bytes.
+
+ 2. internal cf_id, index id => index information
+ key: Rdb_key_def::INDEX_INFO(0x2) + cf_id + index_id
+ value: version, index_type, kv_format_version
+ index_type is 1 byte, version and kv_format_version are 2 bytes.
+
+ 3. CF id => CF flags
+ key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id
+ value: version, {is_reverse_cf, is_auto_cf}
+ cf_flags is 4 bytes in total.
+
+ 4. Binlog entry (updated at commit)
+ key: Rdb_key_def::BINLOG_INFO_INDEX_NUMBER (0x4)
+ value: version, {binlog_name,binlog_pos,binlog_gtid}
+
+ 5. Ongoing drop index entry
+ key: Rdb_key_def::DDL_DROP_INDEX_ONGOING(0x5) + cf_id + index_id
+ value: version
+
+ 6. index stats
+ key: Rdb_key_def::INDEX_STATISTICS(0x6) + cf_id + index_id
+ value: version, {materialized PropertiesCollector::IndexStats}
+
+ 7. maximum index id
+ key: Rdb_key_def::MAX_INDEX_ID(0x7)
+ value: index_id
+ index_id is 4 bytes
+
+ 8. Ongoing create index entry
+ key: Rdb_key_def::DDL_CREATE_INDEX_ONGOING(0x8) + cf_id + index_id
+ value: version
+
+  Data dictionary operations are atomic inside RocksDB. For example,
+  creating a table with two indexes requires three Put calls, and they
+  have to be applied atomically. Rdb_dict_manager provides the wrapper
+  functions begin() and commit() to make such atomic operations easier.
+
+*/
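+
+/*
+  Typical atomic-update pattern (a sketch mirroring how callers in this patch
+  use the class, e.g. Rdb_seq_generator::get_and_update_next_number):
+
+    std::unique_ptr<rocksdb::WriteBatch> wb= dict->begin();
+    rocksdb::WriteBatch *batch= wb.get();
+    dict->update_max_index_id(batch, index_id);  // one or more dictionary Puts
+    dict->commit(batch);                         // applied atomically
+*/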
+class Rdb_dict_manager
+{
+private:
+ mysql_mutex_t m_mutex;
+ rocksdb::DB *m_db= nullptr;
+ rocksdb::ColumnFamilyHandle *m_system_cfh= nullptr;
+ /* Utility to put INDEX_INFO and CF_DEFINITION */
+
+ uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE]= {0};
+ rocksdb::Slice m_key_slice_max_index_id;
+
+ static void dump_index_id(uchar *netbuf,
+ Rdb_key_def::DATA_DICT_TYPE dict_type,
+ const GL_INDEX_ID &gl_index_id);
+ void delete_with_prefix(rocksdb::WriteBatch* batch,
+ Rdb_key_def::DATA_DICT_TYPE dict_type,
+ const GL_INDEX_ID &gl_index_id) const;
+ /* Functions for fast DROP TABLE/INDEX */
+ void resume_drop_indexes();
+ void log_start_drop_table(const std::shared_ptr<Rdb_key_def>* key_descr,
+ uint32 n_keys,
+ const char* log_action);
+ void log_start_drop_index(GL_INDEX_ID gl_index_id,
+ const char* log_action);
+public:
+ bool init(rocksdb::DB *rdb_dict, Rdb_cf_manager *cf_manager);
+
+ inline void cleanup()
+ {
+ mysql_mutex_destroy(&m_mutex);
+ }
+
+ inline void lock()
+ {
+ mysql_mutex_lock(&m_mutex);
+ }
+
+ inline void unlock()
+ {
+ mysql_mutex_unlock(&m_mutex);
+ }
+
+ /* Raw RocksDB operations */
+ std::unique_ptr<rocksdb::WriteBatch> begin();
+ int commit(rocksdb::WriteBatch *batch, bool sync = true);
+ rocksdb::Status get_value(const rocksdb::Slice& key,
+ std::string *value) const;
+ void put_key(rocksdb::WriteBatchBase *batch, const rocksdb::Slice &key,
+ const rocksdb::Slice &value);
+ void delete_key(rocksdb::WriteBatchBase *batch,
+ const rocksdb::Slice &key) const;
+ rocksdb::Iterator *new_iterator();
+
+ /* Internal Index id => CF */
+ void add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch,
+ const uchar index_type,
+ const uint16_t kv_version,
+ const uint index_id,
+ const uint cf_id);
+ void delete_index_info(rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID &index_id) const;
+ bool get_index_info(const GL_INDEX_ID &gl_index_id,
+ uint16_t *index_dict_version,
+ uchar *index_type, uint16_t *kv_version);
+
+ /* CF id => CF flags */
+ void add_cf_flags(rocksdb::WriteBatch *batch,
+ const uint cf_id,
+ const uint cf_flags);
+ bool get_cf_flags(const uint cf_id, uint *cf_flags);
+
+ /* Functions for fast CREATE/DROP TABLE/INDEX */
+ void get_ongoing_index_operation(std::vector<GL_INDEX_ID>* gl_index_ids,
+ Rdb_key_def::DATA_DICT_TYPE dd_type);
+ bool is_index_operation_ongoing(const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type);
+ void start_ongoing_index_operation(rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type);
+ void end_ongoing_index_operation(rocksdb::WriteBatch* batch,
+ const GL_INDEX_ID& gl_index_id,
+ Rdb_key_def::DATA_DICT_TYPE dd_type);
+ bool is_drop_index_empty();
+ void add_drop_table(std::shared_ptr<Rdb_key_def>* key_descr, uint32 n_keys,
+ rocksdb::WriteBatch *batch);
+ void add_drop_index(const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ rocksdb::WriteBatch *batch);
+ void add_create_index(const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ rocksdb::WriteBatch *batch);
+ void finish_indexes_operation(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids,
+ Rdb_key_def::DATA_DICT_TYPE dd_type);
+ void rollback_ongoing_index_creation();
+
+ inline void get_ongoing_drop_indexes(std::vector<GL_INDEX_ID>* gl_index_ids)
+ {
+ get_ongoing_index_operation(gl_index_ids,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING);
+ }
+ inline void get_ongoing_create_indexes(std::vector<GL_INDEX_ID>* gl_index_ids)
+ {
+ get_ongoing_index_operation(gl_index_ids,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ }
+ inline void start_drop_index(rocksdb::WriteBatch *wb,
+ const GL_INDEX_ID& gl_index_id)
+ {
+ start_ongoing_index_operation(wb, gl_index_id,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING);
+ }
+ inline void start_create_index(rocksdb::WriteBatch *wb,
+ const GL_INDEX_ID& gl_index_id)
+ {
+ start_ongoing_index_operation(wb, gl_index_id,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ }
+ inline void finish_drop_indexes(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids)
+ {
+ finish_indexes_operation(gl_index_ids,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING);
+ }
+ inline void finish_create_indexes(
+ const std::unordered_set<GL_INDEX_ID>& gl_index_ids)
+ {
+ finish_indexes_operation(gl_index_ids,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ }
+ inline bool is_drop_index_ongoing(const GL_INDEX_ID& gl_index_id)
+ {
+ return is_index_operation_ongoing(gl_index_id,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING);
+ }
+ inline bool is_create_index_ongoing(const GL_INDEX_ID& gl_index_id)
+ {
+ return is_index_operation_ongoing(gl_index_id,
+ Rdb_key_def::DDL_CREATE_INDEX_ONGOING);
+ }
+
+ bool get_max_index_id(uint32_t *index_id);
+ bool update_max_index_id(rocksdb::WriteBatch* batch,
+ const uint32_t index_id);
+ void add_stats(rocksdb::WriteBatch* batch,
+ const std::vector<Rdb_index_stats>& stats);
+ Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id);
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc
new file mode 100644
index 00000000000..78af6eff3a5
--- /dev/null
+++ b/storage/rocksdb/rdb_i_s.cc
@@ -0,0 +1,1189 @@
+/*
+ Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* C++ standard header files */
+#include <string>
+#include <vector>
+
+/* MySQL header files */
+#include <sql_show.h>
+
+/* RocksDB header files */
+#include "rocksdb/convenience.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/slice_transform.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./ha_rocksdb_proto.h"
+#include "./rdb_cf_manager.h"
+#include "./rdb_datadic.h"
+
+namespace myrocks {
+
+/**
+  Define the INFORMATION_SCHEMA (I_S) structures needed by the MyRocks
+  storage engine.
+*/
+
+#define ROCKSDB_FIELD_INFO(_name_, _len_, _type_, _flag_) \
+ { _name_, _len_, _type_, 0, _flag_, nullptr, 0 }
+
+#define ROCKSDB_FIELD_INFO_END ROCKSDB_FIELD_INFO(nullptr, \
+ 0, MYSQL_TYPE_NULL, 0)
+
+/*
+ Support for INFORMATION_SCHEMA.ROCKSDB_CFSTATS dynamic table
+ */
+static int rdb_i_s_cfstats_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ bool ret;
+ uint64_t val;
+
+ DBUG_ENTER("rdb_i_s_cfstats_fill_table");
+
+ std::vector<std::pair<const std::string, std::string>> cf_properties = {
+ {rocksdb::DB::Properties::kNumImmutableMemTable, "NUM_IMMUTABLE_MEM_TABLE"},
+ {rocksdb::DB::Properties::kMemTableFlushPending,
+ "MEM_TABLE_FLUSH_PENDING"},
+ {rocksdb::DB::Properties::kCompactionPending, "COMPACTION_PENDING"},
+ {rocksdb::DB::Properties::kCurSizeActiveMemTable,
+ "CUR_SIZE_ACTIVE_MEM_TABLE"},
+ {rocksdb::DB::Properties::kCurSizeAllMemTables, "CUR_SIZE_ALL_MEM_TABLES"},
+ {rocksdb::DB::Properties::kNumEntriesActiveMemTable,
+ "NUM_ENTRIES_ACTIVE_MEM_TABLE"},
+ {rocksdb::DB::Properties::kNumEntriesImmMemTables,
+ "NUM_ENTRIES_IMM_MEM_TABLES"},
+ {rocksdb::DB::Properties::kEstimateTableReadersMem,
+ "NON_BLOCK_CACHE_SST_MEM_USAGE"},
+ {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"}
+ };
+
+ rocksdb::DB *rdb= rdb_get_rocksdb_db();
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+ DBUG_ASSERT(rdb != nullptr);
+
+ for (auto cf_name : cf_manager.get_cf_names())
+ {
+ rocksdb::ColumnFamilyHandle* cfh;
+ bool is_automatic;
+
+ /*
+ Only the cf name is important. Whether it was generated automatically
+ does not matter, so is_automatic is ignored.
+ */
+ cfh= cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic);
+ if (cfh == nullptr)
+ continue;
+
+ for (auto property : cf_properties)
+ {
+ if (!rdb->GetIntProperty(cfh, property.first, &val))
+ continue;
+
+ DBUG_ASSERT(tables != nullptr);
+
+ tables->table->field[0]->store(cf_name.c_str(), cf_name.size(),
+ system_charset_info);
+ tables->table->field[1]->store(property.second.c_str(),
+ property.second.size(),
+ system_charset_info);
+ tables->table->field[2]->store(val, true);
+
+ ret= my_core::schema_table_store_record(thd, tables->table);
+
+ if (ret)
+ DBUG_RETURN(ret);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[]=
+{
+ ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+static int rdb_i_s_cfstats_init(void *p)
+{
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_cfstats_init");
+ DBUG_ASSERT(p != nullptr);
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_cfstats_fields_info;
+ schema->fill_table= rdb_i_s_cfstats_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Support for INFORMATION_SCHEMA.ROCKSDB_DBSTATS dynamic table
+ */
+static int rdb_i_s_dbstats_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ bool ret;
+ uint64_t val;
+
+ DBUG_ENTER("rdb_i_s_dbstats_fill_table");
+
+ std::vector<std::pair<std::string, std::string>> db_properties = {
+ {rocksdb::DB::Properties::kBackgroundErrors, "DB_BACKGROUND_ERRORS"},
+ {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"},
+ {rocksdb::DB::Properties::kOldestSnapshotTime, "DB_OLDEST_SNAPSHOT_TIME"}
+ };
+
+ rocksdb::DB *rdb= rdb_get_rocksdb_db();
+ const rocksdb::BlockBasedTableOptions& table_options=
+ rdb_get_table_options();
+
+ for (auto property : db_properties)
+ {
+ if (!rdb->GetIntProperty(property.first, &val))
+ continue;
+
+ DBUG_ASSERT(tables != nullptr);
+
+ tables->table->field[0]->store(property.second.c_str(),
+ property.second.size(),
+ system_charset_info);
+ tables->table->field[1]->store(val, true);
+
+ ret= my_core::schema_table_store_record(thd, tables->table);
+
+ if (ret)
+ DBUG_RETURN(ret);
+ }
+
+ /*
+ Currently, this can only show the usage of a block cache allocated
+ directly by the handlerton. If the column family config specifies a block
+ cache (i.e. the column family option has a parameter such as
+ block_based_table_factory={block_cache=1G}), then the block cache is
+ allocated within the rocksdb::GetColumnFamilyOptionsFromString().
+
+ There is no interface to retrieve this block cache, nor fetch the usage
+ information from the column family.
+ */
+ val= (table_options.block_cache ? table_options.block_cache->GetUsage() : 0);
+ tables->table->field[0]->store(STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"),
+ system_charset_info);
+ tables->table->field[1]->store(val, true);
+
+ ret= my_core::schema_table_store_record(thd, tables->table);
+
+ DBUG_RETURN(ret);
+}
+
+static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[]=
+{
+ ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+static int rdb_i_s_dbstats_init(void *p)
+{
+ DBUG_ASSERT(p != nullptr);
+
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_dbstats_init");
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_dbstats_fields_info;
+ schema->fill_table= rdb_i_s_dbstats_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT dynamic table
+ */
+
+static int rdb_i_s_perf_context_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+
+ int ret= 0;
+ Field** field= tables->table->field;
+
+ DBUG_ENTER("rdb_i_s_perf_context_fill_table");
+
+ std::vector<std::string> tablenames= rdb_get_open_table_names();
+ for (const auto& it : tablenames)
+ {
+ std::string str, dbname, tablename, partname;
+ Rdb_perf_counters counters;
+
+ if (rdb_normalize_tablename(it, &str)) {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname))
+ {
+ continue;
+ }
+
+ if (rdb_get_table_perf_counters(it.c_str(), &counters))
+ {
+ continue;
+ }
+
+ DBUG_ASSERT(field != nullptr);
+
+ field[0]->store(dbname.c_str(), dbname.size(), system_charset_info);
+ field[1]->store(tablename.c_str(), tablename.size(), system_charset_info);
+ if (partname.size() == 0)
+ {
+ field[2]->set_null();
+ }
+ else
+ {
+ field[2]->set_notnull();
+ field[2]->store(partname.c_str(), partname.size(), system_charset_info);
+ }
+
+ for (int i= 0; i < PC_MAX_IDX; i++)
+ {
+ field[3]->store(rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(),
+ system_charset_info);
+ field[4]->store(counters.m_value[i], true);
+
+ ret= my_core::schema_table_store_record(thd, tables->table);
+ if (ret)
+ DBUG_RETURN(ret);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
+static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[]=
+{
+ ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING,
+ MY_I_S_MAYBE_NULL),
+ ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG,
+ 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+static int rdb_i_s_perf_context_init(void *p)
+{
+ DBUG_ASSERT(p != nullptr);
+
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_perf_context_init");
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_perf_context_fields_info;
+ schema->fill_table= rdb_i_s_perf_context_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+static int rdb_i_s_perf_context_global_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+
+ int ret= 0;
+ DBUG_ENTER("rdb_i_s_perf_context_global_fill_table");
+
+ // Get a copy of the global perf counters.
+ Rdb_perf_counters global_counters;
+ rdb_get_global_perf_counters(&global_counters);
+
+ for (int i= 0; i < PC_MAX_IDX; i++) {
+ DBUG_ASSERT(tables->table != nullptr);
+ DBUG_ASSERT(tables->table->field != nullptr);
+
+ tables->table->field[0]->store(rdb_pc_stat_types[i].c_str(),
+ rdb_pc_stat_types[i].size(),
+ system_charset_info);
+ tables->table->field[1]->store(global_counters.m_value[i], true);
+
+ ret= my_core::schema_table_store_record(thd, tables->table);
+ if (ret)
+ DBUG_RETURN(ret);
+ }
+
+ DBUG_RETURN(0);
+}
+
+static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[]=
+{
+ ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+static int rdb_i_s_perf_context_global_init(void *p)
+{
+ DBUG_ASSERT(p != nullptr);
+
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_perf_context_global_init");
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_perf_context_global_fields_info;
+ schema->fill_table= rdb_i_s_perf_context_global_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Support for INFORMATION_SCHEMA.ROCKSDB_CFOPTIONS dynamic table
+ */
+static int rdb_i_s_cfoptions_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+
+ bool ret;
+
+ DBUG_ENTER("rdb_i_s_cfoptions_fill_table");
+
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+
+ for (auto cf_name : cf_manager.get_cf_names())
+ {
+ std::string val;
+ rocksdb::ColumnFamilyOptions opts;
+ cf_manager.get_cf_options(cf_name, &opts);
+
+ std::vector<std::pair<std::string, std::string>> cf_option_types = {
+ {"COMPARATOR", opts.comparator == nullptr ? "NULL" :
+ std::string(opts.comparator->Name())},
+ {"MERGE_OPERATOR", opts.merge_operator == nullptr ? "NULL" :
+ std::string(opts.merge_operator->Name())},
+ {"COMPACTION_FILTER", opts.compaction_filter == nullptr ? "NULL" :
+ std::string(opts.compaction_filter->Name())},
+ {"COMPACTION_FILTER_FACTORY",
+ opts.compaction_filter_factory == nullptr ? "NULL" :
+ std::string(opts.compaction_filter_factory->Name())},
+ {"WRITE_BUFFER_SIZE", std::to_string(opts.write_buffer_size)},
+ {"MAX_WRITE_BUFFER_NUMBER", std::to_string(opts.max_write_buffer_number)},
+ {"MIN_WRITE_BUFFER_NUMBER_TO_MERGE",
+ std::to_string(opts.min_write_buffer_number_to_merge)},
+ {"NUM_LEVELS", std::to_string(opts.num_levels)},
+ {"LEVEL0_FILE_NUM_COMPACTION_TRIGGER",
+ std::to_string(opts.level0_file_num_compaction_trigger)},
+ {"LEVEL0_SLOWDOWN_WRITES_TRIGGER",
+ std::to_string(opts.level0_slowdown_writes_trigger)},
+ {"LEVEL0_STOP_WRITES_TRIGGER",
+ std::to_string(opts.level0_stop_writes_trigger)},
+ {"MAX_MEM_COMPACTION_LEVEL", std::to_string(opts.max_mem_compaction_level)},
+ {"TARGET_FILE_SIZE_BASE", std::to_string(opts.target_file_size_base)},
+ {"TARGET_FILE_SIZE_MULTIPLIER", std::to_string(opts.target_file_size_multiplier)},
+ {"MAX_BYTES_FOR_LEVEL_BASE", std::to_string(opts.max_bytes_for_level_base)},
+ {"LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES",
+ opts.level_compaction_dynamic_level_bytes ? "ON" : "OFF"},
+ {"MAX_BYTES_FOR_LEVEL_MULTIPLIER",
+ std::to_string(opts.max_bytes_for_level_multiplier)},
+ {"SOFT_RATE_LIMIT", std::to_string(opts.soft_rate_limit)},
+ {"HARD_RATE_LIMIT", std::to_string(opts.hard_rate_limit)},
+ {"RATE_LIMIT_DELAY_MAX_MILLISECONDS",
+ std::to_string(opts.rate_limit_delay_max_milliseconds)},
+ {"ARENA_BLOCK_SIZE", std::to_string(opts.arena_block_size)},
+ {"DISABLE_AUTO_COMPACTIONS",
+ opts.disable_auto_compactions ? "ON" : "OFF"},
+ {"PURGE_REDUNDANT_KVS_WHILE_FLUSH",
+ opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"},
+ {"VERIFY_CHECKSUM_IN_COMPACTION",
+ opts.verify_checksums_in_compaction ? "ON" : "OFF"},
+ {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS",
+ std::to_string(opts.max_sequential_skip_in_iterations)},
+ {"MEMTABLE_FACTORY",
+ opts.memtable_factory == nullptr ? "NULL" :
+ opts.memtable_factory->Name()},
+ {"INPLACE_UPDATE_SUPPORT",
+ opts.inplace_update_support ? "ON" : "OFF"},
+ {"INPLACE_UPDATE_NUM_LOCKS",
+       std::to_string(opts.inplace_update_num_locks)},
+ {"MEMTABLE_PREFIX_BLOOM_BITS_RATIO",
+ std::to_string(opts.memtable_prefix_bloom_size_ratio)},
+ {"MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE",
+ std::to_string(opts.memtable_huge_page_size)},
+ {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)},
+ {"MAX_SUCCESSIVE_MERGES",
+ std::to_string(opts.max_successive_merges)},
+ {"MIN_PARTIAL_MERGE_OPERANDS",
+ std::to_string(opts.min_partial_merge_operands)},
+ {"OPTIMIZE_FILTERS_FOR_HITS",
+ (opts.optimize_filters_for_hits ? "ON" : "OFF")},
+ };
+
+ // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value
+ val = opts.max_bytes_for_level_multiplier_additional.empty() ? "NULL" : "";
+ for (auto level : opts.max_bytes_for_level_multiplier_additional)
+ {
+ val.append(std::to_string(level) + ":");
+ }
+    // Strip the trailing ':' only when something was appended; when the list
+    // is empty, val holds the literal "NULL" and must stay intact.
+    if (!val.empty() && val.back() == ':')
+      val.pop_back();
+ cf_option_types.push_back({"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val});
+
+ // get COMPRESSION_TYPE option value
+ GetStringFromCompressionType(&val, opts.compression);
+ if (val.empty())
+ {
+ val = "NULL";
+ }
+ cf_option_types.push_back({"COMPRESSION_TYPE", val});
+
+ // get COMPRESSION_PER_LEVEL option value
+ val = opts.compression_per_level.empty() ? "NULL" : "";
+ for (auto compression_type : opts.compression_per_level)
+ {
+ std::string res;
+ GetStringFromCompressionType(&res, compression_type);
+ if (!res.empty())
+ {
+ val.append(res + ":");
+ }
+ }
+    // Strip the trailing ':' only when something was appended; when the list
+    // is empty, val holds the literal "NULL" and must stay intact.
+    if (!val.empty() && val.back() == ':')
+      val.pop_back();
+ cf_option_types.push_back({"COMPRESSION_PER_LEVEL", val});
+
+ // get compression_opts value
+ val = std::to_string(opts.compression_opts.window_bits) + ":";
+ val.append(std::to_string(opts.compression_opts.level) + ":");
+ val.append(std::to_string(opts.compression_opts.strategy));
+ cf_option_types.push_back({"COMPRESSION_OPTS", val});
+
+ // bottommost_compression
+ if (opts.bottommost_compression)
+ {
+ std::string res;
+ GetStringFromCompressionType(&res, opts.bottommost_compression);
+ if (!res.empty())
+ {
+ cf_option_types.push_back({"BOTTOMMOST_COMPRESSION", res});
+ }
+ }
+
+ // get PREFIX_EXTRACTOR option
+ cf_option_types.push_back({"PREFIX_EXTRACTOR",
+ opts.prefix_extractor == nullptr ? "NULL" :
+ std::string(opts.prefix_extractor->Name())});
+
+ // get COMPACTION_STYLE option
+ switch (opts.compaction_style)
+ {
+ case rocksdb::kCompactionStyleLevel: val = "kCompactionStyleLevel"; break;
+ case rocksdb::kCompactionStyleUniversal: val = "kCompactionStyleUniversal"; break;
+    case rocksdb::kCompactionStyleFIFO: val = "kCompactionStyleFIFO"; break;
+    case rocksdb::kCompactionStyleNone: val = "kCompactionStyleNone"; break;
+ default: val = "NULL";
+ }
+ cf_option_types.push_back({"COMPACTION_STYLE", val});
+
+ // get COMPACTION_OPTIONS_UNIVERSAL related options
+ rocksdb::CompactionOptionsUniversal compac_opts = opts.compaction_options_universal;
+ val = "{SIZE_RATIO=";
+ val.append(std::to_string(compac_opts.size_ratio));
+ val.append("; MIN_MERGE_WIDTH=");
+ val.append(std::to_string(compac_opts.min_merge_width));
+ val.append("; MAX_MERGE_WIDTH=");
+ val.append(std::to_string(compac_opts.max_merge_width));
+ val.append("; MAX_SIZE_AMPLIFICATION_PERCENT=");
+ val.append(std::to_string(compac_opts.max_size_amplification_percent));
+ val.append("; COMPRESSION_SIZE_PERCENT=");
+ val.append(std::to_string(compac_opts.compression_size_percent));
+ val.append("; STOP_STYLE=");
+ switch (compac_opts.stop_style)
+ {
+ case rocksdb::kCompactionStopStyleSimilarSize:
+ val.append("kCompactionStopStyleSimilarSize}"); break;
+ case rocksdb::kCompactionStopStyleTotalSize:
+ val.append("kCompactionStopStyleTotalSize}"); break;
+ default: val.append("}");
+ }
+ cf_option_types.push_back({"COMPACTION_OPTIONS_UNIVERSAL", val});
+
+ // get COMPACTION_OPTION_FIFO option
+ cf_option_types.push_back({"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE",
+ std::to_string(opts.compaction_options_fifo.max_table_files_size)});
+
+ // get block-based table related options
+ const rocksdb::BlockBasedTableOptions& table_options=
+ rdb_get_table_options();
+
+ // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option
+ cf_option_types.push_back(
+ {"BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS",
+ table_options.cache_index_and_filter_blocks ? "1" : "0"});
+
+ // get BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE option value
+ switch (table_options.index_type)
+ {
+ case rocksdb::BlockBasedTableOptions::kBinarySearch: val = "kBinarySearch"; break;
+ case rocksdb::BlockBasedTableOptions::kHashSearch: val = "kHashSearch"; break;
+ default: val = "NULL";
+ }
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE", val});
+
+ // get BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION option value
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION",
+ table_options.hash_index_allow_collision ? "ON" : "OFF"});
+
+ // get BLOCK_BASED_TABLE_FACTORY::CHECKSUM option value
+ switch (table_options.checksum)
+ {
+ case rocksdb::kNoChecksum: val = "kNoChecksum"; break;
+ case rocksdb::kCRC32c: val = "kCRC32c"; break;
+ case rocksdb::kxxHash: val = "kxxHash"; break;
+ default: val = "NULL";
+ }
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::CHECKSUM", val});
+
+ // get BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE option value
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE",
+ table_options.no_block_cache ? "ON" : "OFF"});
+
+ // get BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY",
+ table_options.filter_policy == nullptr ? "NULL" :
+ std::string(table_options.filter_policy->Name())});
+
+ // get BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING",
+ table_options.whole_key_filtering ? "1" : "0"});
+
+ // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE",
+ table_options.block_cache == nullptr ? "NULL" :
+ std::to_string(table_options.block_cache->GetUsage())});
+
+ // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED",
+ table_options.block_cache_compressed == nullptr ? "NULL" :
+ std::to_string(table_options.block_cache_compressed->GetUsage())});
+
+ // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE",
+ std::to_string(table_options.block_size)});
+
+ // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION",
+ std::to_string(table_options.block_size_deviation)});
+
+ // get BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL",
+ std::to_string(table_options.block_restart_interval)});
+
+ // get BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION option
+ cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION",
+ std::to_string(table_options.format_version)});
+
+ for (auto cf_option_type : cf_option_types)
+ {
+ DBUG_ASSERT(tables->table != nullptr);
+ DBUG_ASSERT(tables->table->field != nullptr);
+
+ tables->table->field[0]->store(cf_name.c_str(), cf_name.size(),
+ system_charset_info);
+ tables->table->field[1]->store(cf_option_type.first.c_str(),
+ cf_option_type.first.size(),
+ system_charset_info);
+ tables->table->field[2]->store(cf_option_type.second.c_str(),
+ cf_option_type.second.size(),
+ system_charset_info);
+
+ ret = my_core::schema_table_store_record(thd, tables->table);
+
+ if (ret)
+ DBUG_RETURN(ret);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] =
+{
+ ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+/*
+ * Helper function for rdb_i_s_global_info_fill_table
+ * to insert (TYPE, NAME, VALUE) rows into
+ * information_schema.rocksdb_global_info
+ */
+static int rdb_global_info_fill_row(my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ const char *type,
+ const char *name,
+ const char *value)
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+ DBUG_ASSERT(tables->table != nullptr);
+ DBUG_ASSERT(type != nullptr);
+ DBUG_ASSERT(name != nullptr);
+ DBUG_ASSERT(value != nullptr);
+
+ Field **field= tables->table->field;
+ DBUG_ASSERT(field != nullptr);
+
+ field[0]->store(type, strlen(type), system_charset_info);
+ field[1]->store(name, strlen(name), system_charset_info);
+ field[2]->store(value, strlen(value), system_charset_info);
+
+ return my_core::schema_table_store_record(thd, tables->table);
+}
+
+/*
+ Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table
+ */
+static int rdb_i_s_global_info_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+
+ DBUG_ENTER("rdb_i_s_global_info_fill_table");
+ static const uint32_t INT_BUF_LEN = 21;
+ static const uint32_t GTID_BUF_LEN = 60;
+ static const uint32_t CF_ID_INDEX_BUF_LEN = 60;
+
+ int ret= 0;
+
+ /* binlog info */
+ Rdb_binlog_manager *blm= rdb_get_binlog_manager();
+ DBUG_ASSERT(blm != nullptr);
+
+ char file_buf[FN_REFLEN+1]= {0};
+ my_off_t pos = 0;
+ char pos_buf[INT_BUF_LEN]= {0};
+ char gtid_buf[GTID_BUF_LEN]= {0};
+
+ if (blm->read(file_buf, &pos, gtid_buf)) {
+    snprintf(pos_buf, INT_BUF_LEN, "%llu", (unsigned long long) pos);
+ ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf);
+ ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf);
+ ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf);
+ }
+
+ /* max index info */
+ Rdb_dict_manager *dict_manager= rdb_get_dict_manager();
+ DBUG_ASSERT(dict_manager != nullptr);
+
+ uint32_t max_index_id;
+ char max_index_id_buf[INT_BUF_LEN]= {0};
+
+ if (dict_manager->get_max_index_id(&max_index_id)) {
+ snprintf(max_index_id_buf, INT_BUF_LEN, "%u", max_index_id);
+ ret |= rdb_global_info_fill_row(thd, tables, "MAX_INDEX_ID", "MAX_INDEX_ID",
+ max_index_id_buf);
+ }
+
+ /* cf_id -> cf_flags */
+ char cf_id_buf[INT_BUF_LEN]= {0};
+ char cf_value_buf[FN_REFLEN+1] = {0};
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+ for (auto cf_handle : cf_manager.get_all_cf()) {
+ uint flags;
+ dict_manager->get_cf_flags(cf_handle->GetID(), &flags);
+ snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID());
+ snprintf(cf_value_buf, FN_REFLEN, "%s [%u]", cf_handle->GetName().c_str(),
+ flags);
+ ret |= rdb_global_info_fill_row(thd, tables, "CF_FLAGS", cf_id_buf,
+ cf_value_buf);
+
+ if (ret)
+ break;
+ }
+
+ /* DDL_DROP_INDEX_ONGOING */
+ std::vector<GL_INDEX_ID> gl_index_ids;
+ dict_manager->get_ongoing_index_operation(&gl_index_ids,
+ Rdb_key_def::DDL_DROP_INDEX_ONGOING);
+ char cf_id_index_buf[CF_ID_INDEX_BUF_LEN]= {0};
+ for (auto gl_index_id : gl_index_ids) {
+ snprintf(cf_id_index_buf, CF_ID_INDEX_BUF_LEN, "cf_id:%u,index_id:%u",
+ gl_index_id.cf_id, gl_index_id.index_id);
+ ret |= rdb_global_info_fill_row(thd, tables, "DDL_DROP_INDEX_ONGOING",
+ cf_id_index_buf, "");
+
+ if (ret)
+ break;
+ }
+
+ DBUG_RETURN(ret);
+}
+
+static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] =
+{
+ ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("NAME", FN_REFLEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+
+namespace // anonymous namespace = not visible outside this source file
+{
+struct Rdb_ddl_scanner : public Rdb_tables_scanner
+{
+ my_core::THD *m_thd;
+ my_core::TABLE *m_table;
+
+ int add_table(Rdb_tbl_def* tdef) override;
+};
+} // anonymous namespace
+
+
+int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef)
+{
+ DBUG_ASSERT(tdef != nullptr);
+
+ int ret= 0;
+
+ DBUG_ASSERT(m_table != nullptr);
+ Field** field= m_table->field;
+ DBUG_ASSERT(field != nullptr);
+
+ const std::string& dbname= tdef->base_dbname();
+ field[0]->store(dbname.c_str(), dbname.size(), system_charset_info);
+
+ const std::string& tablename= tdef->base_tablename();
+ field[1]->store(tablename.c_str(), tablename.size(), system_charset_info);
+
+ const std::string& partname= tdef->base_partition();
+ if (partname.length() == 0)
+ {
+ field[2]->set_null();
+ }
+ else
+ {
+ field[2]->set_notnull();
+ field[2]->store(partname.c_str(), partname.size(), system_charset_info);
+ }
+
+ for (uint i= 0; i < tdef->m_key_count; i++)
+ {
+ const std::shared_ptr<const Rdb_key_def>& kd= tdef->m_key_descr_arr[i];
+ DBUG_ASSERT(kd != nullptr);
+
+ field[3]->store(kd->m_name.c_str(), kd->m_name.size(), system_charset_info);
+
+ GL_INDEX_ID gl_index_id = kd->get_gl_index_id();
+ field[4]->store(gl_index_id.cf_id, true);
+ field[5]->store(gl_index_id.index_id, true);
+ field[6]->store(kd->m_index_type, true);
+ field[7]->store(kd->m_kv_format_version, true);
+
+ std::string cf_name= kd->get_cf()->GetName();
+ field[8]->store(cf_name.c_str(), cf_name.size(), system_charset_info);
+
+ ret= my_core::schema_table_store_record(m_thd, m_table);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int rdb_i_s_ddl_fill_table(my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond)
+{
+ DBUG_ENTER("rdb_i_s_ddl_fill_table");
+
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+
+ Rdb_ddl_scanner ddl_arg;
+ ddl_arg.m_thd= thd;
+ ddl_arg.m_table= tables->table;
+
+ Rdb_ddl_manager *ddl_manager= rdb_get_ddl_manager();
+ DBUG_ASSERT(ddl_manager != nullptr);
+ int ret= ddl_manager->scan_for_tables(&ddl_arg);
+
+ DBUG_RETURN(ret);
+}
+
+static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] =
+{
+ ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN+1, MYSQL_TYPE_STRING,
+ MY_I_S_MAYBE_NULL),
+ ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0),
+ ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0),
+ ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0),
+ ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t),
+ MYSQL_TYPE_SHORT, 0),
+ ROCKSDB_FIELD_INFO("CF", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+static int rdb_i_s_ddl_init(void *p)
+{
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_ddl_init");
+ DBUG_ASSERT(p != nullptr);
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_ddl_fields_info;
+ schema->fill_table= rdb_i_s_ddl_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+static int rdb_i_s_cfoptions_init(void *p)
+{
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_cfoptions_init");
+ DBUG_ASSERT(p != nullptr);
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_cfoptions_fields_info;
+ schema->fill_table= rdb_i_s_cfoptions_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+static int rdb_i_s_global_info_init(void *p)
+{
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_global_info_init");
+ DBUG_ASSERT(p != nullptr);
+
+ schema= reinterpret_cast<my_core::ST_SCHEMA_TABLE*>(p);
+
+ schema->fields_info= rdb_i_s_global_info_fields_info;
+ schema->fill_table= rdb_i_s_global_info_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+/* Given a path to a file, return just the filename portion. */
+static std::string rdb_filename_without_path(
+ const std::string& path)
+{
+ /* Find last slash in path */
+ size_t pos = path.rfind('/');
+
+ /* None found? Just return the original string */
+ if (pos == std::string::npos) {
+ return std::string(path);
+ }
+
+ /* Return everything after the last slash */
+ return path.substr(pos + 1);
+}
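+/*
+  For example (illustration only), "/data/.rocksdb/000071.sst" yields
+  "000071.sst", and a name with no '/' separator is returned unchanged.
+*/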
+
+/* Fill the information_schema.rocksdb_index_file_map virtual table */
+static int rdb_i_s_index_file_map_fill_table(
+ my_core::THD *thd,
+ my_core::TABLE_LIST *tables,
+ my_core::Item *cond __attribute__((__unused__)))
+{
+ DBUG_ASSERT(thd != nullptr);
+ DBUG_ASSERT(tables != nullptr);
+ DBUG_ASSERT(tables->table != nullptr);
+
+ int ret = 0;
+ Field **field = tables->table->field;
+ DBUG_ASSERT(field != nullptr);
+
+ DBUG_ENTER("rdb_i_s_index_file_map_fill_table");
+
+ /* Iterate over all the column families */
+ rocksdb::DB *rdb= rdb_get_rocksdb_db();
+ DBUG_ASSERT(rdb != nullptr);
+
+ Rdb_cf_manager& cf_manager= rdb_get_cf_manager();
+ for (auto cf_handle : cf_manager.get_all_cf()) {
+ /* Grab the properties of all the tables in the column family */
+ rocksdb::TablePropertiesCollection table_props_collection;
+ rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle,
+ &table_props_collection);
+ if (!s.ok()) {
+ continue;
+ }
+
+ /* Iterate over all the items in the collection, each of which contains a
+ * name and the actual properties */
+ for (auto props : table_props_collection) {
+ /* Add the SST name into the output */
+ std::string sst_name = rdb_filename_without_path(props.first);
+ field[2]->store(sst_name.data(), sst_name.size(), system_charset_info);
+
+ /* Get the __indexstats__ data out of the table property */
+ std::vector<Rdb_index_stats> stats;
+ Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats);
+ if (stats.empty()) {
+ field[0]->store(-1, true);
+ field[1]->store(-1, true);
+ field[3]->store(-1, true);
+ field[4]->store(-1, true);
+ field[5]->store(-1, true);
+ field[6]->store(-1, true);
+ field[7]->store(-1, true);
+ field[8]->store(-1, true);
+ }
+ else {
+ for (auto it : stats) {
+ /* Add the index number, the number of rows, and data size to the output */
+ field[0]->store(it.m_gl_index_id.cf_id, true);
+ field[1]->store(it.m_gl_index_id.index_id, true);
+ field[3]->store(it.m_rows, true);
+ field[4]->store(it.m_data_size, true);
+ field[5]->store(it.m_entry_deletes, true);
+ field[6]->store(it.m_entry_single_deletes, true);
+ field[7]->store(it.m_entry_merges, true);
+ field[8]->store(it.m_entry_others, true);
+
+ /* Tell MySQL about this row in the virtual table */
+ ret= my_core::schema_table_store_record(thd, tables->table);
+ if (ret != 0) {
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ DBUG_RETURN(ret);
+}
+
+static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] =
+{
+ /* The information_schema.rocksdb_index_file_map virtual table has the
+ * following fields:
+ * COLUMN_FAMILY => the index's column family contained in the SST file
+ * INDEX_NUMBER => the index id contained in the SST file
+ * SST_NAME => the name of the SST file containing some indexes
+ * NUM_ROWS => the number of entries of this index id in this SST file
+ * DATA_SIZE => the data size stored in this SST file for this index id
+ * ENTRY_DELETES => the number of delete entries for this index id
+ * ENTRY_SINGLEDELETES => the number of single-delete entries for this index id
+ * ENTRY_MERGES => the number of merge entries for this index id
+ * ENTRY_OTHERS => the number of other entries for this index id */
+ ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0),
+ ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0),
+ ROCKSDB_FIELD_INFO("SST_NAME", NAME_LEN+1, MYSQL_TYPE_STRING, 0),
+ ROCKSDB_FIELD_INFO("NUM_ROWS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO("DATA_SIZE", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO("ENTRY_DELETES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO("ENTRY_SINGLEDELETES", sizeof(int64_t),
+ MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO("ENTRY_MERGES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO("ENTRY_OTHERS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0),
+ ROCKSDB_FIELD_INFO_END
+};
+
+/* Initialize the information_schema.rocksdb_index_file_map virtual table */
+static int rdb_i_s_index_file_map_init(void *p)
+{
+ my_core::ST_SCHEMA_TABLE *schema;
+
+ DBUG_ENTER("rdb_i_s_index_file_map_init");
+ DBUG_ASSERT(p != nullptr);
+
+ schema= (my_core::ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info= rdb_i_s_index_file_map_fields_info;
+ schema->fill_table= rdb_i_s_index_file_map_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+static int rdb_i_s_deinit(void *p __attribute__((__unused__)))
+{
+ DBUG_ENTER("rdb_i_s_deinit");
+ DBUG_RETURN(0);
+}
+
+static struct st_mysql_information_schema rdb_i_s_info=
+{ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION };
+
+struct st_mysql_plugin rdb_i_s_cfstats=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_CFSTATS",
+ "Facebook",
+ "RocksDB column family stats",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_cfstats_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_dbstats=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_DBSTATS",
+ "Facebook",
+ "RocksDB database stats",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_dbstats_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_perf_context=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_PERF_CONTEXT",
+ "Facebook",
+ "RocksDB perf context stats",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_perf_context_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_perf_context_global=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_PERF_CONTEXT_GLOBAL",
+ "Facebook",
+ "RocksDB perf context stats (all)",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_perf_context_global_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_cfoptions=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_CF_OPTIONS",
+ "Facebook",
+ "RocksDB column family options",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_cfoptions_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_global_info=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_GLOBAL_INFO",
+ "Facebook",
+ "RocksDB global info",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_global_info_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_ddl=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_DDL",
+ "Facebook",
+ "RocksDB Data Dictionary",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_ddl_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+struct st_mysql_plugin rdb_i_s_index_file_map=
+{
+ MYSQL_INFORMATION_SCHEMA_PLUGIN,
+ &rdb_i_s_info,
+ "ROCKSDB_INDEX_FILE_MAP",
+ "Facebook",
+ "RocksDB index file map",
+ PLUGIN_LICENSE_GPL,
+ rdb_i_s_index_file_map_init,
+ rdb_i_s_deinit,
+ 0x0001, /* version number (0.1) */
+ nullptr, /* status variables */
+ nullptr, /* system variables */
+ nullptr, /* config options */
+ 0, /* flags */
+};
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h
new file mode 100644
index 00000000000..846defab961
--- /dev/null
+++ b/storage/rocksdb/rdb_i_s.h
@@ -0,0 +1,34 @@
+/*
+ Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+namespace myrocks {
+
+/*
+ Declare INFORMATION_SCHEMA (I_S) plugins needed by MyRocks storage engine.
+*/
+
+extern struct st_mysql_plugin rdb_i_s_cfstats;
+extern struct st_mysql_plugin rdb_i_s_dbstats;
+extern struct st_mysql_plugin rdb_i_s_perf_context;
+extern struct st_mysql_plugin rdb_i_s_perf_context_global;
+extern struct st_mysql_plugin rdb_i_s_cfoptions;
+extern struct st_mysql_plugin rdb_i_s_global_info;
+extern struct st_mysql_plugin rdb_i_s_ddl;
+extern struct st_mysql_plugin rdb_i_s_index_file_map;
+
+} // namespace myrocks
+
diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc
new file mode 100644
index 00000000000..dc85db4d356
--- /dev/null
+++ b/storage/rocksdb/rdb_index_merge.cc
@@ -0,0 +1,604 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header file */
+#include "./rdb_index_merge.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./rdb_datadic.h"
+
+namespace myrocks {
+
+Rdb_index_merge::Rdb_index_merge(const ulonglong merge_buf_size,
+ const ulonglong merge_combine_read_size,
+ const rocksdb::Comparator* comparator) :
+ m_merge_buf_size(merge_buf_size),
+ m_merge_combine_read_size(merge_combine_read_size),
+ m_comparator(comparator),
+ m_rec_buf_unsorted(nullptr),
+ m_output_buf(nullptr)
+{
+}
+
+Rdb_index_merge::~Rdb_index_merge()
+{
+ /*
+ Close the tmp file. We don't need to worry about deletion; MySQL handles it.
+ */
+ my_close(m_merge_file.fd, MYF(MY_WME));
+
+ /* There should be no records left in the offset tree */
+ DBUG_ASSERT(m_offset_tree.empty());
+
+ /* There should be no pointers left on the merge heap */
+ DBUG_ASSERT(m_merge_min_heap.empty());
+}
+
+int Rdb_index_merge::init()
+{
+ /*
+ Create a temporary merge file on disk to store sorted chunks during
+ inplace index creation.
+ */
+ if (merge_file_create())
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ /*
+ Then, allocate buffer to store unsorted records before they are written
+ to disk. They will be written to disk sorted. A sorted tree is used to
+ keep track of the offset of each record within the unsorted buffer.
+ */
+ m_rec_buf_unsorted= std::make_shared<merge_buf_info>(m_merge_buf_size);
+
+ /*
+ Allocate output buffer that will contain sorted block that is written to
+ disk.
+ */
+ m_output_buf= std::make_shared<merge_buf_info>(m_merge_buf_size);
+
+ return 0;
+}
+
+/**
+ Create a merge file in the given location.
+*/
+int Rdb_index_merge::merge_file_create()
+{
+ DBUG_ASSERT(m_merge_file.fd == -1);
+
+ int fd = mysql_tmpfile("myrocks");
+
+ if (fd < 0)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ m_merge_file.fd = fd;
+ m_merge_file.num_sort_buffers = 0;
+
+ return 0;
+}
+
+/**
+ Add record to offset tree (and unsorted merge buffer) in preparation for
+ writing out to disk in sorted chunks.
+
+ If buffer in memory is full, write the buffer out to disk sorted using the
+ offset tree, and clear the tree. (Happens in merge_buf_write)
+*/
+int Rdb_index_merge::add(const rocksdb::Slice& key,
+ const rocksdb::Slice& val)
+{
+ /* Adding a record after heap is already created results in error */
+ DBUG_ASSERT(m_merge_min_heap.empty());
+
+ /*
+ Check if sort buffer is going to be out of space, if so write it
+ out to disk in sorted order using offset tree.
+ */
+ uint total_offset= RDB_MERGE_CHUNK_LEN + m_rec_buf_unsorted->curr_offset +
+ RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER +
+ key.size() + val.size();
+ if (total_offset >= m_rec_buf_unsorted->total_size)
+ {
+ if (merge_buf_write())
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error writing sort buffer to disk.");
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ }
+
+ ulonglong rec_offset= m_rec_buf_unsorted->curr_offset;
+
+ /*
+ Store key and value in temporary unsorted in memory buffer pointed to by
+ offset tree.
+ */
+ m_rec_buf_unsorted->store_key_value(key, val);
+
+ /* Find sort order of the new record */
+ m_offset_tree.emplace(m_rec_buf_unsorted->block.get() + rec_offset,
+ m_comparator);
+
+ return 0;
+}
+
+/**
+ Sort + write merge buffer chunk out to disk.
+*/
+int Rdb_index_merge::merge_buf_write()
+{
+ DBUG_ASSERT(m_merge_file.fd != -1);
+ DBUG_ASSERT(m_rec_buf_unsorted != nullptr);
+ DBUG_ASSERT(m_output_buf != nullptr);
+ DBUG_ASSERT(!m_offset_tree.empty());
+
+ /* Write actual chunk size to first 8 bytes of the merge buffer */
+ merge_store_uint64(m_output_buf->block.get(),
+ m_rec_buf_unsorted->curr_offset + RDB_MERGE_CHUNK_LEN);
+ m_output_buf->curr_offset += RDB_MERGE_CHUNK_LEN;
+
+ /*
+ Iterate through the offset tree. Should be ordered by the secondary key
+ at this point.
+ */
+ for (auto& rec : m_offset_tree)
+ {
+ DBUG_ASSERT(m_output_buf->curr_offset <= m_merge_buf_size);
+
+ /* Read record from offset (should never fail) */
+ rocksdb::Slice key;
+ rocksdb::Slice val;
+ merge_read_rec(rec.block, &key, &val);
+
+ /* Store key and value into sorted output buffer */
+ m_output_buf->store_key_value(key, val);
+ }
+
+ DBUG_ASSERT(m_output_buf->curr_offset <= m_output_buf->total_size);
+
+ /*
+ Write output buffer to disk.
+
+ Need to position cursor to the chunk it needs to be at on filesystem
+ then write into the respective merge buffer.
+ */
+ if (my_seek(m_merge_file.fd, m_merge_file.num_sort_buffers * m_merge_buf_size,
+ SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error seeking to location in merge file on disk.");
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ if (my_write(m_merge_file.fd, m_output_buf->block.get(),
+ m_output_buf->total_size, MYF(MY_WME | MY_NABP)))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error writing sorted merge buffer to disk.");
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ /* Increment merge file offset to track number of merge buffers written */
+ m_merge_file.num_sort_buffers += 1;
+
+ /* Reset everything for next run */
+ merge_reset();
+
+ return 0;
+}
+
+/**
+ Prepare n-way merge of n sorted buffers on disk, using a heap sorted by
+ secondary key records.
+*/
+int Rdb_index_merge::merge_heap_prepare()
+{
+ DBUG_ASSERT(m_merge_min_heap.empty());
+
+ /*
+ If the offset tree is not empty, there are still some records that need to
+ be written to disk. Write them out now.
+ */
+ if (!m_offset_tree.empty() && merge_buf_write())
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ DBUG_ASSERT(m_merge_file.num_sort_buffers > 0);
+
+ /*
+ For an n-way merge, we need to read chunks of each merge file
+ simultaneously.
+ */
+ ulonglong chunk_size= m_merge_combine_read_size/
+ m_merge_file.num_sort_buffers;
+ if (chunk_size >= m_merge_buf_size)
+ {
+ chunk_size= m_merge_buf_size;
+ }
+
+ /* Allocate buffers for each chunk */
+ for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++)
+ {
+ auto entry= std::make_shared<merge_heap_entry>(m_comparator);
+
+ /*
+ Read chunk_size bytes from each chunk on disk, and place inside
+ respective chunk buffer.
+ */
+ size_t total_size=
+ entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size);
+
+ if (total_size == (size_t) - 1)
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ /* Can reach this condition if an index was added on table w/ no rows */
+ if (total_size - RDB_MERGE_CHUNK_LEN == 0)
+ {
+ break;
+ }
+
+ /* Read the first record from each buffer to initially populate the heap */
+ if (entry->read_rec(&entry->key, &entry->val))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Chunk size is too small to process merge.");
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ m_merge_min_heap.push(std::move(entry));
+ }
+
+ return 0;
+}
+
+/**
+ Create and/or iterate through keys in the merge heap.
+*/
+int Rdb_index_merge::next(rocksdb::Slice* key, rocksdb::Slice* val)
+{
+ /*
+ If table fits in one sort buffer, we can optimize by writing
+ the sort buffer directly through to the sstfilewriter instead of
+ needing to create tmp files/heap to merge the sort buffers.
+
+ If there are no sort buffer records (alters on empty tables),
+ also exit here.
+ */
+ if (m_merge_file.num_sort_buffers == 0)
+ {
+ if (m_offset_tree.empty())
+ {
+ return -1;
+ }
+
+ auto rec= m_offset_tree.begin();
+
+ /* Read record from offset */
+ merge_read_rec(rec->block, key, val);
+
+ m_offset_tree.erase(rec);
+ return 0;
+ }
+
+ int res;
+
+ /*
+ If heap and heap chunk info are empty, we must be beginning the merge phase
+ of the external sort. Populate the heap with initial values from each
+ disk chunk.
+ */
+ if (m_merge_min_heap.empty())
+ {
+ if ((res= merge_heap_prepare()))
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error during preparation of heap.");
+ return res;
+ }
+
+ /*
+ Return the first top record without popping, as we haven't put this
+ inside the SST file yet.
+ */
+ merge_heap_top(key, val);
+ return 0;
+ }
+
+ DBUG_ASSERT(!m_merge_min_heap.empty());
+ return merge_heap_pop_and_get_next(key, val);
+}
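+/*
+  Usage sketch (hypothetical caller, for illustration only): the external
+  sort is driven by calling add() for every key/value pair during the scan
+  phase, then draining records in sorted order via next() until it
+  returns -1:
+
+    Rdb_index_merge merge(buf_size, combine_read_size, comparator);
+    if (merge.init())
+      return HA_ERR_INTERNAL_ERROR;
+    while (have_more_rows)            // scan phase
+      if (merge.add(key, val))
+        return HA_ERR_INTERNAL_ERROR;
+    rocksdb::Slice k, v;
+    while (merge.next(&k, &v) == 0)   // merge phase
+      write_to_sst(k, v);             // e.g. hand off to an SstFileWriter
+
+  buf_size, have_more_rows and write_to_sst() are placeholders, not names
+  from this codebase.
+*/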
+
+/**
+ Get current top record from the heap.
+*/
+void Rdb_index_merge::merge_heap_top(rocksdb::Slice* key,
+ rocksdb::Slice* val)
+{
+ DBUG_ASSERT(!m_merge_min_heap.empty());
+
+ const std::shared_ptr<merge_heap_entry>& entry= m_merge_min_heap.top();
+ *key= entry->key;
+ *val= entry->val;
+}
+
+/**
+ Pops the top record, and uses it to read next record from the
+ corresponding sort buffer and push onto the heap.
+
+ Returns -1 when there are no more records in the heap.
+*/
+int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice* key,
+ rocksdb::Slice* val)
+{
+ /*
+ Make a new reference to shared ptr so it doesn't get destroyed
+ during pop(). We are going to push this entry back onto the heap.
+ */
+ const std::shared_ptr<merge_heap_entry> entry= m_merge_min_heap.top();
+ m_merge_min_heap.pop();
+
+ /*
+ We are finished w/ current chunk if:
+ current_offset + disk_offset == total_size
+
+ Return without adding entry back onto heap.
+ If heap is also empty, we must be finished with merge.
+ */
+ if (entry->chunk_info->is_chunk_finished())
+ {
+ if (m_merge_min_heap.empty())
+ {
+ return -1;
+ }
+
+ merge_heap_top(key, val);
+ return 0;
+ }
+
+ /*
+ Make sure we haven't reached the end of the chunk.
+ */
+ DBUG_ASSERT(!entry->chunk_info->is_chunk_finished());
+
+ /*
+ If read_rec() fails, it means either the chunk was cut off
+ or we've reached the end of the respective chunk.
+ */
+ if (entry->read_rec(&entry->key, &entry->val))
+ {
+ if (entry->read_next_chunk_from_disk(m_merge_file.fd))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+
+ /* Try reading record again, should never fail. */
+ if (entry->read_rec(&entry->key, &entry->val))
+ {
+ return HA_ERR_INTERNAL_ERROR;
+ }
+ }
+
+ /* Push entry back on to the heap w/ updated buffer + offset ptr */
+ m_merge_min_heap.push(std::move(entry));
+
+ /* Return the current top record on heap */
+ merge_heap_top(key, val);
+ return 0;
+}
+
+int Rdb_index_merge::merge_heap_entry::read_next_chunk_from_disk(File fd)
+{
+ if (chunk_info->read_next_chunk_from_disk(fd))
+ {
+ return 1;
+ }
+
+ block= chunk_info->block.get();
+ return 0;
+}
+
+int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd)
+{
+ disk_curr_offset += curr_offset;
+
+ if (my_seek(fd, disk_curr_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error seeking to location in merge file on disk.");
+ return 1;
+ }
+
+ /* Overwrite the old block */
+ size_t bytes_read= my_read(fd, block.get(), block_len, MYF(MY_WME));
+ if (bytes_read == (size_t) -1)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error reading merge file from disk.");
+ return 1;
+ }
+
+ curr_offset= 0;
+ return 0;
+}
+
+/**
+ Get records from offset within sort buffer and compare them.
+ Sort by least to greatest.
+*/
+int Rdb_index_merge::merge_record_compare(const uchar* a_block,
+ const uchar* b_block,
+ const rocksdb::Comparator* const comparator)
+{
+ return comparator->Compare(as_slice(a_block), as_slice(b_block));
+}
+
+/**
+ Given an offset in a merge sort buffer, read out the keys + values.
+ After this, block will point to the next record in the buffer.
+**/
+void Rdb_index_merge::merge_read_rec(const uchar* block,
+ rocksdb::Slice* key,
+ rocksdb::Slice* val)
+{
+ /* Read key at block offset into key slice and the value into value slice*/
+ read_slice(key, block);
+ read_slice(val, block + RDB_MERGE_REC_DELIMITER + key->size());
+}
+
+void Rdb_index_merge::read_slice(rocksdb::Slice* slice, const uchar* block_ptr)
+{
+ uint64 slice_len;
+ merge_read_uint64(&block_ptr, &slice_len);
+
+ *slice= rocksdb::Slice(reinterpret_cast<const char*>(block_ptr), slice_len);
+}
+
+int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *key,
+ rocksdb::Slice *val)
+{
+ const uchar* block_ptr= block;
+
+ /* Read key at block offset into key slice and the value into value slice*/
+ if (read_slice(key, &block_ptr) != 0 || read_slice(val, &block_ptr) != 0)
+ {
+ return 1;
+ }
+
+ chunk_info->curr_offset += (uintptr_t) block_ptr - (uintptr_t) block;
+ block += (uintptr_t) block_ptr - (uintptr_t) block;
+
+ return 0;
+}
+
+int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice* slice,
+ const uchar** block_ptr)
+{
+ if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER))
+ {
+ return 1;
+ }
+
+ uint64 slice_len;
+ merge_read_uint64(block_ptr, &slice_len);
+ if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER + slice_len))
+ {
+ return 1;
+ }
+
+ *slice= rocksdb::Slice(reinterpret_cast<const char*>(*block_ptr), slice_len);
+ *block_ptr += slice_len;
+ return 0;
+}
+
+size_t Rdb_index_merge::merge_heap_entry::prepare(File fd, ulonglong f_offset,
+ ulonglong chunk_size)
+{
+ chunk_info= std::make_shared<merge_buf_info>(chunk_size);
+ size_t res = chunk_info->prepare(fd, f_offset);
+ if (res != (size_t) - 1)
+ {
+ block= chunk_info->block.get() + RDB_MERGE_CHUNK_LEN;
+ }
+
+ return res;
+}
+
+size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset)
+{
+ disk_start_offset= f_offset;
+ disk_curr_offset= f_offset;
+
+ /*
+ Need to position cursor to the chunk it needs to be at on filesystem
+ then read 'chunk_size' bytes into the respective chunk buffer.
+ */
+ if (my_seek(fd, f_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error seeking to location in merge file on disk.");
+ return (size_t) - 1;
+ }
+
+ size_t bytes_read= my_read(fd, block.get(), total_size, MYF(MY_WME));
+ if (bytes_read == (size_t) - 1)
+ {
+ // NO_LINT_DEBUG
+ sql_print_error("Error reading merge file from disk.");
+ return (size_t) - 1;
+ }
+
+ /*
+ Read the first 8 bytes of each chunk, this gives us the actual
+ size of each chunk.
+ */
+ const uchar *block_ptr= block.get();
+ merge_read_uint64(&block_ptr, &total_size);
+ curr_offset += RDB_MERGE_CHUNK_LEN;
+ return total_size;
+}
+
+/* Store key and value w/ their respective delimiters at the given offset */
+void Rdb_index_merge::merge_buf_info::store_key_value(
+ const rocksdb::Slice& key, const rocksdb::Slice& val)
+{
+ store_slice(key);
+ store_slice(val);
+}
+
+void Rdb_index_merge::merge_buf_info::store_slice(const rocksdb::Slice& slice)
+{
+ /* Store length delimiter */
+ merge_store_uint64(&block[curr_offset], slice.size());
+
+ /* Store slice data */
+ memcpy(&block[curr_offset + RDB_MERGE_REC_DELIMITER], slice.data(),
+ slice.size());
+
+ curr_offset += slice.size() + RDB_MERGE_REC_DELIMITER;
+}
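+/*
+  Layout sketch: each record appended by store_key_value() is a pair of
+  length-prefixed slices, and every sorted chunk written to disk is
+  prefixed with its total size:
+
+    chunk : [ chunk size (8 bytes) | record | record | ... ]
+    record: [ key len (8 bytes) | key bytes | val len (8 bytes) | val bytes ]
+
+  The 8-byte widths assume sizeof(size_t) == 8, per the RDB_MERGE_*
+  delimiter macros; merge_read_rec()/read_slice() parse the same layout
+  back out.
+*/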
+
+
+void Rdb_index_merge::merge_reset()
+{
+ /*
+ Either error, or all values in the sort buffer have been written to disk,
+ so we need to clear the offset tree.
+ */
+ m_offset_tree.clear();
+
+ /* Reset sort buffer block */
+ if (m_rec_buf_unsorted && m_rec_buf_unsorted->block)
+ {
+ m_rec_buf_unsorted->curr_offset= 0;
+ }
+
+ /* Reset output buf */
+ if (m_output_buf && m_output_buf->block)
+ {
+ m_output_buf->curr_offset= 0;
+ }
+}
+
+} // namespace myrocks
+
diff --git a/storage/rocksdb/rdb_index_merge.h b/storage/rocksdb/rdb_index_merge.h
new file mode 100644
index 00000000000..24090c335ac
--- /dev/null
+++ b/storage/rocksdb/rdb_index_merge.h
@@ -0,0 +1,229 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#pragma once
+
+/* MySQL header files */
+#include "../sql/log.h"
+#include "./handler.h" /* handler */
+#include "./my_global.h" /* ulonglong */
+
+/* C++ standard header files */
+#include <set>
+#include <vector>
+#include <queue>
+
+/* RocksDB header files */
+#include "rocksdb/db.h"
+
+/* MyRocks header files */
+#include "./rdb_comparator.h"
+
+namespace myrocks {
+
+/*
+ Length of delimiters used during inplace index creation.
+*/
+#define RDB_MERGE_CHUNK_LEN sizeof(size_t)
+#define RDB_MERGE_REC_DELIMITER sizeof(size_t)
+#define RDB_MERGE_KEY_DELIMITER RDB_MERGE_REC_DELIMITER
+#define RDB_MERGE_VAL_DELIMITER RDB_MERGE_REC_DELIMITER
+
+class Rdb_key_def;
+class Rdb_tbl_def;
+
+class Rdb_index_merge {
+ Rdb_index_merge(const Rdb_index_merge& p)= delete;
+ Rdb_index_merge& operator=(const Rdb_index_merge& p)= delete;
+
+ public:
+ /* Information about temporary files used in external merge sort */
+ struct merge_file_info {
+ File fd= -1; /* file descriptor */
+ ulong num_sort_buffers; /* number of sort buffers in temp file */
+ };
+
+ /* Buffer for sorting in main memory. */
+ struct merge_buf_info {
+ /* heap memory allocated for main memory sort/merge */
+ std::unique_ptr<uchar[]> block;
+ ulonglong block_len; /* amount of data bytes allocated for block above */
+ ulonglong curr_offset; /* offset of the record pointer for the block */
+ ulonglong disk_start_offset; /* where the chunk starts on disk */
+ ulonglong disk_curr_offset; /* current offset on disk */
+ ulonglong total_size; /* total # of data bytes in chunk */
+
+ void store_key_value(const rocksdb::Slice& key, const rocksdb::Slice& val)
+ __attribute__((__nonnull__));
+
+ void store_slice(const rocksdb::Slice& slice)
+ __attribute__((__nonnull__));
+
+ size_t prepare(File fd, ulonglong f_offset)
+ __attribute__((__nonnull__));
+
+ int read_next_chunk_from_disk(File fd)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ inline bool is_chunk_finished()
+ {
+ return curr_offset + disk_curr_offset - disk_start_offset == total_size;
+ }
+
+ inline bool has_space(uint64 needed)
+ {
+ return curr_offset + needed <= block_len;
+ }
+
+ explicit merge_buf_info(const ulonglong merge_block_size) :
+ block(nullptr), block_len(merge_block_size), curr_offset(0),
+ disk_start_offset(0), disk_curr_offset(0), total_size(merge_block_size)
+ {
+ /* Will throw an exception if it runs out of memory here */
+ block= std::unique_ptr<uchar[]>(new uchar[merge_block_size]);
+
+ /* Initialize entire buffer to 0 to avoid valgrind errors */
+ memset(block.get(), 0, merge_block_size);
+ }
+ };
+
+ /* Represents an entry in the heap during merge phase of external sort */
+ struct merge_heap_entry
+ {
+ std::shared_ptr<merge_buf_info> chunk_info; /* pointer to buffer info */
+ uchar* block; /* pointer to heap memory where record is stored */
+ const rocksdb::Comparator* const comparator;
+ rocksdb::Slice key; /* current key pointed to by block ptr */
+ rocksdb::Slice val;
+
+ size_t prepare(File fd, ulonglong f_offset, ulonglong chunk_size)
+ __attribute__((__nonnull__));
+
+ int read_next_chunk_from_disk(File fd)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int read_rec(rocksdb::Slice *key, rocksdb::Slice *val)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int read_slice(rocksdb::Slice* slice, const uchar** block_ptr)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ explicit merge_heap_entry(const rocksdb::Comparator* const comparator) :
+ chunk_info(nullptr), block(nullptr), comparator(comparator) {}
+ };
+
+ struct merge_heap_comparator
+ {
+ bool operator() (const std::shared_ptr<merge_heap_entry>& lhs,
+ const std::shared_ptr<merge_heap_entry>& rhs)
+ {
+ return lhs->comparator->Compare(rhs->key, lhs->key) < 0;
+ }
+ };
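+ /*
+ Note: std::priority_queue is a max-heap, so the comparator above
+ deliberately compares rhs against lhs; the net effect is that the
+ entry with the smallest key sits on top of m_merge_min_heap, e.g.
+ (illustration only):
+
+ std::priority_queue<std::shared_ptr<merge_heap_entry>,
+ std::vector<std::shared_ptr<merge_heap_entry>>,
+ merge_heap_comparator> heap;
+ // heap.top() is the entry with the minimum key across all chunks.
+ */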
+
+ /* Represents a record in unsorted buffer */
+ struct merge_record
+ {
+ uchar* block; /* points to offset of key in sort buffer */
+ const rocksdb::Comparator* const comparator;
+
+ bool operator< (const merge_record &record) const
+ {
+ return merge_record_compare(this->block, record.block, comparator) < 0;
+ }
+
+ merge_record(uchar* block, const rocksdb::Comparator* const comparator) :
+ block(block), comparator(comparator) {}
+ };
+
+ private:
+ const ulonglong m_merge_buf_size;
+ const ulonglong m_merge_combine_read_size;
+ const rocksdb::Comparator* m_comparator;
+ struct merge_file_info m_merge_file;
+ std::shared_ptr<merge_buf_info> m_rec_buf_unsorted;
+ std::shared_ptr<merge_buf_info> m_output_buf;
+ std::set<merge_record> m_offset_tree;
+ std::priority_queue<std::shared_ptr<merge_heap_entry>,
+ std::vector<std::shared_ptr<merge_heap_entry>>,
+ merge_heap_comparator> m_merge_min_heap;
+
+ static inline void merge_store_uint64(uchar *dst, uint64 n)
+ {
+ memcpy(dst, &n, sizeof(n));
+ }
+
+ static inline void merge_read_uint64(const uchar **buf_ptr, uint64 *dst)
+ {
+ DBUG_ASSERT(buf_ptr != nullptr);
+ memcpy(dst, *buf_ptr, sizeof(uint64));
+ *buf_ptr += sizeof(uint64);
+ }
+
+ static inline rocksdb::Slice as_slice(const uchar* block)
+ {
+ uint64 len;
+ merge_read_uint64(&block, &len);
+
+ return rocksdb::Slice(reinterpret_cast<const char*>(block), len);
+ }
+
+ static int merge_record_compare(const uchar* a_block, const uchar* b_block,
+ const rocksdb::Comparator* const comparator)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void merge_read_rec(const uchar* block, rocksdb::Slice* key,
+ rocksdb::Slice* val)
+ __attribute__((__nonnull__));
+
+ void read_slice(rocksdb::Slice* slice, const uchar* block_ptr)
+ __attribute__((__nonnull__));
+
+ public:
+ Rdb_index_merge(const ulonglong merge_buf_size,
+ const ulonglong merge_combine_read_size,
+ const rocksdb::Comparator* comparator);
+ ~Rdb_index_merge();
+
+ int init()
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int merge_file_create()
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int add(const rocksdb::Slice& key, const rocksdb::Slice& val)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int merge_buf_write()
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int next(rocksdb::Slice* key, rocksdb::Slice* val)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ int merge_heap_prepare()
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void merge_heap_top(rocksdb::Slice* key, rocksdb::Slice* val)
+ __attribute__((__nonnull__));
+
+ int merge_heap_pop_and_get_next(rocksdb::Slice* key, rocksdb::Slice* val)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+ void merge_reset();
+};
+
+} // namespace myrocks
+
diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc
new file mode 100644
index 00000000000..e8077e2fd89
--- /dev/null
+++ b/storage/rocksdb/rdb_mutex_wrapper.cc
@@ -0,0 +1,234 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header file */
+#include "./rdb_mutex_wrapper.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./rdb_utils.h"
+
+// Internal MySQL APIs not exposed in any header.
+extern "C"
+{
+void thd_enter_cond(MYSQL_THD thd, mysql_cond_t *cond, mysql_mutex_t *mutex,
+ const PSI_stage_info *stage, PSI_stage_info *old_stage);
+void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage);
+}
+
+using namespace rocksdb;
+
+namespace myrocks {
+
+static
+PSI_stage_info stage_waiting_on_row_lock2= { 0, "Waiting for row lock", 0};
+
+static const int64_t MICROSECS= 1000*1000;
+// A timeout of several years' worth of microseconds is as good as an
+// infinite timeout.
+static const int64_t BIG_TIMEOUT= MICROSECS * 60 * 60 * 24 * 7 * 365;
+
+Rdb_cond_var::Rdb_cond_var() {
+ mysql_cond_init(0, &m_cond, nullptr);
+}
+
+Rdb_cond_var::~Rdb_cond_var() {
+ mysql_cond_destroy(&m_cond);
+}
+
+Status Rdb_cond_var::Wait(std::shared_ptr<TransactionDBMutex> mutex_arg) {
+ return WaitFor(mutex_arg, BIG_TIMEOUT);
+}
+
+
+/*
+ @brief
+ Wait on condition variable. The caller must make sure that we own
+ *mutex_ptr. The mutex is released and re-acquired by the wait function.
+
+ @param
+ timeout_micros Timeout in microseconds. Negative value means no timeout.
+
+ @return
+ Status::OK() - Wait was successful
+ Status::TimedOut() - Timed out or wait killed (the caller can check
+ thd_killed() to determine which occurred)
+*/
+
+Status
+Rdb_cond_var::WaitFor(std::shared_ptr<TransactionDBMutex> mutex_arg,
+ int64_t timeout_micros)
+{
+ auto *mutex_obj= reinterpret_cast<Rdb_mutex*>(mutex_arg.get());
+ DBUG_ASSERT(mutex_obj != nullptr);
+
+ mysql_mutex_t * const mutex_ptr= &mutex_obj->m_mutex;
+
+ int res= 0;
+ struct timespec wait_timeout;
+
+ if (timeout_micros < 0)
+ timeout_micros= BIG_TIMEOUT;
+ set_timespec_nsec(wait_timeout, timeout_micros*1000);
+
+#ifndef STANDALONE_UNITTEST
+ PSI_stage_info old_stage;
+ mysql_mutex_assert_owner(mutex_ptr);
+
+ if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0)
+ {
+ my_core::thd_enter_cond(current_thd, &m_cond, mutex_ptr,
+ &stage_waiting_on_row_lock2, &old_stage);
+ /*
+ After the mysql_cond_timedwait we need to make this call
+
+ my_core::thd_exit_cond(thd, &old_stage);
+
+ to inform the SQL layer that the KILLable wait has ended. However,
+ that will cause the mutex to be released. Defer that call until the
+ mutex is unlocked by RocksDB's Pessimistic Transactions system.
+ */
+ mutex_obj->set_unlock_action(&old_stage);
+ }
+
+#endif
+ bool killed= false;
+
+ do
+ {
+ res= mysql_cond_timedwait(&m_cond, mutex_ptr, &wait_timeout);
+
+#ifndef STANDALONE_UNITTEST
+ if (current_thd)
+ killed= my_core::thd_killed(current_thd);
+#endif
+ } while (!killed && res == EINTR);
+
+ if (res || killed)
+ return Status::TimedOut();
+ else
+ return Status::OK();
+}
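+/*
+  Call-pattern sketch (illustration only): the pessimistic transaction
+  layer in RocksDB is expected to own the mutex around the wait, roughly:
+
+    mutex->Lock();
+    while (!predicate)
+      cond->WaitFor(mutex, timeout_micros);  // releases + re-acquires mutex
+    mutex->UnLock();
+
+  predicate and timeout_micros are placeholders for the caller's state.
+*/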
+
+
+/*
+
+ @note
+ This function may be called while not holding the mutex that is used to wait
+ on the condition variable.
+
+ The manual page says ( http://linux.die.net/man/3/pthread_cond_signal):
+
+ The pthread_cond_broadcast() or pthread_cond_signal() functions may be called
+ by a thread whether or not it currently owns the mutex that threads calling
+ pthread_cond_wait() or pthread_cond_timedwait() have associated with the
+ condition variable during their waits; however, IF PREDICTABLE SCHEDULING
+ BEHAVIOR IS REQUIRED, THEN THAT MUTEX SHALL BE LOCKED by the thread calling
+ pthread_cond_broadcast() or pthread_cond_signal().
+
+ What's "predicate scheduling" and do we need it? The explanation is here:
+
+ https://groups.google.com/forum/?hl=ky#!msg/comp.programming.threads/wEUgPq541v8/ZByyyS8acqMJ
+ "The problem (from the realtime side) with condition variables is that
+ if you can signal/broadcast without holding the mutex, and any thread
+ currently running can acquire an unlocked mutex and check a predicate
+ without reference to the condition variable, then you can have an
+ indirect priority inversion."
+
+ Another possible consequence is that one can create spurious wake-ups when
+ there are multiple threads signaling the condition.
+
+ None of this looks like a problem for our use case.
+*/
+
+void Rdb_cond_var::Notify()
+{
+ mysql_cond_signal(&m_cond);
+}
+
+
+/*
+ @note
+ This is called without holding the mutex that's used for waiting on the
+ condition. See ::Notify().
+*/
+void Rdb_cond_var::NotifyAll()
+{
+ mysql_cond_broadcast(&m_cond);
+}
+
+
+Rdb_mutex::Rdb_mutex()
+{
+ mysql_mutex_init(0 /* Don't register in P_S. */, &m_mutex,
+ MY_MUTEX_INIT_FAST);
+}
+
+Rdb_mutex::~Rdb_mutex() {
+ mysql_mutex_destroy(&m_mutex);
+}
+
+Status Rdb_mutex::Lock() {
+ mysql_mutex_lock(&m_mutex);
+ DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0);
+ return Status::OK();
+}
+
+// Attempt to acquire lock. If timeout is non-negative, the operation may
+// fail after this many microseconds.
+// If implementing a custom version of this class, the implementation may
+// choose to ignore the timeout.
+// Return OK on success, or other Status on failure.
+Status Rdb_mutex::TryLockFor(int64_t timeout_time __attribute__((__unused__)))
+{
+ /*
+ Note: PThreads API has pthread_mutex_timedlock(), but mysql's
+ mysql_mutex_* wrappers do not wrap that function.
+ */
+ mysql_mutex_lock(&m_mutex);
+ return Status::OK();
+}
+
+
+#ifndef STANDALONE_UNITTEST
+void Rdb_mutex::set_unlock_action(PSI_stage_info *old_stage_arg)
+{
+ DBUG_ASSERT(old_stage_arg != nullptr);
+
+ mysql_mutex_assert_owner(&m_mutex);
+ DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0);
+
+ m_old_stage_info[current_thd] =
+ std::make_shared<PSI_stage_info>(*old_stage_arg);
+}
+#endif
+
+// Unlock Mutex that was successfully locked by Lock() or TryLockFor()
+void Rdb_mutex::UnLock() {
+#ifndef STANDALONE_UNITTEST
+ if (m_old_stage_info.count(current_thd) > 0)
+ {
+ std::shared_ptr<PSI_stage_info> old_stage = m_old_stage_info[current_thd];
+ m_old_stage_info.erase(current_thd);
+ /* The following will call mysql_mutex_unlock */
+ my_core::thd_exit_cond(current_thd, old_stage.get());
+ return;
+ }
+#endif
+ mysql_mutex_unlock(&m_mutex);
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h
new file mode 100644
index 00000000000..7d0e4169ade
--- /dev/null
+++ b/storage/rocksdb/rdb_mutex_wrapper.h
@@ -0,0 +1,139 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#pragma once
+
+/* C++ standard header file */
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <unordered_map>
+
+/* MySQL header files */
+#include "./my_sys.h"
+#include "mysql/plugin.h"
+
+/* RocksDB header files */
+#include "rocksdb/utilities/transaction_db_mutex.h"
+
+namespace myrocks {
+
+class Rdb_mutex: public rocksdb::TransactionDBMutex {
+ Rdb_mutex(const Rdb_mutex& p) = delete;
+ Rdb_mutex& operator = (const Rdb_mutex& p)=delete;
+ public:
+ Rdb_mutex();
+ virtual ~Rdb_mutex();
+
+ /*
+ Override parent class's virtual methods of interest.
+ */
+
+ // Attempt to acquire lock. Return OK on success, or other Status on failure.
+ // If returned status is OK, TransactionDB will eventually call UnLock().
+ virtual rocksdb::Status Lock() override;
+
+ // Attempt to acquire lock. If timeout is non-negative, operation should be
+ // failed after this many microseconds.
+ // Returns OK on success,
+ // TimedOut if timed out,
+ // or other Status on failure.
+ // If returned status is OK, TransactionDB will eventually call UnLock().
+ virtual rocksdb::Status TryLockFor(
+ int64_t timeout_time __attribute__((__unused__))) override;
+
+ // Unlock Mutex that was successfully locked by Lock() or TryLockFor()
+ virtual void UnLock() override;
+
+ private:
+ mysql_mutex_t m_mutex;
+ friend class Rdb_cond_var;
+
+#ifndef STANDALONE_UNITTEST
+ void set_unlock_action(PSI_stage_info *old_stage_arg);
+ std::unordered_map<THD*, std::shared_ptr<PSI_stage_info>> m_old_stage_info;
+#endif
+};
+
+
+class Rdb_cond_var: public rocksdb::TransactionDBCondVar {
+ public:
+ Rdb_cond_var();
+ virtual ~Rdb_cond_var();
+
+ /*
+ Override parent class's virtual methods of interest.
+ */
+
+ // Block current thread until condition variable is notified by a call to
+ // Notify() or NotifyAll(). Wait() will be called with mutex locked.
+ // Returns OK if notified.
+ // Returns non-OK if TransactionDB should stop waiting and fail the operation.
+ // May return OK spuriously even if not notified.
+ virtual rocksdb::Status
+ Wait(std::shared_ptr<rocksdb::TransactionDBMutex> mutex) override;
+
+ // Block current thread until condition variable is notified by a call to
+ // Notify() or NotifyAll(), or if the timeout is reached.
+ // If timeout is non-negative, operation should be failed after this many
+ // microseconds.
+ // If implementing a custom version of this class, the implementation may
+ // choose to ignore the timeout.
+ //
+ // Returns OK if notified.
+ // Returns TimedOut if timeout is reached.
+ // Returns other status if TransactionDB should otherwise stop waiting and
+ // fail the operation.
+ // May return OK spuriously even if not notified.
+ virtual rocksdb::Status
+ WaitFor(std::shared_ptr<rocksdb::TransactionDBMutex> mutex,
+ int64_t timeout_time) override;
+
+ // If any threads are waiting on *this, unblock at least one of the
+ // waiting threads.
+ virtual void Notify() override;
+
+ // Unblocks all threads waiting on *this.
+ virtual void NotifyAll() override;
+
+ private:
+ mysql_cond_t m_cond;
+};
+
+
+class Rdb_mutex_factory : public rocksdb::TransactionDBMutexFactory {
+ public:
+ /*
+ Override parent class's virtual methods of interest.
+ */
+
+ virtual std::shared_ptr<rocksdb::TransactionDBMutex>
+ AllocateMutex() override {
+ return
+ std::make_shared<Rdb_mutex>();
+ }
+
+ virtual std::shared_ptr<rocksdb::TransactionDBCondVar>
+ AllocateCondVar() override {
+ return
+ std::make_shared<Rdb_cond_var>();
+ }
+
+ virtual ~Rdb_mutex_factory() {}
+};
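+/*
+  Usage sketch (illustration only): an instance of this factory can be
+  plugged into RocksDB when the TransactionDB is opened, so that lock
+  waits go through the MySQL-aware wrappers above; assuming RocksDB's
+  TransactionDBOptions::custom_mutex_factory member:
+
+    rocksdb::TransactionDBOptions txn_db_options;
+    txn_db_options.custom_mutex_factory= std::make_shared<Rdb_mutex_factory>();
+*/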
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc
new file mode 100644
index 00000000000..cd0d9e57c2b
--- /dev/null
+++ b/storage/rocksdb/rdb_perf_context.cc
@@ -0,0 +1,240 @@
+/*
+ Portions Copyright (c) 2015-Present, Facebook, Inc.
+ Portions Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header file */
+#include "./rdb_perf_context.h"
+
+/* C++ system header files */
+#include <string>
+
+/* RocksDB header files */
+#include "rocksdb/iostats_context.h"
+#include "rocksdb/perf_context.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb_proto.h"
+
+namespace myrocks {
+
+// To add a new metric:
+// 1. Update the PC enum in rdb_perf_context.h
+// 2. Update sections (A), (B), and (C) below
+// 3. Update perf_context.test and show_engine.test
+
+std::string rdb_pc_stat_types[]=
+{
+ // (A) These should be in the same order as the PC enum
+ "USER_KEY_COMPARISON_COUNT",
+ "BLOCK_CACHE_HIT_COUNT",
+ "BLOCK_READ_COUNT",
+ "BLOCK_READ_BYTE",
+ "BLOCK_READ_TIME",
+ "BLOCK_CHECKSUM_TIME",
+ "BLOCK_DECOMPRESS_TIME",
+ "INTERNAL_KEY_SKIPPED_COUNT",
+ "INTERNAL_DELETE_SKIPPED_COUNT",
+ "GET_SNAPSHOT_TIME",
+ "GET_FROM_MEMTABLE_TIME",
+ "GET_FROM_MEMTABLE_COUNT",
+ "GET_POST_PROCESS_TIME",
+ "GET_FROM_OUTPUT_FILES_TIME",
+ "SEEK_ON_MEMTABLE_TIME",
+ "SEEK_ON_MEMTABLE_COUNT",
+ "SEEK_CHILD_SEEK_TIME",
+ "SEEK_CHILD_SEEK_COUNT",
+ "SEEK_IN_HEAP_TIME",
+ "SEEK_INTERNAL_SEEK_TIME",
+ "FIND_NEXT_USER_ENTRY_TIME",
+ "WRITE_WAL_TIME",
+ "WRITE_MEMTABLE_TIME",
+ "WRITE_DELAY_TIME",
+ "WRITE_PRE_AND_POST_PROCESS_TIME",
+ "DB_MUTEX_LOCK_NANOS",
+ "DB_CONDITION_WAIT_NANOS",
+ "MERGE_OPERATOR_TIME_NANOS",
+ "READ_INDEX_BLOCK_NANOS",
+ "READ_FILTER_BLOCK_NANOS",
+ "NEW_TABLE_BLOCK_ITER_NANOS",
+ "NEW_TABLE_ITERATOR_NANOS",
+ "BLOCK_SEEK_NANOS",
+ "FIND_TABLE_NANOS",
+ "IO_THREAD_POOL_ID",
+ "IO_BYTES_WRITTEN",
+ "IO_BYTES_READ",
+ "IO_OPEN_NANOS",
+ "IO_ALLOCATE_NANOS",
+ "IO_WRITE_NANOS",
+ "IO_READ_NANOS",
+ "IO_RANGE_SYNC_NANOS",
+ "IO_LOGGER_NANOS"
+};
+
+#define IO_PERF_RECORD(_field_) \
+ do { \
+ if (rocksdb::perf_context._field_ > 0) \
+ counters->m_value[idx] += rocksdb::perf_context._field_; \
+ idx++; \
+ } while (0)
+#define IO_STAT_RECORD(_field_) \
+ do { \
+ if (rocksdb::iostats_context._field_ > 0) \
+ counters->m_value[idx] += rocksdb::iostats_context._field_; \
+ idx++; \
+ } while (0)
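+/*
+  Note: each IO_PERF_RECORD()/IO_STAT_RECORD() invocation consumes one
+  slot of counters->m_value[] and then advances idx, so the call order in
+  harvest_diffs() below must match the PC_* enum exactly. For example,
+  IO_PERF_RECORD(block_read_count) expands to roughly:
+
+    if (rocksdb::perf_context.block_read_count > 0)
+      counters->m_value[idx] += rocksdb::perf_context.block_read_count;
+    idx++;
+*/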
+
+static void harvest_diffs(Rdb_atomic_perf_counters *counters)
+{
+ // (C) These should be in the same order as the PC enum
+ size_t idx= 0;
+ IO_PERF_RECORD(user_key_comparison_count);
+ IO_PERF_RECORD(block_cache_hit_count);
+ IO_PERF_RECORD(block_read_count);
+ IO_PERF_RECORD(block_read_byte);
+ IO_PERF_RECORD(block_read_time);
+ IO_PERF_RECORD(block_checksum_time);
+ IO_PERF_RECORD(block_decompress_time);
+ IO_PERF_RECORD(internal_key_skipped_count);
+ IO_PERF_RECORD(internal_delete_skipped_count);
+ IO_PERF_RECORD(get_snapshot_time);
+ IO_PERF_RECORD(get_from_memtable_time);
+ IO_PERF_RECORD(get_from_memtable_count);
+ IO_PERF_RECORD(get_post_process_time);
+ IO_PERF_RECORD(get_from_output_files_time);
+ IO_PERF_RECORD(seek_on_memtable_time);
+ IO_PERF_RECORD(seek_on_memtable_count);
+ IO_PERF_RECORD(seek_child_seek_time);
+ IO_PERF_RECORD(seek_child_seek_count);
+ IO_PERF_RECORD(seek_min_heap_time);
+ IO_PERF_RECORD(seek_internal_seek_time);
+ IO_PERF_RECORD(find_next_user_entry_time);
+ IO_PERF_RECORD(write_wal_time);
+ IO_PERF_RECORD(write_memtable_time);
+ IO_PERF_RECORD(write_delay_time);
+ IO_PERF_RECORD(write_pre_and_post_process_time);
+ IO_PERF_RECORD(db_mutex_lock_nanos);
+ IO_PERF_RECORD(db_condition_wait_nanos);
+ IO_PERF_RECORD(merge_operator_time_nanos);
+ IO_PERF_RECORD(read_index_block_nanos);
+ IO_PERF_RECORD(read_filter_block_nanos);
+ IO_PERF_RECORD(new_table_block_iter_nanos);
+ IO_PERF_RECORD(new_table_iterator_nanos);
+ IO_PERF_RECORD(block_seek_nanos);
+ IO_PERF_RECORD(find_table_nanos);
+ IO_STAT_RECORD(thread_pool_id);
+ IO_STAT_RECORD(bytes_written);
+ IO_STAT_RECORD(bytes_read);
+ IO_STAT_RECORD(open_nanos);
+ IO_STAT_RECORD(allocate_nanos);
+ IO_STAT_RECORD(write_nanos);
+ IO_STAT_RECORD(read_nanos);
+ IO_STAT_RECORD(range_sync_nanos);
+ IO_STAT_RECORD(logger_nanos);
+}
+
+#undef IO_PERF_RECORD
+#undef IO_STAT_RECORD
+
+
+static Rdb_atomic_perf_counters rdb_global_perf_counters;
+
+void rdb_get_global_perf_counters(Rdb_perf_counters *counters)
+{
+ DBUG_ASSERT(counters != nullptr);
+
+ counters->load(rdb_global_perf_counters);
+}
+
+void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters)
+{
+ for (int i= 0; i < PC_MAX_IDX; i++) {
+ m_value[i]= atomic_counters.m_value[i].load(std::memory_order_relaxed);
+ }
+}
+
+bool Rdb_io_perf::start(uint32_t perf_context_level)
+{
+ rocksdb::PerfLevel perf_level=
+ static_cast<rocksdb::PerfLevel>(perf_context_level);
+
+ if (rocksdb::GetPerfLevel() != perf_level)
+ {
+ rocksdb::SetPerfLevel(perf_level);
+ }
+
+ if (perf_level == rocksdb::kDisable)
+ {
+ return false;
+ }
+
+ rocksdb::perf_context.Reset();
+ rocksdb::iostats_context.Reset();
+ return true;
+}
+
+void Rdb_io_perf::end_and_record(uint32_t perf_context_level)
+{
+ rocksdb::PerfLevel perf_level=
+ static_cast<rocksdb::PerfLevel>(perf_context_level);
+
+ if (perf_level == rocksdb::kDisable)
+ {
+ return;
+ }
+
+ if (m_atomic_counters)
+ {
+ harvest_diffs(m_atomic_counters);
+ }
+ harvest_diffs(&rdb_global_perf_counters);
+
+ if (m_shared_io_perf_read &&
+ (rocksdb::perf_context.block_read_byte != 0 ||
+ rocksdb::perf_context.block_read_count != 0 ||
+ rocksdb::perf_context.block_read_time != 0))
+ {
+ my_io_perf_t io_perf_read;
+
+ my_io_perf_init(&io_perf_read);
+ io_perf_read.bytes= rocksdb::perf_context.block_read_byte;
+ io_perf_read.requests= rocksdb::perf_context.block_read_count;
+
+ /*
+ Rocksdb does not distinguish between I/O service and wait time, so just
+ use svc time.
+ */
+ io_perf_read.svc_time_max= io_perf_read.svc_time=
+ rocksdb::perf_context.block_read_time;
+
+ my_io_perf_sum_atomic_helper(m_shared_io_perf_read, &io_perf_read);
+ my_io_perf_sum(&m_stats->table_io_perf_read, &io_perf_read);
+ }
+
+ if (m_stats) {
+ if (rocksdb::perf_context.internal_key_skipped_count != 0)
+ {
+ m_stats->key_skipped += rocksdb::perf_context.internal_key_skipped_count;
+ }
+
+ if (rocksdb::perf_context.internal_delete_skipped_count != 0)
+ {
+ m_stats->delete_skipped +=
+ rocksdb::perf_context.internal_delete_skipped_count;
+ }
+ }
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h
new file mode 100644
index 00000000000..1e01e933895
--- /dev/null
+++ b/storage/rocksdb/rdb_perf_context.h
@@ -0,0 +1,134 @@
+/*
+ Portions Copyright (c) 2015-Present, Facebook, Inc.
+ Portions Copyright (c) 2012,2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ standard header files */
+#include <atomic>
+#include <cstdint>
+#include <string>
+
+/* MySQL header files */
+#include "./handler.h"
+#include <my_global.h>
+
+namespace myrocks {
+
+enum {
+ PC_USER_KEY_COMPARISON_COUNT = 0,
+ PC_BLOCK_CACHE_HIT_COUNT,
+ PC_BLOCK_READ_COUNT,
+ PC_BLOCK_READ_BYTE,
+ PC_BLOCK_READ_TIME,
+ PC_BLOCK_CHECKSUM_TIME,
+ PC_BLOCK_DECOMPRESS_TIME,
+ PC_KEY_SKIPPED,
+ PC_DELETE_SKIPPED,
+ PC_GET_SNAPSHOT_TIME,
+ PC_GET_FROM_MEMTABLE_TIME,
+ PC_GET_FROM_MEMTABLE_COUNT,
+ PC_GET_POST_PROCESS_TIME,
+ PC_GET_FROM_OUTPUT_FILES_TIME,
+ PC_SEEK_ON_MEMTABLE_TIME,
+ PC_SEEK_ON_MEMTABLE_COUNT,
+ PC_SEEK_CHILD_SEEK_TIME,
+ PC_SEEK_CHILD_SEEK_COUNT,
+ PC_SEEK_MIN_HEAP_TIME,
+ PC_SEEK_INTERNAL_SEEK_TIME,
+ PC_FIND_NEXT_USER_ENTRY_TIME,
+ PC_WRITE_WAL_TIME,
+ PC_WRITE_MEMTABLE_TIME,
+ PC_WRITE_DELAY_TIME,
+ PC_WRITE_PRE_AND_POST_PROCESSS_TIME,
+ PC_DB_MUTEX_LOCK_NANOS,
+ PC_DB_CONDITION_WAIT_NANOS,
+ PC_MERGE_OPERATOR_TIME_NANOS,
+ PC_READ_INDEX_BLOCK_NANOS,
+ PC_READ_FILTER_BLOCK_NANOS,
+ PC_NEW_TABLE_BLOCK_ITER_NANOS,
+ PC_NEW_TABLE_ITERATOR_NANOS,
+ PC_BLOCK_SEEK_NANOS,
+ PC_FIND_TABLE_NANOS,
+ PC_IO_THREAD_POOL_ID,
+ PC_IO_BYTES_WRITTEN,
+ PC_IO_BYTES_READ,
+ PC_IO_OPEN_NANOS,
+ PC_IO_ALLOCATE_NANOS,
+ PC_IO_WRITE_NANOS,
+ PC_IO_READ_NANOS,
+ PC_IO_RANGE_SYNC_NANOS,
+ PC_IO_LOGGER_NANOS,
+ PC_MAX_IDX
+};
+
+class Rdb_perf_counters;
+
+/*
+ A collection of performance counters that can be safely incremented by
+ multiple threads since it stores atomic datapoints.
+*/
+struct Rdb_atomic_perf_counters
+{
+ std::atomic_ullong m_value[PC_MAX_IDX];
+};
+
+/*
+ A collection of performance counters that is meant to be incremented by
+ a single thread.
+*/
+class Rdb_perf_counters
+{
+ public:
+ uint64_t m_value[PC_MAX_IDX];
+
+ void load(const Rdb_atomic_perf_counters &atomic_counters);
+};
+
+extern std::string rdb_pc_stat_types[PC_MAX_IDX];
+
+/*
+ Perf timers for data reads
+ */
+class Rdb_io_perf
+{
+ // Context management
+ Rdb_atomic_perf_counters *m_atomic_counters= nullptr;
+ my_io_perf_atomic_t *m_shared_io_perf_read= nullptr;
+ ha_statistics *m_stats= nullptr;
+
+ public:
+ void init(Rdb_atomic_perf_counters *atomic_counters,
+ my_io_perf_atomic_t *shared_io_perf_read,
+ ha_statistics *stats)
+ {
+ DBUG_ASSERT(atomic_counters != nullptr);
+ DBUG_ASSERT(shared_io_perf_read != nullptr);
+ DBUG_ASSERT(stats != nullptr);
+
+ m_atomic_counters= atomic_counters;
+ m_shared_io_perf_read= shared_io_perf_read;
+ m_stats= stats;
+ }
+
+ bool start(uint32_t perf_context_level);
+ void end_and_record(uint32_t perf_context_level);
+
+ explicit Rdb_io_perf() : m_atomic_counters(nullptr),
+ m_shared_io_perf_read(nullptr),
+ m_stats(nullptr) {}
+};
+
+} // namespace myrocks
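
For orientation, a minimal usage sketch of Rdb_io_perf (not part of this patch):
the function and variable names below are illustrative only, and the idea that
start() enables collection at the given perf context level is inferred from
end_and_record() above.

    // Hypothetical sketch; names and flow are assumptions, not MyRocks code.
    static void example_read_path(myrocks::Rdb_io_perf *io_perf,
                                  myrocks::Rdb_atomic_perf_counters *counters,
                                  my_io_perf_atomic_t *shared_read,
                                  ha_statistics *stats,
                                  uint32_t perf_level)
    {
      io_perf->init(counters, shared_read, stats);  // bind the three sinks once

      if (io_perf->start(perf_level))  // assumed: false when not tracked at this level
      {
        // ... perform the RocksDB reads for this statement ...
        io_perf->end_and_record(perf_level);  // fold the deltas into the sinks
      }
    }
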
diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc
new file mode 100644
index 00000000000..d131545e476
--- /dev/null
+++ b/storage/rocksdb/rdb_sst_info.cc
@@ -0,0 +1,417 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header file */
+#include "./rdb_sst_info.h"
+
+/* C++ standard header files */
+#include <cstdio>
+#include <string>
+#include <vector>
+
+/* MySQL header files */
+#include "../sql/log.h"
+#include "./my_dir.h"
+
+/* RocksDB header files */
+#include "rocksdb/db.h"
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+#include "./ha_rocksdb_proto.h"
+#include "./rdb_cf_options.h"
+
+namespace myrocks {
+
+Rdb_sst_file::Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
+ const rocksdb::DBOptions& db_options,
+ const std::string& name) :
+ m_db(db),
+ m_cf(cf),
+ m_db_options(db_options),
+ m_sst_file_writer(nullptr),
+ m_name(name)
+{
+ DBUG_ASSERT(db != nullptr);
+ DBUG_ASSERT(cf != nullptr);
+}
+
+Rdb_sst_file::~Rdb_sst_file()
+{
+ // Make sure we clean up
+ delete m_sst_file_writer;
+ m_sst_file_writer= nullptr;
+
+  // In case something went wrong, attempt to delete the temporary file.
+  // If everything went fine, that file will have been renamed and this
+  // call to std::remove() will simply fail.
+ std::remove(m_name.c_str());
+}
+
+rocksdb::Status Rdb_sst_file::open()
+{
+ DBUG_ASSERT(m_sst_file_writer == nullptr);
+
+ rocksdb::ColumnFamilyDescriptor cf_descr;
+
+ rocksdb::Status s= m_cf->GetDescriptor(&cf_descr);
+ if (!s.ok())
+ {
+ return s;
+ }
+
+ // Create an sst file writer with the current options and comparator
+ const rocksdb::Comparator* comparator= m_cf->GetComparator();
+
+ rocksdb::EnvOptions env_options(m_db_options);
+ rocksdb::Options options(m_db_options, cf_descr.options);
+
+ m_sst_file_writer=
+ new rocksdb::SstFileWriter(env_options, options, comparator);
+
+ s= m_sst_file_writer->Open(m_name);
+ if (!s.ok())
+ {
+ delete m_sst_file_writer;
+ m_sst_file_writer= nullptr;
+ }
+
+ return s;
+}
+
+rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice& key,
+ const rocksdb::Slice& value)
+{
+ DBUG_ASSERT(m_sst_file_writer != nullptr);
+
+ // Add the specified key/value to the sst file writer
+ return m_sst_file_writer->Add(key, value);
+}
+
+// This function is run by the background thread
+rocksdb::Status Rdb_sst_file::commit()
+{
+ DBUG_ASSERT(m_sst_file_writer != nullptr);
+
+ rocksdb::Status s;
+
+ // Close out the sst file
+ s= m_sst_file_writer->Finish();
+ if (s.ok())
+ {
+ std::vector<std::string> files = { m_name };
+ // Add the file to the database
+ // Set the skip_snapshot_check parameter to true since no one
+ // should be accessing the table we are bulk loading
+ s= m_db->AddFile(m_cf, files, true, true);
+ }
+
+ delete m_sst_file_writer;
+ m_sst_file_writer= nullptr;
+
+ return s;
+}
+
+Rdb_sst_info::Rdb_sst_info(rocksdb::DB* db, const std::string& tablename,
+ const std::string& indexname,
+ rocksdb::ColumnFamilyHandle* cf,
+ const rocksdb::DBOptions& db_options) :
+ m_db(db),
+ m_cf(cf),
+ m_db_options(db_options),
+ m_curr_size(0),
+ m_sst_count(0),
+ m_error_msg(""),
+#if defined(RDB_SST_INFO_USE_THREAD)
+ m_queue(),
+ m_mutex(),
+ m_cond(),
+ m_thread(nullptr),
+ m_finished(false),
+#endif
+ m_sst_file(nullptr)
+{
+ m_prefix= db->GetName() + "/";
+
+ std::string normalized_table;
+ if (rdb_normalize_tablename(tablename.c_str(), &normalized_table))
+ {
+ // We failed to get a normalized table name. This should never happen,
+ // but handle it anyway.
+ m_prefix += "fallback_" +
+ std::to_string(
+ reinterpret_cast<intptr_t>(reinterpret_cast<void*>(this))) + "_" +
+ indexname + "_";
+ }
+ else
+ {
+ m_prefix += normalized_table + "_" + indexname + "_";
+ }
+
+ rocksdb::ColumnFamilyDescriptor cf_descr;
+ rocksdb::Status s= m_cf->GetDescriptor(&cf_descr);
+ if (!s.ok())
+ {
+ // Default size if we can't get the cf's target size
+ m_max_size= 64*1024*1024;
+ }
+ else
+ {
+ // Set the maximum size to 3 times the cf's target size
+ m_max_size= cf_descr.options.target_file_size_base * 3;
+ }
+}
+
+Rdb_sst_info::~Rdb_sst_info()
+{
+ DBUG_ASSERT(m_sst_file == nullptr);
+#if defined(RDB_SST_INFO_USE_THREAD)
+ DBUG_ASSERT(m_thread == nullptr);
+#endif
+}
+
+int Rdb_sst_info::open_new_sst_file()
+{
+ DBUG_ASSERT(m_sst_file == nullptr);
+
+ // Create the new sst file's name
+ std::string name= m_prefix + std::to_string(m_sst_count++) + m_suffix;
+
+ // Create the new sst file object
+ m_sst_file= new Rdb_sst_file(m_db, m_cf, m_db_options, name);
+
+ // Open the sst file
+ rocksdb::Status s= m_sst_file->open();
+ if (!s.ok())
+ {
+ set_error_msg(s.ToString());
+ delete m_sst_file;
+ m_sst_file= nullptr;
+ return 1;
+ }
+
+ m_curr_size= 0;
+
+ return 0;
+}
+
+void Rdb_sst_info::close_curr_sst_file()
+{
+ DBUG_ASSERT(m_sst_file != nullptr);
+ DBUG_ASSERT(m_curr_size > 0);
+
+#if defined(RDB_SST_INFO_USE_THREAD)
+ if (m_thread == nullptr)
+ {
+ // We haven't already started a background thread, so start one
+ m_thread= new std::thread(thread_fcn, this);
+ }
+
+ DBUG_ASSERT(m_thread != nullptr);
+
+ {
+ // Add this finished sst file to the queue (while holding mutex)
+ std::lock_guard<std::mutex> guard(m_mutex);
+ m_queue.push(m_sst_file);
+ }
+
+ // Notify the background thread that there is a new entry in the queue
+ m_cond.notify_one();
+#else
+ rocksdb::Status s= m_sst_file->commit();
+ if (!s.ok())
+ {
+ set_error_msg(s.ToString());
+ }
+
+ delete m_sst_file;
+#endif
+
+ // Reset for next sst file
+ m_sst_file= nullptr;
+ m_curr_size= 0;
+}
+
+int Rdb_sst_info::put(const rocksdb::Slice& key,
+ const rocksdb::Slice& value)
+{
+ int rc;
+
+ if (m_curr_size >= m_max_size)
+ {
+ // The current sst file has reached its maximum, close it out
+ close_curr_sst_file();
+
+ // While we are here, check to see if we have had any errors from the
+ // background thread - we don't want to wait for the end to report them
+ if (!m_error_msg.empty())
+ {
+ return 1;
+ }
+ }
+
+ if (m_curr_size == 0)
+ {
+ // We don't have an sst file open - open one
+ rc= open_new_sst_file();
+ if (rc != 0)
+ {
+ return rc;
+ }
+ }
+
+ DBUG_ASSERT(m_sst_file != nullptr);
+
+ // Add the key/value to the current sst file
+ rocksdb::Status s= m_sst_file->put(key, value);
+ if (!s.ok())
+ {
+ set_error_msg(s.ToString());
+ return 1;
+ }
+
+ m_curr_size += key.size() + value.size();
+
+ return 0;
+}
+
+int Rdb_sst_info::commit()
+{
+ if (m_curr_size > 0)
+ {
+ // Close out any existing files
+ close_curr_sst_file();
+ }
+
+#if defined(RDB_SST_INFO_USE_THREAD)
+ if (m_thread != nullptr)
+ {
+ // Tell the background thread we are done
+ m_finished= true;
+ m_cond.notify_one();
+
+ // Wait for the background thread to finish
+ m_thread->join();
+ delete m_thread;
+ m_thread= nullptr;
+ }
+#endif
+
+ // Did we get any errors?
+ if (!m_error_msg.empty())
+ {
+ return 1;
+ }
+
+ return 0;
+}
+
+void Rdb_sst_info::set_error_msg(const std::string& msg)
+{
+#if defined(RDB_SST_INFO_USE_THREAD)
+ // Both the foreground and background threads can set the error message
+ // so lock the mutex to protect it. We only want the first error that
+ // we encounter.
+ std::lock_guard<std::mutex> guard(m_mutex);
+#endif
+ my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str());
+ if (m_error_msg.empty())
+ {
+ m_error_msg= msg;
+ }
+}
+
+#if defined(RDB_SST_INFO_USE_THREAD)
+// Static thread function - the Rdb_sst_info object is in 'object'
+void Rdb_sst_info::thread_fcn(void* object)
+{
+ reinterpret_cast<Rdb_sst_info*>(object)->run_thread();
+}
+
+void Rdb_sst_info::run_thread()
+{
+ std::unique_lock<std::mutex> lk(m_mutex);
+
+ do
+ {
+ // Wait for notification or 1 second to pass
+ m_cond.wait_for(lk, std::chrono::seconds(1));
+
+ // Inner loop pulls off all Rdb_sst_file entries and processes them
+ while (!m_queue.empty())
+ {
+ Rdb_sst_file* sst_file= m_queue.front();
+ m_queue.pop();
+
+ // Release the lock - we don't want to hold it while committing the file
+ lk.unlock();
+
+ // Close out the sst file and add it to the database
+ rocksdb::Status s= sst_file->commit();
+ if (!s.ok())
+ {
+ set_error_msg(s.ToString());
+ }
+
+ delete sst_file;
+
+ // Reacquire the lock for the next inner loop iteration
+ lk.lock();
+ }
+
+    // If the queue is empty and the main thread has indicated we should
+    // exit, break out of the loop.
+ } while (!m_finished);
+
+ DBUG_ASSERT(m_queue.empty());
+}
+#endif
+
+void Rdb_sst_info::init(rocksdb::DB* db)
+{
+ std::string path= db->GetName() + FN_DIRSEP;
+ struct st_my_dir* dir_info= my_dir(path.c_str(), MYF(MY_DONT_SORT));
+
+ // Access the directory
+ if (dir_info == nullptr)
+ {
+ // NO_LINT_DEBUG
+ sql_print_warning("RocksDB: Could not access database directory: %s",
+ path.c_str());
+ return;
+ }
+
+ // Scan through the files in the directory
+ struct fileinfo* file_info= dir_info->dir_entry;
+ for (uint ii= 0; ii < dir_info->number_off_files; ii++, file_info++)
+ {
+ // find any files ending with m_suffix ...
+ std::string name= file_info->name;
+ size_t pos= name.find(m_suffix);
+ if (pos != std::string::npos && name.size() - pos == m_suffix.size())
+ {
+ // ... and remove them
+ std::string fullname= path + name;
+ my_delete(fullname.c_str(), MYF(0));
+ }
+ }
+
+ // Release the directory entry
+ my_dirend(dir_info);
+}
+
+std::string Rdb_sst_info::m_suffix= ".bulk_load.tmp";
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h
new file mode 100644
index 00000000000..8845ec98122
--- /dev/null
+++ b/storage/rocksdb/rdb_sst_info.h
@@ -0,0 +1,101 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#pragma once
+
+/* C++ standard header files */
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <string>
+#include <thread>
+#include <vector>
+
+/* RocksDB header files */
+#include "rocksdb/db.h"
+#include "rocksdb/sst_file_writer.h"
+
+// define RDB_SST_INFO_USE_THREAD /* uncomment to use threads */
+
+namespace myrocks {
+
+class Rdb_sst_file {
+ Rdb_sst_file(const Rdb_sst_file& p)= delete;
+ Rdb_sst_file& operator=(const Rdb_sst_file& p)= delete;
+
+ rocksdb::DB* m_db;
+ rocksdb::ColumnFamilyHandle* m_cf;
+ const rocksdb::DBOptions& m_db_options;
+ rocksdb::SstFileWriter* m_sst_file_writer;
+ std::string m_name;
+
+ public:
+ Rdb_sst_file(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
+ const rocksdb::DBOptions& db_options, const std::string& name);
+ ~Rdb_sst_file();
+
+ rocksdb::Status open();
+ rocksdb::Status put(const rocksdb::Slice& key, const rocksdb::Slice& value);
+ rocksdb::Status commit();
+};
+
+class Rdb_sst_info {
+ Rdb_sst_info(const Rdb_sst_info& p)= delete;
+ Rdb_sst_info& operator=(const Rdb_sst_info& p)= delete;
+
+ rocksdb::DB* m_db;
+ rocksdb::ColumnFamilyHandle* m_cf;
+ const rocksdb::DBOptions& m_db_options;
+ uint64_t m_curr_size;
+ uint64_t m_max_size;
+ uint m_sst_count;
+ std::string m_error_msg;
+ std::string m_prefix;
+ static std::string m_suffix;
+#if defined(RDB_SST_INFO_USE_THREAD)
+ std::queue<Rdb_sst_file*> m_queue;
+ std::mutex m_mutex;
+ std::condition_variable m_cond;
+ std::thread* m_thread;
+ bool m_finished;
+#endif
+ Rdb_sst_file* m_sst_file;
+
+ int open_new_sst_file();
+ void close_curr_sst_file();
+ void set_error_msg(const std::string& msg);
+
+#if defined(RDB_SST_INFO_USE_THREAD)
+ void run_thread();
+
+ static void thread_fcn(void* object);
+#endif
+
+ public:
+ Rdb_sst_info(rocksdb::DB* db, const std::string& tablename,
+ const std::string& indexname, rocksdb::ColumnFamilyHandle* cf,
+ const rocksdb::DBOptions& db_options);
+ ~Rdb_sst_info();
+
+ int put(const rocksdb::Slice& key, const rocksdb::Slice& value);
+ int commit();
+
+ const std::string& error_message() const { return m_error_msg; }
+
+ static void init(rocksdb::DB* db);
+};
+
+} // namespace myrocks
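
As a rough illustration (not part of this patch), a bulk-load caller might
drive Rdb_sst_info as sketched below. The table and index names are
placeholders, and the requirement that keys arrive in ascending
(mem-comparable) order is an assumption that follows from each chunk being
written through rocksdb::SstFileWriter.

    // Hypothetical sketch only; not MyRocks code.
    static int example_bulk_load(rocksdb::DB *db,
                                 rocksdb::ColumnFamilyHandle *cf,
                                 const rocksdb::DBOptions &db_options,
                                 const std::vector<std::pair<std::string,
                                                             std::string>> &rows)
    {
      myrocks::Rdb_sst_info sst_info(db, "./test/t1", "PRIMARY", cf, db_options);

      for (const auto &kv : rows)   // rows must already be sorted by key
      {
        if (sst_info.put(rocksdb::Slice(kv.first), rocksdb::Slice(kv.second)))
        {
          // error_message() holds the first error reported by put()/commit()
          return 1;
        }
      }
      return sst_info.commit();
    }
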
diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc
new file mode 100644
index 00000000000..1538404ea56
--- /dev/null
+++ b/storage/rocksdb/rdb_threads.cc
@@ -0,0 +1,81 @@
+/*
+ Portions Copyright (c) 2015-Present, Facebook, Inc.
+ Portions Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+/* The C++ file's header */
+#include "./rdb_threads.h"
+
+namespace myrocks {
+
+void* Rdb_thread::thread_func(void* thread_ptr)
+{
+ DBUG_ASSERT(thread_ptr != nullptr);
+ Rdb_thread* thread= static_cast<Rdb_thread*>(thread_ptr);
+ if (!thread->m_run_once.exchange(true))
+ {
+ thread->run();
+ thread->uninit();
+ }
+ return nullptr;
+}
+
+
+void Rdb_thread::init(
+#ifdef HAVE_PSI_INTERFACE
+ my_core::PSI_mutex_key stop_bg_psi_mutex_key,
+ my_core::PSI_cond_key stop_bg_psi_cond_key
+#endif
+ )
+{
+ DBUG_ASSERT(!m_run_once);
+ mysql_mutex_init(stop_bg_psi_mutex_key, &m_signal_mutex, MY_MUTEX_INIT_FAST);
+ mysql_cond_init(stop_bg_psi_cond_key, &m_signal_cond, nullptr);
+}
+
+
+void Rdb_thread::uninit()
+{
+ mysql_mutex_destroy(&m_signal_mutex);
+ mysql_cond_destroy(&m_signal_cond);
+}
+
+
+int Rdb_thread::create_thread(
+#ifdef HAVE_PSI_INTERFACE
+ PSI_thread_key background_psi_thread_key
+#endif
+ )
+{
+ return mysql_thread_create(background_psi_thread_key,
+ &m_handle, nullptr, thread_func, this);
+}
+
+
+void Rdb_thread::signal(bool stop_thread)
+{
+ mysql_mutex_lock(&m_signal_mutex);
+ if (stop_thread) {
+ m_stop= true;
+ }
+ mysql_cond_signal(&m_signal_cond);
+ mysql_mutex_unlock(&m_signal_mutex);
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h
new file mode 100644
index 00000000000..cba03b4cdba
--- /dev/null
+++ b/storage/rocksdb/rdb_threads.h
@@ -0,0 +1,116 @@
+/*
+ Portions Copyright (c) 2015-Present, Facebook, Inc.
+ Portions Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* MySQL includes */
+#include "./my_global.h"
+#include <mysql/psi/mysql_table.h>
+#include <mysql/thread_pool_priv.h>
+
+/* MyRocks header files */
+#include "./rdb_utils.h"
+
+namespace myrocks {
+
+class Rdb_thread
+{
+ private:
+ // Disable Copying
+ Rdb_thread(const Rdb_thread&);
+ Rdb_thread& operator=(const Rdb_thread&);
+
+ // Make sure we run only once
+ std::atomic_bool m_run_once;
+
+ pthread_t m_handle;
+
+ protected:
+ mysql_mutex_t m_signal_mutex;
+ mysql_cond_t m_signal_cond;
+ bool m_stop= false;
+
+ public:
+ Rdb_thread() : m_run_once(false) {}
+
+#ifdef HAVE_PSI_INTERFACE
+ void init(my_core::PSI_mutex_key stop_bg_psi_mutex_key,
+ my_core::PSI_cond_key stop_bg_psi_cond_key);
+ int create_thread(
+ my_core::PSI_thread_key background_psi_thread_key);
+#else
+ void init();
+ int create_thread();
+#endif
+
+ virtual void run(void) = 0;
+
+ void signal(bool stop_thread= false);
+
+ int join()
+ {
+ return pthread_join(m_handle, nullptr);
+ }
+
+ void uninit();
+
+ virtual ~Rdb_thread() {}
+
+ private:
+ static void* thread_func(void* thread_ptr);
+};
+
+
+/**
+ MyRocks background thread control
+ N.B. This is on top of RocksDB's own background threads
+ (@see rocksdb::CancelAllBackgroundWork())
+*/
+
+class Rdb_background_thread : public Rdb_thread
+{
+ private:
+ bool m_save_stats= false;
+
+ void reset()
+ {
+ mysql_mutex_assert_owner(&m_signal_mutex);
+ m_stop= false;
+ m_save_stats= false;
+ }
+
+ public:
+ virtual void run() override;
+
+ void request_save_stats()
+ {
+ mysql_mutex_lock(&m_signal_mutex);
+ m_save_stats= true;
+ mysql_mutex_unlock(&m_signal_mutex);
+ }
+};
+
+
+/*
+ Drop index thread control
+*/
+
+struct Rdb_drop_index_thread : public Rdb_thread
+{
+ virtual void run() override;
+};
+
+} // namespace myrocks
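
To show the intended lifecycle (init(), create_thread(), signal(true), then
join()), here is a hypothetical subclass; the wait loop is only a sketch and is
not how the real background or drop-index threads are implemented.

    // Hypothetical example thread built on Rdb_thread; not part of this patch.
    class Rdb_example_thread : public myrocks::Rdb_thread
    {
     public:
      virtual void run() override
      {
        mysql_mutex_lock(&m_signal_mutex);
        while (!m_stop)
        {
          // Sleep until signal() wakes us up, then do one unit of work.
          mysql_cond_wait(&m_signal_cond, &m_signal_mutex);
          // ... background work would go here ...
        }
        mysql_mutex_unlock(&m_signal_mutex);
      }
    };

The owner would call init() (passing the PSI keys when HAVE_PSI_INTERFACE is
defined), then create_thread(), and later signal(true) followed by join() to
shut the thread down.
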
diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc
new file mode 100644
index 00000000000..05f0104d483
--- /dev/null
+++ b/storage/rocksdb/rdb_utils.cc
@@ -0,0 +1,311 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* This C++ file's header */
+#include "./rdb_utils.h"
+
+/* C++ standard header files */
+#include <array>
+#include <string>
+
+/* C standard header files */
+#include <ctype.h>
+
+/* MyRocks header files */
+#include "./ha_rocksdb.h"
+
+namespace myrocks {
+
+/*
+ Skip past any spaces in the input
+*/
+const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str)
+{
+ DBUG_ASSERT(cs != nullptr);
+ DBUG_ASSERT(str != nullptr);
+
+ while (my_isspace(cs, *str))
+ {
+ str++;
+ }
+
+ return str;
+}
+
+/*
+ Compare (ignoring case) to see if str2 is the next data in str1.
+ Note that str1 can be longer but we only compare up to the number
+ of characters in str2.
+*/
+bool rdb_compare_strings_ic(const char *str1, const char *str2)
+{
+ DBUG_ASSERT(str1 != nullptr);
+ DBUG_ASSERT(str2 != nullptr);
+
+ // Scan through the strings
+ size_t ii;
+ for (ii = 0; str2[ii]; ii++)
+ {
+ if (toupper(static_cast<int>(str1[ii])) !=
+ toupper(static_cast<int>(str2[ii])))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ Scan through an input string looking for pattern, ignoring case
+ and skipping all data enclosed in quotes.
+*/
+const char* rdb_find_in_string(const char *str, const char *pattern,
+ bool *succeeded)
+{
+ char quote = '\0';
+ bool escape = false;
+
+ DBUG_ASSERT(str != nullptr);
+ DBUG_ASSERT(pattern != nullptr);
+ DBUG_ASSERT(succeeded != nullptr);
+
+ *succeeded = false;
+
+ for ( ; *str; str++)
+ {
+ /* If we found a our starting quote character */
+ if (*str == quote)
+ {
+ /* If it was escaped ignore it */
+ if (escape)
+ {
+ escape = false;
+ }
+ /* Otherwise we are now outside of the quoted string */
+ else
+ {
+ quote = '\0';
+ }
+ }
+ /* Else if we are currently inside a quoted string? */
+ else if (quote != '\0')
+ {
+ /* If so, check for the escape character */
+ escape = !escape && *str == '\\';
+ }
+ /* Else if we found a quote we are starting a quoted string */
+ else if (*str == '"' || *str == '\'' || *str == '`')
+ {
+ quote = *str;
+ }
+ /* Else we are outside of a quoted string - look for our pattern */
+ else
+ {
+ if (rdb_compare_strings_ic(str, pattern))
+ {
+ *succeeded = true;
+ return str;
+ }
+ }
+ }
+
+  // Return a pointer to the start of the found pattern, or to the null
+  // terminator if the pattern wasn't found.
+ return str;
+}
+
+/*
+ See if the next valid token matches the specified string
+*/
+const char* rdb_check_next_token(struct charset_info_st* cs, const char *str,
+ const char *pattern, bool *succeeded)
+{
+ DBUG_ASSERT(cs != nullptr);
+ DBUG_ASSERT(str != nullptr);
+ DBUG_ASSERT(pattern != nullptr);
+ DBUG_ASSERT(succeeded != nullptr);
+
+ // Move past any spaces
+ str = rdb_skip_spaces(cs, str);
+
+ // See if the next characters match the pattern
+ if (rdb_compare_strings_ic(str, pattern))
+ {
+ *succeeded = true;
+ return str + strlen(pattern);
+ }
+
+ *succeeded = false;
+ return str;
+}
+
+/*
+ Parse id
+*/
+const char* rdb_parse_id(struct charset_info_st* cs, const char *str,
+ std::string *id)
+{
+ DBUG_ASSERT(cs != nullptr);
+ DBUG_ASSERT(str != nullptr);
+
+ // Move past any spaces
+ str = rdb_skip_spaces(cs, str);
+
+ if (*str == '\0')
+ {
+ return str;
+ }
+
+ char quote = '\0';
+ if (*str == '`' || *str == '"')
+ {
+ quote = *str++;
+ }
+
+ size_t len = 0;
+ const char* start = str;
+
+ if (quote != '\0')
+ {
+ for ( ; ; )
+ {
+ if (*str == '\0')
+ {
+ return str;
+ }
+
+ if (*str == quote)
+ {
+ str++;
+ if (*str != quote)
+ {
+ break;
+ }
+ }
+
+ str++;
+ len++;
+ }
+ }
+ else
+ {
+ while (!my_isspace(cs, *str) && *str != '(' && *str != ')' &&
+ *str != '.' && *str != ',' && *str != '\0')
+ {
+ str++;
+ len++;
+ }
+ }
+
+ // If the user requested the id create it and return it
+ if (id != nullptr)
+ {
+ *id = std::string("");
+ id->reserve(len);
+ while (len--)
+ {
+ *id += *start;
+ if (*start++ == quote)
+ {
+ start++;
+ }
+ }
+ }
+
+ return str;
+}
+
+/*
+ Skip id
+*/
+const char* rdb_skip_id(struct charset_info_st* cs, const char *str)
+{
+ DBUG_ASSERT(cs != nullptr);
+ DBUG_ASSERT(str != nullptr);
+
+ return rdb_parse_id(cs, str, nullptr);
+}
+
+static const std::size_t rdb_hex_bytes_per_char = 2;
+static const std::array<char, 16> rdb_hexdigit =
+{
+ { '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }
+};
+
+/*
+ Convert data into a hex string with optional maximum length.
+  If the data is larger than the maximum length, truncate it and append "..".
+*/
+std::string rdb_hexdump(const char *data, std::size_t data_len,
+ std::size_t maxsize)
+{
+ DBUG_ASSERT(data != nullptr);
+
+ // Count the elements in the string
+ std::size_t elems = data_len;
+ // Calculate the amount of output needed
+ std::size_t len = elems * rdb_hex_bytes_per_char;
+ std::string str;
+
+ if (maxsize != 0 && len > maxsize)
+ {
+ // If the amount of output is too large adjust the settings
+ // and leave room for the ".." at the end
+ elems = (maxsize - 2) / rdb_hex_bytes_per_char;
+ len = elems * rdb_hex_bytes_per_char + 2;
+ }
+
+ // Reserve sufficient space to avoid reallocations
+ str.reserve(len);
+
+ // Loop through the input data and build the output string
+ for (std::size_t ii = 0; ii < elems; ii++, data++)
+ {
+ uint8_t ch = (uint8_t) *data;
+ str += rdb_hexdigit[ch >> 4];
+ str += rdb_hexdigit[ch & 0x0F];
+ }
+
+ // If we can't fit it all add the ".."
+ if (elems != data_len)
+ {
+ str += "..";
+ }
+
+ return str;
+}
+
+
+/*
+ Attempt to access the database subdirectory to see if it exists
+*/
+bool rdb_database_exists(const std::string& db_name)
+{
+ std::string dir = std::string(mysql_real_data_home) + FN_DIRSEP + db_name;
+ struct st_my_dir* dir_info = my_dir(dir.c_str(),
+ MYF(MY_DONT_SORT | MY_WANT_STAT));
+ if (dir_info == nullptr)
+ {
+ return false;
+ }
+
+ my_dirend(dir_info);
+ return true;
+}
+
+} // namespace myrocks
diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h
new file mode 100644
index 00000000000..5630dc1c20f
--- /dev/null
+++ b/storage/rocksdb/rdb_utils.h
@@ -0,0 +1,206 @@
+/*
+ Copyright (c) 2016, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#pragma once
+
+/* C++ standard header files */
+#include <string>
+
+/* MySQL header files */
+#include "./sql_string.h"
+
+/* RocksDB header files */
+#include "rocksdb/slice.h"
+
+#ifdef HAVE_JEMALLOC
+ #include <jemalloc/jemalloc.h>
+#endif
+
+namespace myrocks {
+
+/*
+ Guess what?
+ An interface is a class where all members are public by default.
+*/
+
+#ifndef interface
+#define interface struct
+#endif // interface
+
+/*
+  Introduce C-style pseudo-namespaces: a handy way to make code more readable
+  when calling into a legacy API which does not have any namespace defined.
+  Since we cannot or don't want to change the API in any way, we can use this
+  mechanism to define readability tokens that look like C++ namespaces but are
+  not enforced in any way by the compiler, since the preprocessor strips them
+  out. However, on the calling side, code reads as my_core::thd_ha_data()
+  rather than a plain thd_ha_data() call. This technique adds an immediately
+  visible cue about the type of API we are calling into.
+*/
+
+#ifndef my_core
+// C-style pseudo-namespace for MySQL Core API, to be used in decorating calls
+// to non-obvious MySQL functions, like the ones that do not start with well
+// known prefixes: "my_", "sql_", and "mysql_".
+#define my_core
+#endif // my_core
+
+/*
+ The intent behind a SHIP_ASSERT() macro is to have a mechanism for validating
+ invariants in retail builds. Traditionally assertions (such as macros defined
+ in <cassert>) are evaluated for performance reasons only in debug builds and
+ become NOOP in retail builds when NDEBUG is defined.
+
+ This macro is intended to validate the invariants which are critical for
+ making sure that data corruption and data loss won't take place. Proper
+ intended usage can be described as "If a particular condition is not true then
+  stop everything that's going on and terminate the process, because continued
+ execution will cause really bad things to happen".
+
+ Use the power of SHIP_ASSERT() wisely.
+*/
+
+#ifndef SHIP_ASSERT
+#define SHIP_ASSERT(expr) \
+ do { \
+ if (!(expr)) { \
+ my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \
+ abort_with_stack_traces(); \
+ } \
+ } while (0)
+#endif // SHIP_ASSERT
+
+/*
+ Assert a implies b.
+ If a is true, then b must be true.
+  If a is false, then the value of b does not matter.
+*/
+#ifndef DBUG_ASSERT_IMP
+#define DBUG_ASSERT_IMP(a, b) DBUG_ASSERT(!(a) || (b))
+#endif
+
+/*
+ Assert a if and only if b.
+ a and b must be both true or both false.
+*/
+#ifndef DBUG_ASSERT_IFF
+#define DBUG_ASSERT_IFF(a, b) \
+ DBUG_ASSERT(static_cast<bool>(a) == static_cast<bool>(b))
+#endif
+
+/*
+  Helper function to get a NULL-terminated uchar* out of a given MySQL String.
+*/
+
+inline uchar* rdb_mysql_str_to_uchar_str(my_core::String *str)
+{
+ DBUG_ASSERT(str != nullptr);
+ return reinterpret_cast<uchar*>(str->c_ptr());
+}
+
+/*
+  Helper function to get a plain (not necessarily NULL-terminated) uchar* out
+  of a given STL string.
+*/
+
+inline const uchar* rdb_std_str_to_uchar_ptr(const std::string &str)
+{
+ return reinterpret_cast<const uchar*>(str.data());
+}
+
+/*
+  Helper function to get a plain (not necessarily NULL-terminated) uchar* out
+  of a given RocksDB item.
+*/
+
+inline const uchar* rdb_slice_to_uchar_ptr(const rocksdb::Slice *item)
+{
+ DBUG_ASSERT(item != nullptr);
+ return reinterpret_cast<const uchar*>(item->data());
+}
+
+/*
+  Call this function in cases where you can't rely on the allocator's own
+  background purging and need to explicitly purge all unused dirty pages.
+  This should be a relatively rare scenario, used only where it has been
+  verified that the intervention has noticeable benefits.
+*/
+inline int purge_all_jemalloc_arenas()
+{
+#ifdef HAVE_JEMALLOC
+ unsigned narenas = 0;
+ size_t sz = sizeof(unsigned);
+ char name[25] = { 0 };
+
+ // Get the number of arenas first. Please see `jemalloc` documentation for
+ // all the various options.
+ int result = mallctl("arenas.narenas", &narenas, &sz, nullptr, 0);
+
+  // `mallctl` returns 0 on success, and we really want the caller to know
+  // whether all this trickery actually worked.
+ if (result) {
+ return result;
+ }
+
+ // Form the command to be passed to `mallctl` and purge all the unused dirty
+ // pages.
+ snprintf(name, sizeof(name) / sizeof(char), "arena.%d.purge", narenas);
+ result = mallctl(name, nullptr, nullptr, nullptr, 0);
+
+ return result;
+#else
+ return EXIT_SUCCESS;
+#endif
+}
+
+/*
+ Helper functions to parse strings.
+*/
+
+const char* rdb_skip_spaces(struct charset_info_st* cs, const char *str)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+bool rdb_compare_strings_ic(const char *str1, const char *str2)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+const char* rdb_find_in_string(const char *str, const char *pattern,
+ bool *succeeded)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+const char* rdb_check_next_token(struct charset_info_st* cs, const char *str,
+ const char *pattern, bool *succeeded)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+const char* rdb_parse_id(struct charset_info_st* cs, const char *str,
+ std::string *id)
+ __attribute__((__nonnull__(1, 2), __warn_unused_result__));
+
+const char* rdb_skip_id(struct charset_info_st* cs, const char *str)
+ __attribute__((__nonnull__, __warn_unused_result__));
+
+/*
+ Helper functions to populate strings.
+*/
+
+std::string rdb_hexdump(const char *data, std::size_t data_len,
+ std::size_t maxsize = 0)
+ __attribute__((__nonnull__));
+
+/*
+ Helper function to see if a database exists
+ */
+bool rdb_database_exists(const std::string& db_name);
+
+} // namespace myrocks
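
A small, hypothetical illustration of the helpers declared above; the charset
pointer and the input statement are placeholders, and the COMMENT parsing shown
is only an example use, not the actual MyRocks parsing path.

    // Illustration only; assumes the declarations from rdb_utils.h above.
    #include <cstring>
    #include <string>

    static void example_parse(struct charset_info_st *cs, const char *stmt)
    {
      bool found= false;

      // Find "COMMENT" outside of any quoted string, ignoring case.
      const char *pos= myrocks::rdb_find_in_string(stmt, "COMMENT", &found);

      if (found)
      {
        // Step over the keyword itself, skipping any leading spaces.
        pos= myrocks::rdb_check_next_token(cs, pos, "COMMENT", &found);

        // Parse the identifier/value that follows into a std::string.
        std::string id;
        pos= myrocks::rdb_parse_id(cs, pos, &id);
      }

      // Hex-dump the first bytes of the statement, capped at 16 output chars.
      std::string hex= myrocks::rdb_hexdump(stmt, strlen(stmt), 16);
      (void) pos; (void) hex;   // only a demonstration, results unused
    }
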
diff --git a/storage/rocksdb/rocksdb-range-access.txt b/storage/rocksdb/rocksdb-range-access.txt
new file mode 100644
index 00000000000..c974279ac77
--- /dev/null
+++ b/storage/rocksdb/rocksdb-range-access.txt
@@ -0,0 +1,353 @@
+
+This file describes how MySQL index navigation commands are translated into
+RocksDB index navigation commands.
+
+Index tuples are shown as
+
+ ( kv )-aaa-pkN
+
+where
+ * '(kv)' is the 4-byte index number.
+ * '-' is just for readability
+ * everything that follows the '-' is the mem-comparable form of the key.
+   In ASCII encoding, aaa < bbb < ccc < xxx.
+
+Tuples that start with '#' do not exist in the database. They are only shown
+to demonstrate where Seek() calls end up.
+
+== HA_READ_KEY_EXACT, forward CF ==
+
+ (kv-1)-xxx-pk
+# ( kv )-aaa <-- "kv-aaa" doesn't exist in the database, but it would be
+ here.
+ ( kv )-aaa-pk <--- Seek("kv-aaa") will put us here on the next record.
+ ( kv )-aaa-pk2
+ ( kv )-bbb-...
+
+RocksDB calls:
+
+ it->Seek(kv);
+ if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...))
+ return record.
+
+== HA_READ_KEY_EXACT, backward CF ==
+
+When we need to seek to a tuple that is a prefix of a full key:
+
+ (kv+1)-xxx-pk
+ ( kv )-ccc-pk
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk2
+ ( kv )-bbb-pk1 < -- We need to be here
+# ( kv )-bbb <---we call Seek(kv-bbb)
+ ( kv )-aaa-pk ... and end up here. Should call it->Prev().
+
+There is a special case when (kv)-bbb-pk1 is the last record in the CF, and
+we get invalid iterator. Then, we need to call SeekToLast().
+
+Another kind of special case is when we need to seek to the full value.
+Suppose, the lookup tuple is kv-bbb-pk1:
+
+ (kv+1)-xxx-pk
+ ( kv )-ccc-pk
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk2
+ ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1)
+ ( kv )-bbb-pk0
+
+Then, Seek(kv-bbb-pk1) will position us exactly at the tuple we need, and we
+won't need to call it->Prev(). If we get an invalid iterator, there is no
+need to call SeekToLast().
+
+RocksDB calls:
+
+ it->Seek(tuple);
+
+ if (!using_full_key)
+ {
+ if (!it->Valid())
+ it->SeekToLast();
+ else
+ it->Prev();
+ }
+
+ if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...))
+ return record.
+
+== HA_READ_KEY_OR_NEXT, forward CF ==
+
+This is finding min(key) such that key >= lookup_tuple.
+
+If lookup tuple is kv-bbb:
+
+ ( kv )-aaa-pk
+# ( kv )-bbb <-- "kv-bbb" doesn't exist in the database, but it would be
+ here.
+ ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here on the next record.
+ ( kv )-bbb-pk2
+ ( kv )-bbb-...
+
+RocksDB calls:
+
+ Seek(kv);
+ if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...))
+ return record.
+
+== HA_READ_KEY_OR_NEXT, backward CF ==
+
+When specified key tuple is a key prefix:
+
+ (kv+1)-xxx-pk
+ ( kv )-ccc-pk
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk2
+ ( kv )-bbb-pk1 < -- We need to be here (or above)
+# ( kv )-bbb <---we call Seek(kv-bbb)
+ ( kv )-aaa-pk ... and end up here. Should call it->Prev().
+
+There is a special case when (kv)-bbb-pk1 is the last record in the CF, and
+we get invalid iterator. Then, we need to call SeekToLast().
+
+Another kind of special case is when we need to seek to the full value.
+Suppose, the lookup tuple is kv-bbb-pk1:
+
+ (kv+1)-xxx-pk
+ ( kv )-ccc-pk
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk2
+ ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1)
+ ( kv )-bbb-pk0
+
+Then, Seek(kv-bbb-pk1) may position us exactly at the tuple we need, and we
+won't need to call it->Prev().
+If kv-bbb-pk1 is not present in the database, we will be positioned on
+kv-bbb-pk0, and we will need to call it->Prev().
+If we get an invalid iterator, we DO need to call SeekToLast().
+
+RocksDB calls:
+
+ Seek(...);
+
+ if (!it->Valid())
+ it->SeekToLast();
+ else
+ {
+ if (!using_full_key ||
+        !(kd->covers_key(...) || kd->cmp_full_keys(...)))
+ it->Prev();
+ }
+
+ if (it->Valid() && kd->covers_key(..))
+ return record.
+
+== HA_READ_AFTER_KEY, forward CF ==
+
+This is finding min(key) such that key > lookup_key.
+
+Suppose lookup_key = kv-bbb
+
+ ( kv )-aaa-pk
+# ( kv )-bbb
+ ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here. We need to
+ ( kv )-bbb-pk2 get to the value that is next after 'bbb'.
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk4
+ ( kv )-bbb-pk5
+ ( kv )-ccc-pkN <-- That is, we need to be here.
+
+However, we don't know that the next value is kv-ccc. Instead, we seek to the
+first value that is strictly greater than 'kv-bbb'. It is Successor(kv-bbb).
+
+It doesn't matter if we're using a full extended key or not.
+
+RocksDB calls:
+
+ Seek(Successor(kv-bbb));
+ if (it->Valid() && kd->covers_key(it.key()))
+ return record;
+
+Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that
+we seek to Successor($lookup_key) instead of $lookup_key itself.
+
+== HA_READ_AFTER_KEY, backward CF ==
+
+Suppose, the lookup key is 'kv-bbb':
+
+ (kv+1)-xxx-pk
+ ( kv )-ccc-pk7
+ ( kv )-ccc-pk6 <-- We need to be here.
+# Successor(kv-bbb) <-- We get here when we call Seek(Successor(kv-bbb))
+ ( kv )-bbb-pk5 and we will need to call Prev() (*)
+ ( kv )-bbb-pk4
+ ( kv )-bbb-pk3
+ ( kv )-bbb-pk2
+ ( kv )-bbb-pk1
+# ( kv )-bbb <-- We would get here if we called Seek(kv-bbb).
+ ( kv )-aaa-pk
+
+(*) - unless Successor(kv-bbb)=(kv-ccc), and Seek(kv-ccc) hits the row. In
+that case, we won't need to call Prev().
+
+RocksDB calls:
+
+ Seek(Successor(kv-bbb));
+ if (!it->Valid())
+ {
+ /*
+ We may get EOF if rows with 'kv-bbb' (below the Successor... line in the
+ diagram) do not exist. This doesn't mean that rows with values kv-ccc
+ do not exist.
+ */
+ it->SeekToLast();
+ }
+ else
+ {
+ if (!using_full_key ||
+ !kd->value_matches_prefix(...))
+ {
+ it->Prev();
+ }
+ }
+
+ if (it->Valid() && kd->covers_key(...))
+ return record.
+
+Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that
+we seek to Successor($lookup_key) instead of $lookup_key itself.
+
+
+== HA_READ_BEFORE_KEY, forward CF ==
+
+This is finding max(key) such that key < lookup_tuple.
+
+Suppose, lookup_tuple=kv-bbb.
+
+ ( kv )-aaa-pk1
+ ( kv )-aaa-pk2
+ ( kv )-aaa-pk3 <-- Need to be here.
+# ( kv )-bbb
+ ( kv )-bbb-pk4 <-- Seek("kv-bbb") will put us here.
+ ( kv )-bbb-pk5
+ ( kv )-bbb-pk6
+
+1. Seek(kv-bbb) will put us at kv-bbb-pk4 (or return an invalid iterator
+ if kv-bbb-pk4 and subsequent rows do not exist in the db).
+2. We will need to call Prev() to get to the record before.
+ (if there is no record before kv-bbb, then we can't find a record).
+
+It doesn't matter if we're using a full extended key or not.
+
+RocksDB calls:
+
+ it->Seek(kv-bbb);
+ if (it->Valid())
+ it->Prev();
+ else
+ it->SeekToLast();
+
+ if (it->Valid() && kd->covers_key(...))
+ return record;
+
+
+== HA_READ_BEFORE_KEY, backward CF ==
+
+This is finding max(key) such that key < lookup_tuple.
+Suppose, lookup_tuple=kv-bbb, a prefix of the full key.
+
+ ( kv )-bbb-pk6
+ ( kv )-bbb-pk5
+ ( kv )-bbb-pk4
+# ( kv )-bbb
+ ( kv )-aaa-pk3 <-- Need to be here, and Seek("kv-bbb") will put us here
+ ( kv )-aaa-pk2
+ ( kv )-aaa-pk1
+
+If the lookup tuple is a full key (e.g. kv-bbb-pk4), and the key is present in
+the database, the iterator will be positioned on the key. We will need to call
+Next() to get the next key.
+
+RocksDB calls:
+
+ it->Seek(kv-bbb);
+
+ if (it->Valid() && using_full_key &&
+ kd->value_matches_prefix(...))
+ {
+ /* We are using full key and we've hit an exact match */
+ it->Next();
+ }
+
+ if (it->Valid() && kd->covers_key(...))
+ return record;
+
+== HA_READ_PREFIX_LAST, forward CF ==
+
+Find the last record with the specified index prefix lookup_tuple.
+
+Suppose, lookup_tuple='kv-bbb'
+
+ ( kv )-aaa-pk2
+ ( kv )-aaa-pk3
+# ( kv )-bbb
+ ( kv )-bbb-pk4
+ ( kv )-bbb-pk5
+ ( kv )-bbb-pk6
+ ( kv )-bbb-pk7 <--- Need to be here.
+# ( kv )-ccc
+ ( kv )-ccc-pk8 <-- Seek(Successor(kv-bbb)) will get us here. will need
+ ( kv )-ccc-pk9 to call Prev().
+
+RocksDB calls:
+
+ Seek(Successor(kv-bbb));
+ if (!it->Valid())
+ it->SeekToLast();
+ else
+ it->Prev();
+
+ if (it->Valid() && kd->covers_key(...))
+ {
+ if (!cmp_full_keys(lookup_tuple)) // not needed in _OR_PREV
+ {
+ // the record's prefix matches lookup_tuple.
+ return record;
+ }
+ }
+
+== HA_READ_PREFIX_LAST, backward CF ==
+
+Suppose, lookup_tuple='kv-bbb'
+
+ ( kv )-ccc-pk9
+ ( kv )-ccc-pk8
+# ( kv )-ccc <-- 2. Seek(Successor(kv-bbb)) will point here
+ and it will fall down to the next row.
+ ( kv )-bbb-pk7 <--- 1. Need to be here.
+ ( kv )-bbb-pk6
+ ( kv )-bbb-pk5
+ ( kv )-bbb-pk4
+# ( kv )-bbb
+ ( kv )-aaa-pk3
+ ( kv )-aaa-pk2
+
+
+RocksDB calls:
+
+ it->Seek(Successor(kv-bbb));
+
+  if (using_full_key && it->Valid() && !cmp_full_keys(Successor(lookup_key)))
+ it->Next();
+
+ if (it->Valid() && kd->covers_key(..))
+ {
+ if (!cmp_full_keys(...)) // not needed in _OR_PREV
+ {
+ // the record's prefix matches lookup_tuple.
+ return record;
+ }
+ }
+
+== HA_READ_PREFIX_LAST_OR_PREV, forward or backward CF ==
+
+This is just like HA_READ_PREFIX_LAST but we don't need to check that the key
+we've got is in the search prefix. (search for "not needed in _OR_PREV" above)
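
To make the pseudo-code above concrete, here is a small self-contained sketch
(not taken from the MyRocks sources) of the HA_READ_KEY_EXACT pattern for a
forward-ordered column family, using plain RocksDB iterator calls. The prefix
check stands in for kd->covers_key()/cmp_full_keys(); in MyRocks the prefix
would be the 4-byte index number plus the mem-comparable lookup tuple.

    #include <memory>
    #include <string>
    #include "rocksdb/db.h"

    // Sketch: find the first record whose key starts with 'prefix'.
    static bool read_key_exact_forward(rocksdb::DB *db,
                                       rocksdb::ColumnFamilyHandle *cf,
                                       const std::string &prefix,
                                       std::string *value)
    {
      std::unique_ptr<rocksdb::Iterator> it(
          db->NewIterator(rocksdb::ReadOptions(), cf));

      it->Seek(prefix);                                   // first key >= prefix
      if (it->Valid() && it->key().starts_with(prefix))   // "covers_key"
      {
        value->assign(it->value().data(), it->value().size());
        return true;
      }
      return false;   // no row with this index prefix exists
    }
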
diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc
new file mode 100644
index 00000000000..ce51481690b
--- /dev/null
+++ b/storage/rocksdb/tools/mysql_ldb.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#include "rocksdb/ldb_tool.h"
+#include "../rdb_comparator.h"
+
+int main(int argc, char** argv) {
+ rocksdb::Options db_options;
+ myrocks::Rdb_pk_comparator pk_comparator;
+ db_options.comparator= &pk_comparator;
+
+ rocksdb::LDBTool tool;
+ tool.Run(argc, argv, db_options);
+ return 0;
+}
diff --git a/storage/rocksdb/unittest/CMakeLists.txt b/storage/rocksdb/unittest/CMakeLists.txt
new file mode 100644
index 00000000000..d2a5ea2aff7
--- /dev/null
+++ b/storage/rocksdb/unittest/CMakeLists.txt
@@ -0,0 +1,22 @@
+IF (WITH_ROCKSDB_SE_STORAGE_ENGINE)
+ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
+ ${CMAKE_SOURCE_DIR}/unittest/mytap
+ ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src
+ )
+ LINK_LIBRARIES(mytap mysys dbug strings)
+
+ ADD_DEFINITIONS(-DSTANDALONE_UNITTEST)
+
+ MYSQL_ADD_EXECUTABLE(test_properties_collector
+ test_properties_collector.cc
+ )
+ TARGET_LINK_LIBRARIES(test_properties_collector mysqlserver)
+
+ # Necessary to make sure that we can use the jemalloc API calls.
+  GET_TARGET_PROPERTY(PREV_LINK_FLAGS mysql_embedded LINK_FLAGS)
+ IF(NOT PREV_LINK_FLAGS)
+ SET(PREV_LINK_FLAGS)
+ ENDIF()
+ SET_TARGET_PROPERTIES(test_properties_collector PROPERTIES LINK_FLAGS
+ "${PREV_LINK_FLAGS} ${WITH_MYSQLD_LDFLAGS}")
+ENDIF()
diff --git a/storage/rocksdb/unittest/test_properties_collector.cc b/storage/rocksdb/unittest/test_properties_collector.cc
new file mode 100644
index 00000000000..f798a43d045
--- /dev/null
+++ b/storage/rocksdb/unittest/test_properties_collector.cc
@@ -0,0 +1,60 @@
+/*
+ Copyright (c) 2015, Facebook, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* MyRocks header files */
+#include "../ha_rocksdb.h"
+#include "../rdb_datadic.h"
+
+void putKeys(myrocks::Rdb_tbl_prop_coll* coll,
+ int num,
+ bool is_delete,
+ uint64_t expected_deleted)
+{
+ std::string str("aaaaaaaaaaaaaa");
+ rocksdb::Slice sl(str.data(), str.size());
+
+ for (int i=0; i < num; i++) {
+ coll->AddUserKey(
+ sl, sl,
+ is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut,
+ 0, 100);
+ }
+ DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted);
+}
+
+int main(int argc, char** argv)
+{
+ // test the circular buffer for delete flags
+ myrocks::Rdb_compact_params params;
+ params.m_file_size= 333;
+ params.m_deletes= 333; // irrelevant
+ params.m_window= 10;
+
+ myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0,
+ RDB_DEFAULT_TBL_STATS_SAMPLE_PCT);
+
+ putKeys(&coll, 2, true, 2); // [xx]
+ putKeys(&coll, 3, false, 2); // [xxo]
+ putKeys(&coll, 1, true, 3); // [xxox]
+ putKeys(&coll, 6, false, 3); // [xxoxoooooo]
+ putKeys(&coll, 3, true, 4); // xxo[xooooooxxx]
+ putKeys(&coll, 1, false, 4); // xxox[ooooooxxxo]
+ putKeys(&coll, 100, false, 4); // ....[oooooooooo]
+ putKeys(&coll, 100, true, 10); // ....[xxxxxxxxxx]
+ putKeys(&coll, 100, true, 10); // ....[oooooooooo]
+
+ return 0;
+}