author     Sergei Golubchik <sergii@pisem.net>  2013-04-15 15:09:22 +0200
committer  Sergei Golubchik <sergii@pisem.net>  2013-04-15 15:09:22 +0200
commit     a9035be5b7a7b3865ddb4ef34a5d0cfc65dfc254
tree       a9df7341e91623f62fe37cd47fce139d8888fc95
parent     3a1c91d87d69ef243b3e78be6089102cafef0a8e
parent     f57ecb7786177e0af3b1e3ec94302720b2e0f967
10.0-base merge
Diffstat (limited to 'storage')
-rw-r--r--  storage/archive/azio.c | 26
-rw-r--r--  storage/archive/azlib.h | 7
-rw-r--r--  storage/archive/ha_archive.cc | 218
-rw-r--r--  storage/archive/ha_archive.h | 4
-rw-r--r--  storage/blackhole/ha_blackhole.cc | 9
-rw-r--r--  storage/blackhole/ha_blackhole.h | 1
-rw-r--r--  storage/cassandra/ha_cassandra.cc | 24
-rw-r--r--  storage/cassandra/ha_cassandra.h | 5
-rw-r--r--  storage/csv/ha_tina.cc | 49
-rw-r--r--  storage/csv/ha_tina.h | 1
-rw-r--r--  storage/example/ha_example.cc | 61
-rw-r--r--  storage/example/ha_example.h | 5
-rw-r--r--  storage/federated/ha_federated.cc | 15
-rw-r--r--  storage/federated/ha_federated.h | 1
-rw-r--r--  storage/federatedx/ha_federatedx.cc | 122
-rw-r--r--  storage/federatedx/ha_federatedx.h | 3
-rw-r--r--  storage/heap/ha_heap.cc | 10
-rw-r--r--  storage/heap/ha_heap.h | 1
-rw-r--r--  storage/heap/hp_delete.c | 54
-rw-r--r--  storage/innobase/btr/btr0btr.cc | 38
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 8
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 148
-rw-r--r--  storage/innobase/buf/buf0flu.cc | 24
-rw-r--r--  storage/innobase/buf/buf0lru.cc | 42
-rw-r--r--  storage/innobase/dict/dict0load.cc | 3
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 84
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 199
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 6
-rw-r--r--  storage/innobase/include/btr0cur.h | 5
-rw-r--r--  storage/innobase/include/btr0cur.ic | 13
-rw-r--r--  storage/innobase/include/buf0buf.h | 29
-rw-r--r--  storage/innobase/include/buf0buf.ic | 8
-rw-r--r--  storage/innobase/include/buf0lru.h | 5
-rw-r--r--  storage/innobase/include/data0type.ic | 31
-rw-r--r--  storage/innobase/include/fil0fil.h | 15
-rw-r--r--  storage/innobase/include/lock0lock.h | 12
-rw-r--r--  storage/innobase/include/rem0rec.h | 80
-rw-r--r--  storage/innobase/include/row0merge.h | 2
-rw-r--r--  storage/innobase/include/srv0srv.h | 18
-rw-r--r--  storage/innobase/include/univ.i | 7
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 99
-rw-r--r--  storage/innobase/log/log0recv.cc | 15
-rw-r--r--  storage/innobase/mysql-test/storage_engine/autoinc_secondary.rdiff | 2
-rw-r--r--  storage/innobase/mysql-test/storage_engine/insert_delayed.rdiff | 6
-rw-r--r--  storage/innobase/mysql-test/storage_engine/parts/repair_table.rdiff | 16
-rw-r--r--  storage/innobase/mysql-test/storage_engine/repair_table.rdiff | 6
-rw-r--r--  storage/innobase/mysql-test/storage_engine/type_char_indexes.rdiff | 8
-rw-r--r--  storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff | 66
-rw-r--r--  storage/innobase/mysql-test/storage_engine/vcol.rdiff | 8
-rw-r--r--  storage/innobase/os/os0file.cc | 72
-rw-r--r--  storage/innobase/rem/rem0rec.cc | 254
-rw-r--r--  storage/innobase/row/row0ins.cc | 5
-rw-r--r--  storage/innobase/row/row0merge.cc | 114
-rw-r--r--  storage/innobase/row/row0mysql.cc | 7
-rw-r--r--  storage/innobase/row/row0umod.cc | 4
-rw-r--r--  storage/innobase/row/row0undo.cc | 2
-rw-r--r--  storage/innobase/srv/srv0srv.cc | 22
-rw-r--r--  storage/innobase/sync/sync0sync.cc | 4
-rw-r--r--  storage/innobase/trx/trx0purge.cc | 10
-rw-r--r--  storage/innobase/trx/trx0rec.cc | 19
-rw-r--r--  storage/maria/ha_maria.cc | 14
-rw-r--r--  storage/maria/ha_maria.h | 2
-rw-r--r--  storage/maria/ma_bitmap.c | 26
-rw-r--r--  storage/maria/ma_blockrec.c | 4
-rw-r--r--  storage/maria/ma_check.c | 8
-rw-r--r--  storage/maria/ma_create.c | 12
-rw-r--r--  storage/maria/ma_init.c | 2
-rw-r--r--  storage/maria/ma_key.c | 2
-rw-r--r--  storage/maria/ma_loghandler.c | 12
-rw-r--r--  storage/maria/ma_loghandler_lsn.h | 6
-rw-r--r--  storage/maria/ma_pagecache.c | 12
-rw-r--r--  storage/maria/ma_recovery.c | 4
-rw-r--r--  storage/maria/ma_test3.c | 2
-rw-r--r--  storage/maria/maria_def.h | 2
-rw-r--r--  storage/maria/maria_pack.c | 2
-rw-r--r--  storage/maria/trnman.h | 2
-rw-r--r--  storage/maria/unittest/ma_control_file-t.c | 2
-rw-r--r--  storage/maria/unittest/ma_maria_log_cleanup.c | 2
-rw-r--r--  storage/maria/unittest/trnman-t.c | 2
-rw-r--r--  storage/myisam/ha_myisam.cc | 9
-rw-r--r--  storage/myisam/ha_myisam.h | 4
-rw-r--r--  storage/myisam/mi_create.c | 13
-rw-r--r--  storage/myisam/mi_open.c | 6
-rw-r--r--  storage/myisam/mi_search.c | 15
-rw-r--r--  storage/myisam/myisamchk.c | 16
-rw-r--r--  storage/myisam/myisampack.c | 3
-rw-r--r--  storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff | 6
-rw-r--r--  storage/myisam/mysql-test/storage_engine/foreign_keys.rdiff | 8
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff | 29
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff | 29
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/delete.rdiff | 2
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/insert.rdiff | 4
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/level_read_committed.rdiff | 138
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff | 19
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/level_repeatable_read.rdiff | 149
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/level_serializable.rdiff | 172
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/select_for_update.rdiff | 4
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff | 2
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/update.rdiff | 2
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/xa.rdiff | 8
-rw-r--r--  storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff | 2
-rw-r--r--  storage/myisammrg/ha_myisammrg.cc | 21
-rw-r--r--  storage/myisammrg/ha_myisammrg.h | 1
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/alter_table.rdiff | 219
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff | 59
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/analyze_table.rdiff | 56
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff | 98
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/cache_index.rdiff | 117
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/char_indexes.rdiff | 0
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/checksum_table_live.rdiff | 19
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/create_table.rdiff | 94
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/foreign_keys.rdiff | 285
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/fulltext_search.rdiff | 292
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/handler.rdiff | 167
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/index.rdiff | 17
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/index_enable_disable.rdiff | 50
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/index_type_btree.rdiff | 17
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/index_type_hash.rdiff | 103
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/insert_delayed.rdiff | 40
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/lock.rdiff | 142
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/optimize_table.rdiff | 59
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/alter_table.rdiff | 131
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/analyze_table.rdiff | 170
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/check_table.rdiff | 348
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/checksum_table.rdiff | 170
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/create_table.rdiff | 315
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/optimize_table.rdiff | 186
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff | 598
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/parts/truncate_table.rdiff | 201
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff | 234
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/show_engine.rdiff | 12
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_ai.rdiff | 24
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_avg_row_length.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_checksum.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_connection.rdiff | 27
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff | 26
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_delay_key_write.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_insert_method.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_key_block_size.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_max_rows.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_min_rows.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_pack_keys.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_password.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff | 25
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_opt_union.rdiff | 24
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_standard_opts.rdiff | 27
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/tbl_temporary.rdiff | 14
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff | 83
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff | 29
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff | 29
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/delete.rdiff | 84
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/insert.rdiff | 97
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/level_read_committed.rdiff | 138
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff | 19
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/level_repeatable_read.rdiff | 149
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/level_serializable.rdiff | 172
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/select_for_update.rdiff | 90
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff | 63
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/update.rdiff | 99
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/xa.rdiff | 123
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff | 55
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/type_char_indexes.rdiff | 28
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/type_float_indexes.rdiff | 15
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/type_spatial.rdiff | 1418
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/type_spatial_indexes.rdiff | 2834
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/vcol.rdiff | 161
-rw-r--r--  storage/oqgraph/CMakeLists.txt | 1
-rw-r--r--  storage/oqgraph/ha_oqgraph.cc | 16
-rw-r--r--  storage/oqgraph/ha_oqgraph.h | 1
-rw-r--r--  storage/pbxt/src/discover_xt.cc | 3
-rw-r--r--  storage/perfschema/ha_perfschema.cc | 9
-rw-r--r--  storage/perfschema/ha_perfschema.h | 2
-rw-r--r--  storage/sequence/CMakeLists.txt | 1
-rw-r--r--  storage/sequence/mysql-test/sequence/inc.inc | 4
-rw-r--r--  storage/sequence/mysql-test/sequence/inc.opt | 2
-rw-r--r--  storage/sequence/mysql-test/sequence/simple.result | 270
-rw-r--r--  storage/sequence/mysql-test/sequence/simple.test | 93
-rw-r--r--  storage/sequence/sequence.cc | 343
-rw-r--r--  storage/sphinx/ha_sphinx.cc | 24
-rw-r--r--  storage/sphinx/ha_sphinx.h | 1
-rw-r--r--  storage/test_sql_discovery/CMakeLists.txt | 2
-rw-r--r--  storage/test_sql_discovery/mysql-test/archive/discover.rdiff | 35
-rw-r--r--  storage/test_sql_discovery/mysql-test/archive/discover.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/r/plugin.rdiff | 11
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/create.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/drop.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/mdl_sync.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/partition_disabled.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/plugin.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/main/t/rename.test | 3
-rw-r--r--  storage/test_sql_discovery/mysql-test/sql_discovery/inc.inc | 4
-rw-r--r--  storage/test_sql_discovery/mysql-test/sql_discovery/inc.opt | 2
-rw-r--r--  storage/test_sql_discovery/mysql-test/sql_discovery/simple.result | 199
-rw-r--r--  storage/test_sql_discovery/mysql-test/sql_discovery/simple.test | 133
-rw-r--r--  storage/test_sql_discovery/test_sql_discovery.cc | 175
-rw-r--r--  storage/xtradb/btr/btr0btr.c | 96
-rw-r--r--  storage/xtradb/btr/btr0cur.c | 34
-rw-r--r--  storage/xtradb/buf/buf0buf.c | 194
-rw-r--r--  storage/xtradb/buf/buf0flu.c | 40
-rw-r--r--  storage/xtradb/buf/buf0lru.c | 176
-rw-r--r--  storage/xtradb/buf/buf0rea.c | 3
-rw-r--r--  storage/xtradb/dict/dict0dict.c | 32
-rw-r--r--  storage/xtradb/dict/dict0load.c | 3
-rw-r--r--  storage/xtradb/fil/fil0fil.c | 165
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 348
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 10
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 14
-rw-r--r--  storage/xtradb/handler/i_s.cc | 53
-rw-r--r--  storage/xtradb/ibuf/ibuf0ibuf.c | 40
-rw-r--r--  storage/xtradb/include/btr0cur.h | 7
-rw-r--r--  storage/xtradb/include/btr0cur.ic | 13
-rw-r--r--  storage/xtradb/include/buf0buf.h | 53
-rw-r--r--  storage/xtradb/include/buf0buf.ic | 42
-rw-r--r--  storage/xtradb/include/buf0lru.h | 5
-rw-r--r--  storage/xtradb/include/data0type.ic | 32
-rw-r--r--  storage/xtradb/include/dict0dict.h | 2
-rw-r--r--  storage/xtradb/include/dict0dict.ic | 14
-rw-r--r--  storage/xtradb/include/dict0mem.h | 7
-rw-r--r--  storage/xtradb/include/fil0fil.h | 15
-rw-r--r--  storage/xtradb/include/lock0lock.h | 12
-rw-r--r--  storage/xtradb/include/log0online.h | 44
-rw-r--r--  storage/xtradb/include/os0file.h | 2
-rw-r--r--  storage/xtradb/include/page0zip.h | 8
-rw-r--r--  storage/xtradb/include/rem0rec.h | 80
-rw-r--r--  storage/xtradb/include/row0undo.h | 7
-rw-r--r--  storage/xtradb/include/row0upd.ic | 3
-rw-r--r--  storage/xtradb/include/srv0srv.h | 21
-rw-r--r--  storage/xtradb/include/sync0sync.h | 2
-rw-r--r--  storage/xtradb/include/univ.i | 37
-rw-r--r--  storage/xtradb/lock/lock0lock.c | 256
-rw-r--r--  storage/xtradb/log/log0online.c | 319
-rw-r--r--  storage/xtradb/log/log0recv.c | 20
-rw-r--r--  storage/xtradb/os/os0file.c | 95
-rw-r--r--  storage/xtradb/page/page0cur.c | 7
-rw-r--r--  storage/xtradb/page/page0page.c | 24
-rw-r--r--  storage/xtradb/page/page0zip.c | 157
-rw-r--r--  storage/xtradb/rem/rem0rec.c | 257
-rw-r--r--  storage/xtradb/row/row0ins.c | 5
-rw-r--r--  storage/xtradb/row/row0merge.c | 117
-rw-r--r--  storage/xtradb/row/row0mysql.c | 12
-rw-r--r--  storage/xtradb/row/row0sel.c | 33
-rw-r--r--  storage/xtradb/row/row0umod.c | 56
-rw-r--r--  storage/xtradb/row/row0undo.c | 21
-rw-r--r--  storage/xtradb/srv/srv0srv.c | 240
-rw-r--r--  storage/xtradb/srv/srv0start.c | 5
-rw-r--r--  storage/xtradb/sync/sync0sync.c | 4
-rw-r--r--  storage/xtradb/trx/trx0purge.c | 17
-rw-r--r--  storage/xtradb/trx/trx0rec.c | 20
-rw-r--r--  storage/xtradb/trx/trx0trx.c | 6
249 files changed, 11030 insertions, 6796 deletions
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index 92d7ad70344..4519d15cefc 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -364,6 +364,8 @@ void read_header(azio_stream *s, unsigned char *buffer)
{
if (buffer[0] == az_magic[0] && buffer[1] == az_magic[1])
{
+ uchar tmp[AZ_FRMVER_LEN + 2];
+
s->version= (unsigned int)buffer[AZ_VERSION_POS];
s->minor_version= (unsigned int)buffer[AZ_MINOR_VERSION_POS];
s->block_size= 1024 * buffer[AZ_BLOCK_POS];
@@ -379,6 +381,22 @@ void read_header(azio_stream *s, unsigned char *buffer)
s->comment_start_pos= (unsigned int)uint4korr(buffer + AZ_COMMENT_POS);
s->comment_length= (unsigned int)uint4korr(buffer + AZ_COMMENT_LENGTH_POS);
s->dirty= (unsigned int)buffer[AZ_DIRTY_POS];
+
+ /*
+ we'll hard-code the current frm format for now, to avoid
+ changing archive table versions.
+ */
+ if (s->frm_length == 0 ||
+ my_pread(s->file, tmp, sizeof(tmp), s->frm_start_pos + 64, MYF(MY_NABP)) ||
+ tmp[0] != 0 || tmp[1] != AZ_FRMVER_LEN)
+ {
+ s->frmver_length= 0;
+ }
+ else
+ {
+ s->frmver_length= tmp[1];
+ memcpy(s->frmver, tmp+2, s->frmver_length);
+ }
}
else if (buffer[0] == gz_magic[0] && buffer[1] == gz_magic[1])
{
@@ -855,7 +873,7 @@ int azclose (azio_stream *s)
Though this was added to support MySQL's FRM file, anything can be
stored in this location.
*/
-int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
+int azwrite_frm(azio_stream *s, const uchar *blob, unsigned int length)
{
if (s->mode == 'r')
return 1;
@@ -867,7 +885,7 @@ int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
s->frm_length= length;
s->start+= length;
- if (my_pwrite(s->file, (uchar*) blob, s->frm_length,
+ if (my_pwrite(s->file, blob, s->frm_length,
s->frm_start_pos, MYF(MY_NABP)) ||
write_header(s) ||
(my_seek(s->file, 0, MY_SEEK_END, MYF(0)) == MY_FILEPOS_ERROR))
@@ -876,9 +894,9 @@ int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
return 0;
}
-int azread_frm(azio_stream *s, char *blob)
+int azread_frm(azio_stream *s, uchar *blob)
{
- return my_pread(s->file, (uchar*) blob, s->frm_length,
+ return my_pread(s->file, blob, s->frm_length,
s->frm_start_pos, MYF(MY_NABP)) ? 1 : 0;
}
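
The new code in read_header() above recovers the table definition version by peeking at a
fixed offset (64) inside the frm image stored in the ARZ header: a zero byte, a length byte
that must equal AZ_FRMVER_LEN, then the version bytes themselves. A minimal standalone
sketch of the same check against an frm image that is already in memory (the function name
and signature are illustrative only, not part of this patch):

    static int get_frm_version(const uchar *frm_image, size_t frm_length,
                               uchar *ver)     /* out: AZ_FRMVER_LEN bytes */
    {
      /* mirror read_header(): 0x00, a length byte, then the version bytes */
      if (frm_length < 64 + 2 + AZ_FRMVER_LEN ||
          frm_image[64] != 0 || frm_image[65] != AZ_FRMVER_LEN)
        return 1;                     /* no version stored: pre-10.0 image */
      memcpy(ver, frm_image + 66, AZ_FRMVER_LEN);
      return 0;
    }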
diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h
index 29a6329fb0a..2971705b2f1 100644
--- a/storage/archive/azlib.h
+++ b/storage/archive/azlib.h
@@ -198,6 +198,7 @@ extern "C" {
#define AZ_BUFSIZE_READ 32768
#define AZ_BUFSIZE_WRITE 16384
+#define AZ_FRMVER_LEN 16 /* same as MY_UUID_SIZE in 10.0.2 */
typedef struct azio_stream {
z_stream stream;
@@ -227,6 +228,8 @@ typedef struct azio_stream {
unsigned char dirty; /* State of file */
unsigned int frm_start_pos; /* Position for start of FRM */
unsigned int frm_length; /* Position for start of FRM */
+ unsigned char frmver[AZ_FRMVER_LEN];
+ unsigned int frmver_length;
unsigned int comment_start_pos; /* Position for start of comment */
unsigned int comment_length; /* Position for start of comment */
} azio_stream;
@@ -331,8 +334,8 @@ extern int azclose(azio_stream *file);
error number (see function gzerror below).
*/
-extern int azwrite_frm (azio_stream *s, char *blob, unsigned int length);
-extern int azread_frm (azio_stream *s, char *blob);
+extern int azwrite_frm (azio_stream *s, const uchar *blob, unsigned int length);
+extern int azread_frm (azio_stream *s, uchar *blob);
extern int azwrite_comment (azio_stream *s, char *blob, unsigned int length);
extern int azread_comment (azio_stream *s, char *blob);
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 342f8be956e..d18e46c08b1 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2004, 2012, Oracle and/or its affiliates
+ Copyright (c) 2010, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -25,6 +26,7 @@
#include <myisam.h> // T_EXTEND
#include "ha_archive.h"
+#include "discover.h"
#include <my_dir.h>
#include <mysql/plugin.h>
@@ -104,7 +106,6 @@ static HASH archive_open_tables;
/* The file extension */
#define ARZ ".ARZ" // The data file
#define ARN ".ARN" // Files used during an optimize call
-#define ARM ".ARM" // Meta file (deprecated)
/*
uchar + uchar
@@ -120,10 +121,7 @@ extern "C" PSI_file_key arch_key_file_data;
static handler *archive_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
-int archive_discover(handlerton *hton, THD* thd, const char *db,
- const char *name,
- uchar **frmblob,
- size_t *frmlen);
+int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share);
/*
Number of rows that will force a bulk insert.
@@ -161,12 +159,11 @@ static PSI_mutex_info all_archive_mutexes[]=
{ &az_key_mutex_ARCHIVE_SHARE_mutex, "ARCHIVE_SHARE::mutex", 0}
};
-PSI_file_key arch_key_file_metadata, arch_key_file_data, arch_key_file_frm;
+PSI_file_key arch_key_file_metadata, arch_key_file_data;
static PSI_file_info all_archive_files[]=
{
{ &arch_key_file_metadata, "metadata", 0},
- { &arch_key_file_data, "data", 0},
- { &arch_key_file_frm, "FRM", 0}
+ { &arch_key_file_data, "data", 0}
};
static void init_archive_psi_keys(void)
@@ -198,6 +195,14 @@ static void init_archive_psi_keys(void)
TRUE Error
*/
+/*
+ We just implement one additional file extension.
+*/
+static const char *ha_archive_exts[] = {
+ ARZ,
+ NullS
+};
+
int archive_db_init(void *p)
{
DBUG_ENTER("archive_db_init");
@@ -212,7 +217,8 @@ int archive_db_init(void *p)
archive_hton->db_type= DB_TYPE_ARCHIVE_DB;
archive_hton->create= archive_create_handler;
archive_hton->flags= HTON_NO_FLAGS;
- archive_hton->discover= archive_discover;
+ archive_hton->discover_table= archive_discover;
+ archive_hton->tablefile_extensions= ha_archive_exts;
if (mysql_mutex_init(az_key_mutex_archive_mutex,
&archive_mutex, MY_MUTEX_INIT_FAST))
@@ -261,22 +267,20 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
archive_reader_open= FALSE;
}
-int archive_discover(handlerton *hton, THD* thd, const char *db,
- const char *name,
- uchar **frmblob,
- size_t *frmlen)
+int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share)
{
DBUG_ENTER("archive_discover");
- DBUG_PRINT("archive_discover", ("db: %s, name: %s", db, name));
+ DBUG_PRINT("archive_discover", ("db: '%s' name: '%s'", share->db.str,
+ share->table_name.str));
azio_stream frm_stream;
char az_file[FN_REFLEN];
- char *frm_ptr;
+ uchar *frm_ptr;
MY_STAT file_stat;
- build_table_filename(az_file, sizeof(az_file) - 1, db, name, ARZ, 0);
+ strxmov(az_file, share->normalized_path.str, ARZ, NullS);
if (!(mysql_file_stat(/* arch_key_file_data */ 0, az_file, &file_stat, MYF(0))))
- goto err;
+ DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY)))
{
@@ -286,19 +290,23 @@ int archive_discover(handlerton *hton, THD* thd, const char *db,
}
if (frm_stream.frm_length == 0)
- goto err;
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- frm_ptr= (char *)my_malloc(sizeof(char) * frm_stream.frm_length, MYF(0));
- azread_frm(&frm_stream, frm_ptr);
- azclose(&frm_stream);
+ frm_ptr= (uchar *)my_malloc(sizeof(char) * frm_stream.frm_length,
+ MYF(MY_THREAD_SPECIFIC | MY_WME));
+ if (!frm_ptr)
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- *frmlen= frm_stream.frm_length;
- *frmblob= (uchar*) frm_ptr;
+ if (azread_frm(&frm_stream, frm_ptr))
+ goto ret;
- DBUG_RETURN(0);
-err:
- my_errno= 0;
- DBUG_RETURN(1);
+ azclose(&frm_stream);
+
+ my_errno= share->init_from_binary_frm_image(thd, 1,
+ frm_ptr, frm_stream.frm_length);
+ret:
+ my_free(frm_ptr);
+ DBUG_RETURN(my_errno);
}
/*
@@ -427,6 +435,9 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
*/
if (archive_tmp.version < ARCHIVE_VERSION)
*rc= HA_ERR_TABLE_NEEDS_UPGRADE;
+ else if (frm_compare(&archive_tmp))
+ *rc= HA_ERR_TABLE_DEF_CHANGED;
+
azclose(&archive_tmp);
(void) my_hash_insert(&archive_open_tables, (uchar*) share);
@@ -529,20 +540,6 @@ int ha_archive::init_archive_reader()
}
-/*
- We just implement one additional file extension.
-*/
-static const char *ha_archive_exts[] = {
- ARZ,
- NullS
-};
-
-const char **ha_archive::bas_ext() const
-{
- return ha_archive_exts;
-}
-
-
/*
When opening a file we:
Create/get our shared structure.
@@ -569,14 +566,8 @@ int ha_archive::open(const char *name, int mode, uint open_options)
{
case 0:
break;
+ case HA_ERR_TABLE_DEF_CHANGED:
case HA_ERR_CRASHED_ON_USAGE:
- DBUG_PRINT("ha_archive", ("archive table was crashed"));
- if (open_options & HA_OPEN_FOR_REPAIR)
- {
- rc= 0;
- break;
- }
- /* fall through */
case HA_ERR_TABLE_NEEDS_UPGRADE:
if (open_options & HA_OPEN_FOR_REPAIR)
{
@@ -655,9 +646,10 @@ int ha_archive::close(void)
int ha_archive::frm_copy(azio_stream *src, azio_stream *dst)
{
int rc= 0;
- char *frm_ptr;
+ uchar *frm_ptr;
- if (!(frm_ptr= (char *) my_malloc(src->frm_length, MYF(0))))
+ if (!(frm_ptr= (uchar *) my_malloc(src->frm_length,
+ MYF(MY_THREAD_SPECIFIC | MY_WME))))
return HA_ERR_OUT_OF_MEM;
/* Write file offset is set to the end of the file. */
@@ -671,6 +663,25 @@ int ha_archive::frm_copy(azio_stream *src, azio_stream *dst)
}
+/**
+ Compare frm blob with the on-disk frm file
+
+ @param s The azio stream.
+
+ @return Zero if equal, non-zero otherwise.
+*/
+
+int ha_archive::frm_compare(azio_stream *s)
+{
+ if (!s->frmver_length)
+ return 0; // Old pre-10.0 archive table. Never rediscover.
+
+ LEX_CUSTRING *ver= &table->s->tabledef_version;
+ return ver->length != s->frmver_length ||
+ memcmp(ver->str, s->frmver, ver->length);
+}
+
+
/*
We create our data file here. The format is pretty simple.
You can read about the format of the data file above.
@@ -687,9 +698,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
char linkname[FN_REFLEN];
int error;
azio_stream create_stream; /* Archive file we are working with */
- File frm_file; /* File handler for readers */
- MY_STAT file_stat; // Stat information for the data file
- uchar *frm_ptr;
+ const uchar *frm_ptr;
+ size_t frm_len;
DBUG_ENTER("ha_archive::create");
@@ -738,56 +748,40 @@ int ha_archive::create(const char *name, TABLE *table_arg,
There is a chance that the file was "discovered". In this case
just use whatever file is there.
*/
- if (!(mysql_file_stat(/* arch_key_file_data */ 0, name_buff, &file_stat, MYF(0))))
+ my_errno= 0;
+ if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY)))
{
- my_errno= 0;
- if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY)))
- {
- error= errno;
- goto error2;
- }
+ error= errno;
+ goto error2;
+ }
- if (linkname[0])
- my_symlink(name_buff, linkname, MYF(0));
- fn_format(name_buff, name, "", ".frm",
- MY_REPLACE_EXT | MY_UNPACK_FILENAME);
+ if (linkname[0])
+ my_symlink(name_buff, linkname, MYF(0));
- /*
- Here is where we open up the frm and pass it to archive to store
- */
- if ((frm_file= mysql_file_open(arch_key_file_frm, name_buff, O_RDONLY, MYF(0))) >= 0)
- {
- if (!mysql_file_fstat(frm_file, &file_stat, MYF(MY_WME)))
- {
- frm_ptr= (uchar *)my_malloc(sizeof(uchar) * (size_t)file_stat.st_size, MYF(0));
- if (frm_ptr)
- {
- mysql_file_read(frm_file, frm_ptr, (size_t)file_stat.st_size, MYF(0));
- azwrite_frm(&create_stream, (char *)frm_ptr, (size_t)file_stat.st_size);
- my_free(frm_ptr);
- }
- }
- mysql_file_close(frm_file, MYF(0));
- }
+ /*
+ Here is where we open up the frm and pass it to archive to store
+ */
+ if (!table_arg->s->read_frm_image(&frm_ptr, &frm_len))
+ {
+ azwrite_frm(&create_stream, frm_ptr, frm_len);
+ table_arg->s->free_frm_image(frm_ptr);
+ }
- if (create_info->comment.str)
- azwrite_comment(&create_stream, create_info->comment.str,
- create_info->comment.length);
+ if (create_info->comment.str)
+ azwrite_comment(&create_stream, create_info->comment.str,
+ create_info->comment.length);
- /*
- Yes you need to do this, because the starting value
- for the autoincrement may not be zero.
- */
- create_stream.auto_increment= stats.auto_increment_value ?
- stats.auto_increment_value - 1 : 0;
- if (azclose(&create_stream))
- {
- error= errno;
- goto error2;
- }
+ /*
+ Yes you need to do this, because the starting value
+ for the autoincrement may not be zero.
+ */
+ create_stream.auto_increment= stats.auto_increment_value ?
+ stats.auto_increment_value - 1 : 0;
+ if (azclose(&create_stream))
+ {
+ error= errno;
+ goto error2;
}
- else
- my_errno= 0;
DBUG_PRINT("ha_archive", ("Creating File %s", name_buff));
DBUG_PRINT("ha_archive", ("Creating Link %s", linkname));
@@ -906,10 +900,11 @@ int ha_archive::write_row(uchar *buf)
mysql_mutex_lock(&share->mutex);
- if (!share->archive_write_open)
- if (init_archive_writer())
- DBUG_RETURN(errno);
-
+ if (!share->archive_write_open && init_archive_writer())
+ {
+ rc= errno;
+ goto error;
+ }
if (table->next_number_field && record == table->record[0])
{
@@ -939,7 +934,8 @@ int ha_archive::write_row(uchar *buf)
First we create a buffer that we can use for reading rows, and can pass
to get_row().
*/
- if (!(read_buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ if (!(read_buf= (uchar*) my_malloc(table->s->reclength,
+ MYF(MY_THREAD_SPECIFIC | MY_WME))))
{
rc= HA_ERR_OUT_OF_MEM;
goto error;
@@ -989,7 +985,6 @@ int ha_archive::write_row(uchar *buf)
error:
mysql_mutex_unlock(&share->mutex);
my_free(read_buf);
-
DBUG_RETURN(rc);
}
@@ -1809,23 +1804,6 @@ void ha_archive::destroy_record_buffer(archive_record_buffer *r)
struct st_mysql_storage_engine archive_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
-mysql_declare_plugin(archive)
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &archive_storage_engine,
- "ARCHIVE",
- "Brian Aker, MySQL AB",
- "Archive storage engine",
- PLUGIN_LICENSE_GPL,
- archive_db_init, /* Plugin Init */
- archive_db_done, /* Plugin Deinit */
- 0x0300 /* 3.0 */,
- NULL, /* status variables */
- NULL, /* system variables */
- NULL, /* config options */
- 0, /* flags */
-}
-mysql_declare_plugin_end;
maria_declare_plugin(archive)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 627267c7306..7e8d5cee47b 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -51,7 +51,7 @@ typedef struct st_archive_share {
Version for file format.
1 - Initial Version (Never Released)
2 - Stream Compression, seperate blobs, no packing
- 3 - One steam (row and blobs), with packing
+ 3 - One stream (row and blobs), with packing
*/
#define ARCHIVE_VERSION 3
@@ -76,6 +76,7 @@ class ha_archive: public handler
archive_record_buffer *create_record_buffer(unsigned int length);
void destroy_record_buffer(archive_record_buffer *r);
int frm_copy(azio_stream *src, azio_stream *dst);
+ int frm_compare(azio_stream *src);
public:
ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
@@ -83,7 +84,6 @@ public:
{
}
const char *index_type(uint inx) { return "NONE"; }
- const char **bas_ext() const;
ulonglong table_flags() const
{
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_CAN_BIT_FIELD |
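
Taken together, the ha_archive.cc and ha_archive.h changes above move ARCHIVE to the 10.0
handlerton-based discovery: the engine registers its file extensions and a discover_table
callback, and get_share() returns HA_ERR_TABLE_DEF_CHANGED when the frm version stored in
the ARZ header no longer matches table->s->tabledef_version, which forces rediscovery. A
hedged skeleton of such a callback for some hypothetical engine "foo" (foo_read_stored_frm
is an invented helper, not a MariaDB function):

    static int foo_discover_table(handlerton *hton, THD *thd, TABLE_SHARE *share)
    {
      uchar  *frm;
      size_t  frm_len;

      /* load the frm image the engine saved at CREATE TABLE time */
      if (foo_read_stored_frm(share->normalized_path.str, &frm, &frm_len))
        return HA_ERR_NO_SUCH_TABLE;         /* nothing to discover */

      /* let the server rebuild the TABLE_SHARE from the binary image,
         exactly as archive_discover() does above */
      int error= share->init_from_binary_frm_image(thd, 1, frm, frm_len);
      my_free(frm);
      return error;
    }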
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 812623314bf..91a2c70cb9f 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -52,15 +52,6 @@ ha_blackhole::ha_blackhole(handlerton *hton,
{}
-static const char *ha_blackhole_exts[] = {
- NullS
-};
-
-const char **ha_blackhole::bas_ext() const
-{
- return ha_blackhole_exts;
-}
-
int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
{
DBUG_ENTER("ha_blackhole::open");
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index 51857f3bb2a..b70320848d7 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -51,7 +51,6 @@ public:
don't implement this method unless you really have indexes
*/
const char *index_type(uint key_number);
- const char **bas_ext() const;
ulonglong table_flags() const
{
return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index e8c5b844e3d..a9a97806a7e 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -369,16 +369,6 @@ ha_cassandra::ha_cassandra(handlerton *hton, TABLE_SHARE *table_arg)
{}
-static const char *ha_cassandra_exts[] = {
- NullS
-};
-
-const char **ha_cassandra::bas_ext() const
-{
- return ha_cassandra_exts;
-}
-
-
int ha_cassandra::connect_and_check_options(TABLE *table_arg)
{
ha_table_option_struct *options= table_arg->s->option_struct;
@@ -2578,21 +2568,21 @@ struct st_mysql_storage_engine cassandra_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
static SHOW_VAR cassandra_status_variables[]= {
- {"Cassandra_row_inserts",
+ {"row_inserts",
(char*) &cassandra_counters.row_inserts, SHOW_LONG},
- {"Cassandra_row_insert_batches",
+ {"row_insert_batches",
(char*) &cassandra_counters.row_insert_batches, SHOW_LONG},
- {"Cassandra_multiget_keys_scanned",
+ {"multiget_keys_scanned",
(char*) &cassandra_counters.multiget_keys_scanned, SHOW_LONG},
- {"Cassandra_multiget_reads",
+ {"multiget_reads",
(char*) &cassandra_counters.multiget_reads, SHOW_LONG},
- {"Cassandra_multiget_rows_read",
+ {"multiget_rows_read",
(char*) &cassandra_counters.multiget_rows_read, SHOW_LONG},
- {"Cassandra_timeout_exceptions",
+ {"timeout_exceptions",
(char*) &cassandra_counters.timeout_exceptions, SHOW_LONG},
- {"Cassandra_unavailable_exceptions",
+ {"unavailable_exceptions",
(char*) &cassandra_counters.unavailable_exceptions, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
};
diff --git a/storage/cassandra/ha_cassandra.h b/storage/cassandra/ha_cassandra.h
index cb2f9fb237b..0c225c58780 100644
--- a/storage/cassandra/ha_cassandra.h
+++ b/storage/cassandra/ha_cassandra.h
@@ -132,11 +132,6 @@ public:
const char *index_type(uint inx) { return "HASH"; }
/** @brief
- The file extensions.
- */
- const char **bas_ext() const;
-
- /** @brief
This is a list of flags that indicate what functionality the storage engine
implements. The current table flags are documented in handler.h
*/
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 916c7b151de..c97cfc57bdb 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -136,6 +136,16 @@ static void init_tina_psi_keys(void)
}
#endif /* HAVE_PSI_INTERFACE */
+/*
+ If frm_error() is called in table.cc this is called to find out what file
+ extensions exist for this handler.
+*/
+static const char *ha_tina_exts[] = {
+ CSV_EXT,
+ CSM_EXT,
+ NullS
+};
+
static int tina_init_func(void *p)
{
handlerton *tina_hton;
@@ -153,6 +163,7 @@ static int tina_init_func(void *p)
tina_hton->create= tina_create_handler;
tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES |
HTON_NO_PARTITION);
+ tina_hton->tablefile_extensions= ha_tina_exts;
return 0;
}
@@ -825,21 +836,6 @@ err:
}
/*
- If frm_error() is called in table.cc this is called to find out what file
- extensions exist for this handler.
-*/
-static const char *ha_tina_exts[] = {
- CSV_EXT,
- CSM_EXT,
- NullS
-};
-
-const char **ha_tina::bas_ext() const
-{
- return ha_tina_exts;
-}
-
-/*
Three functions below are needed to enable concurrent insert functionality
for CSV engine. For more details see mysys/thr_lock.c
*/
@@ -1413,9 +1409,9 @@ int ha_tina::rnd_end()
DBUG_RETURN(-1);
/* Open the file again */
- if (((data_file= mysql_file_open(csv_key_file_data,
- share->data_file_name,
- O_RDONLY, MYF(MY_WME))) == -1))
+ if ((data_file= mysql_file_open(csv_key_file_data,
+ share->data_file_name,
+ O_RDONLY, MYF(MY_WME))) == -1)
DBUG_RETURN(my_errno ? my_errno : -1);
/*
As we reopened the data file, increase share->data_file_version
@@ -1754,23 +1750,6 @@ bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
struct st_mysql_storage_engine csv_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
-mysql_declare_plugin(csv)
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &csv_storage_engine,
- "CSV",
- "Brian Aker, MySQL AB",
- "CSV storage engine",
- PLUGIN_LICENSE_GPL,
- tina_init_func, /* Plugin Init */
- tina_done_func, /* Plugin Deinit */
- 0x0100 /* 1.0 */,
- NULL, /* status variables */
- NULL, /* system variables */
- NULL, /* config options */
- 0, /* flags */
-}
-mysql_declare_plugin_end;
maria_declare_plugin(csv)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 26404b3a9e7..ebf62fbed65 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -103,7 +103,6 @@ public:
free_root(&blobroot, MYF(0));
}
const char *index_type(uint inx) { return "NONE"; }
- const char **bas_ext() const;
ulonglong table_flags() const
{
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT |
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 98968d0b5b5..588cf11f641 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -118,6 +118,8 @@ static HASH example_open_tables;
/* The mutex used to init the hash; variable for example share methods */
mysql_mutex_t example_mutex;
+static MYSQL_THDVAR_ULONG(varopt_default, PLUGIN_VAR_RQCMDARG,
+ "default value of the VAROPT table option", NULL, NULL, 5, 0, 100, 0);
/**
Structure for CREATE TABLE options (table options).
@@ -133,6 +135,7 @@ struct ha_table_option_struct
ulonglong ullparam;
uint enumparam;
bool boolparam;
+ ulonglong varparam;
};
@@ -179,6 +182,12 @@ ha_create_table_option example_table_option_list[]=
The default is 1, that is true, yes, on.
*/
HA_TOPTION_BOOL("YESNO", boolparam, 1),
+ /*
+ one option defined by the system variable. The type, the range, or
+ a list of allowed values is the same as for the system variable.
+ */
+ HA_TOPTION_SYSVAR("VAROPT", varparam, varopt_default),
+
HA_TOPTION_END
};
@@ -229,6 +238,27 @@ static void init_example_psi_keys()
#endif
+/**
+ @brief
+ If frm_error() is called then we will use this to determine
+ the file extensions that exist for the storage engine. This is also
+ used by the default rename_table and delete_table method in
+ handler.cc and by the default discover_many method.
+
+ For engines that have two file name extentions (separate meta/index file
+ and data file), the order of elements is relevant. First element of engine
+ file name extentions array should be meta/index file extention. Second
+ element - data file extention. This order is assumed by
+ prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
+
+ @see
+ rename_table method in handler.cc and
+ delete_table method in handler.cc
+*/
+
+static const char *ha_example_exts[] = {
+ NullS
+};
static int example_init_func(void *p)
{
DBUG_ENTER("example_init_func");
@@ -247,6 +277,7 @@ static int example_init_func(void *p)
example_hton->flags= HTON_CAN_RECREATE;
example_hton->table_options= example_table_option_list;
example_hton->field_options= example_field_option_list;
+ example_hton->tablefile_extensions= ha_example_exts;
DBUG_RETURN(0);
}
@@ -355,33 +386,6 @@ ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg)
/**
@brief
- If frm_error() is called then we will use this to determine
- the file extensions that exist for the storage engine. This is also
- used by the default rename_table and delete_table method in
- handler.cc.
-
- For engines that have two file name extentions (separate meta/index file
- and data file), the order of elements is relevant. First element of engine
- file name extentions array should be meta/index file extention. Second
- element - data file extention. This order is assumed by
- prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
-
- @see
- rename_table method in handler.cc and
- delete_table method in handler.cc
-*/
-
-static const char *ha_example_exts[] = {
- NullS
-};
-
-const char **ha_example::bas_ext() const
-{
- return ha_example_exts;
-}
-
-/**
- @brief
Used for opening tables. The name will be the name of the file.
@details
@@ -1092,6 +1096,7 @@ static MYSQL_SYSVAR_ULONG(
static struct st_mysql_sys_var* example_system_variables[]= {
MYSQL_SYSVAR(enum_var),
MYSQL_SYSVAR(ulong_var),
+ MYSQL_SYSVAR(varopt_default),
NULL
};
@@ -1110,7 +1115,7 @@ static int show_func_example(MYSQL_THD thd, struct st_mysql_show_var *var,
static struct st_mysql_show_var func_status[]=
{
- {"example_func_example", (char *)show_func_example, SHOW_SIMPLE_FUNC},
+ {"func_example", (char *)show_func_example, SHOW_SIMPLE_FUNC},
{0,0,SHOW_UNDEF}
};
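
The HA_TOPTION_SYSVAR addition above ties the new VAROPT table option to the varopt_default
session variable, so a table created without an explicit VAROPT inherits whatever the
creating session had set (e.g. SET example_varopt_default=33 before CREATE TABLE, assuming
the usual plugin-name prefix for THDVARs). An illustrative consumer, not part of the example
engine itself:

    static void dump_varopt(TABLE *table)
    {
      ha_table_option_struct *options= table->s->option_struct;

      /* varparam holds either the explicit VAROPT=... value or the value of
         @@example_varopt_default at CREATE TABLE time */
      fprintf(stderr, "VAROPT for %s: %llu\n",
              table->s->table_name.str,
              (unsigned long long) options->varparam);
    }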
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index 9be370edfe3..d3d31893781 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -73,11 +73,6 @@ public:
const char *index_type(uint inx) { return "HASH"; }
/** @brief
- The file extensions.
- */
- const char **bas_ext() const;
-
- /** @brief
This is a list of flags that indicate what functionality the storage engine
implements. The current table flags are documented in handler.h
*/
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 0c07af0a554..7cd7e82f69e 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -1615,21 +1615,6 @@ ha_rows ha_federated::records_in_range(uint inx, key_range *start_key,
DBUG_ENTER("ha_federated::records_in_range");
DBUG_RETURN(FEDERATED_RECORDS_IN_RANGE);
}
-/*
- If frm_error() is called then we will use this to to find out
- what file extentions exist for the storage engine. This is
- also used by the default rename_table and delete_table method
- in handler.cc.
-*/
-
-const char **ha_federated::bas_ext() const
-{
- static const char *ext[]=
- {
- NullS
- };
- return ext;
-}
/*
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index fc2c4740cc0..31f33f5f789 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -134,7 +134,6 @@ public:
*/
// perhaps get index type
const char *index_type(uint inx) { return "REMOTE"; }
- const char **bas_ext() const;
/*
This is a list of flags that says what the storage engine
implements. The current table flags are documented in
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index e1c2a38964a..d760fcf082f 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -426,6 +426,7 @@ int federatedx_db_init(void *p)
federatedx_hton->savepoint_release= ha_federatedx::savepoint_release;
federatedx_hton->commit= ha_federatedx::commit;
federatedx_hton->rollback= ha_federatedx::rollback;
+ federatedx_hton->discover_table_structure= ha_federatedx::discover_assisted;
federatedx_hton->create= federatedx_create_handler;
federatedx_hton->flags= HTON_ALTER_NOT_SUPPORTED;
@@ -516,15 +517,16 @@ err:
}
-static int parse_url_error(FEDERATEDX_SHARE *share, TABLE *table, int error_num)
+static int parse_url_error(FEDERATEDX_SHARE *share, TABLE_SHARE *table_s,
+ int error_num)
{
char buf[FEDERATEDX_QUERY_BUFFER_SIZE];
int buf_len;
DBUG_ENTER("ha_federatedx parse_url_error");
- buf_len= min(table->s->connect_string.length,
+ buf_len= min(table_s->connect_string.length,
FEDERATEDX_QUERY_BUFFER_SIZE-1);
- strmake(buf, table->s->connect_string.str, buf_len);
+ strmake(buf, table_s->connect_string.str, buf_len);
my_error(error_num, MYF(0), buf);
DBUG_RETURN(error_num);
}
@@ -646,8 +648,8 @@ error:
*/
-static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
- uint table_create_flag)
+static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share,
+ TABLE_SHARE *table_s, uint table_create_flag)
{
uint error_num= (table_create_flag ?
ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE :
@@ -657,11 +659,11 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
share->port= 0;
share->socket= 0;
DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
- DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length));
- DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length,
- table->s->connect_string.str));
- share->connection_string= strmake_root(mem_root, table->s->connect_string.str,
- table->s->connect_string.length);
+ DBUG_PRINT("info", ("Length: %u", (uint) table_s->connect_string.length));
+ DBUG_PRINT("info", ("String: '%.*s'", (int) table_s->connect_string.length,
+ table_s->connect_string.str));
+ share->connection_string= strmake_root(mem_root, table_s->connect_string.str,
+ table_s->connect_string.length);
DBUG_PRINT("info",("parse_url alloced share->connection_string %lx",
(long unsigned int) share->connection_string));
@@ -714,9 +716,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
Connection specifies everything but, resort to
expecting remote and foreign table names to match
*/
- share->table_name= strmake_root(mem_root, table->s->table_name.str,
+ share->table_name= strmake_root(mem_root, table_s->table_name.str,
(share->table_name_length=
- table->s->table_name.length));
+ table_s->table_name.length));
DBUG_PRINT("info",
("internal format, default table_name "
"share->connection_string: %s share->table_name: %s",
@@ -730,7 +732,7 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
{
share->parsed= TRUE;
// Add a null for later termination of table name
- share->connection_string[table->s->connect_string.length]= 0;
+ share->connection_string[table_s->connect_string.length]= 0;
share->scheme= share->connection_string;
DBUG_PRINT("info",("parse_url alloced share->scheme: %lx",
(ulong) share->scheme));
@@ -764,7 +766,7 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
user:@hostname:port/db/table
Then password is a null string, so set to NULL
*/
- if ((share->password[0] == '\0'))
+ if (share->password[0] == '\0')
share->password= NULL;
}
@@ -817,7 +819,7 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
DBUG_RETURN(0);
error:
- DBUG_RETURN(parse_url_error(share, table, error_num));
+ DBUG_RETURN(parse_url_error(share, table_s, error_num));
}
/*****************************************************************************
@@ -1583,7 +1585,7 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
tmp_share.share_key= table_name;
tmp_share.share_key_length= strlen(table_name);
- if (parse_url(&mem_root, &tmp_share, table, 0))
+ if (parse_url(&mem_root, &tmp_share, table->s, 0))
goto error;
/* TODO: change tmp_share.scheme to LEX_STRING object */
@@ -1719,22 +1721,6 @@ ha_rows ha_federatedx::records_in_range(uint inx, key_range *start_key,
DBUG_ENTER("ha_federatedx::records_in_range");
DBUG_RETURN(FEDERATEDX_RECORDS_IN_RANGE);
}
-/*
- If frm_error() is called then we will use this to to find out
- what file extentions exist for the storage engine. This is
- also used by the default rename_table and delete_table method
- in handler.cc.
-*/
-
-const char **ha_federatedx::bas_ext() const
-{
- static const char *ext[]=
- {
- NullS
- };
- return ext;
-}
-
federatedx_txn *ha_federatedx::get_txn(THD *thd, bool no_create)
{
@@ -3366,7 +3352,7 @@ int ha_federatedx::create(const char *name, TABLE *table_arg,
federatedx_io *tmp_io= NULL;
DBUG_ENTER("ha_federatedx::create");
- if ((retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1)))
+ if ((retval= parse_url(thd->mem_root, &tmp_share, table_arg->s, 1)))
goto error;
/* loopback socket connections hang due to LOCK_open mutex */
@@ -3588,6 +3574,72 @@ int ha_federatedx::rollback(handlerton *hton, MYSQL_THD thd, bool all)
DBUG_RETURN(return_val);
}
+
+/*
+ Federated supports assisted discovery, like
+ CREATE TABLE t1 CONNECTION="mysql://joe:pass@192.168.1.111/federated/t1";
+ but not a fully automatic discovery where a table magically appear
+ on any use (like, on SELECT * from t1).
+*/
+int ha_federatedx::discover_assisted(handlerton *hton, THD* thd,
+ TABLE_SHARE *table_s, HA_CREATE_INFO *info)
+{
+ int error= HA_ERR_NO_CONNECTION;
+ FEDERATEDX_SHARE tmp_share;
+ CHARSET_INFO *cs= system_charset_info;
+ MYSQL mysql;
+ char buf[1024];
+ String query(buf, sizeof(buf), cs);
+ MYSQL_RES *res;
+ MYSQL_ROW rdata;
+ ulong *rlen;
+
+ if (parse_url(thd->mem_root, &tmp_share, table_s, 1))
+ return HA_WRONG_CREATE_OPTION;
+
+ mysql_init(&mysql);
+ mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, cs->csname);
+
+ if (!mysql_real_connect(&mysql, tmp_share.hostname, tmp_share.username,
+ tmp_share.password, tmp_share.database,
+ tmp_share.port, tmp_share.socket, 0))
+ goto err1;
+
+ if (mysql_real_query(&mysql, STRING_WITH_LEN("SET SQL_MODE=NO_TABLE_OPTIONS")))
+ goto err1;
+
+ query.copy(STRING_WITH_LEN("SHOW CREATE TABLE "), cs);
+ append_ident(&query, tmp_share.table_name,
+ tmp_share.table_name_length, ident_quote_char);
+
+ if (mysql_real_query(&mysql, query.ptr(), query.length()))
+ goto err1;
+
+ if (!((res= mysql_store_result(&mysql))))
+ goto err1;
+
+ if (!(rdata= mysql_fetch_row(res)) || !((rlen= mysql_fetch_lengths(res))))
+ goto err2;
+
+ query.copy(rdata[1], rlen[1], cs);
+ query.append(STRING_WITH_LEN(" CONNECTION='"), cs);
+ query.append_for_single_quote(table_s->connect_string.str,
+ table_s->connect_string.length);
+ query.append('\'');
+
+ error= table_s->init_from_sql_statement_string(thd, true,
+ query.ptr(), query.length());
+
+err2:
+ mysql_free_result(res);
+err1:
+ if (error)
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), mysql_error(&mysql));
+ mysql_close(&mysql);
+ return error;
+}
+
+
struct st_mysql_storage_engine federatedx_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
@@ -3601,10 +3653,10 @@ maria_declare_plugin(federatedx)
PLUGIN_LICENSE_GPL,
federatedx_db_init, /* Plugin Init */
federatedx_done, /* Plugin Deinit */
- 0x0200 /* 2.0 */,
+ 0x0201 /* 2.1 */,
NULL, /* status variables */
NULL, /* system variables */
- "2.0", /* string version */
+ "2.1", /* string version */
MariaDB_PLUGIN_MATURITY_BETA /* maturity */
}
maria_declare_plugin_end;
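
discover_assisted() above implements the 10.0 assisted-discovery hook: when CREATE TABLE is
issued with a CONNECTION string but no column list, the server calls
hton->discover_table_structure() and the engine supplies the definition, here by replaying
the remote SHOW CREATE TABLE output through init_from_sql_statement_string(). A toy sketch
of the same contract for a hypothetical engine with a fixed layout (everything named toy_*
is invented for illustration):

    static int toy_discover_structure(handlerton *hton, THD *thd,
                                      TABLE_SHARE *share, HA_CREATE_INFO *info)
    {
      char   buf[128];
      size_t len= my_snprintf(buf, sizeof(buf),
                              "CREATE TABLE %s (id bigint, payload blob)",
                              share->table_name.str);

      /* same call that ha_federatedx::discover_assisted() ends with */
      return share->init_from_sql_statement_string(thd, true, buf, len);
    }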
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index dcbbe534262..44c20b6703a 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -297,6 +297,8 @@ private:
static int savepoint_release(handlerton *hton, MYSQL_THD thd, void *sv);
static int commit(handlerton *hton, MYSQL_THD thd, bool all);
static int rollback(handlerton *hton, MYSQL_THD thd, bool all);
+ static int discover_assisted(handlerton *, THD*, TABLE_SHARE *,
+ HA_CREATE_INFO *);
bool append_stmt_insert(String *query);
@@ -317,7 +319,6 @@ public:
*/
// perhaps get index type
const char *index_type(uint inx) { return "REMOTE"; }
- const char **bas_ext() const;
/*
This is a list of flags that says what the storage engine
implements. The current table flags are documented in
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 8e63799680b..916abaa60ea 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -75,16 +75,6 @@ ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
internal_table(0)
{}
-
-static const char *ha_heap_exts[] = {
- NullS
-};
-
-const char **ha_heap::bas_ext() const
-{
- return ha_heap_exts;
-}
-
/*
Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to
rec_per_key) after 1/HEAP_STATS_UPDATE_THRESHOLD fraction of table records
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index 30ad06e2c06..74824b66c42 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -45,7 +45,6 @@ public:
}
/* Rows also use a fixed-size format */
enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
- const char **bas_ext() const;
ulonglong table_flags() const
{
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c
index d00ac94a918..043e4d3540d 100644
--- a/storage/heap/hp_delete.c
+++ b/storage/heap/hp_delete.c
@@ -48,7 +48,6 @@ int heap_delete(HP_INFO *info, const uchar *record)
pos[share->reclength]=0; /* Record deleted */
share->deleted++;
share->key_version++;
- info->current_hash_ptr=0;
#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
DBUG_EXECUTE("check_heap",heap_check_heap(info, 0););
#endif
@@ -105,7 +104,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
const uchar *record, uchar *recpos, int flag)
{
- ulong blength,pos2,pos_hashnr,lastpos_hashnr;
+ ulong blength, pos2, pos_hashnr, lastpos_hashnr, key_pos;
HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr;
HP_SHARE *share=info->s;
DBUG_ENTER("hp_delete_key");
@@ -117,9 +116,9 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
last_ptr=0;
/* Search after record with key */
- pos= hp_find_hash(&keyinfo->block,
- hp_mask(hp_rec_hashnr(keyinfo, record), blength,
- share->records + 1));
+ key_pos= hp_mask(hp_rec_hashnr(keyinfo, record), blength, share->records + 1);
+ pos= hp_find_hash(&keyinfo->block, key_pos);
+
gpos = pos3 = 0;
while (pos->ptr_to_rec != recpos)
@@ -182,21 +181,50 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
}
pos2= hp_mask(lastpos_hashnr, blength, share->records + 1);
if (pos2 == hp_mask(pos_hashnr, blength, share->records + 1))
- { /* Identical key-positions */
+ {
+ /* lastpos and the row in the main bucket entry (pos) has the same hash */
if (pos2 != share->records)
{
- empty[0]=lastpos[0];
+ /*
+ The bucket entry was not deleted. Copy lastpos over the
+ deleted entry and update previous link to point to it.
+ */
+ empty[0]= lastpos[0];
hp_movelink(lastpos, pos, empty);
+ if (last_ptr == lastpos)
+ {
+ /*
+ We moved the row that info->current_hash_ptr points to.
+ Update info->current_hash_ptr to point to the new position.
+ */
+ info->current_hash_ptr= empty;
+ }
DBUG_RETURN(0);
}
- pos3= pos; /* Link pos->next after lastpos */
- }
- else
- {
- pos3= 0; /* Different positions merge */
- keyinfo->hash_buckets--;
+ /*
+ Shrinking the hash table deleted the main bucket entry for this hash.
+ In this case the last entry was the first key in the key chain.
+ We move things around so that we keep the original key order to ensure
+ that heap_rnext() works.
+
+ - Move the row at the main bucket entry to the empty spot.
+ - Move the last entry first in the new chain.
+ - Link in the first element of the hash.
+ */
+ empty[0]= pos[0];
+ pos[0]= lastpos[0];
+ hp_movelink(pos, pos, empty);
+
+ /* Update current_hash_ptr if the entry moved */
+ if (last_ptr == lastpos)
+ info->current_hash_ptr= pos;
+ else if (last_ptr == pos)
+ info->current_hash_ptr= empty;
+ DBUG_RETURN(0);
}
+ pos3= 0; /* Different positions merge */
+ keyinfo->hash_buckets--;
empty[0]=lastpos[0];
hp_movelink(pos3, empty, pos->next_key);
pos->next_key=empty;
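
The rewritten branch above has to know which hash bucket a row belongs to after the table
has shrunk by one slot. HEAP uses linear hashing: a hash value is first masked to the current
power-of-two size, and if that bucket does not exist yet it falls back to the previous power
of two. Roughly what the hp_mask() calls in this file compute, as a standalone illustration
(not the engine's actual macro):

    static ulong pick_bucket(ulong hashnr, ulong blength, ulong n_buckets)
    {
      ulong pos= hashnr & (blength - 1);      /* try the larger mask first    */
      if (pos >= n_buckets)                   /* that bucket is not split yet */
        pos= hashnr & ((blength >> 1) - 1);   /* fall back to the smaller one */
      return pos;
    }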
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 8b7a19777ab..8eae3c7e3bc 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -3098,6 +3098,8 @@ btr_lift_page_up(
buf_block_t* blocks[BTR_MAX_LEVELS];
ulint n_blocks; /*!< last used index in blocks[] */
ulint i;
+ ibool lift_father_up = FALSE;
+ buf_block_t* block_orig = block;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
@@ -3108,11 +3110,13 @@ btr_lift_page_up(
{
btr_cur_t cursor;
- mem_heap_t* heap = mem_heap_create(100);
- ulint* offsets;
+ ulint* offsets = NULL;
+ mem_heap_t* heap = mem_heap_create(
+ sizeof(*offsets)
+ * (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields));
buf_block_t* b;
- offsets = btr_page_get_father_block(NULL, heap, index,
+ offsets = btr_page_get_father_block(offsets, heap, index,
block, mtr, &cursor);
father_block = btr_cur_get_block(&cursor);
father_page_zip = buf_block_get_page_zip(father_block);
@@ -3136,6 +3140,29 @@ btr_lift_page_up(
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
}
+ if (n_blocks && page_level == 0) {
+ /* The father page also should be the only on its level (not
+ root). We should lift up the father page at first.
+ Because the leaf page should be lifted up only for root page.
+ The freeing page is based on page_level (==0 or !=0)
+ to choose segment. If the page_level is changed ==0 from !=0,
+ later freeing of the page doesn't find the page allocation
+ to be freed.*/
+
+ lift_father_up = TRUE;
+ block = father_block;
+ page = buf_block_get_frame(block);
+ page_level = btr_page_get_level(page, mtr);
+
+ ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
+ ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
+ ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+
+ father_block = blocks[0];
+ father_page_zip = buf_block_get_page_zip(father_block);
+ father_page = buf_block_get_frame(father_block);
+ }
+
mem_heap_free(heap);
}
@@ -3143,6 +3170,7 @@ btr_lift_page_up(
/* Make the father empty */
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
+ page_level++;
/* Copy the records to the father page one by one. */
if (0
@@ -3174,7 +3202,7 @@ btr_lift_page_up(
lock_update_copy_and_discard(father_block, block);
/* Go upward to root page, decrementing levels by one. */
- for (i = 0; i < n_blocks; i++, page_level++) {
+ for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) {
page_t* page = buf_block_get_frame(blocks[i]);
page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
@@ -3196,7 +3224,7 @@ btr_lift_page_up(
ut_ad(page_validate(father_page, index));
ut_ad(btr_check_node_ptr(index, father_block, mtr));
- return(father_block);
+ return(lift_father_up ? block_orig : father_block);
}
/*************************************************************//**
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index aeb16200f80..56cce411bba 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -97,6 +97,11 @@ srv_refresh_innodb_monitor_stats(). Referenced by
srv_printf_innodb_monitor(). */
UNIV_INTERN ulint btr_cur_n_sea_old = 0;
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+UNIV_INTERN uint btr_cur_limit_optimistic_insert_debug = 0;
+#endif /* UNIV_DEBUG */
+
/** In the optimistic insert, if the insert does not fit, but this much space
can be released by page reorganize, then it is reorganized */
#define BTR_CUR_PAGE_REORGANIZE_LIMIT (UNIV_PAGE_SIZE / 32)
@@ -1276,6 +1281,9 @@ btr_cur_optimistic_insert(
}
}
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
+ goto fail);
+
/* If there have been many consecutive inserts, and we are on the leaf
level, check if we have to split the page to reserve enough free space
for future updates of records. */
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 96821478e60..28d5a472531 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -383,6 +383,33 @@ buf_get_total_list_len(
}
/********************************************************************//**
+Get total list size in bytes from all buffer pools. */
+UNIV_INTERN
+void
+buf_get_total_list_size_in_bytes(
+/*=============================*/
+ buf_pools_list_size_t* buf_pools_list_size) /*!< out: list sizes
+ in all buffer pools */
+{
+ ulint i;
+ ut_ad(buf_pools_list_size);
+ memset(buf_pools_list_size, 0, sizeof(*buf_pools_list_size));
+
+ for (i = 0; i < srv_buf_pool_instances; i++) {
+ buf_pool_t* buf_pool;
+
+ buf_pool = buf_pool_from_array(i);
+ /* We don't need mutex protection since this is
+ for statistics purposes only */
+ buf_pools_list_size->LRU_bytes += buf_pool->stat.LRU_bytes;
+ buf_pools_list_size->unzip_LRU_bytes +=
+ UT_LIST_GET_LEN(buf_pool->unzip_LRU) * UNIV_PAGE_SIZE;
+ buf_pools_list_size->flush_list_bytes +=
+ buf_pool->stat.flush_list_bytes;
+ }
+}
+
+/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
@@ -1802,34 +1829,24 @@ buf_page_make_young(
}
/********************************************************************//**
-Sets the time of the first access of a page and moves a page to the
-start of the buffer pool LRU list if it is too old. This high-level
-function can be used to prevent an important page from slipping
-out of the buffer pool. */
+Moves a page to the start of the buffer pool LRU list if it is too old.
+This high-level function can be used to prevent an important page from
+slipping out of the buffer pool. */
static
void
-buf_page_set_accessed_make_young(
-/*=============================*/
- buf_page_t* bpage, /*!< in/out: buffer block of a
+buf_page_make_young_if_needed(
+/*==========================*/
+ buf_page_t* bpage) /*!< in/out: buffer block of a
file page */
- unsigned access_time) /*!< in: bpage->access_time
- read under mutex protection,
- or 0 if unknown */
{
+#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
-
ut_ad(!buf_pool_mutex_own(buf_pool));
+#endif /* UNIV_DEBUG */
ut_a(buf_page_in_file(bpage));
if (buf_page_peek_if_too_old(bpage)) {
- buf_pool_mutex_enter(buf_pool);
- buf_LRU_make_block_young(bpage);
- buf_pool_mutex_exit(buf_pool);
- } else if (!access_time) {
- ulint time_ms = ut_time_ms();
- buf_pool_mutex_enter(buf_pool);
- buf_page_set_accessed(bpage, time_ms);
- buf_pool_mutex_exit(buf_pool);
+ buf_page_make_young(bpage);
}
}
@@ -1978,7 +1995,6 @@ buf_page_get_zip(
rw_lock_t* hash_lock;
ibool discard_attempted = FALSE;
ibool must_read;
- unsigned access_time;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
buf_pool->stat.n_page_gets++;
@@ -2051,15 +2067,17 @@ err_exit:
got_block:
must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
- access_time = buf_page_is_accessed(bpage);
rw_lock_s_unlock(hash_lock);
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(!bpage->file_page_was_freed);
#endif
+
+ buf_page_set_accessed(bpage);
+
mutex_exit(block_mutex);
- buf_page_set_accessed_make_young(bpage, access_time);
+ buf_page_make_young_if_needed(bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(++buf_dbg_counter % 5771 || buf_validate());
@@ -2671,22 +2689,29 @@ wait_until_unfixed:
UNIV_MEM_INVALID(bpage, sizeof *bpage);
rw_lock_x_unlock(hash_lock);
- mutex_exit(&block->mutex);
- mutex_exit(&buf_pool->zip_mutex);
buf_pool->n_pend_unzip++;
-
buf_pool_mutex_exit(buf_pool);
+ access_time = buf_page_is_accessed(&block->page);
+ mutex_exit(&block->mutex);
+ mutex_exit(&buf_pool->zip_mutex);
+
buf_page_free_descriptor(bpage);
- /* Decompress the page and apply buffered operations
- while not holding buf_pool->mutex or block->mutex. */
+ /* Decompress the page while not holding
+ buf_pool->mutex or block->mutex. */
ut_a(buf_zip_decompress(block, TRUE));
if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
- ibuf_merge_or_delete_for_page(block, space, offset,
- zip_size, TRUE);
+ if (access_time) {
+#ifdef UNIV_IBUF_COUNT_DEBUG
+ ut_a(ibuf_count_get(space, offset) == 0);
+#endif /* UNIV_IBUF_COUNT_DEBUG */
+ } else {
+ ibuf_merge_or_delete_for_page(
+ block, space, offset, zip_size, TRUE);
+ }
}
/* Unfix and unlatch the block. */
@@ -2799,14 +2824,15 @@ wait_until_unfixed:
ut_a(mode == BUF_GET_POSSIBLY_FREED
|| !block->page.file_page_was_freed);
#endif
- mutex_exit(&block->mutex);
-
/* Check if this is the first access to the page */
-
access_time = buf_page_is_accessed(&block->page);
- if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
- buf_page_set_accessed_make_young(&block->page, access_time);
+ buf_page_set_accessed(&block->page);
+
+ mutex_exit(&block->mutex);
+
+ if (mode != BUF_PEEK_IF_IN_POOL) {
+ buf_page_make_young_if_needed(&block->page);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -2857,7 +2883,7 @@ wait_until_unfixed:
mtr_memo_push(mtr, block, fix_type);
- if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) {
+ if (mode != BUF_PEEK_IF_IN_POOL && !access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -2912,15 +2938,13 @@ buf_page_optimistic_get(
buf_block_buf_fix_inc(block, file, line);
- mutex_exit(&block->mutex);
+ access_time = buf_page_is_accessed(&block->page);
- /* Check if this is the first access to the page.
- We do a dirty read on purpose, to avoid mutex contention.
- This field is only used for heuristic purposes; it does not
- affect correctness. */
+ buf_page_set_accessed(&block->page);
- access_time = buf_page_is_accessed(&block->page);
- buf_page_set_accessed_make_young(&block->page, access_time);
+ mutex_exit(&block->mutex);
+
+ buf_page_make_young_if_needed(&block->page);
ut_ad(!ibuf_inside(mtr)
|| ibuf_page(buf_block_get_space(block),
@@ -2975,7 +2999,7 @@ buf_page_optimistic_get(
mutex_exit(&block->mutex);
#endif
- if (UNIV_UNLIKELY(!access_time)) {
+ if (!access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -3038,24 +3062,14 @@ buf_page_get_known_nowait(
buf_block_buf_fix_inc(block, file, line);
+ buf_page_set_accessed(&block->page);
+
mutex_exit(&block->mutex);
buf_pool = buf_pool_from_block(block);
- if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
- buf_pool_mutex_enter(buf_pool);
- buf_LRU_make_block_young(&block->page);
- buf_pool_mutex_exit(buf_pool);
- } else if (!buf_page_is_accessed(&block->page)) {
- /* Above, we do a dirty read on purpose, to avoid
- mutex contention. The field buf_page_t::access_time
- is only used for heuristic purposes. Writes to the
- field must be protected by mutex, however. */
- ulint time_ms = ut_time_ms();
-
- buf_pool_mutex_enter(buf_pool);
- buf_page_set_accessed(&block->page, time_ms);
- buf_pool_mutex_exit(buf_pool);
+ if (mode == BUF_MAKE_YOUNG) {
+ buf_page_make_young_if_needed(&block->page);
}
ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
@@ -3234,6 +3248,7 @@ buf_page_init(
ulint offset, /*!< in: offset of the page within space
in units of a page */
ulint fold, /*!< in: buf_page_address_fold(space,offset) */
+ ulint zip_size,/*!< in: compressed page size, or 0 */
buf_block_t* block) /*!< in/out: block to init */
{
buf_page_t* hash_page;
@@ -3302,6 +3317,9 @@ buf_page_init(
ut_d(block->page.in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
fold, &block->page);
+ if (zip_size) {
+ page_zip_set_size(&block->page.zip, zip_size);
+ }
}
/********************************************************************//**
@@ -3407,7 +3425,7 @@ err_exit:
ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
- buf_page_init(buf_pool, space, offset, fold, block);
+ buf_page_init(buf_pool, space, offset, fold, zip_size, block);
rw_lock_x_unlock(hash_lock);
/* The block must be put to the LRU list, to the old blocks */
@@ -3426,8 +3444,6 @@ err_exit:
buf_page_set_io_fix(bpage, BUF_IO_READ);
if (zip_size) {
- page_zip_set_size(&block->page.zip, zip_size);
-
/* buf_pool->mutex may be released and
reacquired by buf_buddy_alloc(). Thus, we
must release block->mutex in order not to
@@ -3528,7 +3544,8 @@ err_exit:
rw_lock_x_unlock(hash_lock);
- /* The block must be put to the LRU list, to the old blocks */
+ /* The block must be put to the LRU list, to the old blocks.
+ The zip_size has already been set into the page zip. */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
@@ -3578,7 +3595,6 @@ buf_page_create(
buf_block_t* block;
ulint fold;
buf_block_t* free_block = NULL;
- ulint time_ms = ut_time_ms();
buf_pool_t* buf_pool = buf_pool_get(space, offset);
rw_lock_t* hash_lock;
@@ -3630,7 +3646,7 @@ buf_page_create(
mutex_enter(&block->mutex);
- buf_page_init(buf_pool, space, offset, fold, block);
+ buf_page_init(buf_pool, space, offset, fold, zip_size, block);
rw_lock_x_unlock(hash_lock);
@@ -3650,8 +3666,6 @@ buf_page_create(
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
-
- page_zip_set_size(&block->page.zip, zip_size);
mutex_exit(&block->mutex);
/* buf_pool->mutex may be released and reacquired by
buf_buddy_alloc(). Thus, we must release block->mutex
@@ -3675,12 +3689,12 @@ buf_page_create(
rw_lock_x_unlock(&block->lock);
}
- buf_page_set_accessed(&block->page, time_ms);
-
buf_pool_mutex_exit(buf_pool);
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
+ buf_page_set_accessed(&block->page);
+
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
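The hunks above change buf_page_set_accessed() so the first-access time is recorded exactly once while the per-block mutex is held, and move the make-young decision to after the mutex is released. A minimal standalone sketch of that ordering, not InnoDB code (types, names and the age threshold are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for a buffer page: only what the pattern needs. */
struct demo_page {
	pthread_mutex_t	mutex;		/* per-block mutex */
	unsigned long	access_time;	/* ms of first access, 0 = never */
	unsigned long	age;		/* stand-in input for the LRU heuristic */
};

static unsigned long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long) ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Record the time of the first access only; later calls are no-ops.
Must be called with the per-block mutex held, matching the assertion
added to buf_page_set_accessed(). */
static void demo_set_accessed(struct demo_page *p)
{
	if (p->access_time == 0) {
		p->access_time = now_ms();
	}
}

/* The make-young heuristic needs no pool-wide mutex of its own. */
static int demo_make_young_if_needed(const struct demo_page *p)
{
	return p->age > 1000;	/* arbitrary "too old" threshold */
}

int main(void)
{
	struct demo_page p = { PTHREAD_MUTEX_INITIALIZER, 0, 1500 };

	pthread_mutex_lock(&p.mutex);
	demo_set_accessed(&p);		/* sets access_time exactly once */
	pthread_mutex_unlock(&p.mutex);

	if (demo_make_young_if_needed(&p)) {
		printf("would move page to LRU head (first access at %lu ms)\n",
		       p.access_time);
	}
	return 0;
}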
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 023ed766c62..577878ef964 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -104,6 +104,23 @@ in thrashing. */
/* @} */
+/******************************************************************//**
+Increases the flush_list size in bytes by zip_size for a compressed page,
+or by UNIV_PAGE_SIZE for an uncompressed page. */
+static inline
+void
+incr_flush_list_size_in_bytes(
+/*==========================*/
+ buf_block_t* block, /*!< in: control block */
+ buf_pool_t* buf_pool) /*!< in: buffer pool instance */
+{
+ ulint zip_size;
+ ut_ad(buf_flush_list_mutex_own(buf_pool));
+ zip_size = page_zip_get_size(&block->page.zip);
+ buf_pool->stat.flush_list_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
+ ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
+}
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/******************************************************************//**
Validates the flush list.
@@ -333,6 +350,7 @@ buf_flush_insert_into_flush_list(
ut_d(block->page.in_flush_list = TRUE);
block->page.oldest_modification = lsn;
UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);
+ incr_flush_list_size_in_bytes(block, buf_pool);
#ifdef UNIV_DEBUG_VALGRIND
{
@@ -439,6 +457,8 @@ buf_flush_insert_sorted_into_flush_list(
MONITOR_INC(MONITOR_PAGE_INFLUSH);
+ incr_flush_list_size_in_bytes(block, buf_pool);
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -538,6 +558,7 @@ buf_flush_remove(
buf_page_t* bpage) /*!< in: pointer to the block in question */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ulint zip_size;
ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
@@ -576,6 +597,9 @@ buf_flush_remove(
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.flush_list_bytes -= zip_size ? zip_size : UNIV_PAGE_SIZE;
+
bpage->oldest_modification = 0;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
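The flush-list accounting added above follows one rule: a page contributes zip_size bytes if it is compressed and a full page otherwise, added on insert and subtracted on remove. A small standalone sketch of that rule under the assumption of a 16 KiB default page size (names are illustrative, not InnoDB code):

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 16384UL	/* stand-in for UNIV_PAGE_SIZE */

struct demo_pool_stat {
	unsigned long flush_list_bytes;	/* bytes of pages on the flush list */
};

/* A page contributes its compressed size when compressed, otherwise a
full page, mirroring incr_flush_list_size_in_bytes()/buf_flush_remove(). */
static unsigned long demo_page_bytes(unsigned long zip_size)
{
	return zip_size ? zip_size : DEMO_PAGE_SIZE;
}

static void demo_flush_list_add(struct demo_pool_stat *s, unsigned long zip_size)
{
	s->flush_list_bytes += demo_page_bytes(zip_size);
}

static void demo_flush_list_remove(struct demo_pool_stat *s, unsigned long zip_size)
{
	assert(s->flush_list_bytes >= demo_page_bytes(zip_size));
	s->flush_list_bytes -= demo_page_bytes(zip_size);
}

int main(void)
{
	struct demo_pool_stat s = { 0 };

	demo_flush_list_add(&s, 0);	/* uncompressed page */
	demo_flush_list_add(&s, 8192);	/* 8 KiB compressed page */
	demo_flush_list_remove(&s, 0);
	printf("flush list holds %lu bytes\n", s.flush_list_bytes);	/* 8192 */
	return 0;
}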
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index c35d84cb985..5f0c0cae96c 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -158,6 +158,23 @@ buf_LRU_block_free_hashed_page(
be in a state where it can be freed */
/******************************************************************//**
+Increases the LRU list size in bytes by zip_size for a compressed page,
+or by UNIV_PAGE_SIZE for an uncompressed page. */
+static inline
+void
+incr_LRU_size_in_bytes(
+/*===================*/
+ buf_page_t* bpage, /*!< in: control block */
+ buf_pool_t* buf_pool) /*!< in: buffer pool instance */
+{
+ ulint zip_size;
+ ut_ad(buf_pool_mutex_own(buf_pool));
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.LRU_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
+ ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
+}
+
+/******************************************************************//**
Determines if the unzip_LRU list should be used for evicting a victim
instead of the general LRU list.
@return TRUE if should use unzip_LRU */
@@ -1107,6 +1124,7 @@ buf_LRU_remove_block(
buf_page_t* bpage) /*!< in: control block */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ulint zip_size;
ut_ad(buf_pool);
ut_ad(bpage);
@@ -1142,6 +1160,9 @@ buf_LRU_remove_block(
UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);
ut_d(bpage->in_LRU_list = FALSE);
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.LRU_bytes -= zip_size ? zip_size : UNIV_PAGE_SIZE;
+
buf_unzip_LRU_remove_block_if_needed(bpage);
/* If the LRU list is so short that LRU_old is not defined,
@@ -1202,7 +1223,10 @@ buf_unzip_LRU_add_block(
}
/******************************************************************//**
-Adds a block to the LRU list end. */
+Adds a block to the LRU list end. Please make sure that the zip_size is
+already set into the page zip when invoking this function, so that the
+correct zip_size can be read from the buffer page when adding the block
+to the LRU list. */
UNIV_INLINE
void
buf_LRU_add_block_to_end_low(
@@ -1221,6 +1245,8 @@ buf_LRU_add_block_to_end_low(
UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
ut_d(bpage->in_LRU_list = TRUE);
+ incr_LRU_size_in_bytes(bpage, buf_pool);
+
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
ut_ad(buf_pool->LRU_old);
@@ -1249,7 +1275,10 @@ buf_LRU_add_block_to_end_low(
}
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size is
+already set into the page zip when invoking this function, so that the
+correct zip_size can be read from the buffer page when adding the block
+to the LRU list. */
UNIV_INLINE
void
buf_LRU_add_block_low(
@@ -1291,6 +1320,8 @@ buf_LRU_add_block_low(
ut_d(bpage->in_LRU_list = TRUE);
+ incr_LRU_size_in_bytes(bpage, buf_pool);
+
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
ut_ad(buf_pool->LRU_old);
@@ -1318,7 +1349,10 @@ buf_LRU_add_block_low(
}
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size is
+already set into the page zip when invoking this function, so that the
+correct zip_size can be read from the buffer page when adding the block
+to the LRU list. */
UNIV_INTERN
void
buf_LRU_add_block(
@@ -1540,6 +1574,8 @@ func_exit:
UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
prev_b, b);
+ incr_LRU_size_in_bytes(b, buf_pool);
+
if (buf_page_is_old(b)) {
buf_pool->LRU_old_len++;
if (UNIV_UNLIKELY
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index ff93be3e76a..95bc022de8b 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -2380,7 +2380,8 @@ dict_load_foreigns(
ibool check_charsets) /*!< in: TRUE=check charset
compatibility */
{
- char tuple_buf[DTUPLE_EST_ALLOC(1)];
+ ulint tuple_buf[(DTUPLE_EST_ALLOC(1) + sizeof(ulint) - 1)
+ / sizeof(ulint)];
btr_pcur_t pcur;
dtuple_t* tuple;
dfield_t* dfield;
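The dict0load.cc hunk replaces a char array with a ulint array of equal capacity so the on-stack buffer is suitably aligned for the tuple built inside it; the element count is the usual round-up division. An illustrative sketch of the idiom (the 80-byte size is arbitrary, names are made up):

#include <stdio.h>

/* Round n bytes up to a whole number of elements of type t. */
#define ELEMS_FOR_BYTES(n, t)	(((n) + sizeof(t) - 1) / sizeof(t))

int main(void)
{
	/* An 80-byte scratch buffer, declared as unsigned long elements so
	its start is aligned for unsigned long (and anything with equal or
	smaller alignment), unlike a plain char array. */
	unsigned long buf[ELEMS_FOR_BYTES(80, unsigned long)];

	printf("%zu elements, %zu bytes, alignment %zu\n",
	       sizeof(buf) / sizeof(buf[0]), sizeof(buf),
	       _Alignof(unsigned long));
	return 0;
}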
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 2e6835fe0c0..9aafeb4f69c 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1493,7 +1493,7 @@ fil_space_get_size(
ut_ad(fil_system);
- fil_mutex_enter_and_prepare_for_io(id);
+ mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id);
@@ -1508,6 +1508,23 @@ fil_space_get_size(
ut_a(1 == UT_LIST_GET_LEN(space->chain));
+ mutex_exit(&fil_system->mutex);
+
+ /* It is possible that the space gets evicted at this point
+ before the fil_mutex_enter_and_prepare_for_io() acquires
+ the fil_system->mutex. Check for this after completing the
+ call to fil_mutex_enter_and_prepare_for_io(). */
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ /* We are still holding the fil_system->mutex. Check if
+ the space is still in memory cache. */
+ space = fil_space_get_by_id(id);
+
+ if (space == NULL) {
+ mutex_exit(&fil_system->mutex);
+ return(0);
+ }
+
node = UT_LIST_GET_FIRST(space->chain);
/* It must be a single-table tablespace and we have not opened
@@ -1545,7 +1562,7 @@ fil_space_get_flags(
return(0);
}
- fil_mutex_enter_and_prepare_for_io(id);
+ mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id);
@@ -1560,6 +1577,23 @@ fil_space_get_flags(
ut_a(1 == UT_LIST_GET_LEN(space->chain));
+ mutex_exit(&fil_system->mutex);
+
+ /* It is possible that the space gets evicted at this point
+ before the fil_mutex_enter_and_prepare_for_io() acquires
+ the fil_system->mutex. Check for this after completing the
+ call to fil_mutex_enter_and_prepare_for_io(). */
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ /* We are still holding the fil_system->mutex. Check if
+ the space is still in memory cache. */
+ space = fil_space_get_by_id(id);
+
+ if (space == NULL) {
+ mutex_exit(&fil_system->mutex);
+ return(0);
+ }
+
node = UT_LIST_GET_FIRST(space->chain);
/* It must be a single-table tablespace and we have not opened
@@ -2745,7 +2779,7 @@ retry:
mutex_exit(&fil_system->mutex);
#ifndef UNIV_HOTBACKUP
- if (success) {
+ if (success && !recv_recovery_on) {
mtr_t mtr;
mtr_start(&mtr);
@@ -4047,6 +4081,21 @@ retry:
start_page_no = space->size;
file_start_page_no = space->size - node->size;
+#ifdef HAVE_POSIX_FALLOCATE
+ if (srv_use_posix_fallocate) {
+ mutex_exit(&fil_system->mutex);
+ success = os_file_set_size(node->name, node->handle,
+ size_after_extend * page_size);
+ mutex_enter(&fil_system->mutex);
+ if (success) {
+ node->size += (size_after_extend - start_page_no);
+ space->size += (size_after_extend - start_page_no);
+ os_has_said_disk_full = FALSE;
+ }
+ goto complete_io;
+ }
+#endif
+
/* Extend at most 64 pages at a time */
buf_size = ut_min(64, size_after_extend - start_page_no) * page_size;
buf2 = static_cast<byte*>(mem_alloc(buf_size + page_size));
@@ -4102,6 +4151,10 @@ retry:
node->size += pages_added;
node->being_extended = FALSE;
+#ifdef HAVE_POSIX_FALLOCATE
+complete_io:
+#endif
+
fil_node_complete_io(node, fil_system, OS_FILE_WRITE);
*actual_size = space->size;
@@ -5025,3 +5078,28 @@ fil_close(void)
fil_system = NULL;
}
+
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name) /*!< in: temp table name used while
+ swapping */
+{
+ mtr_t mtr;
+ mtr_start(&mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, old_space_id,
+ 0, 0, old_name, tmp_name, &mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, new_space_id,
+ 0, 0, new_name, old_name, &mtr);
+ mtr_commit(&mtr);
+}
+
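The HAVE_POSIX_FALLOCATE branch added to the extend path above relies on posix_fallocate() to grow the file without writing zero-filled buffers. A reduced standalone sketch of that call, with error handling trimmed to the essentials and an arbitrary file name (not InnoDB code):

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Extend fd to new_size bytes without writing zero-filled buffers.
posix_fallocate() returns 0 on success or an errno value on failure. */
static int extend_file(int fd, off_t new_size)
{
	int err = posix_fallocate(fd, 0, new_size);
	if (err != 0) {
		fprintf(stderr, "posix_fallocate: %s\n", strerror(err));
	}
	return err;
}

int main(void)
{
	int fd = open("demo_tablespace", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (extend_file(fd, 64 * 16384L) == 0) {	/* 64 pages of 16 KiB */
		struct stat st;
		fstat(fd, &st);
		printf("file size is now %lld bytes\n", (long long) st.st_size);
	}
	close(fd);
	unlink("demo_tablespace");
	return 0;
}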
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index e19fe47e81a..e3c2204affb 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -167,6 +167,8 @@ static my_bool innobase_file_format_check = TRUE;
static my_bool innobase_log_archive = FALSE;
static char* innobase_log_arch_dir = NULL;
#endif /* UNIV_LOG_ARCHIVE */
+static my_bool innobase_use_atomic_writes = FALSE;
+static my_bool innobase_use_fallocate = TRUE;
static my_bool innobase_use_doublewrite = TRUE;
static my_bool innobase_use_checksums = TRUE;
static my_bool innobase_locks_unsafe_for_binlog = FALSE;
@@ -471,8 +473,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_buffer_pool_load_status, SHOW_CHAR},
{"buffer_pool_pages_data",
(char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG},
+ {"buffer_pool_bytes_data",
+ (char*) &export_vars.innodb_buffer_pool_bytes_data, SHOW_LONG},
{"buffer_pool_pages_dirty",
(char*) &export_vars.innodb_buffer_pool_pages_dirty, SHOW_LONG},
+ {"buffer_pool_bytes_dirty",
+ (char*) &export_vars.innodb_buffer_pool_bytes_dirty, SHOW_LONG},
{"buffer_pool_pages_flushed",
(char*) &export_vars.innodb_buffer_pool_pages_flushed, SHOW_LONG},
{"buffer_pool_pages_free",
@@ -567,6 +573,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_truncated_status_writes, SHOW_LONG},
{"available_undo_logs",
(char*) &export_vars.innodb_available_undo_logs, SHOW_LONG},
+#ifdef UNIV_DEBUG
+ {"purge_trx_id_age",
+ (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
+ {"purge_view_trx_id_age",
+ (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
+#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};
@@ -1407,6 +1419,8 @@ convert_error_code_to_mysql(
return(HA_ERR_INDEX_CORRUPT);
case DB_UNDO_RECORD_TOO_BIG:
return(HA_ERR_UNDO_REC_TOO_BIG);
+ case DB_OUT_OF_MEMORY:
+ return(HA_ERR_OUT_OF_MEM);
}
}
@@ -1621,15 +1635,6 @@ innobase_get_lower_case_table_names(void)
return(lower_case_table_names);
}
-#if defined (__WIN__) && defined (MYSQL_DYNAMIC_PLUGIN)
-extern MYSQL_PLUGIN_IMPORT MY_TMPDIR mysql_tmpdir_list;
-/*******************************************************************//**
-Map an OS error to an errno value. The OS error number is stored in
-_doserrno and the mapped value is stored in errno) */
-void __cdecl
-_dosmaperr(
- unsigned long); /*!< in: OS error value */
-
/*********************************************************************//**
Creates a temporary file.
@return temporary file descriptor, or < 0 on error */
@@ -1638,92 +1643,16 @@ int
innobase_mysql_tmpfile(void)
/*========================*/
{
- int fd; /* handle of opened file */
- HANDLE osfh; /* OS handle of opened file */
- char* tmpdir; /* point to the directory
- where to create file */
- TCHAR path_buf[MAX_PATH - 14]; /* buffer for tmp file path.
- The length cannot be longer
- than MAX_PATH - 14, or
- GetTempFileName will fail. */
- char filename[MAX_PATH]; /* name of the tmpfile */
- DWORD fileaccess = GENERIC_READ /* OS file access */
- | GENERIC_WRITE
- | DELETE;
- DWORD fileshare = FILE_SHARE_READ /* OS file sharing mode */
- | FILE_SHARE_WRITE
- | FILE_SHARE_DELETE;
- DWORD filecreate = CREATE_ALWAYS; /* OS method of open/create */
- DWORD fileattrib = /* OS file attribute flags */
- FILE_ATTRIBUTE_NORMAL
- | FILE_FLAG_DELETE_ON_CLOSE
- | FILE_ATTRIBUTE_TEMPORARY
- | FILE_FLAG_SEQUENTIAL_SCAN;
-
- DBUG_ENTER("innobase_mysql_tmpfile");
-
- tmpdir = my_tmpdir(&mysql_tmpdir_list);
-
- /* The tmpdir parameter can not be NULL for GetTempFileName. */
- if (!tmpdir) {
- uint ret;
-
- /* Use GetTempPath to determine path for temporary files. */
- ret = GetTempPath(sizeof(path_buf), path_buf);
- if (ret > sizeof(path_buf) || (ret == 0)) {
-
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- tmpdir = path_buf;
- }
-
- /* Use GetTempFileName to generate a unique filename. */
- if (!GetTempFileName(tmpdir, "ib", 0, filename)) {
-
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- DBUG_PRINT("info", ("filename: %s", filename));
-
- /* Open/Create the file. */
- osfh = CreateFile(filename, fileaccess, fileshare, NULL,
- filecreate, fileattrib, NULL);
- if (osfh == INVALID_HANDLE_VALUE) {
-
- /* open/create file failed! */
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- do {
- /* Associates a CRT file descriptor with the OS file handle. */
- fd = _open_osfhandle((intptr_t) osfh, 0);
- } while (fd == -1 && errno == EINTR);
+ int fd2 = -1;
+ File fd;
- if (fd == -1) {
- /* Open failed, close the file handle. */
+ DBUG_EXECUTE_IF(
+ "innobase_tmpfile_creation_failure",
+ return(-1);
+ );
- _dosmaperr(GetLastError()); /* map error */
- CloseHandle(osfh); /* no need to check if
- CloseHandle fails */
- }
+ fd = mysql_tmpfile("ib");
- DBUG_RETURN(fd);
-}
-#else
-/*********************************************************************//**
-Creates a temporary file.
-@return temporary file descriptor, or < 0 on error */
-UNIV_INTERN
-int
-innobase_mysql_tmpfile(void)
-/*========================*/
-{
- int fd2 = -1;
- File fd = mysql_tmpfile("ib");
if (fd >= 0) {
/* Copy the file descriptor, so that the additional resources
allocated by create_temp_file() can be freed by invoking
@@ -1767,7 +1696,6 @@ innobase_mysql_tmpfile(void)
}
return(fd2);
}
-#endif /* defined (__WIN__) && defined (MYSQL_DYNAMIC_PLUGIN) */
/*********************************************************************//**
Wrapper around MySQL's copy_and_convert function.
@@ -2635,6 +2563,13 @@ ha_innobase::init_table_handle_for_HANDLER(void)
reset_template();
}
+/****************************************************************//**
+Gives the file extension of an InnoDB single-table tablespace. */
+static const char* ha_innobase_exts[] = {
+ ".ibd",
+ NullS
+};
+
/*********************************************************************//**
Opens an InnoDB database.
@return 0 on success, error code on failure */
@@ -2691,6 +2626,9 @@ innobase_init(
innobase_hton->alter_table_flags = innobase_alter_table_flags;
innobase_hton->kill_query = innobase_kill_query;
+ if (srv_file_per_table)
+ innobase_hton->tablefile_extensions = ha_innobase_exts;
+
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifndef DBUG_OFF
@@ -3026,6 +2964,38 @@ innobase_change_buffering_inited_ok:
innobase_commit_concurrency_init_default();
+#ifdef HAVE_POSIX_FALLOCATE
+ srv_use_posix_fallocate = 0 && (ibool) innobase_use_fallocate;
+#endif
+ srv_use_atomic_writes = (ibool) innobase_use_atomic_writes;
+ if (innobase_use_atomic_writes) {
+ fprintf(stderr, "InnoDB: using atomic writes.\n");
+
+ /* Force doublewrite buffer off, atomic writes replace it. */
+ if (srv_use_doublewrite_buf) {
+ fprintf(stderr, "InnoDB: Switching off doublewrite buffer "
+ "because of atomic writes.\n");
+ innobase_use_doublewrite = srv_use_doublewrite_buf = FALSE;
+ }
+
+ /* Force O_DIRECT on Unixes (on Windows writes are always unbuffered)*/
+#ifndef _WIN32
+ if(!innobase_file_flush_method ||
+ !strstr(innobase_file_flush_method, "O_DIRECT")) {
+ innobase_file_flush_method =
+ srv_file_flush_method_str = (char*)"O_DIRECT";
+ fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n");
+ }
+#endif
+#ifdef HAVE_POSIX_FALLOCATE
+ /* Due to a bug in directFS, using atomic writes
+ * requires posix_fallocate() to extend the file;
+ * pwrite() past the end of the file won't work.
+ */
+ srv_use_posix_fallocate = TRUE;
+#endif
+ }
+
#ifdef HAVE_PSI_INTERFACE
/* Register keys with MySQL performance schema */
int count;
@@ -3924,13 +3894,6 @@ ha_innobase::table_flags() const
}
/****************************************************************//**
-Gives the file extension of an InnoDB single-table tablespace. */
-static const char* ha_innobase_exts[] = {
- ".ibd",
- NullS
-};
-
-/****************************************************************//**
Returns the table type (storage engine name).
@return table type */
UNIV_INTERN
@@ -14619,6 +14582,20 @@ static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
"Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Prevent partial page writes, via atomic writes."
+ "The option is used to prevent partial writes in case of a crash/poweroff, "
+ "as faster alternative to doublewrite buffer."
+ "Currently this option works only "
+ "on Linux only with FusionIO device, and directFS filesystem.",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(use_fallocate, innobase_use_fallocate,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Preallocate files fast, using operating system functionality. On POSIX systems, posix_fallocate system call is used.",
+ NULL, NULL, TRUE);
+
static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
PLUGIN_VAR_RQCMDARG,
"Number of IOPs the server can do. Tunes the background IO rate",
@@ -15179,11 +15156,23 @@ static MYSQL_SYSVAR_BOOL(print_all_deadlocks, srv_print_all_deadlocks,
"Print all deadlocks to MySQL error log (off by default)",
NULL, NULL, FALSE);
-#ifdef UNIV_DEBUG_never
+#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_NOCMDOPT,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);
+
+static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
+ btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
+ "Artificially limit the number of records per B-tree page (0=unlimited).",
+ NULL, NULL, 0, 0, UINT_MAX32, 0);
+
+static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
+ srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDOPT,
+ "Pause actual purging any delete-marked records, but merely update the purge view. "
+ "It is to create artificially the situation the purge view have been updated "
+ "but the each purges were not done yet.",
+ NULL, NULL, FALSE);
#endif /* UNIV_DEBUG */
static struct st_mysql_sys_var* innobase_system_variables[]= {
@@ -15206,6 +15195,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(data_file_path),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
+ MYSQL_SYSVAR(use_atomic_writes),
+ MYSQL_SYSVAR(use_fallocate),
MYSQL_SYSVAR(fast_shutdown),
MYSQL_SYSVAR(file_io_threads),
MYSQL_SYSVAR(read_io_threads),
@@ -15299,8 +15290,10 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(undo_directory),
MYSQL_SYSVAR(undo_tablespaces),
MYSQL_SYSVAR(sync_array_size),
-#ifdef UNIV_DEBUG_never /* disable this flag. --innodb-trx becomes ambiguous */
+#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
+ MYSQL_SYSVAR(limit_optimistic_insert_debug),
+ MYSQL_SYSVAR(trx_purge_view_update_only_debug),
#endif /* UNIV_DEBUG */
NULL
};
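The rewritten innobase_mysql_tmpfile() keeps only a duplicated descriptor so the resources of the helper that created the temporary file can be released. A rough standalone sketch of the same idea, using tmpfile()/fileno() as stand-ins for mysql_tmpfile() (illustrative only):

#include <stdio.h>
#include <unistd.h>

/* Return a plain POSIX descriptor for an anonymous temporary file.
The FILE* returned by tmpfile() is closed once the descriptor has been
duplicated, so the caller only has to manage the raw fd. */
static int plain_tmpfd(void)
{
	FILE	*f = tmpfile();		/* stand-in for mysql_tmpfile("ib") */
	int	fd = -1;

	if (f != NULL) {
		fd = dup(fileno(f));	/* keep our own copy of the descriptor */
		fclose(f);		/* frees only the helper's resources */
	}
	return fd;
}

int main(void)
{
	int fd = plain_tmpfd();
	if (fd < 0) {
		perror("plain_tmpfd");
		return 1;
	}
	if (write(fd, "scratch", 7) != 7) {
		perror("write");
	}
	close(fd);
	return 0;
}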
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 1468bc79c04..e1a10ade9ad 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -102,8 +102,6 @@ innobase_col_to_mysql(
ut_ad(flen >= len);
ut_ad(DATA_MBMAXLEN(col->mbminmaxlen)
>= DATA_MBMINLEN(col->mbminmaxlen));
- ut_ad(DATA_MBMAXLEN(col->mbminmaxlen)
- > DATA_MBMINLEN(col->mbminmaxlen) || flen == len);
memcpy(dest, data, len);
break;
@@ -315,7 +313,7 @@ innobase_check_index_keys(
}
}
- my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
@@ -329,7 +327,7 @@ innobase_check_index_keys(
continue;
}
- my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
key_part1.field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index f437575579e..80c62185fb0 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -822,6 +822,11 @@ srv_printf_innodb_monitor(). */
extern ulint btr_cur_n_sea_old;
#endif /* !UNIV_HOTBACKUP */
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+extern uint btr_cur_limit_optimistic_insert_debug;
+#endif /* UNIV_DEBUG */
+
#ifndef UNIV_NONINL
#include "btr0cur.ic"
#endif
diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic
index 540417e3062..080866c7465 100644
--- a/storage/innobase/include/btr0cur.ic
+++ b/storage/innobase/include/btr0cur.ic
@@ -27,6 +27,16 @@ Created 10/16/1994 Heikki Tuuri
#include "btr0btr.h"
#ifdef UNIV_DEBUG
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
+if (btr_cur_limit_optimistic_insert_debug\
+ && (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
+ CODE;\
+}
+#else
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
+#endif /* UNIV_DEBUG */
+
+#ifdef UNIV_DEBUG
/*********************************************************//**
Returns the page cursor component of a tree cursor.
@return pointer to page cursor component */
@@ -135,6 +145,9 @@ btr_cur_compress_recommendation(
page = btr_cur_get_page(cursor);
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
+ return(FALSE));
+
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
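LIMIT_OPTIMISTIC_INSERT_DEBUG above is a debug-only hook: when a settable limit is non-zero and the record count reaches it, the supplied statement runs; in release builds the macro expands to nothing. A self-contained sketch of the same pattern with illustrative names (build with -DDEMO_DEBUG to enable the hook):

#include <stdio.h>

#ifdef DEMO_DEBUG
/* Set to a non-zero value (e.g. from a debug system variable) to trigger
the hook once a page holds at least this many records; 0 disables it. */
static unsigned limit_optimistic_insert_debug = 3;

# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)		\
	if (limit_optimistic_insert_debug			\
	    && (NREC) >= limit_optimistic_insert_debug) {	\
		CODE;						\
	}
#else
/* Release builds: the macro expands to nothing and costs nothing. */
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
#endif

static int insert_record(unsigned n_recs_on_page)
{
	/* Pretend the page is full once the debug limit is reached,
	which forces the caller down the pessimistic (page split) path. */
	LIMIT_OPTIMISTIC_INSERT_DEBUG(n_recs_on_page, return -1);

	return 0;	/* optimistic insert succeeded */
}

int main(void)
{
	unsigned n;
	for (n = 1; n <= 4; n++) {
		printf("records=%u -> %s\n", n,
		       insert_record(n) == 0 ? "optimistic" : "split");
	}
	return 0;
}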
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 2284f21e3ab..d56f1790ae4 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -205,6 +205,15 @@ struct buf_pool_info_struct{
typedef struct buf_pool_info_struct buf_pool_info_t;
+/** The occupied bytes of lists in all buffer pools */
+struct buf_pools_list_size_struct {
+ ulint LRU_bytes; /*!< LRU size in bytes */
+ ulint unzip_LRU_bytes; /*!< unzip_LRU size in bytes */
+ ulint flush_list_bytes; /*!< flush_list size in bytes */
+};
+
+typedef struct buf_pools_list_size_struct buf_pools_list_size_t;
+
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Acquire mutex on all buffer pool instances */
@@ -1010,8 +1019,7 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
__attribute__((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
@@ -1368,6 +1376,14 @@ buf_get_total_list_len(
ulint* free_len, /*!< out: length of all free lists */
ulint* flush_list_len);/*!< out: length of all flush lists */
/********************************************************************//**
+Get total list size in bytes from all buffer pools. */
+UNIV_INTERN
+void
+buf_get_total_list_size_in_bytes(
+/*=============================*/
+ buf_pools_list_size_t* buf_pools_list_size); /*!< out: list sizes
+ in all buffer pools */
+/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
@@ -1547,10 +1563,11 @@ struct buf_page_struct{
to read this for heuristic
purposes without holding any
mutex or latch */
- unsigned access_time:32; /*!< time of first access, or
- 0 if the block was never accessed
- in the buffer pool */
/* @} */
+ unsigned access_time; /*!< time of first access, or
+ 0 if the block was never accessed
+ in the buffer pool. Protected by
+ block mutex */
# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ibool file_page_was_freed;
/*!< this is set to TRUE when
@@ -1730,6 +1747,8 @@ struct buf_pool_stat_struct{
young because the first access
was not long enough ago, in
buf_page_peek_if_too_old() */
+ ulint LRU_bytes; /*!< LRU size in bytes */
+ ulint flush_list_bytes;/*!< flush_list size in bytes */
};
/** Statistics of buddy blocks of a given size. */
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 88c29ab5603..d0a6df4eb40 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -614,18 +614,18 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
{
#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
- ut_ad(buf_pool_mutex_own(buf_pool));
+ ut_ad(!buf_pool_mutex_own(buf_pool));
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif
ut_a(buf_page_in_file(bpage));
if (!bpage->access_time) {
/* Make this the time of the first access. */
- bpage->access_time = time_ms;
+ bpage->access_time = ut_time_ms();
}
}
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 527852da758..74c5525c2e5 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -157,7 +157,10 @@ buf_LRU_block_free_non_file_page(
/*=============================*/
buf_block_t* block); /*!< in: block, must not contain a file page */
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size is
+already set into the page zip when invoking the function, so that we
+can get correct zip_size from the buffer page when adding a block
+into LRU */
UNIV_INTERN
void
buf_LRU_add_block(
diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic
index a5e94a8edff..d489bef89a8 100644
--- a/storage/innobase/include/data0type.ic
+++ b/storage/innobase/include/data0type.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -556,35 +556,18 @@ dtype_get_fixed_size_low(
} else if (!comp) {
return(len);
} else {
- /* We play it safe here and ask MySQL for
- mbminlen and mbmaxlen. Although
- mbminlen and mbmaxlen are
- initialized if and only if prtype
- is (in one of the 3 functions in this file),
- it could be that none of these functions
- has been called. */
-
+#ifdef UNIV_DEBUG
ulint i_mbminlen, i_mbmaxlen;
innobase_get_cset_width(
dtype_get_charset_coll(prtype),
&i_mbminlen, &i_mbmaxlen);
- if (DATA_MBMINMAXLEN(i_mbminlen, i_mbmaxlen)
- != mbminmaxlen) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: "
- "mbminlen=%lu, "
- "mbmaxlen=%lu, "
- "type->mbminlen=%lu, "
- "type->mbmaxlen=%lu\n",
- (ulong) i_mbminlen,
- (ulong) i_mbmaxlen,
- (ulong) DATA_MBMINLEN(mbminmaxlen),
- (ulong) DATA_MBMAXLEN(mbminmaxlen));
- }
- if (i_mbminlen == i_mbmaxlen) {
+ ut_ad(DATA_MBMINMAXLEN(i_mbminlen, i_mbmaxlen)
+ == mbminmaxlen);
+#endif /* UNIV_DEBUG */
+ if (DATA_MBMINLEN(mbminmaxlen)
+ == DATA_MBMAXLEN(mbminmaxlen)) {
return(len);
}
}
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 1e2b8049860..4bd9f9fa51f 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -739,6 +739,21 @@ fil_tablespace_is_being_deleted(
/*============================*/
ulint id); /*!< in: space id */
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name); /*!< in: temp table name used while
+ swapping */
+
typedef struct fil_space_struct fil_space_t;
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index a1ffe87d5bd..f97a11b9483 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -864,14 +864,22 @@ lock_trx_has_sys_table_locks(
remains set when the waiting lock is granted,
or if the lock is inherited to a neighboring
record */
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_MODE_MASK
+#define LOCK_CONV_BY_OTHER 4096 /*!< this bit is set when the lock is created
+ by another transaction */
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_MODE_MASK
# error
#endif
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_TYPE_MASK
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_TYPE_MASK
# error
#endif
/* @} */
+/** Checks if this is a waiting lock created by lock->trx itself.
+@param type_mode lock->type_mode
+@return whether it is a waiting lock belonging to lock->trx */
+#define lock_is_wait_not_by_other(type_mode) \
+ ((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
+
/** Lock operation struct */
typedef struct lock_op_struct lock_op_t;
/** Lock operation struct */
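The new LOCK_CONV_BY_OTHER bit is kept outside the mode and type masks, which the #if ... # error guards verify at compile time, and lock_is_wait_not_by_other() tests the WAIT/CONV_BY_OTHER combination. A standalone sketch of both patterns with made-up constants (not the real lock flag values):

#include <stdio.h>

/* Stand-in flag layout: low bits hold the lock mode, higher bits hold
independent flag bits. */
#define MODE_MASK		0xFUL
#define FLAG_WAIT		16UL
#define FLAG_GAP		32UL
#define FLAG_CONV_BY_OTHER	4096UL

/* Compile-time guard: adding a new flag that overlaps the mode mask
breaks the build immediately, mirroring the #if ... # error pattern. */
#if (FLAG_WAIT | FLAG_GAP | FLAG_CONV_BY_OTHER) & MODE_MASK
# error "flag bits collide with the lock mode mask"
#endif

/* True only for a waiting lock created by the transaction itself,
i.e. WAIT is set and CONV_BY_OTHER is not. */
#define is_wait_not_by_other(type_mode) \
	(((type_mode) & (FLAG_CONV_BY_OTHER | FLAG_WAIT)) == FLAG_WAIT)

int main(void)
{
	unsigned long own_wait = 2UL | FLAG_WAIT;
	unsigned long converted = 2UL | FLAG_WAIT | FLAG_CONV_BY_OTHER;

	printf("own wait:       %d\n", is_wait_not_by_other(own_wait));	/* 1 */
	printf("converted wait: %d\n", is_wait_not_by_other(converted));	/* 0 */
	return 0;
}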
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index c6c70bb5f09..671f8052afa 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -362,24 +362,6 @@ rec_get_offsets_func(
rec_get_offsets_func(rec,index,offsets,n,heap,__FILE__,__LINE__)
/******************************************************//**
-Determine the offset to each field in a leaf-page record
-in ROW_FORMAT=COMPACT. This is a special case of
-rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
-void
-rec_init_offsets_comp_ordinary(
-/*===========================*/
- const rec_t* rec, /*!< in: physical record in
- ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets);/*!< in/out: array of offsets;
- in: n=rec_offs_n_fields(offsets) */
-
-/******************************************************//**
The following function determines the offsets to each field
in the record. It can reuse a previously allocated array. */
UNIV_INTERN
@@ -648,8 +630,48 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in a temporary file.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+ __attribute__((warn_unused_result, nonnull));
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+ __attribute__((nonnull));
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+ __attribute__((nonnull));
+
/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in
a buffer.
@@ -684,21 +706,6 @@ rec_fold(
__attribute__((pure));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
-Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
-void
-rec_convert_dtuple_to_rec_comp(
-/*===========================*/
- rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields);/*!< in: number of data fields */
-/*********************************************************//**
Builds a physical record out of a data tuple and
stores it into the given buffer.
@return pointer to the origin of physical record */
@@ -731,10 +738,7 @@ UNIV_INTERN
ulint
rec_get_converted_size_comp_prefix(
/*===============================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
+ const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra); /*!< out: extra size */
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index c4e2f5ddf41..95c6d85075c 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -294,7 +294,7 @@ row_merge_buf_empty(
/*********************************************************************//**
Create a merge file. */
UNIV_INTERN
-void
+int
row_merge_file_create(
/*==================*/
merge_file_t* merge_file); /*!< out: merge file structure */
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 99cff251e3c..65257baa4bb 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -252,6 +252,11 @@ extern ibool srv_use_doublewrite_buf;
extern ulong srv_doublewrite_batch_size;
extern ulong srv_checksum_algorithm;
+extern ibool srv_use_atomic_writes;
+#ifdef HAVE_POSIX_FALLOCATE
+extern ibool srv_use_posix_fallocate;
+#endif
+
extern ulong srv_max_buf_pool_modified_pct;
extern ulong srv_max_purge_lag;
extern ulong srv_max_purge_lag_delay;
@@ -312,6 +317,10 @@ extern ibool srv_print_latch_waits;
extern ulint srv_fatal_semaphore_wait_threshold;
extern ulint srv_dml_needed_delay;
+#ifdef UNIV_DEBUG
+extern my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
#ifndef HAVE_ATOMIC_BUILTINS
/** Mutex protecting some server global variables. */
extern mutex_t server_mutex;
@@ -379,7 +388,7 @@ extern ulint srv_buf_pool_flushed;
reading of a disk page */
extern ulint srv_buf_pool_reads;
-/* print all user-level transactions deadlocks to mysqld stderr */
+/** print all user-level transactions deadlocks to mysqld stderr */
extern my_bool srv_print_all_deadlocks;
/** Status variables to be passed to MySQL */
@@ -727,7 +736,9 @@ struct export_var_struct{
char innodb_buffer_pool_load_status[512];/*!< Buf pool load status */
ulint innodb_buffer_pool_pages_total; /*!< Buffer pool size */
ulint innodb_buffer_pool_pages_data; /*!< Data pages */
+ ulint innodb_buffer_pool_bytes_data; /*!< File bytes used */
ulint innodb_buffer_pool_pages_dirty; /*!< Dirty data pages */
+ ulint innodb_buffer_pool_bytes_dirty; /*!< File bytes modified */
ulint innodb_buffer_pool_pages_misc; /*!< Miscellanous pages */
ulint innodb_buffer_pool_pages_free; /*!< Free pages */
#ifdef UNIV_DEBUG
@@ -771,6 +782,11 @@ struct export_var_struct{
ulint innodb_num_open_files; /*!< fil_n_file_opened */
ulint innodb_truncated_status_writes; /*!< srv_truncated_status_writes */
ulint innodb_available_undo_logs; /*!< srv_available_undo_logs */
+#ifdef UNIV_DEBUG
+ ulint innodb_purge_trx_id_age; /*!< max_trx_id - purged trx_id */
+ ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
+ - purged view's min trx_id */
+#endif /* UNIV_DEBUG */
};
/** Thread slot in the thread table. */
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 60eb1fede91..422828e76f4 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -42,6 +42,8 @@ Created 1/20/1994 Heikki Tuuri
#define _IB_TO_STR(s) #s
#define IB_TO_STR(s) _IB_TO_STR(s)
+#include <mysql_version.h>
+
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 2
#define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH
@@ -55,10 +57,7 @@ component, i.e. we show M.N.P as M.N */
#define INNODB_VERSION_SHORT \
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
-#define INNODB_VERSION_STR \
- IB_TO_STR(INNODB_VERSION_MAJOR) "." \
- IB_TO_STR(INNODB_VERSION_MINOR) "." \
- IB_TO_STR(INNODB_VERSION_BUGFIX)
+#define INNODB_VERSION_STR MYSQL_SERVER_VERSION
#define REFMAN "http://dev.mysql.com/doc/refman/" \
IB_TO_STR(MYSQL_VERSION_MAJOR) "." \
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 476b305ca70..fff59852704 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -858,13 +858,17 @@ lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /*!< in/out: record lock */
{
- ut_ad(lock->trx->lock.wait_lock == lock);
ut_ad(lock_get_wait(lock));
ut_ad(lock_mutex_own());
/* Reset the back pointer in trx to this waiting lock request */
- lock->trx->lock.wait_lock = NULL;
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
+ ut_ad(lock->trx->lock.wait_lock == lock);
+ lock->trx->lock.wait_lock = NULL;
+ } else {
+ ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ }
lock->type_mode &= ~LOCK_WAIT;
}
@@ -1476,7 +1480,7 @@ Checks if a transaction has a GRANTED explicit lock on rec stronger or equal
to precise_mode.
@return lock or NULL */
UNIV_INLINE
-const lock_t*
+lock_t*
lock_rec_has_expl(
/*==============*/
ulint precise_mode,/*!< in: LOCK_S or LOCK_X
@@ -1489,7 +1493,7 @@ lock_rec_has_expl(
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: transaction */
{
- const lock_t* lock;
+ lock_t* lock;
ut_ad(lock_mutex_own());
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
@@ -1498,14 +1502,14 @@ lock_rec_has_expl(
for (lock = lock_rec_get_first(block, heap_no);
lock != NULL;
- lock = lock_rec_get_next_const(heap_no, lock)) {
+ lock = lock_rec_get_next(heap_no, lock)) {
if (lock->trx == trx
+ && !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(
lock_get_mode(lock),
static_cast<enum lock_mode>(
precise_mode & LOCK_MODE_MASK))
- && !lock_get_wait(lock)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
@@ -1813,7 +1817,7 @@ lock_rec_create(
}
ut_ad(trx_mutex_own(trx));
- if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
+ if (lock_is_wait_not_by_other(type_mode)) {
lock_set_lock_and_trx_wait(lock, trx);
}
@@ -1853,11 +1857,12 @@ lock_rec_enqueue_waiting(
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
+ lock_t* lock, /*!< in: lock object; NULL if a new
+ one should be created. */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
- lock_t* lock;
trx_id_t victim_trx_id;
ut_ad(lock_mutex_own());
@@ -1893,10 +1898,20 @@ lock_rec_enqueue_waiting(
ut_ad(0);
}
- /* Enqueue the lock request that will wait to be granted, note that
- we already own the trx mutex. */
- lock = lock_rec_create(
- type_mode | LOCK_WAIT, block, heap_no, index, trx, TRUE);
+ if (lock == NULL) {
+ /* Enqueue the lock request that will wait
+ to be granted, note that we already own
+ the trx mutex. */
+ lock = lock_rec_create(
+ type_mode | LOCK_WAIT, block, heap_no,
+ index, trx, TRUE);
+ } else {
+ ut_ad(lock->type_mode & LOCK_WAIT);
+ ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ lock_set_lock_and_trx_wait(lock, trx);
+ }
/* Release the mutex to obey the latching order.
This is safe, because lock_deadlock_check_and_resolve()
@@ -2163,6 +2178,7 @@ lock_rec_lock_slow(
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
+ lock_t* lock;
enum db_err err = DB_SUCCESS;
ut_ad(lock_mutex_own());
@@ -2180,7 +2196,27 @@ lock_rec_lock_slow(
trx_mutex_enter(trx);
- if (lock_rec_has_expl(mode, block, heap_no, trx)) {
+ lock = lock_rec_has_expl(mode, block, heap_no, trx);
+ if (lock) {
+ if (lock->type_mode & LOCK_CONV_BY_OTHER) {
+ /* This lock or lock wait was created by another
+ transaction, not by the transaction (trx) itself.
+ So, the transaction (trx) should treat it correctly
+ according to whether it has been granted or not. */
+
+ if (lock->type_mode & LOCK_WAIT) {
+ /* This lock request has not been granted yet.
+ We should wait until it is granted. */
+
+ goto enqueue_waiting;
+ } else {
+ /* This lock request was already granted.
+ Just clear the flag. */
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ }
+ }
+
/* The trx already has a strong enough lock on rec: do
nothing */
@@ -2193,8 +2229,10 @@ lock_rec_lock_slow(
have a lock strong enough already granted on the
record, we have to wait. */
+ ut_ad(lock == NULL);
+enqueue_waiting:
err = lock_rec_enqueue_waiting(
- mode, block, heap_no, index, thr);
+ mode, block, heap_no, lock, index, thr);
} else if (!impl) {
/* Set the requested lock on the record, note that
@@ -2348,7 +2386,8 @@ lock_grant(
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
- if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
+ && lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
que_thr_t* thr;
thr = que_thr_end_lock_wait(lock->trx);
@@ -2375,6 +2414,7 @@ lock_rec_cancel(
ut_ad(lock_mutex_own());
ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2541,8 +2581,12 @@ lock_rec_reset_and_release_wait(
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
- if (lock_get_wait(lock)) {
+ if (lock_is_wait_not_by_other(lock->type_mode)) {
lock_rec_cancel(lock);
+ } else if (lock_get_wait(lock)) {
+ /* just reset LOCK_WAIT */
+ lock_rec_reset_nth_bit(lock, heap_no);
+ lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
@@ -4017,6 +4061,7 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(trx));
+ ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
/* Non-locking autocommit read-only transactions should not set
any locks. */
@@ -5850,7 +5895,7 @@ lock_rec_insert_check_and_lock(
err = lock_rec_enqueue_waiting(
LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
- block, next_rec_heap_no, index, thr);
+ block, next_rec_heap_no, NULL, index, thr);
trx_mutex_exit(trx);
} else {
@@ -5943,10 +5988,25 @@ lock_rec_convert_impl_to_expl(
if (impl_trx != NULL
&& !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
heap_no, impl_trx)) {
+ ulint type_mode = (LOCK_REC | LOCK_X
+ | LOCK_REC_NOT_GAP);
+
+ /* If the delete-marked record was already locked by
+ another transaction, we should reserve a waiting lock
+ for impl_trx for its implicit lock, because the lock
+ cannot be granted at this moment. */
+
+ if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
+ && lock_rec_other_has_conflicting(
+ static_cast<enum lock_mode>
+ (LOCK_X | LOCK_REC_NOT_GAP), block,
+ heap_no, impl_trx)) {
+
+ type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
+ }
lock_rec_add_to_queue(
- LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
- block, heap_no, index, impl_trx, FALSE);
+ type_mode, block, heap_no, index,
+ impl_trx, FALSE);
}
lock_mutex_exit();
@@ -6581,6 +6641,7 @@ lock_cancel_waiting_and_release(
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(lock->trx));
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
lock->trx->lock.cancel = TRUE;
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index f914fc2676c..c2b9c152a44 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -958,8 +958,11 @@ recv_parse_or_apply_log_rec_body(
not NULL, then the log record is
applied to the page, and the log
record should be complete then */
- mtr_t* mtr) /*!< in: mtr or NULL; should be non-NULL
+ mtr_t* mtr, /*!< in: mtr or NULL; should be non-NULL
if and only if block is non-NULL */
+ ulint space_id)
+ /*!< in: tablespace id obtained by
+ parsing initial log record */
{
dict_index_t* index = NULL;
page_t* page;
@@ -1231,8 +1234,11 @@ recv_parse_or_apply_log_rec_body(
ut_ad(!page || page_type != FIL_PAGE_TYPE_ALLOCATED);
ptr = mlog_parse_string(ptr, end_ptr, page, page_zip);
break;
- case MLOG_FILE_CREATE:
case MLOG_FILE_RENAME:
+ ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type,
+ space_id, 0);
+ break;
+ case MLOG_FILE_CREATE:
case MLOG_FILE_DELETE:
case MLOG_FILE_CREATE2:
ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type, 0, 0);
@@ -1611,7 +1617,8 @@ recv_recover_page_func(
recv_parse_or_apply_log_rec_body(recv->type, buf,
buf + recv->len,
- block, &mtr);
+ block, &mtr,
+ recv_addr->space);
end_lsn = recv->start_lsn + recv->len;
mach_write_to_8(FIL_PAGE_LSN + page, end_lsn);
@@ -2079,7 +2086,7 @@ recv_parse_log_rec(
#endif /* UNIV_LOG_LSN_DEBUG */
new_ptr = recv_parse_or_apply_log_rec_body(*type, new_ptr, end_ptr,
- NULL, NULL);
+ NULL, NULL, *space);
if (UNIV_UNLIKELY(new_ptr == NULL)) {
return(0);
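
The log0recv.cc change above threads the tablespace id extracted by recv_parse_log_rec() into the record-body parser so that MLOG_FILE_RENAME is replayed with that id, while the other file operations keep passing 0 as before. A toy dispatcher showing only that control flow (the enum and function names are stand-ins, not the InnoDB definitions):

#include <stdio.h>

enum mlog_type { MLOG_FILE_CREATE, MLOG_FILE_RENAME, MLOG_FILE_DELETE };

/* stand-in for fil_op_log_parse_or_replay(): just report what would happen */
static void replay_file_op(enum mlog_type type, unsigned space_id)
{
    printf("replay type=%d with space_id=%u\n", (int) type, space_id);
}

static void apply(enum mlog_type type, unsigned parsed_space_id)
{
    switch (type) {
    case MLOG_FILE_RENAME:
        /* rename records are replayed with the id parsed from the header */
        replay_file_op(type, parsed_space_id);
        break;
    case MLOG_FILE_CREATE:
    case MLOG_FILE_DELETE:
        /* unchanged: these keep the old behaviour of passing 0 */
        replay_file_op(type, 0);
        break;
    }
}

int main(void)
{
    apply(MLOG_FILE_RENAME, 42);
    apply(MLOG_FILE_DELETE, 42);
    return 0;
}
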
diff --git a/storage/innobase/mysql-test/storage_engine/autoinc_secondary.rdiff b/storage/innobase/mysql-test/storage_engine/autoinc_secondary.rdiff
index c24594c5024..00cda7c4435 100644
--- a/storage/innobase/mysql-test/storage_engine/autoinc_secondary.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/autoinc_secondary.rdiff
@@ -8,7 +8,7 @@
-SELECT LAST_INSERT_ID();
-LAST_INSERT_ID()
-1
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-a 1
-a 2
diff --git a/storage/innobase/mysql-test/storage_engine/insert_delayed.rdiff b/storage/innobase/mysql-test/storage_engine/insert_delayed.rdiff
index 62895fa928f..9e6cddf03f0 100644
--- a/storage/innobase/mysql-test/storage_engine/insert_delayed.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/insert_delayed.rdiff
@@ -1,5 +1,5 @@
---- suite/storage_engine/insert_delayed.result 2012-07-12 20:04:07.143544998 +0400
-+++ suite/storage_engine/insert_delayed.reject 2012-07-15 17:49:34.551810189 +0400
+--- suite/storage_engine/insert_delayed.result 2013-01-23 01:23:49.461254916 +0400
++++ suite/storage_engine/insert_delayed.reject 2013-01-23 01:47:05.975698364 +0400
@@ -5,7 +5,16 @@
connect con0,localhost,root,,;
SET lock_wait_timeout = 1;
@@ -14,7 +14,7 @@
+# -------------------------------------------
INSERT DELAYED INTO t1 SET a=4, b='d';
+ERROR HY000: DELAYED option not supported for table 't1'
- INSERT DELAYED INTO t1 SELECT 5, 'e';
+ INSERT DELAYED INTO t1 (a,b) SELECT 5, 'e';
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
disconnect con0;
@@ -20,6 +29,4 @@
diff --git a/storage/innobase/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/parts/repair_table.rdiff
index aab866fde83..7ddc57e0ead 100644
--- a/storage/innobase/mysql-test/storage_engine/parts/repair_table.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/parts/repair_table.rdiff
@@ -1,12 +1,12 @@
---- suite/storage_engine/parts/repair_table.result 2012-07-15 01:22:58.861853325 +0400
-+++ suite/storage_engine/parts/repair_table.reject 2012-07-15 20:07:11.268009209 +0400
+--- suite/storage_engine/parts/repair_table.result 2013-01-23 01:35:44.388267080 +0400
++++ suite/storage_engine/parts/repair_table.reject 2013-01-23 01:44:40.337529283 +0400
@@ -9,27 +9,27 @@
INSERT INTO t2 (a,b) SELECT a, b FROM t1;
ALTER TABLE t1 REPAIR PARTITION p0;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
- INSERT INTO t1 VALUES (3,'c');
+ INSERT INTO t1 (a,b) VALUES (3,'c');
ALTER TABLE t1 REPAIR PARTITION NO_WRITE_TO_BINLOG p0, p1;
Table Op Msg_type Msg_text
-test.t1 repair status OK
@@ -21,7 +21,7 @@
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
- INSERT INTO t1 VALUES (10,'j');
+ INSERT INTO t1 (a,b) VALUES (10,'j');
ALTER TABLE t1 REPAIR PARTITION p1 QUICK USE_FRM;
Table Op Msg_type Msg_text
-test.t1 repair status OK
@@ -112,7 +112,7 @@
-test.t1 check error Size of datafile is: 26 Should be: 39
-test.t1 check error Partition p0 returned error
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-8 h
-10 j
@@ -136,7 +136,7 @@
-test.t1 check warning Found 3 key parts. Should be: 2
-test.t1 check error Partition p0 returned error
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-8 h
-10 j
@@ -160,7 +160,7 @@
-test.t1 check error Size of datafile is: 39 Should be: 52
-test.t1 check error Partition p1 returned error
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-8 h
-10 j
@@ -187,7 +187,7 @@
-test.t1 check warning Found 4 key parts. Should be: 3
-test.t1 check error Partition p1 returned error
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-8 h
-10 j
diff --git a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
index 9c51fea47ff..ae5b863eae1 100644
--- a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff
@@ -57,7 +57,7 @@
-test.t1 check Error Incorrect file format 't1'
-test.t1 check error Corrupt
+test.t1 check status OK
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
-ERROR HY000: Incorrect file format 't1'
+a b
+1 a
@@ -107,7 +107,7 @@
-Table Op Msg_type Msg_text
-test.t1 check error Size of datafile is: 39 Should be: 65
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-ERROR HY000: Incorrect key file for table 't1'; try to repair it
-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-# If you got a difference in error message, just add it to rdiff file
@@ -122,7 +122,7 @@
-test.t1 check warning Table is marked as crashed and last repair failed
-test.t1 check error Size of datafile is: 39 Should be: 65
-test.t1 check error Corrupt
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-# If you got a difference in error message, just add it to rdiff file
diff --git a/storage/innobase/mysql-test/storage_engine/type_char_indexes.rdiff b/storage/innobase/mysql-test/storage_engine/type_char_indexes.rdiff
index 7fce0a108e9..53241fe331e 100644
--- a/storage/innobase/mysql-test/storage_engine/type_char_indexes.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/type_char_indexes.rdiff
@@ -2,19 +2,19 @@
+++ suite/storage_engine/type_char_indexes.reject 2012-07-15 17:51:55.810034331 +0400
@@ -98,7 +98,7 @@
SET SESSION optimizer_switch = 'engine_condition_pushdown=on';
- EXPLAIN SELECT * FROM t1 WHERE c > 'a';
+ EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
id select_type table type possible_keys key key_len ref rows Extra
-# # # range c_v c_v # # # Using index condition
+# # # range c_v c_v # # # Using where
- SELECT * FROM t1 WHERE c > 'a';
+ SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
c c20 v16 v128
b char3 varchar1a varchar1b
@@ -135,7 +135,7 @@
r3a
- EXPLAIN SELECT * FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
id select_type table type possible_keys key key_len ref rows Extra
-# # # range # v16 # # # #
+# # # ALL # NULL # # # #
- SELECT * FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
c c20 v16 v128
a char1 varchar1a varchar1b
diff --git a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff
index 9a9566deafb..9061900182e 100644
--- a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff
@@ -1,5 +1,5 @@
---- suite/storage_engine/type_spatial_indexes.result 2012-07-12 04:52:40.840023344 +0400
-+++ suite/storage_engine/type_spatial_indexes.reject 2012-07-15 19:27:32.761911079 +0400
+--- suite/storage_engine/type_spatial_indexes.result 2013-01-23 01:25:45.367797786 +0400
++++ suite/storage_engine/type_spatial_indexes.reject 2013-01-23 01:46:17.560307029 +0400
@@ -702,699 +702,15 @@
DROP DATABASE IF EXISTS gis_ogs;
CREATE DATABASE gis_ogs;
@@ -79,43 +79,43 @@
-Field Type Null Key Default Extra
-fid int(11) YES NULL
-g geometry NO NULL
--INSERT INTO gis_point VALUES
+-INSERT INTO gis_point (fid,g) VALUES
-(101, PointFromText('POINT(10 10)')),
-(102, PointFromText('POINT(20 10)')),
-(103, PointFromText('POINT(20 20)')),
-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
--INSERT INTO gis_line VALUES
+-INSERT INTO gis_line (fid,g) VALUES
-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
--INSERT INTO gis_polygon VALUES
+-INSERT INTO gis_polygon (fid,g) VALUES
-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
--INSERT INTO gis_multi_point VALUES
+-INSERT INTO gis_multi_point (fid,g) VALUES
-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
--INSERT INTO gis_multi_line VALUES
+-INSERT INTO gis_multi_line (fid,g) VALUES
-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
--INSERT INTO gis_multi_polygon VALUES
+-INSERT INTO gis_multi_polygon (fid,g) VALUES
-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
--INSERT INTO gis_geometrycollection VALUES
+-INSERT INTO gis_geometrycollection (fid,g) VALUES
-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
-(122, GeomFromText('GeometryCollection()')),
-(123, GeomFromText('GeometryCollection EMPTY'));
--INSERT into gis_geometry SELECT * FROM gis_point;
--INSERT into gis_geometry SELECT * FROM gis_line;
--INSERT into gis_geometry SELECT * FROM gis_polygon;
--INSERT into gis_geometry SELECT * FROM gis_multi_point;
--INSERT into gis_geometry SELECT * FROM gis_multi_line;
--INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
--INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
-SELECT fid, AsText(g) FROM gis_point;
-fid AsText(g)
-101 POINT(10 10)
@@ -433,7 +433,7 @@
-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
-USE gis_ogs;
-# Lakes
--INSERT INTO lakes VALUES (
+-INSERT INTO lakes (fid,name,shore) VALUES (
-101, 'BLUE LAKE',
-PolyFromText(
-'POLYGON(
@@ -442,68 +442,68 @@
- )',
-101));
-# Road Segments
--INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2,
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
-LineFromText(
-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
--INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4,
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
-LineFromText(
-'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
--INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2,
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
-LineFromText(
-'LINESTRING( 70 38, 72 48 )' ,101));
--INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4,
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
-LineFromText(
-'LINESTRING( 70 38, 84 42 )' ,101));
--INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL,
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
-1,
-LineFromText(
-'LINESTRING( 28 26, 28 0 )',101));
-# DividedRoutes
--INSERT INTO divided_routes VALUES(119, 'Route 75', 4,
+-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
-MLineFromText(
-'MULTILINESTRING((10 48,10 21,10 0),
- (16 0,16 23,16 48))', 101));
-# Forests
--INSERT INTO forests VALUES(109, 'Green Forest',
+-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
-MPolyFromText(
-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
-101));
-# Bridges
--INSERT INTO bridges VALUES(110, 'Cam Bridge', PointFromText(
+-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
-'POINT( 44 31 )', 101));
-# Streams
--INSERT INTO streams VALUES(111, 'Cam Stream',
+-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
-LineFromText(
-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
--INSERT INTO streams VALUES(112, NULL,
+-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
-LineFromText(
-'LINESTRING( 76 0, 78 4, 73 9 )', 101));
-# Buildings
--INSERT INTO buildings VALUES(113, '123 Main Street',
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
-PointFromText(
-'POINT( 52 30 )', 101),
-PolyFromText(
-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
--INSERT INTO buildings VALUES(114, '215 Main Street',
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
-PointFromText(
-'POINT( 64 33 )', 101),
-PolyFromText(
-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
-# Ponds
--INSERT INTO ponds VALUES(120, NULL, 'Stock Pond',
+-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
-MPolyFromText(
-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
-# Named Places
--INSERT INTO named_places VALUES(117, 'Ashton',
+-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
-PolyFromText(
-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
--INSERT INTO named_places VALUES(118, 'Goose Island',
+-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
-PolyFromText(
-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
-# Map Neatlines
--INSERT INTO map_neatlines VALUES(115,
+-INSERT INTO map_neatlines (fid,neatline) VALUES(115,
-PolyFromText(
-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
-SELECT Dimension(shore)
diff --git a/storage/innobase/mysql-test/storage_engine/vcol.rdiff b/storage/innobase/mysql-test/storage_engine/vcol.rdiff
index 2226062834e..23b26a52228 100644
--- a/storage/innobase/mysql-test/storage_engine/vcol.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/vcol.rdiff
@@ -12,7 +12,7 @@
-Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-1 2
-2 3
@@ -29,7 +29,7 @@
-Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-1 2
-2 3
@@ -46,7 +46,7 @@
-Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-1 2
-2 3
@@ -63,7 +63,7 @@
-Warnings:
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
--SELECT * FROM t1;
+-SELECT a,b FROM t1;
-a b
-1 2
-2 3
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 62cde1cf728..57ff14b9f95 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -1402,6 +1402,43 @@ os_file_set_nocache(
#endif
}
+
+#ifdef __linux__
+#include <sys/ioctl.h>
+#ifndef DFS_IOCTL_ATOMIC_WRITE_SET
+#define DFS_IOCTL_ATOMIC_WRITE_SET _IOW(0x95, 2, uint)
+#endif
+static int os_file_set_atomic_writes(os_file_t file, const char *name)
+{
+ static int first_time = 1;
+ int atomic_option = 1;
+
+ int ret = ioctl (file, DFS_IOCTL_ATOMIC_WRITE_SET, &atomic_option);
+
+ if (ret) {
+ fprintf(stderr,
+ "InnoDB : can't use atomic write on %s, errno %d\n",
+ name, errno);
+ return ret;
+ }
+ return ret;
+}
+#else
+static int os_file_set_atomic_writes(os_file_t file, const char *name)
+{
+ fprintf(stderr,
+ "InnoDB : can't use atomic writes on %s - not implemented on this platform."
+ "innodb_use_atomic_writes needs to be 0.\n",
+ name);
+#ifdef _WIN32
+ SetLastError(ERROR_INVALID_FUNCTION);
+#else
+ errno = EINVAL;
+#endif
+ return -1;
+}
+#endif
+
/****************************************************************//**
NOTE! Use the corresponding macro os_file_create(), not directly
this function!
@@ -1552,6 +1589,13 @@ try_again:
*success = TRUE;
}
+ if (srv_use_atomic_writes && type == OS_DATA_FILE &&
+ os_file_set_atomic_writes(file, name)) {
+ CloseHandle(file);
+ *success = FALSE;
+ file = INVALID_HANDLE_VALUE;
+ }
+
return(file);
#else /* __WIN__ */
os_file_t file;
@@ -1668,6 +1712,12 @@ try_again:
file = -1;
}
#endif /* USE_FILE_LOCK */
+ if (srv_use_atomic_writes && type == OS_DATA_FILE
+ && os_file_set_atomic_writes(file, name)) {
+ close(file);
+ *success = FALSE;
+ file = -1;
+ }
return(file);
#endif /* __WIN__ */
@@ -1965,6 +2015,28 @@ os_file_set_size(
current_size = 0;
+#ifdef HAVE_POSIX_FALLOCATE
+ if (srv_use_posix_fallocate) {
+ if (posix_fallocate(file, current_size, size) == -1) {
+ fprintf(stderr,
+ "InnoDB: Error: preallocating data for"
+ " file %s failed at\n"
+ "InnoDB: offset 0 size %lld. Operating system"
+ " error number %d.\n"
+ "InnoDB: Check that the disk is not full"
+ " or a disk quota exceeded.\n"
+ "InnoDB: Some operating system error numbers"
+ " are described at\n"
+ "InnoDB: "
+ REFMAN "operating-system-error-codes.html\n",
+ name, (longlong)size, errno);
+
+ return (FALSE);
+ }
+ return (TRUE);
+ }
+#endif
+
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
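
The two os0file.cc additions above (1) try a FusionIO/DirectFS-style ioctl to enable atomic writes on data files and (2) optionally replace the zero-fill preallocation loop with a single posix_fallocate() call. A self-contained, Linux-only sketch of both calls against a scratch file; the ioctl request number is the one from the hunk and will simply fail on ordinary filesystems:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef DFS_IOCTL_ATOMIC_WRITE_SET
#define DFS_IOCTL_ATOMIC_WRITE_SET _IOW(0x95, 2, unsigned int)
#endif

int main(void)
{
    const char *name = "scratch.ibd";
    int fd = open(name, O_RDWR | O_CREAT, 0644);
    if (fd < 0)
        return 1;

    /* 1) ask the filesystem for atomic writes; only DirectFS/NVMFS accepts this */
    unsigned int enable = 1;
    if (ioctl(fd, DFS_IOCTL_ATOMIC_WRITE_SET, &enable)) {
        fprintf(stderr, "atomic writes not available on %s, errno %d\n",
                name, errno);
    }

    /* 2) preallocate 10 MB in one call instead of writing zero-filled blocks;
          note that posix_fallocate() returns the error number directly */
    int err = posix_fallocate(fd, 0, 10 * 1024 * 1024);
    if (err) {
        fprintf(stderr, "preallocating %s failed, error %d\n", name, err);
    }

    close(fd);
    unlink(name);
    return err != 0;
}
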
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 5a864f122a3..d56eb59e0bb 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -168,7 +168,6 @@ rec_get_n_extern_new(
{
const byte* nulls;
const byte* lens;
- dict_field_t* field;
ulint null_mask;
ulint n_extern;
ulint i;
@@ -189,10 +188,13 @@ rec_get_n_extern_new(
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -210,8 +212,6 @@ rec_get_n_extern_new(
if (UNIV_UNLIKELY(!field->fixed_len)) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -240,16 +240,15 @@ rec_get_n_extern_new(
Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT. This is a special case of
rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_init_offsets_comp_ordinary(
/*===========================*/
const rec_t* rec, /*!< in: physical record in
ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
+ ibool temp, /*!< in: whether to use the
+ format for temporary files in
+ index creation */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
@@ -257,27 +256,38 @@ rec_init_offsets_comp_ordinary(
ulint i = 0;
ulint offs = 0;
ulint any_ext = 0;
- const byte* nulls = rec - (extra + 1);
+ const byte* nulls = temp
+ ? rec - 1
+ : rec - (1 + REC_N_NEW_EXTRA_BYTES);
const byte* lens = nulls
- UT_BITS_IN_BYTES(index->n_nullable);
- dict_field_t* field;
ulint null_mask = 1;
#ifdef UNIV_DEBUG
- /* We cannot invoke rec_offs_make_valid() here, because it can hold
- that extra != REC_N_NEW_EXTRA_BYTES. Similarly, rec_offs_validate()
- will fail in that case, because it invokes rec_get_status(). */
+ /* We cannot invoke rec_offs_make_valid() here if temp=TRUE.
+ Similarly, rec_offs_validate() will fail in that case, because
+ it invokes rec_get_status(). */
offsets[2] = (ulint) rec;
offsets[3] = (ulint) index;
#endif /* UNIV_DEBUG */
+ ut_ad(temp || dict_table_is_comp(index->table));
+
+ if (temp && dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0 here. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype
- & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -297,10 +307,9 @@ rec_init_offsets_comp_ordinary(
null_mask <<= 1;
}
- if (UNIV_UNLIKELY(!field->fixed_len)) {
+ if (!field->fixed_len
+ || (temp && !dict_col_get_fixed_size(col, temp))) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -394,9 +403,8 @@ rec_init_offsets(
= dict_index_get_n_unique_in_tree(index);
break;
case REC_STATUS_ORDINARY:
- rec_init_offsets_comp_ordinary(rec,
- REC_N_NEW_EXTRA_BYTES,
- index, offsets);
+ rec_init_offsets_comp_ordinary(
+ rec, FALSE, index, offsets);
return;
}
@@ -774,17 +782,19 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INTERN
+UNIV_INLINE __attribute__((warn_unused_result, nonnull(1,2)))
ulint
-rec_get_converted_size_comp_prefix(
-/*===============================*/
+rec_get_converted_size_comp_prefix_low(
+/*===================================*/
const dict_index_t* index, /*!< in: record descriptor;
dict_table_is_comp() is
assumed to hold, even if
it does not */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
- ulint* extra) /*!< out: extra size */
+ ulint* extra, /*!< out: extra size */
+ ibool temp) /*!< in: whether this is a
+ temporary file record */
{
ulint extra_size;
ulint data_size;
@@ -793,15 +803,25 @@ rec_get_converted_size_comp_prefix(
ut_ad(fields);
ut_ad(n_fields > 0);
ut_ad(n_fields <= dict_index_get_n_fields(index));
+ ut_ad(!temp || extra);
- extra_size = REC_N_NEW_EXTRA_BYTES
+ extra_size = temp
+ ? UT_BITS_IN_BYTES(index->n_nullable)
+ : REC_N_NEW_EXTRA_BYTES
+ UT_BITS_IN_BYTES(index->n_nullable);
data_size = 0;
+ if (temp && dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0 here. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
for (i = 0; i < n_fields; i++) {
const dict_field_t* field;
ulint len;
+ ulint fixed_len;
const dict_col_t* col;
field = dict_index_get_nth_field(index, i);
@@ -820,6 +840,11 @@ rec_get_converted_size_comp_prefix(
ut_ad(len <= col->len || col->mtype == DATA_BLOB
|| (col->len == 0 && col->mtype == DATA_VARCHAR));
+ fixed_len = field->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -827,11 +852,20 @@ rec_get_converted_size_comp_prefix(
0..127. The length will be encoded in two bytes when
it is 128 or more, or when the field is stored externally. */
- if (field->fixed_len) {
- ut_ad(len == field->fixed_len);
+ if (fixed_len) {
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(col->mbminmaxlen);
+
+ ut_ad(len <= fixed_len);
+
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
+
/* dict_index_add_col() should guarantee this */
ut_ad(!field->prefix_len
- || field->fixed_len == field->prefix_len);
+ || fixed_len == field->prefix_len);
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(&fields[i])) {
ut_ad(col->len >= 256 || col->mtype == DATA_BLOB);
extra_size += 2;
@@ -848,7 +882,7 @@ rec_get_converted_size_comp_prefix(
data_size += len;
}
- if (UNIV_LIKELY_NULL(extra)) {
+ if (extra) {
*extra = extra_size;
}
@@ -856,6 +890,23 @@ rec_get_converted_size_comp_prefix(
}
/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_comp_prefix(
+/*===============================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ ut_ad(dict_table_is_comp(index->table));
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
+}
+
+/**********************************************************//**
Determines the size of a data tuple in ROW_FORMAT=COMPACT.
@return total size */
UNIV_INTERN
@@ -899,8 +950,8 @@ rec_get_converted_size_comp(
return(ULINT_UNDEFINED);
}
- return(size + rec_get_converted_size_comp_prefix(index, fields,
- n_fields, extra));
+ return(size + rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
}
/***********************************************************//**
@@ -1077,19 +1128,18 @@ rec_convert_dtuple_to_rec_old(
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_convert_dtuple_to_rec_comp(
/*===========================*/
rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields)/*!< in: number of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint status, /*!< in: status bits of the record */
+ ibool temp) /*!< in: whether to use the
+ format for temporary files in
+ index creation */
{
const dfield_t* field;
const dtype_t* type;
@@ -1101,31 +1151,44 @@ rec_convert_dtuple_to_rec_comp(
ulint n_node_ptr_field;
ulint fixed_len;
ulint null_mask = 1;
- ut_ad(extra == 0 || dict_table_is_comp(index->table));
- ut_ad(extra == 0 || extra == REC_N_NEW_EXTRA_BYTES);
+ ut_ad(temp || dict_table_is_comp(index->table));
ut_ad(n_fields > 0);
- switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
- case REC_STATUS_ORDINARY:
+ if (temp) {
+ ut_ad(status == REC_STATUS_ORDINARY);
ut_ad(n_fields <= dict_index_get_n_fields(index));
n_node_ptr_field = ULINT_UNDEFINED;
- break;
- case REC_STATUS_NODE_PTR:
- ut_ad(n_fields == dict_index_get_n_unique_in_tree(index) + 1);
- n_node_ptr_field = n_fields - 1;
- break;
- case REC_STATUS_INFIMUM:
- case REC_STATUS_SUPREMUM:
- ut_ad(n_fields == 1);
- n_node_ptr_field = ULINT_UNDEFINED;
- break;
- default:
- ut_error;
- return;
+ nulls = rec - 1;
+ if (dict_table_is_comp(index->table)) {
+ /* No need to adjust fixed_len=0 here. We only
+ need to adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+ } else {
+ nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
+
+ switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
+ case REC_STATUS_ORDINARY:
+ ut_ad(n_fields <= dict_index_get_n_fields(index));
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ case REC_STATUS_NODE_PTR:
+ ut_ad(n_fields
+ == dict_index_get_n_unique_in_tree(index) + 1);
+ n_node_ptr_field = n_fields - 1;
+ break;
+ case REC_STATUS_INFIMUM:
+ case REC_STATUS_SUPREMUM:
+ ut_ad(n_fields == 1);
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ default:
+ ut_error;
+ return;
+ }
}
end = rec;
- nulls = rec - (extra + 1);
lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
/* clear the SQL-null flags */
memset(lens + 1, 0, nulls - lens);
@@ -1171,6 +1234,10 @@ rec_convert_dtuple_to_rec_comp(
ifield = dict_index_get_nth_field(index, i);
fixed_len = ifield->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(ifield->col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -1178,8 +1245,17 @@ rec_convert_dtuple_to_rec_comp(
0..127. The length will be encoded in two bytes when
it is 128 or more, or when the field is stored externally. */
if (fixed_len) {
- ut_ad(len == fixed_len);
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(
+ ifield->col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(
+ ifield->col->mbminmaxlen);
+
+ ut_ad(len <= fixed_len);
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
ut_ad(!dfield_is_ext(field));
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(field)) {
ut_ad(ifield->col->len >= 256
|| ifield->col->mtype == DATA_BLOB);
@@ -1233,8 +1309,7 @@ rec_convert_dtuple_to_rec_new(
rec = buf + extra_size;
rec_convert_dtuple_to_rec_comp(
- rec, REC_N_NEW_EXTRA_BYTES, index, status,
- dtuple->fields, dtuple->n_fields);
+ rec, index, dtuple->fields, dtuple->n_fields, status, FALSE);
/* Set the info bits of the record */
rec_set_info_and_status_bits(rec, dtuple_get_info_bits(dtuple));
@@ -1296,6 +1371,54 @@ rec_convert_dtuple_to_rec(
return(rec);
}
+#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, TRUE));
+}
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+{
+ rec_init_offsets_comp_ordinary(rec, TRUE, index, offsets);
+}
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+{
+ rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
+ REC_STATUS_ORDINARY, TRUE);
+}
+
/**************************************************************//**
Copies the first n fields of a physical record to a data tuple. The fields
are copied to the memory heap. */
@@ -1506,6 +1629,7 @@ rec_copy_prefix_to_buf(
return(*buf + (rec - (lens + 1)));
}
+#endif /* UNIV_HOTBACKUP */
/***************************************************************//**
Validates the consistency of an old-style physical record.
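
The new rec_*_temp() helpers above reuse the COMPACT code paths but write merge-sort records with a smaller header: only the null-flag bitmap precedes the data, and the usual REC_N_NEW_EXTRA_BYTES of on-page header are dropped. A small sketch of that size arithmetic, assuming the usual values REC_N_NEW_EXTRA_BYTES = 5 and UT_BITS_IN_BYTES(b) = (b + 7) / 8:

#include <stdio.h>

#define REC_N_NEW_EXTRA_BYTES 5
#define UT_BITS_IN_BYTES(b)   (((b) + 7) / 8)

/* header bytes stored in front of the data payload */
static unsigned rec_extra_size(unsigned n_nullable, int temp)
{
    return temp
        ? UT_BITS_IN_BYTES(n_nullable)                          /* temp file */
        : REC_N_NEW_EXTRA_BYTES + UT_BITS_IN_BYTES(n_nullable); /* on page   */
}

int main(void)
{
    unsigned n_nullable = 3;    /* e.g. three nullable columns in the index */

    printf("on-page COMPACT header: %u bytes\n", rec_extra_size(n_nullable, 0));
    printf("merge-sort temp header: %u bytes\n", rec_extra_size(n_nullable, 1));
    return 0;
}
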
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index e79518e24de..e8d15fb539c 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -2394,7 +2394,10 @@ row_ins_index_entry(
err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
n_ext, thr);
if (err != DB_FAIL) {
-
+ if (index == dict_table_get_first_index(index->table)
+ && thr_get_trx(thr)->mysql_thd != 0) {
+ DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
+ }
return(err);
}
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index cf662cb1f88..244aa0a69f1 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -294,6 +294,7 @@ row_merge_buf_add(
ulint len;
const dict_col_t* col;
ulint col_no;
+ ulint fixed_len;
const dfield_t* row_field;
ibool col_adjusted;
@@ -435,9 +436,30 @@ row_merge_buf_add(
ut_ad(len <= col->len || col->mtype == DATA_BLOB);
- if (ifield->fixed_len) {
- ut_ad(len == ifield->fixed_len);
+ fixed_len = ifield->fixed_len;
+ if (fixed_len && !dict_table_is_comp(index->table)
+ && DATA_MBMINLEN(col->mbminmaxlen)
+ != DATA_MBMAXLEN(col->mbminmaxlen)) {
+ /* CHAR in ROW_FORMAT=REDUNDANT is always
+ fixed-length, but in the temporary file it is
+ variable-length for variable-length character
+ sets. */
+ fixed_len = 0;
+ }
+
+ if (fixed_len) {
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(col->mbminmaxlen);
+
+ /* len should be between the sizes calculated based on
+ mbmaxlen and mbminlen */
+ ut_ad(len <= fixed_len);
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
+
ut_ad(!dfield_is_ext(field));
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(field)) {
extra_size += 2;
} else if (len < 128
@@ -464,12 +486,11 @@ row_merge_buf_add(
ulint size;
ulint extra;
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields, &extra);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra);
- ut_ad(data_size + extra_size + REC_N_NEW_EXTRA_BYTES == size);
- ut_ad(extra_size + REC_N_NEW_EXTRA_BYTES == extra);
+ ut_ad(data_size + extra_size == size);
+ ut_ad(extra_size == extra);
}
#endif /* UNIV_DEBUG */
@@ -660,14 +681,9 @@ row_merge_buf_write(
ulint extra_size;
const dfield_t* entry = buf->tuples[i];
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields,
- &extra_size);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra_size);
ut_ad(size >= extra_size);
- ut_ad(extra_size >= REC_N_NEW_EXTRA_BYTES);
- extra_size -= REC_N_NEW_EXTRA_BYTES;
- size -= REC_N_NEW_EXTRA_BYTES;
/* Encode extra_size + 1 */
if (extra_size + 1 < 0x80) {
@@ -680,9 +696,8 @@ row_merge_buf_write(
ut_ad(b + size < &block[srv_sort_buf_size]);
- rec_convert_dtuple_to_rec_comp(b + extra_size, 0, index,
- REC_STATUS_ORDINARY,
- entry, n_fields);
+ rec_convert_dtuple_to_temp(b + extra_size, index,
+ entry, n_fields);
b += size;
@@ -790,6 +805,8 @@ row_merge_read(
os_offset_t ofs = ((os_offset_t) offset) * srv_sort_buf_size;
ibool success;
+ DBUG_EXECUTE_IF("row_merge_read_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_read) {
fprintf(stderr, "row_merge_read fd=%d ofs=%lu\n",
@@ -839,6 +856,8 @@ row_merge_write(
ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len);
+ DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_write) {
fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
@@ -951,7 +970,7 @@ err_exit:
*mrec = *buf + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
@@ -970,7 +989,7 @@ err_exit:
*mrec = b + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
ut_ad(extra_size + data_size < sizeof *buf);
@@ -2449,7 +2468,7 @@ row_merge_drop_temp_indexes(void)
/*********************************************************************//**
Creates temporary merge files, and if UNIV_PFS_IO defined, register
the file descriptor with Performance Schema.
-@return File descriptor */
+@return file descriptor, or -1 on failure */
UNIV_INLINE
int
row_merge_file_create_low(void)
@@ -2471,13 +2490,19 @@ row_merge_file_create_low(void)
#ifdef UNIV_PFS_IO
register_pfs_file_open_end(locker, fd);
#endif
+ if (fd < 0) {
+ fprintf(stderr,
+ "InnoDB: Error: Cannot create temporary merge file\n");
+ return(-1);
+ }
return(fd);
}
/*********************************************************************//**
-Create a merge file. */
+Create a merge file.
+@return file descriptor, or -1 on failure */
UNIV_INTERN
-void
+int
row_merge_file_create(
/*==================*/
merge_file_t* merge_file) /*!< out: merge file structure */
@@ -2488,6 +2513,7 @@ row_merge_file_create(
}
merge_file->offset = 0;
merge_file->n_rec = 0;
+ return(merge_file->fd);
}
/*********************************************************************//**
@@ -2761,6 +2787,28 @@ row_merge_rename_tables(
goto err_exit;
}
+ /* Generate the redo logs for file operations */
+ fil_mtr_rename_log(old_table->space, old_name,
+ new_table->space, new_table->name, tmp_name);
+
+ /* What if the redo logs are flushed to disk here? This is
+ tested with following crash point */
+ DBUG_EXECUTE_IF("bug14669848_precommit", log_buffer_flush_to_disk();
+ DBUG_SUICIDE(););
+
+ /* File operations cannot be rolled back. So, before proceeding
+ with file operations, commit the dictionary changes.*/
+ trx_commit_for_mysql(trx);
+
+ /* If the server crashes here, the dictionary in InnoDB and MySQL
+ will differ. The .ibd files and the .frm files must be swapped
+ manually by the administrator. No loss of data. */
+ DBUG_EXECUTE_IF("bug14669848", DBUG_SUICIDE(););
+
+ /* Ensure that the redo logs are flushed to disk. The config
+ innodb_flush_log_at_trx_commit must not affect this. */
+ log_buffer_flush_to_disk();
+
/* The following calls will also rename the .ibd data files if
the tables are stored in a single-table tablespace */
@@ -2935,7 +2983,7 @@ row_merge_build_indexes(
ulint i;
ulint j;
ulint error;
- int tmpfd;
+ int tmpfd = -1;
dict_index_t* fts_sort_idx = NULL;
fts_psort_t* psort_info = NULL;
fts_psort_t* merge_info = NULL;
@@ -2959,9 +3007,21 @@ row_merge_build_indexes(
block = static_cast<row_merge_block_t*>(
os_mem_alloc_large(&block_size));
+ /* Initialize all the merge file descriptors, so that we
+ don't call row_merge_file_destroy() on an uninitialized
+ merge file descriptor. */
+
for (i = 0; i < n_indexes; i++) {
+ merge_files[i].fd = -1;
+ }
- row_merge_file_create(&merge_files[i]);
+ for (i = 0; i < n_indexes; i++) {
+
+ if (row_merge_file_create(&merge_files[i]) < 0)
+ {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
if (indexes[i]->type & DICT_FTS) {
ibool opt_doc_id_size = FALSE;
@@ -2982,6 +3042,12 @@ row_merge_build_indexes(
tmpfd = row_merge_file_create_low();
+ if (tmpfd < 0)
+ {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
+
/* Reset the MySQL row buffer that is used when reporting
duplicate keys. */
innobase_rec_reset(table);
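
row_merge_file_create() now returns the descriptor (-1 on failure), and row_merge_build_indexes() sets every merge_files[i].fd to -1 up front so the error path never destroys an uninitialized descriptor. The same initialize, create, check, clean-up shape in miniature, with mkstemp() standing in for the real temp-file helper:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define N_FILES 4

static int merge_file_create(void)
{
    char path[] = "/tmp/merge_XXXXXX";
    int fd = mkstemp(path);     /* stand-in for the InnoDB temp-file helper */
    if (fd >= 0)
        unlink(path);           /* keep it anonymous, like a real temp file */
    return fd;                  /* -1 on failure */
}

int main(void)
{
    int fds[N_FILES];
    int i, err = 0;

    for (i = 0; i < N_FILES; i++)
        fds[i] = -1;            /* initialize first, so cleanup is always safe */

    for (i = 0; i < N_FILES; i++) {
        fds[i] = merge_file_create();
        if (fds[i] < 0) {
            fprintf(stderr, "cannot create merge file %d\n", i);
            err = 1;
            break;
        }
    }

    for (i = 0; i < N_FILES; i++)
        if (fds[i] >= 0)
            close(fds[i]);

    return err;
}
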
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index f1811a664c2..7a07833fa16 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -4532,6 +4532,13 @@ end:
trx->error_state = DB_SUCCESS;
trx_rollback_to_savepoint(trx, NULL);
trx->error_state = DB_SUCCESS;
+ } else {
+ if (old_is_tmp && !new_is_tmp) {
+ /* After ALTER TABLE the table statistics
+ need to be rebuilt. They will be rebuilt
+ when the table is loaded again. */
+ table->stat_initialized = FALSE;
+ }
}
}
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 4869909f5a6..42034c5b80d 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -518,7 +518,7 @@ row_undo_mod_upd_del_sec(
ulint err = DB_SUCCESS;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
-
+ ut_ad(!node->undo_row);
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -576,6 +576,8 @@ row_undo_mod_del_mark_sec(
mem_heap_t* heap;
ulint err = DB_SUCCESS;
+ ut_ad(!node->undo_row);
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 757d3544ba4..a73f858599d 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -217,7 +217,7 @@ row_undo_search_clust_to_pcur(
node->row = row_build(ROW_COPY_DATA, clust_index, rec,
offsets, NULL, ext, node->heap);
- if (node->update) {
+ if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
node->undo_row = dtuple_copy(node->row, node->heap);
row_upd_replace(node->undo_row, &node->undo_ext,
clust_index, node->update, node->heap);
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index e64cc006f02..30e0698eab9 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -39,6 +39,8 @@ Created 10/8/1995 Heikki Tuuri
*******************************************************/
/* Dummy comment */
+#include "m_string.h" /* for my_sys.h */
+#include "my_sys.h" /* DEBUG_SYNC_C */
#include "srv0srv.h"
#include "ut0mem.h"
@@ -354,6 +356,11 @@ UNIV_INTERN unsigned long long srv_stats_persistent_sample_pages = 20;
UNIV_INTERN ibool srv_use_doublewrite_buf = TRUE;
+UNIV_INTERN ibool srv_use_atomic_writes = FALSE;
+#ifdef HAVE_POSIX_FALLOCATE
+UNIV_INTERN ibool srv_use_posix_fallocate = TRUE;
+#endif
+
/** doublewrite buffer is 1MB is size i.e.: it can hold 128 16K pages.
The following parameter is the size of the buffer that is used for
batch flushing i.e.: LRU flushing and flush_list flushing. The rest
@@ -1308,13 +1315,15 @@ void
srv_export_innodb_status(void)
/*==========================*/
{
- buf_pool_stat_t stat;
- ulint LRU_len;
- ulint free_len;
- ulint flush_list_len;
+ buf_pool_stat_t stat;
+ buf_pools_list_size_t buf_pools_list_size;
+ ulint LRU_len;
+ ulint free_len;
+ ulint flush_list_len;
buf_get_total_stat(&stat);
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
+ buf_get_total_list_size_in_bytes(&buf_pools_list_size);
mutex_enter(&srv_innodb_monitor_mutex);
@@ -1343,7 +1352,12 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_read_ahead_evicted
= stat.n_ra_pages_evicted;
export_vars.innodb_buffer_pool_pages_data = LRU_len;
+ export_vars.innodb_buffer_pool_bytes_data =
+ buf_pools_list_size.LRU_bytes
+ + buf_pools_list_size.unzip_LRU_bytes;
export_vars.innodb_buffer_pool_pages_dirty = flush_list_len;
+ export_vars.innodb_buffer_pool_bytes_dirty =
+ buf_pools_list_size.flush_list_bytes;
export_vars.innodb_buffer_pool_pages_free = free_len;
#ifdef UNIV_DEBUG
export_vars.innodb_buffer_pool_pages_latched
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index af64d011db2..c492edf89b6 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -309,9 +309,9 @@ mutex_create_func(
/* NOTE! The very first mutexes are not put to the mutex list */
- if ((mutex == &mutex_list_mutex)
+ if (mutex == &mutex_list_mutex
#ifdef UNIV_SYNC_DEBUG
- || (mutex == &sync_thread_mutex)
+ || mutex == &sync_thread_mutex
#endif /* UNIV_SYNC_DEBUG */
) {
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 62c140879aa..d050e7461e7 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -69,6 +69,10 @@ UNIV_INTERN mysql_pfs_key_t trx_purge_latch_key;
UNIV_INTERN mysql_pfs_key_t purge_sys_bh_mutex_key;
#endif /* UNIV_PFS_MUTEX */
+#ifdef UNIV_DEBUG
+UNIV_INTERN my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
/********************************************************************//**
Fetches the next undo log record from the history list to purge. It must be
released with the corresponding release function.
@@ -1215,6 +1219,12 @@ trx_purge(
rw_lock_x_unlock(&purge_sys->latch);
+#ifdef UNIV_DEBUG
+ if (srv_purge_view_update_only_debug) {
+ return(0);
+ }
+#endif
+
/* Fetch the UNDO recs that need to be purged. */
n_pages_handled = trx_purge_attach_undo_recs(
n_purge_threads, purge_sys, &purge_sys->limit, batch_size);
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index b87eac9362e..203139f23fd 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1588,6 +1588,25 @@ trx_undo_prev_version_build(
if (row_upd_changes_field_size_or_external(index, offsets, update)) {
ulint n_ext;
+ /* We should confirm the existence of disowned external data,
+ if the previous version record is delete marked. If the trx_id
+ of the previous record is seen by purge view, we should treat
+ it as missing history, because the disowned external data
+ might be purged already.
+
+ The inherited external data (BLOBs) can be freed (purged)
+ after trx_id was committed, provided that no view was started
+ before trx_id. If the purge view can see the committed
+ delete-marked record by trx_id, no transactions need to access
+ the BLOB. */
+
+ if ((update->info_bits & REC_INFO_DELETED_FLAG)
+ && read_view_sees_trx_id(purge_sys->view, trx_id)) {
+ /* Treat this as a fresh insert, so as not to
+ cause an assertion failure at the caller. */
+ return(DB_SUCCESS);
+ }
+
/* We have to set the appropriate extern storage bits in the
old version of the record: the extern bits in rec for those
fields that update does NOT update, as well as the bits for
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 95f37ddb12f..af5925dbeeb 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -1000,12 +1000,6 @@ static const char *ha_maria_exts[]=
};
-const char **ha_maria::bas_ext() const
-{
- return ha_maria_exts;
-}
-
-
const char *ha_maria::index_type(uint key_number)
{
return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
@@ -3491,6 +3485,7 @@ static int ha_maria_init(void *p)
maria_hton->db_type= DB_TYPE_UNKNOWN;
maria_hton->create= maria_create_handler;
maria_hton->panic= maria_hton_panic;
+ maria_hton->tablefile_extensions= ha_maria_exts;
maria_hton->commit= maria_commit;
maria_hton->rollback= maria_rollback;
maria_hton->checkpoint_state= maria_checkpoint_state;
@@ -3762,11 +3757,6 @@ SHOW_VAR status_variables[]= {
{NullS, NullS, SHOW_LONG}
};
-static struct st_mysql_show_var aria_status_variables[]= {
- {"Aria", (char*) &status_variables, SHOW_ARRAY},
- {NullS, NullS, SHOW_LONG}
-};
-
/****************************************************************************
* Maria MRR implementation: use DS-MRR
***************************************************************************/
@@ -3841,7 +3831,7 @@ maria_declare_plugin(aria)
ha_maria_init, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0105, /* 1.5 */
- aria_status_variables, /* status variables */
+ status_variables, /* status variables */
system_variables, /* system variables */
"1.5", /* string version */
MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */
diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h
index 086c4088d95..7d12f1637b4 100644
--- a/storage/maria/ha_maria.h
+++ b/storage/maria/ha_maria.h
@@ -35,7 +35,6 @@ C_MODE_START
ICP_RESULT index_cond_func_maria(void *arg);
C_MODE_END
-extern ulong maria_sort_buffer_size;
extern TYPELIB maria_recover_typelib;
extern ulonglong maria_recover_options;
@@ -60,7 +59,6 @@ public:
~ha_maria() {}
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *index_type(uint key_number);
- const char **bas_ext() const;
ulonglong table_flags() const
{ return int_table_flags; }
ulong index_flags(uint inx, uint part, bool all_parts) const;
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index e3668d3c8d3..d48e8df5cf1 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -1,4 +1,5 @@
/* Copyright (C) 2007 Michael Widenius
+ Copyright (c) 2010, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -135,8 +136,7 @@ const char *bits_to_txt[]=
"tail 00-40 % full", "tail 40-80 % full", "tail/blob full"
};
-/*#define WRONG_BITMAP_FLUSH 1*/ /*define only for provoking bugs*/
-#undef WRONG_BITMAP_FLUSH
+#define WRONG_BITMAP_FLUSH 0 /*define to 1 only for provoking bugs*/
static my_bool _ma_read_bitmap_page(MARIA_HA *info,
MARIA_FILE_BITMAP *bitmap,
@@ -164,11 +164,7 @@ static inline my_bool write_changed_bitmap(MARIA_SHARE *share,
*/
bitmap->changed_not_flushed= 1;
- if ((bitmap->non_flushable == 0)
-#ifdef WRONG_BITMAP_FLUSH
- || 1
-#endif
- )
+ if ((bitmap->non_flushable == 0) || WRONG_BITMAP_FLUSH)
{
res= pagecache_write(share->pagecache,
&bitmap->file, bitmap->page, 0,
@@ -495,7 +491,7 @@ my_bool _ma_bitmap_flush_all(MARIA_SHARE *share)
{
bitmap->flush_all_requested++;
bitmap->waiting_for_non_flushable++;
-#ifndef WRONG_BITMAP_FLUSH
+#if !WRONG_BITMAP_FLUSH
while (bitmap->non_flushable > 0)
{
DBUG_PRINT("info", ("waiting for bitmap to be flushable"));
@@ -1256,7 +1252,7 @@ static my_bool allocate_head(MARIA_FILE_BITMAP *bitmap, uint size,
a full page or a tail page
*/
if ((!bits && best_data) ||
- ((bits & LL(04444444444444444)) == LL(04444444444444444)))
+ ((bits & 04444444444444444LL) == 04444444444444444LL))
continue;
for (i= 0; i < 16 ; i++, bits >>= 3)
{
@@ -1344,8 +1340,8 @@ static my_bool allocate_tail(MARIA_FILE_BITMAP *bitmap, uint size,
quite common case if we have blobs.
*/
- if ((!bits && best_data) || bits == LL(0xffffffffffff) ||
- bits == LL(04444444444444444))
+ if ((!bits && best_data) || bits == 0xffffffffffffLL ||
+ bits == 04444444444444444LL)
continue;
for (i= 0; i < 16; i++, bits >>= 3)
{
@@ -1470,14 +1466,14 @@ static ulong allocate_full_pages(MARIA_FILE_BITMAP *bitmap,
bits= prefix_bits= uint6korr(data_start - 6);
DBUG_ASSERT(bits != 0);
/* 111 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 */
- if (!(bits & LL(07000000000000000)))
+ if (!(bits & 07000000000000000LL))
{
data_start-= 6;
do
{
prefix_area_size++;
bits<<= 3;
- } while (!(bits & LL(07000000000000000)));
+ } while (!(bits & 07000000000000000LL));
area_size+= prefix_area_size;
/* Calculate offset to page from data_start */
prefix_area_size= 16 - prefix_area_size;
@@ -1526,11 +1522,11 @@ static ulong allocate_full_pages(MARIA_FILE_BITMAP *bitmap,
best_prefix_area_size= 16 - best_prefix_area_size;
if (best_area_size < best_prefix_area_size)
{
- tmp= (LL(1) << best_area_size*3) - 1;
+ tmp= (1LL << best_area_size*3) - 1;
best_area_size= best_prefix_area_size; /* for easy end test */
}
else
- tmp= (LL(1) << best_prefix_area_size*3) - 1;
+ tmp= (1LL << best_prefix_area_size*3) - 1;
tmp<<= (16 - best_prefix_area_size) * 3;
DBUG_ASSERT((best_prefix_bits & tmp) == 0);
best_prefix_bits|= tmp;
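
ma_bitmap.c turns WRONG_BITMAP_FLUSH from an #ifdef into a constant 0/1 macro, so the debugging branch is always compiled (and optimized away) instead of silently bit-rotting behind the preprocessor. The idiom in isolation; the surrounding logic here is invented for illustration:

#include <stdio.h>

#define WRONG_BITMAP_FLUSH 0    /* set to 1 only to provoke bugs */

static int write_changed_bitmap(int non_flushable)
{
    /* the branch is type-checked even when the flag is 0, unlike an #ifdef */
    if (non_flushable == 0 || WRONG_BITMAP_FLUSH)
        return 1;               /* flush the bitmap page now */
    return 0;                   /* defer the flush */
}

int main(void)
{
    printf("non_flushable=0 -> flush=%d\n", write_changed_bitmap(0));
    printf("non_flushable=2 -> flush=%d\n", write_changed_bitmap(2));
    return 0;
}
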
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 55b9a137050..16657ba80ae 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -5454,7 +5454,7 @@ restart_bitmap_scan:
{
bits= uint6korr(data);
/* Skip not allocated pages and blob / full tail pages */
- if (bits && bits != LL(07777777777777777))
+ if (bits && bits != 07777777777777777LL)
break;
}
bit_pos= 0;
@@ -7123,7 +7123,7 @@ my_bool _ma_apply_undo_row_delete(MARIA_HA *info, LSN undo_lsn,
memcpy(field_pos, field_length_data, size_length);
field_length_data+= size_length;
- memcpy(field_pos + size_length, &header, sizeof(&header));
+ memcpy(field_pos + size_length, &header, sizeof(header));
header+= blob_length;
*blob_lengths++= blob_length;
break;
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index ab9080c40fb..192e2ed32a1 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -4025,8 +4025,8 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
if (rep_quick && (param->testflag & T_FORCE_UNIQUENESS))
{
- my_off_t skr= (share->state.state.data_file_length +
- (sort_info.org_data_file_type == COMPRESSED_RECORD) ?
+ my_off_t skr= share->state.state.data_file_length +
+ ((sort_info.org_data_file_type == COMPRESSED_RECORD) ?
MEMMAP_EXTRA_MARGIN : 0);
#ifdef USE_RELOC
if (sort_info.org_data_file_type == STATIC_RECORD &&
@@ -4545,8 +4545,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
if (rep_quick && (param->testflag & T_FORCE_UNIQUENESS))
{
- my_off_t skr= (share->state.state.data_file_length +
- (sort_info.org_data_file_type == COMPRESSED_RECORD) ?
+ my_off_t skr= share->state.state.data_file_length +
+ ((sort_info.org_data_file_type == COMPRESSED_RECORD) ?
MEMMAP_EXTRA_MARGIN : 0);
#ifdef USE_RELOC
if (sort_info.org_data_file_type == STATIC_RECORD &&
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index 28c3491730f..4ed00598c2f 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -1215,19 +1215,19 @@ uint maria_get_pointer_length(ulonglong file_length, uint def)
if (file_length) /* If not default */
{
#ifdef NOT_YET_READY_FOR_8_BYTE_POINTERS
- if (file_length >= (ULL(1) << 56))
+ if (file_length >= (1ULL << 56))
def=8;
else
#endif
- if (file_length >= (ULL(1) << 48))
+ if (file_length >= (1ULL << 48))
def=7;
- else if (file_length >= (ULL(1) << 40))
+ else if (file_length >= (1ULL << 40))
def=6;
- else if (file_length >= (ULL(1) << 32))
+ else if (file_length >= (1ULL << 32))
def=5;
- else if (file_length >= (ULL(1) << 24))
+ else if (file_length >= (1ULL << 24))
def=4;
- else if (file_length >= (ULL(1) << 16))
+ else if (file_length >= (1ULL << 16))
def=3;
else
def=2;
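
maria_get_pointer_length() above now writes its thresholds as 1ULL << n instead of the old ULL() macro. The same threshold walk as a compact standalone function (the 8-byte branch behind NOT_YET_READY_FOR_8_BYTE_POINTERS is omitted):

#include <stdio.h>

static unsigned pointer_length(unsigned long long file_length)
{
    if (file_length >= (1ULL << 48)) return 7;
    if (file_length >= (1ULL << 40)) return 6;
    if (file_length >= (1ULL << 32)) return 5;
    if (file_length >= (1ULL << 24)) return 4;
    if (file_length >= (1ULL << 16)) return 3;
    return 2;
}

int main(void)
{
    printf("%u\n", pointer_length(1ULL << 33));   /* 5: needs 5-byte pointers */
    printf("%u\n", pointer_length(60000ULL));     /* 2: fits in 2 bytes */
    return 0;
}
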
diff --git a/storage/maria/ma_init.c b/storage/maria/ma_init.c
index 78ca7ed9bf8..962405552f0 100644
--- a/storage/maria/ma_init.c
+++ b/storage/maria/ma_init.c
@@ -148,7 +148,7 @@ my_bool maria_upgrade()
"Converting them to Aria names",
MYF(ME_JUST_INFO));
- for (i= 0; i < dir->number_off_files; i++)
+ for (i= 0; i < dir->number_of_files; i++)
{
const char *file= dir->dir_entry[i].name;
if (strncmp(file, "maria_log.", 10) == 0 &&
diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c
index f62ffcc49a0..a3553801eaa 100644
--- a/storage/maria/ma_key.c
+++ b/storage/maria/ma_key.c
@@ -99,7 +99,7 @@ uint transid_store_packed(MARIA_HA *info, uchar *to, ulonglong trid)
uchar *start;
uint length;
uchar buff[8];
- DBUG_ASSERT(trid < (LL(1) << (MARIA_MAX_PACK_TRANSID_SIZE*8)));
+ DBUG_ASSERT(trid < (1LL << (MARIA_MAX_PACK_TRANSID_SIZE*8)));
DBUG_ASSERT(trid >= info->s->state.create_trid);
trid= (trid - info->s->state.create_trid) << 1;
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 56926c048d8..5233e57594c 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -3274,7 +3274,7 @@ static my_bool translog_get_last_page_addr(TRANSLOG_ADDRESS *addr,
DBUG_PRINT("info", ("File size: %s", llstr(file_size, buff)));
if (file_size == MY_FILEPOS_ERROR)
DBUG_RETURN(1);
- DBUG_ASSERT(file_size < ULL(0xffffffff));
+ DBUG_ASSERT(file_size < 0xffffffffULL);
if (((uint32)file_size) > TRANSLOG_PAGE_SIZE)
{
rec_offset= (((((uint32)file_size) / TRANSLOG_PAGE_SIZE) - 1) *
@@ -3479,7 +3479,7 @@ my_bool translog_walk_filenames(const char *directory,
if (!(dirp = my_dir(directory, MYF(MY_DONT_SORT))))
return FALSE;
- for (i= 0; i < dirp->number_off_files; i++)
+ for (i= 0; i < dirp->number_of_files; i++)
{
char *file= dirp->dir_entry[i].name;
if (strncmp(file, "aria_log.", 10) == 0 &&
@@ -3785,12 +3785,12 @@ my_bool translog_init_with_table(const char *directory,
TRANSLOG_FILE *file= (TRANSLOG_FILE *)my_malloc(sizeof(TRANSLOG_FILE),
MYF(0));
- compile_time_assert(MY_FILEPOS_ERROR > ULL(0xffffffff));
+ compile_time_assert(MY_FILEPOS_ERROR > 0xffffffffULL);
if (file == NULL ||
(file->handler.file=
open_logfile_by_number_no_cache(i)) < 0 ||
mysql_file_seek(file->handler.file, 0, SEEK_END, MYF(0)) >=
- ULL(0xffffffff))
+ 0xffffffffULL)
{
int j;
for (j= i - log_descriptor.min_file - 1; j > 0; j--)
@@ -5247,7 +5247,7 @@ static uchar *translog_put_LSN_diff(LSN base_lsn, LSN lsn, uchar *dst)
dst[0]= (uchar)(0x80 | (diff >> 24));
int3store(dst + 1, diff & 0xFFFFFFL);
}
- else if (diff <= LL(0x3FFFFFFFFF))
+ else if (diff <= 0x3FFFFFFFFFLL)
{
dst-= 5;
@@ -5344,7 +5344,7 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst)
{
/* take 1 from file offset */
first_byte++;
- base_offset+= LL(0x100000000);
+ base_offset+= 0x100000000LL;
}
file_no= LSN_FILE_NO(base_lsn) - first_byte;
DBUG_ASSERT(base_offset - diff <= UINT_MAX);
diff --git a/storage/maria/ma_loghandler_lsn.h b/storage/maria/ma_loghandler_lsn.h
index 7fa53bc0a50..f618429f6f3 100644
--- a/storage/maria/ma_loghandler_lsn.h
+++ b/storage/maria/ma_loghandler_lsn.h
@@ -83,8 +83,8 @@ typedef TRANSLOG_ADDRESS LSN;
other bytes are a LSN.
*/
typedef LSN LSN_WITH_FLAGS;
-#define LSN_WITH_FLAGS_TO_LSN(x) (x & ULL(0x00FFFFFFFFFFFFFF))
-#define LSN_WITH_FLAGS_TO_FLAGS(x) (x & ULL(0xFF00000000000000))
+#define LSN_WITH_FLAGS_TO_LSN(x) (x & 0x00FFFFFFFFFFFFFFULL)
+#define LSN_WITH_FLAGS_TO_FLAGS(x) (x & 0xFF00000000000000ULL)
#define FILENO_IMPOSSIBLE 0 /**< log file's numbering starts at 1 */
#define LOG_OFFSET_IMPOSSIBLE 0 /**< log always has a header */
@@ -106,6 +106,6 @@ typedef LSN LSN_WITH_FLAGS;
Unlike ULONGLONG_MAX, it can be safely used in comparison with valid LSNs
(ULONGLONG_MAX is too big for correctness of cmp_translog_addr()).
*/
-#define LSN_MAX (LSN)ULL(0x00FFFFFFFFFFFFFF)
+#define LSN_MAX (LSN)0x00FFFFFFFFFFFFFFULL
#endif
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index 6aaccea219f..4a3c3efd3fb 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -1650,7 +1650,7 @@ static void unlink_hash(PAGECACHE *pagecache, PAGECACHE_HASH_LINK *hash_link)
struct st_my_thread_var *thread;
hash_link->file= first_page->file;
- DBUG_ASSERT(first_page->pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(first_page->pageno < ((1ULL) << 40));
hash_link->pageno= first_page->pageno;
do
{
@@ -1813,7 +1813,7 @@ restart:
goto restart;
}
hash_link->file= *file;
- DBUG_ASSERT(pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(pageno < ((1ULL) << 40));
hash_link->pageno= pageno;
link_hash(start, hash_link);
/* Register the request for the page */
@@ -3357,7 +3357,7 @@ uchar *pagecache_read(PAGECACHE *pagecache,
page_cache_page_pin_str[unlock_pin]));
DBUG_ASSERT(buff != 0 || (buff == 0 && (unlock_pin == PAGECACHE_PIN ||
unlock_pin == PAGECACHE_PIN_LEFT_PINNED)));
- DBUG_ASSERT(pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(pageno < ((1ULL) << 40));
#endif
if (!page_link)
@@ -3797,7 +3797,7 @@ my_bool pagecache_delete(PAGECACHE *pagecache,
pin == PAGECACHE_PIN_LEFT_PINNED);
restart:
- DBUG_ASSERT(pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(pageno < ((1ULL) << 40));
if (pagecache->can_be_used)
{
/* Key cache is used */
@@ -3977,7 +3977,7 @@ my_bool pagecache_write_part(PAGECACHE *pagecache,
DBUG_ASSERT(lock != PAGECACHE_LOCK_LEFT_READLOCKED);
DBUG_ASSERT(lock != PAGECACHE_LOCK_READ_UNLOCK);
DBUG_ASSERT(offset + size <= pagecache->block_size);
- DBUG_ASSERT(pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(pageno < ((1ULL) << 40));
#endif
if (!page_link)
@@ -4974,7 +4974,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache,
ptr+= 2;
ptr[0]= (share->kfile.file == block->hash_link->file.file);
ptr++;
- DBUG_ASSERT(block->hash_link->pageno < ((ULL(1)) << 40));
+ DBUG_ASSERT(block->hash_link->pageno < ((1ULL) << 40));
page_store(ptr, block->hash_link->pageno);
ptr+= PAGE_STORE_SIZE;
lsn_store(ptr, block->rec_lsn);
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index 9ac42f885b5..aeeda26b791 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -1,5 +1,5 @@
/* Copyright (C) 2006, 2007 MySQL AB
- Copyright (C) 2010-2011 Monty Program Ab
+ Copyright (C) 2010, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -3683,7 +3683,7 @@ static void print_redo_phase_progress(TRANSLOG_ADDRESS addr)
end_offset);
if (initial_remainder == (ulonglong)(-1))
initial_remainder= local_remainder;
- percentage_done= (uint) ((initial_remainder - local_remainder) * ULL(100) /
+ percentage_done= (uint) ((initial_remainder - local_remainder) * 100ULL /
initial_remainder);
if ((percentage_done - percentage_printed) >= 10)
{
diff --git a/storage/maria/ma_test3.c b/storage/maria/ma_test3.c
index c11de6f8242..64b22e45c1b 100644
--- a/storage/maria/ma_test3.c
+++ b/storage/maria/ma_test3.c
@@ -114,7 +114,7 @@ int main(int argc,char **argv)
sleep(1);
return 0;
}
- rnd(1);
+ (void)rnd(1);
}
for (i=0 ; i < forks ; i++)
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index e983f561bbb..d1d52441588 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -855,7 +855,7 @@ struct st_maria_handler
#define MARIA_MAX_KEYPTR_SIZE 5 /* For calculating block lengths */
/* Marker for impossible delete link */
-#define IMPOSSIBLE_PAGE_NO LL(0xFFFFFFFFFF)
+#define IMPOSSIBLE_PAGE_NO 0xFFFFFFFFFFLL
/* The UNIQUE check is done with a hashed long key */
diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c
index 40686995378..788bc5c2ad3 100644
--- a/storage/maria/maria_pack.c
+++ b/storage/maria/maria_pack.c
@@ -1905,7 +1905,7 @@ static int make_huff_decode_table(HUFF_TREE *huff_tree, uint trees)
return 1;
huff_tree->code_len=(uchar*) (huff_tree->code+elements);
make_traverse_code_tree(huff_tree, huff_tree->root,
- 8 * sizeof(ulonglong), LL(0));
+ 8 * sizeof(ulonglong), 0);
}
}
return 0;
diff --git a/storage/maria/trnman.h b/storage/maria/trnman.h
index f28345908f2..77e2916390a 100644
--- a/storage/maria/trnman.h
+++ b/storage/maria/trnman.h
@@ -56,7 +56,7 @@ struct st_ma_transaction
uint16 flags; /**< Various flags */
};
-#define TRANSACTION_LOGGED_LONG_ID ULL(0x8000000000000000)
+#define TRANSACTION_LOGGED_LONG_ID 0x8000000000000000ULL
#define MAX_TRID (~(TrID)0)
extern WT_RESOURCE_TYPE ma_rc_dup_unique;
diff --git a/storage/maria/unittest/ma_control_file-t.c b/storage/maria/unittest/ma_control_file-t.c
index b4e757788c2..1d52dee6ece 100644
--- a/storage/maria/unittest/ma_control_file-t.c
+++ b/storage/maria/unittest/ma_control_file-t.c
@@ -277,7 +277,7 @@ static int test_five_logs_and_max_trid(void)
RET_ERR_UNLESS(open_file() == CONTROL_FILE_OK);
expect_logno= 100;
- expect_max_trid= ULL(14111978111);
+ expect_max_trid= 14111978111ULL;
for (i= 0; i<5; i++)
{
expect_logno*= 3;
diff --git a/storage/maria/unittest/ma_maria_log_cleanup.c b/storage/maria/unittest/ma_maria_log_cleanup.c
index 60e4fdb02fd..3e4bc755832 100644
--- a/storage/maria/unittest/ma_maria_log_cleanup.c
+++ b/storage/maria/unittest/ma_maria_log_cleanup.c
@@ -38,7 +38,7 @@ my_bool maria_log_remove(const char *testdir)
if (!(dirp = my_dir(maria_data_root, MYF(MY_DONT_SORT))))
return 1;
- for (i= 0; i < dirp->number_off_files; i++)
+ for (i= 0; i < dirp->number_of_files; i++)
{
char *file= dirp->dir_entry[i].name;
if (strncmp(file, "aria_log.", 9) == 0 &&
diff --git a/storage/maria/unittest/trnman-t.c b/storage/maria/unittest/trnman-t.c
index c2bc993e2ff..78740eac9c1 100644
--- a/storage/maria/unittest/trnman-t.c
+++ b/storage/maria/unittest/trnman-t.c
@@ -45,7 +45,7 @@ pthread_handler_t test_trnman(void *arg)
for (x= ((int)(intptr)(&m)); m > 0; )
{
- y= x= (x*LL(3628273133) + LL(1500450271)) % LL(9576890767); /* three prime numbers */
+ y= x= (x*3628273133LL + 1500450271LL) % 9576890767LL; /* three prime numbers */
m-= n= x % MAX_ITER;
for (i= 0; i < n; i++)
{
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index f649de8bd5c..4caffdd5fe3 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -77,7 +77,7 @@ static MYSQL_THDVAR_ULONG(repair_threads, PLUGIN_VAR_RQCMDARG,
static MYSQL_THDVAR_ULONGLONG(sort_buffer_size, PLUGIN_VAR_RQCMDARG,
"The buffer that is allocated when sorting the index when doing "
"a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE", NULL, NULL,
- 8192 * 1024, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), SIZE_T_MAX, 1);
+ 8192 * 1024, MIN_SORT_BUFFER + MALLOC_OVERHEAD, SIZE_T_MAX, 1);
static MYSQL_SYSVAR_BOOL(use_mmap, opt_myisam_use_mmap, PLUGIN_VAR_NOCMDARG,
"Use memory mapping for reading and writing MyISAM tables", NULL, NULL, FALSE);
@@ -677,12 +677,6 @@ static const char *ha_myisam_exts[] = {
NullS
};
-const char **ha_myisam::bas_ext() const
-{
- return ha_myisam_exts;
-}
-
-
const char *ha_myisam::index_type(uint key_number)
{
return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
@@ -2209,6 +2203,7 @@ static int myisam_init(void *p)
myisam_hton->create= myisam_create_handler;
myisam_hton->panic= myisam_panic;
myisam_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
+ myisam_hton->tablefile_extensions= ha_myisam_exts;
mi_killed= mi_killed_in_mariadb;
return 0;
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index b558f504071..63fb0ea5a2a 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -26,9 +26,6 @@
#include "handler.h" /* handler */
#include "table.h" /* TABLE_SHARE */
-struct TABLE_SHARE;
-typedef struct st_ha_create_information HA_CREATE_INFO;
-
#define HA_RECOVER_DEFAULT 1 /* Automatic recover active */
#define HA_RECOVER_BACKUP 2 /* Make a backupfile on recover */
#define HA_RECOVER_FORCE 4 /* Recover even if we loose rows */
@@ -57,7 +54,6 @@ class ha_myisam: public handler
~ha_myisam() {}
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *index_type(uint key_number);
- const char **bas_ext() const;
ulonglong table_flags() const { return int_table_flags; }
int index_init(uint idx, bool sorted);
int index_end();
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index ad97fba2cbb..0792d90cfaa 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -1,5 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -862,19 +863,19 @@ uint mi_get_pointer_length(ulonglong file_length, uint def)
if (file_length) /* If not default */
{
#ifdef NOT_YET_READY_FOR_8_BYTE_POINTERS
- if (file_length >= ULL(1) << 56)
+ if (file_length >= 1ULL << 56)
def=8;
else
#endif
- if (file_length >= ULL(1) << 48)
+ if (file_length >= 1ULL << 48)
def=7;
- else if (file_length >= ULL(1) << 40)
+ else if (file_length >= 1ULL << 40)
def=6;
- else if (file_length >= ULL(1) << 32)
+ else if (file_length >= 1ULL << 32)
def=5;
- else if (file_length >= ULL(1) << 24)
+ else if (file_length >= 1ULL << 24)
def=4;
- else if (file_length >= ULL(1) << 16)
+ else if (file_length >= 1ULL << 16)
def=3;
else
def=2;
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index 438057e22df..22225303bae 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -349,6 +349,12 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
}
else if (pos->type == HA_KEYTYPE_BINARY)
pos->charset= &my_charset_bin;
+ if (!(share->keyinfo[i].flag & HA_SPATIAL) &&
+ pos->start > share->base.reclength)
+ {
+ my_errno= HA_ERR_CRASHED;
+ goto err;
+ }
}
if (share->keyinfo[i].flag & HA_SPATIAL)
{
diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c
index 968cb9624a6..01fa10de7a3 100644
--- a/storage/myisam/mi_search.c
+++ b/storage/myisam/mi_search.c
@@ -949,9 +949,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
("Found too long binary packed key: %u of %u at 0x%lx",
length, keyinfo->maxlength, (long) *page_pos));
DBUG_DUMP("key", *page_pos, 16);
- mi_print_error(keyinfo->share, HA_ERR_CRASHED);
- my_errno=HA_ERR_CRASHED;
- DBUG_RETURN(0); /* Wrong key */
+ goto crashed; /* Wrong key */
}
/* Key is packed against prev key, take prefix from prev key. */
from= key;
@@ -994,6 +992,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (from == from_end) { from=page; from_end=page_end; }
length+= (uint) ((*key++ = *from++));
}
+ if (length > keyseg->length)
+ goto crashed;
}
else
length=keyseg->length;
@@ -1033,15 +1033,18 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (from_end != page_end)
{
DBUG_PRINT("error",("Error when unpacking key"));
- mi_print_error(keyinfo->share, HA_ERR_CRASHED);
- my_errno=HA_ERR_CRASHED;
- DBUG_RETURN(0); /* Error */
+ goto crashed; /* Error */
}
/* Copy data pointer and, if appropriate, key block pointer. */
memcpy((uchar*) key,(uchar*) from,(size_t) length);
*page_pos= from+length;
}
DBUG_RETURN((uint) (key-start_key)+keyseg->length);
+
+ crashed:
+ mi_print_error(keyinfo->share, HA_ERR_CRASHED);
+ my_errno= HA_ERR_CRASHED;
+ DBUG_RETURN(0);
}
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index c8546ee56f5..7be84599d86 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -284,25 +284,25 @@ static struct my_option my_long_options[] =
{ "read_buffer_size", OPT_READ_BUFFER_SIZE, "",
&check_param.read_buffer_length,
&check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
- (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
- INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+ READ_BUFFER_INIT, MALLOC_OVERHEAD,
+ INT_MAX32, MALLOC_OVERHEAD, 1L, 0},
{ "write_buffer_size", OPT_WRITE_BUFFER_SIZE, "",
&check_param.write_buffer_length,
&check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
- (long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
- INT_MAX32, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+ READ_BUFFER_INIT, MALLOC_OVERHEAD,
+ INT_MAX32, MALLOC_OVERHEAD, 1L, 0},
{ "sort_buffer_size", OPT_SORT_BUFFER_SIZE,
"Deprecated. myisam_sort_buffer_size alias is being used",
&check_param.sort_buffer_length,
&check_param.sort_buffer_length, 0, GET_ULL, REQUIRED_ARG,
- (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD),
- SIZE_T_MAX, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+ SORT_BUFFER_INIT, MIN_SORT_BUFFER + MALLOC_OVERHEAD,
+ SIZE_T_MAX, MALLOC_OVERHEAD, 1L, 0},
{ "myisam_sort_buffer_size", OPT_SORT_BUFFER_SIZE,
"Alias of sort_buffer_size parameter",
&check_param.sort_buffer_length,
&check_param.sort_buffer_length, 0, GET_ULL, REQUIRED_ARG,
- (long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD),
- SIZE_T_MAX, (long) MALLOC_OVERHEAD, (long) 1L, 0},
+ SORT_BUFFER_INIT, MIN_SORT_BUFFER + MALLOC_OVERHEAD,
+ SIZE_T_MAX, MALLOC_OVERHEAD, 1L, 0},
{ "sort_key_blocks", OPT_SORT_KEY_BLOCKS, "",
&check_param.sort_key_blocks,
&check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG,
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index 6ce88db87f5..1985a53acf8 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1931,7 +1932,7 @@ static int make_huff_decode_table(HUFF_TREE *huff_tree, uint trees)
return 1;
huff_tree->code_len=(uchar*) (huff_tree->code+elements);
make_traverse_code_tree(huff_tree, huff_tree->root,
- 8 * sizeof(ulonglong), LL(0));
+ 8 * sizeof(ulonglong), 0);
}
}
return 0;
diff --git a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
index 3caf2ad6c3a..07c03fc8d3e 100644
--- a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff
@@ -7,15 +7,15 @@
-DROP TABLE t1;
-CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-INSERT INTO t1 (a) VALUES (1),(2);
--SELECT * FROM t1;
+-SELECT a FROM t1;
-a
-1
-2
-ALTER TABLE t1 DISCARD TABLESPACE;
--SELECT * FROM t1;
+-SELECT a FROM t1;
-ERROR HY000: Got error -1 from storage engine
-ALTER TABLE t1 IMPORT TABLESPACE;
--SELECT * FROM t1;
+-SELECT a FROM t1;
-a
-1
-2
diff --git a/storage/myisam/mysql-test/storage_engine/foreign_keys.rdiff b/storage/myisam/mysql-test/storage_engine/foreign_keys.rdiff
index 67157754c04..31da75e5ea5 100644
--- a/storage/myisam/mysql-test/storage_engine/foreign_keys.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/foreign_keys.rdiff
@@ -44,12 +44,12 @@
+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
+# -------------------------------------------
DELETE FROM t2 WHERE a=2;
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
1 c
-2 d
+3 d
- SELECT * FROM t2;
+ SELECT a,b FROM t2;
a b
-1 a
+3 b
@@ -90,7 +90,7 @@
+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
+# -------------------------------------------
- SELECT * FROM t2;
+ SELECT a,b FROM t2;
a b
-5 a
-5 a
@@ -117,7 +117,7 @@
+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
+# -------------------------------------------
- SELECT * FROM t2;
+ SELECT a,b FROM t2;
a b
+1 a
+1 a
diff --git a/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff b/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
index 8d1434ad2fe..b460b5b4763 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
@@ -1,9 +1,20 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-13a20
-> 1
+--- suite/storage_engine/trx/cons_snapshot_repeatable_read.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/cons_snapshot_repeatable_read.reject 2013-01-23 02:44:05.336711176 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ connect con2,localhost,root,,;
+@@ -11,6 +17,7 @@
+ # If consistent read works on this isolation level (REPEATABLE READ), the following SELECT should not return the value we inserted (1)
+ SELECT a FROM t1;
+ a
++1
+ COMMIT;
+ connection default;
+ disconnect con1;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
index 8d1434ad2fe..d5f1a03084e 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
@@ -1,9 +1,20 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-13a20
-> 1
+--- suite/storage_engine/trx/cons_snapshot_serializable.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/cons_snapshot_serializable.reject 2013-01-23 02:44:05.928703734 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ connect con2,localhost,root,,;
+@@ -11,6 +17,7 @@
+ # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
+ SELECT a FROM t1;
+ a
++1
+ COMMIT;
+ connection default;
+ disconnect con1;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/delete.rdiff b/storage/myisam/mysql-test/storage_engine/trx/delete.rdiff
index 491b4636796..d7111ab28fc 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/delete.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/delete.rdiff
@@ -22,7 +22,7 @@
ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
-10000 foobar
-10000 foobar
diff --git a/storage/myisam/mysql-test/storage_engine/trx/insert.rdiff b/storage/myisam/mysql-test/storage_engine/trx/insert.rdiff
index 4619db1d095..cee69633860 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/insert.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/insert.rdiff
@@ -22,7 +22,7 @@
ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
+0 test
1 a
@@ -44,7 +44,7 @@
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
INSERT INTO t1 (b,a) VALUES ('test1',10);
COMMIT;
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
+0 test
1 a
diff --git a/storage/myisam/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/myisam/mysql-test/storage_engine/trx/level_read_committed.rdiff
index c0cc9ff9351..94e3fc83e1e 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/level_read_committed.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/level_read_committed.rdiff
@@ -1,44 +1,94 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-18a25
-> 1
-25a33,34
-> 1
-> 2
-30a40,43
-> 1
-> 101
-> 102
-> 2
-34a48,49
-> 101
-> 102
-39a55,56
-> 101
-> 102
-44a62,63
-> 101
-> 102
-51a71,72
-> 101
-> 102
-54a76,77
-> 301
-> 302
-58a82,83
-> 101
-> 102
-61a87,88
-> 301
-> 302
-65a93,94
-> 101
-> 102
-68a98,99
-> 301
-> 302
+--- suite/storage_engine/trx/level_read_committed.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/level_read_committed.reject 2013-01-23 02:44:06.572695636 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+@@ -16,6 +22,7 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -23,25 +30,37 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ connection con1;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -49,23 +68,35 @@
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/myisam/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
index 2a7ddd33c8c..91a2786cb68 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
@@ -1,7 +1,12 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
+--- suite/storage_engine/trx/level_read_uncommitted.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/level_read_uncommitted.reject 2013-01-23 02:44:07.196687792 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/level_repeatable_read.rdiff b/storage/myisam/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
index 8b8df802275..2c265900004 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
@@ -1,53 +1,96 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-18a25
-> 1
-25a33,34
-> 1
-> 2
-27,28c36
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-31a40,43
-> 1
-> 101
-> 102
-> 2
-35a48,49
-> 101
-> 102
-40a55,56
-> 101
-> 102
-44a61,64
-> 1
-> 101
-> 102
-> 2
-49a70,73
-> 1
-> 101
-> 102
-> 2
-51a76,77
-> 301
-> 302
-55a82,83
-> 101
-> 102
-58a87,88
-> 301
-> 302
-62a93,94
-> 101
-> 102
-65a98,99
-> 301
-> 302
+--- suite/storage_engine/trx/level_repeatable_read.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/level_repeatable_read.reject 2013-01-23 02:44:07.776680499 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+@@ -16,6 +22,7 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -23,46 +30,73 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ 201
+ 202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/level_serializable.rdiff b/storage/myisam/mysql-test/storage_engine/trx/level_serializable.rdiff
index bbcb8ac1838..7955036e000 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/level_serializable.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/level_serializable.rdiff
@@ -1,69 +1,103 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MyISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-14,15c20
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-19a25
-> 1
-22,23c28
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-27a33,34
-> 1
-> 2
-32a40,43
-> 1
-> 101
-> 102
-> 2
-35a47,50
-> 1
-> 101
-> 102
-> 2
-38a54,57
-> 1
-> 101
-> 102
-> 2
-41a61,64
-> 1
-> 101
-> 102
-> 2
-46a70,77
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
-49a81,88
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
-52a92,99
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
+--- suite/storage_engine/trx/level_serializable.result 2013-01-22 22:05:05.246633000 +0400
++++ suite/storage_engine/trx/level_serializable.reject 2013-01-23 02:44:08.384672856 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MyISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+@@ -11,45 +17,86 @@
+ connection con2;
+ BEGIN;
+ INSERT INTO t1 (a) VALUES(1);
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ COMMIT;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisam/mysql-test/storage_engine/trx/select_for_update.rdiff b/storage/myisam/mysql-test/storage_engine/trx/select_for_update.rdiff
index 08e0802a28f..044ce56da43 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/select_for_update.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/select_for_update.rdiff
@@ -13,7 +13,7 @@
@@ -14,16 +20,33 @@
1 a
3 a
- SELECT * FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+ SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+a b
+1 a
@@ -38,7 +38,7 @@
+# -------------------------------------------
connection con1;
COMMIT;
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
-1 a
+1 c
diff --git a/storage/myisam/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff b/storage/myisam/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
index 97941bbdb13..e1e8d3050e7 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
@@ -25,7 +25,7 @@
+# -------------------------------------------
connection con1;
COMMIT;
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
-1 a
+1 c
diff --git a/storage/myisam/mysql-test/storage_engine/trx/update.rdiff b/storage/myisam/mysql-test/storage_engine/trx/update.rdiff
index 131286d613c..ca3b77e7cec 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/update.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/update.rdiff
@@ -31,7 +31,7 @@
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
UPDATE t1 SET b = 'upd' WHERE a = 10050;
COMMIT;
- SELECT * FROM t1;
+ SELECT a,b FROM t1;
a b
10050 upd
10050 upd
diff --git a/storage/myisam/mysql-test/storage_engine/trx/xa.rdiff b/storage/myisam/mysql-test/storage_engine/trx/xa.rdiff
index f445ad909f3..73c3796b13b 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/xa.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/xa.rdiff
@@ -12,21 +12,21 @@
connect con2,localhost,root,,;
@@ -9,17 +15,22 @@
connection con1;
- SELECT * FROM t1;
+ SELECT a FROM t1;
a
+1
connection con2;
INSERT INTO t1 (a) VALUES (2);
XA END 'xa1';
connection con1;
- SELECT * FROM t1;
+ SELECT a FROM t1;
a
+1
+2
connection con2;
XA PREPARE 'xa1';
connection con1;
- SELECT * FROM t1;
+ SELECT a FROM t1;
a
+1
+2
@@ -78,7 +78,7 @@
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
connection con1;
- SELECT * FROM t1;
+ SELECT a FROM t1;
a
@@ -93,4 +114,6 @@
2
diff --git a/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff b/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff
index f4629fb19a0..e637643d59d 100644
--- a/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff
+++ b/storage/myisam/mysql-test/storage_engine/trx/xa_recovery.rdiff
@@ -20,7 +20,7 @@
+ERROR XAE04: XAER_NOTA: Unknown XID
XA COMMIT 'xa2';
+ERROR XAE04: XAER_NOTA: Unknown XID
- SELECT * FROM t1;
+ SELECT a FROM t1;
a
+1
+2
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 7bb7990d9a3..41bba55c397 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -171,12 +171,6 @@ extern "C" void myrg_print_wrong_table(const char *table_name)
}
-const char **ha_myisammrg::bas_ext() const
-{
- return ha_myisammrg_exts;
-}
-
-
const char *ha_myisammrg::index_type(uint key_number)
{
return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
@@ -331,6 +325,19 @@ extern "C" int myisammrg_parent_open_callback(void *callback_param,
CPP_UNNAMED_NS_END
+/*
+ Set external_ref for the child MyISAM tables. They need this to be set in
+ order to check for killed status.
+*/
+static void myrg_set_external_ref(MYRG_INFO *m_info, void *ext_ref_arg)
+{
+ int i;
+ for (i= 0; i < (int)m_info->tables; i++)
+ {
+ m_info->open_tables[i].table->external_ref= ext_ref_arg;
+ }
+}
+
/**
Open a MERGE parent table, but not its children.
@@ -394,6 +401,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)),
}
file->children_attached= TRUE;
+ myrg_set_external_ref(file, (void*)table);
info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
}
@@ -1697,6 +1705,7 @@ static int myisammrg_init(void *p)
myisammrg_hton->create= myisammrg_create_handler;
myisammrg_hton->panic= myisammrg_panic;
myisammrg_hton->flags= HTON_NO_PARTITION;
+ myisammrg_hton->tablefile_extensions= ha_myisammrg_exts;
return 0;
}
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index 8007e7d04e8..455819c5526 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -82,7 +82,6 @@ public:
ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisammrg();
- const char **bas_ext() const;
const char *index_type(uint key_number);
ulonglong table_flags() const
{
diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_table.rdiff
index 447a38a5b2b..b2ec2129d04 100644
--- a/storage/myisammrg/mysql-test/storage_engine/alter_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/alter_table.rdiff
@@ -1,68 +1,151 @@
-11c11
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-19c19
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-27c27
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-35c35
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-43c43
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-51c51
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-59c59
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-67c67
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-75c75
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-82c82
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-91c91
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
-100c100
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-107c107
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-122c122
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-130c130
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-138c138
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-146c146
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- alter_table.result 2013-01-22 22:05:05.246633000 +0400
++++ alter_table.reject 2013-01-23 02:50:10.652118538 +0400
+@@ -8,7 +8,7 @@
+ `a` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0';
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -16,7 +16,7 @@
+ `a` int(11) DEFAULT '0',
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ALTER a DROP DEFAULT;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -24,7 +24,7 @@
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 CHANGE COLUMN b b1 <CHAR_COLUMN> FIRST;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -32,7 +32,7 @@
+ `b1` char(8) DEFAULT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 CHANGE b1 b <INT_COLUMN> AFTER c;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -40,7 +40,7 @@
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 CHANGE b b <CHAR_COLUMN>;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -48,7 +48,7 @@
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 MODIFY COLUMN b <INT_COLUMN>;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -56,7 +56,7 @@
+ `a` int(11),
+ `c` char(8) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 MODIFY COLUMN b <CHAR_COLUMN> FIRST;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -64,7 +64,7 @@
+ `b` char(8) DEFAULT NULL,
+ `a` int(11),
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 MODIFY COLUMN b <INT_COLUMN> AFTER a;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -72,14 +72,14 @@
+ `a` int(11),
+ `b` int(11) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 DROP COLUMN b;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11),
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 RENAME TO t2;
+ SHOW CREATE TABLE t1;
+ ERROR 42S02: Table 'test.t1' doesn't exist
+@@ -88,7 +88,7 @@
+ t2 CREATE TABLE `t2` (
+ `a` int(11),
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
+ DROP TABLE t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,5),(2,2),(4,3);
+@@ -97,14 +97,14 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ORDER BY b ASC, a DESC;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ SELECT a,b FROM t1;
+ a b
+ 2 2
+@@ -119,7 +119,7 @@
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) COLLATE latin1_general_cs DEFAULT NULL,
+ `c` char(8) COLLATE latin1_general_cs DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 CONVERT TO CHARACTER SET utf8;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -127,7 +127,7 @@
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+ `c` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -135,7 +135,7 @@
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ `c` char(8) CHARACTER SET utf8 DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 FORCE;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+@@ -143,5 +143,5 @@
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) CHARACTER SET utf8 DEFAULT NULL,
+ `c` char(8) CHARACTER SET utf8 DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
index cfc821582ed..1091d6250b9 100644
--- a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff
@@ -1,27 +1,32 @@
-4,18c4,13
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< INSERT INTO t1 (a) VALUES (1),(2);
-< SELECT * FROM t1;
-< a
-< 1
-< 2
-< ALTER TABLE t1 DISCARD TABLESPACE;
-< SELECT * FROM t1;
-< ERROR HY000: Got error -1 from storage engine
-< ALTER TABLE t1 IMPORT TABLESPACE;
-< SELECT * FROM t1;
-< a
-< 1
-< 2
----
-> ERROR HY000: 'test.t1' is not BASE TABLE
-> # ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ ALTER TABLE t1 DISCARD TABLESPACE ]
-> # The statement|command finished with ER_WRONG_OBJECT.
-> # Tablespace operations or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- alter_tablespace.result 2013-01-22 22:05:05.246633000 +0400
++++ alter_tablespace.reject 2013-01-23 02:50:11.288110543 +0400
+@@ -1,19 +1,14 @@
+ DROP TABLE IF EXISTS t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ ALTER TABLE t1 DISCARD TABLESPACE;
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-INSERT INTO t1 (a) VALUES (1),(2);
+-SELECT a FROM t1;
+-a
+-1
+-2
+-ALTER TABLE t1 DISCARD TABLESPACE;
+-SELECT a FROM t1;
+-ERROR HY000: Got error -1 from storage engine
+-ALTER TABLE t1 IMPORT TABLESPACE;
+-SELECT a FROM t1;
+-a
+-1
+-2
++ERROR HY000: 'test.t1' is not BASE TABLE
++# ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ ALTER TABLE t1 DISCARD TABLESPACE ]
++# The statement|command finished with ER_WRONG_OBJECT.
++# Tablespace operations or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/analyze_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/analyze_table.rdiff
index 139bcc00a81..9854a986cc5 100644
--- a/storage/myisammrg/mysql-test/storage_engine/analyze_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/analyze_table.rdiff
@@ -1,22 +1,34 @@
-8c8
-< test.t1 analyze status OK
----
-> test.t1 analyze note The storage engine for the table doesn't support analyze
-12c12
-< test.t2 analyze status OK
----
-> test.t2 analyze note The storage engine for the table doesn't support analyze
-17,18c17,18
-< test.t1 analyze status OK
-< test.t2 analyze status OK
----
-> test.t1 analyze note The storage engine for the table doesn't support analyze
-> test.t2 analyze note The storage engine for the table doesn't support analyze
-24c24
-< test.t1 analyze status OK
----
-> test.t1 analyze note The storage engine for the table doesn't support analyze
-28c28
-< test.t1 analyze status OK
----
-> test.t1 analyze note The storage engine for the table doesn't support analyze
+--- analyze_table.result 2013-01-22 22:05:05.246633000 +0400
++++ analyze_table.reject 2013-01-23 02:50:11.912102699 +0400
+@@ -5,25 +5,25 @@
+ INSERT INTO t1 (a,b) VALUES (3,'c');
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze note The storage engine for the table doesn't support analyze
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ ANALYZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 analyze status OK
++test.t2 analyze note The storage engine for the table doesn't support analyze
+ INSERT INTO t1 (a,b) VALUES (5,'e');
+ INSERT INTO t2 (a,b) VALUES (6,'f');
+ ANALYZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
+-test.t2 analyze status OK
++test.t1 analyze note The storage engine for the table doesn't support analyze
++test.t2 analyze note The storage engine for the table doesn't support analyze
+ DROP TABLE t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, <CUSTOM_INDEX>(a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a) VALUES (1),(2),(4),(7);
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze note The storage engine for the table doesn't support analyze
+ INSERT INTO t1 (a) VALUES (8),(10),(11),(12);
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze note The storage engine for the table doesn't support analyze
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff b/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
index e9095aa3944..68264bdeb8d 100644
--- a/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/autoincrement.rdiff
@@ -1,34 +1,64 @@
-9c9
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-55c55
-< t1 <STORAGE_ENGINE> # # # # # # # # 6 # # # # # # #
----
-> t1 <STORAGE_ENGINE> # # # # # # # # 0 # # # # # # #
-62c62
-< t1 # # # # # # # # # 8 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-81c81
-< t1 # # # # # # # # # 10 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-85c85
-< t1 # # # # # # # # # 21 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-106c106
-< t1 # # # # # # # # # 22 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-128,129c128,129
-< 100 a
-< 101 b
----
-> 1 a
-> 2 b
-132c132
-< 100
----
-> 1
+--- autoincrement.result 2013-01-22 22:05:05.246633000 +0400
++++ autoincrement.reject 2013-01-23 02:50:12.848090932 +0400
+@@ -6,7 +6,7 @@
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` char(8) DEFAULT NULL,
+ KEY `a` (`a`)
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ INSERT INTO t1 (b) VALUES ('a'),('b');
+ SELECT a,b FROM t1 ORDER BY a;
+ a b
+@@ -52,14 +52,14 @@
+ SET sql_mode = '<INITIAL_SQL_MODE>';
+ SHOW TABLE STATUS FROM test LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 <STORAGE_ENGINE> # # # # # # # # 6 # # # # # # #
++t1 <STORAGE_ENGINE> # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (a,b) VALUES (6,'g'),(7,'h');
+ SELECT LAST_INSERT_ID();
+ LAST_INSERT_ID()
+ 5
+ SHOW TABLE STATUS FROM test LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 8 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (a,b) VALUES (NULL,'i'),(9,'j');
+ SELECT a,b FROM t1 ORDER BY a;
+ a b
+@@ -78,11 +78,11 @@
+ 8
+ SHOW TABLE STATUS FROM test LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 10 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (a,b) VALUES (20,'k');
+ SHOW TABLE STATUS FROM test LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 21 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (a,b) VALUES (NULL,'l');
+ SELECT a,b FROM t1 ORDER BY a;
+ a b
+@@ -103,7 +103,7 @@
+ 21
+ SHOW TABLE STATUS FROM test LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 22 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (a,b) VALUES (-5,'m');
+ SELECT a,b FROM t1 ORDER BY a;
+ a b
+@@ -125,9 +125,9 @@
+ INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b');
+ SELECT a,b FROM t1;
+ a b
+-100 a
+-101 b
++1 a
++2 b
+ SELECT LAST_INSERT_ID();
+ LAST_INSERT_ID()
+-100
++1
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/cache_index.rdiff b/storage/myisammrg/mysql-test/storage_engine/cache_index.rdiff
index e10b22a8e66..612c8d38311 100644
--- a/storage/myisammrg/mysql-test/storage_engine/cache_index.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/cache_index.rdiff
@@ -1,46 +1,71 @@
-15,16c15,16
-< test.t1 assign_to_keycache status OK
-< test.t2 assign_to_keycache status OK
----
-> test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
-> test.t2 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
-19,20c19,20
-< test.t1 preload_keys status OK
-< test.t2 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
-> test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
-25,26c25,26
-< test.t1 preload_keys status OK
-< test.t2 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
-> test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
-31c31
-< test.t1 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
-35c35
-< test.t1 assign_to_keycache status OK
----
-> test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
-39c39
-< test.t1 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
-50c50
-< test.t1 assign_to_keycache status OK
----
-> test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
-54c54
-< test.t1 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
-62c62
-< test.t1 assign_to_keycache status OK
----
-> test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
-66c66
-< test.t1 preload_keys status OK
----
-> test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+--- cache_index.result 2013-01-22 22:05:05.246633000 +0400
++++ cache_index.reject 2013-01-23 02:50:13.468083137 +0400
+@@ -12,31 +12,31 @@
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=128*1024;
+ CACHE INDEX t1 INDEX (a), t2 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
+-test.t2 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
++test.t2 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ LOAD INDEX INTO CACHE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
+-test.t2 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=8*1024;
+ LOAD INDEX INTO CACHE t1, t2 IGNORE LEAVES;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
+-test.t2 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
+ SET GLOBAL <CACHE_NAME>.key_cache_age_threshold = 100, <CACHE_NAME>.key_cache_block_size = 512, <CACHE_NAME>.key_cache_division_limit = 1, <CACHE_NAME>.key_cache_segments=2;
+ INSERT INTO t1 (a,b) VALUES (5,'e'),(6,'f');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ SET GLOBAL new_<CACHE_NAME>.key_buffer_size=128*1024;
+ CACHE INDEX t1 IN new_<CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ LOAD INDEX INTO CACHE t1 IGNORE LEAVES;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ INSERT INTO t1 (a,b) VALUES (9,'i');
+ DROP TABLE t2;
+ DROP TABLE t1;
+@@ -47,11 +47,11 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CACHE INDEX t1 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -59,11 +59,11 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CACHE INDEX t1 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ DROP TABLE t1;
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=0;
+ SET GLOBAL new_<CACHE_NAME>.key_buffer_size=0;
diff --git a/storage/myisammrg/mysql-test/storage_engine/char_indexes.rdiff b/storage/myisammrg/mysql-test/storage_engine/char_indexes.rdiff
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/storage/myisammrg/mysql-test/storage_engine/char_indexes.rdiff
+++ /dev/null
diff --git a/storage/myisammrg/mysql-test/storage_engine/checksum_table_live.rdiff b/storage/myisammrg/mysql-test/storage_engine/checksum_table_live.rdiff
index 1710cc18fea..f09aec971a9 100644
--- a/storage/myisammrg/mysql-test/storage_engine/checksum_table_live.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/checksum_table_live.rdiff
@@ -1,6 +1,13 @@
-14,15c14,15
-< test.t1 4272806499
-< test.t2 0
----
-> test.t1 NULL
-> test.t2 NULL
+--- checksum_table_live.result 2013-01-22 22:05:05.246633000 +0400
++++ checksum_table_live.reject 2013-01-23 02:50:14.440070917 +0400
+@@ -11,8 +11,8 @@
+ test.t1 4272806499
+ CHECKSUM TABLE t1, t2 QUICK;
+ Table Checksum
+-test.t1 4272806499
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
+ CHECKSUM TABLE t1, t2 EXTENDED;
+ Table Checksum
+ test.t1 4272806499
diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
index 5d3578ad3cd..585e5c915ba 100644
--- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff
@@ -1,37 +1,57 @@
-7c7
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-16c16
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-22c22
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-27,36c27,35
-< SHOW CREATE TABLE t1;
-< Table Create Table
-< t1 CREATE TABLE `t1` (
-< `1` bigint(20) NOT NULL DEFAULT '0'
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
-< SELECT * FROM t1;
-< 1
-< 1
-< 2
-< DROP TABLE t1;
----
-> ERROR HY000: 'test.t1' is not BASE TABLE
-> # ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_WRONG_OBJECT.
-> # CREATE TABLE .. AS SELECT or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-43c42
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- create_table.result 2013-01-22 22:05:05.246633000 +0400
++++ create_table.reject 2013-01-23 02:50:19.544006752 +0400
+@@ -4,7 +4,7 @@
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ CREATE TABLE IF NOT EXISTS t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ Warnings:
+ Note 1050 Table 't1' already exists
+@@ -13,33 +13,32 @@
+ Table Create Table
+ t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ CREATE TEMPORARY TABLE t2 LIKE t1;
+ SHOW CREATE TABLE t2;
+ Table Create Table
+ t2 CREATE TEMPORARY TABLE `t2` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TEMPORARY TABLE t2;
+ DROP TABLE t2;
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> AS SELECT 1 UNION SELECT 2;
+-SHOW CREATE TABLE t1;
+-Table Create Table
+-t1 CREATE TABLE `t1` (
+- `1` bigint(20) NOT NULL DEFAULT '0'
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+-SELECT * FROM t1;
+-1
+-1
+-2
+-DROP TABLE t1;
++ERROR HY000: 'test.t1' is not BASE TABLE
++# ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_WRONG_OBJECT.
++# CREATE TABLE .. AS SELECT or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ SET storage_engine = <STORAGE_ENGINE>;
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ FLUSH LOGS;
+ DROP TABLE IF EXISTS t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/foreign_keys.rdiff b/storage/myisammrg/mysql-test/storage_engine/foreign_keys.rdiff
index 7362bd282a5..f716b7f2fe8 100644
--- a/storage/myisammrg/mysql-test/storage_engine/foreign_keys.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/foreign_keys.rdiff
@@ -1,138 +1,147 @@
-15,17c15,16
-< KEY `a` (`a`),
-< CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> KEY `a` (`a`)
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
-19c18,25
-< ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
----
-> # ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-23c29,36
-< ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
----
-> # ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-25c38
-< ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
----
-> # ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED_2)
-27c40,47
-< ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
----
-> # ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED_2)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-32c52
-< 2 d
----
-> 3 d
-35c55,56
-< 1 a
----
-> 3 b
-> 3 b
-37c58,65
-< ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails
----
-> # ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-49,51c77,78
-< KEY `a` (`a`),
-< CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`) ON DELETE CASCADE ON UPDATE CASCADE
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> KEY `a` (`a`)
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
-53c80
-< ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`) ON DELETE CASCADE ON UPDATE CASCADE)
----
-> # ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
-54a82
-> ERROR 42S02: Table 'test.t1' doesn't exist
-56a85,92
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_NO_SUCH_TABLE.
-> # UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-59,64c95,104
-< 5 a
-< 5 a
-< 5 b
-< 5 c
-< 5 d
-< 5 e
----
-> 1 a
-> 1 a
-> 2 b
-> 2 b
-> 3 a
-> 3 c
-> 3 c
-> 4 d
-> 4 d
-> 4 e
-65a106,113
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_NO_SUCH_TABLE.
-> # DELETE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-67a116,125
-> 1 a
-> 1 a
-> 2 b
-> 2 b
-> 3 a
-> 3 c
-> 3 c
-> 4 d
-> 4 d
-> 4 e
-69c127,135
-< ERROR 42000: Cannot truncate a table referenced in a foreign key constraint (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `test`.`t1` (`a`))
----
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: ER_TRUNCATE_ILLEGAL_FK)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_NO_SUCH_TABLE.
-> # Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-71a138
-> ERROR 42S02: Unknown table 't1'
+--- foreign_keys.result 2013-01-22 22:05:05.246633000 +0400
++++ foreign_keys.reject 2013-01-23 02:50:28.187898084 +0400
+@@ -12,29 +12,57 @@
+ t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+- KEY `a` (`a`),
+- CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++ KEY `a` (`a`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
+ INSERT INTO t2 (a,b) VALUES (1,'a'),(2,'b');
+-ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
++# ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ INSERT INTO t1 (a,b) VALUES (1,'c'),(2,'d');
+ INSERT INTO t2 (a,b) VALUES (1,'a'),(2,'b');
+ UPDATE t2 SET a=a+1;
+-ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
++# ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ UPDATE t1 SET a=3 WHERE a=2;
+-ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
++# ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED_2)
+ DELETE FROM t1 WHERE a=2;
+-ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
++# ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED_2)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DELETE FROM t2 WHERE a=2;
+ SELECT a,b FROM t1;
+ a b
+ 1 c
+-2 d
++3 d
+ SELECT a,b FROM t2;
+ a b
+-1 a
++3 b
++3 b
+ DROP TABLE t1;
+-ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails
++# ERROR: Statement succeeded (expected results: ER_ROW_IS_REFERENCED)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t2;
+ CREATE TABLE t2 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -46,26 +74,65 @@
+ t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL,
+- KEY `a` (`a`),
+- CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`) ON DELETE CASCADE ON UPDATE CASCADE
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++ KEY `a` (`a`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t2`)
+ INSERT INTO t2 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d');
+-ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`) ON DELETE CASCADE ON UPDATE CASCADE)
++# ERROR: Statement succeeded (expected results: ER_NO_REFERENCED_ROW_2)
+ INSERT INTO t1 (a,b) VALUES (3,'a'),(4,'a');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(4,'e'),(3,'a');
+ UPDATE t1 SET a=a+1;
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_NO_SUCH_TABLE.
++# UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ SELECT a,b FROM t2;
+ a b
+-5 a
+-5 a
+-5 b
+-5 c
+-5 d
+-5 e
++1 a
++1 a
++2 b
++2 b
++3 a
++3 c
++3 c
++4 d
++4 d
++4 e
+ DELETE FROM t1 WHERE b='a' LIMIT 2;
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_NO_SUCH_TABLE.
++# DELETE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ SELECT a,b FROM t2;
+ a b
++1 a
++1 a
++2 b
++2 b
++3 a
++3 c
++3 c
++4 d
++4 d
++4 e
+ TRUNCATE TABLE t1;
+-ERROR 42000: Cannot truncate a table referenced in a foreign key constraint (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `test`.`t1` (`a`))
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: ER_TRUNCATE_ILLEGAL_FK)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_NO_SUCH_TABLE.
++# Foreign keys or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t2;
+ DROP TABLE t1;
++ERROR 42S02: Unknown table 't1'
diff --git a/storage/myisammrg/mysql-test/storage_engine/fulltext_search.rdiff b/storage/myisammrg/mysql-test/storage_engine/fulltext_search.rdiff
index 262370f01ed..7234cdfd2fa 100644
--- a/storage/myisammrg/mysql-test/storage_engine/fulltext_search.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/fulltext_search.rdiff
@@ -1,142 +1,150 @@
-7,64c7,15
-< SHOW INDEXES IN t1;
-< Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-< t1 1 v1 1 v1 # # NULL NULL YES FULLTEXT
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text1','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
-< If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
-< For developers who want to code on MariaDB or MySQL
-< * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
-< o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
-< o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
-< * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
-< o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
-< For MariaDB / MySQL end users
-< * MariaDB Crash Course by Ben Forta
-< o First MariaDB book!
-< o For people who want to learn SQL and the basics of MariaDB.
-< o Now shipping. Purchase at Amazon.com or your favorite bookseller.
-< * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
-< o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
-< o Free to read in the Knowledgebase!
-< * MySQL (4th Edition) by Paul DuBois
-< o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
-< * MySQL Cookbook by Paul DuBois
-< o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
-< * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
-< o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
-<
-< * MySQL Admin Cookbook
-< o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
-<
-< * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
-< o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ',
-< 'There are several reasons why contributing code is one of the easiest and most rewarding ways to contribute to MariaDB:
-<
-< 1. We are very responsive toward reviews of submitted code and as soon as the review is done, the submitted code is merged into an existing MariaDB tree and made available to everyone, not just select customers.
-< 2. Code reviews are performed by the MariaDB core development team and the quality, detail, and timeliness of our reviews are better than you will find elsewhere.
-< 3. With MariaDB everyone has access to the latest code.
-< 4. If a patch is very safe and/or very useful we are willing to push it into the stable code (as long as it can\'t break any existing applications). We are willing to do this to ensure the freedom to add small, needed fixes on a stable release so users don\'t have to wait a year for something to be added which is critical to their business.
-< 5. If you are an active contributor, you can become a member of maria-captains, even if you aren\'t working for Monty Program Ab. All captains have the same rights as any other captain to accept and reject patches. Our development model is truly open for everyone.
-< The Contributing Code page details many of the actual steps involved in working with the MariaDB source code. It\'s important that you use the same tools and submit patches in the same way as other developers to keep development running smoothly.'
-< ), ('text2','test1','test2');
-< SELECT v0 FROM t1 WHERE MATCH(v1) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
-< v0
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test');
-< SELECT v0, MATCH(v1) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
-< v0 rating
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
-< SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
-< v0 rating
-< text4 1.3705332279205322
-< SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
-< v0
-< text1
-< SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE);
-< v0
-< SELECT v0, MATCH(v1) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('database' WITH QUERY EXPANSION);
-< v0 rating
-< text1 178.11756896972656
-< DROP TABLE t1;
----
-> ERROR HY000: The used table type doesn't support FULLTEXT indexes
-> # ERROR: Statement ended with errno 1214, errname ER_TABLE_CANT_HANDLE_FT (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_TABLE_CANT_HANDLE_FT.
-> # FULLTEXT indexes or VARCHAR|TEXT data types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-71,132c22,30
-< SHOW INDEXES IN t1;
-< Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-< t1 1 v1 1 v1 # # NULL NULL YES FULLTEXT
-< t1 1 v1_v2 1 v1 # # NULL NULL YES FULLTEXT
-< t1 1 v1_v2 2 v2 # # NULL NULL YES FULLTEXT
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text1','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
-< If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
-< For developers who want to code on MariaDB or MySQL
-< * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
-< o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
-< o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
-< * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
-< o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
-< For MariaDB / MySQL end users
-< * MariaDB Crash Course by Ben Forta
-< o First MariaDB book!
-< o For people who want to learn SQL and the basics of MariaDB.
-< o Now shipping. Purchase at Amazon.com or your favorite bookseller.
-< * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
-< o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
-< o Free to read in the Knowledgebase!
-< * MySQL (4th Edition) by Paul DuBois
-< o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
-< * MySQL Cookbook by Paul DuBois
-< o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
-< * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
-< o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
-<
-< * MySQL Admin Cookbook
-< o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
-<
-< * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
-< o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ',
-< 'There are several reasons why contributing code is one of the easiest and most rewarding ways to contribute to MariaDB:
-<
-< 1. We are very responsive toward reviews of submitted code and as soon as the review is done, the submitted code is merged into an existing MariaDB tree and made available to everyone, not just select customers.
-< 2. Code reviews are performed by the MariaDB core development team and the quality, detail, and timeliness of our reviews are better than you will find elsewhere.
-< 3. With MariaDB everyone has access to the latest code.
-< 4. If a patch is very safe and/or very useful we are willing to push it into the stable code (as long as it can\'t break any existing applications). We are willing to do this to ensure the freedom to add small, needed fixes on a stable release so users don\'t have to wait a year for something to be added which is critical to their business.
-< 5. If you are an active contributor, you can become a member of maria-captains, even if you aren\'t working for Monty Program Ab. All captains have the same rights as any other captain to accept and reject patches. Our development model is truly open for everyone.
-< The Contributing Code page details many of the actual steps involved in working with the MariaDB source code. It\'s important that you use the same tools and submit patches in the same way as other developers to keep development running smoothly.'
-< ), ('text2','test1','test2');
-< SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
-< v0
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test');
-< SELECT v0, MATCH(v1,v2) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
-< v0 rating
-< text1 0.2809644043445587
-< INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
-< SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
-< v0 rating
-< text4 1.3705332279205322
-< SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
-< v0
-< text1
-< SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE);
-< v0
-< SELECT v0, MATCH(v1,v2) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('database' WITH QUERY EXPANSION);
-< v0 rating
-< text1 190.56150817871094
-< text4 1.1758291721343994
-< DROP TABLE t1;
----
-> ERROR HY000: The used table type doesn't support FULLTEXT indexes
-> # ERROR: Statement ended with errno 1214, errname ER_TABLE_CANT_HANDLE_FT (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_TABLE_CANT_HANDLE_FT.
-> # FULLTEXT indexes or multiple keys or VARCHAR|TEXT data types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- fulltext_search.result 2013-01-22 22:05:05.246633000 +0400
++++ fulltext_search.reject 2013-01-23 02:50:28.807890289 +0400
+@@ -4,129 +4,27 @@
+ v2 TEXT <CUSTOM_COL_OPTIONS>,
+ FULLTEXT v1 (v1)
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW INDEXES IN t1;
+-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 v1 1 v1 # # NULL NULL YES FULLTEXT
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text1','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+-If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+-For developers who want to code on MariaDB or MySQL
+-* Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+-o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+-o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+-* MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+-o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+-For MariaDB / MySQL end users
+-* MariaDB Crash Course by Ben Forta
+-o First MariaDB book!
+-o For people who want to learn SQL and the basics of MariaDB.
+-o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+-* SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+-o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+-o Free to read in the Knowledgebase!
+-* MySQL (4th Edition) by Paul DuBois
+-o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+-* MySQL Cookbook by Paul DuBois
+-o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+-* High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+-o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+-
+- * MySQL Admin Cookbook
+- o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+-
+- * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+- o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ',
+-'There are several reasons why contributing code is one of the easiest and most rewarding ways to contribute to MariaDB:
+-
+- 1. We are very responsive toward reviews of submitted code and as soon as the review is done, the submitted code is merged into an existing MariaDB tree and made available to everyone, not just select customers.
+- 2. Code reviews are performed by the MariaDB core development team and the quality, detail, and timeliness of our reviews are better than you will find elsewhere.
+- 3. With MariaDB everyone has access to the latest code.
+- 4. If a patch is very safe and/or very useful we are willing to push it into the stable code (as long as it can\'t break any existing applications). We are willing to do this to ensure the freedom to add small, needed fixes on a stable release so users don\'t have to wait a year for something to be added which is critical to their business.
+- 5. If you are an active contributor, you can become a member of maria-captains, even if you aren\'t working for Monty Program Ab. All captains have the same rights as any other captain to accept and reject patches. Our development model is truly open for everyone.
+-The Contributing Code page details many of the actual steps involved in working with the MariaDB source code. It\'s important that you use the same tools and submit patches in the same way as other developers to keep development running smoothly.'
+- ), ('text2','test1','test2');
+-SELECT v0 FROM t1 WHERE MATCH(v1) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+-v0
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test');
+-SELECT v0, MATCH(v1) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+-v0 rating
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
+-SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
+-v0 rating
+-text4 1.3705332279205322
+-SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
+-v0
+-text1
+-SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE);
+-v0
+-SELECT v0, MATCH(v1) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('database' WITH QUERY EXPANSION);
+-v0 rating
+-text1 178.11756896972656
+-DROP TABLE t1;
++ERROR HY000: The used table type doesn't support FULLTEXT indexes
++# ERROR: Statement ended with errno 1214, errname ER_TABLE_CANT_HANDLE_FT (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_TABLE_CANT_HANDLE_FT.
++# FULLTEXT indexes or VARCHAR|TEXT data types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (v0 VARCHAR(64) <CUSTOM_COL_OPTIONS>,
+ v1 VARCHAR(16384) <CUSTOM_COL_OPTIONS>,
+ v2 TEXT <CUSTOM_COL_OPTIONS>,
+ FULLTEXT v1 (v1),
+ FULLTEXT v1_v2 (v1,v2)
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW INDEXES IN t1;
+-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 v1 1 v1 # # NULL NULL YES FULLTEXT
+-t1 1 v1_v2 1 v1 # # NULL NULL YES FULLTEXT
+-t1 1 v1_v2 2 v2 # # NULL NULL YES FULLTEXT
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text1','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off.
+-If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment.
+-For developers who want to code on MariaDB or MySQL
+-* Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB.
+-o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB!
+-o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic.
+-* MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings
+-o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB!
+-For MariaDB / MySQL end users
+-* MariaDB Crash Course by Ben Forta
+-o First MariaDB book!
+-o For people who want to learn SQL and the basics of MariaDB.
+-o Now shipping. Purchase at Amazon.com or your favorite bookseller.
+-* SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer.
+-o Everything you wanted to know about the SQL 99 standard. Excellent reference book!
+-o Free to read in the Knowledgebase!
+-* MySQL (4th Edition) by Paul DuBois
+-o The \'default\' book to read if you wont to learn to use MySQL / MariaDB.
+-* MySQL Cookbook by Paul DuBois
+-o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject.
+-* High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al.
+-o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly)
+-
+- * MySQL Admin Cookbook
+- o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration
+-
+- * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen
+- o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. ',
+-'There are several reasons why contributing code is one of the easiest and most rewarding ways to contribute to MariaDB:
+-
+- 1. We are very responsive toward reviews of submitted code and as soon as the review is done, the submitted code is merged into an existing MariaDB tree and made available to everyone, not just select customers.
+- 2. Code reviews are performed by the MariaDB core development team and the quality, detail, and timeliness of our reviews are better than you will find elsewhere.
+- 3. With MariaDB everyone has access to the latest code.
+- 4. If a patch is very safe and/or very useful we are willing to push it into the stable code (as long as it can\'t break any existing applications). We are willing to do this to ensure the freedom to add small, needed fixes on a stable release so users don\'t have to wait a year for something to be added which is critical to their business.
+- 5. If you are an active contributor, you can become a member of maria-captains, even if you aren\'t working for Monty Program Ab. All captains have the same rights as any other captain to accept and reject patches. Our development model is truly open for everyone.
+-The Contributing Code page details many of the actual steps involved in working with the MariaDB source code. It\'s important that you use the same tools and submit patches in the same way as other developers to keep development running smoothly.'
+- ), ('text2','test1','test2');
+-SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+-v0
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test');
+-SELECT v0, MATCH(v1,v2) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+-v0 rating
+-text1 0.2809644043445587
+-INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
+-SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
+-v0 rating
+-text4 1.3705332279205322
+-SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
+-v0
+-text1
+-SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE);
+-v0
+-SELECT v0, MATCH(v1,v2) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('database' WITH QUERY EXPANSION);
+-v0 rating
+-text1 190.56150817871094
+-text4 1.1758291721343994
+-DROP TABLE t1;
++ERROR HY000: The used table type doesn't support FULLTEXT indexes
++# ERROR: Statement ended with errno 1214, errname ER_TABLE_CANT_HANDLE_FT (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_TABLE_CANT_HANDLE_FT.
++# FULLTEXT indexes or multiple keys or VARCHAR|TEXT data types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
diff --git a/storage/myisammrg/mysql-test/storage_engine/handler.rdiff b/storage/myisammrg/mysql-test/storage_engine/handler.rdiff
index f0c01085ad9..c56cfae0e3a 100644
--- a/storage/myisammrg/mysql-test/storage_engine/handler.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/handler.rdiff
@@ -1,79 +1,88 @@
-5,47c5,12
-< HANDLER t1 READ FIRST;
-< ERROR 42S02: Unknown table 't1' in HANDLER
-< HANDLER h1 READ FIRST;
-< a b
-< foobar 1000
-< HANDLER h1 READ NEXT;
-< a b
-< a 1
-< HANDLER h1 READ FIRST WHERE a < 'foo';
-< a b
-< a 1
-< HANDLER h1 READ NEXT;
-< a b
-< bar 200
-< HANDLER h1 READ NEXT;
-< a b
-< foo 100
-< HANDLER h1 READ NEXT;
-< a b
-< HANDLER h1 READ FIRST LIMIT 2;
-< a b
-< foobar 1000
-< a 1
-< HANDLER h1 READ NEXT;
-< a b
-< bar 200
-< HANDLER h1 READ NEXT WHERE b>500 LIMIT 2;
-< a b
-< HANDLER t1 OPEN;
-< HANDLER h1 READ FIRST WHERE b>500 LIMIT 5;
-< a b
-< foobar 1000
-< HANDLER t1 READ NEXT;
-< a b
-< foobar 1000
-< HANDLER h1 READ NEXT WHERE b<100;
-< a b
-< HANDLER t1 CLOSE;
-< HANDLER h1 READ FIRST;
-< a b
-< foobar 1000
-< HANDLER t1 CLOSE;
-< ERROR 42S02: Unknown table 't1' in HANDLER
----
-> ERROR HY000: Table storage engine for 'h1' doesn't have this option
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_ILLEGAL_HA.
-> # Functionality or the syntax or the mix could be unsupported.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-54,76c19
-< HANDLER h1 READ a = (100);
-< a b
-< 100 f
-< HANDLER h1 READ a <= (100) WHERE b < 'f';
-< a b
-< 2 c
-< HANDLER h1 READ a > (2) WHERE b IS NOT NULL LIMIT 2;
-< a b
-< 100 f
-< 101 b
-< HANDLER h1 READ a FIRST;
-< a b
-< 1 a
-< HANDLER h1 READ a LAST;
-< a b
-< 200 b
-< HANDLER h1 READ a PREV;
-< a b
-< 101 b
-< HANDLER h1 READ a NEXT;
-< a b
-< 200 b
-< HANDLER h1 CLOSE;
----
-> ERROR HY000: Table storage engine for 'h1' doesn't have this option
+--- handler.result 2013-01-22 22:05:05.246633000 +0400
++++ handler.reject 2013-01-23 02:50:29.411882697 +0400
+@@ -2,76 +2,19 @@
+ CREATE TABLE t1 (a <CHAR_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES ('foobar',1000),('a',1),('bar',200),('foo',100);
+ HANDLER t1 OPEN AS h1;
+-HANDLER t1 READ FIRST;
+-ERROR 42S02: Unknown table 't1' in HANDLER
+-HANDLER h1 READ FIRST;
+-a b
+-foobar 1000
+-HANDLER h1 READ NEXT;
+-a b
+-a 1
+-HANDLER h1 READ FIRST WHERE a < 'foo';
+-a b
+-a 1
+-HANDLER h1 READ NEXT;
+-a b
+-bar 200
+-HANDLER h1 READ NEXT;
+-a b
+-foo 100
+-HANDLER h1 READ NEXT;
+-a b
+-HANDLER h1 READ FIRST LIMIT 2;
+-a b
+-foobar 1000
+-a 1
+-HANDLER h1 READ NEXT;
+-a b
+-bar 200
+-HANDLER h1 READ NEXT WHERE b>500 LIMIT 2;
+-a b
+-HANDLER t1 OPEN;
+-HANDLER h1 READ FIRST WHERE b>500 LIMIT 5;
+-a b
+-foobar 1000
+-HANDLER t1 READ NEXT;
+-a b
+-foobar 1000
+-HANDLER h1 READ NEXT WHERE b<100;
+-a b
+-HANDLER t1 CLOSE;
+-HANDLER h1 READ FIRST;
+-a b
+-foobar 1000
+-HANDLER t1 CLOSE;
+-ERROR 42S02: Unknown table 't1' in HANDLER
++ERROR HY000: Table storage engine for 'h1' doesn't have this option
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_ILLEGAL_HA.
++# Functionality or the syntax or the mix could be unsupported.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t1;
+ HANDLER h1 CLOSE;
+ ERROR 42S02: Unknown table 'h1' in HANDLER
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (b,a) VALUES ('a',1),('b',200),('f',100),('b',101),('c',2);
+ HANDLER t1 OPEN AS h1;
+-HANDLER h1 READ a = (100);
+-a b
+-100 f
+-HANDLER h1 READ a <= (100) WHERE b < 'f';
+-a b
+-2 c
+-HANDLER h1 READ a > (2) WHERE b IS NOT NULL LIMIT 2;
+-a b
+-100 f
+-101 b
+-HANDLER h1 READ a FIRST;
+-a b
+-1 a
+-HANDLER h1 READ a LAST;
+-a b
+-200 b
+-HANDLER h1 READ a PREV;
+-a b
+-101 b
+-HANDLER h1 READ a NEXT;
+-a b
+-200 b
+-HANDLER h1 CLOSE;
++ERROR HY000: Table storage engine for 'h1' doesn't have this option
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/index.rdiff b/storage/myisammrg/mysql-test/storage_engine/index.rdiff
index 6e6f18e39bc..bf6806979ec 100644
--- a/storage/myisammrg/mysql-test/storage_engine/index.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/index.rdiff
@@ -1,6 +1,11 @@
-64,66c64
-< ERROR 23000: Duplicate entry '1' for key 'a'
-< # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
-< # If you got a difference in error message, just add it to rdiff file
----
-> # ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+--- index.result 2013-01-22 22:05:05.246633000 +0400
++++ index.reject 2013-01-23 02:50:30.111873897 +0400
+@@ -61,7 +61,5 @@
+ ALTER TABLE t1 DROP INDEX a;
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ALTER TABLE t1 ADD UNIQUE INDEX a(a) ;
+-ERROR 23000: Duplicate entry '1' for key 'a'
+-# Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
+-# If you got a difference in error message, just add it to rdiff file
++# ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/index_enable_disable.rdiff b/storage/myisammrg/mysql-test/storage_engine/index_enable_disable.rdiff
index 8913f17a2a6..7b89e177ffb 100644
--- a/storage/myisammrg/mysql-test/storage_engine/index_enable_disable.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/index_enable_disable.rdiff
@@ -1,17 +1,33 @@
-13a14,15
-> Warnings:
-> Note 1031 Table storage engine for 't1' doesn't have this option
-16c18
-< t1 1 a 1 a # # NULL NULL YES BTREE disabled
----
-> t1 1 a 1 a # # NULL NULL YES BTREE
-19c21
-< 1 SIMPLE t1 ALL NULL NULL NULL NULL 19 Using filesort
----
-> 1 SIMPLE t1 index NULL a 5 NULL 19 Using index
-22a25,26
-> Warnings:
-> Note 1031 Table storage engine for 't1' doesn't have this option
-34a39,40
-> Warnings:
-> Note 1031 Table storage engine for 't1' doesn't have this option
+--- index_enable_disable.result 2013-01-22 22:05:05.246633000 +0400
++++ index_enable_disable.reject 2013-01-23 02:50:30.723866202 +0400
+@@ -11,15 +11,19 @@
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ t1 1 a 1 a # # NULL NULL YES BTREE
+ ALTER TABLE t1 DISABLE KEYS;
++Warnings:
++Note 1031 Table storage engine for 't1' doesn't have this option
+ SHOW INDEX IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL YES BTREE disabled
++t1 1 a 1 a # # NULL NULL YES BTREE
+ EXPLAIN SELECT a FROM t1 ORDER BY a;
+ id select_type table type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 ALL NULL NULL NULL NULL 19 Using filesort
++1 SIMPLE t1 index NULL a 5 NULL 19 Using index
+ INSERT INTO t1 (a) VALUES
+ (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+ ALTER TABLE t1 ENABLE KEYS;
++Warnings:
++Note 1031 Table storage engine for 't1' doesn't have this option
+ SHOW INDEX IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ t1 1 a 1 a # # NULL NULL YES BTREE
+@@ -32,6 +36,8 @@
+ (1),(2),(3),(4),(5),(6),(7),(8),(9),
+ (21),(22),(23),(24),(25),(26),(27),(28),(29);
+ ALTER TABLE t1 DISABLE KEYS;
++Warnings:
++Note 1031 Table storage engine for 't1' doesn't have this option
+ INSERT INTO t1 (a) VALUES (29);
+ ERROR 23000: Duplicate entry '29' for key 'a'
+ # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
diff --git a/storage/myisammrg/mysql-test/storage_engine/index_type_btree.rdiff b/storage/myisammrg/mysql-test/storage_engine/index_type_btree.rdiff
index 6e6f18e39bc..1874b0d5891 100644
--- a/storage/myisammrg/mysql-test/storage_engine/index_type_btree.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/index_type_btree.rdiff
@@ -1,6 +1,11 @@
-64,66c64
-< ERROR 23000: Duplicate entry '1' for key 'a'
-< # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
-< # If you got a difference in error message, just add it to rdiff file
----
-> # ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+--- index_type_btree.result 2013-01-22 22:05:05.246633000 +0400
++++ index_type_btree.reject 2013-01-23 02:50:31.963850614 +0400
+@@ -61,7 +61,5 @@
+ ALTER TABLE t1 DROP INDEX a;
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ALTER TABLE t1 ADD UNIQUE INDEX a(a) USING BTREE;
+-ERROR 23000: Duplicate entry '1' for key 'a'
+-# Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
+-# If you got a difference in error message, just add it to rdiff file
++# ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/index_type_hash.rdiff b/storage/myisammrg/mysql-test/storage_engine/index_type_hash.rdiff
index 1b57afe5b64..f6fd1e391aa 100644
--- a/storage/myisammrg/mysql-test/storage_engine/index_type_hash.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/index_type_hash.rdiff
@@ -1,34 +1,69 @@
-7c7
-< t1 1 a 1 a # # NULL NULL # HASH
----
-> t1 1 a 1 a # # NULL NULL # BTREE
-15,16c15,16
-< t1 1 a_b 1 a # # NULL NULL # HASH a_b index
-< t1 1 a_b 2 b # # NULL NULL # HASH a_b index
----
-> t1 1 a_b 1 a # # NULL NULL # BTREE a_b index
-> t1 1 a_b 2 b # # NULL NULL # BTREE a_b index
-25,26c25,26
-< t1 1 a 1 a # # NULL NULL # HASH
-< t1 1 b 1 b # # NULL NULL # HASH
----
-> t1 1 a 1 a # # NULL NULL # BTREE
-> t1 1 b 1 b # # NULL NULL # BTREE
-34c34
-< t1 0 a 1 a # # NULL NULL # HASH
----
-> t1 0 a 1 a # # NULL NULL # BTREE
-46c46
-< t1 1 a 1 a # # NULL NULL # HASH simple index on a
----
-> t1 1 a 1 a # # NULL NULL # BTREE simple index on a
-55c55
-< t1 0 a 1 a # # NULL NULL # HASH
----
-> t1 0 a 1 a # # NULL NULL # BTREE
-64,66c64
-< ERROR 23000: Duplicate entry '1' for key 'a'
-< # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
-< # If you got a difference in error message, just add it to rdiff file
----
-> # ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+--- index_type_hash.result 2013-01-22 22:05:05.246633000 +0400
++++ index_type_hash.reject 2013-01-23 02:50:32.647842015 +0400
+@@ -4,7 +4,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH
++t1 1 a 1 a # # NULL NULL # BTREE
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -12,8 +12,8 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a_b 1 a # # NULL NULL # HASH a_b index
+-t1 1 a_b 2 b # # NULL NULL # HASH a_b index
++t1 1 a_b 1 a # # NULL NULL # BTREE a_b index
++t1 1 a_b 2 b # # NULL NULL # BTREE a_b index
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -22,8 +22,8 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH
+-t1 1 b 1 b # # NULL NULL # HASH
++t1 1 a 1 a # # NULL NULL # BTREE
++t1 1 b 1 b # # NULL NULL # BTREE
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -31,7 +31,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 0 a 1 a # # NULL NULL # HASH
++t1 0 a 1 a # # NULL NULL # BTREE
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ERROR 23000: Duplicate entry '1' for key 'a'
+@@ -43,7 +43,7 @@
+ ALTER TABLE t1 ADD <CUSTOM_INDEX> (a) USING HASH COMMENT 'simple index on a';
+ SHOW INDEX FROM t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH simple index on a
++t1 1 a 1 a # # NULL NULL # BTREE simple index on a
+ ALTER TABLE t1 DROP KEY a;
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+@@ -52,7 +52,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 0 a 1 a # # NULL NULL # HASH
++t1 0 a 1 a # # NULL NULL # BTREE
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ERROR 23000: Duplicate entry '1' for key 'a'
+@@ -61,7 +61,5 @@
+ ALTER TABLE t1 DROP INDEX a;
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ALTER TABLE t1 ADD UNIQUE INDEX a(a) USING HASH;
+-ERROR 23000: Duplicate entry '1' for key 'a'
+-# Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
+-# If you got a difference in error message, just add it to rdiff file
++# ERROR: Statement succeeded (expected results: ER_DUP_ENTRY,ER_DUP_KEY)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/insert_delayed.rdiff b/storage/myisammrg/mysql-test/storage_engine/insert_delayed.rdiff
index e0d0eefabe4..24ffa2ab82f 100644
--- a/storage/myisammrg/mysql-test/storage_engine/insert_delayed.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/insert_delayed.rdiff
@@ -1,14 +1,26 @@
-7a8,15
-> ERROR HY000: DELAYED option not supported for table 't1'
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_DELAYED_NOT_SUPPORTED.
-> # INSERT DELAYED or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-8a17
-> ERROR HY000: DELAYED option not supported for table 't1'
-23,24d31
-< 3 c
-< 4 d
+--- insert_delayed.result 2013-01-23 01:23:49.461254916 +0400
++++ insert_delayed.reject 2013-01-23 02:50:34.475819034 +0400
+@@ -5,7 +5,16 @@
+ connect con0,localhost,root,,;
+ SET lock_wait_timeout = 1;
+ INSERT DELAYED INTO t1 (a,b) VALUES (3,'c');
++ERROR HY000: DELAYED option not supported for table 't1'
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_DELAYED_NOT_SUPPORTED.
++# INSERT DELAYED or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ INSERT DELAYED INTO t1 SET a=4, b='d';
++ERROR HY000: DELAYED option not supported for table 't1'
+ INSERT DELAYED INTO t1 (a,b) SELECT 5, 'e';
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ disconnect con0;
+@@ -20,6 +29,4 @@
+ a b
+ 1 f
+ 2 b
+-3 c
+-4 d
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/lock.rdiff b/storage/myisammrg/mysql-test/storage_engine/lock.rdiff
index d035648551a..598e47b6883 100644
--- a/storage/myisammrg/mysql-test/storage_engine/lock.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/lock.rdiff
@@ -1,62 +1,80 @@
-44a45,46
-> ERROR HY000: Table 't1' was not locked with LOCK TABLES
-> # ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
-45a48,49
-> ERROR HY000: Table 't2' was not locked with LOCK TABLES
-> # ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
-46a51
-> ERROR HY000: Table 't2' was not locked with LOCK TABLES
-47a53
-> ERROR HY000: Table 't2' was not locked with LOCK TABLES
-48a55
-> ERROR 42S02: Table 'test.t1' doesn't exist
-49a57
-> ERROR 42S02: Table 'test.t1' doesn't exist
-50a59
-> ERROR 42S02: Table 'test.t1' doesn't exist
-52a62
-> ERROR 42S02: Table 'test.t1' doesn't exist
-54c64,65
-< ERROR HY000: Table 't1' was not locked with LOCK TABLES
----
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: ER_TABLE_NOT_LOCKED)
-56a68
-> ERROR 42S02: Table 'test.t1' doesn't exist
-57a70
-> ERROR 42S02: Table 'test.t1' doesn't exist
-59c72,73
-< ERROR HY000: Table 't2' was not locked with LOCK TABLES
----
-> ERROR 42S02: Unknown table 't1,t2'
-> # ERROR: Statement ended with errno 1051, errname ER_BAD_TABLE_ERROR (expected results: ER_TABLE_NOT_LOCKED)
-61a76
-> ERROR 42S02: Unknown table 't1,t2'
-67a83,84
-> ERROR HY000: Table 't1' was not locked with LOCK TABLES
-> # ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
-68a86,87
-> ERROR HY000: Table 't2' was not locked with LOCK TABLES
-> # ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
-69a89,90
-> ERROR HY000: Table 't3' was not locked with LOCK TABLES
-> # ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
-70a92
-> ERROR 42S02: Table 'test.t1' doesn't exist
-71a94,103
-> ERROR 42S02: Table 'test.t2' doesn't exist
-> # ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ ALTER TABLE t2 ADD COLUMN c2 INT(11) /*!*/ /*Custom column options*/ ]
-> # The statement|command finished with ER_NO_SUCH_TABLE.
-> # ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-72a105
-> ERROR 42S02: Unknown table 't1,t2,t3'
-109c142
-< a b
----
-> ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist
+--- lock.result 2013-01-23 01:24:01.797100027 +0400
++++ lock.reject 2013-01-23 02:50:53.291582487 +0400
+@@ -42,34 +42,67 @@
+ UPDATE t1 SET id=1 WHERE id=-1;
+ DROP TABLE t1,t2;
+ CREATE TABLE t1 (i1 <INT_COLUMN>, nr <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
++ERROR HY000: Table 't1' was not locked with LOCK TABLES
++# ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
+ CREATE TABLE t2 (nr <INT_COLUMN>, nm <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
++ERROR HY000: Table 't2' was not locked with LOCK TABLES
++# ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
+ INSERT INTO t2 (nr,nm) VALUES (1,3);
++ERROR HY000: Table 't2' was not locked with LOCK TABLES
+ INSERT INTO t2 (nr,nm) VALUES (2,4);
++ERROR HY000: Table 't2' was not locked with LOCK TABLES
+ lock tables t1 write, t2 read;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ UNLOCK TABLES;
+ LOCK TABLES t1 WRITE;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1;
+-ERROR HY000: Table 't1' was not locked with LOCK TABLES
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: ER_TABLE_NOT_LOCKED)
+ UNLOCK TABLES;
+ LOCK TABLES t1 WRITE, t1 AS t1_alias READ;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ DROP TABLE t1,t2;
+-ERROR HY000: Table 't2' was not locked with LOCK TABLES
++ERROR 42S02: Unknown table 't1,t2'
++# ERROR: Statement ended with errno 1051, errname ER_BAD_TABLE_ERROR (expected results: ER_TABLE_NOT_LOCKED)
+ UNLOCK TABLES;
+ DROP TABLE t1,t2;
++ERROR 42S02: Unknown table 't1,t2'
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CREATE TABLE t3 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE;
+ DROP TABLE t2, t3, t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
++ERROR HY000: Table 't1' was not locked with LOCK TABLES
++# ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
++ERROR HY000: Table 't2' was not locked with LOCK TABLES
++# ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
+ CREATE TABLE t3 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
++ERROR HY000: Table 't3' was not locked with LOCK TABLES
++# ERROR: Statement ended with errno 1100, errname ER_TABLE_NOT_LOCKED (expected to succeed)
+ LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ;
++ERROR 42S02: Table 'test.t1' doesn't exist
+ ALTER TABLE t2 ADD COLUMN c2 <INT_COLUMN>;
++ERROR 42S02: Table 'test.t2' doesn't exist
++# ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ ALTER TABLE t2 ADD COLUMN c2 INT(11) /*!*/ /*Custom column options*/ ]
++# The statement|command finished with ER_NO_SUCH_TABLE.
++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t1, t2, t3;
++ERROR 42S02: Unknown table 't1,t2,t3'
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ LOCK TABLE t1 READ, t2 READ;
+@@ -106,6 +139,6 @@
+ FLUSH TABLE t1;
+ DROP TEMPORARY TABLE t1;
+ SELECT a,b FROM t1;
+-a b
++ERROR HY000: Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist
+ UNLOCK TABLES;
+ DROP TABLE t1, t2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/optimize_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/optimize_table.rdiff
index f74c49cedea..1b611adf25a 100644
--- a/storage/myisammrg/mysql-test/storage_engine/optimize_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/optimize_table.rdiff
@@ -1,24 +1,35 @@
-8c8
-< test.t1 optimize status OK
----
-> test.t1 optimize note The storage engine for the table doesn't support optimize
-12c12
-< test.t2 optimize status OK
----
-> test.t2 optimize note The storage engine for the table doesn't support optimize
-17,18c17,18
-< test.t1 optimize status OK
-< test.t2 optimize status OK
----
-> test.t1 optimize note The storage engine for the table doesn't support optimize
-> test.t2 optimize note The storage engine for the table doesn't support optimize
-21,22c21,22
-< test.t1 optimize status Table is already up to date
-< test.t2 optimize status Table is already up to date
----
-> test.t1 optimize note The storage engine for the table doesn't support optimize
-> test.t2 optimize note The storage engine for the table doesn't support optimize
-28c28
-< test.t1 optimize status OK
----
-> test.t1 optimize note The storage engine for the table doesn't support optimize
+--- optimize_table.result 2013-01-22 22:05:05.246633000 +0400
++++ optimize_table.reject 2013-01-23 02:50:54.339569313 +0400
+@@ -5,25 +5,25 @@
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
++test.t1 optimize note The storage engine for the table doesn't support optimize
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 optimize status OK
++test.t2 optimize note The storage engine for the table doesn't support optimize
+ INSERT INTO t2 (a,b) VALUES (5,'e');
+ INSERT INTO t1 (a,b) VALUES (6,'f');
+ OPTIMIZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
+-test.t2 optimize status OK
++test.t1 optimize note The storage engine for the table doesn't support optimize
++test.t2 optimize note The storage engine for the table doesn't support optimize
+ OPTIMIZE TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status Table is already up to date
+-test.t2 optimize status Table is already up to date
++test.t1 optimize note The storage engine for the table doesn't support optimize
++test.t2 optimize note The storage engine for the table doesn't support optimize
+ DROP TABLE t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
++test.t1 optimize note The storage engine for the table doesn't support optimize
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/alter_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/alter_table.rdiff
index a2cb0c1dcb1..4c7ba7d8232 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/alter_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/alter_table.rdiff
@@ -1,63 +1,68 @@
-3,36c3,12
-< INSERT INTO t1 (a) VALUES (1),(2),(2),(3),(4);
-< ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
-< EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 3;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p3 # # # # # # #
-< ALTER TABLE t1 COALESCE PARTITION 1;
-< EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 3;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< ALTER TABLE t1 REORGANIZE PARTITION;
-< EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 2;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< ALTER TABLE t1 REBUILD PARTITION p0;
-< EXPLAIN PARTiTIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< ALTER TABLE t1 REMOVE PARTITIONING;
-< EXPLAIN PARTiTIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 NULL # # # # # # #
-< ALTER TABLE t1 PARTITION BY LIST(a) (PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (101,102));
-< ERROR HY000: Table has no partition for value 4
-< ALTER TABLE t1 PARTITION BY LIST(a) (PARTITION p0 VALUES IN (1,2,3,4), PARTITION p1 VALUES IN (101,102));
-< INSERT INTO t1 (a) VALUES (50);
-< ERROR HY000: Table has no partition for value 50
-< ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (50,51));
-< INSERT INTO t1 (a) VALUES (50);
-< ALTER TABLE t1 DROP PARTITION p1;
-< ALTER TABLE t1 REORGANIZE PARTITION p0, p2 INTO (PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4), PARTITION p2 VALUES IN (50,51), PARTITION p3 VALUES IN (101,102));
-< EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 2;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-41,42c17,29
-< ALTER TABLE t1 DROP PARTITION abc;
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
-> PARTITION abc VALUES IN (1,2,3),
-> PARTITION def VALUES IN (100,101,102)
-> ) ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or subpartitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- alter_table.result 2013-01-22 22:05:05.246633000 +0400
++++ alter_table.reject 2013-01-23 03:16:22.620356221 +0400
+@@ -1,42 +1,29 @@
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a) VALUES (1),(2),(2),(3),(4);
+-ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+-EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 3;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p3 # # # # # # #
+-ALTER TABLE t1 COALESCE PARTITION 1;
+-EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 3;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-ALTER TABLE t1 REORGANIZE PARTITION;
+-EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 2;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-ALTER TABLE t1 REBUILD PARTITION p0;
+-EXPLAIN PARTiTIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-ALTER TABLE t1 REMOVE PARTITIONING;
+-EXPLAIN PARTiTIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 NULL # # # # # # #
+-ALTER TABLE t1 PARTITION BY LIST(a) (PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (101,102));
+-ERROR HY000: Table has no partition for value 4
+-ALTER TABLE t1 PARTITION BY LIST(a) (PARTITION p0 VALUES IN (1,2,3,4), PARTITION p1 VALUES IN (101,102));
+-INSERT INTO t1 (a) VALUES (50);
+-ERROR HY000: Table has no partition for value 50
+-ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (50,51));
+-INSERT INTO t1 (a) VALUES (50);
+-ALTER TABLE t1 DROP PARTITION p1;
+-ALTER TABLE t1 REORGANIZE PARTITION p0, p2 INTO (PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4), PARTITION p2 VALUES IN (50,51), PARTITION p3 VALUES IN (101,102));
+-EXPLAIN PARTiTIONS SELECT a FROM t1 WHERE a = 2;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
+ PARTITION abc VALUES IN (1,2,3),
+ PARTITION def VALUES IN (100,101,102)
+ );
+-ALTER TABLE t1 DROP PARTITION abc;
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
++PARTITION abc VALUES IN (1,2,3),
++PARTITION def VALUES IN (100,101,102)
++) ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or subpartitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/analyze_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/analyze_table.rdiff
index ce29f3dcf52..d1934b9d2ac 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/analyze_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/analyze_table.rdiff
@@ -1,83 +1,87 @@
-3,18c3,12
-< INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
-< CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< INSERT INTO t2 (a,b) SELECT a,b FROM t1;
-< INSERT INTO t1 (a,b) VALUES (3,'c');
-< ALTER TABLE t1 ANALYZE PARTITION p0;
-< Table Op Msg_type Msg_text
-< test.t1 analyze status OK
-< INSERT INTO t2 (a,b) VALUES (4,'d'), (1000,'e');
-< ALTER TABLE t1 ANALYZE PARTITION LOCAL ALL;
-< Table Op Msg_type Msg_text
-< test.t1 analyze status OK
-< INSERT INTO t1 (a,b) VALUES (5,'f'),(50,'g');
-< ALTER TABLE t1 ANALYZE PARTITION NO_WRITE_TO_BINLOG p1,p0;
-< Table Op Msg_type Msg_text
-< test.t1 analyze status OK
-< DROP TABLE t1, t2;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-20a15,16
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-21a18
-> ERROR 42S02: Table 'test.t1' doesn't exist
-22a20,21
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-23a23
-> ERROR 42S02: Table 'test.t1' doesn't exist
-26c26,27
-< test.t1 analyze status OK
----
-> test.t1 analyze Error Table 'test.t1' doesn't exist
-> test.t1 analyze status Operation failed
-27a29
-> ERROR 42S02: Table 'test.t2' doesn't exist
-30c32,33
-< test.t2 analyze status OK
----
-> test.t2 analyze Error Table 'test.t2' doesn't exist
-> test.t2 analyze status Operation failed
-31a35
-> ERROR 42S02: Table 'test.t1' doesn't exist
-32a37
-> ERROR 42S02: Table 'test.t2' doesn't exist
-35,36c40,43
-< test.t1 analyze status OK
-< test.t2 analyze status OK
----
-> test.t1 analyze Error Table 'test.t1' doesn't exist
-> test.t1 analyze status Operation failed
-> test.t2 analyze Error Table 'test.t2' doesn't exist
-> test.t2 analyze status Operation failed
-37a45
-> ERROR 42S02: Unknown table 't1,t2'
-38a47,48
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-39a50
-> ERROR 42S02: Table 'test.t1' doesn't exist
-42c53,54
-< test.t1 analyze status OK
----
-> test.t1 analyze Error Table 'test.t1' doesn't exist
-> test.t1 analyze status Operation failed
-43a56
-> ERROR 42S02: Table 'test.t1' doesn't exist
-46c59,60
-< test.t1 analyze status OK
----
-> test.t1 analyze Error Table 'test.t1' doesn't exist
-> test.t1 analyze status Operation failed
-47a62
-> ERROR 42S02: Unknown table 't1'
+--- analyze_table.result 2013-01-22 22:05:05.246633000 +0400
++++ analyze_table.reject 2013-01-23 03:16:23.240348427 +0400
+@@ -1,47 +1,62 @@
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
+-CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-INSERT INTO t2 (a,b) SELECT a,b FROM t1;
+-INSERT INTO t1 (a,b) VALUES (3,'c');
+-ALTER TABLE t1 ANALYZE PARTITION p0;
+-Table Op Msg_type Msg_text
+-test.t1 analyze status OK
+-INSERT INTO t2 (a,b) VALUES (4,'d'), (1000,'e');
+-ALTER TABLE t1 ANALYZE PARTITION LOCAL ALL;
+-Table Op Msg_type Msg_text
+-test.t1 analyze status OK
+-INSERT INTO t1 (a,b) VALUES (5,'f'),(50,'g');
+-ALTER TABLE t1 ANALYZE PARTITION NO_WRITE_TO_BINLOG p1,p0;
+-Table Op Msg_type Msg_text
+-test.t1 analyze status OK
+-DROP TABLE t1, t2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (3,'c');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze Error Table 'test.t1' doesn't exist
++test.t1 analyze status Operation failed
+ INSERT INTO t2 (a,b) VALUES (4,'d');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ ANALYZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 analyze status OK
++test.t2 analyze Error Table 'test.t2' doesn't exist
++test.t2 analyze status Operation failed
+ INSERT INTO t1 (a,b) VALUES (5,'e');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (6,'f');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ ANALYZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
+-test.t2 analyze status OK
++test.t1 analyze Error Table 'test.t1' doesn't exist
++test.t1 analyze status Operation failed
++test.t2 analyze Error Table 'test.t2' doesn't exist
++test.t2 analyze status Operation failed
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
+ CREATE TABLE t1 (a <INT_COLUMN>, <CUSTOM_INDEX>(a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a) VALUES (1),(2),(4),(7);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze Error Table 'test.t1' doesn't exist
++test.t1 analyze status Operation failed
+ INSERT INTO t1 (a) VALUES (8),(10),(11),(12);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ ANALYZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 analyze status OK
++test.t1 analyze Error Table 'test.t1' doesn't exist
++test.t1 analyze status Operation failed
+ DROP TABLE t1;
++ERROR 42S02: Unknown table 't1'
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/check_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/check_table.rdiff
index 3f3db85a23e..21ccf192e08 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/check_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/check_table.rdiff
@@ -1,172 +1,176 @@
-3,36c3,12
-< INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
-< CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
-< PARTITION p0 VALUES LESS THAN (100),
-< PARTITION p1 VALUES LESS THAN MAXVALUE
-< );
-< INSERT INTO t2 (a,b) SELECT a, b FROM t1;
-< ALTER TABLE t1 CHECK PARTITION p0;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< INSERT INTO t1 (a,b) VALUES (3,'c');
-< ALTER TABLE t1 CHECK PARTITION p0, p1 FOR UPGRADE;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< INSERT INTO t2 (a,b) VALUES (10000,'e');
-< ALTER TABLE t2 CHECK PARTITION p0 QUICK;
-< Table Op Msg_type Msg_text
-< test.t2 check status OK
-< INSERT INTO t1 (a,b) VALUES (6,'f');
-< ALTER TABLE t1 CHECK PARTITION p1 FAST;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< INSERT INTO t2 (a,b) VALUES (8,'h');
-< ALTER TABLE t2 CHECK PARTITION p1 MEDIUM;
-< Table Op Msg_type Msg_text
-< test.t2 check status OK
-< INSERT INTO t1 (a,b) VALUES (9,'i');
-< ALTER TABLE t1 CHECK PARTITION ALL EXTENDED;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< INSERT INTO t1 (a,b) VALUES (11,'k');
-< ALTER TABLE t1 CHECK PARTITION p0 CHANGED;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< DROP TABLE t1, t2;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-38a15,16
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-39a18
-> ERROR 42S02: Table 'test.t1' doesn't exist
-40a20,21
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-43c24,25
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-44a27
-> ERROR 42S02: Table 'test.t1' doesn't exist
-45a29
-> ERROR 42S02: Table 'test.t2' doesn't exist
-48,49c32,35
-< test.t1 check status OK
-< test.t2 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-> test.t2 check Error Table 'test.t2' doesn't exist
-> test.t2 check status Operation failed
-50a37
-> ERROR 42S02: Table 'test.t2' doesn't exist
-53c40,41
-< test.t2 check status OK
----
-> test.t2 check Error Table 'test.t2' doesn't exist
-> test.t2 check status Operation failed
-54a43
-> ERROR 42S02: Table 'test.t1' doesn't exist
-57c46,47
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-58a49
-> ERROR 42S02: Table 'test.t1' doesn't exist
-59a51
-> ERROR 42S02: Table 'test.t2' doesn't exist
-62,63c54,57
-< test.t2 check status OK
-< test.t1 check status OK
----
-> test.t2 check Error Table 'test.t2' doesn't exist
-> test.t2 check status Operation failed
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-64a59
-> ERROR 42S02: Table 'test.t1' doesn't exist
-65a61
-> ERROR 42S02: Table 'test.t2' doesn't exist
-68,69c64,67
-< test.t1 check status OK
-< test.t2 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-> test.t2 check Error Table 'test.t2' doesn't exist
-> test.t2 check status Operation failed
-70a69
-> ERROR 42S02: Table 'test.t1' doesn't exist
-73c72,73
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-74a75
-> ERROR 42S02: Unknown table 't1,t2'
-75a77,78
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-76a80
-> ERROR 42S02: Table 'test.t1' doesn't exist
-79c83,84
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-80a86
-> ERROR 42S02: Table 'test.t1' doesn't exist
-83c89,90
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-84a92
-> ERROR 42S02: Table 'test.t1' doesn't exist
-87c95,96
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-88a98
-> ERROR 42S02: Table 'test.t1' doesn't exist
-91c101,102
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-92a104
-> ERROR 42S02: Table 'test.t1' doesn't exist
-95c107,108
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-96a110
-> ERROR 42S02: Table 'test.t1' doesn't exist
-99c113,114
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-100a116
-> ERROR 42S02: Table 'test.t1' doesn't exist
-103c119,120
-< test.t1 check status OK
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-104a122
-> ERROR 42S02: Unknown table 't1'
+--- check_table.result 2013-01-22 22:05:05.246633000 +0400
++++ check_table.reject 2013-01-23 03:16:23.872340482 +0400
+@@ -1,104 +1,122 @@
+ DROP TABLE IF EXISTS t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
+-CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
+-PARTITION p0 VALUES LESS THAN (100),
+-PARTITION p1 VALUES LESS THAN MAXVALUE
+-);
+-INSERT INTO t2 (a,b) SELECT a, b FROM t1;
+-ALTER TABLE t1 CHECK PARTITION p0;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-INSERT INTO t1 (a,b) VALUES (3,'c');
+-ALTER TABLE t1 CHECK PARTITION p0, p1 FOR UPGRADE;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-INSERT INTO t2 (a,b) VALUES (10000,'e');
+-ALTER TABLE t2 CHECK PARTITION p0 QUICK;
+-Table Op Msg_type Msg_text
+-test.t2 check status OK
+-INSERT INTO t1 (a,b) VALUES (6,'f');
+-ALTER TABLE t1 CHECK PARTITION p1 FAST;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-INSERT INTO t2 (a,b) VALUES (8,'h');
+-ALTER TABLE t2 CHECK PARTITION p1 MEDIUM;
+-Table Op Msg_type Msg_text
+-test.t2 check status OK
+-INSERT INTO t1 (a,b) VALUES (9,'i');
+-ALTER TABLE t1 CHECK PARTITION ALL EXTENDED;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-INSERT INTO t1 (a,b) VALUES (11,'k');
+-ALTER TABLE t1 CHECK PARTITION p0 CHANGED;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-DROP TABLE t1, t2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a,b) VALUES (3,'c');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (4,'d');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ CHECK TABLE t1, t2 FOR UPGRADE;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
+-test.t2 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
++test.t2 check Error Table 'test.t2' doesn't exist
++test.t2 check status Operation failed
+ INSERT INTO t2 (a,b) VALUES (5,'e');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ CHECK TABLE t2 QUICK;
+ Table Op Msg_type Msg_text
+-test.t2 check status OK
++test.t2 check Error Table 'test.t2' doesn't exist
++test.t2 check status Operation failed
+ INSERT INTO t1 (a,b) VALUES (6,'f');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 FAST;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a,b) VALUES (7,'g');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (8,'h');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ CHECK TABLE t2, t1 MEDIUM;
+ Table Op Msg_type Msg_text
+-test.t2 check status OK
+-test.t1 check status OK
++test.t2 check Error Table 'test.t2' doesn't exist
++test.t2 check status Operation failed
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a,b) VALUES (9,'i');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (10,'j');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ CHECK TABLE t1, t2 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
+-test.t2 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
++test.t2 check Error Table 'test.t2' doesn't exist
++test.t2 check status Operation failed
+ INSERT INTO t1 (a,b) VALUES (11,'k');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 CHANGED;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
+ CREATE TABLE t1 (a <INT_COLUMN>, <CUSTOM_INDEX>(a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a) VALUES (1),(2),(5);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (6),(8),(12);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 FOR UPGRADE;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (13),(15),(16);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 QUICK;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (17),(120),(132);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 FAST;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (801),(900),(7714);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 MEDIUM;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (8760),(10023),(12000);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028);
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CHECK TABLE t1 CHANGED;
+ Table Op Msg_type Msg_text
+-test.t1 check status OK
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ DROP TABLE t1;
++ERROR 42S02: Unknown table 't1'
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/checksum_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/checksum_table.rdiff
index bc5b07686bf..3d9ebdfe5c4 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/checksum_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/checksum_table.rdiff
@@ -1,81 +1,89 @@
-2a3,4
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-3a6
-> ERROR 42S02: Table 'test.t1' doesn't exist
-4a8,9
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-7c12,14
-< test.t1 4272806499
----
-> test.t1 NULL
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-10,11c17,21
-< test.t2 0
-< test.t1 4272806499
----
-> test.t2 NULL
-> test.t1 NULL
-> Warnings:
-> Error 1146 Table 'test.t2' doesn't exist
-> Error 1146 Table 'test.t1' doesn't exist
-15a26,28
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-> Error 1146 Table 'test.t2' doesn't exist
-18,19c31,35
-< test.t1 4272806499
-< test.t2 0
----
-> test.t1 NULL
-> test.t2 NULL
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-> Error 1146 Table 'test.t2' doesn't exist
-20a37
-> ERROR 42S02: Unknown table 't1,t2'
-22a40,41
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-23a43
-> ERROR 42S02: Table 'test.t1' doesn't exist
-24a45,46
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-27c49,51
-< test.t1 0
----
-> test.t1 NULL
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-30,31c54,58
-< test.t2 0
-< test.t1 0
----
-> test.t2 NULL
-> test.t1 NULL
-> Warnings:
-> Error 1146 Table 'test.t2' doesn't exist
-> Error 1146 Table 'test.t1' doesn't exist
-34,35c61,65
-< test.t1 0
-< test.t2 0
----
-> test.t1 NULL
-> test.t2 NULL
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-> Error 1146 Table 'test.t2' doesn't exist
-38,39c68,72
-< test.t1 4272806499
-< test.t2 0
----
-> test.t1 NULL
-> test.t2 NULL
-> Warnings:
-> Error 1146 Table 'test.t1' doesn't exist
-> Error 1146 Table 'test.t2' doesn't exist
-40a74
-> ERROR 42S02: Unknown table 't1,t2'
+--- checksum_table.result 2013-01-22 22:05:05.246633000 +0400
++++ checksum_table.reject 2013-01-23 03:16:24.496332636 +0400
+@@ -1,40 +1,74 @@
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> CHECKSUM=0 PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> CHECKSUM=0 PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ CHECKSUM TABLE t1;
+ Table Checksum
+-test.t1 4272806499
++test.t1 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
+ CHECKSUM TABLE t2, t1;
+ Table Checksum
+-test.t2 0
+-test.t1 4272806499
++test.t2 NULL
++test.t1 NULL
++Warnings:
++Error 1146 Table 'test.t2' doesn't exist
++Error 1146 Table 'test.t1' doesn't exist
+ CHECKSUM TABLE t1, t2 QUICK;
+ Table Checksum
+ test.t1 NULL
+ test.t2 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
++Error 1146 Table 'test.t2' doesn't exist
+ CHECKSUM TABLE t1, t2 EXTENDED;
+ Table Checksum
+-test.t1 4272806499
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
++Error 1146 Table 'test.t2' doesn't exist
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> CHECKSUM=1 PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> CHECKSUM=1 PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ CHECKSUM TABLE t1;
+ Table Checksum
+-test.t1 0
++test.t1 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
+ CHECKSUM TABLE t2, t1;
+ Table Checksum
+-test.t2 0
+-test.t1 0
++test.t2 NULL
++test.t1 NULL
++Warnings:
++Error 1146 Table 'test.t2' doesn't exist
++Error 1146 Table 'test.t1' doesn't exist
+ CHECKSUM TABLE t1, t2 QUICK;
+ Table Checksum
+-test.t1 0
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
++Error 1146 Table 'test.t2' doesn't exist
+ CHECKSUM TABLE t1, t2 EXTENDED;
+ Table Checksum
+-test.t1 4272806499
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
++Warnings:
++Error 1146 Table 'test.t1' doesn't exist
++Error 1146 Table 'test.t2' doesn't exist
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/create_table.rdiff
index 11fbc4812dc..d6aa75f1c44 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/create_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/create_table.rdiff
@@ -1,156 +1,159 @@
-3,10c3,12
-< INSERT INTO t1 (a) VALUES (1),(2),(3),(2);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a=2;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-12,19c14,23
-< INSERT INTO t1 (a) VALUES ('a'),('b'),('c');
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 'b';
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p1 # # # # # # #
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or CHAR types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-21,31c25,34
-< SHOW INDEX IN t1;
-< Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-< t1 1 a 1 a # # NULL NULL # #
-< INSERT INTO t1 (a) VALUES (1),(2),(3),(5);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a IN (1,3);
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/, /*!INDEX*/ /*Custom index*/ (a)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-33,43c36,45
-< SHOW INDEX IN t1;
-< Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-< t1 0 PRIMARY 1 a # # NULL NULL # #
-< INSERT INTO t1 (a) VALUES (1),(200),(3),(2);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a=2;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p1 # # # # # # #
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/ PRIMARY KEY) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY() PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # PK or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-48,58c50,63
-< INSERT INTO t1 (a) VALUES (1),(2),(400);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 2;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0 # # # # # # #
-< INSERT INTO t1 (a) VALUES (10000);
-< ERROR HY000: Table has no partition for value 10000
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) (
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY RANGE(a) (
-> PARTITION p0 VALUES LESS THAN (10),
-> PARTITION p1 VALUES LESS THAN (1000)
-> ) ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-> CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
-62,72c67,70
-< INSERT INTO t1 (a) VALUES (1),(101),(1);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 abc,def # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE NULL NULL # # # # # # #
-< INSERT INTO t1 (a) VALUES (50);
-< ERROR HY000: Table has no partition for value 50
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
-75,91c73,79
-< );
-< SHOW INDEX IN t1;
-< Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-< INSERT INTO t1 (a,b) VALUES (1,1),(101,2),(1,3);
-< EXPLAIN PARTITIONS SELECT a FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 abc_abcsp0,def_defsp0 # # # # # # #
-< EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE NULL NULL # # # # # # #
-< SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, SUBPARTITION_NAME, PARTITION_METHOD, SUBPARTITION_METHOD
-< FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 't1';
-< TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_METHOD SUBPARTITION_METHOD
-< test t1 abc abcsp0 LIST HASH
-< test t1 def defsp0 LIST HASH
-< SELECT * FROM INFORMATION_SCHEMA.PARTITIONS;
-< DROP TABLE t1;
----
-> ) ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or subpartitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- create_table.result 2013-01-22 22:05:05.246633000 +0400
++++ create_table.reject 2013-01-23 03:16:25.160324290 +0400
+@@ -1,91 +1,79 @@
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a) VALUES (1),(2),(3),(2);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a=2;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY KEY(a) PARTITIONS 2;
+-INSERT INTO t1 (a) VALUES ('a'),('b'),('c');
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 'b';
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p1 # # # # # # #
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or CHAR types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY KEY(a) PARTITIONS 2;
+-SHOW INDEX IN t1;
+-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # #
+-INSERT INTO t1 (a) VALUES (1),(2),(3),(5);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a IN (1,3);
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/, /*!INDEX*/ /*Custom index*/ (a)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN> PRIMARY KEY) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY KEY() PARTITIONS 2;
+-SHOW INDEX IN t1;
+-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 0 PRIMARY 1 a # # NULL NULL # #
+-INSERT INTO t1 (a) VALUES (1),(200),(3),(2);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a=2;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p1 # # # # # # #
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/ PRIMARY KEY) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY KEY() PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# PK or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
+ PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN (1000)
+ );
+-INSERT INTO t1 (a) VALUES (1),(2),(400);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 2;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0 # # # # # # #
+-INSERT INTO t1 (a) VALUES (10000);
+-ERROR HY000: Table has no partition for value 10000
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) (
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY RANGE(a) (
++PARTITION p0 VALUES LESS THAN (10),
++PARTITION p1 VALUES LESS THAN (1000)
++) ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
++CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
+ PARTITION abc VALUES IN (1,2,3),
+ PARTITION def VALUES IN (100,101,102)
+ );
+-INSERT INTO t1 (a) VALUES (1),(101),(1);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 abc,def # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE NULL NULL # # # # # # #
+-INSERT INTO t1 (a) VALUES (50);
+-ERROR HY000: Table has no partition for value 50
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY LIST(a) SUBPARTITION by HASH(b) (
+ PARTITION abc VALUES IN (1,2,3),
+ PARTITION def VALUES IN (100,101,102)
+-);
+-SHOW INDEX IN t1;
+-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-INSERT INTO t1 (a,b) VALUES (1,1),(101,2),(1,3);
+-EXPLAIN PARTITIONS SELECT a FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 abc_abcsp0,def_defsp0 # # # # # # #
+-EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE NULL NULL # # # # # # #
+-SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, SUBPARTITION_NAME, PARTITION_METHOD, SUBPARTITION_METHOD
+-FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 't1';
+-TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_METHOD SUBPARTITION_METHOD
+-test t1 abc abcsp0 LIST HASH
+-test t1 def defsp0 LIST HASH
+-SELECT * FROM INFORMATION_SCHEMA.PARTITIONS;
+-DROP TABLE t1;
++) ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or subpartitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/optimize_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/optimize_table.rdiff
index 350d93fe91f..242b3778846 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/optimize_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/optimize_table.rdiff
@@ -1,91 +1,95 @@
-3,25c3,12
-< INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
-< CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
-< PARTITION p0 VALUES LESS THAN (100),
-< PARTITION p1 VALUES LESS THAN MAXVALUE
-< );
-< INSERT INTO t2 (a,b) SELECT a, b FROM t1;
-< INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
-< ALTER TABLE t1 OPTIMIZE PARTITION p1;
-< Table Op Msg_type Msg_text
-< test.t1 optimize status OK
-< INSERT INTO t2 (a,b) VALUES (4,'d');
-< ALTER TABLE t2 OPTIMIZE PARTITION p0 NO_WRITE_TO_BINLOG;
-< Table Op Msg_type Msg_text
-< test.t2 optimize status OK
-< INSERT INTO t1 (a,b) VALUES (6,'f');
-< ALTER TABLE t1 OPTIMIZE PARTITION ALL LOCAL;
-< Table Op Msg_type Msg_text
-< test.t1 optimize status OK
-< INSERT INTO t2 (a,b) VALUES (5,'e');
-< ALTER TABLE t2 OPTIMIZE PARTITION p1,p0;
-< Table Op Msg_type Msg_text
-< test.t2 optimize status OK
-< DROP TABLE t1, t2;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-27a15,16
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-28a18
-> ERROR 42S02: Table 'test.t1' doesn't exist
-29a20,21
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-30a23
-> ERROR 42S02: Table 'test.t1' doesn't exist
-33c26,27
-< test.t1 optimize status OK
----
-> test.t1 optimize Error Table 'test.t1' doesn't exist
-> test.t1 optimize status Operation failed
-34a29
-> ERROR 42S02: Table 'test.t2' doesn't exist
-37c32,33
-< test.t2 optimize status OK
----
-> test.t2 optimize Error Table 'test.t2' doesn't exist
-> test.t2 optimize status Operation failed
-38a35
-> ERROR 42S02: Table 'test.t2' doesn't exist
-39a37
-> ERROR 42S02: Table 'test.t1' doesn't exist
-42,43c40,43
-< test.t1 optimize status OK
-< test.t2 optimize status OK
----
-> test.t1 optimize Error Table 'test.t1' doesn't exist
-> test.t1 optimize status Operation failed
-> test.t2 optimize Error Table 'test.t2' doesn't exist
-> test.t2 optimize status Operation failed
-46,47c46,49
-< test.t1 optimize status OK
-< test.t2 optimize status OK
----
-> test.t1 optimize Error Table 'test.t1' doesn't exist
-> test.t1 optimize status Operation failed
-> test.t2 optimize Error Table 'test.t2' doesn't exist
-> test.t2 optimize status Operation failed
-48a51
-> ERROR 42S02: Unknown table 't1,t2'
-49a53,54
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-50a56
-> ERROR 42S02: Table 'test.t1' doesn't exist
-53c59,60
-< test.t1 optimize status OK
----
-> test.t1 optimize Error Table 'test.t1' doesn't exist
-> test.t1 optimize status Operation failed
-54a62
-> ERROR 42S02: Unknown table 't1'
+--- optimize_table.result 2013-01-22 22:05:05.246633000 +0400
++++ optimize_table.reject 2013-01-23 03:16:25.780316495 +0400
+@@ -1,54 +1,62 @@
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
+-CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
+-PARTITION p0 VALUES LESS THAN (100),
+-PARTITION p1 VALUES LESS THAN MAXVALUE
+-);
+-INSERT INTO t2 (a,b) SELECT a, b FROM t1;
+-INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+-ALTER TABLE t1 OPTIMIZE PARTITION p1;
+-Table Op Msg_type Msg_text
+-test.t1 optimize status OK
+-INSERT INTO t2 (a,b) VALUES (4,'d');
+-ALTER TABLE t2 OPTIMIZE PARTITION p0 NO_WRITE_TO_BINLOG;
+-Table Op Msg_type Msg_text
+-test.t2 optimize status OK
+-INSERT INTO t1 (a,b) VALUES (6,'f');
+-ALTER TABLE t1 OPTIMIZE PARTITION ALL LOCAL;
+-Table Op Msg_type Msg_text
+-test.t1 optimize status OK
+-INSERT INTO t2 (a,b) VALUES (5,'e');
+-ALTER TABLE t2 OPTIMIZE PARTITION p1,p0;
+-Table Op Msg_type Msg_text
+-test.t2 optimize status OK
+-DROP TABLE t1, t2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
++test.t1 optimize Error Table 'test.t1' doesn't exist
++test.t1 optimize status Operation failed
+ INSERT INTO t2 (a,b) VALUES (4,'d');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 optimize status OK
++test.t2 optimize Error Table 'test.t2' doesn't exist
++test.t2 optimize status Operation failed
+ INSERT INTO t2 (a,b) VALUES (5,'e');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ INSERT INTO t1 (a,b) VALUES (6,'f');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ OPTIMIZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
+-test.t2 optimize status OK
++test.t1 optimize Error Table 'test.t1' doesn't exist
++test.t1 optimize status Operation failed
++test.t2 optimize Error Table 'test.t2' doesn't exist
++test.t2 optimize status Operation failed
+ OPTIMIZE TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
+-test.t2 optimize status OK
++test.t1 optimize Error Table 'test.t1' doesn't exist
++test.t1 optimize status Operation failed
++test.t2 optimize Error Table 'test.t2' doesn't exist
++test.t2 optimize status Operation failed
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status OK
++test.t1 optimize Error Table 'test.t1' doesn't exist
++test.t1 optimize status Operation failed
+ DROP TABLE t1;
++ERROR 42S02: Unknown table 't1'
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
index 4c39421261c..2b5d5f68346 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff
@@ -1,295 +1,303 @@
-4,33c4,13
-< INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
-< CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
-< PARTITION p0 VALUES LESS THAN (100),
-< PARTITION p1 VALUES LESS THAN MAXVALUE
-< );
-< INSERT INTO t2 (a,b) SELECT a, b FROM t1;
-< ALTER TABLE t1 REPAIR PARTITION p0;
-< Table Op Msg_type Msg_text
-< test.t1 repair status OK
-< INSERT INTO t1 VALUES (3,'c');
-< ALTER TABLE t1 REPAIR PARTITION NO_WRITE_TO_BINLOG p0, p1;
-< Table Op Msg_type Msg_text
-< test.t1 repair status OK
-< INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
-< ALTER TABLE t2 REPAIR PARTITION LOCAL p1;
-< Table Op Msg_type Msg_text
-< test.t2 repair status OK
-< INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
-< ALTER TABLE t1 REPAIR PARTITION LOCAL ALL EXTENDED;
-< Table Op Msg_type Msg_text
-< test.t1 repair status OK
-< INSERT INTO t1 VALUES (10,'j');
-< ALTER TABLE t1 REPAIR PARTITION p1 QUICK USE_FRM;
-< Table Op Msg_type Msg_text
-< test.t1 repair status OK
-< INSERT INTO t2 (a,b) VALUES (12,'l');
-< ALTER TABLE t2 REPAIR PARTITION NO_WRITE_TO_BINLOG ALL QUICK EXTENDED USE_FRM;
-< Table Op Msg_type Msg_text
-< test.t2 repair status OK
-< DROP TABLE t1, t2;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-35a16,17
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-36a19
-> ERROR 42S02: Table 'test.t1' doesn't exist
-37a21,22
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-40c25,26
-< test.t1 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-41a28
-> ERROR 42S02: Table 'test.t1' doesn't exist
-42a30
-> ERROR 42S02: Table 'test.t2' doesn't exist
-45,46c33,36
-< test.t1 repair status OK
-< test.t2 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-> test.t2 repair Error Table 'test.t2' doesn't exist
-> test.t2 repair status Operation failed
-47a38
-> ERROR 42S02: Table 'test.t2' doesn't exist
-50c41,42
-< test.t2 repair status OK
----
-> test.t2 repair Error Table 'test.t2' doesn't exist
-> test.t2 repair status Operation failed
-51a44
-> ERROR 42S02: Table 'test.t1' doesn't exist
-52a46
-> ERROR 42S02: Table 'test.t2' doesn't exist
-55,56c49,52
-< test.t2 repair status OK
-< test.t1 repair status OK
----
-> test.t2 repair Error Table 'test.t2' doesn't exist
-> test.t2 repair status Operation failed
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-57a54
-> ERROR 42S02: Table 'test.t1' doesn't exist
-58a56
-> ERROR 42S02: Table 'test.t2' doesn't exist
-61,62c59,62
-< test.t1 repair status OK
-< test.t2 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-> test.t2 repair Error Table 'test.t2' doesn't exist
-> test.t2 repair status Operation failed
-63a64
-> ERROR 42S02: Table 'test.t1' doesn't exist
-64a66
-> ERROR 42S02: Table 'test.t2' doesn't exist
-67,68c69,72
-< test.t1 repair status OK
-< test.t2 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-> test.t2 repair Error Table 'test.t2' doesn't exist
-> test.t2 repair status Operation failed
-71,73c75,76
-< ERROR HY000: Failed to read from the .par file
-< # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
-< # If you got a difference in error message, just add it to rdiff file
----
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: 0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY)
-76,78c79,80
-< test.t1 check Error Failed to read from the .par file
-< test.t1 check Error Incorrect information in file: './test/t1.frm'
-< test.t1 check error Corrupt
----
-> test.t1 check Error Table 'test.t1' doesn't exist
-> test.t1 check status Operation failed
-80,82c82,83
-< ERROR HY000: Failed to read from the .par file
-< # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
-< # If you got a difference in error message, just add it to rdiff file
----
-> ERROR 42S02: Table 'test.t1' doesn't exist
-> # ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: 0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY)
-85,87c86,87
-< test.t1 repair Error Failed to read from the .par file
-< test.t1 repair Error Incorrect information in file: './test/t1.frm'
-< test.t1 repair error Corrupt
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-88a89
-> ERROR 42S02: Unknown table 't1,t2'
-93a95,96
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-96c99,100
-< test.t1 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-97a102
-> ERROR 42S02: Table 'test.t1' doesn't exist
-100c105,106
-< test.t1 repair status OK
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-101a108
-> ERROR 42S02: Table 'test.t1' doesn't exist
-104,235c111,112
-< test.t1 repair status OK
-< t1#P#p0.MYD
-< t1#P#p0.MYI
-< t1#P#p1.MYD
-< t1#P#p1.MYI
-< t1.frm
-< t1.par
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1#P#p0.MYD
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check error Size of datafile is: 26 Should be: 39
-< test.t1 check error Partition p0 returned error
-< test.t1 check error Corrupt
-< SELECT * FROM t1;
-< a b
-< 8 h
-< 10 j
-< 7 g
-< 15 o
-< Warnings:
-< Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
-< Error 1194 Table 't1' is marked as crashed and should be repaired
-< Error 1034 Number of rows changed from 3 to 2
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1#P#p0.MYI
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check warning Size of datafile is: 39 Should be: 26
-< test.t1 check error Record-count is not ok; is 3 Should be: 2
-< test.t1 check warning Found 3 key parts. Should be: 2
-< test.t1 check error Partition p0 returned error
-< test.t1 check error Corrupt
-< SELECT * FROM t1;
-< a b
-< 8 h
-< 10 j
-< 14 n
-< 7 g
-< 15 o
-< 15 o
-< Warnings:
-< Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
-< Error 1194 Table 't1' is marked as crashed and should be repaired
-< Error 1034 Number of rows changed from 2 to 3
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1#P#p1.MYD
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check error Size of datafile is: 39 Should be: 52
-< test.t1 check error Partition p1 returned error
-< test.t1 check error Corrupt
-< SELECT * FROM t1;
-< a b
-< 8 h
-< 10 j
-< 14 n
-< 14 n
-< 7 g
-< 15 o
-< 15 o
-< Warnings:
-< Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
-< Error 1194 Table 't1' is marked as crashed and should be repaired
-< Error 1034 Number of rows changed from 4 to 3
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1#P#p1.MYI
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check warning Size of datafile is: 52 Should be: 39
-< test.t1 check error Record-count is not ok; is 4 Should be: 3
-< test.t1 check warning Found 4 key parts. Should be: 3
-< test.t1 check error Partition p1 returned error
-< test.t1 check error Corrupt
-< SELECT * FROM t1;
-< a b
-< 8 h
-< 10 j
-< 14 n
-< 14 n
-< 14 n
-< 7 g
-< 15 o
-< 15 o
-< 15 o
-< Warnings:
-< Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
-< Error 1194 Table 't1' is marked as crashed and should be repaired
-< Error 1034 Number of rows changed from 3 to 4
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1.par
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check status OK
-< SELECT * FROM t1;
-< a b
-< 8 h
-< 10 j
-< 14 n
-< 14 n
-< 14 n
-< 14 n
-< 7 g
-< 15 o
-< 15 o
-< 15 o
-< 15 o
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
----
-> test.t1 repair Error Table 'test.t1' doesn't exist
-> test.t1 repair status Operation failed
-236a114
-> ERROR 42S02: Unknown table 't1'
+--- repair_table.result 2013-01-23 01:35:44.388267080 +0400
++++ repair_table.reject 2013-01-23 03:16:26.468307847 +0400
+@@ -1,236 +1,114 @@
+ call mtr.add_suppression("Table '.*t1.*' is marked as crashed and should be repaired");
+ DROP TABLE IF EXISTS t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
+-CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY RANGE(a) (
+-PARTITION p0 VALUES LESS THAN (100),
+-PARTITION p1 VALUES LESS THAN MAXVALUE
+-);
+-INSERT INTO t2 (a,b) SELECT a, b FROM t1;
+-ALTER TABLE t1 REPAIR PARTITION p0;
+-Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-INSERT INTO t1 (a,b) VALUES (3,'c');
+-ALTER TABLE t1 REPAIR PARTITION NO_WRITE_TO_BINLOG p0, p1;
+-Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
+-ALTER TABLE t2 REPAIR PARTITION LOCAL p1;
+-Table Op Msg_type Msg_text
+-test.t2 repair status OK
+-INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+-ALTER TABLE t1 REPAIR PARTITION LOCAL ALL EXTENDED;
+-Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-INSERT INTO t1 (a,b) VALUES (10,'j');
+-ALTER TABLE t1 REPAIR PARTITION p1 QUICK USE_FRM;
+-Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-INSERT INTO t2 (a,b) VALUES (12,'l');
+-ALTER TABLE t2 REPAIR PARTITION NO_WRITE_TO_BINLOG ALL QUICK EXTENDED USE_FRM;
+-Table Op Msg_type Msg_text
+-test.t2 repair status OK
+-DROP TABLE t1, t2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE IF EXISTS t1,t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (3,'c');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (4,'d');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
++test.t2 repair Error Table 'test.t2' doesn't exist
++test.t2 repair status Operation failed
+ INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ REPAIR LOCAL TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
++test.t2 repair Error Table 'test.t2' doesn't exist
++test.t2 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (9,'i');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ REPAIR LOCAL TABLE t2, t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
+-test.t1 repair status OK
++test.t2 repair Error Table 'test.t2' doesn't exist
++test.t2 repair status Operation failed
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (10,'j');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (11,'k');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ REPAIR TABLE t1, t2 QUICK USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
++test.t2 repair Error Table 'test.t2' doesn't exist
++test.t2 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (12,'l');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ INSERT INTO t2 (a,b) VALUES (13,'m');
++ERROR 42S02: Table 'test.t2' doesn't exist
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
++test.t2 repair Error Table 'test.t2' doesn't exist
++test.t2 repair status Operation failed
+ FLUSH TABLE t1;
+ INSERT INTO t1 (a,b) VALUES (14,'n');
+-ERROR HY000: Failed to read from the .par file
+-# Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+-# If you got a difference in error message, just add it to rdiff file
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: 0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY)
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check Error Failed to read from the .par file
+-test.t1 check Error Incorrect information in file: './test/t1.frm'
+-test.t1 check error Corrupt
++test.t1 check Error Table 'test.t1' doesn't exist
++test.t1 check status Operation failed
+ SELECT a,b FROM t1;
+-ERROR HY000: Failed to read from the .par file
+-# Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+-# If you got a difference in error message, just add it to rdiff file
++ERROR 42S02: Table 'test.t1' doesn't exist
++# ERROR: Statement ended with errno 1146, errname ER_NO_SUCH_TABLE (expected results: 0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY)
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair Error Failed to read from the .par file
+-test.t1 repair Error Incorrect information in file: './test/t1.frm'
+-test.t1 repair error Corrupt
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ DROP TABLE t1, t2;
++ERROR 42S02: Unknown table 't1,t2'
+ call mtr.add_suppression("Got an error from thread_id=.*");
+ call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
+ call mtr.add_suppression(" '\..test.t1'");
+ call mtr.add_suppression("Couldn't repair table: test.t1");
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ REPAIR TABLE t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ INSERT INTO t1 (a,b) VALUES (10,'j');
++ERROR 42S02: Table 'test.t1' doesn't exist
+ REPAIR TABLE t1 USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-t1#P#p0.MYD
+-t1#P#p0.MYI
+-t1#P#p1.MYD
+-t1#P#p1.MYI
+-t1.frm
+-t1.par
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p0.MYD
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 26 Should be: 39
+-test.t1 check error Partition p0 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-7 g
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 3 to 2
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p0.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Size of datafile is: 39 Should be: 26
+-test.t1 check error Record-count is not ok; is 3 Should be: 2
+-test.t1 check warning Found 3 key parts. Should be: 2
+-test.t1 check error Partition p0 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-7 g
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 2 to 3
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p1.MYD
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 39 Should be: 52
+-test.t1 check error Partition p1 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-14 n
+-7 g
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 4 to 3
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p1.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Size of datafile is: 52 Should be: 39
+-test.t1 check error Record-count is not ok; is 4 Should be: 3
+-test.t1 check warning Found 4 key parts. Should be: 3
+-test.t1 check error Partition p1 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-14 n
+-14 n
+-7 g
+-15 o
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 3 to 4
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1.par
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check status OK
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-14 n
+-14 n
+-14 n
+-7 g
+-15 o
+-15 o
+-15 o
+-15 o
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
++test.t1 repair Error Table 'test.t1' doesn't exist
++test.t1 repair status Operation failed
+ DROP TABLE t1;
++ERROR 42S02: Unknown table 't1'
diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/truncate_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/truncate_table.rdiff
index 2c1364d52e1..01bf3702a3f 100644
--- a/storage/myisammrg/mysql-test/storage_engine/parts/truncate_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/parts/truncate_table.rdiff
@@ -1,100 +1,101 @@
-3,8c3,12
-< TRUNCATE TABLE t1;
-< INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
-< TRUNCATE TABLE t1;
-< SELECT * FROM t1;
-< a b
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-10,52c14,23
-< SHOW CREATE TABLE t1;
-< Table Create Table
-< t1 CREATE TABLE `t1` (
-< `a` int(11) NOT NULL AUTO_INCREMENT,
-< `c` char(8) DEFAULT NULL,
-< PRIMARY KEY (`a`)
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
-< /*!50100 PARTITION BY HASH (a)
-< PARTITIONS 2 */
-< INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
-< SHOW CREATE TABLE t1;
-< Table Create Table
-< t1 CREATE TABLE `t1` (
-< `a` int(11) NOT NULL AUTO_INCREMENT,
-< `c` char(8) DEFAULT NULL,
-< PRIMARY KEY (`a`)
-< ) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
-< /*!50100 PARTITION BY HASH (a)
-< PARTITIONS 2 */
-< TRUNCATE TABLE t1;
-< SHOW CREATE TABLE t1;
-< Table Create Table
-< t1 CREATE TABLE `t1` (
-< `a` int(11) NOT NULL AUTO_INCREMENT,
-< `c` char(8) DEFAULT NULL,
-< PRIMARY KEY (`a`)
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
-< /*!50100 PARTITION BY HASH (a)
-< PARTITIONS 2 */
-< INSERT INTO t1 (c) VALUES ('d');
-< SHOW CREATE TABLE t1;
-< Table Create Table
-< t1 CREATE TABLE `t1` (
-< `a` int(11) NOT NULL AUTO_INCREMENT,
-< `c` char(8) DEFAULT NULL,
-< PRIMARY KEY (`a`)
-< ) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
-< /*!50100 PARTITION BY HASH (a)
-< PARTITIONS 2 */
-< SELECT * FROM t1;
-< a c
-< 1 d
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/ KEY AUTO_INCREMENT, c CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or PK or auto-increment or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-54,68c25,34
-< INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
-< ALTER TABLE t1 TRUNCATE PARTITION p0;
-< SELECT * FROM t1;
-< a b
-< 1 a
-< 101 g
-< 3 c
-< EXPLAIN PARTITIONS SELECT * FROM t1;
-< id select_type table partitions type possible_keys key key_len ref rows Extra
-< 1 SIMPLE t1 p0,p1 # # # # # #
-< INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
-< ALTER TABLE t1 TRUNCATE PARTITION ALL;
-< SELECT * FROM t1;
-< a b
-< DROP TABLE t1;
----
-> ERROR HY000: Engine cannot be used in partitioned tables
-> # ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
-> # The statement|command finished with ER_PARTITION_MERGE_ERROR.
-> # Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- truncate_table.result 2013-01-22 22:05:05.246633000 +0400
++++ truncate_table.reject 2013-01-23 03:16:27.076300201 +0400
+@@ -1,68 +1,34 @@
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-TRUNCATE TABLE t1;
+-INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
+-TRUNCATE TABLE t1;
+-SELECT a,b FROM t1;
+-a b
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN> KEY AUTO_INCREMENT, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-SHOW CREATE TABLE t1;
+-Table Create Table
+-t1 CREATE TABLE `t1` (
+- `a` int(11) NOT NULL AUTO_INCREMENT,
+- `c` char(8) DEFAULT NULL,
+- PRIMARY KEY (`a`)
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+-/*!50100 PARTITION BY HASH (a)
+-PARTITIONS 2 */
+-INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
+-SHOW CREATE TABLE t1;
+-Table Create Table
+-t1 CREATE TABLE `t1` (
+- `a` int(11) NOT NULL AUTO_INCREMENT,
+- `c` char(8) DEFAULT NULL,
+- PRIMARY KEY (`a`)
+-) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+-/*!50100 PARTITION BY HASH (a)
+-PARTITIONS 2 */
+-TRUNCATE TABLE t1;
+-SHOW CREATE TABLE t1;
+-Table Create Table
+-t1 CREATE TABLE `t1` (
+- `a` int(11) NOT NULL AUTO_INCREMENT,
+- `c` char(8) DEFAULT NULL,
+- PRIMARY KEY (`a`)
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+-/*!50100 PARTITION BY HASH (a)
+-PARTITIONS 2 */
+-INSERT INTO t1 (c) VALUES ('d');
+-SHOW CREATE TABLE t1;
+-Table Create Table
+-t1 CREATE TABLE `t1` (
+- `a` int(11) NOT NULL AUTO_INCREMENT,
+- `c` char(8) DEFAULT NULL,
+- PRIMARY KEY (`a`)
+-) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
+-/*!50100 PARTITION BY HASH (a)
+-PARTITIONS 2 */
+-SELECT a,c FROM t1;
+-a c
+-1 d
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom indexed column options*/ KEY AUTO_INCREMENT, c CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or PK or auto-increment or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+-INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(2,'d'),(4,'e'),(100,'f'),(101,'g');
+-ALTER TABLE t1 TRUNCATE PARTITION p0;
+-SELECT a,b FROM t1;
+-a b
+-1 a
+-101 g
+-3 c
+-EXPLAIN PARTITIONS SELECT a,b FROM t1;
+-id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 p0,p1 # # # # # #
+-INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c');
+-ALTER TABLE t1 TRUNCATE PARTITION ALL;
+-SELECT a,b FROM t1;
+-a b
+-DROP TABLE t1;
++ERROR HY000: Engine cannot be used in partitioned tables
++# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b CHAR(8) /*!*/ /*Custom column options*/) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST PARTITION BY HASH(a) PARTITIONS 2 ]
++# The statement|command finished with ER_PARTITION_MERGE_ERROR.
++# Partitions or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
diff --git a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
index 180647323bc..9ff8f906511 100644
--- a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff
@@ -1,103 +1,131 @@
-7c7
-< test.t1 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-12,13c12,13
-< test.t1 repair status OK
-< test.t2 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-> test.t2 repair note The storage engine for the table doesn't support repair
-17c17
-< test.t2 repair status OK
----
-> test.t2 repair note The storage engine for the table doesn't support repair
-22,23c22,23
-< test.t2 repair status OK
-< test.t1 repair status OK
----
-> test.t2 repair note The storage engine for the table doesn't support repair
-> test.t1 repair note The storage engine for the table doesn't support repair
-28,31c28,29
-< test.t1 repair warning Number of rows changed from 0 to 6
-< test.t1 repair status OK
-< test.t2 repair warning Number of rows changed from 0 to 5
-< test.t2 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-> test.t2 repair note The storage engine for the table doesn't support repair
-36,39c34,35
-< test.t1 repair warning Number of rows changed from 0 to 7
-< test.t1 repair status OK
-< test.t2 repair warning Number of rows changed from 0 to 6
-< test.t2 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-> test.t2 repair note The storage engine for the table doesn't support repair
-42c38
-< ERROR HY000: Incorrect file format 't1'
----
-> ERROR HY000: Table 't1' is read only
-47,48c43
-< test.t1 check Error Incorrect file format 't1'
-< test.t1 check error Corrupt
----
-> test.t1 check status OK
-50c45
-< ERROR HY000: Incorrect file format 't1'
----
-> a b
-55,56c50
-< test.t1 repair Error Incorrect file format 't1'
-< test.t1 repair error Corrupt
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-65c59
-< test.t1 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-69c63
-< test.t1 repair status OK
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-73,76c67,68
-< test.t1 repair warning Number of rows changed from 0 to 3
-< test.t1 repair status OK
-< t1.MYD
-< t1.MYI
----
-> test.t1 repair note The storage engine for the table doesn't support repair
-> t1.MRG
-82c74
-< Restoring <DATADIR>/test/t1.MYD
----
-> Restoring <DATADIR>/test/t1.MRG
-85,86c77
-< test.t1 check error Size of datafile is: 39 Should be: 65
-< test.t1 check error Corrupt
----
-> test.t1 check status OK
-88,103c79,84
-< ERROR HY000: Incorrect key file for table 't1'; try to repair it
-< # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
-< # If you got a difference in error message, just add it to rdiff file
-< INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
-< ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
-< # Statement ended with one of expected results (0,144).
-< # If you got a difference in error message, just add it to rdiff file
-< FLUSH TABLE t1;
-< Restoring <DATADIR>/test/t1.MYI
-< CHECK TABLE t1;
-< Table Op Msg_type Msg_text
-< test.t1 check warning Table is marked as crashed and last repair failed
-< test.t1 check error Size of datafile is: 39 Should be: 65
-< test.t1 check error Corrupt
-< SELECT * FROM t1;
-< ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
----
-> a b
-> 7 g
-> 8 h
-> 10 j
-> 14 n
-> 15 o
+--- repair_table.result 2013-01-23 01:26:05.995538460 +0400
++++ repair_table.reject 2013-01-23 02:50:55.035560564 +0400
+@@ -4,56 +4,50 @@
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (3,'c');
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
+ REPAIR LOCAL TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ INSERT INTO t2 (a,b) VALUES (9,'i');
+ REPAIR LOCAL TABLE t2, t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
+-test.t1 repair status OK
++test.t2 repair note The storage engine for the table doesn't support repair
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ INSERT INTO t2 (a,b) VALUES (11,'k');
+ REPAIR TABLE t1, t2 QUICK USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 6
+-test.t1 repair status OK
+-test.t2 repair warning Number of rows changed from 0 to 5
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (12,'l');
+ INSERT INTO t2 (a,b) VALUES (13,'m');
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 7
+-test.t1 repair status OK
+-test.t2 repair warning Number of rows changed from 0 to 6
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ FLUSH TABLE t1;
+ INSERT INTO t1 (a,b) VALUES (14,'n');
+-ERROR HY000: Incorrect file format 't1'
++ERROR HY000: Table 't1' is read only
+ # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+ # If you got a difference in error message, just add it to rdiff file
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check Error Incorrect file format 't1'
+-test.t1 check error Corrupt
++test.t1 check status OK
+ SELECT a,b FROM t1;
+-ERROR HY000: Incorrect file format 't1'
++a b
+ # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+ # If you got a difference in error message, just add it to rdiff file
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair Error Incorrect file format 't1'
+-test.t1 repair error Corrupt
++test.t1 repair note The storage engine for the table doesn't support repair
+ DROP TABLE t1, t2;
+ call mtr.add_suppression("Got an error from thread_id=.*");
+ call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
+@@ -62,45 +56,32 @@
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ REPAIR TABLE t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ REPAIR TABLE t1 USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 3
+-test.t1 repair status OK
+-t1.MYD
+-t1.MYI
++test.t1 repair note The storage engine for the table doesn't support repair
++t1.MRG
+ t1.frm
+ INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+ # Statement ended with one of expected results (0,144).
+ # If you got a difference in error message, just add it to rdiff file
+ FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1.MYD
++Restoring <DATADIR>/test/t1.MRG
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 39 Should be: 65
+-test.t1 check error Corrupt
++test.t1 check status OK
+ SELECT a,b FROM t1;
+-ERROR HY000: Incorrect key file for table 't1'; try to repair it
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Table is marked as crashed and last repair failed
+-test.t1 check error Size of datafile is: 39 Should be: 65
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
++a b
++7 g
++8 h
++10 j
++14 n
++15 o
+ # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+ # If you got a difference in error message, just add it to rdiff file
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/show_engine.rdiff b/storage/myisammrg/mysql-test/storage_engine/show_engine.rdiff
index e7c9b0176b6..e78e6fdad53 100644
--- a/storage/myisammrg/mysql-test/storage_engine/show_engine.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/show_engine.rdiff
@@ -1,2 +1,10 @@
-7d6
-< <STORAGE_ENGINE> ### Engine status, can be long and changeable ###
+--- show_engine.result 2013-01-22 22:05:05.246633000 +0400
++++ show_engine.reject 2013-01-23 02:50:56.871537482 +0400
+@@ -4,7 +4,6 @@
+ # volatile data (timestamps, memory info, etc.)
+ SHOW ENGINE <STORAGE_ENGINE> STATUS;
+ Type Name Status
+-<STORAGE_ENGINE> ### Engine status, can be long and changeable ###
+ # For SHOW MUTEX even the number of lines is volatile, so the result logging is disabled,
+ # the test only checks that the command does not produce any errors
+ SHOW ENGINE <STORAGE_ENGINE> MUTEX;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_ai.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_ai.rdiff
index bca6fa60d13..4de7e81ffba 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_ai.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_ai.rdiff
@@ -1,8 +1,16 @@
-7c7
-< ) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=10 DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-13c13
-< ) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=100 DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_ai.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_ai.reject 2013-01-23 02:50:57.547528984 +0400
+@@ -4,11 +4,11 @@
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=10 DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 AUTO_INCREMENT=100;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> AUTO_INCREMENT=100 DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_avg_row_length.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_avg_row_length.rdiff
index 7a3ac54fd3e..2632fabf89d 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_avg_row_length.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_avg_row_length.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_avg_row_length.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_avg_row_length.reject 2013-01-23 02:50:58.123521742 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 AVG_ROW_LENGTH=30000000;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_checksum.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_checksum.rdiff
index ecb3fadb479..baad32dd4db 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_checksum.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_checksum.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CHECKSUM=1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CHECKSUM=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_checksum.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_checksum.reject 2013-01-23 02:50:58.739513998 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CHECKSUM=1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CHECKSUM=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 CHECKSUM=0;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_connection.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_connection.rdiff
index d3ebd87ad1e..3dc06fb2704 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_connection.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_connection.rdiff
@@ -1,8 +1,19 @@
-13c13
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CONNECTION='test_connection'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) CONNECTION='test_connection'
-20c20
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CONNECTION='test_connection2'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) CONNECTION='test_connection2'
+--- tbl_opt_connection.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_connection.reject 2013-01-23 02:50:59.335506506 +0400
+@@ -10,14 +10,14 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CONNECTION='test_connection'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) CONNECTION='test_connection'
+ ALTER TABLE t1 CONNECTION='test_connection2';
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 CONNECTION='test_connection2'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) CONNECTION='test_connection2'
+ DROP TABLE t1;
+ DROP SERVER test_connection;
+ DROP SERVER test_connection2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
index 3c15e10d1f1..e6055278b3c 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
@@ -1,8 +1,18 @@
-7c7
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_data_index_dir.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_data_index_dir.reject 2013-01-23 02:50:59.951498762 +0400
+@@ -4,7 +4,7 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ SHOW CREATE TABLE t1;
+@@ -12,5 +12,5 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_delay_key_write.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_delay_key_write.rdiff
index 5723e425b4d..2c2e40fa4b7 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_delay_key_write.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_delay_key_write.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_delay_key_write.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_delay_key_write.reject 2013-01-23 02:51:00.591490716 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 DELAY_KEY_WRITE=0;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_insert_method.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_insert_method.rdiff
index 2ced7647483..f5dc536c7ee 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_insert_method.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_insert_method.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`mrg`.`t1`)
+--- tbl_opt_insert_method.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_insert_method.reject 2013-01-23 02:51:01.211482922 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 INSERT_METHOD=NO;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_key_block_size.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_key_block_size.rdiff
index 8378f04ebcb..be90252f0b9 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_key_block_size.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_key_block_size.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_key_block_size.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_key_block_size.reject 2013-01-23 02:51:01.787475681 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 KEY_BLOCK_SIZE=1;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_max_rows.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_max_rows.rdiff
index f89147826c6..3eebf8cce9b 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_max_rows.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_max_rows.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=10000000
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=10000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=30000000
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=30000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_max_rows.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_max_rows.reject 2013-01-23 02:51:02.403467936 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=10000000
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=10000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 MAX_ROWS=30000000;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=30000000
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MAX_ROWS=30000000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_min_rows.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_min_rows.rdiff
index 3ce28480dcf..48c7124ca36 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_min_rows.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_min_rows.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=10000
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=10000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_min_rows.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_min_rows.reject 2013-01-23 02:51:02.983460644 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 MIN_ROWS=10000;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=10000
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 MIN_ROWS=10000 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_pack_keys.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_pack_keys.rdiff
index 246c7397a96..ab16cbcb0dc 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_pack_keys.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_pack_keys.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=0
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=0 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_pack_keys.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_pack_keys.reject 2013-01-23 02:51:03.563453353 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 PACK_KEYS=0;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=0
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 PACK_KEYS=0 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_password.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_password.rdiff
index b1ef20c6f30..dc8303682db 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_password.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_password.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_password.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_password.reject 2013-01-23 02:51:04.155445910 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 PASSWORD='new_password';
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
index 9c72c7c06ba..f7e0905d4e7 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff
@@ -1,8 +1,17 @@
-8c8
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
-15c15
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_opt_row_format.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_row_format.reject 2013-01-23 02:51:04.743438518 +0400
+@@ -5,12 +5,12 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_union.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_union.rdiff
index f77753f4d6b..e4e098a1b94 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_union.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_union.rdiff
@@ -1,8 +1,16 @@
-7c7
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`)
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`child1`)
-13c13
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`,`child2`)
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`child1`,`child2`)
+--- tbl_opt_union.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_opt_union.reject 2013-01-23 02:51:05.375430573 +0400
+@@ -4,11 +4,11 @@
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`child1`)
+ ALTER TABLE t1 UNION = (child1,child2);
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`,`child2`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`child1`,`child2`)
+ DROP TABLE t1, child1, child2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_standard_opts.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_standard_opts.rdiff
index 9b7ffc6af76..a929b6df54b 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_standard_opts.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_standard_opts.rdiff
@@ -1,8 +1,19 @@
-11c11
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 COMMENT='standard table options'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) COMMENT='standard table options'
-18c18
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 COMMENT='table altered'
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) COMMENT='table altered'
+--- tbl_standard_opts.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_standard_opts.reject 2013-01-23 02:51:05.991422829 +0400
+@@ -8,14 +8,14 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 COMMENT='standard table options'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) COMMENT='standard table options'
+ ALTER TABLE t1 COMMENT = 'table altered';
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 COMMENT='table altered'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=utf8 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) COMMENT='table altered'
+ ALTER TABLE t1 ENGINE=MEMORY;
+ SHOW CREATE TABLE t1;
+ Table Create Table
diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_temporary.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_temporary.rdiff
index 45a229c98eb..d2c7d4f2836 100644
--- a/storage/myisammrg/mysql-test/storage_engine/tbl_temporary.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/tbl_temporary.rdiff
@@ -1,4 +1,10 @@
-9c9
-< ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
----
-> ) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+--- tbl_temporary.result 2013-01-22 22:05:05.246633000 +0400
++++ tbl_temporary.reject 2013-01-23 02:51:06.599415185 +0400
+@@ -6,6 +6,6 @@
+ t1 CREATE TEMPORARY TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`)
+ DROP TEMPORARY TABLE t1;
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
index 3a89f730540..e4de81bae70 100644
--- a/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/truncate_table.rdiff
@@ -1,35 +1,48 @@
-12c12
-< t1 # # # # # # # # # 1 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-16c16
-< t1 # # # # # # # # # 4 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-20c20
-< t1 # # # # # # # # # 1 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-24c24
-< t1 # # # # # # # # # 2 # # # # # # #
----
-> t1 # # # # # # # # # 0 # # # # # # #
-32,40c32,39
-< HANDLER h1 READ FIRST;
-< a b
-< 1 a
-< TRUNCATE TABLE t1;
-< HANDLER h1 READ NEXT;
-< ERROR 42S02: Unknown table 'h1' in HANDLER
-< HANDLER t1 OPEN AS h2;
-< HANDLER h2 READ FIRST;
-< a b
----
-> ERROR HY000: Table storage engine for 'h1' doesn't have this option
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command finished with ER_ILLEGAL_HA.
-> # HANDLER or the syntax or the mix could be unsupported.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- truncate_table.result 2013-01-22 22:05:05.246633000 +0400
++++ truncate_table.reject 2013-01-23 02:51:07.507403770 +0400
+@@ -9,19 +9,19 @@
+ CREATE TABLE t1 (a <INT_COLUMN> KEY AUTO_INCREMENT, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW TABLE STATUS LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 1 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (c) VALUES ('a'),('b'),('c');
+ SHOW TABLE STATUS LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 4 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ TRUNCATE TABLE t1;
+ SHOW TABLE STATUS LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 1 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ INSERT INTO t1 (c) VALUES ('d');
+ SHOW TABLE STATUS LIKE 't1';
+ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+-t1 # # # # # # # # # 2 # # # # # # #
++t1 # # # # # # # # # 0 # # # # # # #
+ SELECT a,c FROM t1;
+ a c
+ 1 d
+@@ -29,13 +29,12 @@
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+ HANDLER t1 OPEN AS h1;
+-HANDLER h1 READ FIRST;
+-a b
+-1 a
+-TRUNCATE TABLE t1;
+-HANDLER h1 READ NEXT;
+-ERROR 42S02: Unknown table 'h1' in HANDLER
+-HANDLER t1 OPEN AS h2;
+-HANDLER h2 READ FIRST;
+-a b
++ERROR HY000: Table storage engine for 'h1' doesn't have this option
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_ILLEGAL_HA.
++# HANDLER or the syntax or the mix could be unsupported.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
index 018b28bd24c..94cfa74f384 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_repeatable_read.rdiff
@@ -1,9 +1,20 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-13a20
-> 1
+--- cons_snapshot_repeatable_read.result 2013-01-22 22:05:05.246633000 +0400
++++ cons_snapshot_repeatable_read.reject 2013-01-23 03:22:34.255684132 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ connect con2,localhost,root,,;
+@@ -11,6 +17,7 @@
+ # If consistent read works on this isolation level (REPEATABLE READ), the following SELECT should not return the value we inserted (1)
+ SELECT a FROM t1;
+ a
++1
+ COMMIT;
+ connection default;
+ disconnect con1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
index 018b28bd24c..a9b9ba7f94b 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
@@ -1,9 +1,20 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-13a20
-> 1
+--- cons_snapshot_serializable.result 2013-01-22 22:05:05.246633000 +0400
++++ cons_snapshot_serializable.reject 2013-01-23 03:22:34.847676690 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ connect con2,localhost,root,,;
+@@ -11,6 +17,7 @@
+ # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
+ SELECT a FROM t1;
+ a
++1
+ COMMIT;
+ connection default;
+ disconnect con1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/delete.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/delete.rdiff
index 45a3e5fb52d..e4249478c76 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/delete.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/delete.rdiff
@@ -1,34 +1,50 @@
-0a1,12
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file (recommended), or add the test to disabled.def.
-> # If savepoints should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-48a61,62
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-51,64d64
-< 10000 foobar
-< 10000 foobar
-< 2 b
-< 2 b
-< 4 d
-< 4 d
-< 5 e
-< 5 e
-< 6 f
-< 6 f
-< 7 g
-< 7 g
-< 8 h
-< 8 h
-70a71,72
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
+--- delete.result 2013-01-22 22:05:05.246633000 +0400
++++ delete.reject 2013-01-23 03:22:35.419669500 +0400
+@@ -1,3 +1,15 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file (recommended), or add the test to disabled.def.
++# If savepoints should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar');
+@@ -46,27 +58,17 @@
+ DELETE FROM t1;
+ RELEASE SAVEPOINT spt1;
+ ROLLBACK;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ SELECT a,b FROM t1;
+ a b
+-10000 foobar
+-10000 foobar
+-2 b
+-2 b
+-4 d
+-4 d
+-5 e
+-5 e
+-6 f
+-6 f
+-7 g
+-7 g
+-8 h
+-8 h
+ BEGIN;
+ DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1;
+ SAVEPOINT spt1;
+ DELETE FROM t1;
+ INSERT INTO t1 (a,b) VALUES (1,'a');
+ ROLLBACK TO SAVEPOINT spt1;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ COMMIT;
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/insert.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/insert.rdiff
index 69981f90e13..d5e99b6d7dc 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/insert.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/insert.rdiff
@@ -1,32 +1,65 @@
-0a1,12
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file (recommended), or add the test to disabled.def.
-> # If savepoints should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-23a36,37
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-25a40
-> 0 test
-33a49,50
-> NULL NULL
-> NULL NULL
-39a57,58
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-43a63
-> 0 test
-49a70
-> 11 f
-54a76,78
-> NULL NULL
-> NULL NULL
-> NULL NULL
+--- insert.result 2013-01-22 22:05:05.246633000 +0400
++++ insert.reject 2013-01-23 03:22:35.987662359 +0400
+@@ -1,3 +1,15 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file (recommended), or add the test to disabled.def.
++# If savepoints should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ BEGIN;
+@@ -21,8 +33,11 @@
+ RELEASE SAVEPOINT spt1;
+ INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT);
+ ROLLBACK;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ SELECT a,b FROM t1;
+ a b
++0 test
+ 1 a
+ 10 foo
+ 100 foo
+@@ -31,25 +46,34 @@
+ 3 c
+ 4 d
+ 5 e
++NULL NULL
++NULL NULL
+ BEGIN;
+ INSERT t1 (a) VALUE (10),(20);
+ SAVEPOINT spt1;
+ INSERT INTO t1 SET a = 11, b = 'f';
+ INSERT t1 SET b = DEFAULT;
+ ROLLBACK TO SAVEPOINT spt1;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ INSERT INTO t1 (b,a) VALUES ('test1',10);
+ COMMIT;
+ SELECT a,b FROM t1;
+ a b
++0 test
+ 1 a
+ 10 NULL
+ 10 foo
+ 10 test1
+ 100 foo
+ 11 abc
++11 f
+ 2 b
+ 20 NULL
+ 3 c
+ 4 d
+ 5 e
++NULL NULL
++NULL NULL
++NULL NULL
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/level_read_committed.rdiff
index 0837c74f5ff..9e7c340c524 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/level_read_committed.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/level_read_committed.rdiff
@@ -1,44 +1,94 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-18a25
-> 1
-25a33,34
-> 1
-> 2
-30a40,43
-> 1
-> 101
-> 102
-> 2
-34a48,49
-> 101
-> 102
-39a55,56
-> 101
-> 102
-44a62,63
-> 101
-> 102
-51a71,72
-> 101
-> 102
-54a76,77
-> 301
-> 302
-58a82,83
-> 101
-> 102
-61a87,88
-> 301
-> 302
-65a93,94
-> 101
-> 102
-68a98,99
-> 301
-> 302
+--- level_read_committed.result 2013-01-22 22:05:05.246633000 +0400
++++ level_read_committed.reject 2013-01-23 03:22:36.603654615 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+@@ -16,6 +22,7 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -23,25 +30,37 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ connection con1;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -49,23 +68,35 @@
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
index bd9569267e5..d44e4aa7a12 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
@@ -1,7 +1,12 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
+--- level_read_uncommitted.result 2013-01-22 22:05:05.246633000 +0400
++++ level_read_uncommitted.reject 2013-01-23 03:22:37.263646318 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/level_repeatable_read.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
index 82f7c5c5ba6..b24376a9c28 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/level_repeatable_read.rdiff
@@ -1,53 +1,96 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-18a25
-> 1
-25a33,34
-> 1
-> 2
-27,28c36
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-31a40,43
-> 1
-> 101
-> 102
-> 2
-35a48,49
-> 101
-> 102
-40a55,56
-> 101
-> 102
-44a61,64
-> 1
-> 101
-> 102
-> 2
-49a70,73
-> 1
-> 101
-> 102
-> 2
-51a76,77
-> 301
-> 302
-55a82,83
-> 101
-> 102
-58a87,88
-> 301
-> 302
-62a93,94
-> 101
-> 102
-65a98,99
-> 301
-> 302
+--- level_repeatable_read.result 2013-01-22 22:05:05.246633000 +0400
++++ level_repeatable_read.reject 2013-01-23 03:22:37.867638724 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+@@ -16,6 +22,7 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+ # WARNING: Statement ended with errno 0, errname ''.
+@@ -23,46 +30,73 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ 201
+ 202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
+ 1
++101
++102
+ 2
+ 201
+ 202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/level_serializable.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/level_serializable.rdiff
index 3924b3784a9..3567e718459 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/level_serializable.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/level_serializable.rdiff
@@ -1,69 +1,103 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-14,15c20
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-19a25
-> 1
-22,23c28
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-< # WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
----
-> # WARNING: Statement ended with errno 0, errname ''.
-27a33,34
-> 1
-> 2
-32a40,43
-> 1
-> 101
-> 102
-> 2
-35a47,50
-> 1
-> 101
-> 102
-> 2
-38a54,57
-> 1
-> 101
-> 102
-> 2
-41a61,64
-> 1
-> 101
-> 102
-> 2
-46a70,77
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
-49a81,88
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
-52a92,99
-> 1
-> 101
-> 102
-> 2
-> 201
-> 202
-> 301
-> 302
+--- level_serializable.result 2013-01-22 22:05:05.246633000 +0400
++++ level_serializable.reject 2013-01-23 03:22:38.471631132 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+@@ -11,45 +17,86 @@
+ connection con2;
+ BEGIN;
+ INSERT INTO t1 (a) VALUES(1);
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+-# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'.
++# WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ INSERT INTO t1 (a) SELECT a+100 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con2;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ COMMIT;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
+ INSERT INTO t1 (a) SELECT a+200 FROM t1;
+ # WARNING: Statement ended with errno 0, errname ''.
+ # If it differs from the result file, it might indicate a problem.
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ COMMIT;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ connection con2;
+ SELECT a FROM t1;
+ a
++1
++101
++102
++2
++201
++202
++301
++302
+ connection default;
+ disconnect con1;
+ disconnect con2;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/select_for_update.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/select_for_update.rdiff
index f8ffe67586d..bad014d87ed 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/select_for_update.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/select_for_update.rdiff
@@ -1,40 +1,50 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-17c23,33
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
----
-> a b
-> 1 a
-> 3 a
-> # ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # SELECT .. FOR UPDATE or LOCK IN SHARE MODE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-19c35,42
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
----
-> # ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # UPDATE or SELECT .. FOR UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-24c47
-< 1 a
----
-> 1 c
-26c49
-< 3 a
----
-> 3 c
+--- select_for_update.result 2013-01-22 22:05:05.246633000 +0400
++++ select_for_update.reject 2013-01-23 03:22:39.123622935 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+@@ -14,16 +20,33 @@
+ 1 a
+ 3 a
+ SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE;
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
++a b
++1 a
++3 a
++# ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# SELECT .. FOR UPDATE or LOCK IN SHARE MODE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ UPDATE t1 SET b='c' WHERE b='a';
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
++# ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# UPDATE or SELECT .. FOR UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ connection con1;
+ COMMIT;
+ SELECT a,b FROM t1;
+ a b
+-1 a
++1 c
+ 2 b
+-3 a
++3 c
+ disconnect con1;
+ connection default;
+ UPDATE t1 SET b='c' WHERE b='a';
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
index e316993830a..db3eec1bc24 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/select_lock_in_share_mode.rdiff
@@ -1,26 +1,37 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-21c27,34
-< ERROR HY000: Lock wait timeout exceeded; try restarting transaction
----
-> # ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
-> # ------------ UNEXPECTED RESULT ------------
-> # The statement|command succeeded unexpectedly.
-> # LOCK IN SHARE MODE or UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-26c39
-< 1 a
----
-> 1 c
-28c41
-< 3 a
----
-> 3 c
+--- select_lock_in_share_mode.result 2013-01-22 22:05:05.246633000 +0400
++++ select_lock_in_share_mode.reject 2013-01-23 03:22:39.739615191 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a');
+@@ -18,14 +24,21 @@
+ 1 a
+ 3 a
+ UPDATE t1 SET b='c' WHERE b='a';
+-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
++# ERROR: Statement succeeded (expected results: ER_LOCK_WAIT_TIMEOUT)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command succeeded unexpectedly.
++# LOCK IN SHARE MODE or UPDATE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ connection con1;
+ COMMIT;
+ SELECT a,b FROM t1;
+ a b
+-1 a
++1 c
+ 2 b
+-3 a
++3 c
+ disconnect con1;
+ connection default;
+ UPDATE t1 SET b='c' WHERE b='a';
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/update.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/update.rdiff
index 7ad463053eb..baac054766b 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/update.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/update.rdiff
@@ -1,41 +1,58 @@
-0a1,12
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support transactions.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If transactions should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file (recommended), or add the test to disabled.def.
-> # If savepoints should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-26a39,40
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-31a46,47
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-38,47c54,63
-< 51 update2
-< 51 update2
-< 52 update2
-< 52 update2
-< 53 update2
-< 53 update2
-< 54 update2
-< 54 update2
-< 55 update2
-< 55 update2
----
-> 51
-> 51
-> 52
-> 52
-> 53
-> 53
-> 54
-> 54
-> 55
-> 55
+--- update.result 2013-01-22 22:05:05.246633000 +0400
++++ update.reject 2013-01-23 03:22:40.355607446 +0400
+@@ -1,3 +1,15 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support transactions.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If transactions should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support savepoints.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file (recommended), or add the test to disabled.def.
++# If savepoints should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar');
+@@ -24,25 +36,29 @@
+ UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3;
+ UPDATE t1 SET b = '';
+ ROLLBACK;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ BEGIN;
+ UPDATE t1 SET b = 'update2' WHERE a <= 100;
+ SAVEPOINT spt1;
+ UPDATE t1 SET b = '';
+ ROLLBACK TO SAVEPOINT spt1;
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ UPDATE t1 SET b = 'upd' WHERE a = 10050;
+ COMMIT;
+ SELECT a,b FROM t1;
+ a b
+ 10050 upd
+ 10050 upd
+-51 update2
+-51 update2
+-52 update2
+-52 update2
+-53 update2
+-53 update2
+-54 update2
+-54 update2
+-55 update2
+-55 update2
++51
++51
++52
++52
++53
++53
++54
++54
++55
++55
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/xa.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/xa.rdiff
index ee7c2a984be..a491287337d 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/xa.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/xa.rdiff
@@ -1,34 +1,89 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support XA.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If XA should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-11a18
-> 1
-17a25,26
-> 1
-> 2
-22a32,33
-> 1
-> 2
-40a52
-> 3
-48a61,62
-> 3
-> 4
-67a82
-> 5
-77a93,94
-> 5
-> 6
-86a104,105
-> 5
-> 6
-88a108,109
-> Warnings:
-> Warning 1196 Some non-transactional changed tables couldn't be rolled back
-95a117,118
-> 5
-> 6
+--- xa.result 2013-01-22 22:05:05.246633000 +0400
++++ xa.reject 2013-01-23 03:22:41.047598747 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support XA.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If XA should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ DROP TABLE IF EXISTS t1;
+ connect con1,localhost,root,,;
+ connect con2,localhost,root,,;
+@@ -9,17 +15,22 @@
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
+ connection con2;
+ INSERT INTO t1 (a) VALUES (2);
+ XA END 'xa1';
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ connection con2;
+ XA PREPARE 'xa1';
+ connection con1;
+ SELECT a FROM t1;
+ a
++1
++2
+ connection con2;
+ XA RECOVER;
+ formatID gtrid_length bqual_length data
+@@ -38,6 +49,7 @@
+ a
+ 1
+ 2
++3
+ connection con2;
+ INSERT INTO t1 (a) VALUES (4);
+ XA END 'xa2';
+@@ -46,6 +58,8 @@
+ a
+ 1
+ 2
++3
++4
+ connection con2;
+ XA COMMIT 'xa2' ONE PHASE;
+ connection con1;
+@@ -65,6 +79,7 @@
+ 2
+ 3
+ 4
++5
+ connection con2;
+ INSERT INTO t1 (a) VALUES (6);
+ XA END 'xa3';
+@@ -75,6 +90,8 @@
+ 2
+ 3
+ 4
++5
++6
+ connection con2;
+ XA PREPARE 'xa3';
+ connection con1;
+@@ -84,8 +101,12 @@
+ 2
+ 3
+ 4
++5
++6
+ connection con2;
+ XA ROLLBACK 'xa3';
++Warnings:
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ connection con1;
+ SELECT a FROM t1;
+ a
+@@ -93,4 +114,6 @@
+ 2
+ 3
+ 4
++5
++6
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff b/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff
index 362e3a8914a..8dc888a1d58 100644
--- a/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/trx/xa_recovery.rdiff
@@ -1,22 +1,33 @@
-0a1,6
-> # -- WARNING ----------------------------------------------------------------
-> # According to I_S.ENGINES, MRG_MYISAM does not support XA.
-> # If it is true, the test will most likely fail; you can
-> # either create an rdiff file, or add the test to disabled.def.
-> # If XA should be supported, check the data in Information Schema.
-> # ---------------------------------------------------------------------------
-21,22d26
-< 1 3 0 xa1
-< 1 3 0 xa2
-23a28
-> ERROR XAE04: XAER_NOTA: Unknown XID
-24a30
-> ERROR XAE04: XAER_NOTA: Unknown XID
-26a33,34
-> 1
-> 2
-28a37,40
-> Warnings:
-> Error 145 Table './mrg/t1' is marked as crashed and should be repaired
-> Error 1194 Table 't1' is marked as crashed and should be repaired
-> Error 1034 1 client is using or hasn't closed the table properly
+--- xa_recovery.result 2013-01-22 22:05:05.246633000 +0400
++++ xa_recovery.reject 2013-01-23 03:22:43.247571090 +0400
+@@ -1,3 +1,9 @@
++# -- WARNING ----------------------------------------------------------------
++# According to I_S.ENGINES, MRG_MYISAM does not support XA.
++# If it is true, the test will most likely fail; you can
++# either create an rdiff file, or add the test to disabled.def.
++# If XA should be supported, check the data in Information Schema.
++# ---------------------------------------------------------------------------
+ call mtr.add_suppression("Found 2 prepared XA transactions");
+ FLUSH TABLES;
+ DROP TABLE IF EXISTS t1;
+@@ -18,12 +24,18 @@
+ connection default;
+ XA RECOVER;
+ formatID gtrid_length bqual_length data
+-1 3 0 xa1
+-1 3 0 xa2
+ XA ROLLBACK 'xa1';
++ERROR XAE04: XAER_NOTA: Unknown XID
+ XA COMMIT 'xa2';
++ERROR XAE04: XAER_NOTA: Unknown XID
+ SELECT a FROM t1;
+ a
++1
++2
+ 3
+ 4
++Warnings:
++Error 145 Table './mrg/t1' is marked as crashed and should be repaired
++Error 1194 Table 't1' is marked as crashed and should be repaired
++Error 1034 1 client is using or hasn't closed the table properly
+ DROP TABLE t1;
diff --git a/storage/myisammrg/mysql-test/storage_engine/type_char_indexes.rdiff b/storage/myisammrg/mysql-test/storage_engine/type_char_indexes.rdiff
index a0a418e0bcc..c80d4acced5 100644
--- a/storage/myisammrg/mysql-test/storage_engine/type_char_indexes.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/type_char_indexes.rdiff
@@ -1,8 +1,20 @@
-101c101
-< # # # range c_v c_v # # # Using index condition
----
-> # # # ALL c_v NULL # # # Using where
-138c138
-< # # # range # v16 # # # #
----
-> # # # ALL # NULL # # # #
+--- type_char_indexes.result 2013-01-22 22:05:05.246633000 +0400
++++ type_char_indexes.reject 2013-01-23 02:51:10.055371738 +0400
+@@ -98,7 +98,7 @@
+ SET SESSION optimizer_switch = 'engine_condition_pushdown=on';
+ EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
+ id select_type table type possible_keys key key_len ref rows Extra
+-# # # range c_v c_v # # # Using index condition
++# # # ALL c_v NULL # # # Using where
+ SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a';
+ c c20 v16 v128
+ b char3 varchar1a varchar1b
+@@ -135,7 +135,7 @@
+ r3a
+ EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ id select_type table type possible_keys key key_len ref rows Extra
+-# # # range # v16 # # # #
++# # # ALL # NULL # # # #
+ SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ c c20 v16 v128
+ a char1 varchar1a varchar1b
diff --git a/storage/myisammrg/mysql-test/storage_engine/type_float_indexes.rdiff b/storage/myisammrg/mysql-test/storage_engine/type_float_indexes.rdiff
index 640e1050a99..eb4da4db951 100644
--- a/storage/myisammrg/mysql-test/storage_engine/type_float_indexes.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/type_float_indexes.rdiff
@@ -1,4 +1,11 @@
-63c63
-< # # # # # d # # # #
----
-> # # # # # NULL # # # #
+--- type_float_indexes.result 2013-01-22 22:05:05.246633000 +0400
++++ type_float_indexes.reject 2013-01-23 02:51:13.059333973 +0400
+@@ -60,7 +60,7 @@
+ ALTER TABLE t1 ADD UNIQUE KEY(d);
+ EXPLAIN SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d;
+ id select_type table type possible_keys key key_len ref rows Extra
+-# # # # # d # # # #
++# # # # # NULL # # # #
+ SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d;
+ d
+ 1.2345
diff --git a/storage/myisammrg/mysql-test/storage_engine/type_spatial.rdiff b/storage/myisammrg/mysql-test/storage_engine/type_spatial.rdiff
index a4d7d4390b8..dbf29fb8d00 100644
--- a/storage/myisammrg/mysql-test/storage_engine/type_spatial.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/type_spatial.rdiff
@@ -1,706 +1,712 @@
-5,698c5,14
-< CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE gis_ogs;
-< CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shore POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerlines MULTILINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT,
-< footprint POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< type CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shores MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
-< neatline POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE test;
-< SHOW FIELDS FROM gis_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g point YES NULL
-< SHOW FIELDS FROM gis_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g linestring YES NULL
-< SHOW FIELDS FROM gis_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g polygon YES NULL
-< SHOW FIELDS FROM gis_multi_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipoint YES NULL
-< SHOW FIELDS FROM gis_multi_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multilinestring YES NULL
-< SHOW FIELDS FROM gis_multi_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipolygon YES NULL
-< SHOW FIELDS FROM gis_geometrycollection;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometrycollection YES NULL
-< SHOW FIELDS FROM gis_geometry;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometry YES NULL
-< INSERT INTO gis_point VALUES
-< (101, PointFromText('POINT(10 10)')),
-< (102, PointFromText('POINT(20 10)')),
-< (103, PointFromText('POINT(20 20)')),
-< (104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
-< INSERT INTO gis_line VALUES
-< (105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
-< (106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
-< (107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
-< INSERT INTO gis_polygon VALUES
-< (108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
-< (109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
-< (110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
-< INSERT INTO gis_multi_point VALUES
-< (111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
-< (112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
-< (113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
-< INSERT INTO gis_multi_line VALUES
-< (114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
-< (115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
-< (116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
-< INSERT INTO gis_multi_polygon VALUES
-< (117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
-< INSERT INTO gis_geometrycollection VALUES
-< (120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
-< (121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
-< (122, GeomFromText('GeometryCollection()')),
-< (123, GeomFromText('GeometryCollection EMPTY'));
-< INSERT into gis_geometry SELECT * FROM gis_point;
-< INSERT into gis_geometry SELECT * FROM gis_line;
-< INSERT into gis_geometry SELECT * FROM gis_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_multi_point;
-< INSERT into gis_geometry SELECT * FROM gis_multi_line;
-< INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
-< SELECT fid, AsText(g) FROM gis_point;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< SELECT fid, AsText(g) FROM gis_line;
-< fid AsText(g)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< SELECT fid, AsText(g) FROM gis_polygon;
-< fid AsText(g)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< SELECT fid, AsText(g) FROM gis_multi_point;
-< fid AsText(g)
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< SELECT fid, AsText(g) FROM gis_multi_line;
-< fid AsText(g)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< SELECT fid, AsText(g) FROM gis_multi_polygon;
-< fid AsText(g)
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< SELECT fid, AsText(g) FROM gis_geometrycollection;
-< fid AsText(g)
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, AsText(g) FROM gis_geometry;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, Dimension(g) FROM gis_geometry;
-< fid Dimension(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 1
-< 106 1
-< 107 1
-< 108 2
-< 109 2
-< 110 2
-< 111 0
-< 112 0
-< 113 0
-< 114 1
-< 115 1
-< 116 1
-< 117 2
-< 118 2
-< 119 2
-< 120 1
-< 121 1
-< 122 0
-< 123 0
-< SELECT fid, GeometryType(g) FROM gis_geometry;
-< fid GeometryType(g)
-< 101 POINT
-< 102 POINT
-< 103 POINT
-< 104 POINT
-< 105 LINESTRING
-< 106 LINESTRING
-< 107 LINESTRING
-< 108 POLYGON
-< 109 POLYGON
-< 110 POLYGON
-< 111 MULTIPOINT
-< 112 MULTIPOINT
-< 113 MULTIPOINT
-< 114 MULTILINESTRING
-< 115 MULTILINESTRING
-< 116 MULTILINESTRING
-< 117 MULTIPOLYGON
-< 118 MULTIPOLYGON
-< 119 MULTIPOLYGON
-< 120 GEOMETRYCOLLECTION
-< 121 GEOMETRYCOLLECTION
-< 122 GEOMETRYCOLLECTION
-< 123 GEOMETRYCOLLECTION
-< SELECT fid, IsEmpty(g) FROM gis_geometry;
-< fid IsEmpty(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 0
-< 106 0
-< 107 0
-< 108 0
-< 109 0
-< 110 0
-< 111 0
-< 112 0
-< 113 0
-< 114 0
-< 115 0
-< 116 0
-< 117 0
-< 118 0
-< 119 0
-< 120 0
-< 121 0
-< 122 0
-< 123 0
-< SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
-< fid AsText(Envelope(g))
-< 101 POLYGON((10 10,10 10,10 10,10 10,10 10))
-< 102 POLYGON((20 10,20 10,20 10,20 10,20 10))
-< 103 POLYGON((20 20,20 20,20 20,20 20,20 20))
-< 104 POLYGON((10 20,10 20,10 20,10 20,10 20))
-< 105 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 106 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 107 POLYGON((10 10,40 10,40 10,10 10,10 10))
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0))
-< 110 POLYGON((0 0,30 0,30 30,0 30,0 0))
-< 111 POLYGON((0 0,20 0,20 20,0 20,0 0))
-< 112 POLYGON((1 1,21 1,21 21,1 21,1 1))
-< 113 POLYGON((3 6,4 6,4 10,3 10,3 6))
-< 114 POLYGON((10 0,16 0,16 48,10 48,10 0))
-< 115 POLYGON((10 0,10 0,10 48,10 48,10 0))
-< 116 POLYGON((1 2,21 2,21 8,1 8,1 2))
-< 117 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 118 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 119 POLYGON((0 0,3 0,3 3,0 3,0 0))
-< 120 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 121 POLYGON((3 6,44 6,44 9,3 9,3 6))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, X(g) FROM gis_point;
-< fid X(g)
-< 101 10
-< 102 20
-< 103 20
-< 104 10
-< SELECT fid, Y(g) FROM gis_point;
-< fid Y(g)
-< 101 10
-< 102 10
-< 103 20
-< 104 20
-< SELECT fid, AsText(StartPoint(g)) FROM gis_line;
-< fid AsText(StartPoint(g))
-< 105 POINT(0 0)
-< 106 POINT(10 10)
-< 107 POINT(10 10)
-< SELECT fid, AsText(EndPoint(g)) FROM gis_line;
-< fid AsText(EndPoint(g))
-< 105 POINT(10 0)
-< 106 POINT(10 10)
-< 107 POINT(40 10)
-< SELECT fid, GLength(g) FROM gis_line;
-< fid GLength(g)
-< 105 24.14213562373095
-< 106 40
-< 107 30
-< SELECT fid, NumPoints(g) FROM gis_line;
-< fid NumPoints(g)
-< 105 3
-< 106 5
-< 107 2
-< SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
-< fid AsText(PointN(g, 2))
-< 105 POINT(0 10)
-< 106 POINT(20 10)
-< 107 POINT(40 10)
-< SELECT fid, IsClosed(g) FROM gis_line;
-< fid IsClosed(g)
-< 105 0
-< 106 1
-< 107 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
-< fid AsText(Centroid(g))
-< 108 POINT(15 15)
-< 109 POINT(25.416666666666668 25.416666666666668)
-< 110 POINT(20 10)
-< SELECT fid, Area(g) FROM gis_polygon;
-< fid Area(g)
-< 108 100
-< 109 2400
-< 110 450
-< SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
-< fid AsText(ExteriorRing(g))
-< 108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
-< 110 LINESTRING(0 0,30 0,30 30,0 0)
-< SELECT fid, NumInteriorRings(g) FROM gis_polygon;
-< fid NumInteriorRings(g)
-< 108 0
-< 109 1
-< 110 0
-< SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
-< fid AsText(InteriorRingN(g, 1))
-< 108 NULL
-< 109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 110 NULL
-< SELECT fid, IsClosed(g) FROM gis_multi_line;
-< fid IsClosed(g)
-< 114 0
-< 115 0
-< 116 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
-< fid AsText(Centroid(g))
-< 117 POINT(55.58852775304245 17.426536064113982)
-< 118 POINT(55.58852775304245 17.426536064113982)
-< 119 POINT(2 2)
-< SELECT fid, Area(g) FROM gis_multi_polygon;
-< fid Area(g)
-< 117 1684.5
-< 118 1684.5
-< 119 4.5
-< SELECT fid, NumGeometries(g) from gis_multi_point;
-< fid NumGeometries(g)
-< 111 4
-< 112 4
-< 113 2
-< SELECT fid, NumGeometries(g) from gis_multi_line;
-< fid NumGeometries(g)
-< 114 2
-< 115 1
-< 116 2
-< SELECT fid, NumGeometries(g) from gis_multi_polygon;
-< fid NumGeometries(g)
-< 117 2
-< 118 2
-< 119 1
-< SELECT fid, NumGeometries(g) from gis_geometrycollection;
-< fid NumGeometries(g)
-< 120 2
-< 121 2
-< 122 0
-< 123 0
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
-< fid AsText(GeometryN(g, 2))
-< 111 POINT(10 10)
-< 112 POINT(11 11)
-< 113 POINT(4 10)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
-< fid AsText(GeometryN(g, 2))
-< 114 LINESTRING(16 0,16 23,16 48)
-< 115 NULL
-< 116 LINESTRING(2 5,5 8,21 7)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
-< fid AsText(GeometryN(g, 2))
-< 117 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 118 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 119 NULL
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 2))
-< 120 LINESTRING(0 0,10 10)
-< 121 LINESTRING(3 6,7 9)
-< 122 NULL
-< 123 NULL
-< SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 1))
-< 120 POINT(0 0)
-< 121 POINT(44 6)
-< 122 NULL
-< 123 NULL
-< SELECT g1.fid as first, g2.fid as second,
-< Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
-< Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
-< Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
-< FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
-< first second w c o e d t i r
-< 120 120 1 1 0 1 0 1 1 0
-< 120 121 0 0 1 0 0 0 1 0
-< 120 122 0 1 NULL 0 NULL 0 NULL 0
-< 120 123 0 1 NULL 0 NULL 0 NULL 0
-< 121 120 0 0 1 0 0 0 1 0
-< 121 121 1 1 0 1 0 1 1 0
-< 121 122 0 1 NULL 0 NULL 0 NULL 0
-< 121 123 0 1 NULL 0 NULL 0 NULL 0
-< 122 120 1 0 NULL 0 NULL 0 NULL 0
-< 122 121 1 0 NULL 0 NULL 0 NULL 0
-< 122 122 1 1 NULL 1 NULL 0 NULL 0
-< 122 123 1 1 NULL 1 NULL 0 NULL 0
-< 123 120 1 0 NULL 0 NULL 0 NULL 0
-< 123 121 1 0 NULL 0 NULL 0 NULL 0
-< 123 122 1 1 NULL 1 NULL 0 NULL 0
-< 123 123 1 1 NULL 1 NULL 0 NULL 0
-< DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
-< USE gis_ogs;
-< # Lakes
-< INSERT INTO lakes VALUES (
-< 101, 'BLUE LAKE',
-< PolyFromText(
-< 'POLYGON(
-< (52 18,66 23,73 9,48 6,52 18),
-< (59 18,67 18,67 13,59 13,59 18)
-< )',
-< 101));
-< # Road Segments
-< INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
-< INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4,
-< LineFromText(
-< 'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
-< INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 70 38, 72 48 )' ,101));
-< INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4,
-< LineFromText(
-< 'LINESTRING( 70 38, 84 42 )' ,101));
-< INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL,
-< 1,
-< LineFromText(
-< 'LINESTRING( 28 26, 28 0 )',101));
-< # DividedRoutes
-< INSERT INTO divided_routes VALUES(119, 'Route 75', 4,
-< MLineFromText(
-< 'MULTILINESTRING((10 48,10 21,10 0),
-< (16 0,16 23,16 48))', 101));
-< # Forests
-< INSERT INTO forests VALUES(109, 'Green Forest',
-< MPolyFromText(
-< 'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
-< (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
-< 101));
-< # Bridges
-< INSERT INTO bridges VALUES(110, 'Cam Bridge', PointFromText(
-< 'POINT( 44 31 )', 101));
-< # Streams
-< INSERT INTO streams VALUES(111, 'Cam Stream',
-< LineFromText(
-< 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
-< INSERT INTO streams VALUES(112, NULL,
-< LineFromText(
-< 'LINESTRING( 76 0, 78 4, 73 9 )', 101));
-< # Buildings
-< INSERT INTO buildings VALUES(113, '123 Main Street',
-< PointFromText(
-< 'POINT( 52 30 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
-< INSERT INTO buildings VALUES(114, '215 Main Street',
-< PointFromText(
-< 'POINT( 64 33 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
-< # Ponds
-< INSERT INTO ponds VALUES(120, NULL, 'Stock Pond',
-< MPolyFromText(
-< 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
-< ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
-< # Named Places
-< INSERT INTO named_places VALUES(117, 'Ashton',
-< PolyFromText(
-< 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
-< INSERT INTO named_places VALUES(118, 'Goose Island',
-< PolyFromText(
-< 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
-< # Map Neatlines
-< INSERT INTO map_neatlines VALUES(115,
-< PolyFromText(
-< 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
-< SELECT Dimension(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< Dimension(shore)
-< 2
-< SELECT GeometryType(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GeometryType(centerlines)
-< MULTILINESTRING
-< SELECT AsText(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(boundary)
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(PolyFromWKB(AsBinary(boundary),101))
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT SRID(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< SRID(boundary)
-< 101
-< SELECT IsEmpty(centerline)
-< FROM road_segments
-< WHERE name = 'Route 5'
-< AND aliases = 'Main Street';
-< IsEmpty(centerline)
-< 0
-< SELECT AsText(Envelope(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Envelope(boundary))
-< POLYGON((59 13,67 13,67 18,59 18,59 13))
-< SELECT X(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< X(position)
-< 44
-< SELECT Y(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< Y(position)
-< 31
-< SELECT AsText(StartPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(StartPoint(centerline))
-< POINT(0 18)
-< SELECT AsText(EndPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(EndPoint(centerline))
-< POINT(44 31)
-< SELECT GLength(centerline)
-< FROM road_segments
-< WHERE fid = 106;
-< GLength(centerline)
-< 26
-< SELECT NumPoints(centerline)
-< FROM road_segments
-< WHERE fid = 102;
-< NumPoints(centerline)
-< 5
-< SELECT AsText(PointN(centerline, 1))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(PointN(centerline, 1))
-< POINT(0 18)
-< SELECT AsText(Centroid(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Centroid(boundary))
-< POINT(63 15.5)
-< SELECT Area(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< Area(boundary)
-< 40
-< SELECT AsText(ExteriorRing(shore))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(ExteriorRing(shore))
-< LINESTRING(52 18,66 23,73 9,48 6,52 18)
-< SELECT NumInteriorRings(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< NumInteriorRings(shore)
-< 1
-< SELECT AsText(InteriorRingN(shore, 1))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(InteriorRingN(shore, 1))
-< LINESTRING(59 18,67 18,67 13,59 13,59 18)
-< SELECT NumGeometries(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< NumGeometries(centerlines)
-< 2
-< SELECT AsText(GeometryN(centerlines, 2))
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< AsText(GeometryN(centerlines, 2))
-< LINESTRING(16 0,16 23,16 48)
-< SELECT IsClosed(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< IsClosed(centerlines)
-< 0
-< SELECT GLength(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GLength(centerlines)
-< 96
-< SELECT AsText(Centroid(shores))
-< FROM ponds
-< WHERE fid = 120;
-< AsText(Centroid(shores))
-< POINT(25 42)
-< SELECT Area(shores)
-< FROM ponds
-< WHERE fid = 120;
-< Area(shores)
-< 8
-< SELECT ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< 1
-< SELECT ST_Disjoint(centerlines, boundary)
-< FROM divided_routes, named_places
-< WHERE divided_routes.name = 'Route 75'
-< AND named_places.name = 'Ashton';
-< ST_Disjoint(centerlines, boundary)
-< 1
-< SELECT ST_Touches(centerline, shore)
-< FROM streams, lakes
-< WHERE streams.name = 'Cam Stream'
-< AND lakes.name = 'Blue Lake';
-< ST_Touches(centerline, shore)
-< 1
-< SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< Crosses(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Contains(forests.boundary, named_places.boundary)
-< FROM forests, named_places
-< WHERE forests.name = 'Green Forest'
-< AND named_places.name = 'Ashton';
-< ST_Contains(forests.boundary, named_places.boundary)
-< 0
-< SELECT ST_Distance(position, boundary)
-< FROM bridges, named_places
-< WHERE bridges.name = 'Cam Bridge'
-< AND named_places.name = 'Ashton';
-< ST_Distance(position, boundary)
-< 12
-< SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
-< FROM named_places, forests
-< WHERE named_places.name = 'Ashton'
-< AND forests.name = 'Green Forest';
-< AsText(ST_Difference(named_places.boundary, forests.boundary))
-< POLYGON((56 34,62 48,84 48,84 42,56 34))
-< SELECT AsText(ST_Union(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Goose Island';
-< AsText(ST_Union(shore, boundary))
-< POLYGON((48 6,52 18,66 23,73 9,48 6))
-< SELECT AsText(ST_SymDifference(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Ashton';
-< AsText(ST_SymDifference(shore, boundary))
-< MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
-< SELECT count(*)
-< FROM buildings, bridges
-< WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
-< count(*)
-< 1
----
-> ERROR 42000: The storage engine for the table doesn't support GEOMETRY
-> # ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
-> # The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
-> # Geometry types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- type_spatial.result 2013-01-23 01:25:39.143876032 +0400
++++ type_spatial.reject 2013-01-23 02:51:14.535315418 +0400
+@@ -2,699 +2,15 @@
+ DROP DATABASE IF EXISTS gis_ogs;
+ CREATE DATABASE gis_ogs;
+ CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE gis_ogs;
+-CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shore POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerlines MULTILINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT,
+-footprint POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-type CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shores MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
+-neatline POLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE test;
+-SHOW FIELDS FROM gis_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g point YES NULL
+-SHOW FIELDS FROM gis_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g linestring YES NULL
+-SHOW FIELDS FROM gis_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g polygon YES NULL
+-SHOW FIELDS FROM gis_multi_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipoint YES NULL
+-SHOW FIELDS FROM gis_multi_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multilinestring YES NULL
+-SHOW FIELDS FROM gis_multi_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipolygon YES NULL
+-SHOW FIELDS FROM gis_geometrycollection;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometrycollection YES NULL
+-SHOW FIELDS FROM gis_geometry;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometry YES NULL
+-INSERT INTO gis_point (fid,g) VALUES
+-(101, PointFromText('POINT(10 10)')),
+-(102, PointFromText('POINT(20 10)')),
+-(103, PointFromText('POINT(20 20)')),
+-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+-INSERT INTO gis_line (fid,g) VALUES
+-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
+-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
+-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
+-INSERT INTO gis_polygon (fid,g) VALUES
+-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
+-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
+-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
+-INSERT INTO gis_multi_point (fid,g) VALUES
+-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
+-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
+-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
+-INSERT INTO gis_multi_line (fid,g) VALUES
+-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
+-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
+-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
+-INSERT INTO gis_multi_polygon (fid,g) VALUES
+-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+-INSERT INTO gis_geometrycollection (fid,g) VALUES
+-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
+-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
+-(122, GeomFromText('GeometryCollection()')),
+-(123, GeomFromText('GeometryCollection EMPTY'));
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
+-SELECT fid, AsText(g) FROM gis_point;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-SELECT fid, AsText(g) FROM gis_line;
+-fid AsText(g)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-SELECT fid, AsText(g) FROM gis_polygon;
+-fid AsText(g)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-SELECT fid, AsText(g) FROM gis_multi_point;
+-fid AsText(g)
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-SELECT fid, AsText(g) FROM gis_multi_line;
+-fid AsText(g)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-SELECT fid, AsText(g) FROM gis_multi_polygon;
+-fid AsText(g)
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-SELECT fid, AsText(g) FROM gis_geometrycollection;
+-fid AsText(g)
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, AsText(g) FROM gis_geometry;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, Dimension(g) FROM gis_geometry;
+-fid Dimension(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 1
+-106 1
+-107 1
+-108 2
+-109 2
+-110 2
+-111 0
+-112 0
+-113 0
+-114 1
+-115 1
+-116 1
+-117 2
+-118 2
+-119 2
+-120 1
+-121 1
+-122 0
+-123 0
+-SELECT fid, GeometryType(g) FROM gis_geometry;
+-fid GeometryType(g)
+-101 POINT
+-102 POINT
+-103 POINT
+-104 POINT
+-105 LINESTRING
+-106 LINESTRING
+-107 LINESTRING
+-108 POLYGON
+-109 POLYGON
+-110 POLYGON
+-111 MULTIPOINT
+-112 MULTIPOINT
+-113 MULTIPOINT
+-114 MULTILINESTRING
+-115 MULTILINESTRING
+-116 MULTILINESTRING
+-117 MULTIPOLYGON
+-118 MULTIPOLYGON
+-119 MULTIPOLYGON
+-120 GEOMETRYCOLLECTION
+-121 GEOMETRYCOLLECTION
+-122 GEOMETRYCOLLECTION
+-123 GEOMETRYCOLLECTION
+-SELECT fid, IsEmpty(g) FROM gis_geometry;
+-fid IsEmpty(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 0
+-106 0
+-107 0
+-108 0
+-109 0
+-110 0
+-111 0
+-112 0
+-113 0
+-114 0
+-115 0
+-116 0
+-117 0
+-118 0
+-119 0
+-120 0
+-121 0
+-122 0
+-123 0
+-SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
+-fid AsText(Envelope(g))
+-101 POLYGON((10 10,10 10,10 10,10 10,10 10))
+-102 POLYGON((20 10,20 10,20 10,20 10,20 10))
+-103 POLYGON((20 20,20 20,20 20,20 20,20 20))
+-104 POLYGON((10 20,10 20,10 20,10 20,10 20))
+-105 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-106 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-107 POLYGON((10 10,40 10,40 10,10 10,10 10))
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0))
+-110 POLYGON((0 0,30 0,30 30,0 30,0 0))
+-111 POLYGON((0 0,20 0,20 20,0 20,0 0))
+-112 POLYGON((1 1,21 1,21 21,1 21,1 1))
+-113 POLYGON((3 6,4 6,4 10,3 10,3 6))
+-114 POLYGON((10 0,16 0,16 48,10 48,10 0))
+-115 POLYGON((10 0,10 0,10 48,10 48,10 0))
+-116 POLYGON((1 2,21 2,21 8,1 8,1 2))
+-117 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-118 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-119 POLYGON((0 0,3 0,3 3,0 3,0 0))
+-120 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-121 POLYGON((3 6,44 6,44 9,3 9,3 6))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, X(g) FROM gis_point;
+-fid X(g)
+-101 10
+-102 20
+-103 20
+-104 10
+-SELECT fid, Y(g) FROM gis_point;
+-fid Y(g)
+-101 10
+-102 10
+-103 20
+-104 20
+-SELECT fid, AsText(StartPoint(g)) FROM gis_line;
+-fid AsText(StartPoint(g))
+-105 POINT(0 0)
+-106 POINT(10 10)
+-107 POINT(10 10)
+-SELECT fid, AsText(EndPoint(g)) FROM gis_line;
+-fid AsText(EndPoint(g))
+-105 POINT(10 0)
+-106 POINT(10 10)
+-107 POINT(40 10)
+-SELECT fid, GLength(g) FROM gis_line;
+-fid GLength(g)
+-105 24.14213562373095
+-106 40
+-107 30
+-SELECT fid, NumPoints(g) FROM gis_line;
+-fid NumPoints(g)
+-105 3
+-106 5
+-107 2
+-SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
+-fid AsText(PointN(g, 2))
+-105 POINT(0 10)
+-106 POINT(20 10)
+-107 POINT(40 10)
+-SELECT fid, IsClosed(g) FROM gis_line;
+-fid IsClosed(g)
+-105 0
+-106 1
+-107 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
+-fid AsText(Centroid(g))
+-108 POINT(15 15)
+-109 POINT(25.416666666666668 25.416666666666668)
+-110 POINT(20 10)
+-SELECT fid, Area(g) FROM gis_polygon;
+-fid Area(g)
+-108 100
+-109 2400
+-110 450
+-SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
+-fid AsText(ExteriorRing(g))
+-108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
+-110 LINESTRING(0 0,30 0,30 30,0 0)
+-SELECT fid, NumInteriorRings(g) FROM gis_polygon;
+-fid NumInteriorRings(g)
+-108 0
+-109 1
+-110 0
+-SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
+-fid AsText(InteriorRingN(g, 1))
+-108 NULL
+-109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-110 NULL
+-SELECT fid, IsClosed(g) FROM gis_multi_line;
+-fid IsClosed(g)
+-114 0
+-115 0
+-116 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
+-fid AsText(Centroid(g))
+-117 POINT(55.58852775304245 17.426536064113982)
+-118 POINT(55.58852775304245 17.426536064113982)
+-119 POINT(2 2)
+-SELECT fid, Area(g) FROM gis_multi_polygon;
+-fid Area(g)
+-117 1684.5
+-118 1684.5
+-119 4.5
+-SELECT fid, NumGeometries(g) from gis_multi_point;
+-fid NumGeometries(g)
+-111 4
+-112 4
+-113 2
+-SELECT fid, NumGeometries(g) from gis_multi_line;
+-fid NumGeometries(g)
+-114 2
+-115 1
+-116 2
+-SELECT fid, NumGeometries(g) from gis_multi_polygon;
+-fid NumGeometries(g)
+-117 2
+-118 2
+-119 1
+-SELECT fid, NumGeometries(g) from gis_geometrycollection;
+-fid NumGeometries(g)
+-120 2
+-121 2
+-122 0
+-123 0
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
+-fid AsText(GeometryN(g, 2))
+-111 POINT(10 10)
+-112 POINT(11 11)
+-113 POINT(4 10)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
+-fid AsText(GeometryN(g, 2))
+-114 LINESTRING(16 0,16 23,16 48)
+-115 NULL
+-116 LINESTRING(2 5,5 8,21 7)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
+-fid AsText(GeometryN(g, 2))
+-117 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-118 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-119 NULL
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 2))
+-120 LINESTRING(0 0,10 10)
+-121 LINESTRING(3 6,7 9)
+-122 NULL
+-123 NULL
+-SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 1))
+-120 POINT(0 0)
+-121 POINT(44 6)
+-122 NULL
+-123 NULL
+-SELECT g1.fid as first, g2.fid as second,
+-Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
+-Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
+-Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
+-FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
+-first second w c o e d t i r
+-120 120 1 1 0 1 0 1 1 0
+-120 121 0 0 1 0 0 0 1 0
+-120 122 0 1 NULL 0 NULL 0 NULL 0
+-120 123 0 1 NULL 0 NULL 0 NULL 0
+-121 120 0 0 1 0 0 0 1 0
+-121 121 1 1 0 1 0 1 1 0
+-121 122 0 1 NULL 0 NULL 0 NULL 0
+-121 123 0 1 NULL 0 NULL 0 NULL 0
+-122 120 1 0 NULL 0 NULL 0 NULL 0
+-122 121 1 0 NULL 0 NULL 0 NULL 0
+-122 122 1 1 NULL 1 NULL 0 NULL 0
+-122 123 1 1 NULL 1 NULL 0 NULL 0
+-123 120 1 0 NULL 0 NULL 0 NULL 0
+-123 121 1 0 NULL 0 NULL 0 NULL 0
+-123 122 1 1 NULL 1 NULL 0 NULL 0
+-123 123 1 1 NULL 1 NULL 0 NULL 0
+-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
+-USE gis_ogs;
+-# Lakes
+-INSERT INTO lakes (fid,name,shore) VALUES (
+-101, 'BLUE LAKE',
+-PolyFromText(
+-'POLYGON(
+- (52 18,66 23,73 9,48 6,52 18),
+- (59 18,67 18,67 13,59 13,59 18)
+- )',
+-101));
+-# Road Segments
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
+-LineFromText(
+-'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 70 38, 72 48 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
+-LineFromText(
+-'LINESTRING( 70 38, 84 42 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
+-1,
+-LineFromText(
+-'LINESTRING( 28 26, 28 0 )',101));
+-# DividedRoutes
+-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
+-MLineFromText(
+-'MULTILINESTRING((10 48,10 21,10 0),
+- (16 0,16 23,16 48))', 101));
+-# Forests
+-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
+-MPolyFromText(
+-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
+- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
+-101));
+-# Bridges
+-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
+-'POINT( 44 31 )', 101));
+-# Streams
+-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
+-LineFromText(
+-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
+-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
+-LineFromText(
+-'LINESTRING( 76 0, 78 4, 73 9 )', 101));
+-# Buildings
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
+-PointFromText(
+-'POINT( 52 30 )', 101),
+-PolyFromText(
+-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
+-PointFromText(
+-'POINT( 64 33 )', 101),
+-PolyFromText(
+-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
+-# Ponds
+-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
+-MPolyFromText(
+-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
+- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
+-# Named Places
+-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
+-PolyFromText(
+-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
+-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
+-PolyFromText(
+-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
+-# Map Neatlines
+-INSERT INTO map_neatlines (fid,neatline) VALUES(115,
+-PolyFromText(
+-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
+-SELECT Dimension(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-Dimension(shore)
+-2
+-SELECT GeometryType(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GeometryType(centerlines)
+-MULTILINESTRING
+-SELECT AsText(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(boundary)
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(PolyFromWKB(AsBinary(boundary),101))
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT SRID(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-SRID(boundary)
+-101
+-SELECT IsEmpty(centerline)
+-FROM road_segments
+-WHERE name = 'Route 5'
+-AND aliases = 'Main Street';
+-IsEmpty(centerline)
+-0
+-SELECT AsText(Envelope(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Envelope(boundary))
+-POLYGON((59 13,67 13,67 18,59 18,59 13))
+-SELECT X(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-X(position)
+-44
+-SELECT Y(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-Y(position)
+-31
+-SELECT AsText(StartPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(StartPoint(centerline))
+-POINT(0 18)
+-SELECT AsText(EndPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(EndPoint(centerline))
+-POINT(44 31)
+-SELECT GLength(centerline)
+-FROM road_segments
+-WHERE fid = 106;
+-GLength(centerline)
+-26
+-SELECT NumPoints(centerline)
+-FROM road_segments
+-WHERE fid = 102;
+-NumPoints(centerline)
+-5
+-SELECT AsText(PointN(centerline, 1))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(PointN(centerline, 1))
+-POINT(0 18)
+-SELECT AsText(Centroid(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Centroid(boundary))
+-POINT(63 15.5)
+-SELECT Area(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-Area(boundary)
+-40
+-SELECT AsText(ExteriorRing(shore))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(ExteriorRing(shore))
+-LINESTRING(52 18,66 23,73 9,48 6,52 18)
+-SELECT NumInteriorRings(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-NumInteriorRings(shore)
+-1
+-SELECT AsText(InteriorRingN(shore, 1))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(InteriorRingN(shore, 1))
+-LINESTRING(59 18,67 18,67 13,59 13,59 18)
+-SELECT NumGeometries(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-NumGeometries(centerlines)
+-2
+-SELECT AsText(GeometryN(centerlines, 2))
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-AsText(GeometryN(centerlines, 2))
+-LINESTRING(16 0,16 23,16 48)
+-SELECT IsClosed(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-IsClosed(centerlines)
+-0
+-SELECT GLength(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GLength(centerlines)
+-96
+-SELECT AsText(Centroid(shores))
+-FROM ponds
+-WHERE fid = 120;
+-AsText(Centroid(shores))
+-POINT(25 42)
+-SELECT Area(shores)
+-FROM ponds
+-WHERE fid = 120;
+-Area(shores)
+-8
+-SELECT ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-1
+-SELECT ST_Disjoint(centerlines, boundary)
+-FROM divided_routes, named_places
+-WHERE divided_routes.name = 'Route 75'
+-AND named_places.name = 'Ashton';
+-ST_Disjoint(centerlines, boundary)
+-1
+-SELECT ST_Touches(centerline, shore)
+-FROM streams, lakes
+-WHERE streams.name = 'Cam Stream'
+-AND lakes.name = 'Blue Lake';
+-ST_Touches(centerline, shore)
+-1
+-SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-Crosses(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Contains(forests.boundary, named_places.boundary)
+-FROM forests, named_places
+-WHERE forests.name = 'Green Forest'
+-AND named_places.name = 'Ashton';
+-ST_Contains(forests.boundary, named_places.boundary)
+-0
+-SELECT ST_Distance(position, boundary)
+-FROM bridges, named_places
+-WHERE bridges.name = 'Cam Bridge'
+-AND named_places.name = 'Ashton';
+-ST_Distance(position, boundary)
+-12
+-SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
+-FROM named_places, forests
+-WHERE named_places.name = 'Ashton'
+-AND forests.name = 'Green Forest';
+-AsText(ST_Difference(named_places.boundary, forests.boundary))
+-POLYGON((56 34,62 48,84 48,84 42,56 34))
+-SELECT AsText(ST_Union(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Goose Island';
+-AsText(ST_Union(shore, boundary))
+-POLYGON((48 6,52 18,66 23,73 9,48 6))
+-SELECT AsText(ST_SymDifference(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Ashton';
+-AsText(ST_SymDifference(shore, boundary))
+-MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
+-SELECT count(*)
+-FROM buildings, bridges
+-WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
+-count(*)
+-1
++ERROR 42000: The storage engine for the table doesn't support GEOMETRY
++# ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
++# The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
++# Geometry types or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP DATABASE gis_ogs;
+ USE test;
diff --git a/storage/myisammrg/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/myisammrg/mysql-test/storage_engine/type_spatial_indexes.rdiff
index 89f1100f550..20d98db1ff2 100644
--- a/storage/myisammrg/mysql-test/storage_engine/type_spatial_indexes.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/type_spatial_indexes.rdiff
@@ -1,1412 +1,1422 @@
-5,698c5,14
-< CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING, <CUSTOM_INDEX> g(g(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON, <CUSTOM_INDEX> g(g(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT, <CUSTOM_INDEX> g(g(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING, <CUSTOM_INDEX> g(g(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE gis_ogs;
-< CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shore POLYGON, <CUSTOM_INDEX> s(shore(64))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING, <CUSTOM_INDEX> c(centerline(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerlines MULTILINESTRING, <CUSTOM_INDEX> c(centerlines(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary MULTIPOLYGON, <CUSTOM_INDEX> b(boundary(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT, <CUSTOM_INDEX> p(`position`(64))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING, <CUSTOM_INDEX> c(centerline(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT,
-< footprint POLYGON, <CUSTOM_INDEX> p(`position`(64)), <CUSTOM_INDEX> f(footprint(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< type CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shores MULTIPOLYGON, <CUSTOM_INDEX> s(shores(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary POLYGON, <CUSTOM_INDEX> b(boundary(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
-< neatline POLYGON, <CUSTOM_INDEX> n(neatline(700))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE test;
-< SHOW FIELDS FROM gis_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g point YES MUL NULL
-< SHOW FIELDS FROM gis_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g linestring YES MUL NULL
-< SHOW FIELDS FROM gis_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g polygon YES MUL NULL
-< SHOW FIELDS FROM gis_multi_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipoint YES MUL NULL
-< SHOW FIELDS FROM gis_multi_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multilinestring YES MUL NULL
-< SHOW FIELDS FROM gis_multi_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipolygon YES NULL
-< SHOW FIELDS FROM gis_geometrycollection;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometrycollection YES NULL
-< SHOW FIELDS FROM gis_geometry;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometry YES NULL
-< INSERT INTO gis_point VALUES
-< (101, PointFromText('POINT(10 10)')),
-< (102, PointFromText('POINT(20 10)')),
-< (103, PointFromText('POINT(20 20)')),
-< (104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
-< INSERT INTO gis_line VALUES
-< (105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
-< (106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
-< (107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
-< INSERT INTO gis_polygon VALUES
-< (108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
-< (109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
-< (110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
-< INSERT INTO gis_multi_point VALUES
-< (111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
-< (112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
-< (113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
-< INSERT INTO gis_multi_line VALUES
-< (114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
-< (115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
-< (116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
-< INSERT INTO gis_multi_polygon VALUES
-< (117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
-< INSERT INTO gis_geometrycollection VALUES
-< (120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
-< (121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
-< (122, GeomFromText('GeometryCollection()')),
-< (123, GeomFromText('GeometryCollection EMPTY'));
-< INSERT into gis_geometry SELECT * FROM gis_point;
-< INSERT into gis_geometry SELECT * FROM gis_line;
-< INSERT into gis_geometry SELECT * FROM gis_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_multi_point;
-< INSERT into gis_geometry SELECT * FROM gis_multi_line;
-< INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
-< SELECT fid, AsText(g) FROM gis_point;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< SELECT fid, AsText(g) FROM gis_line;
-< fid AsText(g)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< SELECT fid, AsText(g) FROM gis_polygon;
-< fid AsText(g)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< SELECT fid, AsText(g) FROM gis_multi_point;
-< fid AsText(g)
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< SELECT fid, AsText(g) FROM gis_multi_line;
-< fid AsText(g)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< SELECT fid, AsText(g) FROM gis_multi_polygon;
-< fid AsText(g)
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< SELECT fid, AsText(g) FROM gis_geometrycollection;
-< fid AsText(g)
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, AsText(g) FROM gis_geometry;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, Dimension(g) FROM gis_geometry;
-< fid Dimension(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 1
-< 106 1
-< 107 1
-< 108 2
-< 109 2
-< 110 2
-< 111 0
-< 112 0
-< 113 0
-< 114 1
-< 115 1
-< 116 1
-< 117 2
-< 118 2
-< 119 2
-< 120 1
-< 121 1
-< 122 0
-< 123 0
-< SELECT fid, GeometryType(g) FROM gis_geometry;
-< fid GeometryType(g)
-< 101 POINT
-< 102 POINT
-< 103 POINT
-< 104 POINT
-< 105 LINESTRING
-< 106 LINESTRING
-< 107 LINESTRING
-< 108 POLYGON
-< 109 POLYGON
-< 110 POLYGON
-< 111 MULTIPOINT
-< 112 MULTIPOINT
-< 113 MULTIPOINT
-< 114 MULTILINESTRING
-< 115 MULTILINESTRING
-< 116 MULTILINESTRING
-< 117 MULTIPOLYGON
-< 118 MULTIPOLYGON
-< 119 MULTIPOLYGON
-< 120 GEOMETRYCOLLECTION
-< 121 GEOMETRYCOLLECTION
-< 122 GEOMETRYCOLLECTION
-< 123 GEOMETRYCOLLECTION
-< SELECT fid, IsEmpty(g) FROM gis_geometry;
-< fid IsEmpty(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 0
-< 106 0
-< 107 0
-< 108 0
-< 109 0
-< 110 0
-< 111 0
-< 112 0
-< 113 0
-< 114 0
-< 115 0
-< 116 0
-< 117 0
-< 118 0
-< 119 0
-< 120 0
-< 121 0
-< 122 0
-< 123 0
-< SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
-< fid AsText(Envelope(g))
-< 101 POLYGON((10 10,10 10,10 10,10 10,10 10))
-< 102 POLYGON((20 10,20 10,20 10,20 10,20 10))
-< 103 POLYGON((20 20,20 20,20 20,20 20,20 20))
-< 104 POLYGON((10 20,10 20,10 20,10 20,10 20))
-< 105 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 106 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 107 POLYGON((10 10,40 10,40 10,10 10,10 10))
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0))
-< 110 POLYGON((0 0,30 0,30 30,0 30,0 0))
-< 111 POLYGON((0 0,20 0,20 20,0 20,0 0))
-< 112 POLYGON((1 1,21 1,21 21,1 21,1 1))
-< 113 POLYGON((3 6,4 6,4 10,3 10,3 6))
-< 114 POLYGON((10 0,16 0,16 48,10 48,10 0))
-< 115 POLYGON((10 0,10 0,10 48,10 48,10 0))
-< 116 POLYGON((1 2,21 2,21 8,1 8,1 2))
-< 117 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 118 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 119 POLYGON((0 0,3 0,3 3,0 3,0 0))
-< 120 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 121 POLYGON((3 6,44 6,44 9,3 9,3 6))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, X(g) FROM gis_point;
-< fid X(g)
-< 101 10
-< 102 20
-< 103 20
-< 104 10
-< SELECT fid, Y(g) FROM gis_point;
-< fid Y(g)
-< 101 10
-< 102 10
-< 103 20
-< 104 20
-< SELECT fid, AsText(StartPoint(g)) FROM gis_line;
-< fid AsText(StartPoint(g))
-< 105 POINT(0 0)
-< 106 POINT(10 10)
-< 107 POINT(10 10)
-< SELECT fid, AsText(EndPoint(g)) FROM gis_line;
-< fid AsText(EndPoint(g))
-< 105 POINT(10 0)
-< 106 POINT(10 10)
-< 107 POINT(40 10)
-< SELECT fid, GLength(g) FROM gis_line;
-< fid GLength(g)
-< 105 24.14213562373095
-< 106 40
-< 107 30
-< SELECT fid, NumPoints(g) FROM gis_line;
-< fid NumPoints(g)
-< 105 3
-< 106 5
-< 107 2
-< SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
-< fid AsText(PointN(g, 2))
-< 105 POINT(0 10)
-< 106 POINT(20 10)
-< 107 POINT(40 10)
-< SELECT fid, IsClosed(g) FROM gis_line;
-< fid IsClosed(g)
-< 105 0
-< 106 1
-< 107 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
-< fid AsText(Centroid(g))
-< 108 POINT(15 15)
-< 109 POINT(25.416666666666668 25.416666666666668)
-< 110 POINT(20 10)
-< SELECT fid, Area(g) FROM gis_polygon;
-< fid Area(g)
-< 108 100
-< 109 2400
-< 110 450
-< SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
-< fid AsText(ExteriorRing(g))
-< 108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
-< 110 LINESTRING(0 0,30 0,30 30,0 0)
-< SELECT fid, NumInteriorRings(g) FROM gis_polygon;
-< fid NumInteriorRings(g)
-< 108 0
-< 109 1
-< 110 0
-< SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
-< fid AsText(InteriorRingN(g, 1))
-< 108 NULL
-< 109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 110 NULL
-< SELECT fid, IsClosed(g) FROM gis_multi_line;
-< fid IsClosed(g)
-< 114 0
-< 115 0
-< 116 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
-< fid AsText(Centroid(g))
-< 117 POINT(55.58852775304245 17.426536064113982)
-< 118 POINT(55.58852775304245 17.426536064113982)
-< 119 POINT(2 2)
-< SELECT fid, Area(g) FROM gis_multi_polygon;
-< fid Area(g)
-< 117 1684.5
-< 118 1684.5
-< 119 4.5
-< SELECT fid, NumGeometries(g) from gis_multi_point;
-< fid NumGeometries(g)
-< 111 4
-< 112 4
-< 113 2
-< SELECT fid, NumGeometries(g) from gis_multi_line;
-< fid NumGeometries(g)
-< 114 2
-< 115 1
-< 116 2
-< SELECT fid, NumGeometries(g) from gis_multi_polygon;
-< fid NumGeometries(g)
-< 117 2
-< 118 2
-< 119 1
-< SELECT fid, NumGeometries(g) from gis_geometrycollection;
-< fid NumGeometries(g)
-< 120 2
-< 121 2
-< 122 0
-< 123 0
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
-< fid AsText(GeometryN(g, 2))
-< 111 POINT(10 10)
-< 112 POINT(11 11)
-< 113 POINT(4 10)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
-< fid AsText(GeometryN(g, 2))
-< 114 LINESTRING(16 0,16 23,16 48)
-< 115 NULL
-< 116 LINESTRING(2 5,5 8,21 7)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
-< fid AsText(GeometryN(g, 2))
-< 117 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 118 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 119 NULL
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 2))
-< 120 LINESTRING(0 0,10 10)
-< 121 LINESTRING(3 6,7 9)
-< 122 NULL
-< 123 NULL
-< SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 1))
-< 120 POINT(0 0)
-< 121 POINT(44 6)
-< 122 NULL
-< 123 NULL
-< SELECT g1.fid as first, g2.fid as second,
-< Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
-< Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
-< Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
-< FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
-< first second w c o e d t i r
-< 120 120 1 1 0 1 0 1 1 0
-< 120 121 0 0 1 0 0 0 1 0
-< 120 122 0 1 NULL 0 NULL 0 NULL 0
-< 120 123 0 1 NULL 0 NULL 0 NULL 0
-< 121 120 0 0 1 0 0 0 1 0
-< 121 121 1 1 0 1 0 1 1 0
-< 121 122 0 1 NULL 0 NULL 0 NULL 0
-< 121 123 0 1 NULL 0 NULL 0 NULL 0
-< 122 120 1 0 NULL 0 NULL 0 NULL 0
-< 122 121 1 0 NULL 0 NULL 0 NULL 0
-< 122 122 1 1 NULL 1 NULL 0 NULL 0
-< 122 123 1 1 NULL 1 NULL 0 NULL 0
-< 123 120 1 0 NULL 0 NULL 0 NULL 0
-< 123 121 1 0 NULL 0 NULL 0 NULL 0
-< 123 122 1 1 NULL 1 NULL 0 NULL 0
-< 123 123 1 1 NULL 1 NULL 0 NULL 0
-< DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
-< USE gis_ogs;
-< # Lakes
-< INSERT INTO lakes VALUES (
-< 101, 'BLUE LAKE',
-< PolyFromText(
-< 'POLYGON(
-< (52 18,66 23,73 9,48 6,52 18),
-< (59 18,67 18,67 13,59 13,59 18)
-< )',
-< 101));
-< # Road Segments
-< INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
-< INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4,
-< LineFromText(
-< 'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
-< INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 70 38, 72 48 )' ,101));
-< INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4,
-< LineFromText(
-< 'LINESTRING( 70 38, 84 42 )' ,101));
-< INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL,
-< 1,
-< LineFromText(
-< 'LINESTRING( 28 26, 28 0 )',101));
-< # DividedRoutes
-< INSERT INTO divided_routes VALUES(119, 'Route 75', 4,
-< MLineFromText(
-< 'MULTILINESTRING((10 48,10 21,10 0),
-< (16 0,16 23,16 48))', 101));
-< # Forests
-< INSERT INTO forests VALUES(109, 'Green Forest',
-< MPolyFromText(
-< 'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
-< (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
-< 101));
-< # Bridges
-< INSERT INTO bridges VALUES(110, 'Cam Bridge', PointFromText(
-< 'POINT( 44 31 )', 101));
-< # Streams
-< INSERT INTO streams VALUES(111, 'Cam Stream',
-< LineFromText(
-< 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
-< INSERT INTO streams VALUES(112, NULL,
-< LineFromText(
-< 'LINESTRING( 76 0, 78 4, 73 9 )', 101));
-< # Buildings
-< INSERT INTO buildings VALUES(113, '123 Main Street',
-< PointFromText(
-< 'POINT( 52 30 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
-< INSERT INTO buildings VALUES(114, '215 Main Street',
-< PointFromText(
-< 'POINT( 64 33 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
-< # Ponds
-< INSERT INTO ponds VALUES(120, NULL, 'Stock Pond',
-< MPolyFromText(
-< 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
-< ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
-< # Named Places
-< INSERT INTO named_places VALUES(117, 'Ashton',
-< PolyFromText(
-< 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
-< INSERT INTO named_places VALUES(118, 'Goose Island',
-< PolyFromText(
-< 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
-< # Map Neatlines
-< INSERT INTO map_neatlines VALUES(115,
-< PolyFromText(
-< 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
-< SELECT Dimension(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< Dimension(shore)
-< 2
-< SELECT GeometryType(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GeometryType(centerlines)
-< MULTILINESTRING
-< SELECT AsText(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(boundary)
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(PolyFromWKB(AsBinary(boundary),101))
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT SRID(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< SRID(boundary)
-< 101
-< SELECT IsEmpty(centerline)
-< FROM road_segments
-< WHERE name = 'Route 5'
-< AND aliases = 'Main Street';
-< IsEmpty(centerline)
-< 0
-< SELECT AsText(Envelope(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Envelope(boundary))
-< POLYGON((59 13,67 13,67 18,59 18,59 13))
-< SELECT X(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< X(position)
-< 44
-< SELECT Y(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< Y(position)
-< 31
-< SELECT AsText(StartPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(StartPoint(centerline))
-< POINT(0 18)
-< SELECT AsText(EndPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(EndPoint(centerline))
-< POINT(44 31)
-< SELECT GLength(centerline)
-< FROM road_segments
-< WHERE fid = 106;
-< GLength(centerline)
-< 26
-< SELECT NumPoints(centerline)
-< FROM road_segments
-< WHERE fid = 102;
-< NumPoints(centerline)
-< 5
-< SELECT AsText(PointN(centerline, 1))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(PointN(centerline, 1))
-< POINT(0 18)
-< SELECT AsText(Centroid(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Centroid(boundary))
-< POINT(63 15.5)
-< SELECT Area(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< Area(boundary)
-< 40
-< SELECT AsText(ExteriorRing(shore))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(ExteriorRing(shore))
-< LINESTRING(52 18,66 23,73 9,48 6,52 18)
-< SELECT NumInteriorRings(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< NumInteriorRings(shore)
-< 1
-< SELECT AsText(InteriorRingN(shore, 1))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(InteriorRingN(shore, 1))
-< LINESTRING(59 18,67 18,67 13,59 13,59 18)
-< SELECT NumGeometries(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< NumGeometries(centerlines)
-< 2
-< SELECT AsText(GeometryN(centerlines, 2))
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< AsText(GeometryN(centerlines, 2))
-< LINESTRING(16 0,16 23,16 48)
-< SELECT IsClosed(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< IsClosed(centerlines)
-< 0
-< SELECT GLength(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GLength(centerlines)
-< 96
-< SELECT AsText(Centroid(shores))
-< FROM ponds
-< WHERE fid = 120;
-< AsText(Centroid(shores))
-< POINT(25 42)
-< SELECT Area(shores)
-< FROM ponds
-< WHERE fid = 120;
-< Area(shores)
-< 8
-< SELECT ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< 1
-< SELECT ST_Disjoint(centerlines, boundary)
-< FROM divided_routes, named_places
-< WHERE divided_routes.name = 'Route 75'
-< AND named_places.name = 'Ashton';
-< ST_Disjoint(centerlines, boundary)
-< 1
-< SELECT ST_Touches(centerline, shore)
-< FROM streams, lakes
-< WHERE streams.name = 'Cam Stream'
-< AND lakes.name = 'Blue Lake';
-< ST_Touches(centerline, shore)
-< 1
-< SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< Crosses(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Contains(forests.boundary, named_places.boundary)
-< FROM forests, named_places
-< WHERE forests.name = 'Green Forest'
-< AND named_places.name = 'Ashton';
-< ST_Contains(forests.boundary, named_places.boundary)
-< 0
-< SELECT ST_Distance(position, boundary)
-< FROM bridges, named_places
-< WHERE bridges.name = 'Cam Bridge'
-< AND named_places.name = 'Ashton';
-< ST_Distance(position, boundary)
-< 12
-< SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
-< FROM named_places, forests
-< WHERE named_places.name = 'Ashton'
-< AND forests.name = 'Green Forest';
-< AsText(ST_Difference(named_places.boundary, forests.boundary))
-< POLYGON((56 34,62 48,84 48,84 42,56 34))
-< SELECT AsText(ST_Union(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Goose Island';
-< AsText(ST_Union(shore, boundary))
-< POLYGON((48 6,52 18,66 23,73 9,48 6))
-< SELECT AsText(ST_SymDifference(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Ashton';
-< AsText(ST_SymDifference(shore, boundary))
-< MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
-< SELECT count(*)
-< FROM buildings, bridges
-< WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
-< count(*)
-< 1
----
-> ERROR 42000: The storage engine for the table doesn't support GEOMETRY
-> # ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT, /*!INDEX*/ /*Custom index*/ g(g(128))) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
-> # The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
-> # Geometry types or indexes on them or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
-705,1398c21,30
-< CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE gis_ogs;
-< CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< num_lanes INT <CUSTOM_COL_OPTIONS>,
-< centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< position POINT NOT NULL,
-< footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< type CHAR(64) <CUSTOM_COL_OPTIONS>,
-< shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
-< name CHAR(64) <CUSTOM_COL_OPTIONS>,
-< boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
-< neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< USE test;
-< SHOW FIELDS FROM gis_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g point NO MUL NULL
-< SHOW FIELDS FROM gis_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g linestring NO MUL NULL
-< SHOW FIELDS FROM gis_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g polygon NO MUL NULL
-< SHOW FIELDS FROM gis_multi_point;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipoint NO MUL NULL
-< SHOW FIELDS FROM gis_multi_line;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multilinestring NO MUL NULL
-< SHOW FIELDS FROM gis_multi_polygon;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g multipolygon NO MUL NULL
-< SHOW FIELDS FROM gis_geometrycollection;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometrycollection NO MUL NULL
-< SHOW FIELDS FROM gis_geometry;
-< Field Type Null Key Default Extra
-< fid int(11) YES NULL
-< g geometry NO NULL
-< INSERT INTO gis_point VALUES
-< (101, PointFromText('POINT(10 10)')),
-< (102, PointFromText('POINT(20 10)')),
-< (103, PointFromText('POINT(20 20)')),
-< (104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
-< INSERT INTO gis_line VALUES
-< (105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
-< (106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
-< (107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
-< INSERT INTO gis_polygon VALUES
-< (108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
-< (109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
-< (110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
-< INSERT INTO gis_multi_point VALUES
-< (111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
-< (112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
-< (113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
-< INSERT INTO gis_multi_line VALUES
-< (114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
-< (115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
-< (116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
-< INSERT INTO gis_multi_polygon VALUES
-< (117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
-< (119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
-< INSERT INTO gis_geometrycollection VALUES
-< (120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
-< (121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
-< (122, GeomFromText('GeometryCollection()')),
-< (123, GeomFromText('GeometryCollection EMPTY'));
-< INSERT into gis_geometry SELECT * FROM gis_point;
-< INSERT into gis_geometry SELECT * FROM gis_line;
-< INSERT into gis_geometry SELECT * FROM gis_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_multi_point;
-< INSERT into gis_geometry SELECT * FROM gis_multi_line;
-< INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
-< INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
-< SELECT fid, AsText(g) FROM gis_point;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< SELECT fid, AsText(g) FROM gis_line;
-< fid AsText(g)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< SELECT fid, AsText(g) FROM gis_polygon;
-< fid AsText(g)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< SELECT fid, AsText(g) FROM gis_multi_point;
-< fid AsText(g)
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< SELECT fid, AsText(g) FROM gis_multi_line;
-< fid AsText(g)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< SELECT fid, AsText(g) FROM gis_multi_polygon;
-< fid AsText(g)
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< SELECT fid, AsText(g) FROM gis_geometrycollection;
-< fid AsText(g)
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, AsText(g) FROM gis_geometry;
-< fid AsText(g)
-< 101 POINT(10 10)
-< 102 POINT(20 10)
-< 103 POINT(20 20)
-< 104 POINT(10 20)
-< 105 LINESTRING(0 0,0 10,10 0)
-< 106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 107 LINESTRING(10 10,40 10)
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
-< 110 POLYGON((0 0,30 0,30 30,0 0))
-< 111 MULTIPOINT(0 0,10 10,10 20,20 20)
-< 112 MULTIPOINT(1 1,11 11,11 21,21 21)
-< 113 MULTIPOINT(3 6,4 10)
-< 114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
-< 115 MULTILINESTRING((10 48,10 21,10 0))
-< 116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
-< 117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
-< 119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
-< 120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
-< 121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, Dimension(g) FROM gis_geometry;
-< fid Dimension(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 1
-< 106 1
-< 107 1
-< 108 2
-< 109 2
-< 110 2
-< 111 0
-< 112 0
-< 113 0
-< 114 1
-< 115 1
-< 116 1
-< 117 2
-< 118 2
-< 119 2
-< 120 1
-< 121 1
-< 122 0
-< 123 0
-< SELECT fid, GeometryType(g) FROM gis_geometry;
-< fid GeometryType(g)
-< 101 POINT
-< 102 POINT
-< 103 POINT
-< 104 POINT
-< 105 LINESTRING
-< 106 LINESTRING
-< 107 LINESTRING
-< 108 POLYGON
-< 109 POLYGON
-< 110 POLYGON
-< 111 MULTIPOINT
-< 112 MULTIPOINT
-< 113 MULTIPOINT
-< 114 MULTILINESTRING
-< 115 MULTILINESTRING
-< 116 MULTILINESTRING
-< 117 MULTIPOLYGON
-< 118 MULTIPOLYGON
-< 119 MULTIPOLYGON
-< 120 GEOMETRYCOLLECTION
-< 121 GEOMETRYCOLLECTION
-< 122 GEOMETRYCOLLECTION
-< 123 GEOMETRYCOLLECTION
-< SELECT fid, IsEmpty(g) FROM gis_geometry;
-< fid IsEmpty(g)
-< 101 0
-< 102 0
-< 103 0
-< 104 0
-< 105 0
-< 106 0
-< 107 0
-< 108 0
-< 109 0
-< 110 0
-< 111 0
-< 112 0
-< 113 0
-< 114 0
-< 115 0
-< 116 0
-< 117 0
-< 118 0
-< 119 0
-< 120 0
-< 121 0
-< 122 0
-< 123 0
-< SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
-< fid AsText(Envelope(g))
-< 101 POLYGON((10 10,10 10,10 10,10 10,10 10))
-< 102 POLYGON((20 10,20 10,20 10,20 10,20 10))
-< 103 POLYGON((20 20,20 20,20 20,20 20,20 20))
-< 104 POLYGON((10 20,10 20,10 20,10 20,10 20))
-< 105 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 106 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 107 POLYGON((10 10,40 10,40 10,10 10,10 10))
-< 108 POLYGON((10 10,20 10,20 20,10 20,10 10))
-< 109 POLYGON((0 0,50 0,50 50,0 50,0 0))
-< 110 POLYGON((0 0,30 0,30 30,0 30,0 0))
-< 111 POLYGON((0 0,20 0,20 20,0 20,0 0))
-< 112 POLYGON((1 1,21 1,21 21,1 21,1 1))
-< 113 POLYGON((3 6,4 6,4 10,3 10,3 6))
-< 114 POLYGON((10 0,16 0,16 48,10 48,10 0))
-< 115 POLYGON((10 0,10 0,10 48,10 48,10 0))
-< 116 POLYGON((1 2,21 2,21 8,1 8,1 2))
-< 117 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 118 POLYGON((28 0,84 0,84 42,28 42,28 0))
-< 119 POLYGON((0 0,3 0,3 3,0 3,0 0))
-< 120 POLYGON((0 0,10 0,10 10,0 10,0 0))
-< 121 POLYGON((3 6,44 6,44 9,3 9,3 6))
-< 122 GEOMETRYCOLLECTION EMPTY
-< 123 GEOMETRYCOLLECTION EMPTY
-< SELECT fid, X(g) FROM gis_point;
-< fid X(g)
-< 101 10
-< 102 20
-< 103 20
-< 104 10
-< SELECT fid, Y(g) FROM gis_point;
-< fid Y(g)
-< 101 10
-< 102 10
-< 103 20
-< 104 20
-< SELECT fid, AsText(StartPoint(g)) FROM gis_line;
-< fid AsText(StartPoint(g))
-< 105 POINT(0 0)
-< 106 POINT(10 10)
-< 107 POINT(10 10)
-< SELECT fid, AsText(EndPoint(g)) FROM gis_line;
-< fid AsText(EndPoint(g))
-< 105 POINT(10 0)
-< 106 POINT(10 10)
-< 107 POINT(40 10)
-< SELECT fid, GLength(g) FROM gis_line;
-< fid GLength(g)
-< 105 24.14213562373095
-< 106 40
-< 107 30
-< SELECT fid, NumPoints(g) FROM gis_line;
-< fid NumPoints(g)
-< 105 3
-< 106 5
-< 107 2
-< SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
-< fid AsText(PointN(g, 2))
-< 105 POINT(0 10)
-< 106 POINT(20 10)
-< 107 POINT(40 10)
-< SELECT fid, IsClosed(g) FROM gis_line;
-< fid IsClosed(g)
-< 105 0
-< 106 1
-< 107 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
-< fid AsText(Centroid(g))
-< 108 POINT(15 15)
-< 109 POINT(25.416666666666668 25.416666666666668)
-< 110 POINT(20 10)
-< SELECT fid, Area(g) FROM gis_polygon;
-< fid Area(g)
-< 108 100
-< 109 2400
-< 110 450
-< SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
-< fid AsText(ExteriorRing(g))
-< 108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
-< 110 LINESTRING(0 0,30 0,30 30,0 0)
-< SELECT fid, NumInteriorRings(g) FROM gis_polygon;
-< fid NumInteriorRings(g)
-< 108 0
-< 109 1
-< 110 0
-< SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
-< fid AsText(InteriorRingN(g, 1))
-< 108 NULL
-< 109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
-< 110 NULL
-< SELECT fid, IsClosed(g) FROM gis_multi_line;
-< fid IsClosed(g)
-< 114 0
-< 115 0
-< 116 0
-< SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
-< fid AsText(Centroid(g))
-< 117 POINT(55.58852775304245 17.426536064113982)
-< 118 POINT(55.58852775304245 17.426536064113982)
-< 119 POINT(2 2)
-< SELECT fid, Area(g) FROM gis_multi_polygon;
-< fid Area(g)
-< 117 1684.5
-< 118 1684.5
-< 119 4.5
-< SELECT fid, NumGeometries(g) from gis_multi_point;
-< fid NumGeometries(g)
-< 111 4
-< 112 4
-< 113 2
-< SELECT fid, NumGeometries(g) from gis_multi_line;
-< fid NumGeometries(g)
-< 114 2
-< 115 1
-< 116 2
-< SELECT fid, NumGeometries(g) from gis_multi_polygon;
-< fid NumGeometries(g)
-< 117 2
-< 118 2
-< 119 1
-< SELECT fid, NumGeometries(g) from gis_geometrycollection;
-< fid NumGeometries(g)
-< 120 2
-< 121 2
-< 122 0
-< 123 0
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
-< fid AsText(GeometryN(g, 2))
-< 111 POINT(10 10)
-< 112 POINT(11 11)
-< 113 POINT(4 10)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
-< fid AsText(GeometryN(g, 2))
-< 114 LINESTRING(16 0,16 23,16 48)
-< 115 NULL
-< 116 LINESTRING(2 5,5 8,21 7)
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
-< fid AsText(GeometryN(g, 2))
-< 117 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 118 POLYGON((59 18,67 18,67 13,59 13,59 18))
-< 119 NULL
-< SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 2))
-< 120 LINESTRING(0 0,10 10)
-< 121 LINESTRING(3 6,7 9)
-< 122 NULL
-< 123 NULL
-< SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
-< fid AsText(GeometryN(g, 1))
-< 120 POINT(0 0)
-< 121 POINT(44 6)
-< 122 NULL
-< 123 NULL
-< SELECT g1.fid as first, g2.fid as second,
-< Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
-< Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
-< Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
-< FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
-< first second w c o e d t i r
-< 120 120 1 1 0 1 0 1 1 0
-< 120 121 0 0 1 0 0 0 1 0
-< 120 122 0 1 NULL 0 NULL 0 NULL 0
-< 120 123 0 1 NULL 0 NULL 0 NULL 0
-< 121 120 0 0 1 0 0 0 1 0
-< 121 121 1 1 0 1 0 1 1 0
-< 121 122 0 1 NULL 0 NULL 0 NULL 0
-< 121 123 0 1 NULL 0 NULL 0 NULL 0
-< 122 120 1 0 NULL 0 NULL 0 NULL 0
-< 122 121 1 0 NULL 0 NULL 0 NULL 0
-< 122 122 1 1 NULL 1 NULL 0 NULL 0
-< 122 123 1 1 NULL 1 NULL 0 NULL 0
-< 123 120 1 0 NULL 0 NULL 0 NULL 0
-< 123 121 1 0 NULL 0 NULL 0 NULL 0
-< 123 122 1 1 NULL 1 NULL 0 NULL 0
-< 123 123 1 1 NULL 1 NULL 0 NULL 0
-< DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
-< USE gis_ogs;
-< # Lakes
-< INSERT INTO lakes VALUES (
-< 101, 'BLUE LAKE',
-< PolyFromText(
-< 'POLYGON(
-< (52 18,66 23,73 9,48 6,52 18),
-< (59 18,67 18,67 13,59 13,59 18)
-< )',
-< 101));
-< # Road Segments
-< INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
-< INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4,
-< LineFromText(
-< 'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
-< INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2,
-< LineFromText(
-< 'LINESTRING( 70 38, 72 48 )' ,101));
-< INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4,
-< LineFromText(
-< 'LINESTRING( 70 38, 84 42 )' ,101));
-< INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL,
-< 1,
-< LineFromText(
-< 'LINESTRING( 28 26, 28 0 )',101));
-< # DividedRoutes
-< INSERT INTO divided_routes VALUES(119, 'Route 75', 4,
-< MLineFromText(
-< 'MULTILINESTRING((10 48,10 21,10 0),
-< (16 0,16 23,16 48))', 101));
-< # Forests
-< INSERT INTO forests VALUES(109, 'Green Forest',
-< MPolyFromText(
-< 'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
-< (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
-< 101));
-< # Bridges
-< INSERT INTO bridges VALUES(110, 'Cam Bridge', PointFromText(
-< 'POINT( 44 31 )', 101));
-< # Streams
-< INSERT INTO streams VALUES(111, 'Cam Stream',
-< LineFromText(
-< 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
-< INSERT INTO streams VALUES(112, NULL,
-< LineFromText(
-< 'LINESTRING( 76 0, 78 4, 73 9 )', 101));
-< # Buildings
-< INSERT INTO buildings VALUES(113, '123 Main Street',
-< PointFromText(
-< 'POINT( 52 30 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
-< INSERT INTO buildings VALUES(114, '215 Main Street',
-< PointFromText(
-< 'POINT( 64 33 )', 101),
-< PolyFromText(
-< 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
-< # Ponds
-< INSERT INTO ponds VALUES(120, NULL, 'Stock Pond',
-< MPolyFromText(
-< 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
-< ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
-< # Named Places
-< INSERT INTO named_places VALUES(117, 'Ashton',
-< PolyFromText(
-< 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
-< INSERT INTO named_places VALUES(118, 'Goose Island',
-< PolyFromText(
-< 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
-< # Map Neatlines
-< INSERT INTO map_neatlines VALUES(115,
-< PolyFromText(
-< 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
-< SELECT Dimension(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< Dimension(shore)
-< 2
-< SELECT GeometryType(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GeometryType(centerlines)
-< MULTILINESTRING
-< SELECT AsText(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(boundary)
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(PolyFromWKB(AsBinary(boundary),101))
-< POLYGON((67 13,67 18,59 18,59 13,67 13))
-< SELECT SRID(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< SRID(boundary)
-< 101
-< SELECT IsEmpty(centerline)
-< FROM road_segments
-< WHERE name = 'Route 5'
-< AND aliases = 'Main Street';
-< IsEmpty(centerline)
-< 0
-< SELECT AsText(Envelope(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Envelope(boundary))
-< POLYGON((59 13,67 13,67 18,59 18,59 13))
-< SELECT X(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< X(position)
-< 44
-< SELECT Y(position)
-< FROM bridges
-< WHERE name = 'Cam Bridge';
-< Y(position)
-< 31
-< SELECT AsText(StartPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(StartPoint(centerline))
-< POINT(0 18)
-< SELECT AsText(EndPoint(centerline))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(EndPoint(centerline))
-< POINT(44 31)
-< SELECT GLength(centerline)
-< FROM road_segments
-< WHERE fid = 106;
-< GLength(centerline)
-< 26
-< SELECT NumPoints(centerline)
-< FROM road_segments
-< WHERE fid = 102;
-< NumPoints(centerline)
-< 5
-< SELECT AsText(PointN(centerline, 1))
-< FROM road_segments
-< WHERE fid = 102;
-< AsText(PointN(centerline, 1))
-< POINT(0 18)
-< SELECT AsText(Centroid(boundary))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< AsText(Centroid(boundary))
-< POINT(63 15.5)
-< SELECT Area(boundary)
-< FROM named_places
-< WHERE name = 'Goose Island';
-< Area(boundary)
-< 40
-< SELECT AsText(ExteriorRing(shore))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(ExteriorRing(shore))
-< LINESTRING(52 18,66 23,73 9,48 6,52 18)
-< SELECT NumInteriorRings(shore)
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< NumInteriorRings(shore)
-< 1
-< SELECT AsText(InteriorRingN(shore, 1))
-< FROM lakes
-< WHERE name = 'Blue Lake';
-< AsText(InteriorRingN(shore, 1))
-< LINESTRING(59 18,67 18,67 13,59 13,59 18)
-< SELECT NumGeometries(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< NumGeometries(centerlines)
-< 2
-< SELECT AsText(GeometryN(centerlines, 2))
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< AsText(GeometryN(centerlines, 2))
-< LINESTRING(16 0,16 23,16 48)
-< SELECT IsClosed(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< IsClosed(centerlines)
-< 0
-< SELECT GLength(centerlines)
-< FROM divided_routes
-< WHERE name = 'Route 75';
-< GLength(centerlines)
-< 96
-< SELECT AsText(Centroid(shores))
-< FROM ponds
-< WHERE fid = 120;
-< AsText(Centroid(shores))
-< POINT(25 42)
-< SELECT Area(shores)
-< FROM ponds
-< WHERE fid = 120;
-< Area(shores)
-< 8
-< SELECT ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< FROM named_places
-< WHERE name = 'Goose Island';
-< ST_Equals(boundary,
-< PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
-< 1
-< SELECT ST_Disjoint(centerlines, boundary)
-< FROM divided_routes, named_places
-< WHERE divided_routes.name = 'Route 75'
-< AND named_places.name = 'Ashton';
-< ST_Disjoint(centerlines, boundary)
-< 1
-< SELECT ST_Touches(centerline, shore)
-< FROM streams, lakes
-< WHERE streams.name = 'Cam Stream'
-< AND lakes.name = 'Blue Lake';
-< ST_Touches(centerline, shore)
-< 1
-< SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< Crosses(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< FROM road_segments, divided_routes
-< WHERE road_segments.fid = 102
-< AND divided_routes.name = 'Route 75';
-< ST_Intersects(road_segments.centerline, divided_routes.centerlines)
-< 1
-< SELECT ST_Contains(forests.boundary, named_places.boundary)
-< FROM forests, named_places
-< WHERE forests.name = 'Green Forest'
-< AND named_places.name = 'Ashton';
-< ST_Contains(forests.boundary, named_places.boundary)
-< 0
-< SELECT ST_Distance(position, boundary)
-< FROM bridges, named_places
-< WHERE bridges.name = 'Cam Bridge'
-< AND named_places.name = 'Ashton';
-< ST_Distance(position, boundary)
-< 12
-< SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
-< FROM named_places, forests
-< WHERE named_places.name = 'Ashton'
-< AND forests.name = 'Green Forest';
-< AsText(ST_Difference(named_places.boundary, forests.boundary))
-< POLYGON((56 34,62 48,84 48,84 42,56 34))
-< SELECT AsText(ST_Union(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Goose Island';
-< AsText(ST_Union(shore, boundary))
-< POLYGON((48 6,52 18,66 23,73 9,48 6))
-< SELECT AsText(ST_SymDifference(shore, boundary))
-< FROM lakes, named_places
-< WHERE lakes.name = 'Blue Lake'
-< AND named_places.name = 'Ashton';
-< AsText(ST_SymDifference(shore, boundary))
-< MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
-< SELECT count(*)
-< FROM buildings, bridges
-< WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
-< count(*)
-< 1
----
-> ERROR 42000: The storage engine for the table doesn't support GEOMETRY
-> # ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
-> # The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
-> # Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- type_spatial_indexes.result 2013-01-23 01:25:45.367797786 +0400
++++ type_spatial_indexes.reject 2013-01-23 02:51:15.247306467 +0400
+@@ -2,1399 +2,31 @@
+ DROP DATABASE IF EXISTS gis_ogs;
+ CREATE DATABASE gis_ogs;
+ CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT, <CUSTOM_INDEX> g(g(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING, <CUSTOM_INDEX> g(g(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON, <CUSTOM_INDEX> g(g(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT, <CUSTOM_INDEX> g(g(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING, <CUSTOM_INDEX> g(g(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE gis_ogs;
+-CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shore POLYGON, <CUSTOM_INDEX> s(shore(64))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING, <CUSTOM_INDEX> c(centerline(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerlines MULTILINESTRING, <CUSTOM_INDEX> c(centerlines(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary MULTIPOLYGON, <CUSTOM_INDEX> b(boundary(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT, <CUSTOM_INDEX> p(`position`(64))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING, <CUSTOM_INDEX> c(centerline(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT,
+-footprint POLYGON, <CUSTOM_INDEX> p(`position`(64)), <CUSTOM_INDEX> f(footprint(128))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-type CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shores MULTIPOLYGON, <CUSTOM_INDEX> s(shores(256))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary POLYGON, <CUSTOM_INDEX> b(boundary(512))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
+-neatline POLYGON, <CUSTOM_INDEX> n(neatline(700))) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE test;
+-SHOW FIELDS FROM gis_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g point YES MUL NULL
+-SHOW FIELDS FROM gis_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g linestring YES MUL NULL
+-SHOW FIELDS FROM gis_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g polygon YES MUL NULL
+-SHOW FIELDS FROM gis_multi_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipoint YES MUL NULL
+-SHOW FIELDS FROM gis_multi_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multilinestring YES MUL NULL
+-SHOW FIELDS FROM gis_multi_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipolygon YES NULL
+-SHOW FIELDS FROM gis_geometrycollection;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometrycollection YES NULL
+-SHOW FIELDS FROM gis_geometry;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometry YES NULL
+-INSERT INTO gis_point (fid,g) VALUES
+-(101, PointFromText('POINT(10 10)')),
+-(102, PointFromText('POINT(20 10)')),
+-(103, PointFromText('POINT(20 20)')),
+-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+-INSERT INTO gis_line (fid,g) VALUES
+-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
+-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
+-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
+-INSERT INTO gis_polygon (fid,g) VALUES
+-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
+-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
+-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
+-INSERT INTO gis_multi_point (fid,g) VALUES
+-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
+-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
+-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
+-INSERT INTO gis_multi_line (fid,g) VALUES
+-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
+-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
+-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
+-INSERT INTO gis_multi_polygon (fid,g) VALUES
+-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+-INSERT INTO gis_geometrycollection (fid,g) VALUES
+-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
+-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
+-(122, GeomFromText('GeometryCollection()')),
+-(123, GeomFromText('GeometryCollection EMPTY'));
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
+-SELECT fid, AsText(g) FROM gis_point;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-SELECT fid, AsText(g) FROM gis_line;
+-fid AsText(g)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-SELECT fid, AsText(g) FROM gis_polygon;
+-fid AsText(g)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-SELECT fid, AsText(g) FROM gis_multi_point;
+-fid AsText(g)
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-SELECT fid, AsText(g) FROM gis_multi_line;
+-fid AsText(g)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-SELECT fid, AsText(g) FROM gis_multi_polygon;
+-fid AsText(g)
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-SELECT fid, AsText(g) FROM gis_geometrycollection;
+-fid AsText(g)
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, AsText(g) FROM gis_geometry;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, Dimension(g) FROM gis_geometry;
+-fid Dimension(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 1
+-106 1
+-107 1
+-108 2
+-109 2
+-110 2
+-111 0
+-112 0
+-113 0
+-114 1
+-115 1
+-116 1
+-117 2
+-118 2
+-119 2
+-120 1
+-121 1
+-122 0
+-123 0
+-SELECT fid, GeometryType(g) FROM gis_geometry;
+-fid GeometryType(g)
+-101 POINT
+-102 POINT
+-103 POINT
+-104 POINT
+-105 LINESTRING
+-106 LINESTRING
+-107 LINESTRING
+-108 POLYGON
+-109 POLYGON
+-110 POLYGON
+-111 MULTIPOINT
+-112 MULTIPOINT
+-113 MULTIPOINT
+-114 MULTILINESTRING
+-115 MULTILINESTRING
+-116 MULTILINESTRING
+-117 MULTIPOLYGON
+-118 MULTIPOLYGON
+-119 MULTIPOLYGON
+-120 GEOMETRYCOLLECTION
+-121 GEOMETRYCOLLECTION
+-122 GEOMETRYCOLLECTION
+-123 GEOMETRYCOLLECTION
+-SELECT fid, IsEmpty(g) FROM gis_geometry;
+-fid IsEmpty(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 0
+-106 0
+-107 0
+-108 0
+-109 0
+-110 0
+-111 0
+-112 0
+-113 0
+-114 0
+-115 0
+-116 0
+-117 0
+-118 0
+-119 0
+-120 0
+-121 0
+-122 0
+-123 0
+-SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
+-fid AsText(Envelope(g))
+-101 POLYGON((10 10,10 10,10 10,10 10,10 10))
+-102 POLYGON((20 10,20 10,20 10,20 10,20 10))
+-103 POLYGON((20 20,20 20,20 20,20 20,20 20))
+-104 POLYGON((10 20,10 20,10 20,10 20,10 20))
+-105 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-106 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-107 POLYGON((10 10,40 10,40 10,10 10,10 10))
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0))
+-110 POLYGON((0 0,30 0,30 30,0 30,0 0))
+-111 POLYGON((0 0,20 0,20 20,0 20,0 0))
+-112 POLYGON((1 1,21 1,21 21,1 21,1 1))
+-113 POLYGON((3 6,4 6,4 10,3 10,3 6))
+-114 POLYGON((10 0,16 0,16 48,10 48,10 0))
+-115 POLYGON((10 0,10 0,10 48,10 48,10 0))
+-116 POLYGON((1 2,21 2,21 8,1 8,1 2))
+-117 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-118 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-119 POLYGON((0 0,3 0,3 3,0 3,0 0))
+-120 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-121 POLYGON((3 6,44 6,44 9,3 9,3 6))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, X(g) FROM gis_point;
+-fid X(g)
+-101 10
+-102 20
+-103 20
+-104 10
+-SELECT fid, Y(g) FROM gis_point;
+-fid Y(g)
+-101 10
+-102 10
+-103 20
+-104 20
+-SELECT fid, AsText(StartPoint(g)) FROM gis_line;
+-fid AsText(StartPoint(g))
+-105 POINT(0 0)
+-106 POINT(10 10)
+-107 POINT(10 10)
+-SELECT fid, AsText(EndPoint(g)) FROM gis_line;
+-fid AsText(EndPoint(g))
+-105 POINT(10 0)
+-106 POINT(10 10)
+-107 POINT(40 10)
+-SELECT fid, GLength(g) FROM gis_line;
+-fid GLength(g)
+-105 24.14213562373095
+-106 40
+-107 30
+-SELECT fid, NumPoints(g) FROM gis_line;
+-fid NumPoints(g)
+-105 3
+-106 5
+-107 2
+-SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
+-fid AsText(PointN(g, 2))
+-105 POINT(0 10)
+-106 POINT(20 10)
+-107 POINT(40 10)
+-SELECT fid, IsClosed(g) FROM gis_line;
+-fid IsClosed(g)
+-105 0
+-106 1
+-107 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
+-fid AsText(Centroid(g))
+-108 POINT(15 15)
+-109 POINT(25.416666666666668 25.416666666666668)
+-110 POINT(20 10)
+-SELECT fid, Area(g) FROM gis_polygon;
+-fid Area(g)
+-108 100
+-109 2400
+-110 450
+-SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
+-fid AsText(ExteriorRing(g))
+-108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
+-110 LINESTRING(0 0,30 0,30 30,0 0)
+-SELECT fid, NumInteriorRings(g) FROM gis_polygon;
+-fid NumInteriorRings(g)
+-108 0
+-109 1
+-110 0
+-SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
+-fid AsText(InteriorRingN(g, 1))
+-108 NULL
+-109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-110 NULL
+-SELECT fid, IsClosed(g) FROM gis_multi_line;
+-fid IsClosed(g)
+-114 0
+-115 0
+-116 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
+-fid AsText(Centroid(g))
+-117 POINT(55.58852775304245 17.426536064113982)
+-118 POINT(55.58852775304245 17.426536064113982)
+-119 POINT(2 2)
+-SELECT fid, Area(g) FROM gis_multi_polygon;
+-fid Area(g)
+-117 1684.5
+-118 1684.5
+-119 4.5
+-SELECT fid, NumGeometries(g) from gis_multi_point;
+-fid NumGeometries(g)
+-111 4
+-112 4
+-113 2
+-SELECT fid, NumGeometries(g) from gis_multi_line;
+-fid NumGeometries(g)
+-114 2
+-115 1
+-116 2
+-SELECT fid, NumGeometries(g) from gis_multi_polygon;
+-fid NumGeometries(g)
+-117 2
+-118 2
+-119 1
+-SELECT fid, NumGeometries(g) from gis_geometrycollection;
+-fid NumGeometries(g)
+-120 2
+-121 2
+-122 0
+-123 0
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
+-fid AsText(GeometryN(g, 2))
+-111 POINT(10 10)
+-112 POINT(11 11)
+-113 POINT(4 10)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
+-fid AsText(GeometryN(g, 2))
+-114 LINESTRING(16 0,16 23,16 48)
+-115 NULL
+-116 LINESTRING(2 5,5 8,21 7)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
+-fid AsText(GeometryN(g, 2))
+-117 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-118 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-119 NULL
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 2))
+-120 LINESTRING(0 0,10 10)
+-121 LINESTRING(3 6,7 9)
+-122 NULL
+-123 NULL
+-SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 1))
+-120 POINT(0 0)
+-121 POINT(44 6)
+-122 NULL
+-123 NULL
+-SELECT g1.fid as first, g2.fid as second,
+-Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
+-Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
+-Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
+-FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
+-first second w c o e d t i r
+-120 120 1 1 0 1 0 1 1 0
+-120 121 0 0 1 0 0 0 1 0
+-120 122 0 1 NULL 0 NULL 0 NULL 0
+-120 123 0 1 NULL 0 NULL 0 NULL 0
+-121 120 0 0 1 0 0 0 1 0
+-121 121 1 1 0 1 0 1 1 0
+-121 122 0 1 NULL 0 NULL 0 NULL 0
+-121 123 0 1 NULL 0 NULL 0 NULL 0
+-122 120 1 0 NULL 0 NULL 0 NULL 0
+-122 121 1 0 NULL 0 NULL 0 NULL 0
+-122 122 1 1 NULL 1 NULL 0 NULL 0
+-122 123 1 1 NULL 1 NULL 0 NULL 0
+-123 120 1 0 NULL 0 NULL 0 NULL 0
+-123 121 1 0 NULL 0 NULL 0 NULL 0
+-123 122 1 1 NULL 1 NULL 0 NULL 0
+-123 123 1 1 NULL 1 NULL 0 NULL 0
+-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
+-USE gis_ogs;
+-# Lakes
+-INSERT INTO lakes (fid,name,shore) VALUES (
+-101, 'BLUE LAKE',
+-PolyFromText(
+-'POLYGON(
+- (52 18,66 23,73 9,48 6,52 18),
+- (59 18,67 18,67 13,59 13,59 18)
+- )',
+-101));
+-# Road Segments
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
+-LineFromText(
+-'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 70 38, 72 48 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
+-LineFromText(
+-'LINESTRING( 70 38, 84 42 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
+-1,
+-LineFromText(
+-'LINESTRING( 28 26, 28 0 )',101));
+-# DividedRoutes
+-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
+-MLineFromText(
+-'MULTILINESTRING((10 48,10 21,10 0),
+- (16 0,16 23,16 48))', 101));
+-# Forests
+-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
+-MPolyFromText(
+-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
+- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
+-101));
+-# Bridges
+-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
+-'POINT( 44 31 )', 101));
+-# Streams
+-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
+-LineFromText(
+-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
+-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
+-LineFromText(
+-'LINESTRING( 76 0, 78 4, 73 9 )', 101));
+-# Buildings
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
+-PointFromText(
+-'POINT( 52 30 )', 101),
+-PolyFromText(
+-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
+-PointFromText(
+-'POINT( 64 33 )', 101),
+-PolyFromText(
+-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
+-# Ponds
+-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
+-MPolyFromText(
+-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
+- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
+-# Named Places
+-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
+-PolyFromText(
+-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
+-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
+-PolyFromText(
+-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
+-# Map Neatlines
+-INSERT INTO map_neatlines (fid,neatline) VALUES(115,
+-PolyFromText(
+-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
+-SELECT Dimension(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-Dimension(shore)
+-2
+-SELECT GeometryType(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GeometryType(centerlines)
+-MULTILINESTRING
+-SELECT AsText(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(boundary)
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(PolyFromWKB(AsBinary(boundary),101))
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT SRID(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-SRID(boundary)
+-101
+-SELECT IsEmpty(centerline)
+-FROM road_segments
+-WHERE name = 'Route 5'
+-AND aliases = 'Main Street';
+-IsEmpty(centerline)
+-0
+-SELECT AsText(Envelope(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Envelope(boundary))
+-POLYGON((59 13,67 13,67 18,59 18,59 13))
+-SELECT X(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-X(position)
+-44
+-SELECT Y(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-Y(position)
+-31
+-SELECT AsText(StartPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(StartPoint(centerline))
+-POINT(0 18)
+-SELECT AsText(EndPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(EndPoint(centerline))
+-POINT(44 31)
+-SELECT GLength(centerline)
+-FROM road_segments
+-WHERE fid = 106;
+-GLength(centerline)
+-26
+-SELECT NumPoints(centerline)
+-FROM road_segments
+-WHERE fid = 102;
+-NumPoints(centerline)
+-5
+-SELECT AsText(PointN(centerline, 1))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(PointN(centerline, 1))
+-POINT(0 18)
+-SELECT AsText(Centroid(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Centroid(boundary))
+-POINT(63 15.5)
+-SELECT Area(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-Area(boundary)
+-40
+-SELECT AsText(ExteriorRing(shore))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(ExteriorRing(shore))
+-LINESTRING(52 18,66 23,73 9,48 6,52 18)
+-SELECT NumInteriorRings(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-NumInteriorRings(shore)
+-1
+-SELECT AsText(InteriorRingN(shore, 1))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(InteriorRingN(shore, 1))
+-LINESTRING(59 18,67 18,67 13,59 13,59 18)
+-SELECT NumGeometries(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-NumGeometries(centerlines)
+-2
+-SELECT AsText(GeometryN(centerlines, 2))
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-AsText(GeometryN(centerlines, 2))
+-LINESTRING(16 0,16 23,16 48)
+-SELECT IsClosed(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-IsClosed(centerlines)
+-0
+-SELECT GLength(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GLength(centerlines)
+-96
+-SELECT AsText(Centroid(shores))
+-FROM ponds
+-WHERE fid = 120;
+-AsText(Centroid(shores))
+-POINT(25 42)
+-SELECT Area(shores)
+-FROM ponds
+-WHERE fid = 120;
+-Area(shores)
+-8
+-SELECT ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-1
+-SELECT ST_Disjoint(centerlines, boundary)
+-FROM divided_routes, named_places
+-WHERE divided_routes.name = 'Route 75'
+-AND named_places.name = 'Ashton';
+-ST_Disjoint(centerlines, boundary)
+-1
+-SELECT ST_Touches(centerline, shore)
+-FROM streams, lakes
+-WHERE streams.name = 'Cam Stream'
+-AND lakes.name = 'Blue Lake';
+-ST_Touches(centerline, shore)
+-1
+-SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-Crosses(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Contains(forests.boundary, named_places.boundary)
+-FROM forests, named_places
+-WHERE forests.name = 'Green Forest'
+-AND named_places.name = 'Ashton';
+-ST_Contains(forests.boundary, named_places.boundary)
+-0
+-SELECT ST_Distance(position, boundary)
+-FROM bridges, named_places
+-WHERE bridges.name = 'Cam Bridge'
+-AND named_places.name = 'Ashton';
+-ST_Distance(position, boundary)
+-12
+-SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
+-FROM named_places, forests
+-WHERE named_places.name = 'Ashton'
+-AND forests.name = 'Green Forest';
+-AsText(ST_Difference(named_places.boundary, forests.boundary))
+-POLYGON((56 34,62 48,84 48,84 42,56 34))
+-SELECT AsText(ST_Union(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Goose Island';
+-AsText(ST_Union(shore, boundary))
+-POLYGON((48 6,52 18,66 23,73 9,48 6))
+-SELECT AsText(ST_SymDifference(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Ashton';
+-AsText(ST_SymDifference(shore, boundary))
+-MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
+-SELECT count(*)
+-FROM buildings, bridges
+-WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
+-count(*)
+-1
++ERROR 42000: The storage engine for the table doesn't support GEOMETRY
++# ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT, /*!INDEX*/ /*Custom index*/ g(g(128))) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
++# The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
++# Geometry types or indexes on them or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP DATABASE gis_ogs;
+ USE test;
+ DROP TABLE IF EXISTS t1, gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
+ DROP DATABASE IF EXISTS gis_ogs;
+ CREATE DATABASE gis_ogs;
+ CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE gis_ogs;
+-CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT NOT NULL,
+-footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-type CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
+-neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE test;
+-SHOW FIELDS FROM gis_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g point NO MUL NULL
+-SHOW FIELDS FROM gis_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g linestring NO MUL NULL
+-SHOW FIELDS FROM gis_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g polygon NO MUL NULL
+-SHOW FIELDS FROM gis_multi_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipoint NO MUL NULL
+-SHOW FIELDS FROM gis_multi_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multilinestring NO MUL NULL
+-SHOW FIELDS FROM gis_multi_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipolygon NO MUL NULL
+-SHOW FIELDS FROM gis_geometrycollection;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometrycollection NO MUL NULL
+-SHOW FIELDS FROM gis_geometry;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometry NO NULL
+-INSERT INTO gis_point (fid,g) VALUES
+-(101, PointFromText('POINT(10 10)')),
+-(102, PointFromText('POINT(20 10)')),
+-(103, PointFromText('POINT(20 20)')),
+-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+-INSERT INTO gis_line (fid,g) VALUES
+-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
+-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
+-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
+-INSERT INTO gis_polygon (fid,g) VALUES
+-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
+-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
+-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
+-INSERT INTO gis_multi_point (fid,g) VALUES
+-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
+-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
+-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
+-INSERT INTO gis_multi_line (fid,g) VALUES
+-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
+-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
+-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
+-INSERT INTO gis_multi_polygon (fid,g) VALUES
+-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+-INSERT INTO gis_geometrycollection (fid,g) VALUES
+-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
+-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
+-(122, GeomFromText('GeometryCollection()')),
+-(123, GeomFromText('GeometryCollection EMPTY'));
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
+-SELECT fid, AsText(g) FROM gis_point;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-SELECT fid, AsText(g) FROM gis_line;
+-fid AsText(g)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-SELECT fid, AsText(g) FROM gis_polygon;
+-fid AsText(g)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-SELECT fid, AsText(g) FROM gis_multi_point;
+-fid AsText(g)
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-SELECT fid, AsText(g) FROM gis_multi_line;
+-fid AsText(g)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-SELECT fid, AsText(g) FROM gis_multi_polygon;
+-fid AsText(g)
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-SELECT fid, AsText(g) FROM gis_geometrycollection;
+-fid AsText(g)
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, AsText(g) FROM gis_geometry;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, Dimension(g) FROM gis_geometry;
+-fid Dimension(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 1
+-106 1
+-107 1
+-108 2
+-109 2
+-110 2
+-111 0
+-112 0
+-113 0
+-114 1
+-115 1
+-116 1
+-117 2
+-118 2
+-119 2
+-120 1
+-121 1
+-122 0
+-123 0
+-SELECT fid, GeometryType(g) FROM gis_geometry;
+-fid GeometryType(g)
+-101 POINT
+-102 POINT
+-103 POINT
+-104 POINT
+-105 LINESTRING
+-106 LINESTRING
+-107 LINESTRING
+-108 POLYGON
+-109 POLYGON
+-110 POLYGON
+-111 MULTIPOINT
+-112 MULTIPOINT
+-113 MULTIPOINT
+-114 MULTILINESTRING
+-115 MULTILINESTRING
+-116 MULTILINESTRING
+-117 MULTIPOLYGON
+-118 MULTIPOLYGON
+-119 MULTIPOLYGON
+-120 GEOMETRYCOLLECTION
+-121 GEOMETRYCOLLECTION
+-122 GEOMETRYCOLLECTION
+-123 GEOMETRYCOLLECTION
+-SELECT fid, IsEmpty(g) FROM gis_geometry;
+-fid IsEmpty(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 0
+-106 0
+-107 0
+-108 0
+-109 0
+-110 0
+-111 0
+-112 0
+-113 0
+-114 0
+-115 0
+-116 0
+-117 0
+-118 0
+-119 0
+-120 0
+-121 0
+-122 0
+-123 0
+-SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
+-fid AsText(Envelope(g))
+-101 POLYGON((10 10,10 10,10 10,10 10,10 10))
+-102 POLYGON((20 10,20 10,20 10,20 10,20 10))
+-103 POLYGON((20 20,20 20,20 20,20 20,20 20))
+-104 POLYGON((10 20,10 20,10 20,10 20,10 20))
+-105 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-106 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-107 POLYGON((10 10,40 10,40 10,10 10,10 10))
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0))
+-110 POLYGON((0 0,30 0,30 30,0 30,0 0))
+-111 POLYGON((0 0,20 0,20 20,0 20,0 0))
+-112 POLYGON((1 1,21 1,21 21,1 21,1 1))
+-113 POLYGON((3 6,4 6,4 10,3 10,3 6))
+-114 POLYGON((10 0,16 0,16 48,10 48,10 0))
+-115 POLYGON((10 0,10 0,10 48,10 48,10 0))
+-116 POLYGON((1 2,21 2,21 8,1 8,1 2))
+-117 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-118 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-119 POLYGON((0 0,3 0,3 3,0 3,0 0))
+-120 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-121 POLYGON((3 6,44 6,44 9,3 9,3 6))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, X(g) FROM gis_point;
+-fid X(g)
+-101 10
+-102 20
+-103 20
+-104 10
+-SELECT fid, Y(g) FROM gis_point;
+-fid Y(g)
+-101 10
+-102 10
+-103 20
+-104 20
+-SELECT fid, AsText(StartPoint(g)) FROM gis_line;
+-fid AsText(StartPoint(g))
+-105 POINT(0 0)
+-106 POINT(10 10)
+-107 POINT(10 10)
+-SELECT fid, AsText(EndPoint(g)) FROM gis_line;
+-fid AsText(EndPoint(g))
+-105 POINT(10 0)
+-106 POINT(10 10)
+-107 POINT(40 10)
+-SELECT fid, GLength(g) FROM gis_line;
+-fid GLength(g)
+-105 24.14213562373095
+-106 40
+-107 30
+-SELECT fid, NumPoints(g) FROM gis_line;
+-fid NumPoints(g)
+-105 3
+-106 5
+-107 2
+-SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
+-fid AsText(PointN(g, 2))
+-105 POINT(0 10)
+-106 POINT(20 10)
+-107 POINT(40 10)
+-SELECT fid, IsClosed(g) FROM gis_line;
+-fid IsClosed(g)
+-105 0
+-106 1
+-107 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
+-fid AsText(Centroid(g))
+-108 POINT(15 15)
+-109 POINT(25.416666666666668 25.416666666666668)
+-110 POINT(20 10)
+-SELECT fid, Area(g) FROM gis_polygon;
+-fid Area(g)
+-108 100
+-109 2400
+-110 450
+-SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
+-fid AsText(ExteriorRing(g))
+-108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
+-110 LINESTRING(0 0,30 0,30 30,0 0)
+-SELECT fid, NumInteriorRings(g) FROM gis_polygon;
+-fid NumInteriorRings(g)
+-108 0
+-109 1
+-110 0
+-SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
+-fid AsText(InteriorRingN(g, 1))
+-108 NULL
+-109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-110 NULL
+-SELECT fid, IsClosed(g) FROM gis_multi_line;
+-fid IsClosed(g)
+-114 0
+-115 0
+-116 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
+-fid AsText(Centroid(g))
+-117 POINT(55.58852775304245 17.426536064113982)
+-118 POINT(55.58852775304245 17.426536064113982)
+-119 POINT(2 2)
+-SELECT fid, Area(g) FROM gis_multi_polygon;
+-fid Area(g)
+-117 1684.5
+-118 1684.5
+-119 4.5
+-SELECT fid, NumGeometries(g) from gis_multi_point;
+-fid NumGeometries(g)
+-111 4
+-112 4
+-113 2
+-SELECT fid, NumGeometries(g) from gis_multi_line;
+-fid NumGeometries(g)
+-114 2
+-115 1
+-116 2
+-SELECT fid, NumGeometries(g) from gis_multi_polygon;
+-fid NumGeometries(g)
+-117 2
+-118 2
+-119 1
+-SELECT fid, NumGeometries(g) from gis_geometrycollection;
+-fid NumGeometries(g)
+-120 2
+-121 2
+-122 0
+-123 0
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
+-fid AsText(GeometryN(g, 2))
+-111 POINT(10 10)
+-112 POINT(11 11)
+-113 POINT(4 10)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
+-fid AsText(GeometryN(g, 2))
+-114 LINESTRING(16 0,16 23,16 48)
+-115 NULL
+-116 LINESTRING(2 5,5 8,21 7)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
+-fid AsText(GeometryN(g, 2))
+-117 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-118 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-119 NULL
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 2))
+-120 LINESTRING(0 0,10 10)
+-121 LINESTRING(3 6,7 9)
+-122 NULL
+-123 NULL
+-SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 1))
+-120 POINT(0 0)
+-121 POINT(44 6)
+-122 NULL
+-123 NULL
+-SELECT g1.fid as first, g2.fid as second,
+-Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
+-Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
+-Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
+-FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
+-first second w c o e d t i r
+-120 120 1 1 0 1 0 1 1 0
+-120 121 0 0 1 0 0 0 1 0
+-120 122 0 1 NULL 0 NULL 0 NULL 0
+-120 123 0 1 NULL 0 NULL 0 NULL 0
+-121 120 0 0 1 0 0 0 1 0
+-121 121 1 1 0 1 0 1 1 0
+-121 122 0 1 NULL 0 NULL 0 NULL 0
+-121 123 0 1 NULL 0 NULL 0 NULL 0
+-122 120 1 0 NULL 0 NULL 0 NULL 0
+-122 121 1 0 NULL 0 NULL 0 NULL 0
+-122 122 1 1 NULL 1 NULL 0 NULL 0
+-122 123 1 1 NULL 1 NULL 0 NULL 0
+-123 120 1 0 NULL 0 NULL 0 NULL 0
+-123 121 1 0 NULL 0 NULL 0 NULL 0
+-123 122 1 1 NULL 1 NULL 0 NULL 0
+-123 123 1 1 NULL 1 NULL 0 NULL 0
+-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
+-USE gis_ogs;
+-# Lakes
+-INSERT INTO lakes (fid,name,shore) VALUES (
+-101, 'BLUE LAKE',
+-PolyFromText(
+-'POLYGON(
+- (52 18,66 23,73 9,48 6,52 18),
+- (59 18,67 18,67 13,59 13,59 18)
+- )',
+-101));
+-# Road Segments
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
+-LineFromText(
+-'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 70 38, 72 48 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
+-LineFromText(
+-'LINESTRING( 70 38, 84 42 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
+-1,
+-LineFromText(
+-'LINESTRING( 28 26, 28 0 )',101));
+-# DividedRoutes
+-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
+-MLineFromText(
+-'MULTILINESTRING((10 48,10 21,10 0),
+- (16 0,16 23,16 48))', 101));
+-# Forests
+-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
+-MPolyFromText(
+-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
+- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
+-101));
+-# Bridges
+-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
+-'POINT( 44 31 )', 101));
+-# Streams
+-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
+-LineFromText(
+-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
+-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
+-LineFromText(
+-'LINESTRING( 76 0, 78 4, 73 9 )', 101));
+-# Buildings
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
+-PointFromText(
+-'POINT( 52 30 )', 101),
+-PolyFromText(
+-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
+-PointFromText(
+-'POINT( 64 33 )', 101),
+-PolyFromText(
+-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
+-# Ponds
+-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
+-MPolyFromText(
+-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
+- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
+-# Named Places
+-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
+-PolyFromText(
+-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
+-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
+-PolyFromText(
+-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
+-# Map Neatlines
+-INSERT INTO map_neatlines (fid,neatline) VALUES(115,
+-PolyFromText(
+-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
+-SELECT Dimension(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-Dimension(shore)
+-2
+-SELECT GeometryType(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GeometryType(centerlines)
+-MULTILINESTRING
+-SELECT AsText(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(boundary)
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(PolyFromWKB(AsBinary(boundary),101))
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT SRID(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-SRID(boundary)
+-101
+-SELECT IsEmpty(centerline)
+-FROM road_segments
+-WHERE name = 'Route 5'
+-AND aliases = 'Main Street';
+-IsEmpty(centerline)
+-0
+-SELECT AsText(Envelope(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Envelope(boundary))
+-POLYGON((59 13,67 13,67 18,59 18,59 13))
+-SELECT X(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-X(position)
+-44
+-SELECT Y(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-Y(position)
+-31
+-SELECT AsText(StartPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(StartPoint(centerline))
+-POINT(0 18)
+-SELECT AsText(EndPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(EndPoint(centerline))
+-POINT(44 31)
+-SELECT GLength(centerline)
+-FROM road_segments
+-WHERE fid = 106;
+-GLength(centerline)
+-26
+-SELECT NumPoints(centerline)
+-FROM road_segments
+-WHERE fid = 102;
+-NumPoints(centerline)
+-5
+-SELECT AsText(PointN(centerline, 1))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(PointN(centerline, 1))
+-POINT(0 18)
+-SELECT AsText(Centroid(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Centroid(boundary))
+-POINT(63 15.5)
+-SELECT Area(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-Area(boundary)
+-40
+-SELECT AsText(ExteriorRing(shore))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(ExteriorRing(shore))
+-LINESTRING(52 18,66 23,73 9,48 6,52 18)
+-SELECT NumInteriorRings(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-NumInteriorRings(shore)
+-1
+-SELECT AsText(InteriorRingN(shore, 1))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(InteriorRingN(shore, 1))
+-LINESTRING(59 18,67 18,67 13,59 13,59 18)
+-SELECT NumGeometries(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-NumGeometries(centerlines)
+-2
+-SELECT AsText(GeometryN(centerlines, 2))
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-AsText(GeometryN(centerlines, 2))
+-LINESTRING(16 0,16 23,16 48)
+-SELECT IsClosed(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-IsClosed(centerlines)
+-0
+-SELECT GLength(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GLength(centerlines)
+-96
+-SELECT AsText(Centroid(shores))
+-FROM ponds
+-WHERE fid = 120;
+-AsText(Centroid(shores))
+-POINT(25 42)
+-SELECT Area(shores)
+-FROM ponds
+-WHERE fid = 120;
+-Area(shores)
+-8
+-SELECT ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-1
+-SELECT ST_Disjoint(centerlines, boundary)
+-FROM divided_routes, named_places
+-WHERE divided_routes.name = 'Route 75'
+-AND named_places.name = 'Ashton';
+-ST_Disjoint(centerlines, boundary)
+-1
+-SELECT ST_Touches(centerline, shore)
+-FROM streams, lakes
+-WHERE streams.name = 'Cam Stream'
+-AND lakes.name = 'Blue Lake';
+-ST_Touches(centerline, shore)
+-1
+-SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-Crosses(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Contains(forests.boundary, named_places.boundary)
+-FROM forests, named_places
+-WHERE forests.name = 'Green Forest'
+-AND named_places.name = 'Ashton';
+-ST_Contains(forests.boundary, named_places.boundary)
+-0
+-SELECT ST_Distance(position, boundary)
+-FROM bridges, named_places
+-WHERE bridges.name = 'Cam Bridge'
+-AND named_places.name = 'Ashton';
+-ST_Distance(position, boundary)
+-12
+-SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
+-FROM named_places, forests
+-WHERE named_places.name = 'Ashton'
+-AND forests.name = 'Green Forest';
+-AsText(ST_Difference(named_places.boundary, forests.boundary))
+-POLYGON((56 34,62 48,84 48,84 42,56 34))
+-SELECT AsText(ST_Union(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Goose Island';
+-AsText(ST_Union(shore, boundary))
+-POLYGON((48 6,52 18,66 23,73 9,48 6))
+-SELECT AsText(ST_SymDifference(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Ashton';
+-AsText(ST_SymDifference(shore, boundary))
+-MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
+-SELECT count(*)
+-FROM buildings, bridges
+-WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
+-count(*)
+-1
++ERROR 42000: The storage engine for the table doesn't support GEOMETRY
++# ERROR: Statement ended with errno 1178, errname ER_CHECK_NOT_IMPLEMENTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.gis_point) INSERT_METHOD=LAST ]
++# The statement|command finished with ER_CHECK_NOT_IMPLEMENTED.
++# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP DATABASE gis_ogs;
+ USE test;
diff --git a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
index 094b26668c1..8b2710be221 100644
--- a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
@@ -1,79 +1,82 @@
-3,69c3,12
-< SHOW COLUMNS IN t1;
-< Field Type Null Key Default Extra
-< a int(11) # #
-< b int(11) # # VIRTUAL
-< INSERT INTO t1 (a) VALUES (1),(2);
-< INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-< Warnings:
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< SELECT * FROM t1;
-< a b
-< 1 2
-< 2 3
-< 3 4
-< 4 5
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) PERSISTENT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< SHOW COLUMNS IN t1;
-< Field Type Null Key Default Extra
-< a int(11) # #
-< b int(11) # # PERSISTENT
-< INSERT INTO t1 (a) VALUES (1),(2);
-< INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-< Warnings:
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< SELECT * FROM t1;
-< a b
-< 1 2
-< 2 3
-< 3 4
-< 4 5
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) VIRTUAL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< SHOW COLUMNS IN t1;
-< Field Type Null Key Default Extra
-< a int(11) # #
-< b int(11) # # VIRTUAL
-< INSERT INTO t1 (a) VALUES (1),(2);
-< INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-< Warnings:
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< SELECT * FROM t1;
-< a b
-< 1 2
-< 2 3
-< 3 4
-< 4 5
-< DROP TABLE t1;
-< CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> AS (a+1) PERSISTENT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
-< SHOW COLUMNS IN t1;
-< Field Type Null Key Default Extra
-< a int(11) # #
-< b int(11) # # PERSISTENT
-< INSERT INTO t1 (a) VALUES (1),(2);
-< INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
-< Warnings:
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< Warning 1906 The value specified for computed column 'b' in table 't1' ignored
-< SELECT * FROM t1;
-< a b
-< 1 2
-< 2 3
-< 3 4
-< 4 5
-< DROP TABLE t1;
----
-> ERROR HY000: MRG_MyISAM storage engine does not support computed columns
-> # ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed)
-> # ------------ UNEXPECTED RESULT ------------
-> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ]
-> # The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS.
-> # Virtual columns or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
-> # You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
-> # Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
-> # Also, this problem may cause a chain effect (more errors of different kinds in the test).
-> # -------------------------------------------
+--- vcol.result 2013-01-22 22:05:05.246633000 +0400
++++ vcol.reject 2013-01-23 02:51:26.851160587 +0400
+@@ -1,69 +1,12 @@
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW COLUMNS IN t1;
+-Field Type Null Key Default Extra
+-a int(11) # #
+-b int(11) # # VIRTUAL
+-INSERT INTO t1 (a) VALUES (1),(2);
+-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
+-Warnings:
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-SELECT a,b FROM t1;
+-a b
+-1 2
+-2 3
+-3 4
+-4 5
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) PERSISTENT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW COLUMNS IN t1;
+-Field Type Null Key Default Extra
+-a int(11) # #
+-b int(11) # # PERSISTENT
+-INSERT INTO t1 (a) VALUES (1),(2);
+-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
+-Warnings:
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-SELECT a,b FROM t1;
+-a b
+-1 2
+-2 3
+-3 4
+-4 5
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> GENERATED ALWAYS AS (a+1) VIRTUAL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW COLUMNS IN t1;
+-Field Type Null Key Default Extra
+-a int(11) # #
+-b int(11) # # VIRTUAL
+-INSERT INTO t1 (a) VALUES (1),(2);
+-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
+-Warnings:
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-SELECT a,b FROM t1;
+-a b
+-1 2
+-2 3
+-3 4
+-4 5
+-DROP TABLE t1;
+-CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN> AS (a+1) PERSISTENT) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-SHOW COLUMNS IN t1;
+-Field Type Null Key Default Extra
+-a int(11) # #
+-b int(11) # # PERSISTENT
+-INSERT INTO t1 (a) VALUES (1),(2);
+-INSERT INTO t1 (a,b) VALUES (3,3),(4,4);
+-Warnings:
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-Warning 1906 The value specified for computed column 'b' in table 't1' ignored
+-SELECT a,b FROM t1;
+-a b
+-1 2
+-2 3
+-3 4
+-4 5
+-DROP TABLE t1;
++ERROR HY000: MRG_MYISAM storage engine does not support computed columns
++# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ]
++# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS.
++# Virtual columns or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
diff --git a/storage/oqgraph/CMakeLists.txt b/storage/oqgraph/CMakeLists.txt
index 83acb99b2a2..593fced6235 100644
--- a/storage/oqgraph/CMakeLists.txt
+++ b/storage/oqgraph/CMakeLists.txt
@@ -3,6 +3,7 @@ IF(NOT Boost_FOUND)
RETURN()
ENDIF()
INCLUDE_DIRECTORIES(BEFORE ${Boost_INCLUDE_DIRS})
+SET(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES} ${Boost_INCLUDE_DIRS})
IF(MSVC)
# lp:756966 OQGRAPH on Win64 does not compile
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index ffbe00a1caf..b4292f33066 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -58,10 +58,7 @@ static HASH oqgraph_open_tables;
static mysql_mutex_t LOCK_oqgraph;
static bool oqgraph_init_done= 0;
-#define HASH_KEY_LENGTH size_t
-
-static uchar* get_key(const uchar *ptr, HASH_KEY_LENGTH *length,
- my_bool)
+static uchar* get_key(const uchar *ptr, size_t *length, my_bool)
{
const OQGRAPH_INFO *share= (const OQGRAPH_INFO*) ptr;
*length= strlen(share->name);
@@ -83,7 +80,6 @@ static void init_psi_keys()
#define init_psi_keys() /* no-op */
#endif /* HAVE_PSI_INTERFACE */
-
static handler* oqgraph_create_handler(handlerton *hton, TABLE_SHARE *table,
MEM_ROOT *mem_root)
{
@@ -292,16 +288,6 @@ ha_oqgraph::ha_oqgraph(handlerton *hton, TABLE_SHARE *table_arg)
{ }
-static const char *ha_oqgraph_exts[] =
-{
- NullS
-};
-
-const char **ha_oqgraph::bas_ext() const
-{
- return ha_oqgraph_exts;
-}
-
ulonglong ha_oqgraph::table_flags() const
{
return (HA_NO_BLOBS | HA_NULL_IN_KEY |
diff --git a/storage/oqgraph/ha_oqgraph.h b/storage/oqgraph/ha_oqgraph.h
index ee88e38c526..9f55fb233d8 100644
--- a/storage/oqgraph/ha_oqgraph.h
+++ b/storage/oqgraph/ha_oqgraph.h
@@ -68,7 +68,6 @@ public:
}
/* Rows also use a fixed-size format */
enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
- const char **bas_ext() const;
ulong index_flags(uint inx, uint part, bool all_parts) const;
uint max_supported_keys() const { return MAX_KEY; }
uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
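The oqgraph hunks above, and the perfschema and sphinx hunks further down, all delete the same boilerplate: a bas_ext() override that returned an empty, NullS-terminated extension list. These engines create no data files of their own, and the base handler presumably now supplies that empty default itself, so the override can simply go. A minimal standalone sketch of the before/after shape, using mock names rather than the real MariaDB handler API:

// Mock of the bas_ext() pattern being removed; not the real handler class.
#include <cstdio>

static const char *no_exts[] = { nullptr };          // analogue of { NullS }

struct mock_handler {
  // the base class answers "no file extensions" by default
  virtual const char **bas_ext() const { return no_exts; }
  virtual ~mock_handler() = default;
};

// Before: every diskless engine repeated
//   static const char *ha_xxx_exts[] = { NullS };
//   const char **ha_xxx::bas_ext() const { return ha_xxx_exts; }
// After: the override is gone and the default is used.
struct mock_diskless_engine : mock_handler {};

int main() {
  mock_diskless_engine h;
  std::printf("first extension: %s\n",
              h.bas_ext()[0] ? h.bas_ext()[0] : "(none)");
  return 0;
}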
diff --git a/storage/pbxt/src/discover_xt.cc b/storage/pbxt/src/discover_xt.cc
index 05891bdf19d..23941374fb1 100644
--- a/storage/pbxt/src/discover_xt.cc
+++ b/storage/pbxt/src/discover_xt.cc
@@ -1196,7 +1196,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
else if (length == 0)
{
- my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
+ column->field_name);
DBUG_RETURN(TRUE);
}
if (length > file->max_key_part_length() && key->type != Key::FULLTEXT)
diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc
index 773d822af2b..c6c967ceb9d 100644
--- a/storage/perfschema/ha_perfschema.cc
+++ b/storage/perfschema/ha_perfschema.cc
@@ -217,15 +217,6 @@ ha_perfschema::ha_perfschema(handlerton *hton, TABLE_SHARE *share)
ha_perfschema::~ha_perfschema()
{}
-static const char *ha_pfs_exts[]= {
- NullS
-};
-
-const char **ha_perfschema::bas_ext() const
-{
- return ha_pfs_exts;
-}
-
int ha_perfschema::open(const char *name, int mode, uint test_if_locked)
{
DBUG_ENTER("ha_perfschema::open");
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index dc465da3758..e088c79b26f 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -46,8 +46,6 @@ public:
const char *index_type(uint) { return ""; }
- const char **bas_ext(void) const;
-
/** Capabilities of the performance schema tables. */
ulonglong table_flags(void) const
{
diff --git a/storage/sequence/CMakeLists.txt b/storage/sequence/CMakeLists.txt
new file mode 100644
index 00000000000..9a68901520c
--- /dev/null
+++ b/storage/sequence/CMakeLists.txt
@@ -0,0 +1 @@
+MYSQL_ADD_PLUGIN(sequence sequence.cc STORAGE_ENGINE)
diff --git a/storage/sequence/mysql-test/sequence/inc.inc b/storage/sequence/mysql-test/sequence/inc.inc
new file mode 100644
index 00000000000..702e90b2890
--- /dev/null
+++ b/storage/sequence/mysql-test/sequence/inc.inc
@@ -0,0 +1,4 @@
+if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'sequence' AND support='YES'`)
+{
+ --skip Test requires sequence engine
+}
diff --git a/storage/sequence/mysql-test/sequence/inc.opt b/storage/sequence/mysql-test/sequence/inc.opt
new file mode 100644
index 00000000000..5b96925ff59
--- /dev/null
+++ b/storage/sequence/mysql-test/sequence/inc.opt
@@ -0,0 +1,2 @@
+--plugin-load=$HA_SEQUENCE_SO
+--loose-sequence
diff --git a/storage/sequence/mysql-test/sequence/simple.result b/storage/sequence/mysql-test/sequence/simple.result
new file mode 100644
index 00000000000..b14fde2cfef
--- /dev/null
+++ b/storage/sequence/mysql-test/sequence/simple.result
@@ -0,0 +1,270 @@
+select * from information_schema.engines where engine='sequence';
+ENGINE SEQUENCE
+SUPPORT YES
+COMMENT Generated tables filled with sequential values
+TRANSACTIONS YES
+XA YES
+SAVEPOINTS YES
+set sql_quote_show_create=0;
+show create table seq_1_to_15_step_2;
+Table Create Table
+seq_1_to_15_step_2 CREATE TABLE seq_1_to_15_step_2 (
+ seq bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (seq)
+) ENGINE=SEQUENCE DEFAULT CHARSET=latin1
+show create table seq_1_to_15_step;
+ERROR 42S02: Table 'test.seq_1_to_15_step' doesn't exist
+show create table seq_1_to_15_st;
+ERROR 42S02: Table 'test.seq_1_to_15_st' doesn't exist
+show create table seq_1_to_15;
+Table Create Table
+seq_1_to_15 CREATE TABLE seq_1_to_15 (
+ seq bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (seq)
+) ENGINE=SEQUENCE DEFAULT CHARSET=latin1
+show create table seq_1_to_1;
+Table Create Table
+seq_1_to_1 CREATE TABLE seq_1_to_1 (
+ seq bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (seq)
+) ENGINE=SEQUENCE DEFAULT CHARSET=latin1
+show create table seq_1_to_;
+ERROR 42S02: Table 'test.seq_1_to_' doesn't exist
+show create table seq_1_t;
+ERROR 42S02: Table 'test.seq_1_t' doesn't exist
+show create table seq_1;
+ERROR 42S02: Table 'test.seq_1' doesn't exist
+show create table seq_;
+ERROR 42S02: Table 'test.seq_' doesn't exist
+show create table se;
+ERROR 42S02: Table 'test.se' doesn't exist
+show create table seq_1_to_15_step_0;
+ERROR HY000: Got error 140 "Wrong create options" from storage engine SEQUENCE
+select * from seq_1_to_15_step_2;
+seq
+1
+3
+5
+7
+9
+11
+13
+15
+select * from seq_1_to_15;
+seq
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+select * from seq_1_to_1;
+seq
+1
+select * from seq_15_to_1;
+seq
+15
+14
+13
+12
+11
+10
+9
+8
+7
+6
+5
+4
+3
+2
+1
+select * from seq_15_to_1_step_2;
+seq
+15
+13
+11
+9
+7
+5
+3
+1
+select * from seq_1_to_15_step_12345;
+seq
+1
+select * from seq_15_to_1_step_12345;
+seq
+15
+explain select * from seq_15_to_1_step_12345;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_15_to_1_step_12345 ALL NULL NULL NULL NULL 1
+show open tables from test;
+Database Table In_use Name_locked
+test seq_15_to_1 0 0
+test seq_15_to_1_step_12345 0 0
+test seq_15_to_1_step_2 0 0
+test seq_1_to_1 0 0
+test seq_1_to_15 0 0
+test seq_1_to_15_step_12345 0 0
+test seq_1_to_15_step_2 0 0
+show tables;
+Tables_in_test
+explain select * from seq_1_to_15_step_2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 index NULL PRIMARY 8 NULL 8 Using index
+explain select * from seq_1_to_15_step_2 where seq > 4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 6 Using where; Using index
+explain select * from seq_1_to_15_step_2 where seq between 4 and 9;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 3 Using where; Using index
+explain select * from seq_1_to_15_step_2 where seq between 20 and 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain select * from seq_1_to_15_step_2 where seq between 4 and 6;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 1 Using where; Using index
+explain select * from seq_1_to_15_step_2 where seq between 4 and 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 1 Using where; Using index
+explain select * from seq_1_to_15_step_2 where seq between 4 and 4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain select * from seq_1_to_15_step_2 where seq between 5 and 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_15_step_2 const PRIMARY PRIMARY 8 const 1 Using index
+create table t1 (a int, aa int, b varchar(100));
+insert t1 select seq, seq*seq, if (seq % 2, 'odd', 'even') from seq_1_to_20;
+select * from t1;
+a aa b
+1 1 odd
+2 4 even
+3 9 odd
+4 16 even
+5 25 odd
+6 36 even
+7 49 odd
+8 64 even
+9 81 odd
+10 100 even
+11 121 odd
+12 144 even
+13 169 odd
+14 196 even
+15 225 odd
+16 256 even
+17 289 odd
+18 324 even
+19 361 odd
+20 400 even
+select aa, b from t1, seq_1_to_20_step_3 as seq where a=seq;
+aa b
+1 odd
+16 even
+49 odd
+100 even
+169 odd
+256 even
+361 odd
+insert t1
+select seq, seq*seq, if (seq % 2, 'odd', 'even') from seq_1_to_30
+where seq > (select max(a) from t1);
+select * from t1;
+a aa b
+1 1 odd
+2 4 even
+3 9 odd
+4 16 even
+5 25 odd
+6 36 even
+7 49 odd
+8 64 even
+9 81 odd
+10 100 even
+11 121 odd
+12 144 even
+13 169 odd
+14 196 even
+15 225 odd
+16 256 even
+17 289 odd
+18 324 even
+19 361 odd
+20 400 even
+21 441 odd
+22 484 even
+23 529 odd
+24 576 even
+25 625 odd
+26 676 even
+27 729 odd
+28 784 even
+29 841 odd
+30 900 even
+drop table t1;
+select seq from seq_2_to_50 s1 where 0 not in
+(select s1.seq % s2.seq from seq_2_to_50 s2 where s2.seq <= sqrt(s1.seq));
+seq
+2
+3
+5
+7
+11
+13
+17
+19
+23
+29
+31
+37
+41
+43
+47
+explain select seq from seq_2_to_50 s1 where 0 not in
+(select s1.seq % s2.seq from seq_2_to_50 s2 where s2.seq <= sqrt(s1.seq));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY s1 index NULL PRIMARY 8 NULL 49 Using where; Using index
+2 DEPENDENT SUBQUERY s2 index PRIMARY PRIMARY 8 NULL 49 Using where; Using index
+select year(dt) from
+(select '1901-02-28' + interval seq year as dt from seq_0_to_99) as seqdt
+where weekday(dt) = 0;
+year(dt)
+1910
+1916
+1921
+1927
+1938
+1944
+1949
+1955
+1966
+1972
+1977
+1983
+1994
+2000
+create table t1 (a int) engine=innodb;
+reset master;
+start transaction;
+insert t1 select * from seq_1_to_10;
+savepoint s1;
+insert t1 select * from seq_11_to_20;
+rollback to savepoint s1;
+commit;
+select count(*) from t1;
+count(*)
+10
+show binlog events limit 2,10;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 286 Query 1 354 BEGIN
+master-bin.000001 354 Query 1 452 use test; insert t1 select * from seq_1_to_10
+master-bin.000001 452 Xid 1 479 COMMIT /* xid */
+drop table t1;
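The expected results above define the SEQUENCE engine's behaviour purely in terms of table names: seq_<from>_to_<to> holds every integer from <from> to <to> inclusive, an optional _step_<n> suffix walks in increments of n, and swapped bounds (seq_15_to_1) enumerate downwards. A small standalone C++ sketch of that enumeration rule, written to match the result rows above rather than the engine's internal iterator:

// Reproduces the value sets shown in simple.result above; not engine code.
#include <cstdio>
#include <vector>

static std::vector<unsigned long long>
seq_values(unsigned long long from, unsigned long long to,
           unsigned long long step = 1)
{
  std::vector<unsigned long long> out;
  if (from <= to) {
    for (unsigned long long v = from; v <= to; v += step)
      out.push_back(v);
  } else {                             // reversed bounds: walk downwards
    for (unsigned long long v = from;; v -= step) {
      out.push_back(v);
      if (v < to + step)               // the next step would pass 'to'
        break;
    }
  }
  return out;
}

static void dump(const std::vector<unsigned long long> &v) {
  for (unsigned long long x : v) std::printf("%llu ", x);
  std::printf("\n");
}

int main() {
  dump(seq_values(1, 15, 2));      // 1 3 5 7 9 11 13 15, as in seq_1_to_15_step_2
  dump(seq_values(15, 1));         // 15 14 ... 1, as in seq_15_to_1
  dump(seq_values(15, 1, 12345));  // 15 only, as in seq_15_to_1_step_12345
  return 0;
}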
diff --git a/storage/sequence/mysql-test/sequence/simple.test b/storage/sequence/mysql-test/sequence/simple.test
new file mode 100644
index 00000000000..9559e42fafd
--- /dev/null
+++ b/storage/sequence/mysql-test/sequence/simple.test
@@ -0,0 +1,93 @@
+--source inc.inc
+--source include/have_xtradb.inc
+--source include/have_binlog_format_statement.inc
+
+--query_vertical select * from information_schema.engines where engine='sequence'
+
+set sql_quote_show_create=0;
+
+show create table seq_1_to_15_step_2;
+--error ER_NO_SUCH_TABLE
+show create table seq_1_to_15_step;
+--error ER_NO_SUCH_TABLE
+show create table seq_1_to_15_st;
+show create table seq_1_to_15;
+show create table seq_1_to_1;
+--error ER_NO_SUCH_TABLE
+show create table seq_1_to_;
+--error ER_NO_SUCH_TABLE
+show create table seq_1_t;
+--error ER_NO_SUCH_TABLE
+show create table seq_1;
+--error ER_NO_SUCH_TABLE
+show create table seq_;
+--error ER_NO_SUCH_TABLE
+show create table se;
+--error ER_GET_ERRNO
+show create table seq_1_to_15_step_0;
+
+# simple select
+select * from seq_1_to_15_step_2;
+select * from seq_1_to_15;
+select * from seq_1_to_1;
+# backwards
+select * from seq_15_to_1;
+select * from seq_15_to_1_step_2;
+
+# step > |to - from|
+select * from seq_1_to_15_step_12345;
+select * from seq_15_to_1_step_12345;
+explain select * from seq_15_to_1_step_12345;
+
+--sorted_result
+show open tables from test;
+show tables;
+# row estimates
+explain select * from seq_1_to_15_step_2;
+explain select * from seq_1_to_15_step_2 where seq > 4;
+explain select * from seq_1_to_15_step_2 where seq between 4 and 9;
+explain select * from seq_1_to_15_step_2 where seq between 20 and 30;
+explain select * from seq_1_to_15_step_2 where seq between 4 and 6;
+explain select * from seq_1_to_15_step_2 where seq between 4 and 5;
+explain select * from seq_1_to_15_step_2 where seq between 4 and 4;
+explain select * from seq_1_to_15_step_2 where seq between 5 and 5;
+
+# join
+create table t1 (a int, aa int, b varchar(100));
+insert t1 select seq, seq*seq, if (seq % 2, 'odd', 'even') from seq_1_to_20;
+select * from t1;
+select aa, b from t1, seq_1_to_20_step_3 as seq where a=seq;
+# adding more rows, example
+insert t1
+ select seq, seq*seq, if (seq % 2, 'odd', 'even') from seq_1_to_30
+ where seq > (select max(a) from t1);
+select * from t1;
+drop table t1;
+
+# Prime Numbers from 2 to 50 :)
+select seq from seq_2_to_50 s1 where 0 not in
+ (select s1.seq % s2.seq from seq_2_to_50 s2 where s2.seq <= sqrt(s1.seq));
+explain select seq from seq_2_to_50 s1 where 0 not in
+ (select s1.seq % s2.seq from seq_2_to_50 s2 where s2.seq <= sqrt(s1.seq));
+
+# Years of XX-th century where 28th of February was Monday
+select year(dt) from
+ (select '1901-02-28' + interval seq year as dt from seq_0_to_99) as seqdt
+ where weekday(dt) = 0;
+
+# transactions and XA
+create table t1 (a int) engine=innodb;
+reset master;
+start transaction;
+# No warning about "accesses nontransactional table"
+insert t1 select * from seq_1_to_10;
+savepoint s1;
+insert t1 select * from seq_11_to_20;
+rollback to savepoint s1;
+commit;
+select count(*) from t1;
+# must show Xid event
+--replace_regex /xid=[0-9]+/xid/
+show binlog events limit 2,10;
+drop table t1;
+
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
new file mode 100644
index 00000000000..2349f7aae63
--- /dev/null
+++ b/storage/sequence/sequence.cc
@@ -0,0 +1,343 @@
+/*
+ Copyright (c) 2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/*
+ an engine that auto-creates tables with rows filled with sequential values
+*/
+
+#include <mysql_version.h>
+#include <handler.h>
+#include <table.h>
+#include <field.h>
+
+typedef struct st_share {
+ const char *name;
+ THR_LOCK lock;
+ uint use_count;
+ struct st_share *next;
+
+ ulonglong from, to, step;
+ bool reverse;
+} SHARE;
+
+class ha_seq: public handler
+{
+private:
+ THR_LOCK_DATA lock;
+ SHARE *seqs;
+ ulonglong cur;
+
+public:
+ ha_seq(handlerton *hton, TABLE_SHARE *table_arg)
+ : handler(hton, table_arg), seqs(0) { }
+ ulonglong table_flags() const { return 0; }
+
+ /* open/close/locking */
+ int create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info) { return HA_ERR_WRONG_COMMAND; }
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+ THR_LOCK_DATA **store_lock(THD *, THR_LOCK_DATA **, enum thr_lock_type);
+
+ /* table scan */
+ int rnd_init(bool scan);
+ int rnd_next(unsigned char *buf);
+ void position(const uchar *record);
+ int rnd_pos(uchar *buf, uchar *pos);
+ int info(uint flag);
+
+ /* indexes */
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ { return HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
+ HA_READ_RANGE | HA_KEYREAD_ONLY; }
+ uint max_supported_keys() const { return 1; }
+ int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_next(uchar *buf);
+ int index_prev(uchar *buf);
+ int index_first(uchar *buf);
+ int index_last(uchar *buf);
+ ha_rows records_in_range(uint inx, key_range *min_key,
+ key_range *max_key);
+
+ double scan_time() { return nvalues(); }
+ double read_time(uint index, uint ranges, ha_rows rows) { return rows; }
+ double keyread_time(uint index, uint ranges, ha_rows rows) { return rows; }
+
+private:
+ void set(uchar *buf);
+ ulonglong nvalues() { return (seqs->to - seqs->from)/seqs->step; }
+};
+
+THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ lock.type= TL_WRITE_ALLOW_WRITE;
+ *to ++= &lock;
+ return to;
+}
+
+void ha_seq::set(unsigned char *buf)
+{
+ my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]);
+ Field *field = table->field[0];
+ field->move_field_offset(offset);
+ field->store(cur, true);
+ field->move_field_offset(-offset);
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+}
+
+int ha_seq::rnd_init(bool scan)
+{
+ cur= seqs->reverse ? seqs->to : seqs->from;
+ return 0;
+}
+
+int ha_seq::rnd_next(unsigned char *buf)
+{
+ if (seqs->reverse)
+ return index_prev(buf);
+ else
+ return index_next(buf);
+}
+
+void ha_seq::position(const uchar *record)
+{
+ *(ulonglong*)ref= cur;
+}
+
+int ha_seq::rnd_pos(uchar *buf, uchar *pos)
+{
+ cur= *(ulonglong*)pos;
+ return rnd_next(buf);
+}
+
+int ha_seq::info(uint flag)
+{
+ if (flag & HA_STATUS_VARIABLE)
+ stats.records = nvalues();
+ return 0;
+}
+
+int ha_seq::index_read_map(uchar *buf, const uchar *key_arg,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+{
+ ulonglong key= uint8korr(key_arg);
+ switch (find_flag) {
+ case HA_READ_AFTER_KEY:
+ key++;
+ // fall through
+ case HA_READ_KEY_OR_NEXT:
+ if (key <= seqs->from)
+ cur= seqs->from;
+ else
+ {
+ cur= (key - seqs->from + seqs->step - 1) / seqs->step * seqs->step + seqs->from;
+ if (cur >= seqs->to)
+ return HA_ERR_KEY_NOT_FOUND;
+ }
+ return index_next(buf);
+
+ case HA_READ_KEY_EXACT:
+ if ((key - seqs->from) % seqs->step != 0 || key < seqs->from || key >= seqs->to)
+ return HA_ERR_KEY_NOT_FOUND;
+ cur= key;
+ return index_next(buf);
+
+ case HA_READ_BEFORE_KEY:
+ key--;
+ // fall through
+ case HA_READ_PREFIX_LAST_OR_PREV:
+ if (key >= seqs->to)
+ cur= seqs->to;
+ else
+ {
+ if (key < seqs->from)
+ return HA_ERR_KEY_NOT_FOUND;
+ cur= (key - seqs->from) / seqs->step * seqs->step + seqs->from;
+ }
+ return index_prev(buf);
+ default: return HA_ERR_WRONG_COMMAND;
+ }
+}
+
+
+int ha_seq::index_next(uchar *buf)
+{
+ if (cur == seqs->to)
+ return HA_ERR_END_OF_FILE;
+ set(buf);
+ cur+= seqs->step;
+ return 0;
+}
+
+
+int ha_seq::index_prev(uchar *buf)
+{
+ if (cur == seqs->from)
+ return HA_ERR_END_OF_FILE;
+ cur-= seqs->step;
+ set(buf);
+ return 0;
+}
+
+
+int ha_seq::index_first(uchar *buf)
+{
+ cur= seqs->from;
+ return index_next(buf);
+}
+
+
+int ha_seq::index_last(uchar *buf)
+{
+ cur= seqs->to;
+ return index_prev(buf);
+}
+
+ha_rows ha_seq::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+{
+ ulonglong kmin= min_key ? uint8korr(min_key->key) : seqs->from;
+ ulonglong kmax= max_key ? uint8korr(max_key->key) : seqs->to - 1;
+ if (kmin >= seqs->to || kmax < seqs->from || kmin > kmax)
+ return 0;
+ return (kmax - seqs->from) / seqs->step -
+ (kmin - seqs->from + seqs->step - 1) / seqs->step + 1;
+}
+
+
+int ha_seq::open(const char *name, int mode, uint test_if_locked)
+{
+ mysql_mutex_lock(&table->s->LOCK_ha_data);
+ seqs= (SHARE*)table->s->ha_data;
+ DBUG_ASSERT(my_strcasecmp(table_alias_charset, name, seqs->name) == 0);
+ if (seqs->use_count++ == 0)
+ thr_lock_init(&seqs->lock);
+ mysql_mutex_unlock(&table->s->LOCK_ha_data);
+
+ ref_length= sizeof(cur);
+ thr_lock_data_init(&seqs->lock,&lock,NULL);
+ return 0;
+}
+
+int ha_seq::close(void)
+{
+ mysql_mutex_lock(&table->s->LOCK_ha_data);
+ if (--seqs->use_count == 0)
+ thr_lock_delete(&seqs->lock);
+ mysql_mutex_unlock(&table->s->LOCK_ha_data);
+ return 0;
+}
+
+static handler *create_handler(handlerton *hton, TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_seq(hton, table);
+}
+
+static int discover_table(handlerton *hton, THD *thd, TABLE_SHARE *share)
+{
+ // a table is discovered if its name matches the pattern seq_1_to_10 or
+ // seq_1_to_10_step_3
+ ulonglong from, to, step= 1;
+ uint n1= 0, n2= 0;
+ bool reverse;
+ sscanf(share->table_name.str, "seq_%llu_to_%llu%n_step_%llu%n",
+ &from, &to, &n1, &step, &n2);
+ if (n1 != share->table_name.length && n2 != share->table_name.length)
+ return HA_ERR_NO_SUCH_TABLE;
+
+ if (step == 0)
+ return HA_WRONG_CREATE_OPTION;
+
+ const char *sql="create table seq (seq bigint unsigned primary key)";
+ int res= share->init_from_sql_statement_string(thd, 0, sql, strlen(sql));
+ if (res)
+ return res;
+
+ if ((reverse = from > to))
+ {
+ if (step > from - to)
+ to = from;
+ else
+ swap_variables(ulonglong, from, to);
+ /*
+ when keyread is allowed, the optimizer will always prefer an index to a
+ table scan for our tables, so we'll never see the range reversed.
+ */
+ share->keys_for_keyread.clear_all();
+ }
+
+ to= (to - from) / step * step + step + from;
+
+ SHARE *seqs= (SHARE*)alloc_root(&share->mem_root, sizeof(*seqs));
+ bzero(seqs, sizeof(*seqs));
+ seqs->name = share->normalized_path.str;
+ seqs->from= from;
+ seqs->to= to;
+ seqs->step= step;
+ seqs->reverse= reverse;
+
+ share->ha_data = seqs;
+ return 0;
+}
+
+
+static int dummy_ret_int() { return 0; }
+
+static int init(void *p)
+{
+ handlerton *hton = (handlerton *)p;
+ hton->create = create_handler;
+ hton->discover_table = discover_table;
+ hton->discover_table_existence =
+ (int (*)(handlerton *, const char *, const char *)) &dummy_ret_int;
+ hton->commit= hton->rollback= hton->prepare=
+ (int (*)(handlerton *, THD *, bool)) &dummy_ret_int;
+ hton->savepoint_set= hton->savepoint_rollback= hton->savepoint_release=
+ (int (*)(handlerton *, THD *, void *)) &dummy_ret_int;
+
+ return 0;
+}
+
+static struct st_mysql_storage_engine descriptor =
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+maria_declare_plugin(sequence)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &descriptor,
+ "SEQUENCE",
+ "Sergei Golubchik",
+ "Generated tables filled with sequential values",
+ PLUGIN_LICENSE_GPL,
+ init,
+ NULL,
+ 0x0100,
+ NULL,
+ NULL,
+ "0.1",
+ MariaDB_PLUGIN_MATURITY_EXPERIMENTAL
+}
+maria_declare_plugin_end;
+
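A few arithmetic details in sequence.cc above are easy to miss: discover_table() rewrites 'to' to be exclusive (one step past the last generated value), index_read_map() rounds a search key up to the next generated value with the integer ceiling idiom (key - from + step - 1) / step * step + from, and records_in_range() counts the values in [kmin, kmax] as floor((kmax-from)/step) - ceil((kmin-from)/step) + 1. A standalone check of those formulas against seq_1_to_15_step_2 (values 1,3,...,15, stored as from=1, to=17, step=2), matching the EXPLAIN row estimates in simple.result; the names below are local to this sketch:

// Standalone check of the rounding and row-count formulas in sequence.cc.
#include <cassert>
#include <cstdio>

struct seq_def { unsigned long long from, to, step; };   // 'to' is exclusive

// round 'key' up to the next value the sequence actually generates
// (the HA_READ_KEY_OR_NEXT branch of index_read_map)
static unsigned long long round_up(const seq_def &s, unsigned long long key)
{
  if (key <= s.from)
    return s.from;
  return (key - s.from + s.step - 1) / s.step * s.step + s.from;
}

// number of generated values inside the closed interval [kmin, kmax]
// (the records_in_range estimate)
static unsigned long long count_in_range(const seq_def &s,
                                         unsigned long long kmin,
                                         unsigned long long kmax)
{
  if (kmin >= s.to || kmax < s.from || kmin > kmax)
    return 0;
  return (kmax - s.from) / s.step
       - (kmin - s.from + s.step - 1) / s.step + 1;
}

int main()
{
  seq_def s = { 1, 17, 2 };               // seq_1_to_15_step_2: 1,3,...,15
  assert(round_up(s, 4) == 5);            // key 4 lands on the next value, 5
  assert(count_in_range(s, 4, 9) == 3);   // 5,7,9  -> EXPLAIN rows=3 above
  assert(count_in_range(s, 4, 16) == 6);  // 5..15  -> EXPLAIN rows=6 for seq > 4
  std::printf("formulas agree with the estimates in simple.result\n");
  return 0;
}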
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 3305875c124..23bf21b6cbe 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -1285,7 +1285,7 @@ CSphSEQuery::~CSphSEQuery ()
SafeDeleteArray ( m_sQueryBuffer );
SafeDeleteArray ( m_pWeights );
SafeDeleteArray ( m_pBuf );
- for ( int i=0; i<m_dOverrides.elements(); i++ )
+ for ( size_t i=0; i<m_dOverrides.elements(); i++ )
SafeDelete ( m_dOverrides.at(i) );
SPH_VOID_RET();
}
@@ -1865,7 +1865,7 @@ int CSphSEQuery::BuildRequest ( char ** ppBuffer )
iReqSize += 8 + strlen(m_sFieldWeight[i] );
// overrides
iReqSize += 4;
- for ( int i=0; i<m_dOverrides.elements(); i++ )
+ for ( size_t i=0; i<m_dOverrides.elements(); i++ )
{
CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
const uint32 uSize = pOverride->m_iType==SPH_ATTR_BIGINT ? 16 : 12; // id64 + value
@@ -1972,13 +1972,13 @@ int CSphSEQuery::BuildRequest ( char ** ppBuffer )
// overrides
SendInt ( m_dOverrides.elements() );
- for ( int i=0; i<m_dOverrides.elements(); i++ )
+ for ( size_t i=0; i<m_dOverrides.elements(); i++ )
{
CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
SendString ( pOverride->m_sName );
SendDword ( pOverride->m_iType );
SendInt ( pOverride->m_dIds.elements() );
- for ( int j=0; j<pOverride->m_dIds.elements(); j++ )
+ for ( size_t j=0; j<pOverride->m_dIds.elements(); j++ )
{
SendUint64 ( pOverride->m_dIds.at(j) );
if ( pOverride->m_iType==SPH_ATTR_FLOAT )
@@ -2005,9 +2005,6 @@ int CSphSEQuery::BuildRequest ( char ** ppBuffer )
// SPHINX HANDLER
//////////////////////////////////////////////////////////////////////////////
-static const char * ha_sphinx_exts[] = { NullS };
-
-
#if MYSQL_VERSION_ID<50100
ha_sphinx::ha_sphinx ( TABLE_ARG * table )
: handler ( &sphinx_hton, table )
@@ -2047,16 +2044,6 @@ ha_sphinx::~ha_sphinx()
}
}
-
-// If frm_error() is called then we will use this to to find out what file extentions
-// exist for the storage engine. This is also used by the default rename_table and
-// delete_table method in handler.cc.
-const char ** ha_sphinx::bas_ext() const
-{
- return ha_sphinx_exts;
-}
-
-
// Used for opening tables. The name will be the name of the file.
// A table is opened when it needs to be opened. For instance
// when a request comes in for a select on the table (tables are not
@@ -3437,7 +3424,8 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
// report and bail
if ( sError[0] )
{
- my_error ( ER_CANT_CREATE_TABLE, MYF(0), sError, -1 );
+ my_error ( ER_CANT_CREATE_TABLE, MYF(0),
+ table->s->db.str, table->s->table_name, sError );
SPH_RET(-1);
}
diff --git a/storage/sphinx/ha_sphinx.h b/storage/sphinx/ha_sphinx.h
index f2aa726791b..f650e1641da 100644
--- a/storage/sphinx/ha_sphinx.h
+++ b/storage/sphinx/ha_sphinx.h
@@ -57,7 +57,6 @@ public:
const char * table_type () const { return "SPHINX"; } ///< SE name for display purposes
const char * index_type ( uint ) { return "HASH"; } ///< index type name for display purposes
- const char ** bas_ext () const; ///< my file extensions
#if MYSQL_VERSION_ID>50100
ulonglong table_flags () const { return HA_CAN_INDEX_BLOBS |
diff --git a/storage/test_sql_discovery/CMakeLists.txt b/storage/test_sql_discovery/CMakeLists.txt
new file mode 100644
index 00000000000..2039b08123c
--- /dev/null
+++ b/storage/test_sql_discovery/CMakeLists.txt
@@ -0,0 +1,2 @@
+MYSQL_ADD_PLUGIN(test_sql_discovery test_sql_discovery.cc STORAGE_ENGINE
+ COMPONENT Test)
diff --git a/storage/test_sql_discovery/mysql-test/archive/discover.rdiff b/storage/test_sql_discovery/mysql-test/archive/discover.rdiff
new file mode 100644
index 00000000000..3148999079f
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/archive/discover.rdiff
@@ -0,0 +1,35 @@
+--- suite/archive/discover.result 2013-04-08 00:06:37.000000000 +0200
++++ /usr/home/serg/Abk/mysql/10.0-serg/storage/test_sql_discovery/mysql-test/archive/discover.reject 2013-04-08 00:07:02.000000000 +0200
+@@ -42,6 +42,7 @@
+ t1 BASE TABLE
+ t2 BASE TABLE
+ t1.ARZ
++t1.frm
+ t2.ARZ
+ t2.frm
+ #
+@@ -60,6 +61,7 @@
+ flush tables;
+ rename table t2 to t0;
+ t0.ARZ
++t0.frm
+ t1.ARZ
+ t1.frm
+ #
+@@ -77,6 +79,7 @@
+ flush tables;
+ drop table t1;
+ t0.ARZ
++t0.frm
+ #
+ # discover of table non-existance on drop
+ #
+@@ -86,7 +89,7 @@
+ drop table t0;
+ show status like 'Handler_discover';
+ Variable_name Value
+-Handler_discover 7
++Handler_discover 8
+ #
+ # Bug#45377: ARCHIVE tables aren't discoverable after OPTIMIZE
+ #
diff --git a/storage/test_sql_discovery/mysql-test/archive/discover.test b/storage/test_sql_discovery/mysql-test/archive/discover.test
new file mode 100644
index 00000000000..6d7414280c5
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/archive/discover.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source discover.test
diff --git a/storage/test_sql_discovery/mysql-test/main/r/plugin.rdiff b/storage/test_sql_discovery/mysql-test/main/r/plugin.rdiff
new file mode 100644
index 00000000000..b9288d70f4b
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/r/plugin.rdiff
@@ -0,0 +1,11 @@
+--- r/plugin.result 2013-02-21 19:46:59.000000000 +0100
++++ r/plugin.reject 2013-02-27 11:13:22.000000000 +0100
+@@ -71,6 +71,8 @@
+ SELECT * FROM t2;
+ ERROR 42000: Unknown storage engine 'EXAMPLE'
+ DROP TABLE t2;
++Warnings:
++Error 1286 Unknown storage engine 'EXAMPLE'
+ UNINSTALL PLUGIN EXAMPLE;
+ ERROR 42000: PLUGIN EXAMPLE does not exist
+ UNINSTALL PLUGIN non_exist;
diff --git a/storage/test_sql_discovery/mysql-test/main/t/create.test b/storage/test_sql_discovery/mysql-test/main/t/create.test
new file mode 100644
index 00000000000..4b3dd16c0a2
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/create.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source create.test
diff --git a/storage/test_sql_discovery/mysql-test/main/t/drop.test b/storage/test_sql_discovery/mysql-test/main/t/drop.test
new file mode 100644
index 00000000000..2b3f864ec6b
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/drop.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source drop.test
diff --git a/storage/test_sql_discovery/mysql-test/main/t/mdl_sync.test b/storage/test_sql_discovery/mysql-test/main/t/mdl_sync.test
new file mode 100644
index 00000000000..9c052839d93
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/mdl_sync.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source mdl_sync.test
diff --git a/storage/test_sql_discovery/mysql-test/main/t/partition_disabled.test b/storage/test_sql_discovery/mysql-test/main/t/partition_disabled.test
new file mode 100644
index 00000000000..7fccc33fdbd
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/partition_disabled.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source partition_disabled.test
diff --git a/storage/test_sql_discovery/mysql-test/main/t/plugin.test b/storage/test_sql_discovery/mysql-test/main/t/plugin.test
new file mode 100644
index 00000000000..52315af0645
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/plugin.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source plugin.test
diff --git a/storage/test_sql_discovery/mysql-test/main/t/rename.test b/storage/test_sql_discovery/mysql-test/main/t/rename.test
new file mode 100644
index 00000000000..ba645131401
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/main/t/rename.test
@@ -0,0 +1,3 @@
+# run the normal test file with need_full_discover_for_existence>0
+--source ../sql_discovery/inc.inc
+--source rename.test
diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/inc.inc b/storage/test_sql_discovery/mysql-test/sql_discovery/inc.inc
new file mode 100644
index 00000000000..b13cbd7d12b
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/sql_discovery/inc.inc
@@ -0,0 +1,4 @@
+if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'test_sql_discovery' AND support='YES'`)
+{
+ --skip Test requires test_sql_discovery engine
+}
diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/inc.opt b/storage/test_sql_discovery/mysql-test/sql_discovery/inc.opt
new file mode 100644
index 00000000000..7d5c2404a50
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/sql_discovery/inc.opt
@@ -0,0 +1,2 @@
+--plugin-load=$HA_TEST_SQL_DISCOVERY_SO
+--loose-test-sql-discovery
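The simple.result file that follows exercises the new test_sql_discovery engine, which discovers a table by executing a CREATE TABLE statement previously stored under that table's name via @@test_sql_discovery_statement; the many "failed to discover" errors come from statements that are not plain CREATE TABLE forms (LIKE, SELECT, TEMPORARY, IF NOT EXISTS, explicit ENGINE, DATA/INDEX DIRECTORY, UNION). A hedged sketch of what such a discover_table hook could look like, modelled on the sequence engine's hook earlier in this diff; lookup_statement_for() is an invented stand-in for however the plugin actually stores the per-table statement, and this only builds inside the server tree:

// Hypothetical sketch only: a server-side plugin hook modelled on
// sequence.cc's discover_table() above.  lookup_statement_for() is invented.
#include <mysql_version.h>
#include <handler.h>
#include <table.h>
#include <string.h>

static const char *lookup_statement_for(const char *table_name);  // assumed helper

static int discover_table(handlerton *hton, THD *thd, TABLE_SHARE *share)
{
  const char *sql= lookup_statement_for(share->table_name.str);
  if (!sql)
    return HA_ERR_NO_SUCH_TABLE;               // nothing stored for this name

  // Parse and apply the stored CREATE TABLE text (same call shape as in
  // sequence.cc above).  If the text cannot be used for discovery, this fails
  // and the server reports "Engine TEST_SQL_DISCOVERY failed to discover
  // table ..." as seen in the results below.
  return share->init_from_sql_statement_string(thd, 0, sql, strlen(sql));
}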
diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result
new file mode 100644
index 00000000000..4fee0a983ed
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result
@@ -0,0 +1,199 @@
+show variables like 'test_sql_discovery%';
+Variable_name Value
+test_sql_discovery_statement
+test_sql_discovery_write_frm ON
+set sql_quote_show_create=0;
+create table t1 (a int) engine=test_sql_discovery;
+ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by database")
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+set @@test_sql_discovery_statement='t1:foobar bwa-ha-ha';
+select * from t0;
+ERROR 42S02: Table 'test.t0' doesn't exist
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1064 You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'foobar bwa-ha-ha' at line 1
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:select 1';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'select 1'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int primary key) partition by hash(id) partitions 2';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1290 The MariaDB server is running with the --skip-partition option so it cannot execute this statement
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int) union=(t3,t4)';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a int) union=(t3,t4)'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 like t2';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 like t2'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 select * from t2';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 select * from t2'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int) index directory="/tmp"';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a int) index directory="/tmp"'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int) data directory="/tmp"';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a int) data directory="/tmp"'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int) engine=myisam';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table t1 (a int) engine=myisam'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create temporary table t1 (a int)';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create temporary table t1 (a int)'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table if not exists t1 (a int)';
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+show warnings;
+Level Code Message
+Error 1939 Engine TEST_SQL_DISCOVERY failed to discover table `test`.`t1` with 'create table if not exists t1 (a int)'
+Error 1146 Table 'test.t1' doesn't exist
+Error 1030 Got error 130 "Incorrect file format" from storage engine TEST_SQL_DISCOVERY
+set @@test_sql_discovery_statement='t1:create table t1 (a int)';
+select * from t1;
+a
+show create table t1;
+Table Create Table
+t1 CREATE TABLE t1 (
+ a int(11) DEFAULT NULL
+) ENGINE=TEST_SQL_DISCOVERY DEFAULT CHARSET=latin1
+drop table t1;
+set @@test_sql_discovery_statement='t1:create table t2 (a int)';
+select * from t1;
+a
+select * from t2;
+ERROR 42S02: Table 'test.t2' doesn't exist
+drop table t1;
+set @@test_sql_discovery_statement='t1:
+create table t1 (
+ a int not null default 5 primary key,
+ b timestamp,
+ c tinyblob,
+ d decimal(5,2),
+ e varchar(30) character set ascii,
+ f geometry not null,
+ index (d,b),
+ unique index (c(10)),
+ fulltext (e),
+ spatial (f)
+) comment="abc" default character set utf8 max_rows=100 min_rows=10 checksum=1';
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 13
+show create table t1;
+Table Create Table
+t1 CREATE TABLE t1 (
+ a int(11) NOT NULL DEFAULT '5',
+ b timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ c tinyblob,
+ d decimal(5,2) DEFAULT NULL,
+ e varchar(30) CHARACTER SET ascii DEFAULT NULL,
+ f geometry NOT NULL,
+ PRIMARY KEY (a),
+ UNIQUE KEY c (c(10)),
+ KEY d (d,b),
+ SPATIAL KEY f (f),
+ FULLTEXT KEY e (e)
+) ENGINE=TEST_SQL_DISCOVERY DEFAULT CHARSET=utf8 MIN_ROWS=10 MAX_ROWS=100 CHECKSUM=1 COMMENT='abc'
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 14
+----
+t1.frm
+----
+show open tables from test;
+Database Table In_use Name_locked
+test t1 0 0
+select * from t1;
+a b c d e f
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 14
+flush tables;
+select * from t1;
+a b c d e f
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 14
+drop table t1;
+set @@test_sql_discovery_write_frm=0;
+set @@test_sql_discovery_statement='t1:create table t1 (a int)';
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 14
+show create table t1;
+Table Create Table
+t1 CREATE TABLE t1 (
+ a int(11) DEFAULT NULL
+) ENGINE=TEST_SQL_DISCOVERY DEFAULT CHARSET=latin1
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 15
+----
+----
+show open tables from test;
+Database Table In_use Name_locked
+test t1 0 0
+select * from t1;
+a
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 15
+flush tables;
+select * from t1;
+a
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 16
+drop table t1;
+show status like 'handler_discover';
+Variable_name Value
+Handler_discover 16
diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.test b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.test
new file mode 100644
index 00000000000..4aa5ac2864e
--- /dev/null
+++ b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.test
@@ -0,0 +1,133 @@
+--source inc.inc
+
+show variables like 'test_sql_discovery%';
+set sql_quote_show_create=0;
+let $mysqld_datadir= `select @@datadir`;
+
+--error ER_CANT_CREATE_TABLE
+create table t1 (a int) engine=test_sql_discovery;
+
+--error ER_NO_SUCH_TABLE
+select * from t1;
+
+set @@test_sql_discovery_statement='t1:foobar bwa-ha-ha';
+--error ER_NO_SUCH_TABLE
+select * from t0;
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+#
+# test different invalid discovering statements
+#
+
+set @@test_sql_discovery_statement='t1:select 1';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 (a int primary key) partition by hash(id) partitions 2';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 (a int) union=(t3,t4)';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 like t2';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 select * from t2';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 (a int) index directory="/tmp"';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 (a int) data directory="/tmp"';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table t1 (a int) engine=myisam';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create temporary table t1 (a int)';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+set @@test_sql_discovery_statement='t1:create table if not exists t1 (a int)';
+--error ER_NO_SUCH_TABLE
+select * from t1;
+show warnings;
+
+#
+# this should work:
+#
+set @@test_sql_discovery_statement='t1:create table t1 (a int)';
+select * from t1;
+show create table t1;
+drop table t1;
+
+# table name in the create table statement is ignored
+set @@test_sql_discovery_statement='t1:create table t2 (a int)';
+select * from t1;
+--error ER_NO_SUCH_TABLE
+select * from t2;
+drop table t1;
+
+# and something more complex
+set @@test_sql_discovery_statement='t1:
+create table t1 (
+ a int not null default 5 primary key,
+ b timestamp,
+ c tinyblob,
+ d decimal(5,2),
+ e varchar(30) character set ascii,
+ f geometry not null,
+ index (d,b),
+ unique index (c(10)),
+ fulltext (e),
+ spatial (f)
+) comment="abc" default character set utf8 max_rows=100 min_rows=10 checksum=1';
+show status like 'handler_discover';
+show create table t1;
+show status like 'handler_discover';
+--echo ----
+--list_files $mysqld_datadir/test t*
+--echo ----
+show open tables from test;
+select * from t1;
+show status like 'handler_discover';
+flush tables;
+select * from t1;
+show status like 'handler_discover';
+drop table t1;
+
+set @@test_sql_discovery_write_frm=0;
+set @@test_sql_discovery_statement='t1:create table t1 (a int)';
+show status like 'handler_discover';
+show create table t1;
+show status like 'handler_discover';
+--echo ----
+--list_files $mysqld_datadir/test t*
+--echo ----
+show open tables from test;
+select * from t1;
+show status like 'handler_discover';
+flush tables;
+select * from t1;
+show status like 'handler_discover';
+drop table t1;
+show status like 'handler_discover';
+
diff --git a/storage/test_sql_discovery/test_sql_discovery.cc b/storage/test_sql_discovery/test_sql_discovery.cc
new file mode 100644
index 00000000000..33438035b8d
--- /dev/null
+++ b/storage/test_sql_discovery/test_sql_discovery.cc
@@ -0,0 +1,175 @@
+/*
+ Copyright (c) 2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/*
+  a really minimal engine to test table discovery via sql statements.
+  See the archive engine if you're interested in a real-life usable engine
+  that uses discovery via frm shipping.
+*/
+
+#include <mysql_version.h>
+#include <handler.h>
+#include <table.h>
+
+static MYSQL_THDVAR_STR(statement, PLUGIN_VAR_MEMALLOC,
+ "The table name and the SQL statement to discover the next table",
+ NULL, NULL, 0);
+
+static MYSQL_THDVAR_BOOL(write_frm, 0,
+ "Whether to cache discovered table metadata in frm files",
+ NULL, NULL, TRUE);
+
+static struct st_mysql_sys_var *sysvars[] = {
+ MYSQL_SYSVAR(statement),
+ MYSQL_SYSVAR(write_frm),
+ NULL
+};
+
+typedef struct st_share {
+ const char *name;
+ THR_LOCK lock;
+ uint use_count;
+ struct st_share *next;
+} SHARE;
+
+class ha_tsd: public handler
+{
+private:
+ THR_LOCK_DATA lock;
+ SHARE *share;
+
+public:
+ ha_tsd(handlerton *hton, TABLE_SHARE *table_arg)
+ : handler(hton, table_arg) { }
+ ulonglong table_flags() const
+ { // NO_TRANSACTIONS and everything that affects CREATE TABLE
+ return HA_NO_TRANSACTIONS | HA_CAN_GEOMETRY | HA_NULL_IN_KEY |
+ HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_CAN_RTREEKEYS |
+ HA_CAN_FULLTEXT;
+ }
+
+ ulong index_flags(uint inx, uint part, bool all_parts) const { return 0; }
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+ {
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ lock.type = lock_type;
+ *to ++= &lock;
+ return to;
+ }
+
+ int rnd_init(bool scan) { return 0; }
+ int rnd_next(unsigned char *buf) { return HA_ERR_END_OF_FILE; }
+ void position(const uchar *record) { }
+ int rnd_pos(uchar *buf, uchar *pos) { return HA_ERR_END_OF_FILE; }
+ int info(uint flag) { return 0; }
+ uint max_supported_keys() const { return 16; }
+ int create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info) { return HA_ERR_WRONG_COMMAND; }
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+};
+
+static SHARE *find_or_create_share(const char *table_name, TABLE *table)
+{
+ SHARE *share;
+ for (share = (SHARE*)table->s->ha_data; share; share = share->next)
+ if (my_strcasecmp(table_alias_charset, table_name, share->name) == 0)
+ return share;
+
+ share = (SHARE*)alloc_root(&table->s->mem_root, sizeof(*share));
+ bzero(share, sizeof(*share));
+ share->name = strdup_root(&table->s->mem_root, table_name);
+ share->next = (SHARE*)table->s->ha_data;
+ table->s->ha_data = share;
+ return share;
+}
+
+int ha_tsd::open(const char *name, int mode, uint test_if_locked)
+{
+ mysql_mutex_lock(&table->s->LOCK_ha_data);
+ share = find_or_create_share(name, table);
+ if (share->use_count++ == 0)
+ thr_lock_init(&share->lock);
+ mysql_mutex_unlock(&table->s->LOCK_ha_data);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+
+ return 0;
+}
+
+int ha_tsd::close(void)
+{
+ mysql_mutex_lock(&table->s->LOCK_ha_data);
+ if (--share->use_count == 0)
+ thr_lock_delete(&share->lock);
+ mysql_mutex_unlock(&table->s->LOCK_ha_data);
+ return 0;
+}
+
+static handler *create_handler(handlerton *hton, TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_tsd(hton, table);
+}
+
+static int discover_table(handlerton *hton, THD* thd, TABLE_SHARE *share)
+{
+ const char *sql= THDVAR(thd, statement);
+
+  // the table is discovered if sql starts with "table_name:"
+ if (!sql ||
+ strncmp(sql, share->table_name.str, share->table_name.length) ||
+ sql[share->table_name.length] != ':')
+ return HA_ERR_NO_SUCH_TABLE;
+
+ sql+= share->table_name.length + 1;
+ return share->init_from_sql_statement_string(thd, THDVAR(thd, write_frm),
+ sql, strlen(sql));
+}
+
+static int init(void *p)
+{
+ handlerton *hton = (handlerton *)p;
+ hton->create = create_handler;
+ hton->discover_table = discover_table;
+ return 0;
+}
+
+struct st_mysql_storage_engine descriptor =
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+maria_declare_plugin(test_sql_discovery)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &descriptor,
+ "TEST_SQL_DISCOVERY",
+ "Sergei Golubchik",
+ "Minimal engine to test table discovery via sql statements",
+ PLUGIN_LICENSE_GPL,
+ init,
+ NULL,
+ 0x0001,
+ NULL,
+ sysvars,
+ "0.1",
+ MariaDB_PLUGIN_MATURITY_EXPERIMENTAL
+}
+maria_declare_plugin_end;
+
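The engine above hinges on a single convention: test_sql_discovery_statement carries a value of the form table_name:SQL, and discover_table() accepts it only when the requested table name matches the text before the colon. A minimal standalone sketch of that prefix check (the helper name and demo values are illustrative, not part of the patch):

    #include <stdio.h>
    #include <string.h>

    /* Return the SQL text from a "name:statement" setting when it names the
       requested table, otherwise NULL (which the engine reports as
       HA_ERR_NO_SUCH_TABLE). */
    static const char *
    discovery_sql_for(const char *setting, const char *table_name)
    {
        size_t len = strlen(table_name);

        if (setting == NULL
            || strncmp(setting, table_name, len) != 0
            || setting[len] != ':')
            return NULL;

        return setting + len + 1;
    }

    int main(void)
    {
        const char *setting = "t1:create table t1 (a int)";

        printf("t1 -> %s\n", discovery_sql_for(setting, "t1"));
        printf("t2 -> %s\n", discovery_sql_for(setting, "t2")
                             ? "discovered" : "no such table");
        return 0;
    }

In the real handler the matched statement is handed to init_from_sql_statement_string(), which is why unparseable or unsupported statements in simple.result surface as warnings followed by ER_NO_SUCH_TABLE.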
diff --git a/storage/xtradb/btr/btr0btr.c b/storage/xtradb/btr/btr0btr.c
index ed97fc5bd58..a3e57d632a0 100644
--- a/storage/xtradb/btr/btr0btr.c
+++ b/storage/xtradb/btr/btr0btr.c
@@ -1641,7 +1641,7 @@ btr_page_reorganize_low(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
btr_assert_not_corrupted(block, index);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
data_size1 = page_get_data_size(page);
max_ins_size1 = page_get_max_insert_size_after_reorganize(page, 1);
@@ -1760,7 +1760,7 @@ btr_page_reorganize_low(
func_exit:
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
#ifndef UNIV_HOTBACKUP
buf_block_free(temp_block);
@@ -1835,7 +1835,7 @@ btr_page_empty(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_zip == buf_block_get_page_zip(block));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
btr_search_drop_page_hash_index(block);
@@ -1892,10 +1892,10 @@ btr_root_raise_and_insert(
root_block = btr_cur_get_block(cursor);
root_page_zip = buf_block_get_page_zip(root_block);
ut_ad(page_get_n_recs(root) > 0);
+ index = btr_cur_get_index(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!root_page_zip || page_zip_validate(root_page_zip, root));
+ ut_a(!root_page_zip || page_zip_validate(root_page_zip, root, index));
#endif /* UNIV_ZIP_DEBUG */
- index = btr_cur_get_index(cursor);
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
ulint space = dict_index_get_space(index);
@@ -2825,8 +2825,8 @@ insert_empty:
#ifdef UNIV_ZIP_DEBUG
if (UNIV_LIKELY_NULL(page_zip)) {
- ut_a(page_zip_validate(page_zip, page));
- ut_a(page_zip_validate(new_page_zip, new_page));
+ ut_a(page_zip_validate(page_zip, page, cursor->index));
+ ut_a(page_zip_validate(new_page_zip, new_page, cursor->index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -2860,7 +2860,8 @@ insert_empty:
= buf_block_get_page_zip(insert_block);
ut_a(!insert_page_zip
- || page_zip_validate(insert_page_zip, insert_page));
+ || page_zip_validate(insert_page_zip, insert_page,
+ cursor->index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3140,6 +3141,8 @@ btr_lift_page_up(
buf_block_t* blocks[BTR_MAX_LEVELS];
ulint n_blocks; /*!< last used index in blocks[] */
ulint i;
+ ibool lift_father_up = FALSE;
+ buf_block_t* block_orig = block;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
@@ -3150,11 +3153,13 @@ btr_lift_page_up(
{
btr_cur_t cursor;
- mem_heap_t* heap = mem_heap_create(100);
- ulint* offsets;
+ ulint* offsets = NULL;
+ mem_heap_t* heap = mem_heap_create(
+ sizeof(*offsets)
+ * (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields));
buf_block_t* b;
- offsets = btr_page_get_father_block(NULL, heap, index,
+ offsets = btr_page_get_father_block(offsets, heap, index,
block, mtr, &cursor);
father_block = btr_cur_get_block(&cursor);
father_page_zip = buf_block_get_page_zip(father_block);
@@ -3178,6 +3183,29 @@ btr_lift_page_up(
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
}
+ if (n_blocks && page_level == 0) {
+ /* The father page should also be the only page on its level
+ (other than the root), so we must lift up the father page first,
+ because a leaf page should be lifted directly only into the root.
+ Freeing a page chooses the segment based on page_level (==0 or
+ !=0); if page_level changed from !=0 to ==0, the later freeing
+ of the page would not find the page allocation to be freed. */
+
+ lift_father_up = TRUE;
+ block = father_block;
+ page = buf_block_get_frame(block);
+ page_level = btr_page_get_level(page, mtr);
+
+ ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
+ ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
+ ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+
+ father_block = blocks[0];
+ father_page_zip = buf_block_get_page_zip(father_block);
+ father_page = buf_block_get_frame(father_block);
+ }
+
mem_heap_free(heap);
}
@@ -3185,6 +3213,7 @@ btr_lift_page_up(
/* Make the father empty */
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
+ page_level++;
/* Copy the records to the father page one by one. */
if (0
@@ -3217,7 +3246,7 @@ btr_lift_page_up(
lock_update_copy_and_discard(father_block, block);
/* Go upward to root page, decrementing levels by one. */
- for (i = 0; i < n_blocks; i++, page_level++) {
+ for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) {
page_t* page = buf_block_get_frame(blocks[i]);
page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
@@ -3225,7 +3254,7 @@ btr_lift_page_up(
btr_page_set_level(page, page_zip, page_level, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3239,7 +3268,7 @@ btr_lift_page_up(
ut_ad(page_validate(father_page, index));
ut_ad(btr_check_node_ptr(index, father_block, mtr));
- return(father_block);
+ return(lift_father_up ? block_orig : father_block);
}
/*************************************************************//**
@@ -3310,6 +3339,7 @@ btr_compress(
if (adjust) {
nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor));
+ ut_ad(nth_rec > 0);
}
/* Decide the page to which we try to merge and which will inherit
@@ -3400,8 +3430,8 @@ err_exit:
const page_zip_des_t* page_zip
= buf_block_get_page_zip(block);
ut_a(page_zip);
- ut_a(page_zip_validate(merge_page_zip, merge_page));
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(merge_page_zip, merge_page, index));
+ ut_a(page_zip_validate(page_zip, page, index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3534,7 +3564,8 @@ err_exit:
ut_ad(page_validate(merge_page, index));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!merge_page_zip || page_zip_validate(merge_page_zip, merge_page));
+ ut_a(!merge_page_zip || page_zip_validate(merge_page_zip, merge_page,
+ index));
#endif /* UNIV_ZIP_DEBUG */
/* Free the file page */
@@ -3545,6 +3576,7 @@ func_exit:
mem_heap_free(heap);
if (adjust) {
+ ut_ad(nth_rec > 0);
btr_cur_position(
index,
page_rec_get_nth(merge_block->frame, nth_rec),
@@ -3716,7 +3748,7 @@ btr_discard_page(
page_zip_des_t* merge_page_zip
= buf_block_get_page_zip(merge_block);
ut_a(!merge_page_zip
- || page_zip_validate(merge_page_zip, merge_page));
+ || page_zip_validate(merge_page_zip, merge_page, index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -4058,8 +4090,22 @@ btr_index_page_validate(
{
page_cur_t cur;
ibool ret = TRUE;
+#ifndef DBUG_OFF
+ ulint nth = 1;
+#endif /* !DBUG_OFF */
page_cur_set_before_first(block, &cur);
+
+ /* Directory slot 0 should only contain the infimum record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(page_rec_get_nth_const(
+ page_cur_get_page(&cur), 0)
+ == cur.rec);
+ ut_a(page_dir_slot_get_n_owned(
+ page_dir_get_nth_slot(
+ page_cur_get_page(&cur), 0))
+ == 1););
+
page_cur_move_to_next(&cur);
for (;;) {
@@ -4073,6 +4119,16 @@ btr_index_page_validate(
return(FALSE);
}
+ /* Verify that page_rec_get_nth_const() is correctly
+ retrieving each record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(cur.rec == page_rec_get_nth_const(
+ page_cur_get_page(&cur),
+ page_rec_get_n_recs_before(
+ cur.rec)));
+ ut_a(nth++ == page_rec_get_n_recs_before(
+ cur.rec)););
+
page_cur_move_to_next(&cur);
}
@@ -4170,7 +4226,7 @@ btr_validate_level(
ut_a(space == page_get_space_id(page));
#ifdef UNIV_ZIP_DEBUG
page_zip = buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
ut_a(!page_is_leaf(page));
@@ -4198,7 +4254,7 @@ loop:
#ifdef UNIV_ZIP_DEBUG
page_zip = buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* Check ordering etc. of records */
diff --git a/storage/xtradb/btr/btr0cur.c b/storage/xtradb/btr/btr0cur.c
index 687853a422e..d089fb5ad22 100644
--- a/storage/xtradb/btr/btr0cur.c
+++ b/storage/xtradb/btr/btr0cur.c
@@ -97,6 +97,11 @@ srv_refresh_innodb_monitor_stats(). Referenced by
srv_printf_innodb_monitor(). */
UNIV_INTERN ulint btr_cur_n_sea_old = 0;
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+UNIV_INTERN uint btr_cur_limit_optimistic_insert_debug = 0;
+#endif /* UNIV_DEBUG */
+
/** In the optimistic insert, if the insert does not fit, but this much space
can be released by page reorganize, then it is reorganized */
#define BTR_CUR_PAGE_REORGANIZE_LIMIT (UNIV_PAGE_SIZE / 32)
@@ -748,7 +753,7 @@ retry_page_get:
#ifdef UNIV_ZIP_DEBUG
const page_zip_des_t* page_zip
= buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
buf_block_dbg_add_level(
@@ -1378,6 +1383,9 @@ btr_cur_optimistic_insert(
}
}
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
+ goto fail);
+
/* If there have been many consecutive inserts, and we are on the leaf
level, check if we have to split the page to reserve enough free space
for future updates of records. */
@@ -2189,7 +2197,7 @@ any_extern:
page_zip = buf_block_get_page_zip(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_zip
@@ -2406,7 +2414,7 @@ btr_cur_pessimistic_update(
MTR_MEMO_X_LOCK));
ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
@@ -2561,7 +2569,7 @@ make_external:
btr_search_update_hash_on_delete(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
@@ -2668,7 +2676,7 @@ make_external:
buf_block_t* rec_block = btr_cur_get_block(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
page = buf_block_get_frame(rec_block);
#endif /* UNIV_ZIP_DEBUG */
page_zip = buf_block_get_page_zip(rec_block);
@@ -2694,7 +2702,7 @@ make_external:
return_after_reservations:
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (n_extents > 0) {
@@ -3066,7 +3074,7 @@ btr_cur_set_deleted_flag_for_ibuf(
when the tablespace is
uncompressed */
ibool val, /*!< in: value to set */
- mtr_t* mtr) /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
/* We do not need to reserve btr_search_latch, as the page
has just been read to the buffer pool and there cannot be
@@ -3171,12 +3179,14 @@ btr_cur_optimistic_delete(
page, 1);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip
+ || page_zip_validate(page_zip, page, cursor->index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
cursor->index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip
+ || page_zip_validate(page_zip, page, cursor->index));
#endif /* UNIV_ZIP_DEBUG */
if (dict_index_is_clust(cursor->index)
@@ -3273,7 +3283,7 @@ btr_cur_pessimistic_delete(
rec = btr_cur_get_rec(cursor);
page_zip = buf_block_get_page_zip(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
@@ -3283,7 +3293,7 @@ btr_cur_pessimistic_delete(
rec, offsets, page_zip,
rb_ctx, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3344,7 +3354,7 @@ btr_cur_pessimistic_delete(
page_cur_delete_rec(btr_cur_get_page_cur(cursor), index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
ut_ad(btr_check_node_ptr(index, block, mtr));
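The btr0cur.c hunk adds the btr_cur_limit_optimistic_insert_debug knob and invokes LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page), goto fail), but the macro definition itself lives in a header that is not part of this excerpt. A rough stand-in showing the intended effect, assuming the macro simply bails out of the optimistic path once a page holds the configured number of records (the names and the concrete definition below are assumptions, not the real code):

    #include <stdio.h>

    /* Assumed stand-in for the debug limit; 0 means "no limit". */
    static unsigned int limit_optimistic_insert_debug = 3;

    #define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)           \
        do {                                                    \
            if (limit_optimistic_insert_debug                   \
                && (NREC) >= limit_optimistic_insert_debug) {   \
                CODE;                                           \
            }                                                   \
        } while (0)

    int main(void)
    {
        unsigned int n_recs;

        for (n_recs = 1; n_recs <= 5; n_recs++) {
            LIMIT_OPTIMISTIC_INSERT_DEBUG(n_recs, goto fail);
            printf("optimistic insert ok with %u records\n", n_recs);
            continue;
    fail:
            printf("forced page split path at %u records\n", n_recs);
        }
        return 0;
    }

Forcing the fail path this early is what lets debug builds provoke page splits on nearly empty pages.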
diff --git a/storage/xtradb/buf/buf0buf.c b/storage/xtradb/buf/buf0buf.c
index bbc1042ca78..5bcfb0f51b9 100644
--- a/storage/xtradb/buf/buf0buf.c
+++ b/storage/xtradb/buf/buf0buf.c
@@ -66,9 +66,7 @@ _increment_page_get_statistics(buf_block_t* block, trx_t* trx)
byte block_hash_offset;
ut_ad(block);
-
- if (!innobase_get_slow_log() || !trx || !trx->take_stats)
- return;
+ ut_ad(trx && trx->take_stats);
if (!trx->distinct_page_access_hash) {
trx->distinct_page_access_hash = mem_alloc(DPAH_SIZE);
@@ -279,7 +277,7 @@ the read requests for the whole area.
#ifndef UNIV_HOTBACKUP
/** Value in microseconds */
-static const int WAIT_FOR_READ = 5000;
+static const int WAIT_FOR_READ = 100;
/** Number of attempts made to read in a page in the buffer pool */
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
@@ -412,6 +410,33 @@ buf_get_total_list_len(
}
/********************************************************************//**
+Get total list size in bytes from all buffer pools. */
+UNIV_INTERN
+void
+buf_get_total_list_size_in_bytes(
+/*=============================*/
+ buf_pools_list_size_t* buf_pools_list_size) /*!< out: list sizes
+ in all buffer pools */
+{
+ ulint i;
+ ut_ad(buf_pools_list_size);
+ memset(buf_pools_list_size, 0, sizeof(*buf_pools_list_size));
+
+ for (i = 0; i < srv_buf_pool_instances; i++) {
+ buf_pool_t* buf_pool;
+
+ buf_pool = buf_pool_from_array(i);
+ /* We don't need mutex protection since this is
+ for statistics purpose */
+ buf_pools_list_size->LRU_bytes += buf_pool->stat.LRU_bytes;
+ buf_pools_list_size->unzip_LRU_bytes +=
+ UT_LIST_GET_LEN(buf_pool->unzip_LRU) * UNIV_PAGE_SIZE;
+ buf_pools_list_size->flush_list_bytes +=
+ buf_pool->stat.flush_list_bytes;
+ }
+}
+
+/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
@@ -1807,40 +1832,24 @@ buf_page_make_young(
}
/********************************************************************//**
-Sets the time of the first access of a page and moves a page to the
-start of the buffer pool LRU list if it is too old. This high-level
-function can be used to prevent an important page from slipping
-out of the buffer pool. */
+Moves a page to the start of the buffer pool LRU list if it is too old.
+This high-level function can be used to prevent an important page from
+slipping out of the buffer pool. */
static
void
-buf_page_set_accessed_make_young(
-/*=============================*/
- buf_page_t* bpage, /*!< in/out: buffer block of a
+buf_page_make_young_if_needed(
+/*==========================*/
+ buf_page_t* bpage) /*!< in/out: buffer block of a
file page */
- unsigned access_time) /*!< in: bpage->access_time
- read under mutex protection,
- or 0 if unknown */
{
+#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
-
ut_ad(!buf_pool_mutex_own(buf_pool));
+#endif /* UNIV_DEBUG */
ut_a(buf_page_in_file(bpage));
if (buf_page_peek_if_too_old(bpage)) {
- //buf_pool_mutex_enter(buf_pool);
- mutex_enter(&buf_pool->LRU_list_mutex);
- buf_LRU_make_block_young(bpage);
- //buf_pool_mutex_exit(buf_pool);
- mutex_exit(&buf_pool->LRU_list_mutex);
- } else if (!access_time) {
- ulint time_ms = ut_time_ms();
- mutex_t* block_mutex = buf_page_get_mutex_enter(bpage);
- //buf_pool_mutex_enter(buf_pool);
- if (block_mutex) {
- buf_page_set_accessed(bpage, time_ms);
- mutex_exit(block_mutex);
- }
- //buf_pool_mutex_exit(buf_pool);
+ buf_page_make_young(bpage);
}
}
@@ -1959,7 +1968,6 @@ buf_page_get_zip(
buf_page_t* bpage;
mutex_t* block_mutex;
ibool must_read;
- unsigned access_time;
trx_t* trx = NULL;
ulint sec;
ulint ms;
@@ -1967,7 +1975,7 @@ buf_page_get_zip(
ib_uint64_t finish_time;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(innobase_get_slow_log())) {
trx = innobase_get_trx();
}
buf_pool->stat.n_page_gets++;
@@ -2089,13 +2097,14 @@ err_exit:
got_block:
must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
- access_time = buf_page_is_accessed(bpage);
//buf_pool_mutex_exit(buf_pool);
+ buf_page_set_accessed(bpage);
+
mutex_exit(block_mutex);
- buf_page_set_accessed_make_young(bpage, access_time);
+ buf_page_make_young_if_needed(bpage);
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(!bpage->file_page_was_freed);
@@ -2111,7 +2120,7 @@ got_block:
/* Let us wait until the read operation
completes */
- if (innobase_get_slow_log() && trx && trx->take_stats)
+ if (UNIV_UNLIKELY(trx && trx->take_stats))
{
ut_usectime(&sec, &ms);
start_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -2132,7 +2141,7 @@ got_block:
break;
}
}
- if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
+ if (UNIV_UNLIKELY(start_time != 0))
{
ut_usectime(&sec, &ms);
finish_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -2487,7 +2496,7 @@ buf_page_get_gen(
|| ibuf_page_low(space, zip_size, offset,
FALSE, file, line, NULL));
#endif
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(innobase_get_slow_log())) {
trx = innobase_get_trx();
}
buf_pool->stat.n_page_gets++;
@@ -2774,6 +2783,8 @@ wait_until_unfixed:
UNIV_MEM_INVALID(bpage, sizeof *bpage);
+ access_time = buf_page_is_accessed(&block->page);
+
mutex_exit(block_mutex);
mutex_exit(&buf_pool->zip_mutex);
@@ -2781,18 +2792,22 @@ wait_until_unfixed:
buf_pool->n_pend_unzip++;
buf_pool_mutex_exit(buf_pool);
- //buf_pool_mutex_exit(buf_pool);
-
buf_page_free_descriptor(bpage);
- /* Decompress the page and apply buffered operations
- while not holding buf_pool->mutex or block->mutex. */
+ /* Decompress the page while not holding
+ buf_pool->mutex or block->mutex. */
success = buf_zip_decompress(block, srv_use_checksums);
ut_a(success);
if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
- ibuf_merge_or_delete_for_page(block, space, offset,
- zip_size, TRUE);
+ if (access_time) {
+#ifdef UNIV_IBUF_COUNT_DEBUG
+ ut_a(ibuf_count_get(space, offset) == 0);
+#endif /* UNIV_IBUF_COUNT_DEBUG */
+ } else {
+ ibuf_merge_or_delete_for_page(
+ block, space, offset, zip_size, TRUE);
+ }
}
/* Unfix and unlatch the block. */
@@ -2888,17 +2903,16 @@ wait_until_unfixed:
ut_a(mode == BUF_GET_POSSIBLY_FREED
|| !block->page.file_page_was_freed);
#endif
- //mutex_exit(&block->mutex);
/* Check if this is the first access to the page */
-
access_time = buf_page_is_accessed(&block->page);
- //buf_pool_mutex_exit(buf_pool);
- mutex_exit(block_mutex);
+ buf_page_set_accessed(&block->page);
+
+ mutex_exit(&block->mutex);
- if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
- buf_page_set_accessed_make_young(&block->page, access_time);
+ if (mode != BUF_PEEK_IF_IN_POOL) {
+ buf_page_make_young_if_needed(&block->page);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -2913,7 +2927,7 @@ wait_until_unfixed:
/* Let us wait until the read operation
completes */
- if (innobase_get_slow_log() && trx && trx->take_stats)
+ if (UNIV_UNLIKELY(trx && trx->take_stats))
{
ut_usectime(&sec, &ms);
start_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -2928,13 +2942,14 @@ wait_until_unfixed:
mutex_exit(&block->mutex);
if (io_fix == BUF_IO_READ) {
-
- os_thread_sleep(WAIT_FOR_READ);
+ /* wait by temporarily taking an s-latch */
+ rw_lock_s_lock(&(block->lock));
+ rw_lock_s_unlock(&(block->lock));
} else {
break;
}
}
- if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
+ if (UNIV_UNLIKELY(start_time != 0))
{
ut_usectime(&sec, &ms);
finish_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -2961,7 +2976,7 @@ wait_until_unfixed:
mtr_memo_push(mtr, block, fix_type);
- if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) {
+ if (mode != BUF_PEEK_IF_IN_POOL && !access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -2973,7 +2988,7 @@ wait_until_unfixed:
ut_a(ibuf_count_get(buf_block_get_space(block),
buf_block_get_page_no(block)) == 0);
#endif
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(trx && trx->take_stats)) {
_increment_page_get_statistics(block, trx);
}
@@ -3018,15 +3033,13 @@ buf_page_optimistic_get(
buf_block_buf_fix_inc(block, file, line);
- mutex_exit(&block->mutex);
+ access_time = buf_page_is_accessed(&block->page);
- /* Check if this is the first access to the page.
- We do a dirty read on purpose, to avoid mutex contention.
- This field is only used for heuristic purposes; it does not
- affect correctness. */
+ buf_page_set_accessed(&block->page);
- access_time = buf_page_is_accessed(&block->page);
- buf_page_set_accessed_make_young(&block->page, access_time);
+ mutex_exit(&block->mutex);
+
+ buf_page_make_young_if_needed(&block->page);
ut_ad(!ibuf_inside(mtr)
|| ibuf_page(buf_block_get_space(block),
@@ -3078,11 +3091,11 @@ buf_page_optimistic_get(
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(block->page.file_page_was_freed == FALSE);
#endif
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(innobase_get_slow_log())) {
trx = innobase_get_trx();
}
- if (UNIV_UNLIKELY(!access_time)) {
+ if (!access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
@@ -3099,7 +3112,7 @@ buf_page_optimistic_get(
buf_pool = buf_pool_from_block(block);
buf_pool->stat.n_page_gets++;
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(trx && trx->take_stats)) {
_increment_page_get_statistics(block, trx);
}
return(TRUE);
@@ -3149,28 +3162,14 @@ buf_page_get_known_nowait(
buf_block_buf_fix_inc(block, file, line);
+ buf_page_set_accessed(&block->page);
+
mutex_exit(&block->mutex);
buf_pool = buf_pool_from_block(block);
- if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
- //buf_pool_mutex_enter(buf_pool);
- mutex_enter(&buf_pool->LRU_list_mutex);
- buf_LRU_make_block_young(&block->page);
- //buf_pool_mutex_exit(buf_pool);
- mutex_exit(&buf_pool->LRU_list_mutex);
- } else if (!buf_page_is_accessed(&block->page)) {
- /* Above, we do a dirty read on purpose, to avoid
- mutex contention. The field buf_page_t::access_time
- is only used for heuristic purposes. Writes to the
- field must be protected by mutex, however. */
- ulint time_ms = ut_time_ms();
-
- //buf_pool_mutex_enter(buf_pool);
- mutex_enter(&block->mutex);
- buf_page_set_accessed(&block->page, time_ms);
- //buf_pool_mutex_exit(buf_pool);
- mutex_exit(&block->mutex);
+ if (mode == BUF_MAKE_YOUNG) {
+ buf_page_make_young_if_needed(&block->page);
}
ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
@@ -3211,9 +3210,13 @@ buf_page_get_known_nowait(
#endif
buf_pool->stat.n_page_gets++;
- if (innobase_get_slow_log()) {
+ if (UNIV_UNLIKELY(innobase_get_slow_log())) {
+
trx = innobase_get_trx();
- _increment_page_get_statistics(block, trx);
+ if (trx != NULL && trx->take_stats) {
+
+ _increment_page_get_statistics(block, trx);
+ }
}
return(TRUE);
@@ -3342,6 +3345,7 @@ buf_page_init(
ulint offset, /*!< in: offset of the page within space
in units of a page */
ulint fold, /*!< in: buf_page_address_fold(space,offset) */
+ ulint zip_size,/*!< in: compressed page size, or 0 */
buf_block_t* block) /*!< in/out: block to init */
{
buf_page_t* hash_page;
@@ -3411,6 +3415,9 @@ buf_page_init(
ut_d(block->page.in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
fold, &block->page);
+ if (zip_size) {
+ page_zip_set_size(&block->page.zip, zip_size);
+ }
}
/********************************************************************//**
@@ -3537,7 +3544,7 @@ err_exit:
ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
- buf_page_init(buf_pool, space, offset, fold, block);
+ buf_page_init(buf_pool, space, offset, fold, zip_size, block);
rw_lock_x_unlock(&buf_pool->page_hash_latch);
@@ -3557,8 +3564,6 @@ err_exit:
buf_page_set_io_fix(bpage, BUF_IO_READ);
if (UNIV_UNLIKELY(zip_size)) {
- page_zip_set_size(&block->page.zip, zip_size);
-
/* buf_pool->mutex may be released and
reacquired by buf_buddy_alloc(). Thus, we
must release block->mutex in order not to
@@ -3658,7 +3663,8 @@ err_exit:
rw_lock_x_unlock(&buf_pool->page_hash_latch);
- /* The block must be put to the LRU list, to the old blocks */
+ /* The block must be put to the LRU list, to the old blocks.
+ The zip_size has already been set in the page zip. */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
@@ -3706,7 +3712,6 @@ buf_page_create(
buf_block_t* block;
ulint fold;
buf_block_t* free_block = NULL;
- ulint time_ms = ut_time_ms();
buf_pool_t* buf_pool = buf_pool_get(space, offset);
ut_ad(mtr);
@@ -3774,7 +3779,7 @@ retry:
mutex_enter(&block->mutex);
- buf_page_init(buf_pool, space, offset, fold, block);
+ buf_page_init(buf_pool, space, offset, fold, zip_size,block);
rw_lock_x_unlock(&buf_pool->page_hash_latch);
/* The block must be put to the LRU list */
@@ -3793,8 +3798,6 @@ retry:
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
-
- page_zip_set_size(&block->page.zip, zip_size);
mutex_exit(&block->mutex);
/* buf_pool->mutex may be released and reacquired by
buf_buddy_alloc(). Thus, we must release block->mutex
@@ -3818,13 +3821,12 @@ retry:
rw_lock_x_unlock(&block->lock);
}
- buf_page_set_accessed(&block->page, time_ms);
-
- //buf_pool_mutex_exit(buf_pool);
mutex_exit(&buf_pool->LRU_list_mutex);
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
+ buf_page_set_accessed(&block->page);
+
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
@@ -3937,7 +3939,7 @@ buf_page_io_complete(
ensures that this is the only thread that handles the i/o for this
block. */
- io_type = buf_page_get_io_fix(bpage);
+ io_type = buf_page_get_io_fix_unlocked(bpage);
ut_ad(io_type == BUF_IO_READ || io_type == BUF_IO_WRITE);
if (io_type == BUF_IO_READ) {
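A notable buf0buf.c change replaces the sleep-and-poll wait for a concurrent page read (os_thread_sleep(WAIT_FOR_READ)) with briefly taking and releasing the block's s-latch, which blocks exactly until the I/O thread drops its x-latch. A small pthreads sketch of the same idea, entirely outside InnoDB (all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_rwlock_t block_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Simulated read I/O: holds the x-latch for the duration of the "read". */
    static void *io_thread(void *arg)
    {
        (void) arg;
        pthread_rwlock_wrlock(&block_lock);
        usleep(200 * 1000);                 /* pretend to read the page */
        pthread_rwlock_unlock(&block_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t io;

        pthread_create(&io, NULL, io_thread, NULL);
        usleep(10 * 1000);                  /* let the "I/O" start first */

        /* Wait by temporarily taking the s-latch: this returns only once
           the x-latch held by the I/O thread is released, with no polling. */
        pthread_rwlock_rdlock(&block_lock);
        pthread_rwlock_unlock(&block_lock);

        printf("read completed, page is now usable\n");
        pthread_join(io, NULL);
        return 0;
    }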
diff --git a/storage/xtradb/buf/buf0flu.c b/storage/xtradb/buf/buf0flu.c
index 39351cd3678..663e2d8f537 100644
--- a/storage/xtradb/buf/buf0flu.c
+++ b/storage/xtradb/buf/buf0flu.c
@@ -79,6 +79,23 @@ static buf_flush_stat_t buf_flush_stat_sum;
/* @} */
+/******************************************************************//**
+Increases the flush_list size in bytes by zip_size for a compressed page,
+or by UNIV_PAGE_SIZE for an uncompressed page. */
+static inline
+void
+incr_flush_list_size_in_bytes(
+/*==========================*/
+ buf_block_t* block, /*!< in: control block */
+ buf_pool_t* buf_pool) /*!< in: buffer pool instance */
+{
+ ulint zip_size;
+ ut_ad(buf_flush_list_mutex_own(buf_pool));
+ zip_size = page_zip_get_size(&block->page.zip);
+ buf_pool->stat.flush_list_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
+ ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
+}
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/******************************************************************//**
Validates the flush list.
@@ -308,6 +325,7 @@ buf_flush_insert_into_flush_list(
ut_d(block->page.in_flush_list = TRUE);
block->page.oldest_modification = lsn;
UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, &block->page);
+ incr_flush_list_size_in_bytes(block, buf_pool);
#ifdef UNIV_DEBUG_VALGRIND
{
@@ -412,6 +430,8 @@ buf_flush_insert_sorted_into_flush_list(
prev_b, &block->page);
}
+ incr_flush_list_size_in_bytes(block, buf_pool);
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -514,6 +534,7 @@ buf_flush_remove(
buf_page_t* bpage) /*!< in: pointer to the block in question */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ulint zip_size;
//ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
@@ -552,6 +573,9 @@ buf_flush_remove(
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.flush_list_bytes -= zip_size ? zip_size : UNIV_PAGE_SIZE;
+
bpage->oldest_modification = 0;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -915,7 +939,7 @@ flush:
"InnoDB: Page buf fix count %lu,"
" io fix %lu, state %lu\n",
(ulong)block->page.buf_fix_count,
- (ulong)buf_block_get_io_fix(block),
+ (ulong)buf_block_get_io_fix_unlocked(block),
(ulong)buf_block_get_state(block));
}
@@ -1115,7 +1139,7 @@ buf_flush_write_block_low(
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(!buf_flush_list_mutex_own(buf_pool));
ut_ad(!mutex_own(buf_page_get_mutex(bpage)));
- ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
+ ut_ad(buf_page_get_io_fix_unlocked(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
#ifdef UNIV_IBUF_COUNT_DEBUG
@@ -1181,10 +1205,10 @@ buf_flush_write_block_low(
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: buf_pool->mutex and block->mutex must be held upon entering this
-function, and they will be released by this function after flushing.
+NOTE: block->mutex must be held upon entering this function, and it will be
+released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
-@return TRUE if the page was flushed and the mutexes released */
+@return TRUE if the page was flushed and the mutex released */
UNIV_INTERN
ibool
buf_flush_page_try(
@@ -1553,16 +1577,14 @@ scan:
Check if the block is modified and ready for flushing. If the block
is ready to flush then flush the page and try to flush its neighbors.
-@return TRUE if buf_pool mutex was not released during this function.
+@return TRUE if LRU list mutex was not released during this function.
This does not guarantee that some pages were written as well.
Number of pages written are incremented to the count. */
static
ibool
buf_flush_page_and_try_neighbors(
/*=============================*/
- buf_page_t* bpage, /*!< in: buffer control block,
- must be
- buf_page_in_file(bpage) */
+ buf_page_t* bpage, /*!< in: buffer control block */
enum buf_flush flush_type, /*!< in: BUF_FLUSH_LRU
or BUF_FLUSH_LIST */
ulint n_to_flush, /*!< in: number of pages to
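Between this file and buf0lru.c the patch keeps two byte counters, flush_list_bytes and LRU_bytes, in step with the corresponding lists: each insertion adds the compressed page size if the page is compressed, otherwise UNIV_PAGE_SIZE, and each removal subtracts the same amount. A toy version of that bookkeeping (the sizes and names are placeholders, not the real InnoDB constants or structures):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 16384UL               /* placeholder for UNIV_PAGE_SIZE */

    static unsigned long flush_list_bytes;  /* like buf_pool->stat.flush_list_bytes */

    /* zip_size == 0 means an uncompressed page. */
    static void flush_list_add(unsigned long zip_size)
    {
        flush_list_bytes += zip_size ? zip_size : PAGE_SIZE;
    }

    static void flush_list_remove(unsigned long zip_size)
    {
        assert(flush_list_bytes >= (zip_size ? zip_size : PAGE_SIZE));
        flush_list_bytes -= zip_size ? zip_size : PAGE_SIZE;
    }

    int main(void)
    {
        flush_list_add(0);      /* uncompressed page   */
        flush_list_add(8192);   /* 8K compressed page  */
        flush_list_remove(0);
        printf("flush_list_bytes = %lu\n", flush_list_bytes);  /* 8192 */
        return 0;
    }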
diff --git a/storage/xtradb/buf/buf0lru.c b/storage/xtradb/buf/buf0lru.c
index 14b5c65132c..cd99d3e4e13 100644
--- a/storage/xtradb/buf/buf0lru.c
+++ b/storage/xtradb/buf/buf0lru.c
@@ -153,6 +153,23 @@ buf_LRU_block_free_hashed_page(
ibool have_page_hash_mutex);
/******************************************************************//**
+Increases the LRU list size in bytes by zip_size for a compressed page,
+or by UNIV_PAGE_SIZE for an uncompressed page. */
+static inline
+void
+incr_LRU_size_in_bytes(
+/*===================*/
+ buf_page_t* bpage, /*!< in: control block */
+ buf_pool_t* buf_pool) /*!< in: buffer pool instance */
+{
+ ulint zip_size;
+ ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.LRU_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
+ ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
+}
+
+/******************************************************************//**
Determines if the unzip_LRU list should be used for evicting a victim
instead of the general LRU list.
@return TRUE if should use unzip_LRU */
@@ -393,18 +410,18 @@ buf_flush_yield(
{
mutex_t* block_mutex;
+ block_mutex = buf_page_get_mutex(bpage);
+
+ ut_ad(mutex_own(block_mutex));
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(buf_page_in_file(bpage));
- block_mutex = buf_page_get_mutex(bpage);
-
- mutex_enter(block_mutex);
/* "Fix" the block so that the position cannot be
changed after we release the buffer pool and
block mutexes. */
buf_page_set_sticky(bpage);
- /* Now it is safe to release the buf_pool->mutex. */
+ /* Now it is safe to release the LRU list mutex. */
mutex_exit(&buf_pool->LRU_list_mutex);
mutex_exit(block_mutex);
@@ -415,7 +432,7 @@ buf_flush_yield(
mutex_enter(block_mutex);
/* "Unfix" the block now that we have both the
- buffer pool and block mutex again. */
+ LRU list and block mutex again. */
buf_page_unset_sticky(bpage);
mutex_exit(block_mutex);
}
@@ -431,7 +448,9 @@ buf_flush_try_yield(
/*================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_page_t* bpage, /*!< in/out: bpage to remove */
- ulint processed) /*!< in: number of pages processed */
+ ulint processed, /*!< in: number of pages processed */
+ ibool* must_restart) /*!< in/out: if TRUE, we have to
+ restart the flush list scan */
{
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
loop we release buf_pool->mutex to let other threads
@@ -441,10 +460,40 @@ buf_flush_try_yield(
if (bpage != NULL
&& processed >= BUF_LRU_DROP_SEARCH_SIZE
- && buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
+ && buf_page_get_io_fix_unlocked(bpage) == BUF_IO_NONE) {
+
+ mutex_t* block_mutex;
buf_flush_list_mutex_exit(buf_pool);
+ /* We don't have to worry about bpage becoming a dangling
+ pointer due to a compressed-page flush list relocation,
+ because buf_page_get_gen() won't be called for pages from
+ this tablespace. */
+
+ block_mutex = buf_page_get_mutex_enter(bpage);
+ if (UNIV_UNLIKELY(block_mutex == NULL)) {
+
+ buf_flush_list_mutex_enter(buf_pool);
+
+ *must_restart = TRUE;
+ return FALSE;
+ }
+
+ /* Recheck the I/O fix and the flush list presence now that we
+ hold the right mutex */
+ if (UNIV_UNLIKELY(buf_page_get_io_fix(bpage) != BUF_IO_NONE
+ || bpage->oldest_modification == 0)) {
+
+ mutex_exit(block_mutex);
+ buf_flush_list_mutex_enter(buf_pool);
+
+ *must_restart = TRUE;
+ return FALSE;
+ }
+
+ *must_restart = FALSE;
+
/* Release the LRU list and block mutex
to give the other threads a go. */
@@ -473,7 +522,9 @@ ibool
buf_flush_or_remove_page(
/*=====================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
- buf_page_t* bpage) /*!< in/out: bpage to remove */
+ buf_page_t* bpage, /*!< in/out: bpage to remove */
+ ibool* must_restart) /*!< in/out: if TRUE, must restart the
+ flush list scan */
{
mutex_t* block_mutex;
ibool processed = FALSE;
@@ -487,7 +538,8 @@ buf_flush_or_remove_page(
buf_pool->mutex and block_mutex. It is safe to check
them while holding buf_pool->mutex only. */
- if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+ if (UNIV_UNLIKELY(buf_page_get_io_fix_unlocked(bpage)
+ != BUF_IO_NONE)) {
/* We cannot remove this page during this scan
yet; maybe the system is currently reading it
@@ -496,21 +548,38 @@ buf_flush_or_remove_page(
} else {
/* We have to release the flush_list_mutex to obey the
- latching order. We are however guaranteed that the page
- will stay in the flush_list because buf_flush_remove()
- needs buf_pool->mutex as well (for the non-flush case). */
+ latching order. We are not however guaranteed that the page
+ will stay in the flush_list. */
buf_flush_list_mutex_exit(buf_pool);
+ /* We don't have to worry about bpage becoming a dangling
+ pointer due to a compressed-page flush list relocation,
+ because buf_page_get_gen() won't be called for pages from
+ this tablespace. */
+
mutex_enter(block_mutex);
- ut_ad(bpage->oldest_modification != 0);
+ /* Recheck the page I/O fix and the flush list presence now
+ that we hold the right mutex. */
+ if (UNIV_UNLIKELY(buf_page_get_io_fix(bpage) != BUF_IO_NONE
+ || bpage->oldest_modification == 0)) {
- if (bpage->buf_fix_count == 0) {
+ /* The page became I/O-fixed or is not on the flush
+ list anymore, this invalidates any flush-list-page
+ pointers we have. */
+ *must_restart = TRUE;
- buf_flush_remove(bpage);
+ } else {
+
+ ut_ad(bpage->oldest_modification != 0);
+
+ if (bpage->buf_fix_count == 0) {
+
+ buf_flush_remove(bpage);
- processed = TRUE;
+ processed = TRUE;
+ }
}
mutex_exit(block_mutex);
@@ -541,11 +610,12 @@ buf_flush_or_remove_pages(
buf_page_t* bpage;
ulint processed = 0;
ibool all_freed = TRUE;
+ ibool must_restart = FALSE;
buf_flush_list_mutex_enter(buf_pool);
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
- bpage != NULL;
+ !must_restart && bpage != NULL;
bpage = prev) {
ut_a(buf_page_in_file(bpage));
@@ -561,22 +631,31 @@ buf_flush_or_remove_pages(
/* Skip this block, as it does not belong to
the target space. */
- } else if (!buf_flush_or_remove_page(buf_pool, bpage)) {
+ } else if (!buf_flush_or_remove_page(buf_pool, bpage,
+ &must_restart)) {
/* Remove was unsuccessful, we have to try again
by scanning the entire list from the end. */
all_freed = FALSE;
}
+ if (UNIV_UNLIKELY(must_restart)) {
+ ut_ad(!all_freed);
+ break;
+ }
++processed;
/* Yield if we have hogged the CPU and mutexes for too long. */
- if (buf_flush_try_yield(buf_pool, prev, processed)) {
+ if (buf_flush_try_yield(buf_pool, prev, processed,
+ &must_restart)) {
+ ut_ad(!must_restart);
/* Reset the batch size counter if we had to yield. */
processed = 0;
+ } else if (UNIV_UNLIKELY(must_restart)) {
+ all_freed = FALSE;
}
}
@@ -641,41 +720,39 @@ scan_again:
/* No op */) {
buf_page_t* prev_bpage;
- mutex_t* block_mutex = NULL;
+ mutex_t* block_mutex;
ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
- /* bpage->space and bpage->io_fix are protected by
- buf_pool->mutex and the block_mutex. It is safe to check
- them while holding buf_pool->mutex only. */
+ block_mutex = buf_page_get_mutex_enter(bpage);
+
+ if (!block_mutex) {
+ /* This case should be impossible; something is
+ wrong, so we will scan_again */
+
+ all_freed = FALSE;
+ goto next_page;
+ }
if (buf_page_get_space(bpage) != id) {
/* Skip this block, as it does not belong to
the space that is being invalidated. */
+
+ mutex_exit(block_mutex);
goto next_page;
} else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
/* We cannot remove this page during this scan
yet; maybe the system is currently reading it
in, or flushing the modifications to the file */
+ mutex_exit(block_mutex);
all_freed = FALSE;
goto next_page;
} else {
- block_mutex = buf_page_get_mutex_enter(bpage);
-
- if (!block_mutex) {
- /* It may be impossible case...
- Something wrong, so will be scan_again */
-
- all_freed = FALSE;
- goto next_page;
- }
-
-
if (bpage->buf_fix_count > 0) {
mutex_exit(block_mutex);
@@ -1491,6 +1568,7 @@ buf_LRU_remove_block(
buf_page_t* bpage) /*!< in: control block */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ulint zip_size;
ut_ad(buf_pool);
ut_ad(bpage);
@@ -1527,6 +1605,9 @@ buf_LRU_remove_block(
UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);
bpage->in_LRU_list = FALSE;
+ zip_size = page_zip_get_size(&bpage->zip);
+ buf_pool->stat.LRU_bytes -= zip_size ? zip_size : UNIV_PAGE_SIZE;
+
buf_unzip_LRU_remove_block_if_needed(bpage);
/* If the LRU list is so short that LRU_old is not defined,
@@ -1588,7 +1669,10 @@ buf_unzip_LRU_add_block(
}
/******************************************************************//**
-Adds a block to the LRU list end. */
+Adds a block to the LRU list end. Please make sure that the zip_size has
+already been set in the page zip before invoking this function, so that
+the correct zip_size can be read from the buffer page when adding the
+block to the LRU list. */
UNIV_INLINE
void
buf_LRU_add_block_to_end_low(
@@ -1608,6 +1692,8 @@ buf_LRU_add_block_to_end_low(
UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
bpage->in_LRU_list = TRUE;
+ incr_LRU_size_in_bytes(bpage, buf_pool);
+
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
ut_ad(buf_pool->LRU_old);
@@ -1636,7 +1722,10 @@ buf_LRU_add_block_to_end_low(
}
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size has
+already been set in the page zip before invoking this function, so that
+the correct zip_size can be read from the buffer page when adding the
+block to the LRU list. */
UNIV_INLINE
void
buf_LRU_add_block_low(
@@ -1679,6 +1768,8 @@ buf_LRU_add_block_low(
bpage->in_LRU_list = TRUE;
+ incr_LRU_size_in_bytes(bpage, buf_pool);
+
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
ut_ad(buf_pool->LRU_old);
@@ -1706,7 +1797,10 @@ buf_LRU_add_block_low(
}
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size has
+already been set in the page zip before invoking this function, so that
+the correct zip_size can be read from the buffer page when adding the
+block to the LRU list. */
UNIV_INTERN
void
buf_LRU_add_block(
@@ -1853,7 +1947,7 @@ alloc:
|| !buf_page_can_relocate(bpage)) {
not_freed:
if (b) {
- buf_buddy_free(buf_pool, b, sizeof *b, TRUE);
+ buf_page_free_descriptor(b);
}
if (!have_LRU_mutex)
mutex_exit(&buf_pool->LRU_list_mutex);
@@ -1935,6 +2029,8 @@ not_freed:
UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
prev_b, b);
+ incr_LRU_size_in_bytes(b, buf_pool);
+
if (buf_page_is_old(b)) {
buf_pool->LRU_old_len++;
if (UNIV_UNLIKELY
@@ -2196,7 +2292,9 @@ buf_LRU_block_remove_hashed_page(
break;
case FIL_PAGE_INDEX:
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(&bpage->zip, page));
+ ut_a(page_zip_validate(
+ &bpage->zip, page,
+ ((buf_block_t*) bpage)->index));
#endif /* UNIV_ZIP_DEBUG */
break;
default:
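Several buf0lru.c hunks share one pattern: release the flush-list mutex to respect the latching order, take the block mutex, re-check the page's I/O fix and flush-list membership, and if the state changed in that window set must_restart and rescan the list from the tail. A simplified, single-threaded skeleton of that control flow (the data structures and checks are stand-ins, not InnoDB's):

    #include <stdbool.h>
    #include <stdio.h>

    #define N_PAGES 5

    static bool removed[N_PAGES];
    static int  io_fix_passes = 1;   /* page 2 stays "I/O-fixed" for one scan */

    /* Stand-in for the re-check done after swapping mutexes: the page may
       have become I/O-fixed (or left the flush list) while no mutex was
       held, invalidating any list pointers saved so far. */
    static bool state_changed_under_block_mutex(int page)
    {
        if (page == 2 && io_fix_passes > 0) {
            io_fix_passes--;
            return true;
        }
        return false;
    }

    int main(void)
    {
        bool all_freed;
        int  page;

        do {
            bool must_restart = false;
            all_freed = true;

            /* Scan from the list tail, as buf_flush_or_remove_pages() does. */
            for (page = N_PAGES - 1; page >= 0 && !must_restart; page--) {
                if (removed[page])
                    continue;

                if (state_changed_under_block_mutex(page)) {
                    must_restart = true;   /* rescan from the tail */
                    all_freed = false;
                } else {
                    removed[page] = true;
                    printf("removed page %d\n", page);
                }
            }
        } while (!all_freed);

        return 0;
    }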
diff --git a/storage/xtradb/buf/buf0rea.c b/storage/xtradb/buf/buf0rea.c
index 6d76a488af7..cf0a029df92 100644
--- a/storage/xtradb/buf/buf0rea.c
+++ b/storage/xtradb/buf/buf0rea.c
@@ -235,7 +235,8 @@ not_to_recover:
sync, space, 0, offset, 0, UNIV_PAGE_SIZE,
((buf_block_t*) bpage)->frame, bpage, trx);
}
- if(sync) {
+
+ if (sync) {
thd_wait_end(NULL);
}
diff --git a/storage/xtradb/dict/dict0dict.c b/storage/xtradb/dict/dict0dict.c
index 516b6e927e0..29063f028f1 100644
--- a/storage/xtradb/dict/dict0dict.c
+++ b/storage/xtradb/dict/dict0dict.c
@@ -525,6 +525,20 @@ dict_index_get_nth_col_or_prefix_pos(
return(ULINT_UNDEFINED);
}
+/********************************************************************//**
+Looks for column n in an index.
+@return position in internal representation of the index;
+ULINT_UNDEFINED if not contained */
+UNIV_INTERN
+ulint
+dict_index_get_nth_col_pos(
+/*=======================*/
+ const dict_index_t* index, /*!< in: index */
+ ulint n) /*!< in: column number */
+{
+ return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE));
+}
+
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Returns TRUE if the index contains a column or a prefix of that column.
@@ -2088,7 +2102,6 @@ dict_index_build_internal_clust(
{
dict_index_t* new_index;
dict_field_t* field;
- ulint fixed_size;
ulint trx_id_pos;
ulint i;
ibool* indexed;
@@ -2165,7 +2178,7 @@ dict_index_build_internal_clust(
for (i = 0; i < trx_id_pos; i++) {
- fixed_size = dict_col_get_fixed_size(
+ ulint fixed_size = dict_col_get_fixed_size(
dict_index_get_nth_col(new_index, i),
dict_table_is_comp(table));
@@ -2182,7 +2195,20 @@ dict_index_build_internal_clust(
break;
}
- new_index->trx_id_offset += (unsigned int) fixed_size;
+ /* Add fixed_size to new_index->trx_id_offset.
+ Because the latter is a bit-field, an overflow
+ can theoretically occur. Check for it. */
+ fixed_size += new_index->trx_id_offset;
+
+ new_index->trx_id_offset = fixed_size;
+
+ if (new_index->trx_id_offset != fixed_size) {
+ /* Overflow. Pretend that this is a
+ variable-length PRIMARY KEY. */
+ ut_ad(0);
+ new_index->trx_id_offset = 0;
+ break;
+ }
}
}
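
The hunk above detects overflow of the narrow trx_id_offset bit-field by storing the sum, re-reading it, and comparing. A minimal standalone sketch of that store-and-compare idiom, with a hypothetical struct and field (not the InnoDB dict_index_t):

#include <stdio.h>

struct rec_hdr {
	unsigned	off:10;	/* narrow bit-field, can hold 0..1023 */
};

/* Store val into the bit-field; report overflow the same way the patch
does for trx_id_offset: write, re-read, compare, reset on mismatch. */
static int
set_off_checked(struct rec_hdr* h, unsigned val)
{
	h->off = val;		/* silently truncated if val >= 1024 */

	if (h->off != val) {	/* truncation happened: overflow */
		h->off = 0;	/* fall back to "offset unknown" */
		return(1);
	}

	return(0);
}

int
main(void)
{
	struct rec_hdr	h = {0};

	printf("%d\n", set_off_checked(&h, 100));	/* 0: fits */
	printf("%d\n", set_off_checked(&h, 5000));	/* 1: overflow */
	return(0);
}
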
diff --git a/storage/xtradb/dict/dict0load.c b/storage/xtradb/dict/dict0load.c
index 0ef74ea8b7c..cdef0d1b270 100644
--- a/storage/xtradb/dict/dict0load.c
+++ b/storage/xtradb/dict/dict0load.c
@@ -2399,7 +2399,8 @@ dict_load_foreigns(
ibool check_charsets) /*!< in: TRUE=check charset
compatibility */
{
- char tuple_buf[DTUPLE_EST_ALLOC(1)];
+ ulint tuple_buf[(DTUPLE_EST_ALLOC(1) + sizeof(ulint) - 1)
+ / sizeof(ulint)];
btr_pcur_t pcur;
dtuple_t* tuple;
dfield_t* dfield;
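
The replacement above changes the on-stack buffer from char to ulint elements, so the storage gets machine-word alignment; the element count is rounded up so that no requested bytes are lost. A small sketch of the round-up idiom, with a hypothetical byte estimate standing in for DTUPLE_EST_ALLOC(1):

#include <stdio.h>

/* Number of SIZE-byte elements needed to cover NBYTES bytes. */
#define ELEMS_FOR(nbytes, size)	(((nbytes) + (size) - 1) / (size))

#define EST_BYTES	97	/* hypothetical estimate */

int
main(void)
{
	/* Aligned for unsigned long and at least EST_BYTES long. */
	unsigned long	buf[ELEMS_FOR(EST_BYTES, sizeof(unsigned long))];

	printf("%zu bytes available for %d requested\n",
	       sizeof(buf), EST_BYTES);
	return(0);
}
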
diff --git a/storage/xtradb/fil/fil0fil.c b/storage/xtradb/fil/fil0fil.c
index 4ab2c16d41f..397c4de4b6e 100644
--- a/storage/xtradb/fil/fil0fil.c
+++ b/storage/xtradb/fil/fil0fil.c
@@ -195,14 +195,16 @@ struct fil_space_struct {
requests on the file */
ibool stop_new_ops;
/*!< we set this TRUE when we start
- deleting a single-table tablespace */
- ibool is_being_deleted;
- /*!< this is set to TRUE when we start
- deleting a single-table tablespace and its
- file; when this flag is set no further i/o
- or flush requests can be placed on this space,
- though there may be such requests still being
- processed on this space */
+ deleting a single-table tablespace.
+ When this is set following new ops
+ are not allowed:
+ * read IO request
+ * ibuf merge
+ * file flush
+ Note that we can still possibly have
+ new write operations because we don't
+ check this flag when doing flush
+ batches. */
ulint purpose;/*!< FIL_TABLESPACE, FIL_LOG, or
FIL_ARCH_LOG */
UT_LIST_BASE_NODE_T(fil_node_t) chain;
@@ -865,7 +867,7 @@ fil_node_close_file(
ut_ad(node && system);
ut_ad(mutex_own(&(system->mutex)));
ut_a(node->open);
- ut_a(node->n_pending == 0 || node->space->is_being_deleted);
+ ut_a(node->n_pending == 0 || node->space->stop_new_ops);
ut_a(node->n_pending_flushes == 0);
#ifndef UNIV_HOTBACKUP
ut_a(node->modification_counter == node->flush_counter
@@ -1099,7 +1101,7 @@ fil_node_free(
ut_ad(node && system && space);
ut_ad(mutex_own(&(system->mutex)));
ut_a(node->magic_n == FIL_NODE_MAGIC_N);
- ut_a(node->n_pending == 0 || space->is_being_deleted);
+ ut_a(node->n_pending == 0 || space->stop_new_ops);
if (node->open) {
/* We fool the assertion in fil_node_close_file() to think
@@ -1297,7 +1299,6 @@ try_again:
space->stop_ios = FALSE;
space->stop_new_ops = FALSE;
- space->is_being_deleted = FALSE;
space->purpose = purpose;
space->size = 0;
space->flags = flags;
@@ -1478,7 +1479,7 @@ fil_space_get_size(
ut_ad(fil_system);
- fil_mutex_enter_and_prepare_for_io(id);
+ mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id);
@@ -1493,6 +1494,23 @@ fil_space_get_size(
ut_a(1 == UT_LIST_GET_LEN(space->chain));
+ mutex_exit(&fil_system->mutex);
+
+ /* It is possible that the space gets evicted at this point
+ before the fil_mutex_enter_and_prepare_for_io() acquires
+ the fil_system->mutex. Check for this after completing the
+ call to fil_mutex_enter_and_prepare_for_io(). */
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ /* We are still holding the fil_system->mutex. Check if
+ the space is still in memory cache. */
+ space = fil_space_get_by_id(id);
+
+ if (space == NULL) {
+ mutex_exit(&fil_system->mutex);
+ return(0);
+ }
+
node = UT_LIST_GET_FIRST(space->chain);
/* It must be a single-table tablespace and we have not opened
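
This hunk and the matching one in fil_space_get_flags() below use the same pattern: look the space up under the plain mutex, release the mutex, call the prepare-for-io helper (which returns with the mutex held again), then look the space up a second time because it may have been evicted in between. A minimal runnable sketch of that drop-reacquire-revalidate idiom, using pthreads and a hypothetical single-slot registry:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t	registry_mutex = PTHREAD_MUTEX_INITIALIZER;
static int		space_present = 1;	/* may be cleared by another thread */
static unsigned long	space_pages = 640;

/* Stand-in for fil_mutex_enter_and_prepare_for_io(): may release and
retake registry_mutex internally, but returns with it held. */
static void
prepare_for_io(void)
{
	pthread_mutex_lock(&registry_mutex);
}

static unsigned long
get_size_checked(void)
{
	unsigned long	size;

	pthread_mutex_lock(&registry_mutex);
	if (!space_present) {
		pthread_mutex_unlock(&registry_mutex);
		return(0);
	}
	pthread_mutex_unlock(&registry_mutex);

	/* The space may be evicted here, while no lock is held. */

	prepare_for_io();

	if (!space_present) {		/* revalidate under the mutex */
		pthread_mutex_unlock(&registry_mutex);
		return(0);
	}

	size = space_pages;
	pthread_mutex_unlock(&registry_mutex);
	return(size);
}

int
main(void)
{
	printf("%lu\n", get_size_checked());
	return(0);
}
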
@@ -1530,7 +1548,7 @@ fil_space_get_flags(
return(0);
}
- fil_mutex_enter_and_prepare_for_io(id);
+ mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id);
@@ -1545,6 +1563,23 @@ fil_space_get_flags(
ut_a(1 == UT_LIST_GET_LEN(space->chain));
+ mutex_exit(&fil_system->mutex);
+
+ /* It is possible that the space gets evicted at this point
+ before the fil_mutex_enter_and_prepare_for_io() acquires
+ the fil_system->mutex. Check for this after completing the
+ call to fil_mutex_enter_and_prepare_for_io(). */
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ /* We are still holding the fil_system->mutex. Check if
+ the space is still in memory cache. */
+ space = fil_space_get_by_id(id);
+
+ if (space == NULL) {
+ mutex_exit(&fil_system->mutex);
+ return(0);
+ }
+
node = UT_LIST_GET_FIRST(space->chain);
/* It must be a single-table tablespace and we have not opened
@@ -2325,11 +2360,9 @@ try_again:
return(FALSE);
}
- ut_a(space);
+ ut_a(space->stop_new_ops);
ut_a(space->n_pending_ops == 0);
- space->is_being_deleted = TRUE;
-
ut_a(UT_LIST_GET_LEN(space->chain) == 1);
node = UT_LIST_GET_FIRST(space->chain);
@@ -2372,12 +2405,26 @@ try_again:
rw_lock_x_lock(&space->latch);
#ifndef UNIV_HOTBACKUP
- /* Invalidate in the buffer pool all pages belonging to the
- tablespace. Since we have set space->is_being_deleted = TRUE, readahead
- or ibuf merge can no longer read more pages of this tablespace to the
- buffer pool. Thus we can clean the tablespace out of the buffer pool
- completely and permanently. The flag is_being_deleted also prevents
- fil_flush() from being applied to this tablespace. */
+ /* IMPORTANT: Because we have set space::stop_new_ops there
+ can't be any new ibuf merges, reads or flushes. We are here
+ because node::n_pending was zero above. However, it is still
+ possible to have pending read and write requests:
+
+ A read request can happen because the reader thread has
+ gone through the ::stop_new_ops check in buf_page_init_for_read()
+ before the flag was set and has not yet incremented ::n_pending
+ when we checked it above.
+
+ A write request can be issued any time because we don't check
+ the ::stop_new_ops flag when queueing a block for write.
+
+ We deal with pending write requests in the following function
+ where we'd minimally evict all dirty pages belonging to this
+ space from the flush_list. Note that if a block is IO-fixed
+ we'll wait for IO to complete.
+
+ We deal with potential read requests by checking the
+ ::stop_new_ops flag in fil_io(). */
if (srv_lazy_drop_table) {
buf_LRU_mark_space_was_deleted(id);
@@ -2393,6 +2440,15 @@ try_again:
mutex_enter(&fil_system->mutex);
+ /* Double check the sanity of pending ops after reacquiring
+ the fil_system::mutex. */
+ if (fil_space_get_by_id(id)) {
+ ut_a(space->n_pending_ops == 0);
+ ut_a(UT_LIST_GET_LEN(space->chain) == 1);
+ node = UT_LIST_GET_FIRST(space->chain);
+ ut_a(node->n_pending == 0);
+ }
+
success = fil_space_free(id, TRUE);
mutex_exit(&fil_system->mutex);
@@ -2450,7 +2506,7 @@ fil_tablespace_is_being_deleted(
ut_a(space != NULL);
- is_being_deleted = space->is_being_deleted;
+ is_being_deleted = space->stop_new_ops;
mutex_exit(&fil_system->mutex);
@@ -2710,7 +2766,7 @@ retry:
mutex_exit(&fil_system->mutex);
#ifndef UNIV_HOTBACKUP
- if (success) {
+ if (success && !recv_recovery_on) {
mtr_t mtr;
mtr_start(&mtr);
@@ -4531,7 +4587,7 @@ fil_tablespace_deleted_or_being_deleted_in_mem(
space = fil_space_get_by_id(id);
- if (space == NULL || space->is_being_deleted) {
+ if (space == NULL || space->stop_new_ops) {
mutex_exit(&fil_system->mutex);
return(TRUE);
@@ -4809,6 +4865,24 @@ fil_extend_space_to_desired_size(
start_page_no = space->size;
file_start_page_no = space->size - node->size;
+#ifdef HAVE_POSIX_FALLOCATE
+ if (srv_use_posix_fallocate) {
+ offset_high = size_after_extend * page_size / (4ULL*1024*1024*1024);
+ offset_low = size_after_extend * page_size % (4ULL*1024*1024*1024);
+
+ mutex_exit(&fil_system->mutex);
+ success = os_file_set_size(node->name, node->handle,
+ offset_low, offset_high);
+ mutex_enter(&fil_system->mutex);
+ if (success) {
+ node->size += (size_after_extend - start_page_no);
+ space->size += (size_after_extend - start_page_no);
+ os_has_said_disk_full = FALSE;
+ }
+ goto complete_io;
+ }
+#endif
+
/* Extend at most 64 pages at a time */
buf_size = ut_min(64, size_after_extend - start_page_no) * page_size;
buf2 = mem_alloc(buf_size + page_size);
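
The fallocate branch above splits the 64-bit target size into the 32-bit low/high words that os_file_set_size() expects, by dividing and taking the remainder modulo 4 GiB. A tiny standalone sketch of that split (hypothetical helper, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void
split_offset(uint64_t bytes, uint32_t* low, uint32_t* high)
{
	const uint64_t	four_gib = 4ULL * 1024 * 1024 * 1024;

	*high = (uint32_t) (bytes / four_gib);	/* upper 32 bits */
	*low  = (uint32_t) (bytes % four_gib);	/* lower 32 bits */
}

int
main(void)
{
	uint32_t	lo;
	uint32_t	hi;

	split_offset(6ULL * 1024 * 1024 * 1024, &lo, &hi);	/* 6 GiB */
	printf("high=%" PRIu32 " low=%" PRIu32 "\n", hi, lo);
	return(0);
}
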
@@ -4865,6 +4939,10 @@ fil_extend_space_to_desired_size(
mem_free(buf2);
+#ifdef HAVE_POSIX_FALLOCATE
+complete_io:
+#endif
+
fil_node_complete_io(node, fil_system, OS_FILE_WRITE);
*actual_size = space->size;
@@ -5271,7 +5349,9 @@ _fil_io(
space = fil_space_get_by_id(space_id);
- if (!space) {
+ /* If we are deleting a tablespace, we do not allow any read
+ operations on it. However, we do allow write operations. */
+ if (!space || (type == OS_FILE_READ && space->stop_new_ops)) {
mutex_exit(&fil_system->mutex);
ut_print_timestamp(stderr);
@@ -5362,8 +5442,8 @@ _fil_io(
/* Do aio */
- ut_a(byte_offset % OS_FILE_LOG_BLOCK_SIZE == 0);
- ut_a((len % OS_FILE_LOG_BLOCK_SIZE) == 0);
+ ut_a(byte_offset % OS_MIN_LOG_BLOCK_SIZE == 0);
+ ut_a((len % OS_MIN_LOG_BLOCK_SIZE) == 0);
if (srv_pass_corrupt_table == 1 && space->is_corrupt) {
/* should ignore i/o for the crashed space */
@@ -5551,7 +5631,7 @@ fil_aio_wait(
&& ((buf_page_t*)message)->space_was_being_deleted) {
/* intended not to be uncompress read page */
- ut_a(buf_page_get_io_fix(message) == BUF_IO_WRITE
+ ut_a(buf_page_get_io_fix_unlocked(message) == BUF_IO_WRITE
|| !buf_page_get_zip_size(message)
|| buf_page_get_state(message) != BUF_BLOCK_FILE_PAGE);
@@ -5612,7 +5692,7 @@ fil_flush(
space = fil_space_get_by_id(space_id);
- if (!space || space->is_being_deleted) {
+ if (!space || space->stop_new_ops) {
mutex_exit(&fil_system->mutex);
return;
@@ -5743,7 +5823,7 @@ fil_flush_file_spaces(
space;
space = UT_LIST_GET_NEXT(unflushed_spaces, space)) {
- if (space->purpose == purpose && !space->is_being_deleted) {
+ if (space->purpose == purpose && !space->stop_new_ops) {
space_ids[n_space_ids++] = space->id;
}
@@ -5982,3 +6062,26 @@ fil_space_set_corrupt(
mutex_exit(&fil_system->mutex);
}
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name) /*!< in: temp table name used while
+ swapping */
+{
+ mtr_t mtr;
+ mtr_start(&mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, old_space_id,
+ 0, 0, old_name, tmp_name, &mtr);
+ fil_op_write_log(MLOG_FILE_RENAME, new_space_id,
+ 0, 0, new_name, old_name, &mtr);
+ mtr_commit(&mtr);
+}
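
fil_mtr_rename_log() writes both rename records of a table swap inside a single mini-transaction, so recovery replays either the whole swap or none of it. A toy model of that idea (not InnoDB code; names and the in-memory log batch are illustrative only):

#include <stdio.h>

struct log_batch {
	char	records[4][128];
	int	n;
};

static void
log_rename(struct log_batch* b, const char* from, const char* to)
{
	snprintf(b->records[b->n++], sizeof(b->records[0]),
		 "RENAME %s -> %s", from, to);
}

static void
commit(const struct log_batch* b)
{
	int	i;

	for (i = 0; i < b->n; i++) {
		puts(b->records[i]);	/* stand-in for flushing the redo log */
	}
}

int
main(void)
{
	struct log_batch	mtr;

	mtr.n = 0;

	/* Swap old_table and new_table via tmp_name, mirroring the two
	MLOG_FILE_RENAME records written by the function above. */
	log_rename(&mtr, "old_table.ibd", "tmp_name.ibd");
	log_rename(&mtr, "new_table.ibd", "old_table.ibd");
	commit(&mtr);			/* both records become visible together */
	return(0);
}
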
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 12f6f5134d2..8b824bc994d 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -85,6 +85,7 @@ extern "C" {
#include "row0sel.h"
#include "row0upd.h"
#include "log0log.h"
+#include "log0online.h"
#include "lock0lock.h"
#include "dict0crea.h"
#include "btr0cur.h"
@@ -185,6 +186,8 @@ static my_bool innobase_file_format_check = TRUE;
static my_bool innobase_log_archive = FALSE;
static char* innobase_log_arch_dir = NULL;
#endif /* UNIV_LOG_ARCHIVE */
+static my_bool innobase_use_atomic_writes = FALSE;
+static my_bool innobase_use_fallocate = TRUE;
static my_bool innobase_use_doublewrite = TRUE;
static my_bool innobase_use_checksums = TRUE;
static my_bool innobase_fast_checksum = FALSE;
@@ -297,6 +300,7 @@ static PSI_mutex_info all_innodb_mutexes[] = {
{&ibuf_pessimistic_insert_mutex_key,
"ibuf_pessimistic_insert_mutex", 0},
{&kernel_mutex_key, "kernel_mutex", 0},
+ {&log_bmp_sys_mutex_key, "log_bmp_sys_mutex", 0},
{&log_sys_mutex_key, "log_sys_mutex", 0},
# ifdef UNIV_MEM_DEBUG
{&mem_hash_mutex_key, "mem_hash_mutex", 0},
@@ -437,6 +441,25 @@ uint
innobase_alter_table_flags(
/*=======================*/
uint flags);
+/************************************************************//**
+Synchronously read and parse the redo log up to the last
+checkpoint to write the changed page bitmap.
+@return 0 to indicate success. Current implementation cannot fail. */
+static
+my_bool
+innobase_flush_changed_page_bitmaps() __attribute__((unused));
+/*==================================*/
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+@return 0 to indicate success, 1 for failure. */
+static
+my_bool
+innobase_purge_changed_page_bitmaps(
+/*================================*/
+ ulonglong lsn) __attribute__((unused)); /*!< in: LSN to purge files up to */
static const char innobase_hton_name[]= "InnoDB";
@@ -688,8 +711,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_background_log_sync, SHOW_LONG},
{"buffer_pool_pages_data",
(char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG},
+ {"buffer_pool_bytes_data",
+ (char*) &export_vars.innodb_buffer_pool_bytes_data, SHOW_LONG},
{"buffer_pool_pages_dirty",
(char*) &export_vars.innodb_buffer_pool_pages_dirty, SHOW_LONG},
+ {"buffer_pool_bytes_dirty",
+ (char*) &export_vars.innodb_buffer_pool_bytes_dirty, SHOW_LONG},
{"buffer_pool_pages_flushed",
(char*) &export_vars.innodb_buffer_pool_pages_flushed, SHOW_LONG},
{"buffer_pool_pages_LRU_flushed",
@@ -870,6 +897,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_x_lock_spin_rounds, SHOW_LONGLONG},
{"x_lock_spin_waits",
(char*) &export_vars.innodb_x_lock_spin_waits, SHOW_LONGLONG},
+#ifdef UNIV_DEBUG
+ {"purge_trx_id_age",
+ (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
+ {"purge_view_trx_id_age",
+ (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
+#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};
@@ -1219,11 +1252,23 @@ convert_error_code_to_mysql(
case DB_TABLE_NOT_FOUND:
return(HA_ERR_NO_SUCH_TABLE);
- case DB_TOO_BIG_RECORD:
- my_error(ER_TOO_BIG_ROWSIZE, MYF(0),
- page_get_free_space_of_empty(flags
- & DICT_TF_COMPACT) / 2);
+ case DB_TOO_BIG_RECORD: {
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ bool prefix = ((flags & DICT_TF_FORMAT_MASK)
+ >> DICT_TF_FORMAT_SHIFT) < UNIV_FORMAT_B;
+ my_printf_error(ER_TOO_BIG_ROWSIZE,
+ "Row size too large (> %lu). Changing some columns "
+ "to TEXT or BLOB %smay help. In current row "
+ "format, BLOB prefix of %d bytes is stored inline.",
+ MYF(0),
+ page_get_free_space_of_empty(flags &
+ DICT_TF_COMPACT) / 2,
+ prefix ? "or using ROW_FORMAT=DYNAMIC "
+ "or ROW_FORMAT=COMPRESSED ": "",
+ prefix ? DICT_MAX_FIXED_COL_LEN : 0);
return(HA_ERR_TO_BIG_ROW);
+ }
case DB_TOO_BIG_INDEX_COL:
my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
@@ -1262,6 +1307,8 @@ convert_error_code_to_mysql(
return(HA_ERR_INDEX_CORRUPT);
case DB_UNDO_RECORD_TOO_BIG:
return(HA_ERR_UNDO_REC_TOO_BIG);
+ case DB_OUT_OF_MEMORY:
+ return(HA_ERR_OUT_OF_MEM);
}
}
@@ -1439,16 +1486,6 @@ innobase_get_lower_case_table_names(void)
return(lower_case_table_names);
}
-#if defined (__WIN__) && defined (MYSQL_DYNAMIC_PLUGIN)
-extern MYSQL_PLUGIN_IMPORT MY_TMPDIR mysql_tmpdir_list;
-/*******************************************************************//**
-Map an OS error to an errno value. The OS error number is stored in
-_doserrno and the mapped value is stored in errno) */
-extern "C"
-void __cdecl
-_dosmaperr(
- unsigned long); /*!< in: OS error value */
-
/*********************************************************************//**
Creates a temporary file.
@return temporary file descriptor, or < 0 on error */
@@ -1457,92 +1494,16 @@ int
innobase_mysql_tmpfile(void)
/*========================*/
{
- int fd; /* handle of opened file */
- HANDLE osfh; /* OS handle of opened file */
- char* tmpdir; /* point to the directory
- where to create file */
- TCHAR path_buf[MAX_PATH - 14]; /* buffer for tmp file path.
- The length cannot be longer
- than MAX_PATH - 14, or
- GetTempFileName will fail. */
- char filename[MAX_PATH]; /* name of the tmpfile */
- DWORD fileaccess = GENERIC_READ /* OS file access */
- | GENERIC_WRITE
- | DELETE;
- DWORD fileshare = FILE_SHARE_READ /* OS file sharing mode */
- | FILE_SHARE_WRITE
- | FILE_SHARE_DELETE;
- DWORD filecreate = CREATE_ALWAYS; /* OS method of open/create */
- DWORD fileattrib = /* OS file attribute flags */
- FILE_ATTRIBUTE_NORMAL
- | FILE_FLAG_DELETE_ON_CLOSE
- | FILE_ATTRIBUTE_TEMPORARY
- | FILE_FLAG_SEQUENTIAL_SCAN;
-
- DBUG_ENTER("innobase_mysql_tmpfile");
-
- tmpdir = my_tmpdir(&mysql_tmpdir_list);
-
- /* The tmpdir parameter can not be NULL for GetTempFileName. */
- if (!tmpdir) {
- uint ret;
-
- /* Use GetTempPath to determine path for temporary files. */
- ret = GetTempPath(sizeof(path_buf), path_buf);
- if (ret > sizeof(path_buf) || (ret == 0)) {
-
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- tmpdir = path_buf;
- }
-
- /* Use GetTempFileName to generate a unique filename. */
- if (!GetTempFileName(tmpdir, "ib", 0, filename)) {
-
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- DBUG_PRINT("info", ("filename: %s", filename));
-
- /* Open/Create the file. */
- osfh = CreateFile(filename, fileaccess, fileshare, NULL,
- filecreate, fileattrib, NULL);
- if (osfh == INVALID_HANDLE_VALUE) {
-
- /* open/create file failed! */
- _dosmaperr(GetLastError()); /* map error */
- DBUG_RETURN(-1);
- }
-
- do {
- /* Associates a CRT file descriptor with the OS file handle. */
- fd = _open_osfhandle((intptr_t) osfh, 0);
- } while (fd == -1 && errno == EINTR);
+ int fd2 = -1;
+ File fd;
- if (fd == -1) {
- /* Open failed, close the file handle. */
+ DBUG_EXECUTE_IF(
+ "innobase_tmpfile_creation_failure",
+ return(-1);
+ );
- _dosmaperr(GetLastError()); /* map error */
- CloseHandle(osfh); /* no need to check if
- CloseHandle fails */
- }
+ fd = mysql_tmpfile("ib");
- DBUG_RETURN(fd);
-}
-#else
-/*********************************************************************//**
-Creates a temporary file.
-@return temporary file descriptor, or < 0 on error */
-extern "C" UNIV_INTERN
-int
-innobase_mysql_tmpfile(void)
-/*========================*/
-{
- int fd2 = -1;
- File fd = mysql_tmpfile("ib");
if (fd >= 0) {
/* Copy the file descriptor, so that the additional resources
allocated by create_temp_file() can be freed by invoking
@@ -1586,7 +1547,6 @@ innobase_mysql_tmpfile(void)
}
return(fd2);
}
-#endif /* defined (__WIN__) && defined (MYSQL_DYNAMIC_PLUGIN) */
/*********************************************************************//**
Wrapper around MySQL's copy_and_convert function.
@@ -1702,10 +1662,13 @@ innobase_next_autoinc(
offset = 0;
}
- /* Check for overflow. */
+ /* Check for overflow. Current can be > max_value if the value
+ is in reality a negative value. The Visual Studio compilers
+ automatically convert large double values into the unsigned
+ long long maximum value. */
if (block >= max_value
|| offset > max_value
- || current == max_value
+ || current >= max_value
|| max_value - offset <= offset) {
next_value = max_value;
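
The widened check above treats current >= max_value, not just equality, as saturation, because a negative value arriving through a double conversion shows up as a huge unsigned number. A simplified standalone illustration of unsigned saturation (not the server's exact autoinc formula):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Add block to current, but never exceed (or wrap past) max_value. */
static uint64_t
saturating_next(uint64_t current, uint64_t block, uint64_t max_value)
{
	uint64_t	next;

	if (current >= max_value) {
		/* Already at or over the limit, e.g. a negative value
		seen as a huge unsigned number. */
		return(max_value);
	}

	next = current + block;

	if (next < current || next > max_value) {
		/* Wrapped around or exceeded the column maximum. */
		return(max_value);
	}

	return(next);
}

int
main(void)
{
	printf("%" PRIu64 "\n", saturating_next(10, 5, 100));		/* 15 */
	printf("%" PRIu64 "\n", saturating_next(UINT64_MAX, 5, 100));	/* 100 */
	return(0);
}
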
@@ -1778,7 +1741,7 @@ innobase_trx_init(
trx->fake_changes = THDVAR(thd, fake_changes);
#ifdef EXTENDED_SLOWLOG
- if (thd_log_slow_verbosity(thd) & SLOG_V_INNODB) {
+ if (thd_log_slow_verbosity(thd) & (1ULL << SLOG_V_INNODB)) {
trx->take_stats = TRUE;
} else {
trx->take_stats = FALSE;
@@ -2599,6 +2562,13 @@ skip_overwrite:
}
+/****************************************************************//**
+Gives the file extension of an InnoDB single-table tablespace. */
+static const char* ha_innobase_exts[] = {
+ ".ibd",
+ NullS
+};
+
/*********************************************************************//**
Opens an InnoDB database.
@return 0 on success, error code on failure */
@@ -2648,6 +2618,9 @@ innobase_init(
innobase_hton->alter_table_flags = innobase_alter_table_flags;
innobase_hton->kill_query = innobase_kill_query;
+ if (srv_file_per_table)
+ innobase_hton->tablefile_extensions = ha_innobase_exts;
+
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifndef DBUG_OFF
@@ -2717,6 +2690,7 @@ innobase_init(
} else {
srv_log_block_size = 512;
}
+ ut_ad (srv_log_block_size >= OS_MIN_LOG_BLOCK_SIZE);
if (!srv_log_block_size) {
fprintf(stderr,
@@ -3107,6 +3081,38 @@ innobase_change_buffering_inited_ok:
srv_kill_idle_transaction = 0;
#endif
+#ifdef HAVE_POSIX_FALLOCATE
+ srv_use_posix_fallocate = (ibool) innobase_use_fallocate;
+#endif
+ srv_use_atomic_writes = (ibool) innobase_use_atomic_writes;
+ if (innobase_use_atomic_writes) {
+ fprintf(stderr, "InnoDB: using atomic writes.\n");
+
+ /* Force doublewrite buffer off, atomic writes replace it. */
+ if (srv_use_doublewrite_buf) {
+ fprintf(stderr, "InnoDB: Switching off doublewrite buffer "
+ "because of atomic writes.\n");
+ innobase_use_doublewrite = srv_use_doublewrite_buf = FALSE;
+ }
+
+ /* Force O_DIRECT on Unixes (on Windows writes are always unbuffered)*/
+#ifndef _WIN32
+ if(!innobase_file_flush_method ||
+ !strstr(innobase_file_flush_method, "O_DIRECT")) {
+ innobase_file_flush_method =
+ srv_file_flush_method_str = (char*)"O_DIRECT";
+ fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n");
+ }
+#endif
+#ifdef HAVE_POSIX_FALLOCATE
+ /* Due to a bug in directFS, using atomic writes requires
+ * posix_fallocate to extend the file, because pwrite()
+ * past the end of the file won't work.
+ */
+ srv_use_posix_fallocate = TRUE;
+#endif
+ }
+
#ifdef HAVE_PSI_INTERFACE
/* Register keys with MySQL performance schema */
if (PSI_server) {
@@ -3267,6 +3273,36 @@ innobase_alter_table_flags(
| HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE);
}
+/************************************************************//**
+Synchronously read and parse the redo log up to the last
+checkpoint to write the changed page bitmap.
+@return 0 to indicate success. Current implementation cannot fail. */
+static
+my_bool
+innobase_flush_changed_page_bitmaps()
+/*=================================*/
+{
+ if (srv_track_changed_pages) {
+ os_event_reset(srv_checkpoint_completed_event);
+ log_online_follow_redo_log();
+ }
+ return FALSE;
+}
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == IB_ULONGLONG_MAX (i.e. set by RESET request),
+restart the bitmap file sequence, otherwise continue it.
+@return 0 to indicate success, 1 for failure. */
+static
+my_bool
+innobase_purge_changed_page_bitmaps(
+/*================================*/
+ ulonglong lsn) /*!< in: LSN to purge files up to */
+{
+ return (my_bool)log_online_purge_changed_page_bitmaps(lsn);
+}
+
/****************************************************************//**
Copy the current replication position from MySQL to a transaction. */
static
@@ -3330,7 +3366,7 @@ innobase_commit_low(
header for undo purposes, see the comment at corresponding call
at innobase_xa_prepare(). */
- innobase_copy_repl_coords_to_trx(current_thd, trx);
+ innobase_copy_repl_coords_to_trx((THD *) trx->mysql_thd, trx);
trx_commit_for_mysql(trx);
}
@@ -4041,13 +4077,6 @@ ha_innobase::table_flags() const
}
/****************************************************************//**
-Gives the file extension of an InnoDB single-table tablespace. */
-static const char* ha_innobase_exts[] = {
- ".ibd",
- NullS
-};
-
-/****************************************************************//**
Returns the index type. */
UNIV_INTERN
const char*
@@ -4060,17 +4089,6 @@ ha_innobase::index_type(
}
/****************************************************************//**
-Returns the table file name extension.
-@return file extension string */
-UNIV_INTERN
-const char**
-ha_innobase::bas_ext() const
-/*========================*/
-{
- return(ha_innobase_exts);
-}
-
-/****************************************************************//**
Returns the operations supported for indexes.
@return flags of supported operations */
UNIV_INTERN
@@ -10646,23 +10664,26 @@ ha_innobase::external_lock(
if (trx->n_mysql_tables_in_use == 0) {
#ifdef EXTENDED_SLOWLOG
- increment_thd_innodb_stats(thd,
- (unsigned long long) trx->id,
- trx->io_reads,
- trx->io_read,
- trx->io_reads_wait_timer,
- trx->lock_que_wait_timer,
- trx->innodb_que_wait_timer,
- trx->distinct_page_access);
-
- trx->io_reads = 0;
- trx->io_read = 0;
- trx->io_reads_wait_timer = 0;
- trx->lock_que_wait_timer = 0;
- trx->innodb_que_wait_timer = 0;
- trx->distinct_page_access = 0;
- if (trx->distinct_page_access_hash)
- memset(trx->distinct_page_access_hash, 0, DPAH_SIZE);
+ if (UNIV_UNLIKELY(trx->take_stats)) {
+ increment_thd_innodb_stats(thd,
+ (unsigned long long) trx->id,
+ trx->io_reads,
+ trx->io_read,
+ trx->io_reads_wait_timer,
+ trx->lock_que_wait_timer,
+ trx->innodb_que_wait_timer,
+ trx->distinct_page_access);
+
+ trx->io_reads = 0;
+ trx->io_read = 0;
+ trx->io_reads_wait_timer = 0;
+ trx->lock_que_wait_timer = 0;
+ trx->innodb_que_wait_timer = 0;
+ trx->distinct_page_access = 0;
+ if (trx->distinct_page_access_hash)
+ memset(trx->distinct_page_access_hash, 0,
+ DPAH_SIZE);
+ }
#endif
trx->mysql_n_tables_locked = 0;
@@ -12740,7 +12761,8 @@ static MYSQL_SYSVAR_ULONG(page_size, innobase_page_size,
static MYSQL_SYSVAR_ULONG(log_block_size, innobase_log_block_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"###EXPERIMENTAL###: The log block size of the transaction log file. Changing for created log file is not supported. Use on your own risk!",
- NULL, NULL, (1 << 9)/*512*/, (1 << 9)/*512*/, (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
+ NULL, NULL, (1 << 9)/*512*/, OS_MIN_LOG_BLOCK_SIZE,
+ (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir,
PLUGIN_VAR_READONLY,
@@ -12764,6 +12786,20 @@ static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
"Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Prevent partial page writes, via atomic writes."
+ "The option is used to prevent partial writes in case of a crash/poweroff, "
+ "as faster alternative to doublewrite buffer."
+ "Currently this option works only "
+ "on Linux only with FusionIO device, and directFS filesystem.",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(use_fallocate, innobase_use_fallocate,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Preallocate files fast, using operating system functionality. On POSIX systems, posix_fallocate system call is used.",
+ NULL, NULL, TRUE);
+
static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
PLUGIN_VAR_RQCMDARG,
"Number of IOPs the server can do. Tunes the background IO rate",
@@ -13163,7 +13199,7 @@ static MYSQL_SYSVAR_ULONGLONG(max_bitmap_file_size, srv_max_bitmap_file_size,
"The maximum size of changed page bitmap files",
NULL, NULL, 100*1024*1024ULL, 4096ULL, ULONGLONG_MAX, 0);
-static MYSQL_SYSVAR_ULONGLONG(changed_pages_limit, srv_changed_pages_limit,
+static MYSQL_SYSVAR_ULONGLONG(max_changed_pages, srv_max_changed_pages,
PLUGIN_VAR_RQCMDARG,
"The maximum number of rows for "
"INFORMATION_SCHEMA.INNODB_CHANGED_PAGES table, "
@@ -13173,8 +13209,8 @@ static MYSQL_SYSVAR_ULONGLONG(changed_pages_limit, srv_changed_pages_limit,
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
static MYSQL_SYSVAR_UINT(change_buffering_debug, ibuf_debug,
PLUGIN_VAR_RQCMDARG,
- "Debug flags for InnoDB change buffering (0=none)",
- NULL, NULL, 0, 0, 1, 0);
+ "Debug flags for InnoDB change buffering (0=none, 2=crash at merge)",
+ NULL, NULL, 0, 0, 2, 0);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
static MYSQL_SYSVAR_BOOL(random_read_ahead, srv_random_read_ahead,
@@ -13188,11 +13224,23 @@ static MYSQL_SYSVAR_ULONG(read_ahead_threshold, srv_read_ahead_threshold,
"trigger a readahead.",
NULL, NULL, 56, 0, 64, 0);
-#ifdef UNIV_DEBUG_never
+#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_NOCMDOPT,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);
+
+static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
+ btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
+ "Artificially limit the number of records per B-tree page (0=unlimited).",
+ NULL, NULL, 0, 0, UINT_MAX32, 0);
+
+static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
+ srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDOPT,
+ "Pause actual purging any delete-marked records, but merely update the purge view. "
+ "It is to create artificially the situation the purge view have been updated "
+ "but the each purges were not done yet.",
+ NULL, NULL, FALSE);
#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_LONGLONG(ibuf_max_size, srv_ibuf_max_size,
@@ -13372,6 +13420,11 @@ static MYSQL_SYSVAR_BOOL(locking_fake_changes, srv_fake_changes_locks,
"not take any locks at all.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(print_all_deadlocks, srv_print_all_deadlocks,
+ PLUGIN_VAR_OPCMDARG,
+ "Print all deadlocks to MySQL error log (off by default)",
+ NULL, NULL, FALSE);
+
static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(page_size),
MYSQL_SYSVAR(log_block_size),
@@ -13391,6 +13444,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(doublewrite_file),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
+ MYSQL_SYSVAR(use_atomic_writes),
+ MYSQL_SYSVAR(use_fallocate),
MYSQL_SYSVAR(recovery_stats),
MYSQL_SYSVAR(fast_shutdown),
MYSQL_SYSVAR(file_io_threads),
@@ -13464,7 +13519,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(change_buffering),
MYSQL_SYSVAR(track_changed_pages),
MYSQL_SYSVAR(max_bitmap_file_size),
- MYSQL_SYSVAR(changed_pages_limit),
+ MYSQL_SYSVAR(max_changed_pages),
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
MYSQL_SYSVAR(change_buffering_debug),
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
@@ -13476,14 +13531,17 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(purge_threads),
MYSQL_SYSVAR(purge_batch_size),
MYSQL_SYSVAR(rollback_segments),
-#ifdef UNIV_DEBUG_never /* disable this flag. --innodb-trx becomes ambiguous */
+#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
+ MYSQL_SYSVAR(limit_optimistic_insert_debug),
+ MYSQL_SYSVAR(trx_purge_view_update_only_debug),
#endif /* UNIV_DEBUG */
MYSQL_SYSVAR(corrupt_table_action),
MYSQL_SYSVAR(lazy_drop_table),
MYSQL_SYSVAR(fake_changes),
MYSQL_SYSVAR(locking_fake_changes),
MYSQL_SYSVAR(merge_sort_block_size),
+ MYSQL_SYSVAR(print_all_deadlocks),
NULL
};
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 359d0b95367..439be10fddb 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -81,12 +81,13 @@ class ha_innobase: public handler
uchar* upd_buf; /*!< buffer used in updates */
ulint upd_buf_size; /*!< the size of upd_buf in bytes */
- uchar srch_key_val1[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
- uchar srch_key_val2[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
+ uchar srch_key_val1[MAX_KEY_LENGTH + MAX_REF_PARTS*2];
+ uchar srch_key_val2[MAX_KEY_LENGTH + MAX_REF_PARTS*2];
/*!< buffers used in converting
search key values from MySQL format
- to InnoDB format. "+ 2" for the two
- bytes where the length is stored */
+ to InnoDB format. For each column
+ 2 bytes are used to store length,
+ hence MAX_REF_PARTS*2. */
Table_flags int_table_flags;
uint primary_key;
ulong start_of_scan; /*!< this is set to 1 when we are
@@ -124,7 +125,6 @@ class ha_innobase: public handler
enum row_type get_row_type() const;
const char* index_type(uint key_number);
- const char** bas_ext() const;
Table_flags table_flags() const;
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_keys() const;
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 0496cb98080..9886e8f6bd9 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -102,8 +102,6 @@ innobase_col_to_mysql(
ut_ad(flen >= len);
ut_ad(DATA_MBMAXLEN(col->mbminmaxlen)
>= DATA_MBMINLEN(col->mbminmaxlen));
- ut_ad(DATA_MBMAXLEN(col->mbminmaxlen)
- > DATA_MBMINLEN(col->mbminmaxlen) || flen == len);
memcpy(dest, data, len);
break;
@@ -113,13 +111,17 @@ innobase_col_to_mysql(
/* These column types should never be shipped to MySQL. */
ut_ad(0);
- case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_FLOAT:
case DATA_DOUBLE:
case DATA_DECIMAL:
/* Above are the valid column types for MySQL data. */
ut_ad(flen == len);
+ /* fall through */
+ case DATA_CHAR:
+ /* We may have flen > len when there is a shorter
+ prefix on a CHAR column. */
+ ut_ad(flen >= len);
#else /* UNIV_DEBUG */
default:
#endif /* UNIV_DEBUG */
@@ -152,7 +154,7 @@ innobase_rec_to_mysql(
field->reset();
- ipos = dict_index_get_nth_col_pos(index, i);
+ ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
if (UNIV_UNLIKELY(ipos == ULINT_UNDEFINED)) {
null_field:
@@ -309,7 +311,7 @@ innobase_check_index_keys(
}
}
- my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
@@ -323,7 +325,7 @@ innobase_check_index_keys(
continue;
}
- my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
key_part1.field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index 4b33d6a780c..d64a95a969e 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -169,7 +169,8 @@ do { \
} \
} while (0)
-#if !defined __STRICT_ANSI__ && defined __GNUC__ && (__GNUC__) > 2 && !defined __INTEL_COMPILER
+#if !defined __STRICT_ANSI__ && defined __GNUC__ && (__GNUC__) > 2 && \
+ !defined __INTEL_COMPILER && !defined __clang__
#define STRUCT_FLD(name, value) name: value
#else
#define STRUCT_FLD(name, value) value
@@ -1209,7 +1210,7 @@ trx_i_s_common_fill_table(
DBUG_ENTER("trx_i_s_common_fill_table");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -1369,7 +1370,7 @@ i_s_cmp_fill_low(
DBUG_ENTER("i_s_cmp_fill_low");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -1641,7 +1642,7 @@ i_s_cmpmem_fill_low(
DBUG_ENTER("i_s_cmpmem_fill_low");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -2274,7 +2275,7 @@ i_s_innodb_buffer_stats_fill_table(
DBUG_ENTER("i_s_innodb_buffer_fill_general");
/* Only allow the PROCESS privilege holder to access the stats */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -2967,7 +2968,7 @@ i_s_innodb_buffer_page_fill_table(
DBUG_ENTER("i_s_innodb_buffer_page_fill_table");
/* deny access to user without PROCESS privilege */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -3512,7 +3513,7 @@ i_s_innodb_buf_page_lru_fill_table(
DBUG_ENTER("i_s_innodb_buf_page_lru_fill_table");
/* deny access to any users that do not hold PROCESS_ACL */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -3746,7 +3747,7 @@ i_s_sys_tables_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -4049,7 +4050,7 @@ i_s_sys_tables_fill_table_stats(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -4293,7 +4294,7 @@ i_s_sys_indexes_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -4530,7 +4531,7 @@ i_s_sys_columns_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -4732,7 +4733,7 @@ i_s_sys_fields_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -4961,7 +4962,7 @@ i_s_sys_foreign_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -5172,7 +5173,7 @@ i_s_sys_foreign_cols_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -5387,7 +5388,7 @@ i_s_sys_stats_fill_table(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -5573,7 +5574,7 @@ i_s_innodb_rseg_fill(
DBUG_ENTER("i_s_innodb_rseg_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -5797,7 +5798,7 @@ i_s_innodb_table_stats_fill(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -5862,7 +5863,7 @@ i_s_innodb_index_stats_fill(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -6054,7 +6055,7 @@ i_s_innodb_admin_command_fill(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -6431,7 +6432,7 @@ i_s_innodb_buffer_pool_pages_fill(
DBUG_ENTER("i_s_innodb_buffer_pool_pages_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -6536,7 +6537,7 @@ i_s_innodb_buffer_pool_pages_index_fill(
DBUG_ENTER("i_s_innodb_buffer_pool_pages_index_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -6605,7 +6606,7 @@ i_s_innodb_buffer_pool_pages_blob_fill(
DBUG_ENTER("i_s_innodb_buffer_pool_pages_blob_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -7009,7 +7010,7 @@ i_s_innodb_undo_logs_fill(
DBUG_ENTER("i_s_innodb_undo_logs_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -7335,7 +7336,7 @@ i_s_innodb_changed_pages_fill(
DBUG_ENTER("i_s_innodb_changed_pages_fill");
/* deny access to non-superusers */
- if (check_global_access(thd, PROCESS_ACL)) {
+ if (check_global_access(thd, PROCESS_ACL, true)) {
DBUG_RETURN(0);
}
@@ -7356,8 +7357,8 @@ i_s_innodb_changed_pages_fill(
}
while(log_online_bitmap_iterator_next(&i) &&
- (!srv_changed_pages_limit ||
- output_rows_num < srv_changed_pages_limit) &&
+ (!srv_max_changed_pages ||
+ output_rows_num < srv_max_changed_pages) &&
/*
There is no need to compare both start LSN and end LSN fields
with maximum value. It's enough to compare only start LSN.
diff --git a/storage/xtradb/ibuf/ibuf0ibuf.c b/storage/xtradb/ibuf/ibuf0ibuf.c
index 77305e42fb1..96c264b32b4 100644
--- a/storage/xtradb/ibuf/ibuf0ibuf.c
+++ b/storage/xtradb/ibuf/ibuf0ibuf.c
@@ -2912,6 +2912,14 @@ ibuf_get_volume_buffered_count_func(
ut_a(len == 1);
ut_ad(trx_sys_multiple_tablespace_format);
+ if (rec_get_deleted_flag(rec, 0)) {
+ /* This record has been merged already,
+ but apparently the system crashed before
+ the change was discarded from the buffer.
+ Pretend that the record does not exist. */
+ return(0);
+ }
+
types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
switch (UNIV_EXPECT(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE,
@@ -4224,11 +4232,11 @@ ibuf_delete(
page, 1);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&page_cur, index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_zip) {
@@ -4333,6 +4341,22 @@ ibuf_delete_rec(
ut_ad(ibuf_rec_get_page_no(mtr, btr_pcur_get_rec(pcur)) == page_no);
ut_ad(ibuf_rec_get_space(mtr, btr_pcur_get_rec(pcur)) == space);
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+ if (ibuf_debug == 2) {
+ /* Inject a fault (crash). We do this before trying
+ optimistic delete, because a pessimistic delete in the
+ change buffer would require a larger test case. */
+
+ /* Flag the buffered record as processed, to avoid
+ an assertion failure after crash recovery. */
+ btr_cur_set_deleted_flag_for_ibuf(
+ btr_pcur_get_rec(pcur), NULL, TRUE, mtr);
+ ibuf_mtr_commit(mtr);
+ log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE);
+ DBUG_SUICIDE();
+ }
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
if (success) {
@@ -4367,7 +4391,13 @@ ibuf_delete_rec(
ut_ad(ibuf_rec_get_page_no(mtr, btr_pcur_get_rec(pcur)) == page_no);
ut_ad(ibuf_rec_get_space(mtr, btr_pcur_get_rec(pcur)) == space);
- /* We have to resort to a pessimistic delete from ibuf */
+ /* We have to resort to a pessimistic delete from ibuf.
+ Delete-mark the record so that it will not be applied again,
+ in case the server crashes before the pessimistic delete is
+ made persistent. */
+ btr_cur_set_deleted_flag_for_ibuf(
+ btr_pcur_get_rec(pcur), NULL, TRUE, mtr);
+
btr_pcur_store_position(pcur, mtr);
ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
@@ -4448,7 +4478,7 @@ ibuf_merge_or_delete_for_page(
ut_ad(!block || buf_block_get_space(block) == space);
ut_ad(!block || buf_block_get_page_no(block) == page_no);
ut_ad(!block || buf_block_get_zip_size(block) == zip_size);
- ut_ad(!block || buf_block_get_io_fix(block) == BUF_IO_READ);
+ ut_ad(!block || buf_block_get_io_fix_unlocked(block) == BUF_IO_READ);
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE
|| trx_sys_hdr_page(space, page_no)) {
@@ -4648,7 +4678,7 @@ loop:
fputs("InnoDB: Discarding record\n ", stderr);
rec_print_old(stderr, rec);
fputs("\nInnoDB: from the insert buffer!\n\n", stderr);
- } else if (block) {
+ } else if (block && !rec_get_deleted_flag(rec, 0)) {
/* Now we have at pcur a record which should be
applied on the index page; NOTE that the call below
copies pointers to fields in rec, and we must
diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h
index cb44129aeb5..97929d44159 100644
--- a/storage/xtradb/include/btr0cur.h
+++ b/storage/xtradb/include/btr0cur.h
@@ -636,7 +636,7 @@ btr_cur_set_deleted_flag_for_ibuf(
when the tablespace is
uncompressed */
ibool val, /*!< in: value to set */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/*######################################################################*/
/** In the pessimistic delete, if the page data size drops below this
@@ -806,6 +806,11 @@ srv_printf_innodb_monitor(). */
extern ulint btr_cur_n_sea_old;
#endif /* !UNIV_HOTBACKUP */
+#ifdef UNIV_DEBUG
+/* Flag to limit optimistic insert records */
+extern uint btr_cur_limit_optimistic_insert_debug;
+#endif /* UNIV_DEBUG */
+
#ifndef UNIV_NONINL
#include "btr0cur.ic"
#endif
diff --git a/storage/xtradb/include/btr0cur.ic b/storage/xtradb/include/btr0cur.ic
index e31f77c77eb..5fc4651ca13 100644
--- a/storage/xtradb/include/btr0cur.ic
+++ b/storage/xtradb/include/btr0cur.ic
@@ -27,6 +27,16 @@ Created 10/16/1994 Heikki Tuuri
#include "btr0btr.h"
#ifdef UNIV_DEBUG
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
+if (btr_cur_limit_optimistic_insert_debug\
+ && (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
+ CODE;\
+}
+#else
+# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
+#endif /* UNIV_DEBUG */
+
+#ifdef UNIV_DEBUG
/*********************************************************//**
Returns the page cursor component of a tree cursor.
@return pointer to page cursor component */
@@ -146,6 +156,9 @@ btr_cur_compress_recommendation(
page = btr_cur_get_page(cursor);
+ LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
+ return(FALSE));
+
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
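
The LIMIT_OPTIMISTIC_INSERT_DEBUG macro added above compiles away outside UNIV_DEBUG; in debug builds it executes an arbitrary statement once the record count reaches a runtime-settable limit, as the compress-recommendation hunk shows. A generic sketch of the same compile-out-able hook, with hypothetical names:

#include <stdio.h>

#define MY_DEBUG	1	/* flip to 0 for a release-style build */

#if MY_DEBUG
static unsigned	limit_nrec_debug = 3;	/* 0 = no artificial limit */
# define LIMIT_NREC_DEBUG(NREC, CODE)				\
	if (limit_nrec_debug && (NREC) >= limit_nrec_debug) {	\
		CODE;						\
	}
#else
# define LIMIT_NREC_DEBUG(NREC, CODE)
#endif

/* Pretend a page is full once the debug limit is reached. */
static int
page_has_room(unsigned n_recs)
{
	LIMIT_NREC_DEBUG(n_recs, return(0));
	return(n_recs < 100);
}

int
main(void)
{
	printf("%d %d\n", page_has_room(1), page_has_room(3));	/* 1 0 */
	return(0);
}
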
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index d48c7f0212f..e0d7a974fc3 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -199,6 +199,15 @@ struct buf_pool_info_struct{
typedef struct buf_pool_info_struct buf_pool_info_t;
+/** The occupied bytes of lists in all buffer pools */
+struct buf_pools_list_size_struct {
+ ulint LRU_bytes; /*!< LRU size in bytes */
+ ulint unzip_LRU_bytes; /*!< unzip_LRU size in bytes */
+ ulint flush_list_bytes; /*!< flush_list size in bytes */
+};
+
+typedef struct buf_pools_list_size_struct buf_pools_list_size_t;
+
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Acquire mutex on all buffer pool instances */
@@ -958,7 +967,7 @@ buf_block_set_file_page(
ulint space, /*!< in: tablespace id */
ulint page_no);/*!< in: page number */
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -967,7 +976,17 @@ buf_page_get_io_fix(
const buf_page_t* bpage) /*!< in: pointer to the control block */
__attribute__((pure));
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_page_get_io_fix_unlocked(
+/*=========================*/
+ const buf_page_t* bpage) /*!< in: pointer to the control block */
+ __attribute__((pure));
+/*********************************************************************//**
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -976,6 +995,16 @@ buf_block_get_io_fix(
const buf_block_t* block) /*!< in: pointer to the control block */
__attribute__((pure));
/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_block_get_io_fix_unlocked(
+/*==========================*/
+ const buf_block_t* block) /*!< in: pointer to the control block */
+ __attribute__((pure));
+/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
@@ -1054,8 +1083,7 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
__attribute__((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
@@ -1374,6 +1402,14 @@ buf_get_total_list_len(
ulint* free_len, /*!< out: length of all free lists */
ulint* flush_list_len);/*!< out: length of all flush lists */
/********************************************************************//**
+Get total list size in bytes from all buffer pools. */
+UNIV_INTERN
+void
+buf_get_total_list_size_in_bytes(
+/*=============================*/
+ buf_pools_list_size_t* buf_pools_list_size); /*!< out: list sizes
+ in all buffer pools */
+/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
@@ -1548,10 +1584,11 @@ struct buf_page_struct{
to read this for heuristic
purposes without holding any
mutex or latch */
- unsigned access_time:32; /*!< time of first access, or
- 0 if the block was never accessed
- in the buffer pool */
/* @} */
+ unsigned access_time; /*!< time of first access, or
+ 0 if the block was never accessed
+ in the buffer pool. Protected by
+ block mutex */
ibool space_was_being_deleted;
ibool is_corrupt;
# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
@@ -1741,6 +1778,8 @@ struct buf_pool_stat_struct{
young because the first access
was not long enough ago, in
buf_page_peek_if_too_old() */
+ ulint LRU_bytes; /*!< LRU size in bytes */
+ ulint flush_list_bytes;/*!< flush_list size in bytes */
};
/** Statistics of buddy blocks of a given size. */
diff --git a/storage/xtradb/include/buf0buf.ic b/storage/xtradb/include/buf0buf.ic
index 221f86d9d62..8d5c3edeef8 100644
--- a/storage/xtradb/include/buf0buf.ic
+++ b/storage/xtradb/include/buf0buf.ic
@@ -434,7 +434,7 @@ buf_block_set_file_page(
}
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -442,6 +442,20 @@ buf_page_get_io_fix(
/*================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
{
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+ return buf_page_get_io_fix_unlocked(bpage);
+}
+
+/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_page_get_io_fix_unlocked(
+/*=========================*/
+ const buf_page_t* bpage) /*!< in: pointer to the control block */
+{
enum buf_io_fix io_fix = (enum buf_io_fix) bpage->io_fix;
#ifdef UNIV_DEBUG
switch (io_fix) {
@@ -457,7 +471,7 @@ buf_page_get_io_fix(
}
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -469,6 +483,19 @@ buf_block_get_io_fix(
}
/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_block_get_io_fix_unlocked(
+/*==========================*/
+ const buf_block_t* block) /*!< in: pointer to the control block */
+{
+ return(buf_page_get_io_fix_unlocked(&block->page));
+}
+
+/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
@@ -638,19 +665,18 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
- buf_page_t* bpage, /*!< in/out: control block */
- ulint time_ms) /*!< in: ut_time_ms() */
+ buf_page_t* bpage) /*!< in/out: control block */
{
#ifdef UNIV_DEBUG
- //buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
- //ut_ad(buf_pool_mutex_own(buf_pool));
-#endif
+ buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ut_ad(!buf_pool_mutex_own(buf_pool));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+#endif
ut_a(buf_page_in_file(bpage));
if (!bpage->access_time) {
/* Make this the time of the first access. */
- bpage->access_time = time_ms;
+ bpage->access_time = ut_time_ms();
}
}
diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h
index efaa758f27a..2ea4f9b1ecf 100644
--- a/storage/xtradb/include/buf0lru.h
+++ b/storage/xtradb/include/buf0lru.h
@@ -158,7 +158,10 @@ buf_LRU_block_free_non_file_page(
buf_block_t* block, /*!< in: block, must not contain a file page */
ibool have_page_hash_mutex);
/******************************************************************//**
-Adds a block to the LRU list. */
+Adds a block to the LRU list. Please make sure that the zip_size is
+already set into the page zip when invoking the function, so that we
+can get the correct zip_size from the buffer page when adding a block
+to the LRU list. */
UNIV_INTERN
void
buf_LRU_add_block(
diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic
index 757dd815c5e..7ec2cb6cf36 100644
--- a/storage/xtradb/include/data0type.ic
+++ b/storage/xtradb/include/data0type.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -459,36 +459,18 @@ dtype_get_fixed_size_low(
} else if (!comp) {
return(len);
} else {
- /* We play it safe here and ask MySQL for
- mbminlen and mbmaxlen. Although
- mbminlen and mbmaxlen are
- initialized if and only if prtype
- is (in one of the 3 functions in this file),
- it could be that none of these functions
- has been called. */
-
+#ifdef UNIV_DEBUG
ulint i_mbminlen, i_mbmaxlen;
innobase_get_cset_width(
dtype_get_charset_coll(prtype),
&i_mbminlen, &i_mbmaxlen);
- if (UNIV_UNLIKELY
- (DATA_MBMINMAXLEN(i_mbminlen, i_mbmaxlen)
- != mbminmaxlen)) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: "
- "mbminlen=%lu, "
- "mbmaxlen=%lu, "
- "type->mbminlen=%lu, "
- "type->mbmaxlen=%lu\n",
- (ulong) i_mbminlen,
- (ulong) i_mbmaxlen,
- (ulong) DATA_MBMINLEN(mbminmaxlen),
- (ulong) DATA_MBMAXLEN(mbminmaxlen));
- }
- if (i_mbminlen == i_mbmaxlen) {
+ ut_ad(DATA_MBMINMAXLEN(i_mbminlen, i_mbmaxlen)
+ == mbminmaxlen);
+#endif /* UNIV_DEBUG */
+ if (DATA_MBMINLEN(mbminmaxlen)
+ == DATA_MBMAXLEN(mbminmaxlen)) {
return(len);
}
}
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index de3605b1dfb..1dd0b3f5082 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -903,7 +903,7 @@ dict_index_get_nth_col_no(
Looks for column n in an index.
@return position in internal representation of the index;
ULINT_UNDEFINED if not contained */
-UNIV_INLINE
+UNIV_INTERN
ulint
dict_index_get_nth_col_pos(
/*=======================*/
diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic
index 02eafcc5d9c..eeb916fe181 100644
--- a/storage/xtradb/include/dict0dict.ic
+++ b/storage/xtradb/include/dict0dict.ic
@@ -697,20 +697,6 @@ dict_index_get_nth_col_no(
return(dict_col_get_no(dict_index_get_nth_col(index, pos)));
}
-/********************************************************************//**
-Looks for column n in an index.
-@return position in internal representation of the index;
-ULINT_UNDEFINED if not contained */
-UNIV_INLINE
-ulint
-dict_index_get_nth_col_pos(
-/*=======================*/
- const dict_index_t* index, /*!< in: index */
- ulint n) /*!< in: column number */
-{
- return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE));
-}
-
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Returns the minimum data size of an index record.
diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h
index 54593a0b9c7..630942ae2ac 100644
--- a/storage/xtradb/include/dict0mem.h
+++ b/storage/xtradb/include/dict0mem.h
@@ -377,10 +377,15 @@ struct dict_index_struct{
unsigned type:DICT_IT_BITS;
/*!< index type (DICT_CLUSTERED, DICT_UNIQUE,
DICT_UNIVERSAL, DICT_IBUF, DICT_CORRUPT) */
- unsigned trx_id_offset:10;/*!< position of the trx id column
+#define MAX_KEY_LENGTH_BITS 12
+ unsigned trx_id_offset:MAX_KEY_LENGTH_BITS;
+ /*!< position of the trx id column
in a clustered index record, if the fields
before it are known to be of a fixed size,
0 otherwise */
+#if (1<<MAX_KEY_LENGTH_BITS) < MAX_KEY_LENGTH
+# error (1<<MAX_KEY_LENGTH_BITS) < MAX_KEY_LENGTH
+#endif
unsigned n_user_defined_cols:10;
/*!< number of columns the user defined to
be in the index: in the internal
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 7da62e68e56..2149d0aadca 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -776,6 +776,21 @@ fil_space_set_corrupt(
/*==================*/
ulint space_id);
+/****************************************************************//**
+Generate redo logs for swapping two .ibd files */
+UNIV_INTERN
+void
+fil_mtr_rename_log(
+/*===============*/
+ ulint old_space_id, /*!< in: tablespace id of the old
+ table. */
+ const char* old_name, /*!< in: old table name */
+ ulint new_space_id, /*!< in: tablespace id of the new
+ table */
+ const char* new_name, /*!< in: new table name */
+ const char* tmp_name); /*!< in: temp table name used while
+ swapping */
+
typedef struct fil_space_struct fil_space_t;
#endif
diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h
index ea636f985b4..2b659ab417e 100644
--- a/storage/xtradb/include/lock0lock.h
+++ b/storage/xtradb/include/lock0lock.h
@@ -798,14 +798,22 @@ lock_rec_get_page_no(
remains set when the waiting lock is granted,
or if the lock is inherited to a neighboring
record */
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_MODE_MASK
+#define LOCK_CONV_BY_OTHER 4096 /*!< this bit is set when the lock is created
+ by other transaction */
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_MODE_MASK
# error
#endif
-#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_TYPE_MASK
+#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_TYPE_MASK
# error
#endif
/* @} */
+/** Checks if this is a waiting lock created by lock->trx itself.
+@param type_mode lock->type_mode
+@return whether it is a waiting lock belonging to lock->trx */
+#define lock_is_wait_not_by_other(type_mode) \
+ ((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
+
/** Lock operation struct */
typedef struct lock_op_struct lock_op_t;
/** Lock operation struct */
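
The new LOCK_CONV_BY_OTHER bit marks record locks that one transaction creates on behalf of another while converting an implicit lock, and lock_is_wait_not_by_other() filters those out when looking for a wait that belongs to lock->trx itself. A small sketch of the bit test; only LOCK_CONV_BY_OTHER = 4096 is given above, the LOCK_WAIT value of 256 is an assumption made for illustration:

    #include <stdio.h>

    #define LOCK_WAIT            256   /* assumed value, for illustration */
    #define LOCK_CONV_BY_OTHER  4096   /* as defined in the hunk above */

    #define lock_is_wait_not_by_other(type_mode) \
            (((type_mode) & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)

    int main(void)
    {
            unsigned own_wait  = LOCK_WAIT;                      /* trx waits itself */
            unsigned conv_wait = LOCK_WAIT | LOCK_CONV_BY_OTHER; /* created by another trx */

            printf("%d %d\n",
                   lock_is_wait_not_by_other(own_wait),    /* prints 1 */
                   lock_is_wait_not_by_other(conv_wait));  /* prints 0 */
            return 0;
    }
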
diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h
index e7c3f301e45..999a317780e 100644
--- a/storage/xtradb/include/log0online.h
+++ b/storage/xtradb/include/log0online.h
@@ -41,23 +41,51 @@ typedef struct log_bitmap_iterator_struct log_bitmap_iterator_t;
Initializes the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_init();
-/*===================*/
+log_online_read_init(void);
+/*=======================*/
/*********************************************************************//**
Shuts down the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_shutdown();
-/*=======================*/
+log_online_read_shutdown(void);
+/*===========================*/
/*********************************************************************//**
Reads and parses the redo log up to last checkpoint LSN to build the changed
-page bitmap which is then written to disk. */
+page bitmap which is then written to disk.
+
+@return TRUE if log tracking succeeded, FALSE if bitmap write I/O error */
UNIV_INTERN
-void
-log_online_follow_redo_log();
-/*=========================*/
+ibool
+log_online_follow_redo_log(void);
+/*=============================*/
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+
+@return FALSE to indicate success, TRUE for failure. */
+UNIV_INTERN
+ibool
+log_online_purge_changed_page_bitmaps(
+/*==================================*/
+ ib_uint64_t lsn); /*!<in: LSN to purge files up to */
+
#define LOG_BITMAP_ITERATOR_START_LSN(i) \
((i).start_lsn)
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index 4c795d93141..05403a8e752 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -190,6 +190,8 @@ extern ulint os_n_file_reads;
extern ulint os_n_file_writes;
extern ulint os_n_fsyncs;
+#define OS_MIN_LOG_BLOCK_SIZE 512
+
extern ulint srv_log_block_size;
#ifdef UNIV_PFS_IO
diff --git a/storage/xtradb/include/page0zip.h b/storage/xtradb/include/page0zip.h
index fe3d2e52e0b..23a2cac618b 100644
--- a/storage/xtradb/include/page0zip.h
+++ b/storage/xtradb/include/page0zip.h
@@ -156,9 +156,10 @@ page_zip_validate_low(
/*==================*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index, /*!< in: index of the page, if known */
ibool sloppy) /*!< in: FALSE=strict,
TRUE=ignore the MIN_REC_FLAG */
- __attribute__((nonnull));
+ __attribute__((nonnull(1,2)));
/**********************************************************************//**
Check that the compressed and decompressed pages match. */
UNIV_INTERN
@@ -166,8 +167,9 @@ ibool
page_zip_validate(
/*==============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
- const page_t* page) /*!< in: uncompressed page */
- __attribute__((nonnull));
+ const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index) /*!< in: index of the page, if known */
+ __attribute__((nonnull(1,2)));
#endif /* UNIV_ZIP_DEBUG */
/**********************************************************************//**
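
page_zip_validate() and page_zip_validate_low() gain an index argument that may legitimately be NULL when the index is unknown, so the blanket nonnull attribute is narrowed to nonnull(1,2). A toy sketch of how the two attribute forms differ; the function here is not the real page_zip API:

    #include <stddef.h>
    #include <stdio.h>

    /* nonnull with no list covers every pointer parameter; nonnull(1,2)
       checks only the first two, so the optional third may be NULL. */
    static int validate(const char *zip, const char *page, const char *idx)
            __attribute__((nonnull(1, 2)));

    static int validate(const char *zip, const char *page, const char *idx)
    {
            printf("validating %s against %s (index: %s)\n",
                   zip, page, idx ? idx : "unknown");
            return 1;
    }

    int main(void)
    {
            validate("compressed", "uncompressed", NULL);  /* allowed */
            /* validate(NULL, "uncompressed", NULL); would trigger a
               -Wnonnull warning at compile time. */
            return 0;
    }
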
diff --git a/storage/xtradb/include/rem0rec.h b/storage/xtradb/include/rem0rec.h
index 10b74d18c13..98bf889b996 100644
--- a/storage/xtradb/include/rem0rec.h
+++ b/storage/xtradb/include/rem0rec.h
@@ -362,24 +362,6 @@ rec_get_offsets_func(
rec_get_offsets_func(rec,index,offsets,n,heap,__FILE__,__LINE__)
/******************************************************//**
-Determine the offset to each field in a leaf-page record
-in ROW_FORMAT=COMPACT. This is a special case of
-rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
-void
-rec_init_offsets_comp_ordinary(
-/*===========================*/
- const rec_t* rec, /*!< in: physical record in
- ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets);/*!< in/out: array of offsets;
- in: n=rec_offs_n_fields(offsets) */
-
-/******************************************************//**
The following function determines the offsets to each field
in the record. It can reuse a previously allocated array. */
UNIV_INTERN
@@ -644,8 +626,48 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in a temporary file.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+ __attribute__((warn_unused_result, nonnull));
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+ __attribute__((nonnull));
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+ __attribute__((nonnull));
+
/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in
a buffer.
@@ -680,21 +702,6 @@ rec_fold(
__attribute__((pure));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
-Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
-void
-rec_convert_dtuple_to_rec_comp(
-/*===========================*/
- rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
- const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields);/*!< in: number of data fields */
-/*********************************************************//**
Builds a physical record out of a data tuple and
stores it into the given buffer.
@return pointer to the origin of physical record */
@@ -727,10 +734,7 @@ UNIV_INTERN
ulint
rec_get_converted_size_comp_prefix(
/*===============================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
+ const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra); /*!< out: extra size */
diff --git a/storage/xtradb/include/row0undo.h b/storage/xtradb/include/row0undo.h
index 6eb4ca448b3..9420d022e3b 100644
--- a/storage/xtradb/include/row0undo.h
+++ b/storage/xtradb/include/row0undo.h
@@ -87,10 +87,6 @@ that index record. */
enum undo_exec {
UNDO_NODE_FETCH_NEXT = 1, /*!< we should fetch the next
undo log record */
- UNDO_NODE_PREV_VERS, /*!< the roll ptr to previous
- version of a row is stored in
- node, and undo should be done
- based on it */
UNDO_NODE_INSERT, /*!< undo a fresh insert of a
row to a table */
UNDO_NODE_MODIFY /*!< undo a modify operation
@@ -108,9 +104,6 @@ struct undo_node_struct{
undo_no_t undo_no;/*!< undo number of the record */
ulint rec_type;/*!< undo log record type: TRX_UNDO_INSERT_REC,
... */
- roll_ptr_t new_roll_ptr;
- /*!< roll ptr to restore to clustered index
- record */
trx_id_t new_trx_id; /*!< trx id to restore to clustered index
record */
btr_pcur_t pcur; /*!< persistent cursor used in searching the
diff --git a/storage/xtradb/include/row0upd.ic b/storage/xtradb/include/row0upd.ic
index 10646241125..6706c9f8c69 100644
--- a/storage/xtradb/include/row0upd.ic
+++ b/storage/xtradb/include/row0upd.ic
@@ -28,6 +28,7 @@ Created 12/27/1996 Heikki Tuuri
# include "trx0trx.h"
# include "trx0undo.h"
# include "row0row.h"
+# include "lock0lock.h"
#endif /* !UNIV_HOTBACKUP */
#include "page0zip.h"
@@ -171,6 +172,8 @@ row_upd_rec_sys_fields(
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
+ ut_ad(lock_check_trx_id_sanity(trx_read_trx_id(rec + offset),
+ rec, index, offsets, FALSE));
trx_write_trx_id(rec + offset, trx->id);
trx_write_roll_ptr(rec + offset + DATA_TRX_ID_LEN, roll_ptr);
}
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index 6c5b61487f2..586c1e73879 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -148,7 +148,7 @@ extern my_bool srv_track_changed_pages;
extern ib_uint64_t srv_max_bitmap_file_size;
extern
-ulonglong srv_changed_pages_limit;
+ulonglong srv_max_changed_pages;
extern ibool srv_auto_extend_last_data_file;
extern ulint srv_last_file_size_max;
@@ -249,6 +249,11 @@ extern ulong srv_sys_stats_root_page;
#endif
extern ibool srv_use_doublewrite_buf;
+extern ibool srv_use_atomic_writes;
+#ifdef HAVE_POSIX_FALLOCATE
+extern ibool srv_use_posix_fallocate;
+#endif
+
extern ibool srv_use_checksums;
extern ibool srv_fast_checksum;
@@ -320,6 +325,10 @@ extern ulint srv_fatal_semaphore_wait_threshold;
extern ulint srv_dml_needed_delay;
extern long long srv_kill_idle_transaction;
+#ifdef UNIV_DEBUG
+extern my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
extern mutex_t* kernel_mutex_temp;/* mutex protecting the server, trx structs,
query threads, and lock table: we allocate
it from dynamic memory to get it to the
@@ -400,6 +409,9 @@ extern ibool srv_blocking_lru_restore;
When FALSE, row locks are not taken at all. */
extern my_bool srv_fake_changes_locks;
+/** print all user-level transactions deadlocks to mysqld stderr */
+extern my_bool srv_print_all_deadlocks;
+
/** Status variables to be passed to MySQL */
typedef struct export_var_struct export_struc;
@@ -794,7 +806,9 @@ struct export_var_struct{
ulint innodb_dict_tables;
ulint innodb_buffer_pool_pages_total; /*!< Buffer pool size */
ulint innodb_buffer_pool_pages_data; /*!< Data pages */
+ ulint innodb_buffer_pool_bytes_data; /*!< File bytes used */
ulint innodb_buffer_pool_pages_dirty; /*!< Dirty data pages */
+ ulint innodb_buffer_pool_bytes_dirty; /*!< File bytes modified */
ulint innodb_buffer_pool_pages_misc; /*!< Miscellaneous pages */
ulint innodb_buffer_pool_pages_free; /*!< Free pages */
#ifdef UNIV_DEBUG
@@ -880,6 +894,11 @@ struct export_var_struct{
ib_int64_t innodb_x_lock_os_waits;
ib_int64_t innodb_x_lock_spin_rounds;
ib_int64_t innodb_x_lock_spin_waits;
+#ifdef UNIV_DEBUG
+ ulint innodb_purge_trx_id_age; /*!< max_trx_id - purged trx_id */
+ ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
+ - purged view's min trx_id */
+#endif /* UNIV_DEBUG */
};
/** Thread slot in the thread table */
diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h
index 4a2f55d90ff..b3b99b10630 100644
--- a/storage/xtradb/include/sync0sync.h
+++ b/storage/xtradb/include/sync0sync.h
@@ -89,6 +89,7 @@ extern mysql_pfs_key_t hash_table_mutex_key;
extern mysql_pfs_key_t ibuf_bitmap_mutex_key;
extern mysql_pfs_key_t ibuf_mutex_key;
extern mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
+extern mysql_pfs_key_t log_bmp_sys_mutex_key;
extern mysql_pfs_key_t log_sys_mutex_key;
extern mysql_pfs_key_t log_flush_order_mutex_key;
extern mysql_pfs_key_t kernel_mutex_key;
@@ -672,6 +673,7 @@ or row lock! */
#define SYNC_TRX_LOCK_HEAP 298
#define SYNC_TRX_SYS_HEADER 290
#define SYNC_PURGE_QUEUE 200
+#define SYNC_LOG_ONLINE 175
#define SYNC_LOG 170
#define SYNC_LOG_FLUSH_ORDER 156
#define SYNC_RECV 168
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index 77acf54d8dc..60c5cc79852 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -49,13 +49,10 @@ Created 1/20/1994 Heikki Tuuri
#define _IB_TO_STR(s) #s
#define IB_TO_STR(s) _IB_TO_STR(s)
-#define INNODB_VERSION_MAJOR 1
-#define INNODB_VERSION_MINOR 1
-#define INNODB_VERSION_BUGFIX 8
+#include <mysql_version.h>
-#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 29.3
-#endif
+#define INNODB_VERSION_MAJOR MYSQL_MAJOR_VERSION
+#define INNODB_VERSION_MINOR MYSQL_MINOR_VERSION
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -66,11 +63,11 @@ component, i.e. we show M.N.P as M.N */
#define INNODB_VERSION_SHORT \
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
-#define INNODB_VERSION_STR \
- IB_TO_STR(INNODB_VERSION_MAJOR) "." \
- IB_TO_STR(INNODB_VERSION_MINOR) "." \
- IB_TO_STR(INNODB_VERSION_BUGFIX) "-" \
- IB_TO_STR(PERCONA_INNODB_VERSION)
+#ifndef PERCONA_INNODB_VERSION
+#define PERCONA_INNODB_VERSION 30.1
+#endif
+
+#define INNODB_VERSION_STR MYSQL_SERVER_VERSION "-" IB_TO_STR(PERCONA_INNODB_VERSION)
#define REFMAN "http://dev.mysql.com/doc/refman/" \
IB_TO_STR(MYSQL_MAJOR_VERSION) "." \
@@ -300,6 +297,24 @@ management to ensure correct alignment for doubles etc. */
========================
*/
+/** There are currently two InnoDB file formats which are used to group
+features with similar restrictions and dependencies. Using an enum allows
+switch statements to give a compiler warning when a new one is introduced. */
+enum innodb_file_formats_enum {
+ /** Antelope File Format: InnoDB/MySQL up to 5.1.
+ This format includes REDUNDANT and COMPACT row formats */
+ UNIV_FORMAT_A = 0,
+
+ /** Barracuda File Format: Introduced in InnoDB plugin for 5.1:
+ This format includes COMPRESSED and DYNAMIC row formats. It
+ includes the ability to create secondary indexes from data that
+ is not on the clustered index page and the ability to store more
+ data off the clustered index page. */
+ UNIV_FORMAT_B = 1
+};
+
+typedef enum innodb_file_formats_enum innodb_file_formats_t;
+
/* The 2-logarithm of UNIV_PAGE_SIZE: */
/* #define UNIV_PAGE_SIZE_SHIFT 14 */
#define UNIV_PAGE_SIZE_SHIFT_MAX 14
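
The new INNODB_VERSION_STR relies on the two-step IB_TO_STR()/_IB_TO_STR() stringification shown in the context above: the extra indirection lets PERCONA_INNODB_VERSION be macro-expanded before # turns it into a string literal. A minimal sketch with stand-in names (SERVER_VERSION here is only a placeholder for MYSQL_SERVER_VERSION):

    #include <stdio.h>

    #define STR_RAW(s)  #s         /* same pattern as _IB_TO_STR */
    #define STR(s)      STR_RAW(s) /* same pattern as IB_TO_STR  */

    #define PERCONA_INNODB_VERSION 30.1
    #define SERVER_VERSION "5.5.30"   /* stand-in for MYSQL_SERVER_VERSION */

    int main(void)
    {
            puts(STR_RAW(PERCONA_INNODB_VERSION));  /* "PERCONA_INNODB_VERSION" */
            puts(STR(PERCONA_INNODB_VERSION));      /* "30.1" */
            puts(SERVER_VERSION "-" STR(PERCONA_INNODB_VERSION));  /* "5.5.30-30.1" */
            return 0;
    }
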
diff --git a/storage/xtradb/lock/lock0lock.c b/storage/xtradb/lock/lock0lock.c
index f172ad6695b..47d082ed49f 100644
--- a/storage/xtradb/lock/lock0lock.c
+++ b/storage/xtradb/lock/lock0lock.c
@@ -790,12 +790,16 @@ lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /*!< in: record lock */
{
- ut_ad((lock->trx)->wait_lock == lock);
ut_ad(lock_get_wait(lock));
/* Reset the back pointer in trx to this waiting lock request */
- (lock->trx)->wait_lock = NULL;
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
+ ut_ad((lock->trx)->wait_lock == lock);
+ (lock->trx)->wait_lock = NULL;
+ } else {
+ ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ }
lock->type_mode &= ~LOCK_WAIT;
}
@@ -1431,9 +1435,9 @@ lock_rec_has_expl(
while (lock) {
if (lock->trx == trx
+ && !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(lock_get_mode(lock),
precise_mode & LOCK_MODE_MASK)
- && !lock_get_wait(lock)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
@@ -1731,9 +1735,9 @@ lock_rec_create(
HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), lock);
- lock_sys->rec_num++;
- if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
+ lock_sys->rec_num++;
+ if (lock_is_wait_not_by_other(type_mode)) {
lock_set_lock_and_trx_wait(lock, trx);
}
@@ -1763,10 +1767,11 @@ lock_rec_enqueue_waiting(
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
+ lock_t* lock, /*!< in: lock object; NULL if a new
+ one should be created. */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
- lock_t* lock;
trx_t* trx;
ulint sec;
ulint ms;
@@ -1803,9 +1808,17 @@ lock_rec_enqueue_waiting(
ut_ad(0);
}
- /* Enqueue the lock request that will wait to be granted */
- lock = lock_rec_create(type_mode | LOCK_WAIT,
- block, heap_no, index, trx);
+ if (lock == NULL) {
+ /* Enqueue the lock request that will wait to be granted */
+ lock = lock_rec_create(type_mode | LOCK_WAIT,
+ block, heap_no, index, trx);
+ } else {
+ ut_ad(lock->type_mode & LOCK_WAIT);
+ ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ lock_set_lock_and_trx_wait(lock, trx);
+ }
/* Check if a deadlock occurs: if yes, remove the lock request and
return an error code */
@@ -1829,7 +1842,7 @@ lock_rec_enqueue_waiting(
trx->que_state = TRX_QUE_LOCK_WAIT;
trx->was_chosen_as_deadlock_victim = FALSE;
trx->wait_started = time(NULL);
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
trx->lock_que_wait_ustarted = (ib_uint64_t)sec * 1000000 + ms;
}
@@ -2054,6 +2067,7 @@ lock_rec_lock_slow(
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
+ lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
@@ -2068,7 +2082,27 @@ lock_rec_lock_slow(
trx = thr_get_trx(thr);
- if (lock_rec_has_expl(mode, block, heap_no, trx)) {
+ lock = lock_rec_has_expl(mode, block, heap_no, trx);
+ if (lock) {
+ if (lock->type_mode & LOCK_CONV_BY_OTHER) {
+ /* This lock or lock waiting was created by the other
+ transaction, not by the transaction (trx) itself.
+ So, the transaction (trx) should treat it correctly
+ according to whether it has been granted or not. */
+
+ if (lock->type_mode & LOCK_WAIT) {
+ /* This lock request was not granted yet.
+ Should wait for granted. */
+
+ goto enqueue_waiting;
+ } else {
+ /* This lock request was already granted.
+ Just clearing the flag. */
+
+ lock->type_mode &= ~LOCK_CONV_BY_OTHER;
+ }
+ }
+
/* The trx already has a strong enough lock on rec: do
nothing */
@@ -2078,8 +2112,10 @@ lock_rec_lock_slow(
the queue, as this transaction does not have a lock strong
enough already granted on the record, we have to wait. */
+ ut_ad(lock == NULL);
+enqueue_waiting:
return(lock_rec_enqueue_waiting(mode, block, heap_no,
- index, thr));
+ lock, index, thr));
} else if (!impl) {
/* Set the requested lock on the record */
@@ -2221,7 +2257,8 @@ lock_grant(
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
- if (lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
+ if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
+ && lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
trx_end_lock_wait(lock->trx);
}
}
@@ -2238,6 +2275,7 @@ lock_rec_cancel(
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2382,8 +2420,12 @@ lock_rec_reset_and_release_wait(
lock = lock_rec_get_first(block, heap_no);
while (lock != NULL) {
- if (lock_get_wait(lock)) {
+ if (lock_is_wait_not_by_other(lock->type_mode)) {
lock_rec_cancel(lock);
+ } else if (lock_get_wait(lock)) {
+ /* just reset LOCK_WAIT */
+ lock_rec_reset_nth_bit(lock, heap_no);
+ lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
@@ -3271,6 +3313,80 @@ lock_rec_restore_from_page_infimum(
/*=========== DEADLOCK CHECKING ======================================*/
+/*********************************************************************//**
+rewind(3) the file used for storing the latest detected deadlock and
+print a heading message to stderr if printing of all deadlocks to stderr
+is enabled. */
+UNIV_INLINE
+void
+lock_deadlock_start_print()
+/*=======================*/
+{
+ rewind(lock_latest_err_file);
+ ut_print_timestamp(lock_latest_err_file);
+
+ if (srv_print_all_deadlocks) {
+ fprintf(stderr, "InnoDB: transactions deadlock detected, "
+ "dumping detailed information.\n");
+ ut_print_timestamp(stderr);
+ }
+}
+
+/*********************************************************************//**
+Print a message to the deadlock file and possibly to stderr. */
+UNIV_INLINE
+void
+lock_deadlock_fputs(
+/*================*/
+ const char* msg) /*!< in: message to print */
+{
+ fputs(msg, lock_latest_err_file);
+
+ if (srv_print_all_deadlocks) {
+ fputs(msg, stderr);
+ }
+}
+
+/*********************************************************************//**
+Print transaction data to the deadlock file and possibly to stderr. */
+UNIV_INLINE
+void
+lock_deadlock_trx_print(
+/*====================*/
+ trx_t* trx, /*!< in: transaction */
+ ulint max_query_len) /*!< in: max query length to print, or 0 to
+ use the default max length */
+{
+ trx_print(lock_latest_err_file, trx, max_query_len);
+
+ if (srv_print_all_deadlocks) {
+ trx_print(stderr, trx, max_query_len);
+ }
+}
+
+/*********************************************************************//**
+Print lock data to the deadlock file and possibly to stderr. */
+UNIV_INLINE
+void
+lock_deadlock_lock_print(
+/*=====================*/
+ const lock_t* lock) /*!< in: record or table type lock */
+{
+ if (lock_get_type_low(lock) == LOCK_REC) {
+ lock_rec_print(lock_latest_err_file, lock);
+
+ if (srv_print_all_deadlocks) {
+ lock_rec_print(stderr, lock);
+ }
+ } else {
+ lock_table_print(lock_latest_err_file, lock);
+
+ if (srv_print_all_deadlocks) {
+ lock_table_print(stderr, lock);
+ }
+ }
+}
+
/********************************************************************//**
Checks if a lock request results in a deadlock.
@return TRUE if a deadlock was detected and we chose trx as a victim;
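
The four lock_deadlock_* helpers added above all follow the same pattern: write to lock_latest_err_file unconditionally and mirror the text to stderr when srv_print_all_deadlocks is set. A stripped-down sketch of that pattern with placeholder globals (the real flag and file live in the server):

    #include <stdio.h>

    static FILE *latest_err_file;          /* placeholder for lock_latest_err_file */
    static int   print_all_deadlocks = 1;  /* placeholder for srv_print_all_deadlocks */

    static void deadlock_fputs(const char *msg)
    {
            fputs(msg, latest_err_file);
            if (print_all_deadlocks) {
                    fputs(msg, stderr);    /* optional mirror to the error log */
            }
    }

    int main(void)
    {
            latest_err_file = tmpfile();
            if (latest_err_file == NULL) {
                    return 1;
            }
            rewind(latest_err_file);       /* keep only the latest deadlock */
            deadlock_fputs("*** WE ROLL BACK TRANSACTION (2)\n");
            fclose(latest_err_file);
            return 0;
    }
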
@@ -3314,31 +3430,26 @@ retry:
/* If the lock search exceeds the max step
or the max depth, the current trx will be
the victim. Print its information. */
- rewind(lock_latest_err_file);
- ut_print_timestamp(lock_latest_err_file);
+ lock_deadlock_start_print();
- fputs("TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
- " WAITS-FOR GRAPH, WE WILL ROLL BACK"
- " FOLLOWING TRANSACTION \n",
- lock_latest_err_file);
+ lock_deadlock_fputs(
+ "TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
+ " WAITS-FOR GRAPH, WE WILL ROLL BACK"
+ " FOLLOWING TRANSACTION \n\n"
+ "*** TRANSACTION:\n");
- fputs("\n*** TRANSACTION:\n", lock_latest_err_file);
- trx_print(lock_latest_err_file, trx, 3000);
+ lock_deadlock_trx_print(trx, 3000);
- fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n",
- lock_latest_err_file);
+ lock_deadlock_fputs(
+ "*** WAITING FOR THIS LOCK TO BE GRANTED:\n");
+
+ lock_deadlock_lock_print(lock);
- if (lock_get_type(lock) == LOCK_REC) {
- lock_rec_print(lock_latest_err_file, lock);
- } else {
- lock_table_print(lock_latest_err_file, lock);
- }
break;
case LOCK_VICTIM_IS_START:
srv_n_lock_deadlock_count++;
- fputs("*** WE ROLL BACK TRANSACTION (2)\n",
- lock_latest_err_file);
+ lock_deadlock_fputs("*** WE ROLL BACK TRANSACTION (2)\n");
break;
default:
@@ -3453,45 +3564,33 @@ lock_deadlock_recursive(
point: a deadlock detected; or we have
searched the waits-for graph too long */
- FILE* ef = lock_latest_err_file;
+ lock_deadlock_start_print();
- rewind(ef);
- ut_print_timestamp(ef);
+ lock_deadlock_fputs("\n*** (1) TRANSACTION:\n");
- fputs("\n*** (1) TRANSACTION:\n", ef);
+ lock_deadlock_trx_print(wait_lock->trx, 3000);
- trx_print(ef, wait_lock->trx, 3000);
+ lock_deadlock_fputs(
+ "*** (1) WAITING FOR THIS LOCK"
+ " TO BE GRANTED:\n");
- fputs("*** (1) WAITING FOR THIS LOCK"
- " TO BE GRANTED:\n", ef);
+ lock_deadlock_lock_print(wait_lock);
- if (lock_get_type_low(wait_lock) == LOCK_REC) {
- lock_rec_print(ef, wait_lock);
- } else {
- lock_table_print(ef, wait_lock);
- }
+ lock_deadlock_fputs("*** (2) TRANSACTION:\n");
- fputs("*** (2) TRANSACTION:\n", ef);
+ lock_deadlock_trx_print(lock->trx, 3000);
- trx_print(ef, lock->trx, 3000);
+ lock_deadlock_fputs(
+ "*** (2) HOLDS THE LOCK(S):\n");
- fputs("*** (2) HOLDS THE LOCK(S):\n", ef);
+ lock_deadlock_lock_print(lock);
- if (lock_get_type_low(lock) == LOCK_REC) {
- lock_rec_print(ef, lock);
- } else {
- lock_table_print(ef, lock);
- }
+ lock_deadlock_fputs(
+ "*** (2) WAITING FOR THIS LOCK"
+ " TO BE GRANTED:\n");
- fputs("*** (2) WAITING FOR THIS LOCK"
- " TO BE GRANTED:\n", ef);
+ lock_deadlock_lock_print(start->wait_lock);
- if (lock_get_type_low(start->wait_lock)
- == LOCK_REC) {
- lock_rec_print(ef, start->wait_lock);
- } else {
- lock_table_print(ef, start->wait_lock);
- }
#ifdef UNIV_DEBUG
if (lock_print_waits) {
fputs("Deadlock detected\n",
@@ -3514,8 +3613,8 @@ lock_deadlock_recursive(
as a victim to try to avoid deadlocking our
recursion starting point transaction */
- fputs("*** WE ROLL BACK TRANSACTION (1)\n",
- ef);
+ lock_deadlock_fputs(
+ "*** WE ROLL BACK TRANSACTION (1)\n");
wait_lock->trx->was_chosen_as_deadlock_victim
= TRUE;
@@ -3600,6 +3699,7 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
++table->n_waiting_or_granted_auto_inc_locks;
@@ -3837,7 +3937,7 @@ lock_table_enqueue_waiting(
return(DB_SUCCESS);
}
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
trx->lock_que_wait_ustarted = (ib_uint64_t)sec * 1000000 + ms;
}
@@ -4163,6 +4263,7 @@ lock_cancel_waiting_and_release(
lock_t* lock) /*!< in: waiting lock request */
{
ut_ad(mutex_own(&kernel_mutex));
+ ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
if (lock_get_type_low(lock) == LOCK_REC) {
@@ -4658,12 +4759,16 @@ loop:
lock_mutex_exit_kernel();
- mtr_start(&mtr);
+ if (srv_show_verbose_locks) {
+ mtr_start(&mtr);
- buf_page_get_with_no_latch(space, zip_size,
- page_no, &mtr);
+ buf_page_get_gen(space, zip_size, page_no,
+ RW_NO_LATCH, NULL,
+ BUF_GET_POSSIBLY_FREED,
+ __FILE__, __LINE__, &mtr);
- mtr_commit(&mtr);
+ mtr_commit(&mtr);
+ }
load_page_first = FALSE;
@@ -5211,7 +5316,7 @@ lock_rec_insert_check_and_lock(
err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
| LOCK_INSERT_INTENTION,
block, next_rec_heap_no,
- index, thr);
+ NULL, index, thr);
} else {
err = DB_SUCCESS;
}
@@ -5287,10 +5392,23 @@ lock_rec_convert_impl_to_expl(
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
heap_no, impl_trx)) {
+ ulint type_mode = (LOCK_REC | LOCK_X
+ | LOCK_REC_NOT_GAP);
+
+ /* If the delete-marked record is already locked by another
+ transaction, reserve a waiting lock for impl_trx to cover its
+ implicit lock, because the lock cannot be granted right now. */
+
+ if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
+ && lock_rec_other_has_conflicting(
+ LOCK_X | LOCK_REC_NOT_GAP, block,
+ heap_no, impl_trx)) {
+
+ type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
+ }
lock_rec_add_to_queue(
- LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
- block, heap_no, index, impl_trx);
+ type_mode, block, heap_no, index, impl_trx);
}
}
}
diff --git a/storage/xtradb/log/log0online.c b/storage/xtradb/log/log0online.c
index 55eb9d17c46..be0a9708b8c 100644
--- a/storage/xtradb/log/log0online.c
+++ b/storage/xtradb/log/log0online.c
@@ -36,6 +36,11 @@ Online database log parsing for changed page tracking
enum { FOLLOW_SCAN_SIZE = 4 * (UNIV_PAGE_SIZE_MAX) };
+#ifdef UNIV_PFS_MUTEX
+/* Key to register log_bmp_sys->mutex with PFS */
+UNIV_INTERN mysql_pfs_key_t log_bmp_sys_mutex_key;
+#endif /* UNIV_PFS_MUTEX */
+
/** Log parsing and bitmap output data structure */
struct log_bitmap_struct {
byte read_buf[FOLLOW_SCAN_SIZE];
@@ -69,6 +74,7 @@ struct log_bitmap_struct {
both the correct type and the tree does
not mind its overwrite during
rbt_next() tree traversal. */
+ mutex_t mutex; /*!< mutex protecting all the fields.*/
};
/* The log parsing and bitmap output struct instance */
@@ -172,6 +178,8 @@ log_online_set_page_bit(
byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
byte *page_ptr;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
ut_a(space != ULINT_UNDEFINED);
ut_a(page_no != ULINT_UNDEFINED);
@@ -312,8 +320,8 @@ its name is correct and use it for (re-)tracking start.
@return the last fully tracked LSN */
static
ib_uint64_t
-log_online_read_last_tracked_lsn()
-/*==============================*/
+log_online_read_last_tracked_lsn(void)
+/*==================================*/
{
byte page[MODIFIED_PAGE_BLOCK_SIZE];
ibool is_last_page = FALSE;
@@ -405,8 +413,10 @@ log_online_can_track_missing(
if (last_tracked_lsn > tracking_start_lsn) {
fprintf(stderr,
- "InnoDB: Error: last tracked LSN is in future. This "
- "can be caused by mismatched bitmap files.\n");
+ "InnoDB: Error: last tracked LSN %llu is ahead of "
+ "tracking start LSN %llu. This can be caused by "
+ "mismatched bitmap files.\n", last_tracked_lsn,
+ tracking_start_lsn);
exit(1);
}
@@ -431,10 +441,10 @@ log_online_track_missing_on_startup(
{
ut_ad(last_tracked_lsn != tracking_start_lsn);
- fprintf(stderr, "InnoDB: last tracked LSN is %llu, but the last "
- "checkpoint LSN is %llu. This might be due to a server "
- "crash or a very fast shutdown. ", last_tracked_lsn,
- tracking_start_lsn);
+ fprintf(stderr, "InnoDB: last tracked LSN in \'%s\' is %llu, but the "
+ "last checkpoint LSN is %llu. This might be due to a server "
+ "crash or a very fast shutdown. ", log_bmp_sys->out.name,
+ last_tracked_lsn, tracking_start_lsn);
/* See if we can fully recover the missing interval */
if (log_online_can_track_missing(last_tracked_lsn,
@@ -446,7 +456,9 @@ log_online_track_missing_on_startup(
log_bmp_sys->start_lsn = ut_max_uint64(last_tracked_lsn,
MIN_TRACKED_LSN);
log_set_tracked_lsn(log_bmp_sys->start_lsn);
- log_online_follow_redo_log();
+ if (!log_online_follow_redo_log()) {
+ exit(1);
+ }
ut_ad(log_bmp_sys->end_lsn >= tracking_start_lsn);
fprintf(stderr,
@@ -484,18 +496,47 @@ log_online_make_bitmap_name(
}
/*********************************************************************//**
-Create a new empty bitmap output file. */
+Check if an old file that has the name of a new bitmap file we are about to
+create should be overwritten. */
static
-void
-log_online_start_bitmap_file()
-/*==========================*/
+ibool
+log_online_should_overwrite(
+/*========================*/
+ const char *path) /*!< in: path to file */
{
- ibool success;
+ ibool success;
+ os_file_stat_t file_info;
- log_bmp_sys->out.file
- = os_file_create(innodb_file_bmp_key, log_bmp_sys->out.name,
- OS_FILE_OVERWRITE, OS_FILE_NORMAL,
- OS_DATA_FILE, &success);
+ /* Currently, it's OK to overwrite 0-sized files only */
+ success = os_file_get_status(path, &file_info);
+ return success && file_info.size == 0LL;
+}
+
+/*********************************************************************//**
+Create a new empty bitmap output file.
+
+@return TRUE if operation succeeded, FALSE if I/O error */
+static
+ibool
+log_online_start_bitmap_file(void)
+/*==============================*/
+{
+ ibool success = TRUE;
+
+ /* Check for an old file that should be deleted first */
+ if (log_online_should_overwrite(log_bmp_sys->out.name)) {
+ success = os_file_delete(log_bmp_sys->out.name);
+ }
+
+ if (UNIV_LIKELY(success)) {
+ log_bmp_sys->out.file
+ = os_file_create_simple_no_error_handling(
+ innodb_file_bmp_key,
+ log_bmp_sys->out.name,
+ OS_FILE_CREATE,
+ OS_FILE_READ_WRITE,
+ &success);
+ }
if (UNIV_UNLIKELY(!success)) {
/* The following call prints an error message */
@@ -503,25 +544,32 @@ log_online_start_bitmap_file()
fprintf(stderr,
"InnoDB: Error: Cannot create \'%s\'\n",
log_bmp_sys->out.name);
- exit(1);
+ log_bmp_sys->out.file = -1;
+ return FALSE;
}
log_bmp_sys->out.offset = 0;
+ return TRUE;
}
/*********************************************************************//**
-Close the current bitmap output file and create the next one. */
+Close the current bitmap output file and create the next one.
+
+@return TRUE if operation succeeded, FALSE if I/O error */
static
-void
+ibool
log_online_rotate_bitmap_file(
/*===========================*/
ib_uint64_t next_file_start_lsn) /*!<in: the start LSN name
part */
{
- os_file_close(log_bmp_sys->out.file);
+ if (log_bmp_sys->out.file != -1) {
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
log_bmp_sys->out_seq_num++;
log_online_make_bitmap_name(next_file_start_lsn);
- log_online_start_bitmap_file();
+ return log_online_start_bitmap_file();
}
/*********************************************************************//**
@@ -556,8 +604,8 @@ log_online_is_bitmap_file(
Initialize the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_init()
-/*==================*/
+log_online_read_init(void)
+/*======================*/
{
ibool success;
ib_uint64_t tracking_start_lsn
@@ -566,13 +614,16 @@ log_online_read_init()
os_file_stat_t bitmap_dir_file_info;
ib_uint64_t last_file_start_lsn = MIN_TRACKED_LSN;
- /* Assert (could be compile-time assert) that bitmap data start and end
- in a bitmap block is 8-byte aligned */
- ut_a(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
- ut_a(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
+ /* Bitmap data start and end in a bitmap block must be 8-byte
+ aligned. */
+ compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
+ compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
log_bmp_sys = ut_malloc(sizeof(*log_bmp_sys));
+ mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys->mutex,
+ SYNC_LOG_ONLINE);
+
/* Enumerate existing bitmap files to either open the last one to get
the last tracked LSN or to find that there are none and start
tracking from scratch. */
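
Replacing the runtime ut_a() alignment checks with compile_time_assert() moves the failure to build time. A self-contained sketch of the underlying trick (a false condition produces a negative array size); the offsets are made-up values, not the real MODIFIED_PAGE_BLOCK_* constants:

    /* A false condition gives the typedef'd array a negative size and the
       translation unit fails to compile. */
    #define STATIC_ASSERT(expr, name) \
            typedef char static_assert_##name[(expr) ? 1 : -1]

    #define BLOCK_BITMAP      24    /* assumed offset, for the example only */
    #define BLOCK_BITMAP_LEN  4064  /* assumed length, for the example only */

    STATIC_ASSERT(BLOCK_BITMAP % 8 == 0, bitmap_start_aligned);
    STATIC_ASSERT(BLOCK_BITMAP_LEN % 8 == 0, bitmap_len_aligned);

    int main(void)
    {
            return 0;  /* nothing left to check at run time */
    }
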
@@ -629,7 +680,9 @@ log_online_read_init()
if (!success) {
/* New file, tracking from scratch */
- log_online_start_bitmap_file();
+ if (!log_online_start_bitmap_file()) {
+ exit(1);
+ }
}
else {
@@ -637,6 +690,7 @@ log_online_read_init()
ulint size_low;
ulint size_high;
ib_uint64_t last_tracked_lsn;
+ ib_uint64_t file_start_lsn;
success = os_file_get_size(log_bmp_sys->out.file, &size_low,
&size_high);
@@ -667,10 +721,12 @@ log_online_read_init()
if we can retrack any missing data. */
if (log_online_can_track_missing(last_tracked_lsn,
tracking_start_lsn)) {
- log_online_rotate_bitmap_file(last_tracked_lsn);
+ file_start_lsn = last_tracked_lsn;
+ } else {
+ file_start_lsn = tracking_start_lsn;
}
- else {
- log_online_rotate_bitmap_file(tracking_start_lsn);
+ if (!log_online_rotate_bitmap_file(file_start_lsn)) {
+ exit(1);
}
if (last_tracked_lsn < tracking_start_lsn) {
@@ -701,12 +757,15 @@ log_online_read_init()
Shut down the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_shutdown()
-/*======================*/
+log_online_read_shutdown(void)
+/*==========================*/
{
ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list;
- os_file_close(log_bmp_sys->out.file);
+ if (log_bmp_sys->out.file != -1) {
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
rbt_free(log_bmp_sys->modified_pages);
@@ -716,6 +775,8 @@ log_online_read_shutdown()
free_list_node = next;
}
+ mutex_free(&log_bmp_sys->mutex);
+
ut_free(log_bmp_sys);
}
@@ -759,14 +820,16 @@ from the buffer. If an incomplete record is found, moves it to the end of the
buffer. */
static
void
-log_online_parse_redo_log()
-/*=======================*/
+log_online_parse_redo_log(void)
+/*===========================*/
{
byte *ptr = log_bmp_sys->parse_buf;
byte *end = log_bmp_sys->parse_buf_end;
ulint len = 0;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
while (ptr != end
&& log_bmp_sys->next_parse_lsn < log_bmp_sys->end_lsn) {
@@ -857,6 +920,8 @@ log_online_add_to_parse_buf(
ulint actual_data_len = (end_offset >= start_offset)
? end_offset - start_offset : 0;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
ut_memcpy(log_bmp_sys->parse_buf_end, log_block + start_offset,
actual_data_len);
@@ -881,6 +946,8 @@ log_online_parse_redo_log_block(
{
ulint block_data_len;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
block_data_len = log_block_get_data_len(log_block);
ut_ad(block_data_len % OS_FILE_LOG_BLOCK_SIZE == 0
@@ -907,6 +974,8 @@ log_online_follow_log_seg(
byte* log_block_end = log_bmp_sys->read_buf
+ (block_end_lsn - block_start_lsn);
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
mutex_enter(&log_sys->mutex);
log_group_read_log_seg(LOG_RECOVER, log_bmp_sys->read_buf,
group, block_start_lsn, block_end_lsn);
@@ -969,6 +1038,8 @@ log_online_follow_log_group(
ib_uint64_t block_start_lsn = contiguous_lsn;
ib_uint64_t block_end_lsn;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
log_bmp_sys->next_parse_lsn = log_bmp_sys->start_lsn;
log_bmp_sys->parse_buf_end = log_bmp_sys->parse_buf;
@@ -996,19 +1067,26 @@ log_online_follow_log_group(
/*********************************************************************//**
Write, flush one bitmap block to disk and advance the output position if
-successful. */
+successful.
+
+@return TRUE if page written OK, FALSE if I/O error */
static
-void
+ibool
log_online_write_bitmap_page(
/*=========================*/
const byte *block) /*!< in: block to write */
{
ibool success;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
+ /* Simulate a write error */
+ DBUG_EXECUTE_IF("bitmap_page_write_error", return FALSE;);
+
success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
block,
(ulint)(log_bmp_sys->out.offset & 0xFFFFFFFF),
- (ulint)(log_bmp_sys->out.offset << 32),
+ (ulint)(log_bmp_sys->out.offset >> 32),
MODIFIED_PAGE_BLOCK_SIZE);
if (UNIV_UNLIKELY(!success)) {
@@ -1016,7 +1094,7 @@ log_online_write_bitmap_page(
os_file_get_last_error(TRUE);
fprintf(stderr, "InnoDB: Error: failed writing changed page "
"bitmap file \'%s\'\n", log_bmp_sys->out.name);
- return;
+ return FALSE;
}
success = os_file_flush(log_bmp_sys->out.file, FALSE);
@@ -1027,25 +1105,38 @@ log_online_write_bitmap_page(
fprintf(stderr, "InnoDB: Error: failed flushing "
"changed page bitmap file \'%s\'\n",
log_bmp_sys->out.name);
- return;
+ return FALSE;
}
+#ifdef UNIV_LINUX
+ posix_fadvise(log_bmp_sys->out.file, log_bmp_sys->out.offset,
+ MODIFIED_PAGE_BLOCK_SIZE, POSIX_FADV_DONTNEED);
+#endif
+
log_bmp_sys->out.offset += MODIFIED_PAGE_BLOCK_SIZE;
+ return TRUE;
}
/*********************************************************************//**
Append the current changed page bitmap to the bitmap file. Clears the
-bitmap tree and recycles its nodes to the free list. */
+bitmap tree and recycles its nodes to the free list.
+
+@return TRUE if bitmap written OK, FALSE if I/O error*/
static
-void
-log_online_write_bitmap()
-/*=====================*/
+ibool
+log_online_write_bitmap(void)
+/*=========================*/
{
ib_rbt_node_t *bmp_tree_node;
const ib_rbt_node_t *last_bmp_tree_node;
+ ibool success = TRUE;
+
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
if (log_bmp_sys->out.offset >= srv_max_bitmap_file_size) {
- log_online_rotate_bitmap_file(log_bmp_sys->start_lsn);
+ if (!log_online_rotate_bitmap_file(log_bmp_sys->start_lsn)) {
+ return FALSE;
+ }
}
bmp_tree_node = (ib_rbt_node_t *)
@@ -1056,18 +1147,25 @@ log_online_write_bitmap()
byte *page = rbt_value(byte, bmp_tree_node);
- if (bmp_tree_node == last_bmp_tree_node) {
- mach_write_to_4(page + MODIFIED_PAGE_IS_LAST_BLOCK, 1);
- }
+ /* In case of a bitmap page write error, keep looping over
+ the tree to reclaim its memory through the free list instead
+ of returning immediately. */
+ if (UNIV_LIKELY(success)) {
+ if (bmp_tree_node == last_bmp_tree_node) {
+ mach_write_to_4(page
+ + MODIFIED_PAGE_IS_LAST_BLOCK,
+ 1);
+ }
- mach_write_to_8(page + MODIFIED_PAGE_START_LSN,
- log_bmp_sys->start_lsn);
- mach_write_to_8(page + MODIFIED_PAGE_END_LSN,
- log_bmp_sys->end_lsn);
- mach_write_to_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM,
- log_online_calc_checksum(page));
+ mach_write_to_8(page + MODIFIED_PAGE_START_LSN,
+ log_bmp_sys->start_lsn);
+ mach_write_to_8(page + MODIFIED_PAGE_END_LSN,
+ log_bmp_sys->end_lsn);
+ mach_write_to_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM,
+ log_online_calc_checksum(page));
- log_online_write_bitmap_page(page);
+ success = log_online_write_bitmap_page(page);
+ }
bmp_tree_node->left = log_bmp_sys->page_free_list;
log_bmp_sys->page_free_list = bmp_tree_node;
@@ -1077,18 +1175,29 @@ log_online_write_bitmap()
}
rbt_reset(log_bmp_sys->modified_pages);
+ return success;
}
/*********************************************************************//**
Read and parse the redo log up to last checkpoint LSN to build the changed
-page bitmap which is then written to disk. */
+page bitmap which is then written to disk.
+
+@return TRUE if log tracking succeeded, FALSE if bitmap write I/O error */
UNIV_INTERN
-void
-log_online_follow_redo_log()
-/*========================*/
+ibool
+log_online_follow_redo_log(void)
+/*============================*/
{
ib_uint64_t contiguous_start_lsn;
log_group_t* group;
+ ibool result;
+
+ mutex_enter(&log_bmp_sys->mutex);
+
+ if (!srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys->mutex);
+ return FALSE;
+ }
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
@@ -1096,7 +1205,8 @@ log_online_follow_redo_log()
mutex_exit(&(log_sys->mutex));
if (log_bmp_sys->end_lsn == log_bmp_sys->start_lsn) {
- return;
+ mutex_exit(&log_bmp_sys->mutex);
+ return TRUE;
}
group = UT_LIST_GET_FIRST(log_sys->log_groups);
@@ -1114,9 +1224,12 @@ log_online_follow_redo_log()
tracked LSN, so that LSN tracking for this interval is tested. */
DBUG_EXECUTE_IF("crash_before_bitmap_write", DBUG_SUICIDE(););
- log_online_write_bitmap();
+ result = log_online_write_bitmap();
log_bmp_sys->start_lsn = log_bmp_sys->end_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
+
+ mutex_exit(&log_bmp_sys->mutex);
+ return result;
}
/*********************************************************************//**
@@ -1514,3 +1627,81 @@ log_online_bitmap_iterator_next(
return TRUE;
}
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+
+@return FALSE to indicate success, TRUE for failure. */
+UNIV_INTERN
+ibool
+log_online_purge_changed_page_bitmaps(
+/*==================================*/
+ ib_uint64_t lsn) /*!< in: LSN to purge files up to */
+{
+ log_online_bitmap_file_range_t bitmap_files;
+ size_t i;
+ ibool result = FALSE;
+
+ if (lsn == 0) {
+ lsn = IB_ULONGLONG_MAX;
+ }
+
+ if (srv_track_changed_pages) {
+ /* User requests might happen with both enabled and disabled
+ tracking */
+ mutex_enter(&log_bmp_sys->mutex);
+ }
+
+ if (!log_online_setup_bitmap_file_range(&bitmap_files, 0, lsn)) {
+ if (srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys->mutex);
+ }
+ return TRUE;
+ }
+
+ if (srv_track_changed_pages && lsn >= log_bmp_sys->end_lsn) {
+ /* If we have to delete the current output file, close it
+ first. */
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
+
+ for (i = 0; i < bitmap_files.count; i++) {
+ if (bitmap_files.files[i].seq_num == 0
+ || bitmap_files.files[i].start_lsn >= lsn) {
+ break;
+ }
+ if (!os_file_delete_if_exists(bitmap_files.files[i].name)) {
+ os_file_get_last_error(TRUE);
+ result = TRUE;
+ break;
+ }
+ }
+
+ if (srv_track_changed_pages) {
+ if (lsn > log_bmp_sys->end_lsn) {
+ ib_uint64_t new_file_lsn;
+ if (lsn == IB_ULONGLONG_MAX) {
+ /* RESET restarts the sequence */
+ log_bmp_sys->out_seq_num = 0;
+ new_file_lsn = 0;
+ } else {
+ new_file_lsn = log_bmp_sys->end_lsn;
+ }
+ if (!log_online_rotate_bitmap_file(new_file_lsn)) {
+ /* If file create failed, signal the log
+ tracking thread to quit next time it wakes
+ up. */
+ srv_track_changed_pages = FALSE;
+ }
+ }
+
+ mutex_exit(&log_bmp_sys->mutex);
+ }
+
+ free(bitmap_files.files);
+ return result;
+}
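
One detail worth noting in the hunks above is the os_file_write() argument change from offset << 32 to offset >> 32: the call takes the 64-bit bitmap file offset as separate low and high 32-bit words, and only the right shift produces the high word. A small sketch of the split (types and the test value are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t offset = 1ULL << 33;  /* an offset past 4 GiB */

            uint32_t low  = (uint32_t)(offset & 0xFFFFFFFFULL);
            uint32_t high = (uint32_t)(offset >> 32);

            /* low=0 high=2: shifting left instead would not recover the
               high word. */
            printf("offset=%llu low=%u high=%u\n",
                   (unsigned long long) offset, (unsigned) low, (unsigned) high);
            return 0;
    }
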
diff --git a/storage/xtradb/log/log0recv.c b/storage/xtradb/log/log0recv.c
index 5ab8c14ae2e..4e179afd50c 100644
--- a/storage/xtradb/log/log0recv.c
+++ b/storage/xtradb/log/log0recv.c
@@ -994,8 +994,11 @@ recv_parse_or_apply_log_rec_body(
not NULL, then the log record is
applied to the page, and the log
record should be complete then */
- mtr_t* mtr) /*!< in: mtr or NULL; should be non-NULL
+ mtr_t* mtr, /*!< in: mtr or NULL; should be non-NULL
if and only if block is non-NULL */
+ ulint space_id)
+ /*!< in: tablespace id obtained by
+ parsing initial log record */
{
dict_index_t* index = NULL;
page_t* page;
@@ -1267,8 +1270,11 @@ recv_parse_or_apply_log_rec_body(
ut_ad(!page || page_type != FIL_PAGE_TYPE_ALLOCATED);
ptr = mlog_parse_string(ptr, end_ptr, page, page_zip);
break;
- case MLOG_FILE_CREATE:
case MLOG_FILE_RENAME:
+ ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type,
+ space_id, 0);
+ break;
+ case MLOG_FILE_CREATE:
case MLOG_FILE_DELETE:
case MLOG_FILE_CREATE2:
ptr = fil_op_log_parse_or_replay(ptr, end_ptr, type, 0, 0);
@@ -1672,7 +1678,8 @@ recv_recover_page_func(
recv_parse_or_apply_log_rec_body(recv->type, buf,
buf + recv->len,
- block, &mtr);
+ block, &mtr,
+ recv_addr->space);
if (srv_recovery_stats) {
mutex_enter(&(recv_sys->mutex));
@@ -1704,9 +1711,8 @@ recv_recover_page_func(
if (fil_page_get_type(page) == FIL_PAGE_INDEX) {
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
- if (page_zip) {
- ut_a(page_zip_validate_low(page_zip, page, FALSE));
- }
+ ut_a(!page_zip
+ || page_zip_validate_low(page_zip, page, NULL, FALSE));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -2158,7 +2164,7 @@ recv_parse_log_rec(
#endif /* UNIV_LOG_LSN_DEBUG */
new_ptr = recv_parse_or_apply_log_rec_body(*type, new_ptr, end_ptr,
- NULL, NULL);
+ NULL, NULL, *space);
if (UNIV_UNLIKELY(new_ptr == NULL)) {
return(0);
diff --git a/storage/xtradb/os/os0file.c b/storage/xtradb/os/os0file.c
index 8e0516a84a9..8f1b3e46bb2 100644
--- a/storage/xtradb/os/os0file.c
+++ b/storage/xtradb/os/os0file.c
@@ -1454,6 +1454,43 @@ os_file_set_nocache(
#endif
}
+
+#ifdef __linux__
+#include <sys/ioctl.h>
+#ifndef DFS_IOCTL_ATOMIC_WRITE_SET
+#define DFS_IOCTL_ATOMIC_WRITE_SET _IOW(0x95, 2, uint)
+#endif
+static int os_file_set_atomic_writes(os_file_t file, const char *name)
+{
+ static int first_time = 1;
+ int atomic_option = 1;
+
+ int ret = ioctl (file, DFS_IOCTL_ATOMIC_WRITE_SET, &atomic_option);
+
+ if (ret) {
+ fprintf(stderr,
+ "InnoDB : can't use atomic write on %s, errno %d\n",
+ name, errno);
+ return ret;
+ }
+ return ret;
+}
+#else
+static int os_file_set_atomic_writes(os_file_t file, const char *name)
+{
+ fprintf(stderr,
+ "InnoDB : can't use atomic writes on %s - not implemented on this platform."
+ "innodb_use_atomic_writes needs to be 0.\n",
+ name);
+#ifdef _WIN32
+ SetLastError(ERROR_INVALID_FUNCTION);
+#else
+ errno = EINVAL;
+#endif
+ return -1;
+}
+#endif
+
/****************************************************************//**
NOTE! Use the corresponding macro os_file_create(), not directly
this function!
@@ -1490,6 +1527,13 @@ os_file_create_func(
DWORD create_flag;
DWORD attributes;
ibool retry;
+
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ SetLastError(ERROR_DISK_FULL);
+ return((os_file_t) -1);
+ );
try_again:
ut_a(name);
@@ -1611,6 +1655,13 @@ try_again:
}
}
+ if (srv_use_atomic_writes && type == OS_DATA_FILE &&
+ os_file_set_atomic_writes(file, name)) {
+ CloseHandle(file);
+ *success = FALSE;
+ file = INVALID_HANDLE_VALUE;
+ }
+
return(file);
#else /* __WIN__ */
os_file_t file;
@@ -1618,6 +1669,12 @@ try_again:
ibool retry;
const char* mode_str = NULL;
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ errno = ENOSPC;
+ return((os_file_t) -1);
+ );
try_again:
ut_a(name);
@@ -1724,6 +1781,12 @@ try_again:
file = -1;
}
#endif /* USE_FILE_LOCK */
+ if (srv_use_atomic_writes && type == OS_DATA_FILE
+ && os_file_set_atomic_writes(file, name)) {
+ close(file);
+ *success = FALSE;
+ file = -1;
+ }
return(file);
#endif /* __WIN__ */
@@ -2068,6 +2131,28 @@ os_file_set_size(
current_size = 0;
desired_size = (ib_int64_t)size + (((ib_int64_t)size_high) << 32);
+#ifdef HAVE_POSIX_FALLOCATE
+ if (srv_use_posix_fallocate) {
+ if (posix_fallocate(file, current_size, desired_size) == -1) {
+ fprintf(stderr,
+ "InnoDB: Error: preallocating data for"
+ " file %s failed at\n"
+ "InnoDB: offset 0 size %lld %lld. Operating system"
+ " error number %llu.\n"
+ "InnoDB: Check that the disk is not full"
+ " or a disk quota exceeded.\n"
+ "InnoDB: Some operating system error numbers"
+ " are described at\n"
+ "InnoDB: "
+ REFMAN "operating-system-error-codes.html\n",
+ name, (long long)size_high, (long long)size, errno);
+
+ return (FALSE);
+ }
+ return (TRUE);
+ }
+#endif
+
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (desired_size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
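
When srv_use_posix_fallocate is set, the new branch above preallocates the data file in one call instead of writing zero-filled buffers up to the desired size. A minimal standalone sketch of posix_fallocate() usage; note that per POSIX it returns 0 on success or an error number directly rather than setting errno, which is how the sketch reports failure (file name and size here are arbitrary):

    #define _XOPEN_SOURCE 600
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *name = "prealloc_demo.ibd";  /* throw-away demo file */
            int fd = open(name, O_CREAT | O_RDWR, 0644);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Returns 0 on success or an error number (errno is untouched). */
            int err = posix_fallocate(fd, 0, 16 * 1024 * 1024);
            if (err != 0) {
                    fprintf(stderr, "preallocating %s failed: %s\n",
                            name, strerror(err));
            }

            close(fd);
            unlink(name);
            return err != 0;
    }
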
@@ -2377,7 +2462,7 @@ os_file_pread(
os_n_file_reads++;
- if (innobase_get_slow_log() && trx && trx->take_stats)
+ if (UNIV_UNLIKELY(trx && trx->take_stats))
{
trx->io_reads++;
trx->io_read += n;
@@ -2410,7 +2495,7 @@ os_file_pread(
os_n_pending_reads--;
os_mutex_exit(os_file_count_mutex);
- if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
+ if (UNIV_UNLIKELY(start_time != 0))
{
ut_usectime(&sec, &ms);
finish_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -2464,7 +2549,7 @@ os_file_pread(
os_n_pending_reads--;
os_mutex_exit(os_file_count_mutex);
- if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
+ if (UNIV_UNLIKELY(start_time != 0))
{
ut_usectime(&sec, &ms);
finish_time = (ib_uint64_t)sec * 1000000 + ms;
@@ -4245,8 +4330,8 @@ os_aio_func(
ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
- ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
- ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0);
+ ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0);
+ ut_ad(offset % OS_MIN_LOG_BLOCK_SIZE == 0);
ut_ad(os_aio_validate_skip());
#ifdef WIN_ASYNC_IO
ut_ad((n & 0xFFFFFFFFUL) == n);
diff --git a/storage/xtradb/page/page0cur.c b/storage/xtradb/page/page0cur.c
index d49b121afab..a722f5b188d 100644
--- a/storage/xtradb/page/page0cur.c
+++ b/storage/xtradb/page/page0cur.c
@@ -310,7 +310,7 @@ page_cur_search_with_match(
#endif /* UNIV_DEBUG */
page = buf_block_get_frame(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_check_dir(page);
@@ -1248,7 +1248,7 @@ page_cur_insert_rec_zip(
ut_ad(!page_rec_is_supremum(*current_rec));
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* 1. Get the size of the physical record in the page */
@@ -1902,6 +1902,7 @@ page_cur_delete_rec(
/* Save to local variables some data associated with current_rec */
cur_slot_no = page_dir_find_owner_slot(current_rec);
+ ut_ad(cur_slot_no > 0);
cur_dir_slot = page_dir_get_nth_slot(page, cur_slot_no);
cur_n_owned = page_dir_slot_get_n_owned(cur_dir_slot);
@@ -1972,7 +1973,7 @@ page_cur_delete_rec(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
diff --git a/storage/xtradb/page/page0page.c b/storage/xtradb/page/page0page.c
index e29fa2eb1e5..f2ce6c9fe16 100644
--- a/storage/xtradb/page/page0page.c
+++ b/storage/xtradb/page/page0page.c
@@ -626,7 +626,7 @@ page_copy_rec_list_end(
Furthermore, btr_compress() may set FIL_PAGE_PREV to
FIL_NULL on new_page while leaving it intact on
new_page_zip. So, we cannot validate new_page_zip. */
- ut_a(page_zip_validate_low(page_zip, page, TRUE));
+ ut_a(page_zip_validate_low(page_zip, page, index, TRUE));
}
#endif /* UNIV_ZIP_DEBUG */
ut_ad(buf_block_get_frame(block) == page);
@@ -796,8 +796,8 @@ zip_reorganize:
/* Before copying, "ret" was the predecessor
of the predefined supremum record. If it was
the predefined infimum record, then it would
- still be the infimum. Thus, the assertion
- ut_a(ret_pos > 0) would fail here. */
+ still be the infimum, and we would have
+ ret_pos == 0. */
if (UNIV_UNLIKELY
(!page_zip_reorganize(new_block, index, mtr))) {
@@ -946,7 +946,7 @@ page_delete_rec_list_end(
ut_ad(size == ULINT_UNDEFINED || size < UNIV_PAGE_SIZE);
ut_ad(!page_zip || page_rec_is_comp(rec));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_rec_is_infimum(rec)) {
@@ -988,7 +988,7 @@ page_delete_rec_list_end(
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&cur, index, offsets, mtr);
} while (page_offset(rec) != PAGE_NEW_SUPREMUM);
@@ -1052,6 +1052,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_new(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
} else {
rec_t* rec2 = rec;
@@ -1067,6 +1068,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_old(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
}
@@ -1126,7 +1128,8 @@ page_delete_rec_list_start(
between btr_attach_half_pages() and insert_page = ...
when btr_page_get_split_rec_to_left() holds
(direction == FSP_DOWN). */
- ut_a(!page_zip || page_zip_validate_low(page_zip, page, TRUE));
+ ut_a(!page_zip
+ || page_zip_validate_low(page_zip, page, index, TRUE));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -1197,9 +1200,10 @@ page_move_rec_list_end(
= buf_block_get_page_zip(block);
ut_a(!new_page_zip == !page_zip);
ut_a(!new_page_zip
- || page_zip_validate(new_page_zip, new_page));
+ || page_zip_validate(new_page_zip, new_page, index));
ut_a(!page_zip
- || page_zip_validate(page_zip, page_align(split_rec)));
+ || page_zip_validate(page_zip, page_align(split_rec),
+ index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -1471,6 +1475,10 @@ page_rec_get_nth_const(
ulint n_owned;
const rec_t* rec;
+ if (nth == 0) {
+ return(page_get_infimum_rec(page));
+ }
+
ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1));
for (i = 0;; i++) {
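
The page_cur_delete_rec() and page0page.c hunks above add ut_ad(cur_slot_no > 0) / ut_ad(slot_index > 0) assertions and make page_rec_get_nth_const() return the infimum directly for nth == 0. Both changes encode the expectation that a user record's owner slot is never slot 0, the slot that owns the infimum. The toy model below only illustrates that invariant; the array layout, offsets and names are invented and are not InnoDB code.

#include <assert.h>
#include <stdio.h>

/* Toy page directory: slot 0 always owns the infimum sentinel,
   user and supremum records are owned by slots 1..N_SLOTS-1. */
#define INFIMUM_OFFSET  99
#define N_SLOTS         4

static const unsigned owner_of_slot[N_SLOTS] = {
	INFIMUM_OFFSET,         /* slot 0: infimum */
	120, 180, 250           /* slots owning later records */
};

static unsigned
find_owner_slot(unsigned rec_offset)
{
	unsigned	i;

	for (i = 0; i < N_SLOTS; i++) {
		if (owner_of_slot[i] == rec_offset) {
			return i;
		}
	}

	assert(!"record not owned by any slot");
	return 0;
}

int
main(void)
{
	unsigned	slot = find_owner_slot(180);

	/* A user record can never resolve to slot 0. */
	assert(slot > 0);
	printf("record at offset 180 is owned by slot %u\n", slot);
	return 0;
}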
diff --git a/storage/xtradb/page/page0zip.c b/storage/xtradb/page/page0zip.c
index 4751f4816a9..5357479908f 100644
--- a/storage/xtradb/page/page0zip.c
+++ b/storage/xtradb/page/page0zip.c
@@ -1437,7 +1437,7 @@ err_exit:
page_zip_get_size(page_zip) - PAGE_DATA);
mem_heap_free(heap);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (mtr) {
@@ -3123,6 +3123,7 @@ page_zip_validate_low(
/*==================*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index, /*!< in: index of the page, if known */
ibool sloppy) /*!< in: FALSE=strict,
TRUE=ignore the MIN_REC_FLAG */
{
@@ -3210,39 +3211,102 @@ page_zip_validate_low(
committed. Let us tolerate that difference when we
are performing a sloppy validation. */
- if (sloppy) {
- byte info_bits_diff;
- ulint offset
- = rec_get_next_offs(page + PAGE_NEW_INFIMUM,
- TRUE);
- ut_a(offset >= PAGE_NEW_SUPREMUM);
- offset -= 5 /* REC_NEW_INFO_BITS */;
-
- info_bits_diff = page[offset] ^ temp_page[offset];
-
- if (info_bits_diff == REC_INFO_MIN_REC_FLAG) {
- temp_page[offset] = page[offset];
-
- if (!memcmp(page + PAGE_HEADER,
- temp_page + PAGE_HEADER,
- UNIV_PAGE_SIZE - PAGE_HEADER
- - FIL_PAGE_DATA_END)) {
-
- /* Only the minimum record flag
- differed. Let us ignore it. */
- page_zip_fail(("page_zip_validate: "
- "min_rec_flag "
- "(ignored, "
- "%lu,%lu,0x%02lx)\n",
- page_get_space_id(page),
- page_get_page_no(page),
- (ulong) page[offset]));
- goto func_exit;
+ ulint* offsets;
+ mem_heap_t* heap;
+ const rec_t* rec;
+ const rec_t* trec;
+ byte info_bits_diff;
+ ulint offset
+ = rec_get_next_offs(page + PAGE_NEW_INFIMUM, TRUE);
+ ut_a(offset >= PAGE_NEW_SUPREMUM);
+ offset -= 5/*REC_NEW_INFO_BITS*/;
+
+ info_bits_diff = page[offset] ^ temp_page[offset];
+
+ if (info_bits_diff == REC_INFO_MIN_REC_FLAG) {
+ temp_page[offset] = page[offset];
+
+ if (!memcmp(page + PAGE_HEADER,
+ temp_page + PAGE_HEADER,
+ UNIV_PAGE_SIZE - PAGE_HEADER
+ - FIL_PAGE_DATA_END)) {
+
+ /* Only the minimum record flag
+ differed. Let us ignore it. */
+ page_zip_fail(("page_zip_validate: "
+ "min_rec_flag "
+ "(%s"
+ "%lu,%lu,0x%02lx)\n",
+ sloppy ? "ignored, " : "",
+ page_get_space_id(page),
+ page_get_page_no(page),
+ (ulong) page[offset]));
+ valid = sloppy;
+ goto func_exit;
+ }
+ }
+
+ /* Compare the pointers in the PAGE_FREE list. */
+ rec = page_header_get_ptr(page, PAGE_FREE);
+ trec = page_header_get_ptr(temp_page, PAGE_FREE);
+
+ while (rec || trec) {
+ if (page_offset(rec) != page_offset(trec)) {
+ page_zip_fail(("page_zip_validate: "
+ "PAGE_FREE list: %u!=%u\n",
+ (unsigned) page_offset(rec),
+ (unsigned) page_offset(trec)));
+ valid = FALSE;
+ goto func_exit;
+ }
+
+ rec = page_rec_get_next_low(rec, TRUE);
+ trec = page_rec_get_next_low(trec, TRUE);
+ }
+
+ /* Compare the records. */
+ heap = NULL;
+ offsets = NULL;
+ rec = page_rec_get_next_low(
+ page + PAGE_NEW_INFIMUM, TRUE);
+ trec = page_rec_get_next_low(
+ temp_page + PAGE_NEW_INFIMUM, TRUE);
+
+ do {
+ if (page_offset(rec) != page_offset(trec)) {
+ page_zip_fail(("page_zip_validate: "
+ "record list: 0x%02x!=0x%02x\n",
+ (unsigned) page_offset(rec),
+ (unsigned) page_offset(trec)));
+ valid = FALSE;
+ break;
+ }
+
+ if (index) {
+ /* Compare the data. */
+ offsets = rec_get_offsets(
+ rec, index, offsets,
+ ULINT_UNDEFINED, &heap);
+
+ if (memcmp(rec - rec_offs_extra_size(offsets),
+ trec - rec_offs_extra_size(offsets),
+ rec_offs_size(offsets))) {
+ page_zip_fail(
+ ("page_zip_validate: "
+ "record content: 0x%02x",
+ (unsigned) page_offset(rec)));
+ valid = FALSE;
+ break;
}
}
+
+ rec = page_rec_get_next_low(rec, TRUE);
+ trec = page_rec_get_next_low(trec, TRUE);
+ } while (rec || trec);
+
+ if (heap) {
+ mem_heap_free(heap);
}
- page_zip_fail(("page_zip_validate: content\n"));
- valid = FALSE;
}
func_exit:
@@ -3264,9 +3328,10 @@ ibool
page_zip_validate(
/*==============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
- const page_t* page) /*!< in: uncompressed page */
+ const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index) /*!< in: index of the page, if known */
{
- return(page_zip_validate_low(page_zip, page,
+ return(page_zip_validate_low(page_zip, page, index,
recv_recovery_is_on()));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3597,7 +3662,7 @@ page_zip_write_rec(
page_zip->m_nonempty = TRUE;
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page_align(rec)));
+ ut_a(page_zip_validate(page_zip, page_align(rec), index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3644,7 +3709,7 @@ corrupt:
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
memcpy(page + offset,
@@ -3653,7 +3718,7 @@ corrupt:
ptr + 4, BTR_EXTERN_FIELD_REF_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3720,7 +3785,7 @@ page_zip_write_blob_ptr(
memcpy(externs, field, BTR_EXTERN_FIELD_REF_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (mtr) {
@@ -3791,7 +3856,7 @@ corrupt:
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
field = page + offset;
@@ -3812,7 +3877,7 @@ corrupt:
memcpy(storage, ptr + 4, REC_NODE_PTR_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4039,7 +4104,7 @@ page_zip_clear_rec(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4063,7 +4128,7 @@ page_zip_rec_set_deleted(
*slot &= ~(PAGE_ZIP_DIR_SLOT_DEL >> 8);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page_align(rec)));
+ ut_a(page_zip_validate(page_zip, page_align(rec), NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4364,14 +4429,14 @@ corrupt:
goto corrupt;
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
memcpy(page + offset, ptr, len);
memcpy(page_zip->data + offset, ptr, len);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4449,7 +4514,7 @@ page_zip_reorganize(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_is_comp(page));
ut_ad(!dict_index_is_ibuf(index));
- /* Note that page_zip_validate(page_zip, page) may fail here. */
+ /* Note that page_zip_validate(page_zip, page, index) may fail here. */
UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
@@ -4536,7 +4601,7 @@ page_zip_copy_recs(
FIL_PAGE_PREV or PAGE_LEVEL, causing a temporary min_rec_flag
mismatch. A strict page_zip_validate() will be executed later
during the B-tree operations. */
- ut_a(page_zip_validate_low(src_zip, src, TRUE));
+ ut_a(page_zip_validate_low(src_zip, src, index, TRUE));
#endif /* UNIV_ZIP_DEBUG */
ut_a(page_zip_get_size(page_zip) == page_zip_get_size(src_zip));
if (UNIV_UNLIKELY(src_zip->n_blobs)) {
@@ -4597,7 +4662,7 @@ page_zip_copy_recs(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
btr_blob_dbg_add(page, index, "page_zip_copy_recs");
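
The enlarged page_zip_validate_low() above no longer reports a bare "content" mismatch: it walks the PAGE_FREE list and then the record list of the real page and of the re-decompressed copy in lockstep, failing on the first offset mismatch, and compares record contents only when the index is known. The standalone sketch below shows the same lockstep-walk pattern on two singly linked lists; list_node, the offset fields and lists_equal() are invented for illustration and are not InnoDB APIs.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical node standing in for records linked by next pointers. */
typedef struct list_node {
	unsigned		offset;  /* stands in for page_offset(rec) */
	struct list_node*	next;    /* stands in for the next-record pointer */
} list_node;

/* Walk two lists in lockstep, as the new PAGE_FREE check does:
   report the first position where the offsets differ, including the
   case where one list ends before the other (NULL counts as offset 0). */
static int
lists_equal(const list_node* a, const list_node* b)
{
	while (a || b) {
		unsigned	oa = a ? a->offset : 0;
		unsigned	ob = b ? b->offset : 0;

		if (oa != ob) {
			fprintf(stderr, "list mismatch: %u != %u\n", oa, ob);
			return 0;
		}

		a = a->next;
		b = b->next;
	}

	return 1;
}

int
main(void)
{
	list_node	a2 = { 300, NULL }, a1 = { 120, &a2 };
	list_node	b2 = { 310, NULL }, b1 = { 120, &b2 };

	printf("identical prefix, diverging tail: %s\n",
	       lists_equal(&a1, &b1) ? "equal" : "different");
	printf("list compared with itself: %s\n",
	       lists_equal(&a1, &a1) ? "equal" : "different");
	return 0;
}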
diff --git a/storage/xtradb/rem/rem0rec.c b/storage/xtradb/rem/rem0rec.c
index 30fc28561fa..6bd40c54a0c 100644
--- a/storage/xtradb/rem/rem0rec.c
+++ b/storage/xtradb/rem/rem0rec.c
@@ -167,7 +167,6 @@ rec_get_n_extern_new(
{
const byte* nulls;
const byte* lens;
- dict_field_t* field;
ulint null_mask;
ulint n_extern;
ulint i;
@@ -188,10 +187,13 @@ rec_get_n_extern_new(
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -209,8 +211,6 @@ rec_get_n_extern_new(
if (UNIV_UNLIKELY(!field->fixed_len)) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -239,16 +239,15 @@ rec_get_n_extern_new(
Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT. This is a special case of
rec_init_offsets() and rec_get_offsets_func(). */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_init_offsets_comp_ordinary(
/*===========================*/
const rec_t* rec, /*!< in: physical record in
ROW_FORMAT=COMPACT */
- ulint extra, /*!< in: number of bytes to reserve
- between the record header and
- the data payload
- (usually REC_N_NEW_EXTRA_BYTES) */
+ ibool temp, /*!< in: whether to use the
+ format for temporary files in
+ index creation */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
@@ -256,27 +255,38 @@ rec_init_offsets_comp_ordinary(
ulint i = 0;
ulint offs = 0;
ulint any_ext = 0;
- const byte* nulls = rec - (extra + 1);
+ const byte* nulls = temp
+ ? rec - 1
+ : rec - (1 + REC_N_NEW_EXTRA_BYTES);
const byte* lens = nulls
- UT_BITS_IN_BYTES(index->n_nullable);
- dict_field_t* field;
ulint null_mask = 1;
#ifdef UNIV_DEBUG
- /* We cannot invoke rec_offs_make_valid() here, because it can hold
- that extra != REC_N_NEW_EXTRA_BYTES. Similarly, rec_offs_validate()
- will fail in that case, because it invokes rec_get_status(). */
+ /* We cannot invoke rec_offs_make_valid() here if temp=TRUE.
+ Similarly, rec_offs_validate() will fail in that case, because
+ it invokes rec_get_status(). */
offsets[2] = (ulint) rec;
offsets[3] = (ulint) index;
#endif /* UNIV_DEBUG */
+ ut_ad(temp || dict_table_is_comp(index->table));
+
+ if (temp && dict_table_is_comp(index->table)) {
+		/* No need to adjust fixed_len=0. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
do {
- ulint len;
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+ ulint len;
- field = dict_index_get_nth_field(index, i);
- if (!(dict_field_get_col(field)->prtype
- & DATA_NOT_NULL)) {
+ if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
if (UNIV_UNLIKELY(!(byte) null_mask)) {
@@ -296,10 +306,9 @@ rec_init_offsets_comp_ordinary(
null_mask <<= 1;
}
- if (UNIV_UNLIKELY(!field->fixed_len)) {
+ if (!field->fixed_len
+ || (temp && !dict_col_get_fixed_size(col, temp))) {
/* Variable-length field: read the length */
- const dict_col_t* col
- = dict_field_get_col(field);
len = *lens--;
/* If the maximum length of the field is up
to 255 bytes, the actual length is always
@@ -393,9 +402,8 @@ rec_init_offsets(
= dict_index_get_n_unique_in_tree(index);
break;
case REC_STATUS_ORDINARY:
- rec_init_offsets_comp_ordinary(rec,
- REC_N_NEW_EXTRA_BYTES,
- index, offsets);
+ rec_init_offsets_comp_ordinary(
+ rec, FALSE, index, offsets);
return;
}
@@ -766,17 +774,19 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INTERN
+UNIV_INLINE __attribute__((warn_unused_result, nonnull(1,2)))
ulint
-rec_get_converted_size_comp_prefix(
-/*===============================*/
+rec_get_converted_size_comp_prefix_low(
+/*===================================*/
const dict_index_t* index, /*!< in: record descriptor;
dict_table_is_comp() is
assumed to hold, even if
it does not */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
- ulint* extra) /*!< out: extra size */
+ ulint* extra, /*!< out: extra size */
+ ibool temp) /*!< in: whether this is a
+ temporary file record */
{
ulint extra_size;
ulint data_size;
@@ -785,15 +795,25 @@ rec_get_converted_size_comp_prefix(
ut_ad(fields);
ut_ad(n_fields > 0);
ut_ad(n_fields <= dict_index_get_n_fields(index));
+ ut_ad(!temp || extra);
- extra_size = REC_N_NEW_EXTRA_BYTES
+ extra_size = temp
+ ? UT_BITS_IN_BYTES(index->n_nullable)
+ : REC_N_NEW_EXTRA_BYTES
+ UT_BITS_IN_BYTES(index->n_nullable);
data_size = 0;
+ if (temp && dict_table_is_comp(index->table)) {
+		/* No need to adjust fixed_len=0. We only need to
+ adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+
/* read the lengths of fields 0..n */
for (i = 0; i < n_fields; i++) {
const dict_field_t* field;
ulint len;
+ ulint fixed_len;
const dict_col_t* col;
field = dict_index_get_nth_field(index, i);
@@ -809,8 +829,14 @@ rec_get_converted_size_comp_prefix(
continue;
}
- ut_ad(len <= col->len || col->mtype == DATA_BLOB);
+ ut_ad(len <= col->len || col->mtype == DATA_BLOB
+ || (col->len == 0 && col->mtype == DATA_VARCHAR));
+ fixed_len = field->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -818,11 +844,20 @@ rec_get_converted_size_comp_prefix(
0..127. The length will be encoded in two bytes when
it is 128 or more, or when the field is stored externally. */
- if (field->fixed_len) {
- ut_ad(len == field->fixed_len);
+ if (fixed_len) {
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(col->mbminmaxlen);
+
+ ut_ad(len <= fixed_len);
+
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
+
/* dict_index_add_col() should guarantee this */
ut_ad(!field->prefix_len
- || field->fixed_len == field->prefix_len);
+ || fixed_len == field->prefix_len);
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(&fields[i])) {
ut_ad(col->len >= 256 || col->mtype == DATA_BLOB);
extra_size += 2;
@@ -839,7 +874,7 @@ rec_get_converted_size_comp_prefix(
data_size += len;
}
- if (UNIV_LIKELY_NULL(extra)) {
+ if (extra) {
*extra = extra_size;
}
@@ -847,6 +882,23 @@ rec_get_converted_size_comp_prefix(
}
/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_comp_prefix(
+/*===============================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ ut_ad(dict_table_is_comp(index->table));
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
+}
+
+/**********************************************************//**
Determines the size of a data tuple in ROW_FORMAT=COMPACT.
@return total size */
UNIV_INTERN
@@ -890,8 +942,8 @@ rec_get_converted_size_comp(
return(ULINT_UNDEFINED);
}
- return(size + rec_get_converted_size_comp_prefix(index, fields,
- n_fields, extra));
+ return(size + rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, FALSE));
}
/***********************************************************//**
@@ -1068,19 +1120,18 @@ rec_convert_dtuple_to_rec_old(
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
-UNIV_INTERN
+UNIV_INLINE __attribute__((nonnull))
void
rec_convert_dtuple_to_rec_comp(
/*===========================*/
rec_t* rec, /*!< in: origin of record */
- ulint extra, /*!< in: number of bytes to
- reserve between the record
- header and the data payload
- (normally REC_N_NEW_EXTRA_BYTES) */
const dict_index_t* index, /*!< in: record descriptor */
- ulint status, /*!< in: status bits of the record */
const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields)/*!< in: number of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint status, /*!< in: status bits of the record */
+ ibool temp) /*!< in: whether to use the
+ format for temporary files in
+ index creation */
{
const dfield_t* field;
const dtype_t* type;
@@ -1092,31 +1143,44 @@ rec_convert_dtuple_to_rec_comp(
ulint n_node_ptr_field;
ulint fixed_len;
ulint null_mask = 1;
- ut_ad(extra == 0 || dict_table_is_comp(index->table));
- ut_ad(extra == 0 || extra == REC_N_NEW_EXTRA_BYTES);
+ ut_ad(temp || dict_table_is_comp(index->table));
ut_ad(n_fields > 0);
- switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
- case REC_STATUS_ORDINARY:
+ if (temp) {
+ ut_ad(status == REC_STATUS_ORDINARY);
ut_ad(n_fields <= dict_index_get_n_fields(index));
n_node_ptr_field = ULINT_UNDEFINED;
- break;
- case REC_STATUS_NODE_PTR:
- ut_ad(n_fields == dict_index_get_n_unique_in_tree(index) + 1);
- n_node_ptr_field = n_fields - 1;
- break;
- case REC_STATUS_INFIMUM:
- case REC_STATUS_SUPREMUM:
- ut_ad(n_fields == 1);
- n_node_ptr_field = ULINT_UNDEFINED;
- break;
- default:
- ut_error;
- return;
+ nulls = rec - 1;
+ if (dict_table_is_comp(index->table)) {
+			/* No need to adjust fixed_len=0. We only
+ need to adjust it for ROW_FORMAT=REDUNDANT. */
+ temp = FALSE;
+ }
+ } else {
+ nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
+
+ switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
+ case REC_STATUS_ORDINARY:
+ ut_ad(n_fields <= dict_index_get_n_fields(index));
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ case REC_STATUS_NODE_PTR:
+ ut_ad(n_fields
+ == dict_index_get_n_unique_in_tree(index) + 1);
+ n_node_ptr_field = n_fields - 1;
+ break;
+ case REC_STATUS_INFIMUM:
+ case REC_STATUS_SUPREMUM:
+ ut_ad(n_fields == 1);
+ n_node_ptr_field = ULINT_UNDEFINED;
+ break;
+ default:
+ ut_error;
+ return;
+ }
}
end = rec;
- nulls = rec - (extra + 1);
lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
/* clear the SQL-null flags */
memset(lens + 1, 0, nulls - lens);
@@ -1162,6 +1226,10 @@ rec_convert_dtuple_to_rec_comp(
ifield = dict_index_get_nth_field(index, i);
fixed_len = ifield->fixed_len;
+ if (temp && fixed_len
+ && !dict_col_get_fixed_size(ifield->col, temp)) {
+ fixed_len = 0;
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -1169,8 +1237,17 @@ rec_convert_dtuple_to_rec_comp(
0..127. The length will be encoded in two bytes when
it is 128 or more, or when the field is stored externally. */
if (fixed_len) {
- ut_ad(len == fixed_len);
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(
+ ifield->col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(
+ ifield->col->mbminmaxlen);
+
+ ut_ad(len <= fixed_len);
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
ut_ad(!dfield_is_ext(field));
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(field)) {
ut_ad(ifield->col->len >= 256
|| ifield->col->mtype == DATA_BLOB);
@@ -1222,8 +1299,7 @@ rec_convert_dtuple_to_rec_new(
rec = buf + extra_size;
rec_convert_dtuple_to_rec_comp(
- rec, REC_N_NEW_EXTRA_BYTES, index, status,
- dtuple->fields, dtuple->n_fields);
+ rec, index, dtuple->fields, dtuple->n_fields, status, FALSE);
/* Set the info bits of the record */
rec_set_info_and_status_bits(rec, dtuple_get_info_bits(dtuple));
@@ -1285,6 +1361,54 @@ rec_convert_dtuple_to_rec(
return(rec);
}
+#ifndef UNIV_HOTBACKUP
+/**********************************************************//**
+Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@return total size */
+UNIV_INTERN
+ulint
+rec_get_converted_size_temp(
+/*========================*/
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields,/*!< in: number of data fields */
+ ulint* extra) /*!< out: extra size */
+{
+ return(rec_get_converted_size_comp_prefix_low(
+ index, fields, n_fields, extra, TRUE));
+}
+
+/******************************************************//**
+Determine the offset to each field in temporary file.
+@see rec_convert_dtuple_to_temp() */
+UNIV_INTERN
+void
+rec_init_offsets_temp(
+/*==================*/
+ const rec_t* rec, /*!< in: temporary file record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint* offsets)/*!< in/out: array of offsets;
+ in: n=rec_offs_n_fields(offsets) */
+{
+ rec_init_offsets_comp_ordinary(rec, TRUE, index, offsets);
+}
+
+/*********************************************************//**
+Builds a temporary file record out of a data tuple.
+@see rec_init_offsets_temp() */
+UNIV_INTERN
+void
+rec_convert_dtuple_to_temp(
+/*=======================*/
+ rec_t* rec, /*!< out: record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const dfield_t* fields, /*!< in: array of data fields */
+ ulint n_fields) /*!< in: number of fields */
+{
+ rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
+ REC_STATUS_ORDINARY, TRUE);
+}
+
/**************************************************************//**
Copies the first n fields of a physical record to a data tuple. The fields
are copied to the memory heap. */
@@ -1495,6 +1619,7 @@ rec_copy_prefix_to_buf(
return(*buf + (rec - (lens + 1)));
}
+#endif /* UNIV_HOTBACKUP */
/***************************************************************//**
Validates the consistency of an old-style physical record.
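
The temporary-file record format introduced above (rec_get_converted_size_temp(), rec_init_offsets_temp(), rec_convert_dtuple_to_temp()) drops the REC_N_NEW_EXTRA_BYTES header and keeps only the null-flag bytes plus the variable-length length bytes, following the rule repeated in the comments: a column whose maximum length is at most 255 bytes always gets a one-byte length, while a longer column gets one byte for lengths 0..127 and two bytes for lengths of 128 or more or for externally stored fields. The sketch below is a simplified, standalone restatement of that rule; n_len_bytes() and null_flag_bytes() are illustrative helpers, not the actual on-disk encoder.

#include <stdio.h>

/* Illustrative only: how many length bytes a COMPACT-style
   variable-length column needs, per the rule quoted in the hunks. */
static unsigned
n_len_bytes(unsigned max_len, unsigned len, int is_extern)
{
	if (max_len <= 255 && !is_extern) {
		/* "the actual length is always stored in one byte" */
		return 1;
	}

	/* maximum length > 255 (or externally stored): one byte for
	   lengths 0..127, two bytes for 128 or more or external fields */
	return (len < 128 && !is_extern) ? 1 : 2;
}

/* Mirrors the UT_BITS_IN_BYTES(index->n_nullable) term: one null flag
   per nullable column, rounded up to whole bytes. In the temporary
   format these bytes are the entire record header. */
static unsigned
null_flag_bytes(unsigned n_nullable)
{
	return (n_nullable + 7) / 8;
}

int
main(void)
{
	printf("max 255, len 200          -> %u length byte(s)\n",
	       n_len_bytes(255, 200, 0));       /* 1 */
	printf("max 300, len 100          -> %u length byte(s)\n",
	       n_len_bytes(300, 100, 0));       /* 1 */
	printf("max 300, len 200          -> %u length byte(s)\n",
	       n_len_bytes(300, 200, 0));       /* 2 */
	printf("externally stored column  -> %u length byte(s)\n",
	       n_len_bytes(65535, 50, 1));      /* 2 */
	printf("3 nullable columns        -> %u null-flag byte(s)\n",
	       null_flag_bytes(3));             /* 1 */
	return 0;
}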
diff --git a/storage/xtradb/row/row0ins.c b/storage/xtradb/row/row0ins.c
index 61c3720fa2e..fda0c55b5c7 100644
--- a/storage/xtradb/row/row0ins.c
+++ b/storage/xtradb/row/row0ins.c
@@ -2288,7 +2288,10 @@ row_ins_index_entry(
err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
n_ext, thr);
if (err != DB_FAIL) {
-
+ if (index == dict_table_get_first_index(index->table)
+ && thr_get_trx(thr)->mysql_thd != 0) {
+ DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
+ }
return(err);
}
diff --git a/storage/xtradb/row/row0merge.c b/storage/xtradb/row/row0merge.c
index 0fd13f5339c..0b6a6302854 100644
--- a/storage/xtradb/row/row0merge.c
+++ b/storage/xtradb/row/row0merge.c
@@ -301,6 +301,7 @@ row_merge_buf_add(
for (i = 0; i < n_fields; i++, field++, ifield++) {
const dict_col_t* col;
ulint col_no;
+ ulint fixed_len;
const dfield_t* row_field;
ulint len;
@@ -349,9 +350,30 @@ row_merge_buf_add(
ut_ad(len <= col->len || col->mtype == DATA_BLOB);
- if (ifield->fixed_len) {
- ut_ad(len == ifield->fixed_len);
+ fixed_len = ifield->fixed_len;
+ if (fixed_len && !dict_table_is_comp(index->table)
+ && DATA_MBMINLEN(col->mbminmaxlen)
+ != DATA_MBMAXLEN(col->mbminmaxlen)) {
+ /* CHAR in ROW_FORMAT=REDUNDANT is always
+ fixed-length, but in the temporary file it is
+ variable-length for variable-length character
+ sets. */
+ fixed_len = 0;
+ }
+
+ if (fixed_len) {
+#ifdef UNIV_DEBUG
+ ulint mbminlen = DATA_MBMINLEN(col->mbminmaxlen);
+ ulint mbmaxlen = DATA_MBMAXLEN(col->mbminmaxlen);
+
+			/* len should be between the sizes calculated
+			based on mbmaxlen and mbminlen */
+ ut_ad(len <= fixed_len);
+ ut_ad(!mbmaxlen || len >= mbminlen
+ * (fixed_len / mbmaxlen));
+
ut_ad(!dfield_is_ext(field));
+#endif /* UNIV_DEBUG */
} else if (dfield_is_ext(field)) {
extra_size += 2;
} else if (len < 128
@@ -372,12 +394,11 @@ row_merge_buf_add(
ulint size;
ulint extra;
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields, &extra);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra);
- ut_ad(data_size + extra_size + REC_N_NEW_EXTRA_BYTES == size);
- ut_ad(extra_size + REC_N_NEW_EXTRA_BYTES == extra);
+ ut_ad(data_size + extra_size == size);
+ ut_ad(extra_size == extra);
}
#endif /* UNIV_DEBUG */
@@ -581,14 +602,9 @@ row_merge_buf_write(
ulint extra_size;
const dfield_t* entry = buf->tuples[i];
- size = rec_get_converted_size_comp(index,
- REC_STATUS_ORDINARY,
- entry, n_fields,
- &extra_size);
+ size = rec_get_converted_size_temp(
+ index, entry, n_fields, &extra_size);
ut_ad(size >= extra_size);
- ut_ad(extra_size >= REC_N_NEW_EXTRA_BYTES);
- extra_size -= REC_N_NEW_EXTRA_BYTES;
- size -= REC_N_NEW_EXTRA_BYTES;
/* Encode extra_size + 1 */
if (extra_size + 1 < 0x80) {
@@ -601,9 +617,8 @@ row_merge_buf_write(
ut_ad(b + size < block[1]);
- rec_convert_dtuple_to_rec_comp(b + extra_size, 0, index,
- REC_STATUS_ORDINARY,
- entry, n_fields);
+ rec_convert_dtuple_to_temp(b + extra_size, index,
+ entry, n_fields);
b += size;
@@ -709,6 +724,8 @@ row_merge_read(
ib_uint64_t ofs = ((ib_uint64_t) offset) * block_size;
ibool success;
+ DBUG_EXECUTE_IF("row_merge_read_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_read) {
fprintf(stderr, "row_merge_read fd=%d ofs=%lu\n",
@@ -756,6 +773,8 @@ row_merge_write(
(ulint) (ofs >> 32),
block_size);
+ DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE););
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_write) {
fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
@@ -872,7 +891,7 @@ err_exit:
*mrec = *buf + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
@@ -891,7 +910,7 @@ err_exit:
*mrec = b + extra_size;
- rec_init_offsets_comp_ordinary(*mrec, 0, index, offsets);
+ rec_init_offsets_temp(*mrec, index, offsets);
data_size = rec_offs_data_size(offsets);
ut_ad(extra_size + data_size < block_size);
@@ -2261,7 +2280,7 @@ row_merge_drop_temp_indexes(void)
/*********************************************************************//**
Creates temporary merge files, and if UNIV_PFS_IO is defined, registers
the file descriptor with Performance Schema.
-@return File descriptor */
+@return file descriptor, or -1 on failure */
UNIV_INLINE
int
row_merge_file_create_low(void)
@@ -2283,12 +2302,19 @@ row_merge_file_create_low(void)
#ifdef UNIV_PFS_IO
register_pfs_file_open_end(locker, fd);
#endif
+ if (fd < 0) {
+ fprintf(stderr,
+ "InnoDB: Error: Cannot create temporary merge file\n");
+ return(-1);
+ }
return(fd);
}
+
/*********************************************************************//**
-Create a merge file. */
-static
-void
+Create a merge file.
+@return file descriptor, or -1 on failure */
+static __attribute__((nonnull, warn_unused_result))
+int
row_merge_file_create(
/*==================*/
merge_file_t* merge_file) /*!< out: merge file structure */
@@ -2296,6 +2322,7 @@ row_merge_file_create(
merge_file->fd = row_merge_file_create_low();
merge_file->offset = 0;
merge_file->n_rec = 0;
+ return(merge_file->fd);
}
/*********************************************************************//**
@@ -2541,6 +2568,28 @@ row_merge_rename_tables(
goto err_exit;
}
+ /* Generate the redo logs for file operations */
+ fil_mtr_rename_log(old_table->space, old_name,
+ new_table->space, new_table->name, tmp_name);
+
+ /* What if the redo logs are flushed to disk here? This is
+ tested with following crash point */
+ DBUG_EXECUTE_IF("bug14669848_precommit", log_buffer_flush_to_disk();
+	tested with the following crash point */
+
+ /* File operations cannot be rolled back. So, before proceeding
+ with file operations, commit the dictionary changes.*/
+ trx_commit_for_mysql(trx);
+
+	/* If the server crashes here, the dictionary in InnoDB and MySQL
+	will differ. The .ibd files and the .frm files must be swapped
+	manually by the administrator. No data is lost. */
+ DBUG_EXECUTE_IF("bug14669848", DBUG_SUICIDE(););
+
+ /* Ensure that the redo logs are flushed to disk. The config
+ innodb_flush_log_at_trx_commit must not affect this. */
+ log_buffer_flush_to_disk();
+
/* The following calls will also rename the .ibd data files if
the tables are stored in a single-table tablespace */
@@ -2715,7 +2764,7 @@ row_merge_build_indexes(
ulint block_size;
ulint i;
ulint error;
- int tmpfd;
+ int tmpfd = -1;
ulint merge_sort_block_size;
void* block_mem;
@@ -2741,13 +2790,31 @@ row_merge_build_indexes(
i * merge_sort_block_size);
}
+ /* Initialize all the merge file descriptors, so that we
+	don't call row_merge_file_destroy() on an uninitialized
+	merge file descriptor */
+
+ for (i = 0; i < n_indexes; i++) {
+ merge_files[i].fd = -1;
+ }
+
for (i = 0; i < n_indexes; i++) {
- row_merge_file_create(&merge_files[i]);
+		if (row_merge_file_create(&merge_files[i]) < 0) {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
}
tmpfd = row_merge_file_create_low();
+	if (tmpfd < 0) {
+ error = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
+
/* Reset the MySQL row buffer that is used when reporting
duplicate keys. */
innobase_rec_reset(table);
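
The row_merge_buf_add() hunk above treats a CHAR column as variable-length in the sort file whenever ROW_FORMAT=REDUNDANT is used with a character set where mbminlen != mbmaxlen, and the debug assertions bound the stored byte length by mbminlen * (fixed_len / mbmaxlen) from below and fixed_len from above. A minimal check of those bounds, assuming a hypothetical CHAR(10) column in a 1..3-byte character set so that fixed_len would be 30 bytes; the helper name and constants are illustrative, not taken from dict0types.h.

#include <stdio.h>

/* Check a CHAR value's byte length against the bounds asserted in the
   patch: len <= fixed_len and, for multibyte charsets,
   len >= mbminlen * (fixed_len / mbmaxlen). */
static int
char_len_in_bounds(unsigned len, unsigned fixed_len,
		   unsigned mbminlen, unsigned mbmaxlen)
{
	if (len > fixed_len) {
		return 0;
	}

	if (mbmaxlen && len < mbminlen * (fixed_len / mbmaxlen)) {
		return 0;
	}

	return 1;
}

int
main(void)
{
	/* CHAR(10) with 1..3-byte characters: fixed_len = 30 bytes,
	   so any stored value is between 10 and 30 bytes long. */
	printf("len=10: %d\n", char_len_in_bounds(10, 30, 1, 3)); /* 1 */
	printf("len=30: %d\n", char_len_in_bounds(30, 30, 1, 3)); /* 1 */
	printf("len=8:  %d\n", char_len_in_bounds(8, 30, 1, 3));  /* 0 */
	return 0;
}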
diff --git a/storage/xtradb/row/row0mysql.c b/storage/xtradb/row/row0mysql.c
index 9ab85940760..3a18cfc679e 100644
--- a/storage/xtradb/row/row0mysql.c
+++ b/storage/xtradb/row/row0mysql.c
@@ -1879,7 +1879,8 @@ Creates a table for MySQL. If the name of the table ends in
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
"innodb_table_monitor", then this will also start the printing of monitor
output by the master thread. If the table name ends in "innodb_mem_validate",
-InnoDB will try to invoke mem_validate().
+InnoDB will try to invoke mem_validate(). On failure the transaction will
+be rolled back and the 'table' object will be freed.
@return error code or DB_SUCCESS */
UNIV_INTERN
int
@@ -2017,6 +2018,8 @@ err_exit:
row_drop_table_for_mysql(table->name, trx, FALSE);
trx_commit_for_mysql(trx);
+ } else {
+ dict_mem_table_free(table);
}
break;
@@ -4270,6 +4273,13 @@ end:
trx->error_state = DB_SUCCESS;
trx_general_rollback_for_mysql(trx, NULL);
trx->error_state = DB_SUCCESS;
+ } else {
+ if (old_is_tmp && !new_is_tmp) {
+ /* After ALTER TABLE the table statistics
+			need to be rebuilt. They will be rebuilt
+ when the table is loaded again. */
+ table->stat_initialized = FALSE;
+ }
}
}
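
The row0mysql.c change documents an ownership rule: row_create_table_for_mysql() now disposes of the passed 'table' object on every failure path, either by dropping the half-created table or by calling dict_mem_table_free(), so the caller never frees it. A generic sketch of that take-ownership-on-failure convention, using made-up names (thing_t, create_thing(), registry) rather than the InnoDB types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char name[64]; } thing_t;

static thing_t*	registry;       /* stands in for the dictionary cache */

/* The callee consumes 't' in every case: on success ownership moves to
   the registry, on failure the object is freed right here, so the
   caller must never free it and cannot double-free by mistake. */
static int
create_thing(thing_t* t, int simulate_failure)
{
	if (simulate_failure) {
		free(t);        /* plays the role of dict_mem_table_free() */
		return -1;
	}

	registry = t;
	return 0;
}

int
main(void)
{
	thing_t*	t = malloc(sizeof *t);

	if (t == NULL) {
		return 1;
	}

	strcpy(t->name, "test/t1");

	if (create_thing(t, 1) != 0) {
		/* 't' was already freed by create_thing(); just report. */
		fprintf(stderr, "create failed\n");
	}

	free(registry);         /* still NULL here; free(NULL) is a no-op */
	return 0;
}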
diff --git a/storage/xtradb/row/row0sel.c b/storage/xtradb/row/row0sel.c
index ec3603f2550..858d50fd5a6 100644
--- a/storage/xtradb/row/row0sel.c
+++ b/storage/xtradb/row/row0sel.c
@@ -2489,6 +2489,9 @@ row_sel_convert_mysql_key_to_innobase(
dfield++;
}
+ DBUG_EXECUTE_IF("innodb_srch_key_buffer_full",
+ ut_a(buf == (original_buf + buf_len)););
+
ut_a(buf <= original_buf + buf_len);
/* We set the length of tuple to n_fields: we assume that the memory
@@ -3641,13 +3644,13 @@ row_search_for_mysql(
should_release = 0;
for (i = 0; i < btr_search_index_num; i++) {
/* we should check all latches (fix Bug#791030) */
- if (rw_lock_get_writer(btr_search_latch_part[i])
- != RW_LOCK_NOT_LOCKED) {
+ if (UNIV_UNLIKELY(rw_lock_get_writer(btr_search_latch_part[i])
+ != RW_LOCK_NOT_LOCKED)) {
should_release |= ((ulint)1 << i);
}
}
- if (should_release) {
+ if (UNIV_UNLIKELY(should_release)) {
/* There is an x-latch request on the adaptive hash index:
release the s-latch to reduce starvation and wait for
@@ -4124,6 +4127,11 @@ wait_table_again:
}
rec_loop:
+ if (trx_is_interrupted(trx)) {
+ err = DB_INTERRUPTED;
+ goto normal_return;
+ }
+
/*-------------------------------------------------------------*/
/* PHASE 4: Look for matching records in a loop */
@@ -5118,11 +5126,15 @@ row_search_autoinc_read_column(
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, col_no + 1, &heap);
- data = rec_get_nth_field(rec, offsets, col_no, &len);
+ if (rec_offs_nth_sql_null(offsets, col_no)) {
+ /* There is no non-NULL value in the auto-increment column. */
+ value = 0;
+ goto func_exit;
+ }
- ut_a(len != UNIV_SQL_NULL);
+ data = rec_get_nth_field(rec, offsets, col_no, &len);
switch (mtype) {
case DATA_INT:
@@ -5144,14 +5156,15 @@ row_search_autoinc_read_column(
ut_error;
}
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
if (!unsigned_type && (ib_int64_t) value < 0) {
value = 0;
}
+func_exit:
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+
return(value);
}
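
row_search_autoinc_read_column() above now fetches offsets only up to the auto-increment column, treats a SQL NULL maximum as 0, clamps a negative signed value to 0, and frees the heap through a single func_exit label. A tiny standalone sketch of just the value-normalisation step; autoinc_from_column() and its argument convention are assumptions for the example, not the InnoDB signature.

#include <stdint.h>
#include <stdio.h>

/* Normalise a stored auto-increment maximum the way the patched
   function does: NULL => 0, negative signed value => 0. */
static uint64_t
autoinc_from_column(const int64_t* stored, int is_unsigned)
{
	if (stored == NULL) {
		/* no non-NULL value in the auto-increment column */
		return 0;
	}

	if (!is_unsigned && *stored < 0) {
		return 0;
	}

	return (uint64_t) *stored;
}

int
main(void)
{
	int64_t	v1 = 41, v2 = -7;

	printf("%llu\n", (unsigned long long) autoinc_from_column(&v1, 0));  /* 41 */
	printf("%llu\n", (unsigned long long) autoinc_from_column(&v2, 0));  /* 0  */
	printf("%llu\n", (unsigned long long) autoinc_from_column(NULL, 1)); /* 0  */
	return 0;
}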
diff --git a/storage/xtradb/row/row0umod.c b/storage/xtradb/row/row0umod.c
index 9597c476125..3c933c87b27 100644
--- a/storage/xtradb/row/row0umod.c
+++ b/storage/xtradb/row/row0umod.c
@@ -69,36 +69,6 @@ If you make a change in this module make sure that no codepath is
introduced where a call to log_free_check() is bypassed. */
/***********************************************************//**
-Checks if also the previous version of the clustered index record was
-modified or inserted by the same transaction, and its undo number is such
-that it should be undone in the same rollback.
-@return TRUE if also previous modify or insert of this row should be undone */
-static
-ibool
-row_undo_mod_undo_also_prev_vers(
-/*=============================*/
- undo_node_t* node, /*!< in: row undo node */
- undo_no_t* undo_no)/*!< out: the undo number */
-{
- trx_undo_rec_t* undo_rec;
- trx_t* trx;
-
- trx = node->trx;
-
- if (node->new_trx_id != trx->id) {
-
- *undo_no = 0;
- return(FALSE);
- }
-
- undo_rec = trx_undo_get_undo_rec_low(node->new_roll_ptr, node->heap);
-
- *undo_no = trx_undo_rec_get_undo_no(undo_rec);
-
- return(trx->roll_limit <= *undo_no);
-}
-
-/***********************************************************//**
Undoes a modify in a clustered index record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
static
@@ -226,19 +196,11 @@ row_undo_mod_clust(
btr_pcur_t* pcur;
mtr_t mtr;
ulint err;
- ibool success;
- ibool more_vers;
- undo_no_t new_undo_no;
ut_ad(node && thr);
log_free_check();
- /* Check if also the previous version of the clustered index record
- should be undone in this same rollback operation */
-
- more_vers = row_undo_mod_undo_also_prev_vers(node, &new_undo_no);
-
pcur = &(node->pcur);
mtr_start(&mtr);
@@ -286,20 +248,6 @@ row_undo_mod_clust(
trx_undo_rec_release(node->trx, node->undo_no);
- if (more_vers && err == DB_SUCCESS) {
-
- /* Reserve the undo log record to the prior version after
- committing &mtr: this is necessary to comply with the latching
- order, as &mtr may contain the fsp latch which is lower in
- the latch hierarchy than trx->undo_mutex. */
-
- success = trx_undo_rec_reserve(node->trx, new_undo_no);
-
- if (success) {
- node->state = UNDO_NODE_PREV_VERS;
- }
- }
-
return(err);
}
@@ -571,6 +519,7 @@ row_undo_mod_upd_del_sec(
ulint err = DB_SUCCESS;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
+ ut_ad(!node->undo_row);
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -632,6 +581,8 @@ row_undo_mod_del_mark_sec(
dict_index_t* index;
ulint err;
+ ut_ad(!node->undo_row);
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -847,7 +798,6 @@ row_undo_mod_parse_undo_rec(
trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
roll_ptr, info_bits, trx,
node->heap, &(node->update));
- node->new_roll_ptr = roll_ptr;
node->new_trx_id = trx_id;
node->cmpl_info = cmpl_info;
}
diff --git a/storage/xtradb/row/row0undo.c b/storage/xtradb/row/row0undo.c
index 09970b7fe21..74fc1baf1d2 100644
--- a/storage/xtradb/row/row0undo.c
+++ b/storage/xtradb/row/row0undo.c
@@ -216,7 +216,7 @@ row_undo_search_clust_to_pcur(
node->row = row_build(ROW_COPY_DATA, clust_index, rec,
offsets, NULL, ext, node->heap);
- if (node->update) {
+ if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
node->undo_row = dtuple_copy(node->row, node->heap);
row_upd_replace(node->undo_row, &node->undo_ext,
clust_index, node->update, node->heap);
@@ -282,25 +282,6 @@ row_undo(
} else {
node->state = UNDO_NODE_MODIFY;
}
-
- } else if (node->state == UNDO_NODE_PREV_VERS) {
-
- /* Undo should be done to the same clustered index record
- again in this same rollback, restoring the previous version */
-
- roll_ptr = node->new_roll_ptr;
-
- node->undo_rec = trx_undo_get_undo_rec_low(roll_ptr,
- node->heap);
- node->roll_ptr = roll_ptr;
- node->undo_no = trx_undo_rec_get_undo_no(node->undo_rec);
-
- if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
-
- node->state = UNDO_NODE_INSERT;
- } else {
- node->state = UNDO_NODE_MODIFY;
- }
}
/* Prevent DROP TABLE etc. while we are rolling back this row.
diff --git a/storage/xtradb/srv/srv0srv.c b/storage/xtradb/srv/srv0srv.c
index 6e210071746..6edfbaa7755 100644
--- a/storage/xtradb/srv/srv0srv.c
+++ b/storage/xtradb/srv/srv0srv.c
@@ -58,6 +58,8 @@ Created 10/8/1995 Heikki Tuuri
*******************************************************/
/* Dummy comment */
+#include "m_string.h" /* for my_sys.h */
+#include "my_sys.h" /* DEBUG_SYNC_C */
#include "srv0srv.h"
#include "ut0mem.h"
@@ -181,7 +183,7 @@ UNIV_INTERN my_bool srv_track_changed_pages = TRUE;
UNIV_INTERN ib_uint64_t srv_max_bitmap_file_size = 100 * 1024 * 1024;
-UNIV_INTERN ulonglong srv_changed_pages_limit = 0;
+UNIV_INTERN ulonglong srv_max_changed_pages = 0;
/** When TRUE, fake change transactions take S rather than X row locks.
When FALSE, row locks are not taken at all. */
@@ -312,58 +314,11 @@ UNIV_INTERN ulong srv_purge_batch_size = 20;
/* the number of rollback segments to use */
UNIV_INTERN ulong srv_rollback_segments = TRX_SYS_N_RSEGS;
-/* variable counts amount of data read in total (in bytes) */
-UNIV_INTERN ulint srv_data_read = 0;
-
/* Internal setting for "innodb_stats_method". Decides how InnoDB treats
NULL value when collecting statistics. By default, it is set to
SRV_STATS_NULLS_EQUAL(0), ie. all NULL value are treated equal */
ulong srv_innodb_stats_method = SRV_STATS_NULLS_EQUAL;
-/* here we count the amount of data written in total (in bytes) */
-UNIV_INTERN ulint srv_data_written = 0;
-
-/* the number of the log write requests done */
-UNIV_INTERN ulint srv_log_write_requests = 0;
-
-/* the number of physical writes to the log performed */
-UNIV_INTERN ulint srv_log_writes = 0;
-
-/* amount of data written to the log files in bytes */
-UNIV_INTERN ulint srv_os_log_written = 0;
-
-/* amount of writes being done to the log files */
-UNIV_INTERN ulint srv_os_log_pending_writes = 0;
-
-/* we increase this counter, when there we don't have enough space in the
-log buffer and have to flush it */
-UNIV_INTERN ulint srv_log_waits = 0;
-
-/* this variable counts the amount of times, when the doublewrite buffer
-was flushed */
-UNIV_INTERN ulint srv_dblwr_writes = 0;
-
-/* here we store the number of pages that have been flushed to the
-doublewrite buffer */
-UNIV_INTERN ulint srv_dblwr_pages_written = 0;
-
-/* in this variable we store the number of write requests issued */
-UNIV_INTERN ulint srv_buf_pool_write_requests = 0;
-
-/* here we store the number of times when we had to wait for a free page
-in the buffer pool. It happens when the buffer pool is full and we need
-to make a flush, in order to be able to read or create a page. */
-UNIV_INTERN ulint srv_buf_pool_wait_free = 0;
-
-/* variable to count the number of pages that were written from buffer
-pool to the disk */
-UNIV_INTERN ulint srv_buf_pool_flushed = 0;
-UNIV_INTERN ulint buf_lru_flush_page_count = 0;
-
-/** Number of buffer pool reads that led to the
-reading of a disk page */
-UNIV_INTERN ulint srv_buf_pool_reads = 0;
-
/** Time in seconds between automatic buffer pool dumps */
UNIV_INTERN uint srv_auto_lru_dump = 0;
@@ -406,6 +361,9 @@ UNIV_INTERN lint srv_conc_n_threads = 0;
InnoDB */
UNIV_INTERN ulint srv_conc_n_waiting_threads = 0;
+/* print all user-level transactions deadlocks to mysqld stderr */
+UNIV_INTERN my_bool srv_print_all_deadlocks = FALSE;
+
typedef struct srv_conc_slot_struct srv_conc_slot_t;
struct srv_conc_slot_struct{
os_event_t event; /*!< event to wait */
@@ -451,6 +409,10 @@ UNIV_INTERN ulong srv_sys_stats_root_page = 0;
#endif
UNIV_INTERN ibool srv_use_doublewrite_buf = TRUE;
+UNIV_INTERN ibool srv_use_atomic_writes = FALSE;
+#ifdef HAVE_POSIX_FALLOCATE
+UNIV_INTERN ibool srv_use_posix_fallocate = TRUE;
+#endif
UNIV_INTERN ibool srv_use_checksums = TRUE;
UNIV_INTERN ibool srv_fast_checksum = FALSE;
@@ -489,23 +451,83 @@ UNIV_INTERN ibool srv_print_log_io = FALSE;
UNIV_INTERN ibool srv_print_latch_waits = FALSE;
#endif /* UNIV_DEBUG */
-UNIV_INTERN ulint srv_n_rows_inserted = 0;
-UNIV_INTERN ulint srv_n_rows_updated = 0;
-UNIV_INTERN ulint srv_n_rows_deleted = 0;
-UNIV_INTERN ulint srv_n_rows_read = 0;
-
static ulint srv_n_rows_inserted_old = 0;
static ulint srv_n_rows_updated_old = 0;
static ulint srv_n_rows_deleted_old = 0;
static ulint srv_n_rows_read_old = 0;
-UNIV_INTERN ulint srv_n_lock_deadlock_count = 0;
-UNIV_INTERN ulint srv_n_lock_wait_count = 0;
-UNIV_INTERN ulint srv_n_lock_wait_current_count = 0;
-UNIV_INTERN ib_int64_t srv_n_lock_wait_time = 0;
-UNIV_INTERN ulint srv_n_lock_max_wait_time = 0;
+/* Ensure counters are on separate cache lines */
+
+#define CACHE_LINE_SIZE 64
+#define CACHE_ALIGNED __attribute__ ((aligned (CACHE_LINE_SIZE)))
+
+UNIV_INTERN byte
+counters_pad_start[CACHE_LINE_SIZE] __attribute__((unused)) = {0};
+
+UNIV_INTERN ulint srv_n_rows_inserted CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_rows_updated CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_rows_deleted CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_rows_read CACHE_ALIGNED = 0;
+
+UNIV_INTERN ulint srv_n_lock_deadlock_count CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_lock_wait_count CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_lock_wait_current_count CACHE_ALIGNED = 0;
+UNIV_INTERN ib_int64_t srv_n_lock_wait_time CACHE_ALIGNED = 0;
+UNIV_INTERN ulint srv_n_lock_max_wait_time CACHE_ALIGNED = 0;
-UNIV_INTERN ulint srv_truncated_status_writes = 0;
+UNIV_INTERN ulint srv_truncated_status_writes CACHE_ALIGNED = 0;
+
+/* variable counts amount of data read in total (in bytes) */
+UNIV_INTERN ulint srv_data_read CACHE_ALIGNED = 0;
+
+/* here we count the amount of data written in total (in bytes) */
+UNIV_INTERN ulint srv_data_written CACHE_ALIGNED = 0;
+
+/* the number of the log write requests done */
+UNIV_INTERN ulint srv_log_write_requests CACHE_ALIGNED = 0;
+
+/* the number of physical writes to the log performed */
+UNIV_INTERN ulint srv_log_writes CACHE_ALIGNED = 0;
+
+/* amount of data written to the log files in bytes */
+UNIV_INTERN ulint srv_os_log_written CACHE_ALIGNED = 0;
+
+/* amount of writes being done to the log files */
+UNIV_INTERN ulint srv_os_log_pending_writes CACHE_ALIGNED = 0;
+
+/* we increase this counter when we don't have enough space in the
+log buffer and have to flush it */
+UNIV_INTERN ulint srv_log_waits CACHE_ALIGNED = 0;
+
+/* this variable counts the number of times the doublewrite buffer
+was flushed */
+UNIV_INTERN ulint srv_dblwr_writes CACHE_ALIGNED = 0;
+
+/* here we store the number of pages that have been flushed to the
+doublewrite buffer */
+UNIV_INTERN ulint srv_dblwr_pages_written CACHE_ALIGNED = 0;
+
+/* in this variable we store the number of write requests issued */
+UNIV_INTERN ulint srv_buf_pool_write_requests CACHE_ALIGNED = 0;
+
+/* here we store the number of times when we had to wait for a free page
+in the buffer pool. It happens when the buffer pool is full and we need
+to make a flush, in order to be able to read or create a page. */
+UNIV_INTERN ulint srv_buf_pool_wait_free CACHE_ALIGNED = 0;
+
+/** Number of buffer pool reads that led to the
+reading of a disk page */
+UNIV_INTERN ulint srv_buf_pool_reads CACHE_ALIGNED = 0;
+
+/* variable to count the number of pages that were written from buffer
+pool to the disk */
+UNIV_INTERN ulint srv_buf_pool_flushed CACHE_ALIGNED = 0;
+
+/* variable to count the number of LRU flushed pages */
+UNIV_INTERN ulint buf_lru_flush_page_count CACHE_ALIGNED = 0;
+
+UNIV_INTERN byte
+counters_pad_end[CACHE_LINE_SIZE] __attribute__((unused)) = {0};
/*
Set the following to 0 if you want InnoDB to write messages on
@@ -1438,7 +1460,7 @@ retry:
ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
#endif /* UNIV_SYNC_DEBUG */
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
start_time = (ib_uint64_t)sec * 1000000 + ms;
} else {
@@ -1453,7 +1475,7 @@ retry:
trx->op_info = "";
- if (innobase_get_slow_log() && trx->take_stats && start_time) {
+ if (UNIV_UNLIKELY(start_time != 0)) {
ut_usectime(&sec, &ms);
finish_time = (ib_uint64_t)sec * 1000000 + ms;
trx->innodb_que_wait_timer += (ulint)(finish_time - start_time);
@@ -1764,6 +1786,10 @@ srv_suspend_mysql_thread(
trx = thr_get_trx(thr);
+ if (trx->mysql_thd != 0) {
+ DEBUG_SYNC_C("srv_suspend_mysql_thread_enter");
+ }
+
os_event_set(srv_lock_timeout_thread_event);
mutex_enter(&kernel_mutex);
@@ -2193,6 +2219,8 @@ srv_printf_innodb_monitor(
(long) srv_conc_n_threads,
(ulong) srv_conc_n_waiting_threads);
+ mutex_enter(&kernel_mutex);
+
fprintf(file, "%lu read views open inside InnoDB\n",
UT_LIST_GET_LEN(trx_sys->view_list));
@@ -2206,6 +2234,8 @@ srv_printf_innodb_monitor(
}
}
+ mutex_exit(&kernel_mutex);
+
n_reserved = fil_space_get_n_reserved_extents(0);
if (n_reserved > 0) {
fprintf(file,
@@ -2290,16 +2320,18 @@ void
srv_export_innodb_status(void)
/*==========================*/
{
- buf_pool_stat_t stat;
- ulint LRU_len;
- ulint free_len;
- ulint flush_list_len;
- ulint mem_adaptive_hash, mem_dictionary;
- read_view_t* oldest_view;
- ulint i;
+ buf_pool_stat_t stat;
+ buf_pools_list_size_t buf_pools_list_size;
+ ulint LRU_len;
+ ulint free_len;
+ ulint flush_list_len;
+ ulint mem_adaptive_hash, mem_dictionary;
+ read_view_t* oldest_view;
+ ulint i;
buf_get_total_stat(&stat);
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
+ buf_get_total_list_size_in_bytes(&buf_pools_list_size);
if (btr_search_sys && btr_search_sys->hash_index[0]->heap) {
mem_adaptive_hash = mem_heap_get_size(btr_search_sys->hash_index[0]->heap);
@@ -2364,7 +2396,12 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_read_ahead_evicted
= stat.n_ra_pages_evicted;
export_vars.innodb_buffer_pool_pages_data = LRU_len;
+ export_vars.innodb_buffer_pool_bytes_data =
+ buf_pools_list_size.LRU_bytes
+ + buf_pools_list_size.unzip_LRU_bytes;
export_vars.innodb_buffer_pool_pages_dirty = flush_list_len;
+ export_vars.innodb_buffer_pool_bytes_dirty =
+ buf_pools_list_size.flush_list_bytes;
export_vars.innodb_buffer_pool_pages_free = free_len;
export_vars.innodb_deadlocks = srv_n_lock_deadlock_count;
#ifdef UNIV_DEBUG
@@ -2497,6 +2534,23 @@ srv_export_innodb_status(void)
export_vars.innodb_rows_deleted = srv_n_rows_deleted;
export_vars.innodb_truncated_status_writes = srv_truncated_status_writes;
+#ifdef UNIV_DEBUG
+ if (trx_sys->max_trx_id < purge_sys->done_trx_no) {
+ export_vars.innodb_purge_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_trx_id_age =
+ trx_sys->max_trx_id - purge_sys->done_trx_no;
+ }
+
+ if (!purge_sys->view
+ || trx_sys->max_trx_id < purge_sys->view->up_limit_id) {
+ export_vars.innodb_purge_view_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_view_trx_id_age =
+ trx_sys->max_trx_id - purge_sys->view->up_limit_id;
+ }
+#endif /* UNIV_DEBUG */
+
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -3080,11 +3134,19 @@ srv_redo_log_follow_thread(
os_event_reset(srv_checkpoint_completed_event);
if (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) {
- log_online_follow_redo_log();
+ if (!log_online_follow_redo_log()) {
+ /* TODO: sync with I_S log tracking status? */
+ fprintf(stderr,
+ "InnoDB: Error: log tracking bitmap "
+ "write failed, stopping log tracking "
+ "thread!\n");
+ break;
+ }
}
} while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE);
+ srv_track_changed_pages = FALSE;
log_online_read_shutdown();
os_event_set(srv_redo_log_thread_finished_event);
@@ -3329,6 +3391,26 @@ loop:
for (i = 0; i < 10; i++) {
ulint cur_time = ut_time_ms();
+#ifdef UNIV_DEBUG
+ if (btr_cur_limit_optimistic_insert_debug
+ && srv_n_purge_threads == 0) {
+ /* If btr_cur_limit_optimistic_insert_debug is enabled
+			and there are no purge threads, the purge opportunity is
+			increased 100-fold (one purge per 100 ms) to speed up
+			debug scripts that wait for purge to complete. */
+ next_itr_time -= 900;
+
+ srv_main_thread_op_info = "master purging";
+
+ srv_master_do_purge();
+
+ if (srv_fast_shutdown && srv_shutdown_state > 0) {
+
+ goto background_loop;
+ }
+ }
+#endif /* UNIV_DEBUG */
+
n_pages_flushed = 0; /* initialize */
/* ALTER TABLE in MySQL requires on Unix that the table handler
@@ -3487,8 +3569,7 @@ loop:
buf_pool = buf_pool_from_array(j);
- /* The scanning flush_list is optimistic here */
-
+ buf_flush_list_mutex_enter(buf_pool);
level = 0;
n_blocks = 0;
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
@@ -3502,6 +3583,7 @@ loop:
bpage = UT_LIST_GET_NEXT(flush_list, bpage);
n_blocks++;
}
+ buf_flush_list_mutex_exit(buf_pool);
if (level) {
bpl += ((ib_uint64_t) n_blocks * n_blocks
@@ -3567,30 +3649,25 @@ retry_flush_batch:
/* prev_flush_info[j] should be the previous loop's */
for (j = 0; j < srv_buf_pool_instances; j++) {
- lint blocks_num, new_blocks_num, flushed_blocks_num;
- ibool found;
+ lint blocks_num, new_blocks_num = 0;
+ lint flushed_blocks_num;
buf_pool = buf_pool_from_array(j);
+ buf_flush_list_mutex_enter(buf_pool);
blocks_num = UT_LIST_GET_LEN(buf_pool->flush_list);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
- new_blocks_num = 0;
- found = FALSE;
while (bpage != NULL) {
if (prev_flush_info[j].space == bpage->space
&& prev_flush_info[j].offset == bpage->offset
&& prev_flush_info[j].oldest_modification
== bpage->oldest_modification) {
- found = TRUE;
break;
}
bpage = UT_LIST_GET_NEXT(flush_list, bpage);
new_blocks_num++;
}
- if (!found) {
- new_blocks_num = blocks_num;
- }
flushed_blocks_num = new_blocks_num + prev_flush_info[j].count
- blocks_num;
@@ -3605,7 +3682,9 @@ retry_flush_batch:
prev_flush_info[j].space = bpage->space;
prev_flush_info[j].offset = bpage->offset;
prev_flush_info[j].oldest_modification = bpage->oldest_modification;
+ buf_flush_list_mutex_exit(buf_pool);
} else {
+ buf_flush_list_mutex_exit(buf_pool);
prev_flush_info[j].space = 0;
prev_flush_info[j].offset = 0;
prev_flush_info[j].oldest_modification = 0;
@@ -3631,6 +3710,7 @@ retry_flush_batch:
/* store previous first pages of the flush_list */
for (j = 0; j < srv_buf_pool_instances; j++) {
buf_pool = buf_pool_from_array(j);
+ buf_flush_list_mutex_enter(buf_pool);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
@@ -3639,7 +3719,9 @@ retry_flush_batch:
prev_flush_info[j].space = bpage->space;
prev_flush_info[j].offset = bpage->offset;
prev_flush_info[j].oldest_modification = bpage->oldest_modification;
+ buf_flush_list_mutex_exit(buf_pool);
} else {
+ buf_flush_list_mutex_exit(buf_pool);
prev_flush_info[j].space = 0;
prev_flush_info[j].offset = 0;
prev_flush_info[j].oldest_modification = 0;
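
The srv0srv.c hunk above moves the hot status counters next to each other and pads and aligns every one of them to a 64-byte cache line, so that two CPUs incrementing different counters do not keep invalidating the same line (false sharing). A minimal standalone illustration of the same attribute-based alignment, assuming GCC or Clang; the counter names here are made up.

#include <stdio.h>

#define CACHE_LINE_SIZE 64
#define CACHE_ALIGNED __attribute__((aligned(CACHE_LINE_SIZE)))

/* Each counter starts on its own 64-byte boundary and is far smaller
   than a line, so two distinct counters can never share a cache line. */
static unsigned long rows_inserted CACHE_ALIGNED = 0;
static unsigned long rows_read     CACHE_ALIGNED = 0;

int
main(void)
{
	/* The two addresses differ by a nonzero multiple of 64 bytes. */
	printf("inserted at %p, read at %p, distance %ld bytes\n",
	       (void*) &rows_inserted, (void*) &rows_read,
	       (long) ((char*) &rows_read - (char*) &rows_inserted));
	return 0;
}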
diff --git a/storage/xtradb/srv/srv0start.c b/storage/xtradb/srv/srv0start.c
index 2faa68cb87c..9e0477253cd 100644
--- a/storage/xtradb/srv/srv0start.c
+++ b/storage/xtradb/srv/srv0start.c
@@ -1155,6 +1155,11 @@ void
init_log_online(void)
/*=================*/
{
+ if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
+ srv_track_changed_pages = FALSE;
+ return;
+ }
+
if (srv_track_changed_pages) {
log_online_read_init();
diff --git a/storage/xtradb/sync/sync0sync.c b/storage/xtradb/sync/sync0sync.c
index efc43c4cbe5..25f96d9817a 100644
--- a/storage/xtradb/sync/sync0sync.c
+++ b/storage/xtradb/sync/sync0sync.c
@@ -315,9 +315,9 @@ mutex_create_func(
/* NOTE! The very first mutexes are not put to the mutex list */
- if ((mutex == &mutex_list_mutex)
+ if (mutex == &mutex_list_mutex
#ifdef UNIV_SYNC_DEBUG
- || (mutex == &sync_thread_mutex)
+ || mutex == &sync_thread_mutex
#endif /* UNIV_SYNC_DEBUG */
) {
diff --git a/storage/xtradb/trx/trx0purge.c b/storage/xtradb/trx/trx0purge.c
index 122aab119ba..b048dc66efe 100644
--- a/storage/xtradb/trx/trx0purge.c
+++ b/storage/xtradb/trx/trx0purge.c
@@ -61,6 +61,10 @@ UNIV_INTERN mysql_pfs_key_t trx_purge_latch_key;
UNIV_INTERN mysql_pfs_key_t purge_sys_bh_mutex_key;
#endif /* UNIV_PFS_MUTEX */
+#ifdef UNIV_DEBUG
+UNIV_INTERN my_bool srv_purge_view_update_only_debug;
+#endif /* UNIV_DEBUG */
+
/*****************************************************************//**
Checks if trx_id is >= purge_view: then it is guaranteed that its update
undo log still exists in the system.
@@ -236,6 +240,7 @@ trx_purge_sys_create(
purge_sys->purge_trx_no = 0;
purge_sys->purge_undo_no = 0;
purge_sys->next_stored = FALSE;
+ ut_d(purge_sys->done_trx_no = 0);
rw_lock_create(trx_purge_latch_key,
&purge_sys->latch, SYNC_PURGE_LATCH);
@@ -656,6 +661,12 @@ trx_purge_truncate_if_arr_empty(void)
{
static ulint count;
+#ifdef UNIV_DEBUG
+ if (purge_sys->arr->n_used == 0) {
+ purge_sys->done_trx_no = purge_sys->purge_trx_no;
+ }
+#endif /* UNIV_DEBUG */
+
if (!(++count % TRX_SYS_N_RSEGS) && purge_sys->arr->n_used == 0) {
trx_purge_truncate_history();
@@ -1172,6 +1183,12 @@ trx_purge(
rw_lock_x_unlock(&(purge_sys->latch));
+#ifdef UNIV_DEBUG
+ if (srv_purge_view_update_only_debug) {
+ return(0);
+ }
+#endif
+
purge_sys->state = TRX_PURGE_ON;
purge_sys->handle_limit = purge_sys->n_pages_handled + limit;
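
The debug-only purge_sys->done_trx_no bookkeeping added above feeds the innodb_purge_trx_id_age and innodb_purge_view_trx_id_age values exported in the srv0srv.c hunk: the age is max_trx_id minus the purge position, with a guard so the unsigned subtraction cannot wrap when purge has already caught up. A tiny sketch of that guarded subtraction; the function and variable names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Guarded lag computation for unsigned sequence numbers: if the purge
   position is already at or beyond max_trx_id, report 0 instead of
   letting the subtraction wrap around. */
static uint64_t
trx_id_age(uint64_t max_trx_id, uint64_t done_trx_no)
{
	return (max_trx_id < done_trx_no) ? 0 : max_trx_id - done_trx_no;
}

int
main(void)
{
	printf("%llu\n", (unsigned long long) trx_id_age(1000, 900));  /* 100 */
	printf("%llu\n", (unsigned long long) trx_id_age(900, 1000));  /* 0   */
	return 0;
}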
diff --git a/storage/xtradb/trx/trx0rec.c b/storage/xtradb/trx/trx0rec.c
index db4897c368d..ef42152aeb7 100644
--- a/storage/xtradb/trx/trx0rec.c
+++ b/storage/xtradb/trx/trx0rec.c
@@ -36,6 +36,7 @@ Created 3/26/1996 Heikki Tuuri
#ifndef UNIV_HOTBACKUP
#include "dict0dict.h"
#include "ut0mem.h"
+#include "read0read.h"
#include "row0ext.h"
#include "row0upd.h"
#include "que0que.h"
@@ -1647,6 +1648,25 @@ trx_undo_prev_version_build(
if (row_upd_changes_field_size_or_external(index, offsets, update)) {
ulint n_ext;
+ /* We should confirm the existence of disowned external data,
+ if the previous version record is delete marked. If the trx_id
+	of the previous record is seen by the purge view, we should treat
+	it as missing history, because the disowned external data
+	might already have been purged.
+
+ The inherited external data (BLOBs) can be freed (purged)
+ after trx_id was committed, provided that no view was started
+ before trx_id. If the purge view can see the committed
+ delete-marked record by trx_id, no transactions need to access
+ the BLOB. */
+
+ if ((update->info_bits & REC_INFO_DELETED_FLAG)
+ && read_view_sees_trx_id(purge_sys->view, trx_id)) {
+			/* Treat it as a fresh insert, so as not to
+			cause an assertion failure in the caller. */
+ return(DB_SUCCESS);
+ }
+
/* We have to set the appropriate extern storage bits in the
old version of the record: the extern bits in rec for those
fields that update does NOT update, as well as the bits for
diff --git a/storage/xtradb/trx/trx0trx.c b/storage/xtradb/trx/trx0trx.c
index 99b4276fbee..a17f8abdad0 100644
--- a/storage/xtradb/trx/trx0trx.c
+++ b/storage/xtradb/trx/trx0trx.c
@@ -235,7 +235,7 @@ trx_allocate_for_mysql(void)
mutex_exit(&kernel_mutex);
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
trx->distinct_page_access_hash = mem_alloc(DPAH_SIZE);
memset(trx->distinct_page_access_hash, 0, DPAH_SIZE);
}
@@ -1269,7 +1269,7 @@ trx_end_lock_wait(
thr = UT_LIST_GET_FIRST(trx->wait_thrs);
}
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
now = (ib_uint64_t)sec * 1000000 + ms;
trx->lock_que_wait_timer += (ulint)(now - trx->lock_que_wait_ustarted);
@@ -1304,7 +1304,7 @@ trx_lock_wait_to_suspended(
thr = UT_LIST_GET_FIRST(trx->wait_thrs);
}
- if (innobase_get_slow_log() && trx->take_stats) {
+ if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
now = (ib_uint64_t)sec * 1000000 + ms;
trx->lock_que_wait_timer += (ulint)(now - trx->lock_que_wait_ustarted);