path: root/storage/innobase/row
author     unknown <aivanov@mysql.com>  2006-03-10 19:22:21 +0300
committer  unknown <aivanov@mysql.com>  2006-03-10 19:22:21 +0300
commit     050f14ac371e03dc96b96e8a9b0cd8fa8e3a23e0 (patch)
tree       8105d79f7267d8af93861befd1899063f1ad42c2 /storage/innobase/row
parent     1cef1679a47bfbf744d656646770193ba07c30fe (diff)
download   mariadb-git-050f14ac371e03dc96b96e8a9b0cd8fa8e3a23e0.tar.gz
Applied innodb-5.1-ss269 snapshot.
Fixed BUGS:

#3300: "UPDATE statement with no index column in where condition locks all rows"
 Implement semi-consistent read to reduce lock conflicts at the cost of breaking serializability.
 ha_innobase::unlock_row(): reset the "did semi consistent read" flag.
 ha_innobase::was_semi_consistent_read(), ha_innobase::try_semi_consistent_read(): new methods.
 row_prebuilt_t, row_create_prebuilt(): add field row_read_type for keeping track of semi-consistent reads.
 row_vers_build_for_semi_consistent_read(), row_sel_build_committed_vers_for_mysql(): new functions.
 row_search_for_mysql(): implement semi-consistent reads.

#9802: "Foreign key checks disallow alter table". Added test cases.

#12456: "Cursor shows incorrect data - DML does not affect, probably caching"
 This patch implements a high-granularity read view to be used with cursors. In this high-granularity consistent read view, modifications done by the creating transaction after the cursor is created, or by future transactions, are not visible; modifications that the transaction did before the cursor was created are visible.

#12701: "Support >4GB buffer pool and log files on 64-bit Windows"
 Do not call os_file_create_tmpfile() at runtime. Instead, create all tempfiles at startup and guard access to them with mutexes. (A minimal standalone sketch of this shared-tempfile pattern follows this changelog.)

#13778: "If FOREIGN_KEY_CHECKS=0, one can create inconsistent FOREIGN KEYs"
 When FOREIGN_KEY_CHECKS=0 we still need to check that the datatypes of foreign key references are compatible.

#14189: "VARBINARY and BINARY variables: trailing space ignored with InnoDB"
 innobase_init(): Assert that DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number.
 dtype_get_pad_char(): Do not pad VARBINARY or BINARY columns.
 row_ins_cascade_calc_update_vec(): Refuse ON UPDATE CASCADE when trying to change the length of a VARBINARY column that refers to or is referenced by a BINARY column. BINARY columns are no longer padded on comparison, and thus they cannot be padded on storage either. (A minimal sketch of this padding decision follows the diffstat.)

#14747: "Race condition can cause btr_search_drop_page_hash_index() to crash"
 Note that buf_block_t::index should be protected by btr_search_latch or an s-latch or x-latch on the index page.
 btr_search_drop_page_hash_index(): Read block->index while holding btr_search_latch and use the cached value in the loop. Remove some redundant assertions.

#15108: "mysqld crashes when innodb_log_file_size is set > 4G"

#15308: "Problem of Order with Enum Column in Primary Key"

#15550: "mysqld crashes in printing a FOREIGN KEY error in InnoDB"
 row_ins_foreign_report_add_err(): When printing the parent record, use the index in the parent table rather than the index in the child table.

#15653: "Slow inserts to InnoDB if many thousands of .ibd files"
 Keep track of unflushed modifications to file spaces. When there are tens of thousands of file spaces, flushing all files in fil_flush_file_spaces() would be very slow.
 fil_flush_file_spaces(): Only flush unflushed file spaces.
 fil_space_t, fil_system_t: Add a list of unflushed spaces.

#15991: "innodb-file-per-table + symlink database + rename = cr"
 os_file_handle_error(): Map the error codes EXDEV, ENOTDIR, and EISDIR to the new code OS_FILE_PATH_ERROR. Treat this code as OS_FILE_PATH_ERROR. This fixes the crash on RENAME TABLE when the .ibd file is a symbolic link to a different file system.

#16157: "InnoDB crashes when main location settings are empty"
 This patch is from Heikki.

#16298: "InnoDB segfaults in INSERTs in upgrade of 4.0 -> 5.0 tables with VARCHAR BINARY"
 dict_load_columns(): Set the charset-collation code DATA_MYSQL_BINARY_CHARSET_COLL for those binary string columns that lack a charset-collation code, i.e., tables created with a version of MySQL/InnoDB older than 4.1.2.

#16229: "MySQL/InnoDB uses full explicit table locks in trigger processing"
 Take an InnoDB table lock only if the user has explicitly requested a table lock. Added some additional comments to store_lock() and external_lock().

#16387: "InnoDB crash when dropping a foreign key <table>_ibfk_0"
 Do not mistake TABLENAME_ibfk_0 for an auto-generated id.
 dict_table_get_highest_foreign_id(): Ignore foreign constraint identifiers starting with the pattern TABLENAME_ibfk_0.

#16582: "InnoDB: Error in an adaptive hash index pointer to page"
 Account for a race condition when dropping the adaptive hash index for a B-tree page.
 btr_search_drop_page_hash_index(): Retry the operation if a hash index with different parameters was built meanwhile. Add diagnostics for the case that hash node pointers to the page remain.
 btr_search_info_update_hash(), btr_search_info_update_slow(): Document the parameter "info" as in/out.

#16814: "SHOW INNODB STATUS format error in LATEST FOREIGN KEY ERROR section"
 Add a missing newline to the LATEST FOREIGN KEY ERROR section in SHOW INNODB STATUS output.
 dict_foreign_error_report(): Always print a newline after invoking dict_print_info_on_foreign_key_in_create_format().

#16827: "Better InnoDB error message if ibdata files omitted from my.cnf"

#17126: "CHECK TABLE on InnoDB causes a short hang during check of adaptive hash"
 Fix CHECK TABLE blocking other queries by releasing the btr_search_latch periodically during the adaptive hash table validation.

#17405: "Valgrind: conditional jump or move depends on uninitialised values"
 buf_block_init(): Reset magic_n, buf_fix_count and io_fix to avoid testing uninitialized variables.

mysql-test/r/innodb.result: Applied innodb-5.1-ss269 snapshot. mysql-test/t/innodb.test: Applied innodb-5.1-ss269 snapshot. sql/ha_innodb.cc: Applied innodb-5.1-ss269 snapshot. sql/ha_innodb.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/btr/btr0btr.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/btr/btr0cur.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/btr/btr0pcur.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/btr/btr0sea.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/buf/buf0buf.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/buf/buf0flu.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/buf/buf0lru.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/buf/buf0rea.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/data/data0data.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/data/data0type.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dict/dict0boot.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dict/dict0crea.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dict/dict0dict.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dict/dict0load.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dict/dict0mem.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/dyn/dyn0dyn.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/eval/eval0eval.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/eval/eval0proc.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/fil/fil0fil.c: Applied innodb-5.1-ss269 snapshot. 
storage/innobase/fsp/fsp0fsp.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/fut/fut0lst.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ha/ha0ha.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ha/hash0hash.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ibuf/ibuf0ibuf.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0btr.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0btr.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0cur.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0cur.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0pcur.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0pcur.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0sea.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0sea.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/btr0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/buf0buf.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/buf0buf.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/buf0flu.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/buf0flu.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/buf0lru.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/data0data.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/data0data.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/data0type.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/data0type.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/db0err.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0boot.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0boot.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0crea.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0dict.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0dict.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0load.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dict0mem.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dyn0dyn.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/dyn0dyn.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/eval0eval.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/eval0eval.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/eval0proc.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/eval0proc.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/fil0fil.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/fsp0fsp.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/fut0lst.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ha0ha.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/hash0hash.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/hash0hash.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ibuf0ibuf.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ibuf0ibuf.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/lock0lock.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/lock0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/log0log.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/log0log.ic: Applied innodb-5.1-ss269 snapshot. 
storage/innobase/include/log0recv.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mach0data.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mach0data.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mem0dbg.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mem0dbg.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mem0mem.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mem0mem.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mem0pool.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mtr0log.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mtr0mtr.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/mtr0mtr.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/os0file.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/os0proc.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/os0sync.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/os0sync.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/os0thread.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/page0cur.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/page0cur.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/page0page.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/page0page.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/page0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/pars0grm.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/pars0opt.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/pars0pars.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/pars0sym.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/pars0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/que0que.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/que0que.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/que0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/read0read.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/read0read.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/rem0cmp.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/rem0cmp.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/rem0rec.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/rem0rec.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0ins.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0mysql.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0purge.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0row.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0row.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0sel.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0sel.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0uins.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0umod.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0undo.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0upd.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0upd.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/row0vers.h: Applied innodb-5.1-ss269 snapshot. 
storage/innobase/include/srv0srv.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/srv0start.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/sync0arr.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/sync0rw.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/sync0rw.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/sync0sync.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/sync0sync.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0purge.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0purge.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0rec.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0roll.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0rseg.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0rseg.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0sys.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0sys.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0trx.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0trx.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0undo.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0undo.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/trx0xa.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/univ.i: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/usr0sess.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/usr0types.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0byte.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0byte.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0dbg.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0lst.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0mem.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0mem.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0rnd.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0rnd.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0sort.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0ut.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/include/ut0ut.ic: Applied innodb-5.1-ss269 snapshot. storage/innobase/lock/lock0lock.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/log/log0log.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/log/log0recv.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mach/mach0data.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mem/mem0dbg.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mem/mem0mem.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mem/mem0pool.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mtr/mtr0log.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/mtr/mtr0mtr.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/os/os0file.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/os/os0proc.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/os/os0sync.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/os/os0thread.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/page/page0cur.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/page/page0page.c: Applied innodb-5.1-ss269 snapshot. 
storage/innobase/pars/lexyy.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0grm.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0grm.h: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0grm.y: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0lex.l: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0opt.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0pars.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/pars/pars0sym.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/que/que0que.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/read/read0read.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/rem/rem0cmp.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/rem/rem0rec.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0ins.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0mysql.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0purge.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0row.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0sel.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0uins.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0umod.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0undo.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0upd.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/row/row0vers.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/srv/srv0que.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/srv/srv0srv.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/srv/srv0start.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/sync/sync0arr.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/sync/sync0rw.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/sync/sync0sync.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/thr/thr0loc.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0purge.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0rec.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0roll.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0rseg.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0sys.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0trx.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/trx/trx0undo.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/usr/usr0sess.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ut/ut0byte.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ut/ut0dbg.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ut/ut0mem.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ut/ut0rnd.c: Applied innodb-5.1-ss269 snapshot. storage/innobase/ut/ut0ut.c: Applied innodb-5.1-ss269 snapshot. mysql-test/r/innodb_unsafe_binlog.result: New BitKeeper file ``mysql-test/r/innodb_unsafe_binlog.result'' mysql-test/t/innodb_unsafe_binlog-master.opt: New BitKeeper file ``mysql-test/t/innodb_unsafe_binlog-master.opt'' mysql-test/t/innodb_unsafe_binlog.test: New BitKeeper file ``mysql-test/t/innodb_unsafe_binlog.test'' storage/innobase/pars/make_bison.sh: New BitKeeper file ``storage/innobase/pars/make_bison.sh''
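For bug #12701 above, the fix replaces runtime calls to os_file_create_tmpfile() with temp files created once at startup and serialized with a mutex; the row_ins_set_detailed() hunk in row0ins.c below shows the change using srv_misc_tmpfile and srv_misc_tmpfile_mutex. The following is a minimal standalone C sketch of that pattern, written against POSIX primitives rather than InnoDB's own mutex and file wrappers; the names shared_tmpfile, shared_tmpfile_mutex, tmpfile_init and report_error are illustrative, not InnoDB identifiers.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* One temp file created at startup instead of one per error report
   (illustrative stand-ins for InnoDB's srv_misc_tmpfile and
   srv_misc_tmpfile_mutex). */
static FILE*           shared_tmpfile;
static pthread_mutex_t shared_tmpfile_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Called once during startup, analogous to creating all tempfiles
   before any query runs. */
static void tmpfile_init(void)
{
	shared_tmpfile = tmpfile();
	if (shared_tmpfile == NULL) {
		perror("tmpfile");
		exit(EXIT_FAILURE);
	}
}

/* A code path that formerly created its own temp file now rewinds,
   truncates and reuses the shared one under the mutex. */
static void report_error(const char* msg)
{
	pthread_mutex_lock(&shared_tmpfile_mutex);

	rewind(shared_tmpfile);
	if (ftruncate(fileno(shared_tmpfile), 0) == 0) {
		fprintf(shared_tmpfile, "%s\n", msg);
		fflush(shared_tmpfile);
		/* ...the caller would now read the file back into a
		   per-transaction error buffer... */
	}

	pthread_mutex_unlock(&shared_tmpfile_mutex);
}

int main(void)
{
	tmpfile_init();
	report_error("example foreign key error text");
	return 0;
}

Built with cc -pthread, this mirrors the serialization that bug #12701 introduces around the startup-created temp files.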
Diffstat (limited to 'storage/innobase/row')
-rw-r--r--  storage/innobase/row/row0ins.c    394
-rw-r--r--  storage/innobase/row/row0mysql.c  714
-rw-r--r--  storage/innobase/row/row0purge.c   82
-rw-r--r--  storage/innobase/row/row0row.c     71
-rw-r--r--  storage/innobase/row/row0sel.c    769
-rw-r--r--  storage/innobase/row/row0uins.c    44
-rw-r--r--  storage/innobase/row/row0umod.c   106
-rw-r--r--  storage/innobase/row/row0undo.c    42
-rw-r--r--  storage/innobase/row/row0upd.c    271
-rw-r--r--  storage/innobase/row/row0vers.c    74
10 files changed, 1312 insertions, 1255 deletions
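Bug #14189 shows up in the row_ins_cascade_calc_update_vec() hunk of row0ins.c below: before space-padding a shorter value for a cascaded update, the fix returns ULINT_UNDEFINED when the column uses the binary charset-collation, and the caller turns that into DB_ROW_IS_REFERENCED. A minimal standalone C sketch of that decision, assuming invented stand-ins pad_for_cascade() and COLL_BINARY for InnoDB's dtype machinery:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for InnoDB charset-collation codes. */
enum collation { COLL_LATIN1, COLL_BINARY };

/* Pad 'value' (currently 'len' bytes) with spaces up to 'min_size' bytes,
   as a fixed-length CHAR column requires.  Refuse (return false) when the
   column is BINARY: binary values are compared byte for byte, so padding
   would change their meaning. */
static bool pad_for_cascade(char* value, size_t len, size_t min_size,
                            enum collation coll)
{
	if (len >= min_size) {
		return true;                            /* nothing to pad */
	}
	if (coll == COLL_BINARY) {
		return false;                           /* do not pad BINARY columns */
	}
	memset(value + len, 0x20, min_size - len);      /* space = 0x20 */
	return true;
}

int main(void)
{
	char buf[8] = "ab";                                      /* a CHAR(8) value */
	printf("%d\n", pad_for_cascade(buf, 2, 8, COLL_LATIN1)); /* 1: padded  */
	printf("%d\n", pad_for_cascade(buf, 2, 8, COLL_BINARY)); /* 0: refused */
	return 0;
}

In the hunk below, the refusal propagates as ULINT_UNDEFINED from row_ins_cascade_calc_update_vec() to row_ins_foreign_check_on_constraint(), which reports the error and returns DB_ROW_IS_REFERENCED.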
diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c
index 128b46f9aa4..c80a61cc729 100644
--- a/storage/innobase/row/row0ins.c
+++ b/storage/innobase/row/row0ins.c
@@ -51,15 +51,15 @@ innobase_invalidate_query_cache(
chars count */
/**********************************************************************
-This function returns true if
+This function returns true if
1) SQL-query in the current thread
-is either REPLACE or LOAD DATA INFILE REPLACE.
+is either REPLACE or LOAD DATA INFILE REPLACE.
2) SQL-query in the current thread
is INSERT ON DUPLICATE KEY UPDATE.
-NOTE that /mysql/innobase/row/row0ins.c must contain the
+NOTE that /mysql/innobase/row/row0ins.c must contain the
prototype for this function ! */
ibool
@@ -73,7 +73,7 @@ ins_node_create(
/*============*/
/* out, own: insert node struct */
ulint ins_type, /* in: INS_VALUES, ... */
- dict_table_t* table, /* in: table where to insert */
+ dict_table_t* table, /* in: table where to insert */
mem_heap_t* heap) /* in: mem heap where created */
{
ins_node_t* node;
@@ -90,13 +90,13 @@ ins_node_create(
node->entry = NULL;
node->select = NULL;
-
+
node->trx_id = ut_dulint_zero;
-
+
node->entry_sys_heap = mem_heap_create(128);
- node->magic_n = INS_NODE_MAGIC_N;
-
+ node->magic_n = INS_NODE_MAGIC_N;
+
return(node);
}
@@ -116,7 +116,7 @@ ins_node_create_entry_list(
UT_LIST_INIT(node->entry_list);
index = dict_table_get_first_index(node->table);
-
+
while (index != NULL) {
entry = row_build_index_entry(node->row, index,
node->entry_sys_heap);
@@ -152,11 +152,11 @@ row_ins_alloc_sys_fields(
/* 1. Allocate buffer for row id */
col = dict_table_get_sys_col(table, DATA_ROW_ID);
-
+
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_alloc(heap, DATA_ROW_ID_LEN);
-
+
dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN);
node->row_id_buf = ptr;
@@ -166,12 +166,12 @@ row_ins_alloc_sys_fields(
/* 2. Fill in the dfield for mix id */
col = dict_table_get_sys_col(table, DATA_MIX_ID);
-
+
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
len = mach_dulint_get_compressed_size(table->mix_id);
ptr = mem_heap_alloc(heap, DATA_MIX_ID_LEN);
-
+
mach_dulint_write_compressed(ptr, table->mix_id);
dfield_set_data(dfield, ptr, len);
}
@@ -179,10 +179,10 @@ row_ins_alloc_sys_fields(
/* 3. Allocate buffer for trx id */
col = dict_table_get_sys_col(table, DATA_TRX_ID);
-
+
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_alloc(heap, DATA_TRX_ID_LEN);
-
+
dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN);
node->trx_id_buf = ptr;
@@ -190,10 +190,10 @@ row_ins_alloc_sys_fields(
/* 4. Allocate buffer for roll ptr */
col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
-
+
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
ptr = mem_heap_alloc(heap, DATA_ROLL_PTR_LEN);
-
+
dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
}
@@ -217,7 +217,7 @@ ins_node_set_new_row(
mem_heap_empty(node->entry_sys_heap);
/* Create templates for index entries */
-
+
ins_node_create_entry_list(node);
/* Allocate from entry_sys_heap buffers for sys fields */
@@ -252,19 +252,20 @@ row_ins_sec_index_entry_by_modify(
upd_t* update;
rec_t* rec;
ulint err;
-
+
rec = btr_cur_get_rec(cursor);
-
+
ut_ad((cursor->index->type & DICT_CLUSTERED) == 0);
- ut_ad(rec_get_deleted_flag(rec, cursor->index->table->comp));
-
+ ut_ad(rec_get_deleted_flag(rec,
+ dict_table_is_comp(cursor->index->table)));
+
/* We know that in the alphabetical ordering, entry and rec are
identified. But in their binary form there may be differences if
there are char fields in them. Therefore we have to calculate the
difference. */
-
+
heap = mem_heap_create(1024);
-
+
update = row_upd_build_sec_rec_difference_binary(cursor->index,
entry, rec, thr_get_trx(thr), heap);
if (mode == BTR_MODIFY_LEAF) {
@@ -276,7 +277,7 @@ row_ins_sec_index_entry_by_modify(
if (err == DB_OVERFLOW || err == DB_UNDERFLOW) {
err = DB_FAIL;
}
- } else {
+ } else {
ut_a(mode == BTR_MODIFY_TREE);
err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG, cursor,
&dummy_big_rec, update, 0, thr, mtr);
@@ -314,21 +315,22 @@ row_ins_clust_index_entry_by_modify(
rec_t* rec;
upd_t* update;
ulint err;
-
+
ut_ad(cursor->index->type & DICT_CLUSTERED);
-
+
*big_rec = NULL;
rec = btr_cur_get_rec(cursor);
- ut_ad(rec_get_deleted_flag(rec, cursor->index->table->comp));
+ ut_ad(rec_get_deleted_flag(rec,
+ dict_table_is_comp(cursor->index->table)));
heap = mem_heap_create(1024);
-
+
/* Build an update vector containing all the fields to be modified;
NOTE that this vector may NOT contain system columns trx_id or
roll_ptr */
-
+
update = row_upd_build_difference_binary(cursor->index, entry, ext_vec,
n_ext_vec, rec, thr_get_trx(thr), heap);
if (mode == BTR_MODIFY_LEAF) {
@@ -340,12 +342,12 @@ row_ins_clust_index_entry_by_modify(
if (err == DB_OVERFLOW || err == DB_UNDERFLOW) {
err = DB_FAIL;
}
- } else {
+ } else {
ut_a(mode == BTR_MODIFY_TREE);
err = btr_cur_pessimistic_update(0, cursor, big_rec, update,
0, thr, mtr);
}
-
+
mem_heap_free(heap);
return(err);
@@ -366,7 +368,7 @@ row_ins_cascade_ancestor_updates_table(
upd_node_t* upd_node;
parent = que_node_get_parent(node);
-
+
while (que_node_get_type(parent) == QUE_NODE_UPDATE) {
upd_node = parent;
@@ -383,7 +385,7 @@ row_ins_cascade_ancestor_updates_table(
return(FALSE);
}
-
+
/*************************************************************************
Returns the number of ancestor UPDATE or DELETE nodes of a
cascaded update/delete node. */
@@ -398,7 +400,7 @@ row_ins_cascade_n_ancestors(
ulint n_ancestors = 0;
parent = que_node_get_parent(node);
-
+
while (que_node_get_type(parent) == QUE_NODE_UPDATE) {
n_ancestors++;
@@ -409,7 +411,7 @@ row_ins_cascade_n_ancestors(
return(n_ancestors);
}
-
+
/**********************************************************************
Calculates the update vector node->cascade->update for a child table in
a cascaded update. */
@@ -442,15 +444,15 @@ row_ins_cascade_calc_update_vec(
upd_t* parent_update;
upd_field_t* parent_ufield;
ulint n_fields_updated;
- ulint parent_field_no;
+ ulint parent_field_no;
dtype_t* type;
ulint i;
ulint j;
-
+
ut_a(node && foreign && cascade && table && index);
/* Calculate the appropriate update vector which will set the fields
- in the child index record to the same value (possibly padded with
+ in the child index record to the same value (possibly padded with
spaces if the column is a fixed length CHAR or FIXBINARY column) as
the referenced index record will get in the update. */
@@ -458,12 +460,12 @@ row_ins_cascade_calc_update_vec(
ut_a(parent_table == foreign->referenced_table);
parent_index = foreign->referenced_index;
parent_update = node->update;
-
+
update = cascade->update;
update->info_bits = 0;
update->n_fields = foreign->n_fields;
-
+
n_fields_updated = 0;
for (i = 0; i < foreign->n_fields; i++) {
@@ -475,7 +477,7 @@ row_ins_cascade_calc_update_vec(
for (j = 0; j < parent_update->n_fields; j++) {
parent_ufield = parent_update->fields + j;
-
+
if (parent_ufield->field_no == parent_field_no) {
ulint min_size;
@@ -484,7 +486,7 @@ row_ins_cascade_calc_update_vec(
updated. Let us make the update vector
field for the child table. */
- ufield = update->fields + n_fields_updated;
+ ufield = update->fields + n_fields_updated;
ufield->field_no =
dict_table_get_nth_col_pos(table,
@@ -499,20 +501,20 @@ row_ins_cascade_calc_update_vec(
updated as NULL */
if (ufield->new_val.len == UNIV_SQL_NULL
- && (type->prtype & DATA_NOT_NULL)) {
+ && (type->prtype & DATA_NOT_NULL)) {
- return(ULINT_UNDEFINED);
+ return(ULINT_UNDEFINED);
}
/* If the new value would not fit in the
column, do not allow the update */
if (ufield->new_val.len != UNIV_SQL_NULL
- && dtype_get_at_most_n_mbchars(
+ && dtype_get_at_most_n_mbchars(
type, dtype_get_len(type),
ufield->new_val.len,
ufield->new_val.data)
- < ufield->new_val.len) {
+ < ufield->new_val.len) {
return(ULINT_UNDEFINED);
}
@@ -525,12 +527,12 @@ row_ins_cascade_calc_update_vec(
min_size = dtype_get_min_size(type);
if (min_size
- && ufield->new_val.len != UNIV_SQL_NULL
- && ufield->new_val.len < min_size) {
+ && ufield->new_val.len != UNIV_SQL_NULL
+ && ufield->new_val.len < min_size) {
char* pad_start;
const char* pad_end;
- ufield->new_val.data =
+ ufield->new_val.data =
mem_heap_alloc(heap,
min_size);
pad_start =
@@ -549,6 +551,15 @@ row_ins_cascade_calc_update_vec(
default:
ut_error;
case 1:
+ if (UNIV_UNLIKELY(
+ dtype_get_charset_coll(
+ dtype_get_prtype(type))
+ == DATA_MYSQL_BINARY_CHARSET_COLL)) {
+ /* Do not pad BINARY
+ columns. */
+ return(ULINT_UNDEFINED);
+ }
+
/* space=0x20 */
memset(pad_start, 0x20,
pad_end - pad_start);
@@ -588,20 +599,21 @@ row_ins_set_detailed(
trx_t* trx, /* in: transaction */
dict_foreign_t* foreign) /* in: foreign key constraint */
{
-
- FILE* tf = os_file_create_tmpfile();
-
- if (tf) {
- ut_print_name(tf, trx, foreign->foreign_table_name);
- dict_print_info_on_foreign_key_in_create_format(tf, trx,
- foreign, FALSE);
-
- trx_set_detailed_error_from_file(trx, tf);
-
- fclose(tf);
+ mutex_enter(&srv_misc_tmpfile_mutex);
+ rewind(srv_misc_tmpfile);
+
+ if (os_file_set_eof(srv_misc_tmpfile)) {
+ ut_print_name(srv_misc_tmpfile, trx,
+ foreign->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ srv_misc_tmpfile,
+ trx, foreign, FALSE);
+ trx_set_detailed_error_from_file(trx, srv_misc_tmpfile);
} else {
- trx_set_detailed_error(trx, "temp file creation failed");
+ trx_set_detailed_error(trx, "temp file operation failed");
}
+
+ mutex_exit(&srv_misc_tmpfile_mutex);
}
/*************************************************************************
@@ -677,9 +689,9 @@ row_ins_foreign_report_add_err(
child table */
{
FILE* ef = dict_foreign_err_file;
-
+
row_ins_set_detailed(trx, foreign);
-
+
mutex_enter(&dict_foreign_err_mutex);
rewind(ef);
ut_print_timestamp(ef);
@@ -709,7 +721,7 @@ row_ins_foreign_report_add_err(
}
if (rec) {
- rec_print(ef, rec, foreign->foreign_index);
+ rec_print(ef, rec, foreign->referenced_index);
}
putc('\n', ef);
@@ -803,7 +815,7 @@ row_ins_foreign_check_on_constraint(
thr, foreign,
btr_pcur_get_rec(pcur), entry);
- return(DB_ROW_IS_REFERENCED);
+ return(DB_ROW_IS_REFERENCED);
}
if (!node->is_delete && 0 == (foreign->type &
@@ -811,12 +823,12 @@ row_ins_foreign_check_on_constraint(
| DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
/* This is an UPDATE */
-
+
row_ins_foreign_report_err("Trying to update",
thr, foreign,
btr_pcur_get_rec(pcur), entry);
- return(DB_ROW_IS_REFERENCED);
+ return(DB_ROW_IS_REFERENCED);
}
if (node->cascade_node == NULL) {
@@ -836,13 +848,13 @@ row_ins_foreign_check_on_constraint(
several child tables to the table where the delete is done! */
cascade = node->cascade_node;
-
+
cascade->table = table;
cascade->foreign = foreign;
-
+
if (node->is_delete
- && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) {
+ && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) {
cascade->is_delete = TRUE;
} else {
cascade->is_delete = FALSE;
@@ -865,12 +877,12 @@ row_ins_foreign_check_on_constraint(
of the parent table in an inconsistent state! */
if (!cascade->is_delete
- && row_ins_cascade_ancestor_updates_table(cascade, table)) {
+ && row_ins_cascade_ancestor_updates_table(cascade, table)) {
- /* We do not know if this would break foreign key
- constraints, but play safe and return an error */
+ /* We do not know if this would break foreign key
+ constraints, but play safe and return an error */
- err = DB_ROW_IS_REFERENCED;
+ err = DB_ROW_IS_REFERENCED;
row_ins_foreign_report_err(
"Trying an update, possibly causing a cyclic cascaded update\n"
@@ -892,13 +904,13 @@ row_ins_foreign_check_on_constraint(
index = btr_pcur_get_btr_cur(pcur)->index;
ut_a(index == foreign->foreign_index);
-
+
rec = btr_pcur_get_rec(pcur);
if (index->type & DICT_CLUSTERED) {
/* pcur is already positioned in the clustered index of
the child table */
-
+
clust_index = index;
clust_rec = rec;
} else {
@@ -908,7 +920,7 @@ row_ins_foreign_check_on_constraint(
clust_index = dict_table_get_first_index(table);
tmp_heap = mem_heap_create(256);
-
+
ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec,
tmp_heap);
btr_pcur_open_with_no_init(clust_index, ref,
@@ -918,8 +930,8 @@ row_ins_foreign_check_on_constraint(
clust_rec = btr_pcur_get_rec(cascade->pcur);
if (!page_rec_is_user_rec(clust_rec)
- || btr_pcur_get_low_match(cascade->pcur)
- < dict_index_get_n_unique(clust_index)) {
+ || btr_pcur_get_low_match(cascade->pcur)
+ < dict_index_get_n_unique(clust_index)) {
fputs(
"InnoDB: error in cascade of a foreign key op\n"
@@ -953,26 +965,26 @@ row_ins_foreign_check_on_constraint(
err = lock_clust_rec_read_check_and_lock_alt(0, clust_rec,
clust_index, LOCK_X, LOCK_REC_NOT_GAP, thr);
}
-
+
if (err != DB_SUCCESS) {
goto nonstandard_exit_func;
}
- if (rec_get_deleted_flag(clust_rec, table->comp)) {
+ if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) {
/* This can happen if there is a circular reference of
rows such that cascading delete comes to delete a row
already in the process of being delete marked */
- err = DB_SUCCESS;
+ err = DB_SUCCESS;
goto nonstandard_exit_func;
}
if ((node->is_delete
- && (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL))
- || (!node->is_delete
- && (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
-
+ && (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL))
+ || (!node->is_delete
+ && (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
+
/* Build the appropriate update vector which sets
foreign->n_fields first fields in rec to SQL NULL */
@@ -980,7 +992,7 @@ row_ins_foreign_check_on_constraint(
update->info_bits = 0;
update->n_fields = foreign->n_fields;
-
+
for (i = 0; i < foreign->n_fields; i++) {
(update->fields + i)->field_no
= dict_table_get_nth_col_pos(table,
@@ -993,7 +1005,7 @@ row_ins_foreign_check_on_constraint(
}
if (!node->is_delete
- && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {
+ && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {
/* Build the appropriate update vector which sets changing
foreign->n_fields first fields in rec to new values */
@@ -1001,9 +1013,9 @@ row_ins_foreign_check_on_constraint(
upd_vec_heap = mem_heap_create(256);
n_to_update = row_ins_cascade_calc_update_vec(node, foreign,
- upd_vec_heap);
+ upd_vec_heap);
if (n_to_update == ULINT_UNDEFINED) {
- err = DB_ROW_IS_REFERENCED;
+ err = DB_ROW_IS_REFERENCED;
row_ins_foreign_report_err(
"Trying a cascaded update where the updated value in the child\n"
@@ -1011,7 +1023,7 @@ row_ins_foreign_check_on_constraint(
"be NULL and the column is declared as not NULL in the child table,",
thr, foreign, btr_pcur_get_rec(pcur), entry);
- goto nonstandard_exit_func;
+ goto nonstandard_exit_func;
}
if (cascade->update->n_fields == 0) {
@@ -1020,29 +1032,29 @@ row_ins_foreign_check_on_constraint(
to in this foreign key constraint: no need to do
anything */
- err = DB_SUCCESS;
+ err = DB_SUCCESS;
- goto nonstandard_exit_func;
+ goto nonstandard_exit_func;
}
}
-
+
/* Store pcur position and initialize or store the cascade node
pcur stored position */
-
+
btr_pcur_store_position(pcur, mtr);
-
+
if (index == clust_index) {
btr_pcur_copy_stored_position(cascade->pcur, pcur);
} else {
btr_pcur_store_position(cascade->pcur, mtr);
}
-
+
mtr_commit(mtr);
ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON);
cascade->state = UPD_NODE_UPDATE_CLUSTERED;
-
+
err = row_update_cascade_for_mysql(thr, cascade,
foreign->foreign_table);
@@ -1065,7 +1077,7 @@ row_ins_foreign_check_on_constraint(
mtr_start(mtr);
/* Restore pcur position */
-
+
btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);
if (tmp_heap) {
@@ -1073,7 +1085,7 @@ row_ins_foreign_check_on_constraint(
}
if (upd_vec_heap) {
- mem_heap_free(upd_vec_heap);
+ mem_heap_free(upd_vec_heap);
}
return(err);
@@ -1084,7 +1096,7 @@ nonstandard_exit_func:
}
if (upd_vec_heap) {
- mem_heap_free(upd_vec_heap);
+ mem_heap_free(upd_vec_heap);
}
btr_pcur_store_position(pcur, mtr);
@@ -1105,12 +1117,12 @@ ulint
row_ins_set_shared_rec_lock(
/*========================*/
/* out: DB_SUCCESS or error code */
- ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
+ ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
- que_thr_t* thr) /* in: query thread */
+ que_thr_t* thr) /* in: query thread */
{
ulint err;
@@ -1127,20 +1139,21 @@ row_ins_set_shared_rec_lock(
return(err);
}
+#ifndef UNIV_HOTBACKUP
/*************************************************************************
Sets a exclusive lock on a record. Used in locking possible duplicate key
records */
static
ulint
row_ins_set_exclusive_rec_lock(
-/*============================*/
+/*===========================*/
/* out: DB_SUCCESS or error code */
- ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
+ ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
- que_thr_t* thr) /* in: query thread */
+ que_thr_t* thr) /* in: query thread */
{
ulint err;
@@ -1156,7 +1169,8 @@ row_ins_set_exclusive_rec_lock(
return(err);
}
-
+#endif /* !UNIV_HOTBACKUP */
+
/*******************************************************************
Checks if foreign key constraint fails for an index entry. Sets shared locks
which lock either the success or the failure of the constraint. NOTE that
@@ -1179,7 +1193,7 @@ row_ins_check_foreign_constraint(
dtuple_t* entry, /* in: index entry for index */
que_thr_t* thr) /* in: query thread */
{
- upd_node_t* upd_node;
+ upd_node_t* upd_node;
dict_table_t* check_table;
dict_index_t* check_index;
ulint n_fields_cmp;
@@ -1215,17 +1229,17 @@ run_again:
for (i = 0; i < foreign->n_fields; i++) {
if (UNIV_SQL_NULL == dfield_get_len(
- dtuple_get_nth_field(entry, i))) {
+ dtuple_get_nth_field(entry, i))) {
goto exit_func;
}
}
if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) {
- upd_node = thr->run_node;
+ upd_node = thr->run_node;
- if (!(upd_node->is_delete) && upd_node->foreign == foreign) {
- /* If a cascaded update is done as defined by a
+ if (!(upd_node->is_delete) && upd_node->foreign == foreign) {
+ /* If a cascaded update is done as defined by a
foreign key constraint, do not check that
constraint for the child row. In ON UPDATE CASCADE
the update of the parent row is only half done when
@@ -1258,7 +1272,7 @@ run_again:
FILE* ef = dict_foreign_err_file;
row_ins_set_detailed(trx, foreign);
-
+
mutex_enter(&dict_foreign_err_mutex);
rewind(ef);
ut_print_timestamp(ef);
@@ -1289,7 +1303,7 @@ run_again:
if (check_table != table) {
/* We already have a LOCK_IX on table, but not necessarily
on check_table */
-
+
err = lock_table(0, check_table, LOCK_IS, thr);
if (err != DB_SUCCESS) {
@@ -1320,7 +1334,7 @@ run_again:
goto next_rec;
}
-
+
offsets = rec_get_offsets(rec, check_index,
offsets, ULINT_UNDEFINED, &heap);
@@ -1352,7 +1366,7 @@ run_again:
/* Found a matching record. Lock only
a record because we can allow inserts
into gaps */
-
+
err = row_ins_set_shared_rec_lock(
LOCK_REC_NOT_GAP, rec,
check_index, offsets, thr);
@@ -1362,7 +1376,7 @@ run_again:
break;
}
- if (check_ref) {
+ if (check_ref) {
err = DB_SUCCESS;
break;
@@ -1387,9 +1401,9 @@ run_again:
key errors resulting from
FK constraints to a
separate error code. */
-
+
if (err == DB_DUPLICATE_KEY) {
- err = DB_FOREIGN_DUPLICATE_KEY;
+ err = DB_FOREIGN_DUPLICATE_KEY;
}
break;
@@ -1413,7 +1427,7 @@ run_again:
break;
}
- if (check_ref) {
+ if (check_ref) {
err = DB_NO_REFERENCED_ROW;
row_ins_foreign_report_add_err(
trx, foreign, rec, entry);
@@ -1429,7 +1443,7 @@ next_rec:
moved = btr_pcur_move_to_next(&pcur, &mtr);
if (!moved) {
- if (check_ref) {
+ if (check_ref) {
rec = btr_pcur_get_rec(&pcur);
row_ins_foreign_report_add_err(
trx, foreign, rec, entry);
@@ -1456,10 +1470,10 @@ do_possible_lock_wait:
que_thr_stop_for_mysql(thr);
srv_suspend_mysql_thread(thr);
-
+
if (trx->error_state == DB_SUCCESS) {
- goto run_again;
+ goto run_again;
}
err = trx->error_state;
@@ -1542,7 +1556,7 @@ row_ins_check_foreign_constraints(
if (got_s_lock) {
row_mysql_unfreeze_data_dictionary(trx);
}
-
+
if (err != DB_SUCCESS) {
return(err);
}
@@ -1554,6 +1568,7 @@ row_ins_check_foreign_constraints(
return(DB_SUCCESS);
}
+#ifndef UNIV_HOTBACKUP
/*******************************************************************
Checks if a unique key violation to rec would occur at the index entry
insert. */
@@ -1572,7 +1587,7 @@ row_ins_dupl_error_with_rec(
ulint matched_fields;
ulint matched_bytes;
ulint n_unique;
- ulint i;
+ ulint i;
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -1586,7 +1601,7 @@ row_ins_dupl_error_with_rec(
if (matched_fields < n_unique) {
- return(FALSE);
+ return(FALSE);
}
/* In a unique secondary index we allow equal key values if they
@@ -1594,17 +1609,18 @@ row_ins_dupl_error_with_rec(
if (!(index->type & DICT_CLUSTERED)) {
- for (i = 0; i < n_unique; i++) {
- if (UNIV_SQL_NULL == dfield_get_len(
- dtuple_get_nth_field(entry, i))) {
+ for (i = 0; i < n_unique; i++) {
+ if (UNIV_SQL_NULL == dfield_get_len(
+ dtuple_get_nth_field(entry, i))) {
- return(FALSE);
- }
- }
+ return(FALSE);
+ }
+ }
}
return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
-}
+}
+#endif /* !UNIV_HOTBACKUP */
/*******************************************************************
Scans a unique non-clustered index at a given index entry to determine
@@ -1643,7 +1659,7 @@ row_ins_scan_sec_index_for_duplicate(
for (i = 0; i < n_unique; i++) {
if (UNIV_SQL_NULL == dfield_get_len(
- dtuple_get_nth_field(entry, i))) {
+ dtuple_get_nth_field(entry, i))) {
return(DB_SUCCESS);
}
@@ -1656,7 +1672,7 @@ row_ins_scan_sec_index_for_duplicate(
n_fields_cmp = dtuple_get_n_fields_cmp(entry);
dtuple_set_n_fields_cmp(entry, dict_index_get_n_unique(index));
-
+
btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr);
/* Scan index records and check if there is a duplicate */
@@ -1668,17 +1684,17 @@ row_ins_scan_sec_index_for_duplicate(
goto next_rec;
}
-
+
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
if (innobase_query_is_update()) {
/* If the SQL-query will update or replace
- duplicate key we will take X-lock for
- duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+ duplicate key we will take X-lock for
+ duplicates ( REPLACE, LOAD DATAFILE REPLACE,
INSERT ON DUPLICATE KEY UPDATE). */
-
+
err = row_ins_set_exclusive_rec_lock(LOCK_ORDINARY,
rec, index, offsets, thr);
} else {
@@ -1693,7 +1709,7 @@ row_ins_scan_sec_index_for_duplicate(
}
if (page_rec_is_supremum(rec)) {
-
+
goto next_rec;
}
@@ -1737,6 +1753,7 @@ next_rec:
InnoDB Hot Backup builds. Besides, this function should never
be called in InnoDB Hot Backup. */
ut_error;
+ return(DB_FAIL);
#endif /* UNIV_HOTBACKUP */
}
@@ -1768,7 +1785,7 @@ row_ins_duplicate_error_in_clust(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
UT_NOT_USED(mtr);
-
+
ut_a(cursor->index->type & DICT_CLUSTERED);
ut_ad(cursor->index->type & DICT_UNIQUE);
@@ -1779,16 +1796,16 @@ row_ins_duplicate_error_in_clust(
clustered index key fields. For such a secondary index record,
to avoid race condition, we must FIRST do the insertion and after
that check that the uniqueness condition is not breached! */
-
+
/* NOTE: A problem is that in the B-tree node pointers on an
upper level may match more to the entry than the actual existing
user records on the leaf level. So, even if low_match would suggest
that a duplicate key violation may occur, this may not be the case. */
n_unique = dict_index_get_n_unique(cursor->index);
-
+
if (cursor->low_match >= n_unique) {
-
+
rec = btr_cur_get_rec(cursor);
if (!page_rec_is_infimum(rec)) {
@@ -1803,19 +1820,19 @@ row_ins_duplicate_error_in_clust(
if (innobase_query_is_update()) {
/* If the SQL-query will update or replace
- duplicate key we will take X-lock for
- duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+ duplicate key we will take X-lock for
+ duplicates ( REPLACE, LOAD DATAFILE REPLACE,
INSERT ON DUPLICATE KEY UPDATE). */
-
+
err = row_ins_set_exclusive_rec_lock(
LOCK_REC_NOT_GAP,rec,cursor->index,
offsets, thr);
} else {
-
+
err = row_ins_set_shared_rec_lock(
- LOCK_REC_NOT_GAP,rec, cursor->index,
+ LOCK_REC_NOT_GAP,rec, cursor->index,
offsets, thr);
- }
+ }
if (err != DB_SUCCESS) {
goto func_exit;
@@ -1841,8 +1858,8 @@ row_ins_duplicate_error_in_clust(
if (innobase_query_is_update()) {
/* If the SQL-query will update or replace
- duplicate key we will take X-lock for
- duplicates ( REPLACE, LOAD DATAFILE REPLACE,
+ duplicate key we will take X-lock for
+ duplicates ( REPLACE, LOAD DATAFILE REPLACE,
INSERT ON DUPLICATE KEY UPDATE). */
err = row_ins_set_exclusive_rec_lock(
@@ -1880,6 +1897,7 @@ func_exit:
InnoDB Hot Backup builds. Besides, this function should never
be called in InnoDB Hot Backup. */
ut_error;
+ return(DB_FAIL);
#endif /* UNIV_HOTBACKUP */
}
@@ -1902,7 +1920,7 @@ row_ins_must_modify(
{
ulint enough_match;
rec_t* rec;
-
+
/* NOTE: (compare to the note in row_ins_duplicate_error) Because node
pointers on upper levels of the B-tree may match more to entry than
to actual user records on the leaf level, we have to check if the
@@ -1911,7 +1929,7 @@ row_ins_must_modify(
of a secondary index, all fields of the index. */
enough_match = dict_index_get_n_unique_in_tree(cursor->index);
-
+
if (cursor->low_match >= enough_match) {
rec = btr_cur_get_rec(cursor);
@@ -1973,7 +1991,7 @@ row_ins_index_entry_low(
/* Note that we use PAGE_CUR_LE as the search mode, because then
the function will return in both low_match and up_match of the
cursor sensible values */
-
+
if (!(thr_get_trx(thr)->check_unique_secondary)) {
ignore_sec_unique = BTR_IGNORE_SEC_UNIQUE;
}
@@ -2009,7 +2027,7 @@ row_ins_index_entry_low(
if (index->type & DICT_UNIQUE && (cursor.up_match >= n_unique
|| cursor.low_match >= n_unique)) {
- if (index->type & DICT_CLUSTERED) {
+ if (index->type & DICT_CLUSTERED) {
/* Note that the following may return also
DB_LOCK_WAIT */
@@ -2035,11 +2053,11 @@ row_ins_index_entry_low(
prevent any insertion of a duplicate by another
transaction. Let us now reposition the cursor and
continue the insertion. */
-
+
btr_cur_search_to_nth_level(index, 0, entry,
PAGE_CUR_LE, mode | BTR_INSERT,
&cursor, 0, &mtr);
- }
+ }
}
modify = row_ins_must_modify(&cursor);
@@ -2066,7 +2084,7 @@ row_ins_index_entry_low(
entry,
thr, &mtr);
}
-
+
} else {
if (mode == BTR_MODIFY_LEAF) {
err = btr_cur_optimistic_insert(0, &cursor, entry,
@@ -2091,7 +2109,7 @@ function_exit:
if (big_rec) {
rec_t* rec;
mtr_start(&mtr);
-
+
btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
BTR_MODIFY_TREE, &cursor, 0, &mtr);
rec = btr_cur_get_rec(&cursor);
@@ -2177,7 +2195,7 @@ row_ins_index_entry_set_vals(
dfield_t* row_field;
ulint n_fields;
ulint i;
- dtype_t* cur_type;
+ dtype_t* cur_type;
ut_ad(entry && row);
@@ -2191,7 +2209,7 @@ row_ins_index_entry_set_vals(
/* Check column prefix indexes */
if (ind_field->prefix_len > 0
- && dfield_get_len(row_field) != UNIV_SQL_NULL) {
+ && dfield_get_len(row_field) != UNIV_SQL_NULL) {
cur_type = dict_col_get_type(
dict_field_get_col(ind_field));
@@ -2200,7 +2218,7 @@ row_ins_index_entry_set_vals(
ind_field->prefix_len,
dfield_get_len(row_field), row_field->data);
} else {
- field->len = row_field->len;
+ field->len = row_field->len;
}
field->data = row_field->data;
@@ -2221,9 +2239,9 @@ row_ins_index_entry_step(
ulint err;
ut_ad(dtuple_check_typed(node->row));
-
+
row_ins_index_entry_set_vals(node->index, node->entry, node->row);
-
+
ut_ad(dtuple_check_typed(node->entry));
err = row_ins_index_entry(node->index, node->entry, NULL, 0, thr);
@@ -2240,16 +2258,16 @@ row_ins_alloc_row_id_step(
ins_node_t* node) /* in: row insert node */
{
dulint row_id;
-
+
ut_ad(node->state == INS_NODE_ALLOC_ROW_ID);
-
+
if (dict_table_get_first_index(node->table)->type & DICT_UNIQUE) {
/* No row id is stored if the clustered index is unique */
return;
}
-
+
/* Fill in row id value to row */
row_id = dict_sys_get_new_row_id();
@@ -2269,12 +2287,12 @@ row_ins_get_row_from_values(
dfield_t* dfield;
dtuple_t* row;
ulint i;
-
+
/* The field values are copied in the buffers of the select node and
it is safe to use them until we fetch from select again: therefore
we can just copy the pointers */
- row = node->row;
+ row = node->row;
i = 0;
list_node = node->values_list;
@@ -2307,7 +2325,7 @@ row_ins_get_row_from_select(
it is safe to use them until we fetch from select again: therefore
we can just copy the pointers */
- row = node->row;
+ row = node->row;
i = 0;
list_node = node->select->select_list;
@@ -2320,7 +2338,7 @@ row_ins_get_row_from_select(
list_node = que_node_get_next(list_node);
}
}
-
+
/***************************************************************
Inserts a row to a table. */
@@ -2333,13 +2351,13 @@ row_ins(
que_thr_t* thr) /* in: query thread */
{
ulint err;
-
+
ut_ad(node && thr);
if (node->state == INS_NODE_ALLOC_ROW_ID) {
row_ins_alloc_row_id_step(node);
-
+
node->index = dict_table_get_first_index(node->table);
node->entry = UT_LIST_GET_FIRST(node->entry_list);
@@ -2359,7 +2377,7 @@ row_ins(
while (node->index != NULL) {
err = row_ins_index_entry_step(node, thr);
-
+
if (err != DB_SUCCESS) {
return(err);
@@ -2370,9 +2388,9 @@ row_ins(
}
ut_ad(node->entry == NULL);
-
+
node->state = INS_NODE_ALLOC_ROW_ID;
-
+
return(DB_SUCCESS);
}
@@ -2393,11 +2411,11 @@ row_ins_step(
ulint err;
ut_ad(thr);
-
+
trx = thr_get_trx(thr);
trx_start_if_not_started(trx);
-
+
node = thr->run_node;
ut_ad(que_node_get_type(node) == QUE_NODE_INSERT);
@@ -2417,12 +2435,12 @@ row_ins_step(
/* It may be that the current session has not yet started
its transaction, or it has been committed: */
-
+
if (UT_DULINT_EQ(trx->id, node->trx_id)) {
/* No need to do IX-locking or write trx id to buf */
goto same_trx;
- }
+ }
trx_write_trx_id(node->trx_id_buf, trx->id);
@@ -2434,17 +2452,17 @@ row_ins_step(
}
node->trx_id = trx->id;
- same_trx:
+ same_trx:
node->state = INS_NODE_ALLOC_ROW_ID;
if (node->ins_type == INS_SEARCHED) {
/* Reset the cursor */
sel_node->state = SEL_NODE_OPEN;
-
+
/* Fetch a row to insert */
-
+
thr->run_node = sel_node;
-
+
return(thr);
}
}
@@ -2456,7 +2474,7 @@ row_ins_step(
/* No more rows to insert */
thr->run_node = parent;
-
+
return(thr);
}
@@ -2476,7 +2494,7 @@ error_handling:
if (node->ins_type == INS_SEARCHED) {
/* Fetch a row to insert */
-
+
thr->run_node = sel_node;
} else {
thr->run_node = que_node_get_parent(node);
diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c
index a16ffc5aa41..4dbe5128974 100644
--- a/storage/innobase/row/row0mysql.c
+++ b/storage/innobase/row/row0mysql.c
@@ -45,7 +45,7 @@ struct row_mysql_drop_struct{
};
UT_LIST_BASE_NODE_T(row_mysql_drop_t) row_mysql_drop_list;
-ibool row_mysql_drop_list_inited = FALSE;
+ibool row_mysql_drop_list_inited = FALSE;
/* Magic table names for invoking various monitor threads */
static const char S_innodb_monitor[] = "innodb_monitor";
@@ -89,8 +89,8 @@ row_mysql_is_system_table(
return(FALSE);
}
return(0 == strcmp(name + 6, "host")
- || 0 == strcmp(name + 6, "user")
- || 0 == strcmp(name + 6, "db"));
+ || 0 == strcmp(name + 6, "user")
+ || 0 == strcmp(name + 6, "db"));
}
/***********************************************************************
@@ -207,7 +207,7 @@ row_mysql_store_blob_ref(
mach_write_to_n_little_endian(dest, col_len - 8, len);
- ut_memcpy(dest + col_len - 8, (byte*)&data, sizeof(byte*));
+ ut_memcpy(dest + col_len - 8, &data, sizeof(byte*));
}
/***********************************************************************
@@ -226,7 +226,7 @@ row_mysql_read_blob_ref(
*len = mach_read_from_n_little_endian(ref, col_len - 8);
- ut_memcpy((byte*)&data, ref + col_len - 8, sizeof(byte*));
+ ut_memcpy(&data, ref + col_len - 8, sizeof(byte*));
return(data);
}
@@ -267,7 +267,7 @@ row_mysql_store_col_in_innobase_format(
VARCHAR then this is irrelevant */
ulint comp) /* in: nonzero=compact format */
{
- byte* ptr = mysql_data;
+ byte* ptr = mysql_data;
dtype_t* dtype;
ulint type;
ulint lenlen;
@@ -299,13 +299,13 @@ row_mysql_store_col_in_innobase_format(
buf += col_len;
} else if ((type == DATA_VARCHAR
- || type == DATA_VARMYSQL
- || type == DATA_BINARY)) {
+ || type == DATA_VARMYSQL
+ || type == DATA_BINARY)) {
if (dtype_get_mysql_type(dtype) == DATA_MYSQL_TRUE_VARCHAR) {
/* The length of the actual data is stored to 1 or 2
bytes at the start of the field */
-
+
if (row_format_col) {
if (dtype->prtype & DATA_LONG_TRUE_VARCHAR) {
lenlen = 2;
@@ -318,7 +318,7 @@ row_mysql_store_col_in_innobase_format(
}
ptr = row_mysql_read_true_varchar(&col_len, mysql_data,
- lenlen);
+ lenlen);
} else {
/* Remove trailing spaces from old style VARCHAR
columns. */
@@ -361,12 +361,12 @@ row_mysql_store_col_in_innobase_format(
Consider a CHAR(n) field, a field of n characters.
It will contain between n * mbminlen and n * mbmaxlen bytes.
We will try to truncate it to n bytes by stripping
- space padding. If the field contains single-byte
+ space padding. If the field contains single-byte
characters only, it will be truncated to n characters.
Consider a CHAR(5) field containing the string ".a "
where "." denotes a 3-byte character represented by
the bytes "$%&". After our stripping, the string will
- be stored as "$%&a " (5 bytes). The string ".abc "
+ be stored as "$%&a " (5 bytes). The string ".abc "
will be stored as "$%&abc" (6 bytes).
The space padding will be restored in row0sel.c, function
@@ -410,10 +410,10 @@ row_mysql_convert_row_to_innobase(
row is used, as row may contain
pointers to this record! */
{
- mysql_row_templ_t* templ;
+ mysql_row_templ_t* templ;
dfield_t* dfield;
ulint i;
-
+
ut_ad(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
ut_ad(prebuilt->mysql_template);
@@ -426,7 +426,7 @@ row_mysql_convert_row_to_innobase(
/* Column may be SQL NULL */
if (mysql_rec[templ->mysql_null_byte_offset] &
- (byte) (templ->mysql_null_bit_mask)) {
+ (byte) (templ->mysql_null_bit_mask)) {
/* It is SQL NULL */
@@ -434,18 +434,18 @@ row_mysql_convert_row_to_innobase(
goto next_column;
}
- }
-
+ }
+
row_mysql_store_col_in_innobase_format(dfield,
- prebuilt->ins_upd_rec_buff
- + templ->mysql_col_offset,
- TRUE, /* MySQL row format data */
- mysql_rec + templ->mysql_col_offset,
- templ->mysql_col_len,
- prebuilt->table->comp);
+ prebuilt->ins_upd_rec_buff
+ + templ->mysql_col_offset,
+ TRUE, /* MySQL row format data */
+ mysql_rec + templ->mysql_col_offset,
+ templ->mysql_col_len,
+ dict_table_is_comp(prebuilt->table));
next_column:
;
- }
+ }
}
/********************************************************************
@@ -469,21 +469,21 @@ row_mysql_handle_errors(
handle_new_error:
err = trx->error_state;
-
+
ut_a(err != DB_SUCCESS);
-
+
trx->error_state = DB_SUCCESS;
-
+
if ((err == DB_DUPLICATE_KEY)
|| (err == DB_FOREIGN_DUPLICATE_KEY)) {
- if (savept) {
+ if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
trx_general_rollback_for_mysql(trx, TRUE, savept);
}
} else if (err == DB_TOO_BIG_RECORD) {
- if (savept) {
+ if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
@@ -493,7 +493,7 @@ handle_new_error:
} else if (err == DB_ROW_IS_REFERENCED
|| err == DB_NO_REFERENCED_ROW
|| err == DB_CANNOT_ADD_CONSTRAINT) {
- if (savept) {
+ if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
@@ -520,10 +520,10 @@ handle_new_error:
to version 3.23.43 */
trx_general_rollback_for_mysql(trx, FALSE, NULL);
-
+
} else if (err == DB_OUT_OF_FILE_SPACE
|| err == DB_LOCK_WAIT_TIMEOUT) {
- if (savept) {
+ if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
@@ -537,32 +537,32 @@ handle_new_error:
"InnoDB: The database cannot continue operation because of\n"
"InnoDB: lack of space. You must add a new data file to\n"
"InnoDB: my.cnf and restart the database.\n", stderr);
-
+
exit(1);
} else if (err == DB_CORRUPTION) {
- fputs(
- "InnoDB: We detected index corruption in an InnoDB type table.\n"
- "InnoDB: You have to dump + drop + reimport the table or, in\n"
- "InnoDB: a case of widespread corruption, dump all InnoDB\n"
- "InnoDB: tables and recreate the whole InnoDB tablespace.\n"
- "InnoDB: If the mysqld server crashes after the startup or when\n"
- "InnoDB: you dump the tables, look at\n"
- "InnoDB: http://dev.mysql.com/doc/mysql/en/Forcing_recovery.html"
- " for help.\n", stderr);
+ fputs(
+ "InnoDB: We detected index corruption in an InnoDB type table.\n"
+ "InnoDB: You have to dump + drop + reimport the table or, in\n"
+ "InnoDB: a case of widespread corruption, dump all InnoDB\n"
+ "InnoDB: tables and recreate the whole InnoDB tablespace.\n"
+ "InnoDB: If the mysqld server crashes after the startup or when\n"
+ "InnoDB: you dump the tables, look at\n"
+ "InnoDB: http://dev.mysql.com/doc/mysql/en/Forcing_recovery.html"
+ " for help.\n", stderr);
} else {
fprintf(stderr, "InnoDB: unknown error code %lu\n",
(ulong) err);
ut_error;
- }
+ }
if (trx->error_state != DB_SUCCESS) {
*new_err = trx->error_state;
} else {
*new_err = err;
}
-
+
trx->error_state = DB_SUCCESS;
return(FALSE);
@@ -571,6 +571,7 @@ handle_new_error:
InnoDB Hot Backup builds. Besides, this function should never
be called in InnoDB Hot Backup. */
ut_error;
+ return(FALSE);
#endif /* UNIV_HOTBACKUP */
}
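
The added return(FALSE) after ut_error is presumably there to keep compilers quiet: unless the assertion macro is marked as non-returning, a non-void function whose last statement is an assertion draws a missing-return warning. A hedged standalone illustration; fatal_error() and never_succeeds() are stand-ins, not InnoDB names:

#include <stdio.h>
#include <stdlib.h>

static void fatal_error(void)
{
	fputs("fatal error\n", stderr);
	abort();
}

static int never_succeeds(void)
{
	fatal_error();

	return(0);	/* not reached; silences missing-return warnings
			when fatal_error() is not declared noreturn */
}
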
@@ -589,7 +590,7 @@ row_create_prebuilt(
dtuple_t* ref;
ulint ref_len;
ulint i;
-
+
heap = mem_heap_create(128);
prebuilt = mem_heap_alloc(heap, sizeof(row_prebuilt_t));
@@ -616,13 +617,13 @@ row_create_prebuilt(
prebuilt->ins_node = NULL;
prebuilt->ins_upd_rec_buff = NULL;
-
+
prebuilt->upd_node = NULL;
prebuilt->ins_graph = NULL;
prebuilt->upd_graph = NULL;
- prebuilt->pcur = btr_pcur_create_for_mysql();
- prebuilt->clust_pcur = btr_pcur_create_for_mysql();
+ prebuilt->pcur = btr_pcur_create_for_mysql();
+ prebuilt->clust_pcur = btr_pcur_create_for_mysql();
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = 99999999;
@@ -633,7 +634,7 @@ row_create_prebuilt(
prebuilt->search_tuple = dtuple_create(heap,
2 * dict_table_get_n_cols(table));
-
+
clust_index = dict_table_get_first_index(table);
/* Make sure that search_tuple is long enough for clustered index */
@@ -671,7 +672,7 @@ row_prebuilt_free(
ulint i;
if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED
- || prebuilt->magic_n2 != ROW_PREBUILT_ALLOCATED) {
+ || prebuilt->magic_n2 != ROW_PREBUILT_ALLOCATED) {
fprintf(stderr,
"InnoDB: Error: trying to free a corrupt\n"
"InnoDB: table handle. Magic n %lu, magic n2 %lu, table name",
@@ -680,7 +681,7 @@ row_prebuilt_free(
ut_print_name(stderr, NULL, prebuilt->table->name);
putc('\n', stderr);
- mem_analyze_corruption((byte*)prebuilt);
+ mem_analyze_corruption(prebuilt);
ut_error;
}
@@ -702,11 +703,11 @@ row_prebuilt_free(
if (prebuilt->sel_graph) {
que_graph_free_recursive(prebuilt->sel_graph);
}
-
+
if (prebuilt->upd_graph) {
que_graph_free_recursive(prebuilt->upd_graph);
}
-
+
if (prebuilt->blob_heap) {
mem_heap_free(prebuilt->blob_heap);
}
@@ -714,15 +715,15 @@ row_prebuilt_free(
if (prebuilt->old_vers_heap) {
mem_heap_free(prebuilt->old_vers_heap);
}
-
+
for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) {
if (prebuilt->fetch_cache[i] != NULL) {
if ((ROW_PREBUILT_FETCH_MAGIC_N !=
- mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
- || (ROW_PREBUILT_FETCH_MAGIC_N !=
- mach_read_from_4((prebuilt->fetch_cache[i])
- + prebuilt->mysql_row_len))) {
+ mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
+ || (ROW_PREBUILT_FETCH_MAGIC_N !=
+ mach_read_from_4((prebuilt->fetch_cache[i])
+ + prebuilt->mysql_row_len))) {
fputs(
"InnoDB: Error: trying to free a corrupt\n"
"InnoDB: fetch buffer.\n", stderr);
@@ -753,14 +754,14 @@ row_update_prebuilt_trx(
row_prebuilt_t* prebuilt, /* in: prebuilt struct in MySQL
handle */
trx_t* trx) /* in: transaction handle */
-{
+{
if (trx->magic_n != TRX_MAGIC_N) {
fprintf(stderr,
"InnoDB: Error: trying to use a corrupt\n"
"InnoDB: trx handle. Magic n %lu\n",
(ulong) trx->magic_n);
- mem_analyze_corruption((byte*)trx);
+ mem_analyze_corruption(trx);
ut_error;
}
@@ -773,7 +774,7 @@ row_update_prebuilt_trx(
ut_print_name(stderr, NULL, prebuilt->table->name);
putc('\n', stderr);
- mem_analyze_corruption((byte*)prebuilt);
+ mem_analyze_corruption(prebuilt);
ut_error;
}
@@ -790,7 +791,7 @@ row_update_prebuilt_trx(
if (prebuilt->sel_graph) {
prebuilt->sel_graph->trx = trx;
- }
+ }
}
/*************************************************************************
@@ -802,7 +803,7 @@ dtuple_t*
row_get_prebuilt_insert_row(
/*========================*/
/* out: prebuilt dtuple; the column
- type information is also set in it */
+ type information is also set in it */
row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
handle */
{
@@ -812,14 +813,14 @@ row_get_prebuilt_insert_row(
ulint i;
ut_ad(prebuilt && table && prebuilt->trx);
-
+
if (prebuilt->ins_node == NULL) {
/* Not called before for this handle: create an insert node
and query graph to the prebuilt struct */
node = ins_node_create(INS_DIRECT, table, prebuilt->heap);
-
+
prebuilt->ins_node = node;
if (prebuilt->ins_upd_rec_buff == NULL) {
@@ -827,7 +828,7 @@ row_get_prebuilt_insert_row(
prebuilt->heap,
prebuilt->mysql_row_len);
}
-
+
row = dtuple_create(prebuilt->heap,
dict_table_get_n_cols(table));
@@ -837,8 +838,8 @@ row_get_prebuilt_insert_row(
a debug assertion from failing */
for (i = 0; i < dtuple_get_n_fields(row); i++) {
-
- dtuple_get_nth_field(row, i)->len = UNIV_SQL_NULL;
+
+ dtuple_get_nth_field(row, i)->len = UNIV_SQL_NULL;
}
ins_node_set_new_row(node, row);
@@ -851,7 +852,7 @@ row_get_prebuilt_insert_row(
prebuilt->ins_graph->state = QUE_FORK_ACTIVE;
}
- return(prebuilt->ins_node->row);
+ return(prebuilt->ins_node->row);
}
/*************************************************************************
@@ -864,7 +865,7 @@ row_update_statistics_if_needed(
dict_table_t* table) /* in: table */
{
ulint counter;
-
+
counter = table->stat_modified_counter;
table->stat_modified_counter = counter + 1;
@@ -876,16 +877,16 @@ row_update_statistics_if_needed(
a counter table which is very small and updated very often. */
if (counter > 2000000000
- || ((ib_longlong)counter > 16 + table->stat_n_rows / 16)) {
+ || ((ib_longlong)counter > 16 + table->stat_n_rows / 16)) {
dict_update_statistics(table);
- }
+ }
}
-
+
/*************************************************************************
Unlocks an AUTO_INC type lock possibly reserved by trx. */
-void
+void
row_unlock_table_autoinc_for_mysql(
/*===============================*/
trx_t* trx) /* in: transaction */
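
The statistics-refresh heuristic in the hunk above fires when the modification counter exceeds an absolute cap or roughly one sixteenth of the estimated row count. A plain model of that condition, with illustrative names:

#include <stdint.h>

/* Refresh table statistics when about 1/16 of the estimated rows have
been modified since the last refresh, or when the counter reaches an
absolute cap. */
static int stats_need_refresh(
	uint64_t	modified,	/* in: modification counter */
	uint64_t	n_rows)		/* in: estimated number of rows */
{
	return(modified > 2000000000ULL
	       || modified > 16 + n_rows / 16);
}
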
@@ -912,15 +913,15 @@ row_lock_table_autoinc_for_mysql(
row_prebuilt_t* prebuilt) /* in: prebuilt struct in the MySQL
table handle */
{
- trx_t* trx = prebuilt->trx;
+ trx_t* trx = prebuilt->trx;
ins_node_t* node = prebuilt->ins_node;
que_thr_t* thr;
ulint err;
ibool was_lock_wait;
-
+
ut_ad(trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-
+
if (trx->auto_inc_lock) {
return(DB_SUCCESS);
@@ -990,11 +991,11 @@ row_lock_table_for_mysql(
ulint mode) /* in: lock mode of table
(ignored if table==NULL) */
{
- trx_t* trx = prebuilt->trx;
+ trx_t* trx = prebuilt->trx;
que_thr_t* thr;
ulint err;
ibool was_lock_wait;
-
+
ut_ad(trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
@@ -1045,12 +1046,12 @@ run_again:
}
que_thr_stop_for_mysql_no_error(thr, trx);
-
+
trx->op_info = "";
- return((int) err);
+ return((int) err);
}
-
+
/*************************************************************************
Does an insert for MySQL. */
@@ -1066,15 +1067,15 @@ row_insert_for_mysql(
que_thr_t* thr;
ulint err;
ibool was_lock_wait;
- trx_t* trx = prebuilt->trx;
+ trx_t* trx = prebuilt->trx;
ins_node_t* node = prebuilt->ins_node;
-
+
ut_ad(trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
if (prebuilt->table->ibd_file_missing) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error:\n"
"InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
"InnoDB: table %s does not exist.\n"
"InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -1094,7 +1095,7 @@ row_insert_for_mysql(
ut_print_name(stderr, prebuilt->trx, prebuilt->table->name);
putc('\n', stderr);
- mem_analyze_corruption((byte*)prebuilt);
+ mem_analyze_corruption(prebuilt);
ut_error;
}
@@ -1123,9 +1124,9 @@ row_insert_for_mysql(
}
row_mysql_convert_row_to_innobase(node->row, prebuilt, mysql_rec);
-
+
savept = trx_savept_take(trx);
-
+
thr = que_fork_get_first_thr(prebuilt->ins_graph);
if (prebuilt->sql_stat_start) {
@@ -1134,7 +1135,7 @@ row_insert_for_mysql(
} else {
node->state = INS_NODE_ALLOC_ROW_ID;
}
-
+
que_thr_move_to_run_state_for_mysql(thr, trx);
run_again:
@@ -1142,7 +1143,7 @@ run_again:
thr->prev_node = node;
row_ins_step(thr);
-
+
err = trx->error_state;
if (err != DB_SUCCESS) {
@@ -1164,15 +1165,15 @@ run_again:
}
que_thr_stop_for_mysql_no_error(thr, trx);
-
+
prebuilt->table->stat_n_rows++;
srv_n_rows_inserted++;
-
+
if (prebuilt->table->stat_n_rows == 0) {
/* Avoid wrap-over */
prebuilt->table->stat_n_rows--;
- }
+ }
row_update_statistics_if_needed(prebuilt->table);
trx->op_info = "";
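
The bookkeeping above increments the in-memory row-count estimate and steps it back if the increment wrapped around to zero. A tiny standalone model of that guard:

#include <stdint.h>

/* Increment an unsigned row-count estimate; if the increment wrapped the
counter to zero, undo it so the estimate stays at its maximum. */
static void count_row_inserted(uint64_t* stat_n_rows)
{
	(*stat_n_rows)++;

	if (*stat_n_rows == 0) {
		/* avoid wrap-over */
		(*stat_n_rows)--;
	}
}
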
@@ -1192,11 +1193,11 @@ row_prebuild_sel_graph(
sel_node_t* node;
ut_ad(prebuilt && prebuilt->trx);
-
+
if (prebuilt->sel_graph == NULL) {
node = sel_node_create(prebuilt->heap);
-
+
prebuilt->sel_graph =
que_node_get_parent(
pars_complete_graph_for_exec(node,
@@ -1221,7 +1222,7 @@ row_create_update_node_for_mysql(
upd_node_t* node;
node = upd_node_create(heap);
-
+
node->in_mysql_interface = TRUE;
node->is_delete = FALSE;
node->searched_update = FALSE;
@@ -1233,7 +1234,7 @@ row_create_update_node_for_mysql(
node->update = upd_create(dict_table_get_n_cols(table), heap);
node->update_n_fields = dict_table_get_n_cols(table);
-
+
UT_LIST_INIT(node->columns);
node->has_clust_rec_x_lock = TRUE;
node->cmpl_info = 0;
@@ -1260,7 +1261,7 @@ row_get_prebuilt_update_vector(
upd_node_t* node;
ut_ad(prebuilt && table && prebuilt->trx);
-
+
if (prebuilt->upd_node == NULL) {
/* Not called before for this handle: create an update node
@@ -1269,7 +1270,7 @@ row_get_prebuilt_update_vector(
node = row_create_update_node_for_mysql(table, prebuilt->heap);
prebuilt->upd_node = node;
-
+
prebuilt->upd_graph =
que_node_get_parent(
pars_complete_graph_for_exec(node,
@@ -1297,7 +1298,7 @@ row_update_for_mysql(
ulint err;
que_thr_t* thr;
ibool was_lock_wait;
- dict_index_t* clust_index;
+ dict_index_t* clust_index;
/* ulint ref_len; */
upd_node_t* node;
dict_table_t* table = prebuilt->table;
@@ -1306,10 +1307,10 @@ row_update_for_mysql(
ut_ad(prebuilt && trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
UT_NOT_USED(mysql_rec);
-
+
if (prebuilt->table->ibd_file_missing) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error:\n"
"InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
"InnoDB: table %s does not exist.\n"
"InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -1329,7 +1330,7 @@ row_update_for_mysql(
ut_print_name(stderr, prebuilt->trx, prebuilt->table->name);
putc('\n', stderr);
- mem_analyze_corruption((byte*)prebuilt);
+ mem_analyze_corruption(prebuilt);
ut_error;
}
@@ -1362,9 +1363,9 @@ row_update_for_mysql(
btr_pcur_copy_stored_position(node->pcur,
prebuilt->clust_pcur);
}
-
+
ut_a(node->pcur->rel_pos == BTR_PCUR_ON);
-
+
/* MySQL seems to call rnd_pos before updating each row it
has cached: we can get the correct cursor position from
prebuilt->pcur; NOTE that we cannot build the row reference
@@ -1373,7 +1374,7 @@ row_update_for_mysql(
the row id used as the clustered index key */
savept = trx_savept_take(trx);
-
+
thr = que_fork_get_first_thr(prebuilt->upd_graph);
node->state = UPD_NODE_UPDATE_CLUSTERED;
@@ -1392,7 +1393,7 @@ run_again:
if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr);
-
+
if (err == DB_RECORD_NOT_FOUND) {
trx->error_state = DB_SUCCESS;
trx->op_info = "";
@@ -1400,10 +1401,11 @@ run_again:
return((int) err);
}
- thr->lock_state= QUE_THR_LOCK_ROW;
+ thr->lock_state= QUE_THR_LOCK_ROW;
was_lock_wait = row_mysql_handle_errors(&err, trx, thr,
- &savept);
- thr->lock_state= QUE_THR_LOCK_NOLOCK;;
+ &savept);
+ thr->lock_state= QUE_THR_LOCK_NOLOCK;
+
if (was_lock_wait) {
goto run_again;
}
@@ -1459,11 +1461,11 @@ row_unlock_for_mysql(
btr_pcur_t* clust_pcur = prebuilt->clust_pcur;
trx_t* trx = prebuilt->trx;
rec_t* rec;
- mtr_t mtr;
-
+ mtr_t mtr;
+
ut_ad(prebuilt && trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-
+
if (!srv_locks_unsafe_for_binlog) {
fprintf(stderr,
@@ -1480,9 +1482,9 @@ row_unlock_for_mysql(
if (index != NULL && trx_new_rec_locks_contain(trx, index)) {
mtr_start(&mtr);
-
+
/* Restore the cursor position and find the record */
-
+
if (!has_latches_on_recs) {
btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, &mtr);
}
@@ -1499,7 +1501,7 @@ row_unlock_for_mysql(
garbage! */
if (index->type & DICT_CLUSTERED) {
-
+
goto func_exit;
}
}
@@ -1509,7 +1511,7 @@ row_unlock_for_mysql(
if (index != NULL && trx_new_rec_locks_contain(trx, index)) {
mtr_start(&mtr);
-
+
/* Restore the cursor position and find the record */
if (!has_latches_on_recs) {
@@ -1523,10 +1525,10 @@ row_unlock_for_mysql(
mtr_commit(&mtr);
}
-
+
func_exit:
trx->op_info = "";
-
+
return(DB_SUCCESS);
}
@@ -1560,7 +1562,7 @@ run_again:
if (err == DB_LOCK_WAIT) {
/* Handle lock wait here */
-
+
que_thr_stop_for_mysql(thr);
srv_suspend_mysql_thread(thr);
@@ -1575,7 +1577,7 @@ run_again:
}
/* Retry operation after a normal lock wait */
-
+
goto run_again;
}
@@ -1613,8 +1615,8 @@ row_table_got_default_clust_index(
clust_index = dict_table_get_first_index(table);
if (dtype_get_mtype(dict_index_get_nth_type(clust_index, 0))
- == DATA_SYS) {
- return(TRUE);
+ == DATA_SYS) {
+ return(TRUE);
}
return(FALSE);
@@ -1700,7 +1702,7 @@ row_mysql_freeze_data_dictionary(
trx_t* trx) /* in: transaction */
{
ut_a(trx->dict_operation_lock_mode == 0);
-
+
rw_lock_s_lock(&dict_operation_lock);
trx->dict_operation_lock_mode = RW_S_LATCH;
@@ -1731,8 +1733,8 @@ row_mysql_lock_data_dictionary(
trx_t* trx) /* in: transaction */
{
ut_a(trx->dict_operation_lock_mode == 0
- || trx->dict_operation_lock_mode == RW_X_LATCH);
-
+ || trx->dict_operation_lock_mode == RW_X_LATCH);
+
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks or lock waits can occur then in these operations */
@@ -1787,7 +1789,7 @@ row_create_table_for_mysql(
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
-
+
if (srv_created_new_raw) {
fputs(
"InnoDB: A new raw disk partition was initialized or\n"
@@ -1803,7 +1805,7 @@ row_create_table_for_mysql(
}
trx->op_info = "creating table";
-
+
if (row_mysql_is_system_table(table->name)) {
fprintf(stderr,
@@ -1826,7 +1828,7 @@ row_create_table_for_mysql(
#sql-table in the tablespace. We have here a special
mechanism to recover such tables by renaming them to
rsql... */
-
+
return(row_mysql_recover_tmp_table(table, trx));
}
@@ -1845,7 +1847,7 @@ row_create_table_for_mysql(
/* Table equals "innodb_monitor":
start monitor prints */
-
+
srv_print_innodb_monitor = TRUE;
/* The lock timeout monitor thread also takes care
@@ -1874,7 +1876,7 @@ row_create_table_for_mysql(
} else if (table_name_len == sizeof S_innodb_mem_validate
&& !memcmp(table_name, S_innodb_mem_validate,
sizeof S_innodb_mem_validate)) {
- /* We define here a debugging feature intended for
+ /* We define here a debugging feature intended for
developers */
fputs("Validating InnoDB memory:\n"
@@ -1894,7 +1896,7 @@ row_create_table_for_mysql(
heap = mem_heap_create(512);
trx->dict_operation = TRUE;
-
+
node = tab_create_graph_create(table, heap);
thr = pars_complete_graph_for_exec(node, trx, heap);
@@ -1906,27 +1908,27 @@ row_create_table_for_mysql(
if (err != DB_SUCCESS) {
/* We have special error handling here */
-
+
trx->error_state = DB_SUCCESS;
-
+
trx_general_rollback_for_mysql(trx, FALSE, NULL);
if (err == DB_OUT_OF_FILE_SPACE) {
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
- fputs(" InnoDB: Warning: cannot create table ",
+ fputs(" InnoDB: Warning: cannot create table ",
stderr);
ut_print_name(stderr, trx, table->name);
fputs(" because tablespace full\n", stderr);
if (dict_table_get_low(table->name)) {
- row_drop_table_for_mysql(table->name, trx,
+ row_drop_table_for_mysql(table->name, trx,
FALSE);
}
} else if (err == DB_DUPLICATE_KEY) {
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
fputs(" InnoDB: Error: table ", stderr);
ut_print_name(stderr, trx, table->name);
@@ -1944,7 +1946,7 @@ row_create_table_for_mysql(
"InnoDB: http://dev.mysql.com/doc/mysql/en/"
"InnoDB_troubleshooting_datadict.html\n", stderr);
}
-
+
/* We may also get err == DB_ERROR if the .ibd file for the
table already exists */
@@ -1982,13 +1984,13 @@ row_create_index_for_mysql(
ulint err;
ulint i, j;
ulint len;
-
+
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-
+
trx->op_info = "creating index";
trx_start_if_not_started(trx);
@@ -2000,8 +2002,8 @@ row_create_index_for_mysql(
for (i = 0; i < dict_index_get_n_fields(index); i++) {
for (j = 0; j < i; j++) {
if (0 == ut_strcmp(
- dict_index_get_nth_field(index, j)->name,
- dict_index_get_nth_field(index, i)->name)) {
+ dict_index_get_nth_field(index, j)->name,
+ dict_index_get_nth_field(index, i)->name)) {
ut_print_timestamp(stderr);
@@ -2019,7 +2021,7 @@ row_create_index_for_mysql(
goto error_handling;
}
}
-
+
/* Check also that prefix_len and actual length
< DICT_MAX_INDEX_COL_LEN */
@@ -2028,7 +2030,7 @@ row_create_index_for_mysql(
if (field_lengths) {
len = ut_max(len, field_lengths[i]);
}
-
+
if (len >= DICT_MAX_INDEX_COL_LEN) {
err = DB_TOO_BIG_RECORD;
@@ -2055,14 +2057,14 @@ row_create_index_for_mysql(
ut_a(thr == que_fork_start_command(que_node_get_parent(thr)));
que_run_threads(thr);
- err = trx->error_state;
+ err = trx->error_state;
que_graph_free((que_t*) que_node_get_parent(thr));
error_handling:
if (err != DB_SUCCESS) {
/* We have special error handling here */
-
+
trx->error_state = DB_SUCCESS;
trx_general_rollback_for_mysql(trx, FALSE, NULL);
@@ -2071,7 +2073,7 @@ error_handling:
trx->error_state = DB_SUCCESS;
}
-
+
trx->op_info = "";
return((int) err);
@@ -2110,7 +2112,7 @@ row_table_add_foreign_constraints(
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_a(sql_string);
-
+
trx->op_info = "adding foreign keys";
trx_start_if_not_started(trx);
@@ -2127,12 +2129,12 @@ row_table_add_foreign_constraints(
if (err == DB_SUCCESS) {
/* Check that also referencing constraints are ok */
- err = dict_load_foreigns(name, trx->check_foreigns);
+ err = dict_load_foreigns(name, TRUE);
}
if (err != DB_SUCCESS) {
/* We have special error handling here */
-
+
trx->error_state = DB_SUCCESS;
trx_general_rollback_for_mysql(trx, FALSE, NULL);
@@ -2174,19 +2176,19 @@ row_drop_table_for_mysql_in_background(
ut_print_name(stderr, name);
fputs(" in background drop list\n", stderr); */
- /* Try to drop the table in InnoDB */
+ /* Try to drop the table in InnoDB */
+
+ error = row_drop_table_for_mysql(name, trx, FALSE);
- error = row_drop_table_for_mysql(name, trx, FALSE);
-
/* Flush the log to reduce probability that the .frm files and
the InnoDB data dictionary get out-of-sync if the user runs
with innodb_flush_log_at_trx_commit = 0 */
-
+
log_buffer_flush_to_disk();
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
- trx_free_for_background(trx);
+ trx_free_for_background(trx);
return((int) error);
}
@@ -2206,7 +2208,7 @@ row_drop_tables_for_mysql_in_background(void)
dict_table_t* table;
ulint n_tables;
ulint n_tables_dropped = 0;
-loop:
+loop:
mutex_enter(&kernel_mutex);
if (!row_mysql_drop_list_inited) {
@@ -2216,7 +2218,7 @@ loop:
}
drop = UT_LIST_GET_FIRST(row_mysql_drop_list);
-
+
n_tables = UT_LIST_GET_LEN(row_mysql_drop_list);
mutex_exit(&kernel_mutex);
@@ -2232,12 +2234,12 @@ loop:
mutex_exit(&(dict_sys->mutex));
if (table == NULL) {
- /* If for some reason the table has already been dropped
+ /* If for some reason the table has already been dropped
through some other mechanism, do not try to drop it */
- goto already_dropped;
+ goto already_dropped;
}
-
+
if (DB_SUCCESS != row_drop_table_for_mysql_in_background(
drop->table_name)) {
/* If the DROP fails for some table, we return, and let the
@@ -2253,7 +2255,7 @@ already_dropped:
UT_LIST_REMOVE(row_mysql_drop_list, row_mysql_drop_list, drop);
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Dropped table %s in background drop queue.\n",
drop->table_name);
@@ -2285,7 +2287,7 @@ row_get_background_drop_list_len_low(void)
UT_LIST_INIT(row_mysql_drop_list);
row_mysql_drop_list_inited = TRUE;
}
-
+
return(UT_LIST_GET_LEN(row_mysql_drop_list));
}
@@ -2304,7 +2306,7 @@ row_add_table_to_background_drop_list(
dict_table_t* table) /* in: table */
{
row_mysql_drop_t* drop;
-
+
mutex_enter(&kernel_mutex);
if (!row_mysql_drop_list_inited) {
@@ -2312,14 +2314,14 @@ row_add_table_to_background_drop_list(
UT_LIST_INIT(row_mysql_drop_list);
row_mysql_drop_list_inited = TRUE;
}
-
+
/* Look if the table already is in the drop list */
drop = UT_LIST_GET_FIRST(row_mysql_drop_list);
while (drop != NULL) {
if (strcmp(drop->table_name, table->name) == 0) {
/* Already in the list */
-
+
mutex_exit(&kernel_mutex);
return(FALSE);
@@ -2331,9 +2333,9 @@ row_add_table_to_background_drop_list(
drop = mem_alloc(sizeof(row_mysql_drop_t));
drop->table_name = mem_strdup(table->name);
-
+
UT_LIST_ADD_LAST(row_mysql_drop_list, row_mysql_drop_list, drop);
-
+
/* fputs("InnoDB: Adding table ", stderr);
ut_print_name(stderr, drop->table_name);
fputs(" to background drop list\n", stderr); */
@@ -2403,8 +2405,8 @@ do not allow the discard. We also reserve the data dictionary latch. */
"FROM SYS_TABLES\n"
"WHERE NAME = table_name;\n"
"IF (SQL %% NOTFOUND) THEN\n"
- " COMMIT WORK;\n"
- " RETURN;\n"
+ " COMMIT WORK;\n"
+ " RETURN;\n"
"END IF;\n"
"UPDATE SYS_TABLES SET ID = new_id\n"
"WHERE ID = old_id;\n"
@@ -2446,8 +2448,8 @@ do not allow the discard. We also reserve the data dictionary latch. */
if (table->n_foreign_key_checks_running > 0) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: You are trying to DISCARD table ", stderr);
+ ut_print_timestamp(stderr);
+ fputs(" InnoDB: You are trying to DISCARD table ", stderr);
ut_print_name(stderr, trx, table->name);
fputs("\n"
"InnoDB: though there is a foreign key check running on it.\n"
@@ -2463,7 +2465,7 @@ do not allow the discard. We also reserve the data dictionary latch. */
some other table (not the table itself) */
foreign = UT_LIST_GET_FIRST(table->referenced_list);
-
+
while (foreign && foreign->foreign_table == table) {
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
@@ -2510,7 +2512,7 @@ do not allow the discard. We also reserve the data dictionary latch. */
ut_a(graph);
/* Remove any locks there are on the table or its records */
-
+
lock_reset_all_on_table(table);
graph->trx = trx;
@@ -2546,14 +2548,14 @@ do not allow the discard. We also reserve the data dictionary latch. */
table->ibd_file_missing = TRUE;
}
}
-funct_exit:
+funct_exit:
row_mysql_unlock_data_dictionary(trx);
if (graph) {
que_graph_free(graph);
}
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
trx->op_info = "";
@@ -2583,7 +2585,7 @@ row_import_tablespace_for_mysql(
trx->op_info = "importing tablespace";
current_lsn = log_get_lsn();
-
+
/* It is possible, though very improbable, that the lsn's in the
tablespace to be imported have risen above the current system lsn, if
a lengthy purge, ibuf merge, or rollback was performed on a backup
@@ -2683,10 +2685,10 @@ row_import_tablespace_for_mysql(
err = DB_ERROR;
}
-funct_exit:
+funct_exit:
row_mysql_unlock_data_dictionary(trx);
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
trx->op_info = "";
@@ -2777,7 +2779,7 @@ do not allow the TRUNCATE. We also reserve the data dictionary latch. */
"InnoDB: database modifications by the user. Shut down\n"
"InnoDB: mysqld and edit my.cnf so that newraw is replaced\n"
"InnoDB: with raw, and innodb_force_... is removed.\n",
- stderr);
+ stderr);
return(DB_ERROR);
}
@@ -2839,7 +2841,7 @@ do not allow the TRUNCATE. We also reserve the data dictionary latch. */
if (table->n_foreign_key_checks_running > 0) {
ut_print_timestamp(stderr);
- fputs(" InnoDB: Cannot truncate table ", stderr);
+ fputs(" InnoDB: Cannot truncate table ", stderr);
ut_print_name(stderr, trx, table->name);
fputs(" by DROP+CREATE\n"
"InnoDB: because there is a foreign key check running on it.\n",
@@ -2962,7 +2964,7 @@ do not allow the TRUNCATE. We also reserve the data dictionary latch. */
trx_general_rollback_for_mysql(trx, FALSE, NULL);
trx->error_state = DB_SUCCESS;
ut_print_timestamp(stderr);
-fputs(" InnoDB: Unable to assign a new identifier to table ", stderr);
+fputs(" InnoDB: Unable to assign a new identifier to table ", stderr);
ut_print_name(stderr, trx, table->name);
fputs("\n"
"InnoDB: after truncating it. Background processes may corrupt the table!\n",
@@ -2975,7 +2977,7 @@ fputs(" InnoDB: Unable to assign a new identifier to table ", stderr);
dict_table_autoinc_initialize(table, 0);
dict_update_statistics(table);
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
funct_exit:
@@ -3037,46 +3039,46 @@ row_drop_table_for_mysql(
"FROM SYS_TABLES\n"
"WHERE NAME = table_name;\n"
"IF (SQL % NOTFOUND) THEN\n"
- " COMMIT WORK;\n"
- " RETURN;\n"
+ " COMMIT WORK;\n"
+ " RETURN;\n"
"END IF;\n"
"found := 1;\n"
"SELECT ID INTO sys_foreign_id\n"
"FROM SYS_TABLES\n"
"WHERE NAME = 'SYS_FOREIGN';\n"
"IF (SQL % NOTFOUND) THEN\n"
- " found := 0;\n"
+ " found := 0;\n"
"END IF;\n"
"IF (table_name = 'SYS_FOREIGN') THEN\n"
- " found := 0;\n"
+ " found := 0;\n"
"END IF;\n"
"IF (table_name = 'SYS_FOREIGN_COLS') THEN\n"
- " found := 0;\n"
+ " found := 0;\n"
"END IF;\n"
"WHILE found = 1 LOOP\n"
- " SELECT ID INTO foreign_id\n"
- " FROM SYS_FOREIGN\n"
- " WHERE FOR_NAME = table_name\n"
- " AND TO_BINARY(FOR_NAME) = TO_BINARY(table_name);\n"
- " IF (SQL % NOTFOUND) THEN\n"
- " found := 0;\n"
- " ELSE"
- " DELETE FROM SYS_FOREIGN_COLS WHERE ID = foreign_id;\n"
- " DELETE FROM SYS_FOREIGN WHERE ID = foreign_id;\n"
- " END IF;\n"
+ " SELECT ID INTO foreign_id\n"
+ " FROM SYS_FOREIGN\n"
+ " WHERE FOR_NAME = table_name\n"
+ " AND TO_BINARY(FOR_NAME) = TO_BINARY(table_name);\n"
+ " IF (SQL % NOTFOUND) THEN\n"
+ " found := 0;\n"
+ " ELSE"
+ " DELETE FROM SYS_FOREIGN_COLS WHERE ID = foreign_id;\n"
+ " DELETE FROM SYS_FOREIGN WHERE ID = foreign_id;\n"
+ " END IF;\n"
"END LOOP;\n"
"found := 1;\n"
"WHILE found = 1 LOOP\n"
- " SELECT ID INTO index_id\n"
- " FROM SYS_INDEXES\n"
- " WHERE TABLE_ID = table_id;\n"
- " IF (SQL % NOTFOUND) THEN\n"
- " found := 0;\n"
- " ELSE"
- " DELETE FROM SYS_FIELDS WHERE INDEX_ID = index_id;\n"
- " DELETE FROM SYS_INDEXES WHERE ID = index_id\n"
- " AND TABLE_ID = table_id;\n"
- " END IF;\n"
+ " SELECT ID INTO index_id\n"
+ " FROM SYS_INDEXES\n"
+ " WHERE TABLE_ID = table_id;\n"
+ " IF (SQL % NOTFOUND) THEN\n"
+ " found := 0;\n"
+ " ELSE"
+ " DELETE FROM SYS_FIELDS WHERE INDEX_ID = index_id;\n"
+ " DELETE FROM SYS_INDEXES WHERE ID = index_id\n"
+ " AND TABLE_ID = table_id;\n"
+ " END IF;\n"
"END LOOP;\n"
"DELETE FROM SYS_COLUMNS WHERE TABLE_ID = table_id;\n"
"DELETE FROM SYS_TABLES WHERE ID = table_id;\n"
@@ -3092,7 +3094,7 @@ row_drop_table_for_mysql(
"InnoDB: database modifications by the user. Shut down\n"
"InnoDB: mysqld and edit my.cnf so that newraw is replaced\n"
"InnoDB: with raw, and innodb_force_... is removed.\n",
- stderr);
+ stderr);
return(DB_ERROR);
}
@@ -3116,7 +3118,7 @@ row_drop_table_for_mysql(
/* Table name equals "innodb_monitor":
stop monitor prints */
-
+
srv_print_innodb_monitor = FALSE;
srv_print_innodb_lock_monitor = FALSE;
} else if (namelen == sizeof S_innodb_lock_monitor
@@ -3160,7 +3162,7 @@ row_drop_table_for_mysql(
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
-
+
graph = pars_sql(sql);
ut_a(graph);
@@ -3175,13 +3177,13 @@ row_drop_table_for_mysql(
if (!table) {
err = DB_TABLE_NOT_FOUND;
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
fputs(" InnoDB: Error: table ", stderr);
ut_print_name(stderr, trx, name);
fputs(" does not exist in the InnoDB internal\n"
- "InnoDB: data dictionary though MySQL is trying to drop it.\n"
- "InnoDB: Have you copied the .frm file of the table to the\n"
+ "InnoDB: data dictionary though MySQL is trying to drop it.\n"
+ "InnoDB: Have you copied the .frm file of the table to the\n"
"InnoDB: MySQL database directory from another database?\n"
"InnoDB: You can look for further help from\n"
"InnoDB: http://dev.mysql.com/doc/mysql/en/"
@@ -3193,7 +3195,7 @@ row_drop_table_for_mysql(
some other table (not the table itself) */
foreign = UT_LIST_GET_FIRST(table->referenced_list);
-
+
while (foreign && foreign->foreign_table == table) {
check_next_foreign:
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
@@ -3233,15 +3235,15 @@ row_drop_table_for_mysql(
added = row_add_table_to_background_drop_list(table);
- if (added) {
+ if (added) {
ut_print_timestamp(stderr);
-fputs(" InnoDB: Warning: MySQL is trying to drop table ", stderr);
+fputs(" InnoDB: Warning: MySQL is trying to drop table ", stderr);
ut_print_name(stderr, trx, table->name);
fputs("\n"
"InnoDB: though there are still open handles to it.\n"
"InnoDB: Adding the table to the background drop queue.\n",
stderr);
-
+
/* We return DB_SUCCESS to MySQL though the drop will
happen lazily later */
@@ -3267,8 +3269,8 @@ fputs(" InnoDB: Warning: MySQL is trying to drop table ", stderr);
added = row_add_table_to_background_drop_list(table);
if (added) {
- ut_print_timestamp(stderr);
-fputs(" InnoDB: You are trying to drop table ", stderr);
+ ut_print_timestamp(stderr);
+fputs(" InnoDB: You are trying to drop table ", stderr);
ut_print_name(stderr, trx, table->name);
fputs("\n"
"InnoDB: though there is a foreign key check running on it.\n"
@@ -3286,9 +3288,9 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
goto funct_exit;
}
-
+
/* Remove any locks there are on the table or its records */
-
+
lock_reset_all_on_table(table);
trx->dict_operation = TRUE;
@@ -3304,7 +3306,7 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
ut_a(err == DB_OUT_OF_FILE_SPACE);
err = DB_MUST_GET_MORE_FILE_SPACE;
-
+
row_mysql_handle_errors(&err, trx, thr, NULL);
ut_error;
@@ -3313,7 +3315,7 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
const char* name_or_path;
space_id = table->space;
-
+
if (table->dir_path_of_temp_table != NULL) {
dir_path_of_temp_table =
mem_strdup(table->dir_path_of_temp_table);
@@ -3347,7 +3349,7 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
fprintf(stderr,
"InnoDB: We removed now the InnoDB internal data dictionary entry\n"
-"InnoDB: of table ");
+"InnoDB: of table ");
ut_print_name(stderr, trx, name);
fprintf(stderr, ".\n");
@@ -3359,7 +3361,7 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
if (!success) {
fprintf(stderr,
"InnoDB: We removed now the InnoDB internal data dictionary entry\n"
-"InnoDB: of table ");
+"InnoDB: of table ");
ut_print_name(stderr, trx, name);
fprintf(stderr, ".\n");
@@ -3376,7 +3378,7 @@ fputs(" InnoDB: You are trying to drop table ", stderr);
funct_exit:
if (locked_dictionary) {
- row_mysql_unlock_data_dictionary(trx);
+ row_mysql_unlock_data_dictionary(trx);
}
if (dir_path_of_temp_table) {
@@ -3384,8 +3386,8 @@ funct_exit:
}
que_graph_free(graph);
-
- trx_commit_for_mysql(trx);
+
+ trx_commit_for_mysql(trx);
trx->op_info = "";
@@ -3406,17 +3408,17 @@ row_drop_database_for_mysql(
const char* name, /* in: database name which ends to '/' */
trx_t* trx) /* in: transaction handle */
{
- dict_table_t* table;
+ dict_table_t* table;
char* table_name;
int err = DB_SUCCESS;
ulint namelen = strlen(name);
-
+
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
ut_a(name != NULL);
ut_a(name[namelen - 1] == '/');
-
+
trx->op_info = "dropping database";
-
+
trx_start_if_not_started(trx);
loop:
row_mysql_lock_data_dictionary(trx);
@@ -3443,11 +3445,11 @@ loop:
ut_print_name(stderr, trx, table_name);
fputs(".\n", stderr);
- os_thread_sleep(1000000);
+ os_thread_sleep(1000000);
- mem_free(table_name);
+ mem_free(table_name);
- goto loop;
+ goto loop;
}
err = row_drop_table_for_mysql(table_name, trx, TRUE);
@@ -3466,7 +3468,7 @@ loop:
}
row_mysql_unlock_data_dictionary(trx);
-
+
trx_commit_for_mysql(trx);
trx->op_info = "";
@@ -3542,54 +3544,55 @@ row_rename_table_for_mysql(
"old_t_name_len := LENGTH(old_table_name);\n"
"gen_constr_prefix := CONCAT(old_table_name, '_ibfk_');\n"
"WHILE found = 1 LOOP\n"
- " SELECT ID INTO foreign_id\n"
- " FROM SYS_FOREIGN\n"
- " WHERE FOR_NAME = old_table_name\n"
- " AND TO_BINARY(FOR_NAME) = TO_BINARY(old_table_name);\n"
- " IF (SQL % NOTFOUND) THEN\n"
- " found := 0;\n"
- " ELSE\n"
- " UPDATE SYS_FOREIGN\n"
- " SET FOR_NAME = new_table_name\n"
- " WHERE ID = foreign_id;\n"
- " id_len := LENGTH(foreign_id);\n"
- " IF (INSTR(foreign_id, '/') > 0) THEN\n"
- " IF (INSTR(foreign_id,\n"
- " gen_constr_prefix) > 0)\n"
- " THEN\n"
- " new_foreign_id :=\n"
- " CONCAT(new_table_name,\n"
- " SUBSTR(foreign_id, old_t_name_len,\n"
- " id_len - old_t_name_len));\n"
- " ELSE\n"
- " new_foreign_id :=\n"
- " CONCAT(new_db_name,\n"
- " SUBSTR(foreign_id,\n"
- " old_db_name_len,\n"
- " id_len - old_db_name_len));\n"
- " END IF;\n"
- " UPDATE SYS_FOREIGN\n"
- " SET ID = new_foreign_id\n"
- " WHERE ID = foreign_id;\n"
- " UPDATE SYS_FOREIGN_COLS\n"
- " SET ID = new_foreign_id\n"
- " WHERE ID = foreign_id;\n"
- " END IF;\n"
- " END IF;\n"
+ " SELECT ID INTO foreign_id\n"
+ " FROM SYS_FOREIGN\n"
+ " WHERE FOR_NAME = old_table_name\n"
+ " AND TO_BINARY(FOR_NAME) = TO_BINARY(old_table_name);\n"
+ " IF (SQL % NOTFOUND) THEN\n"
+ " found := 0;\n"
+ " ELSE\n"
+ " UPDATE SYS_FOREIGN\n"
+ " SET FOR_NAME = new_table_name\n"
+ " WHERE ID = foreign_id;\n"
+ " id_len := LENGTH(foreign_id);\n"
+ " IF (INSTR(foreign_id, '/') > 0) THEN\n"
+ " IF (INSTR(foreign_id,\n"
+ " gen_constr_prefix) > 0)\n"
+ " THEN\n"
+ " new_foreign_id :=\n"
+ " CONCAT(new_table_name,\n"
+ " SUBSTR(foreign_id, old_t_name_len,\n"
+ " id_len - old_t_name_len));\n"
+ " ELSE\n"
+ " new_foreign_id :=\n"
+ " CONCAT(new_db_name,\n"
+ " SUBSTR(foreign_id,\n"
+ " old_db_name_len,\n"
+ " id_len - old_db_name_len));\n"
+ " END IF;\n"
+ " UPDATE SYS_FOREIGN\n"
+ " SET ID = new_foreign_id\n"
+ " WHERE ID = foreign_id;\n"
+ " UPDATE SYS_FOREIGN_COLS\n"
+ " SET ID = new_foreign_id\n"
+ " WHERE ID = foreign_id;\n"
+ " END IF;\n"
+ " END IF;\n"
"END LOOP;\n"
"UPDATE SYS_FOREIGN SET REF_NAME = new_table_name\n"
"WHERE REF_NAME = old_table_name\n"
- " AND TO_BINARY(REF_NAME) = TO_BINARY(old_table_name);\n";
+ " AND TO_BINARY(REF_NAME) = TO_BINARY(old_table_name);\n";
static const char str5[] =
"END;\n";
mem_heap_t* heap = NULL;
const char** constraints_to_drop = NULL;
ulint n_constraints_to_drop = 0;
- ibool recovering_temp_table = FALSE;
+ ibool recovering_temp_table = FALSE;
+ ibool old_is_tmp, new_is_tmp;
ulint len;
ulint i;
- ibool success;
+ ibool success;
/* length of database name; 0 if not renaming to a temporary table */
ulint db_name_len;
char* sql;
@@ -3608,28 +3611,31 @@ row_rename_table_for_mysql(
"InnoDB: with raw, and innodb_force_... is removed.\n",
stderr);
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
return(DB_ERROR);
}
-
+
if (row_mysql_is_system_table(new_name)) {
-
+
fprintf(stderr,
"InnoDB: Error: trying to create a MySQL system table %s of type InnoDB.\n"
"InnoDB: MySQL system tables must be of the MyISAM type!\n",
new_name);
- trx_commit_for_mysql(trx);
+ trx_commit_for_mysql(trx);
return(DB_ERROR);
}
trx->op_info = "renaming table";
trx_start_if_not_started(trx);
+ old_is_tmp = row_is_mysql_tmp_table_name(old_name);
+ new_is_tmp = row_is_mysql_tmp_table_name(new_name);
+
if (row_mysql_is_recovered_tmp_table(new_name)) {
- recovering_temp_table = TRUE;
- } else {
+ recovering_temp_table = TRUE;
+ } else {
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks can occur then in these operations */
@@ -3640,30 +3646,30 @@ row_rename_table_for_mysql(
if (!table) {
err = DB_TABLE_NOT_FOUND;
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_name(stderr, trx, old_name);
- fputs(" does not exist in the InnoDB internal\n"
- "InnoDB: data dictionary though MySQL is trying to rename the table.\n"
- "InnoDB: Have you copied the .frm file of the table to the\n"
+ fputs(" InnoDB: Error: table ", stderr);
+ ut_print_name(stderr, trx, old_name);
+ fputs(" does not exist in the InnoDB internal\n"
+ "InnoDB: data dictionary though MySQL is trying to rename the table.\n"
+ "InnoDB: Have you copied the .frm file of the table to the\n"
"InnoDB: MySQL database directory from another database?\n"
"InnoDB: You can look for further help from\n"
- "InnoDB: http://dev.mysql.com/doc/mysql/en/"
+ "InnoDB: http://dev.mysql.com/doc/mysql/en/"
"InnoDB_troubleshooting_datadict.html\n", stderr);
goto funct_exit;
}
if (table->ibd_file_missing) {
err = DB_TABLE_NOT_FOUND;
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_name(stderr, trx, old_name);
- fputs(
+ fputs(" InnoDB: Error: table ", stderr);
+ ut_print_name(stderr, trx, old_name);
+ fputs(
" does not have an .ibd file in the database directory.\n"
"InnoDB: You can look for further help from\n"
- "InnoDB: http://dev.mysql.com/doc/mysql/en/"
+ "InnoDB: http://dev.mysql.com/doc/mysql/en/"
"InnoDB_troubleshooting_datadict.html\n", stderr);
goto funct_exit;
}
@@ -3672,7 +3678,7 @@ row_rename_table_for_mysql(
len = (sizeof str1) + (sizeof str2) + (sizeof str3) + (sizeof str5) - 4
+ ut_strlenq(new_name, '\'') + ut_strlenq(old_name, '\'');
- if (row_is_mysql_tmp_table_name(new_name)) {
+ if (new_is_tmp) {
db_name_len = dict_get_db_name_len(old_name) + 1;
/* MySQL is doing an ALTER TABLE command and it renames the
@@ -3682,7 +3688,7 @@ row_rename_table_for_mysql(
the ALTER TABLE contained DROP FOREIGN KEY <foreign key id>.*/
heap = mem_heap_create(100);
-
+
err = dict_foreign_parse_drop_constraints(heap, trx,
table,
&n_constraints_to_drop,
@@ -3691,7 +3697,7 @@ row_rename_table_for_mysql(
goto funct_exit;
}
-
+
/* reserve space for all database names */
len += 2 * n_constraints_to_drop
* (ut_strlenq(old_name, '\'')
@@ -3738,7 +3744,7 @@ row_rename_table_for_mysql(
sqlend += (sizeof str4a2) - 1;
sqlend = ut_memcpyq(sqlend, '\'',
old_name, db_name_len);
- sqlend = ut_strcpyq(sqlend, '\'',
+ sqlend = ut_strcpyq(sqlend, '\'',
constraints_to_drop[i]);
memcpy(sqlend, str4a3, (sizeof str4a3) - 1);
sqlend += (sizeof str4a3) - 1;
@@ -3755,7 +3761,7 @@ row_rename_table_for_mysql(
constraints_to_drop[i]);
memcpy(sqlend, str4a2, (sizeof str4a2) - 1);
sqlend += (sizeof str4a2) - 1;
- sqlend = ut_strcpyq(sqlend, '\'',
+ sqlend = ut_strcpyq(sqlend, '\'',
constraints_to_drop[i]);
memcpy(sqlend, str4a3, (sizeof str4a3) - 1);
sqlend += (sizeof str4a3) - 1;
@@ -3771,7 +3777,7 @@ row_rename_table_for_mysql(
sqlend += sizeof str5;
ut_a(sqlend == sql + len + 1);
-
+
graph = pars_sql(sql);
ut_a(graph);
@@ -3790,16 +3796,16 @@ row_rename_table_for_mysql(
if (err != DB_SUCCESS) {
if (err == DB_DUPLICATE_KEY) {
- ut_print_timestamp(stderr);
+ ut_print_timestamp(stderr);
fputs(
" InnoDB: Error; possible reasons:\n"
"InnoDB: 1) Table rename would cause two FOREIGN KEY constraints\n"
"InnoDB: to have the same internal name in case-insensitive comparison.\n"
"InnoDB: 2) table ", stderr);
- ut_print_name(stderr, trx, new_name);
- fputs(" exists in the InnoDB internal data\n"
+ ut_print_name(stderr, trx, new_name);
+ fputs(" exists in the InnoDB internal data\n"
"InnoDB: dictionary though MySQL is trying rename table ", stderr);
- ut_print_name(stderr, trx, old_name);
+ ut_print_name(stderr, trx, old_name);
fputs(" to it.\n"
"InnoDB: Have you deleted the .frm file and not used DROP TABLE?\n"
"InnoDB: You can look for further help from\n"
@@ -3825,7 +3831,7 @@ row_rename_table_for_mysql(
the table is stored in a single-table tablespace */
success = dict_table_rename_in_cache(table, new_name,
- !row_is_mysql_tmp_table_name(new_name));
+ !new_is_tmp);
if (!success) {
trx->error_state = DB_SUCCESS;
trx_general_rollback_for_mysql(trx, FALSE, NULL);
@@ -3842,19 +3848,16 @@ row_rename_table_for_mysql(
goto funct_exit;
}
- err = dict_load_foreigns(new_name, trx->check_foreigns);
+ /* We only want to switch off some of the type checking in
+ an ALTER, not in a RENAME. */
- if (row_is_mysql_tmp_table_name(old_name)) {
+ err = dict_load_foreigns(new_name,
+ old_is_tmp ? trx->check_foreigns : TRUE);
- /* MySQL is doing an ALTER TABLE command and it
- renames the created temporary table to the name
- of the original table. In the ALTER TABLE we maybe
- created some FOREIGN KEY constraints for the temporary
- table. But we want to load also the foreign key
- constraint definitions for the original table name. */
+ if (err != DB_SUCCESS) {
+ ut_print_timestamp(stderr);
- if (err != DB_SUCCESS) {
- ut_print_timestamp(stderr);
+ if (old_is_tmp) {
fputs(" InnoDB: Error: in ALTER TABLE ",
stderr);
ut_print_name(stderr, trx, new_name);
@@ -3862,39 +3865,26 @@ row_rename_table_for_mysql(
"InnoDB: has or is referenced in foreign key constraints\n"
"InnoDB: which are not compatible with the new table definition.\n",
stderr);
-
- ut_a(dict_table_rename_in_cache(table,
- old_name, FALSE));
- trx->error_state = DB_SUCCESS;
- trx_general_rollback_for_mysql(trx, FALSE,
- NULL);
- trx->error_state = DB_SUCCESS;
- }
- } else {
- if (err != DB_SUCCESS) {
-
- ut_print_timestamp(stderr);
-
+ } else {
fputs(
" InnoDB: Error: in RENAME TABLE table ",
stderr);
ut_print_name(stderr, trx, new_name);
fputs("\n"
- "InnoDB: is referenced in foreign key constraints\n"
- "InnoDB: which are not compatible with the new table definition.\n",
+ "InnoDB: is referenced in foreign key constraints\n"
+ "InnoDB: which are not compatible with the new table definition.\n",
stderr);
-
- ut_a(dict_table_rename_in_cache(table,
- old_name, FALSE));
-
- trx->error_state = DB_SUCCESS;
- trx_general_rollback_for_mysql(trx, FALSE,
- NULL);
- trx->error_state = DB_SUCCESS;
}
+
+ ut_a(dict_table_rename_in_cache(table,
+ old_name, FALSE));
+ trx->error_state = DB_SUCCESS;
+ trx_general_rollback_for_mysql(trx, FALSE,
+ NULL);
+ trx->error_state = DB_SUCCESS;
}
}
-funct_exit:
+funct_exit:
if (!recovering_temp_table) {
row_mysql_unlock_data_dictionary(trx);
}
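
The restructured block above merges the two error paths and decides how strictly dict_load_foreigns() should check the reloaded definitions: the session's relaxed setting is honoured only when the old name is a temporary #sql name (the rename is the tail end of an ALTER TABLE), while a plain RENAME always checks. A hedged sketch of that decision, with illustrative names:

#include <stdbool.h>

/* Decide whether foreign key definitions must be fully type-checked when
reloading them after a rename. Only the ALTER TABLE case (old name was a
temporary table name) may inherit the session's relaxed setting. */
static bool check_foreigns_on_rename(
	bool	old_is_tmp,		/* in: old name is a #sql temp name */
	bool	session_checks)		/* in: session-level check setting */
{
	return(old_is_tmp ? session_checks : true);
}
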
@@ -3906,8 +3896,8 @@ funct_exit:
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
-
- trx_commit_for_mysql(trx);
+
+ trx_commit_for_mysql(trx);
trx->op_info = "";
@@ -3945,10 +3935,10 @@ row_scan_and_check_index(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
*n_rows = 0;
-
+
buf = mem_alloc(UNIV_PAGE_SIZE);
heap = mem_heap_create(100);
-
+
/* Make a dummy template in prebuilt, which we will use
in scanning the index entries */
@@ -3958,7 +3948,7 @@ row_scan_and_check_index(
prebuilt->n_template = 0;
prebuilt->need_to_access_clustered = FALSE;
- dtuple_set_n_fields(prebuilt->search_tuple, 0);
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
prebuilt->select_lock_type = LOCK_NONE;
cnt = 1000;
@@ -3981,11 +3971,11 @@ loop:
}
*n_rows = *n_rows + 1;
-
+
/* row_search... returns the index record in buf, record origin offset
within buf stored in the first 4 bytes, because we have built a dummy
template */
-
+
rec = buf + mach_read_from_4(buf);
if (prev_entry != NULL) {
@@ -4002,15 +3992,15 @@ loop:
/* In a unique secondary index we allow equal key values if
they contain SQL NULLs */
- for (i = 0;
- i < dict_index_get_n_ordering_defined_by_user(index);
+ for (i = 0;
+ i < dict_index_get_n_ordering_defined_by_user(index);
i++) {
- if (UNIV_SQL_NULL == dfield_get_len(
- dtuple_get_nth_field(prev_entry, i))) {
+ if (UNIV_SQL_NULL == dfield_get_len(
+ dtuple_get_nth_field(prev_entry, i))) {
- contains_null = TRUE;
- }
- }
+ contains_null = TRUE;
+ }
+ }
if (cmp > 0) {
fputs("InnoDB: index records in a wrong order in ",
@@ -4038,12 +4028,12 @@ loop:
mem_heap_empty(heap);
offsets = offsets_;
-
+
prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT);
- goto loop;
+ goto loop;
}
/*************************************************************************
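
The re-indented loop above enforces index ordering during the scan: adjacent entries must not be in descending order, and equal keys are tolerated in a unique secondary index only when the compared fields contain SQL NULL. A standalone model of that predicate, with illustrative names:

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_SQL_NULL	((size_t) -1)

/* Return true if two adjacent index entries violate the expected order:
cmp > 0 means the previous entry sorts after the current one; cmp == 0 is
a duplicate, which a unique index only tolerates when some compared field
is SQL NULL. */
static bool order_violation(
	int		cmp,		/* in: compare(prev, current) */
	bool		unique_index,	/* in: index enforces uniqueness */
	const size_t*	field_lens,	/* in: field lengths of prev entry */
	size_t		n_fields)	/* in: number of ordering fields */
{
	size_t	i;
	bool	contains_null = false;

	if (cmp > 0) {
		return(true);
	}

	if (cmp != 0 || !unique_index) {
		return(false);
	}

	for (i = 0; i < n_fields; i++) {
		if (field_lens[i] == SKETCH_SQL_NULL) {
			contains_null = true;
		}
	}

	return(!contains_null);
}
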
@@ -4060,12 +4050,12 @@ row_check_table_for_mysql(
dict_index_t* index;
ulint n_rows;
ulint n_rows_in_table = ULINT_UNDEFINED;
- ulint ret = DB_SUCCESS;
+ ulint ret = DB_SUCCESS;
ulint old_isolation_level;
if (prebuilt->table->ibd_file_missing) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error:\n"
"InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
"InnoDB: table %s does not exist.\n"
"InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -4099,7 +4089,7 @@ row_check_table_for_mysql(
/* fputs("Validating index ", stderr);
ut_print_name(stderr, index->name);
putc('\n', stderr); */
-
+
if (!btr_validate_tree(index->tree, prebuilt->trx)) {
ret = DB_ERROR;
} else {
@@ -4120,7 +4110,7 @@ row_check_table_for_mysql(
} else if (n_rows != n_rows_in_table) {
ret = DB_ERROR;
-
+
fputs("Error: ", stderr);
dict_index_name_print(stderr,
prebuilt->trx, index);
@@ -4136,7 +4126,7 @@ row_check_table_for_mysql(
/* Restore the original isolation level */
prebuilt->trx->isolation_level = old_isolation_level;
-
+
/* We validate also the whole adaptive hash index for all tables
at every CHECK TABLE */
diff --git a/storage/innobase/row/row0purge.c b/storage/innobase/row/row0purge.c
index abcf97110d9..b7581fa3644 100644
--- a/storage/innobase/row/row0purge.c
+++ b/storage/innobase/row/row0purge.c
@@ -105,7 +105,7 @@ row_purge_remove_clust_if_poss_low(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
index = dict_table_get_first_index(node->table);
-
+
pcur = &(node->pcur);
btr_cur = btr_pcur_get_btr_cur(pcur);
@@ -158,7 +158,7 @@ row_purge_remove_clust_if_poss_low(
return(success);
}
-
+
/***************************************************************
Removes a clustered index record if it has not been modified after the delete
marking. */
@@ -170,7 +170,7 @@ row_purge_remove_clust_if_poss(
{
ibool success;
ulint n_tries = 0;
-
+
/* fputs("Purge: Removing clustered record\n", stderr); */
success = row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_LEAF);
@@ -188,13 +188,13 @@ retry:
n_tries++;
os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
-
+
goto retry;
}
ut_a(success);
}
-
+
/***************************************************************
Removes a secondary index entry if possible. */
static
@@ -206,7 +206,7 @@ row_purge_remove_sec_if_poss_low(
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry */
ulint mode) /* in: latch mode BTR_MODIFY_LEAF or
- BTR_MODIFY_TREE */
+ BTR_MODIFY_TREE */
{
btr_pcur_t pcur;
btr_cur_t* btr_cur;
@@ -216,10 +216,10 @@ row_purge_remove_sec_if_poss_low(
ulint err;
mtr_t mtr;
mtr_t* mtr_vers;
-
+
log_free_check();
mtr_start(&mtr);
-
+
found = row_search_index_entry(index, entry, mode, &pcur, &mtr);
if (!found) {
@@ -235,18 +235,18 @@ row_purge_remove_sec_if_poss_low(
}
btr_cur = btr_pcur_get_btr_cur(&pcur);
-
+
/* We should remove the index record if no later version of the row,
which cannot be purged yet, requires its existence. If some requires,
we should do nothing. */
mtr_vers = mem_alloc(sizeof(mtr_t));
-
+
mtr_start(mtr_vers);
success = row_purge_reposition_pcur(BTR_SEARCH_LEAF, node, mtr_vers);
- if (success) {
+ if (success) {
old_has = row_vers_old_has_index_entry(TRUE,
btr_pcur_get_rec(&(node->pcur)),
mtr_vers, index, entry);
@@ -255,11 +255,11 @@ row_purge_remove_sec_if_poss_low(
btr_pcur_commit_specify_mtr(&(node->pcur), mtr_vers);
mem_free(mtr_vers);
-
+
if (!success || !old_has) {
/* Remove the index record */
- if (mode == BTR_MODIFY_LEAF) {
+ if (mode == BTR_MODIFY_LEAF) {
success = btr_cur_optimistic_delete(btr_cur, &mtr);
} else {
ut_ad(mode == BTR_MODIFY_TREE);
@@ -293,7 +293,7 @@ row_purge_remove_sec_if_poss(
{
ibool success;
ulint n_tries = 0;
-
+
/* fputs("Purge: Removing secondary record\n", stderr); */
success = row_purge_remove_sec_if_poss_low(node, index, entry,
@@ -310,11 +310,11 @@ retry:
and restart with more file space */
if (!success && n_tries < BTR_CUR_RETRY_DELETE_N_TIMES) {
-
+
n_tries++;
os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
-
+
goto retry;
}
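
The hunk above belongs to a retry pattern used throughout the purge code: attempt the cheap leaf-level delete first, fall back to the tree-restructuring delete, and if that fails (typically for lack of file space) sleep and retry a bounded number of times. A sketch of the shape of that loop; do_delete and the constants are placeholders, not InnoDB APIs:

#include <stdbool.h>
#include <unistd.h>

enum delete_mode { MODIFY_LEAF, MODIFY_TREE };

/* Run the caller-supplied delete operation optimistically first, then
pessimistically with bounded retries, sleeping between attempts. */
static bool remove_with_retry(bool (*do_delete)(enum delete_mode))
{
	unsigned	n_tries = 0;

	if (do_delete(MODIFY_LEAF)) {
		return(true);
	}

	while (!do_delete(MODIFY_TREE)) {
		if (++n_tries >= 100) {
			return(false);
		}

		sleep(1);	/* wait e.g. for file space to be freed */
	}

	return(true);
}
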
@@ -332,7 +332,7 @@ row_purge_del_mark(
mem_heap_t* heap;
dtuple_t* entry;
dict_index_t* index;
-
+
ut_ad(node);
heap = mem_heap_create(1024);
@@ -348,11 +348,11 @@ row_purge_del_mark(
node->index = dict_table_get_next_index(node->index);
}
- mem_heap_free(heap);
+ mem_heap_free(heap);
row_purge_remove_clust_if_poss(node);
}
-
+
/***************************************************************
Purges an update of an existing record. Also purges an update of a delete
marked record if that record contained an externally stored field. */
@@ -375,7 +375,7 @@ row_purge_upd_exist_or_extern(
ulint data_field_len;
ulint i;
mtr_t mtr;
-
+
ut_ad(node);
if (node->rec_type == TRX_UNDO_UPD_DEL_REC) {
@@ -399,7 +399,7 @@ row_purge_upd_exist_or_extern(
node->index = dict_table_get_next_index(node->index);
}
- mem_heap_free(heap);
+ mem_heap_free(heap);
skip_secondaries:
/* Free possible externally stored fields */
@@ -416,7 +416,7 @@ skip_secondaries:
internal_offset = ((byte*)ufield->new_val.data)
- node->undo_rec;
-
+
ut_a(internal_offset < UNIV_PAGE_SIZE);
trx_undo_decode_roll_ptr(node->roll_ptr,
@@ -439,20 +439,20 @@ skip_secondaries:
latch on an undo log page, we would break the
latching order if we would only later latch the
root page of such a tree! */
-
+
btr_root_get(index->tree, &mtr);
/* We assume in purge of externally stored fields
that the space id of the undo log record is 0! */
data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
- + offset + internal_offset;
+ + offset + internal_offset;
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(buf_frame_align(data_field),
SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
-
+
data_field_len = ufield->new_val.len;
btr_free_externally_stored_field(index, data_field,
@@ -487,11 +487,11 @@ row_purge_parse_undo_rec(
ulint info_bits;
ulint type;
ulint cmpl_info;
-
+
ut_ad(node && thr);
trx = thr_get_trx(thr);
-
+
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
updated_extern, &undo_no, &table_id);
node->rec_type = type;
@@ -499,20 +499,20 @@ row_purge_parse_undo_rec(
if (type == TRX_UNDO_UPD_DEL_REC && !(*updated_extern)) {
return(FALSE);
- }
+ }
ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
&info_bits);
node->table = NULL;
if (type == TRX_UNDO_UPD_EXIST_REC
- && cmpl_info & UPD_NODE_NO_ORD_CHANGE && !(*updated_extern)) {
+ && cmpl_info & UPD_NODE_NO_ORD_CHANGE && !(*updated_extern)) {
- /* Purge requires no changes to indexes: we may return */
+ /* Purge requires no changes to indexes: we may return */
- return(FALSE);
+ return(FALSE);
}
-
+
/* Prevent DROP TABLE etc. from running when we are doing the purge
for this row */
@@ -521,7 +521,7 @@ row_purge_parse_undo_rec(
mutex_enter(&(dict_sys->mutex));
node->table = dict_table_get_on_id_low(table_id, trx);
-
+
mutex_exit(&(dict_sys->mutex));
if (node->table == NULL) {
@@ -565,7 +565,7 @@ row_purge_parse_undo_rec(
ptr = trx_undo_rec_get_partial_row(ptr, clust_index,
&(node->row), node->heap);
}
-
+
return(TRUE);
}
@@ -586,11 +586,11 @@ row_purge(
ibool purge_needed;
ibool updated_extern;
trx_t* trx;
-
+
ut_ad(node && thr);
trx = thr_get_trx(thr);
-
+
node->undo_rec = trx_purge_fetch_next_rec(&roll_ptr,
&(node->reservation),
node->heap);
@@ -615,7 +615,7 @@ row_purge(
if (purge_needed) {
node->found_clust = FALSE;
-
+
node->index = dict_table_get_next_index(
dict_table_get_first_index(node->table));
@@ -623,7 +623,7 @@ row_purge(
row_purge_del_mark(node);
} else if (updated_extern
- || node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
+ || node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
row_purge_upd_exist_or_extern(node);
}
@@ -638,7 +638,7 @@ row_purge(
/* Do some cleanup */
trx_purge_rec_release(node->reservation);
mem_heap_empty(node->heap);
-
+
thr->run_node = node;
return(DB_SUCCESS);
@@ -658,7 +658,7 @@ row_purge_step(
ulint err;
ut_ad(thr);
-
+
node = thr->run_node;
ut_ad(que_node_get_type(node) == QUE_NODE_PURGE);
@@ -668,4 +668,4 @@ row_purge_step(
ut_ad(err == DB_SUCCESS);
return(thr);
-}
+}
diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c
index 9a74397dc08..24464ab5b99 100644
--- a/storage/innobase/row/row0row.c
+++ b/storage/innobase/row/row0row.c
@@ -103,9 +103,9 @@ dtuple_t*
row_build_index_entry(
/*==================*/
/* out: index entry which should be inserted */
- dtuple_t* row, /* in: row which should be inserted to the
+ dtuple_t* row, /* in: row which should be inserted to the
table */
- dict_index_t* index, /* in: index on the table */
+ dict_index_t* index, /* in: index on the table */
mem_heap_t* heap) /* in: memory heap from which the memory for
the index entry is allocated */
{
@@ -116,12 +116,12 @@ row_build_index_entry(
dfield_t* dfield2;
dict_col_t* col;
ulint i;
- ulint storage_len;
+ ulint storage_len;
dtype_t* cur_type;
ut_ad(row && index && heap);
ut_ad(dtuple_check_typed(row));
-
+
entry_len = dict_index_get_n_fields(index);
entry = dtuple_create(heap, entry_len);
@@ -144,8 +144,8 @@ row_build_index_entry(
/* If a column prefix index, take only the prefix */
if (ind_field->prefix_len > 0
- && dfield_get_len(dfield2) != UNIV_SQL_NULL) {
-
+ && dfield_get_len(dfield2) != UNIV_SQL_NULL) {
+
cur_type = dict_col_get_type(
dict_field_get_col(ind_field));
@@ -161,7 +161,7 @@ row_build_index_entry(
ut_ad(dtuple_check_typed(entry));
return(entry);
-}
+}
/***********************************************************************
An inverse function to dict_row_build_index_entry. Builds a row from a
@@ -172,7 +172,7 @@ row_build(
/*======*/
/* out, own: row built; see the NOTE below! */
ulint type, /* in: ROW_COPY_POINTERS, ROW_COPY_DATA, or
- ROW_COPY_ALSO_EXTERNALS,
+ ROW_COPY_ALSO_EXTERNALS,
the two last copy also the data fields to
heap as the first only places pointers to
data fields on the index page, and thus is
@@ -200,7 +200,7 @@ row_build(
byte* field;
ulint len;
ulint row_len;
- byte* buf;
+ byte* buf;
ulint i;
mem_heap_t* tmp_heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
@@ -229,26 +229,27 @@ row_build(
row = dtuple_create(heap, row_len);
- dtuple_set_info_bits(row, rec_get_info_bits(rec, table->comp));
+ dtuple_set_info_bits(row, rec_get_info_bits(rec,
+ dict_table_is_comp(table)));
n_fields = rec_offs_n_fields(offsets);
dict_table_copy_types(row, table);
for (i = 0; i < n_fields; i++) {
- ind_field = dict_index_get_nth_field(index, i);
+ ind_field = dict_index_get_nth_field(index, i);
if (ind_field->prefix_len == 0) {
- col = dict_field_get_col(ind_field);
+ col = dict_field_get_col(ind_field);
dfield = dtuple_get_nth_field(row,
dict_col_get_no(col));
field = rec_get_nth_field(rec, offsets, i, &len);
if (type == ROW_COPY_ALSO_EXTERNALS
- && rec_offs_nth_extern(offsets, i)) {
+ && rec_offs_nth_extern(offsets, i)) {
- field = btr_rec_copy_externally_stored_field(
+ field = btr_rec_copy_externally_stored_field(
rec, offsets, i, &len, heap);
}
@@ -302,7 +303,7 @@ row_rec_to_index_entry(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_ad(rec && heap && index);
-
+
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &tmp_heap);
@@ -315,7 +316,7 @@ row_rec_to_index_entry(
}
rec_len = rec_offs_n_fields(offsets);
-
+
entry = dtuple_create(heap, rec_len);
dtuple_set_n_fields_cmp(entry,
@@ -399,7 +400,7 @@ row_build_row_ref(
}
table = index->table;
-
+
clust_index = dict_table_get_first_index(table);
ref_len = dict_index_get_n_unique(clust_index);
@@ -414,7 +415,7 @@ row_build_row_ref(
pos = dict_index_get_nth_field_pos(index, clust_index, i);
ut_a(pos != ULINT_UNDEFINED);
-
+
field = rec_get_nth_field(rec, offsets, pos, &len);
dfield_set_data(dfield, field, len);
@@ -428,7 +429,7 @@ row_build_row_ref(
dict_index_get_nth_field(clust_index, i)->prefix_len;
if (clust_col_prefix_len > 0) {
- if (len != UNIV_SQL_NULL) {
+ if (len != UNIV_SQL_NULL) {
dfield_set_len(dfield,
dtype_get_at_most_n_mbchars(
@@ -478,7 +479,7 @@ row_build_row_ref_in_tuple(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_a(ref && index && rec);
-
+
if (!index->table) {
fputs("InnoDB: table ", stderr);
notfound:
@@ -488,9 +489,9 @@ row_build_row_ref_in_tuple(
fputs(" not found\n", stderr);
ut_error;
}
-
+
clust_index = dict_table_get_first_index(index->table);
-
+
if (!clust_index) {
fputs("InnoDB: clust index for table ", stderr);
goto notfound;
@@ -501,7 +502,7 @@ row_build_row_ref_in_tuple(
ref_len = dict_index_get_n_unique(clust_index);
ut_ad(ref_len == dtuple_get_n_fields(ref));
-
+
dict_index_copy_types(ref, clust_index, ref_len);
for (i = 0; i < ref_len; i++) {
@@ -524,7 +525,7 @@ row_build_row_ref_in_tuple(
dict_index_get_nth_field(clust_index, i)->prefix_len;
if (clust_col_prefix_len > 0) {
- if (len != UNIV_SQL_NULL) {
+ if (len != UNIV_SQL_NULL) {
dfield_set_len(dfield,
dtype_get_at_most_n_mbchars(
@@ -563,28 +564,28 @@ row_build_row_ref_from_row(
ulint ref_len;
ulint i;
dtype_t* cur_type;
-
+
ut_ad(ref && table && row);
-
+
clust_index = dict_table_get_first_index(table);
ref_len = dict_index_get_n_unique(clust_index);
ut_ad(ref_len == dtuple_get_n_fields(ref));
-
+
for (i = 0; i < ref_len; i++) {
dfield = dtuple_get_nth_field(ref, i);
-
+
field = dict_index_get_nth_field(clust_index, i);
-
+
col = dict_field_get_col(field);
-
+
dfield2 = dtuple_get_nth_field(row, dict_col_get_no(col));
dfield_copy(dfield, dfield2);
if (field->prefix_len > 0
- && dfield->len != UNIV_SQL_NULL) {
+ && dfield->len != UNIV_SQL_NULL) {
cur_type = dict_col_get_type(
dict_field_get_col(field));
@@ -613,7 +614,7 @@ row_search_on_row_ref(
dtuple_t* ref, /* in: row reference */
mtr_t* mtr) /* in: mtr */
{
- ulint low_match;
+ ulint low_match;
rec_t* rec;
dict_index_t* index;
@@ -624,7 +625,7 @@ row_search_on_row_ref(
ut_a(dtuple_get_n_fields(ref) == dict_index_get_n_unique(index));
btr_pcur_open(index, ref, PAGE_CUR_LE, mode, pcur, mtr);
-
+
low_match = btr_pcur_get_low_match(pcur);
rec = btr_pcur_get_rec(pcur);
@@ -662,7 +663,7 @@ row_get_clust_rec(
btr_pcur_t pcur;
ibool found;
rec_t* clust_rec;
-
+
ut_ad((index->type & DICT_CLUSTERED) == 0);
table = index->table;
@@ -703,7 +704,7 @@ row_search_index_entry(
rec_t* rec;
ut_ad(dtuple_check_typed(entry));
-
+
btr_pcur_open(index, entry, PAGE_CUR_LE, mode, pcur, mtr);
low_match = btr_pcur_get_low_match(pcur);
diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c
index b08d27d2916..0809d2872a5 100644
--- a/storage/innobase/row/row0sel.c
+++ b/storage/innobase/row/row0sel.c
@@ -70,13 +70,13 @@ row_sel_sec_rec_is_for_clust_rec(
dict_index_t* clust_index) /* in: clustered index */
{
dict_field_t* ifield;
- dict_col_t* col;
- byte* sec_field;
- ulint sec_len;
- byte* clust_field;
- ulint clust_len;
- ulint n;
- ulint i;
+ dict_col_t* col;
+ byte* sec_field;
+ ulint sec_len;
+ byte* clust_field;
+ ulint clust_len;
+ ulint n;
+ ulint i;
dtype_t* cur_type;
mem_heap_t* heap = NULL;
ulint clust_offsets_[REC_OFFS_NORMAL_SIZE];
@@ -93,19 +93,19 @@ row_sel_sec_rec_is_for_clust_rec(
sec_offs = rec_get_offsets(sec_rec, sec_index, sec_offs,
ULINT_UNDEFINED, &heap);
- n = dict_index_get_n_ordering_defined_by_user(sec_index);
+ n = dict_index_get_n_ordering_defined_by_user(sec_index);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
ifield = dict_index_get_nth_field(sec_index, i);
- col = dict_field_get_col(ifield);
-
+ col = dict_field_get_col(ifield);
+
clust_field = rec_get_nth_field(clust_rec, clust_offs,
- dict_col_get_clust_pos(col),
- &clust_len);
+ dict_col_get_clust_pos(col),
+ &clust_len);
sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len);
if (ifield->prefix_len > 0
- && clust_len != UNIV_SQL_NULL) {
+ && clust_len != UNIV_SQL_NULL) {
cur_type = dict_col_get_type(
dict_field_get_col(ifield));
@@ -116,13 +116,13 @@ row_sel_sec_rec_is_for_clust_rec(
clust_len, (char*) clust_field);
}
- if (0 != cmp_data_data(dict_col_get_type(col),
- clust_field, clust_len,
- sec_field, sec_len)) {
+ if (0 != cmp_data_data(dict_col_get_type(col),
+ clust_field, clust_len,
+ sec_field, sec_len)) {
is_equal = FALSE;
goto func_exit;
- }
- }
+ }
+ }
func_exit:
if (UNIV_LIKELY_NULL(heap)) {
@@ -150,7 +150,7 @@ sel_node_create(
node->latch_mode = BTR_SEARCH_LEAF;
node->plans = NULL;
-
+
return(node);
}
@@ -248,7 +248,7 @@ sel_reset_aggregate_vals(
eval_node_set_int_val(func_node, 0);
func_node = que_node_get_next(func_node);
- }
+ }
node->aggregate_already_fetched = FALSE;
}
@@ -292,7 +292,7 @@ row_sel_fetch_columns(
ulint field_no;
byte* data;
ulint len;
-
+
ut_ad(rec_offs_validate(rec, index, offsets));
if (index->type & DICT_CLUSTERED) {
@@ -305,9 +305,9 @@ row_sel_fetch_columns(
field_no = column->field_nos[index_type];
if (field_no != ULINT_UNDEFINED) {
-
+
data = rec_get_nth_field(rec, offsets, field_no, &len);
-
+
if (column->copy_val) {
eval_node_copy_and_alloc_val(column, data,
len);
@@ -333,7 +333,7 @@ sel_col_prefetch_buf_alloc(
ulint i;
ut_ad(que_node_get_type(column) == QUE_NODE_SYMBOL);
-
+
column->prefetch_buf = mem_alloc(SEL_MAX_N_PREFETCH
* sizeof(sel_buf_t));
for (i = 0; i < SEL_MAX_N_PREFETCH; i++) {
@@ -382,7 +382,7 @@ sel_pop_prefetched_row(
byte* data;
ulint len;
ulint val_buf_size;
-
+
ut_ad(plan->n_rows_prefetched > 0);
column = UT_LIST_GET_FIRST(plan->columns);
@@ -417,7 +417,7 @@ sel_pop_prefetched_row(
sel_buf->data = dfield_get_data(val);
sel_buf->len = dfield_get_len(val);
sel_buf->val_buf_size = que_node_get_val_buf_size(column);
-
+
dfield_set_data(val, data, len);
que_node_set_val_buf_size(column, val_buf_size);
next_col:
@@ -454,14 +454,14 @@ sel_push_prefetched_row(
/* We have the convention that pushing new rows starts only
after the prefetch stack has been emptied: */
-
+
ut_ad(plan->first_prefetched == 0);
}
plan->n_rows_prefetched++;
-
+
ut_ad(pos < SEL_MAX_N_PREFETCH);
-
+
column = UT_LIST_GET_FIRST(plan->columns);
while (column) {
@@ -471,7 +471,7 @@ sel_push_prefetched_row(
goto next_col;
}
-
+
if (!column->prefetch_buf) {
/* Allocate a new prefetch buffer */
@@ -492,11 +492,11 @@ sel_push_prefetched_row(
dfield_set_data(val, sel_buf->data, sel_buf->len);
que_node_set_val_buf_size(column, sel_buf->val_buf_size);
-
+
sel_buf->data = data;
sel_buf->len = len;
sel_buf->val_buf_size = val_buf_size;
-next_col:
+next_col:
column = UT_LIST_GET_NEXT(col_var_list, column);
}
}
@@ -528,7 +528,7 @@ row_sel_build_prev_vers(
} else {
plan->old_vers_heap = mem_heap_create(512);
}
-
+
err = row_vers_build_for_consistent_read(rec, mtr, plan->index,
offsets, read_view, offset_heap,
plan->old_vers_heap, old_vers);
@@ -563,7 +563,7 @@ row_sel_build_committed_vers_for_mysql(
} else {
prebuilt->old_vers_heap = mem_heap_create(200);
}
-
+
err = row_vers_build_for_semi_consistent_read(rec, mtr, clust_index,
offsets, offset_heap,
prebuilt->old_vers_heap, old_vers);
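
row_sel_build_committed_vers_for_mysql() above hands the record to the new row_vers_build_for_semi_consistent_read(), whose job is to produce the newest committed version of the clustered index record so the caller can evaluate it without waiting for the conflicting lock. A toy, self-contained model of that idea (hypothetical types, not the InnoDB API):

#include <stdio.h>

/* Toy versioned row: each version remembers whether the transaction that
wrote it has committed, and points to the previous version. */
typedef struct toy_version {
    int                 value;
    int                 writer_committed;
    struct toy_version* older;      /* previous version, or NULL */
} toy_version;

/* Return the newest committed version: instead of waiting for the lock of
an active writer, skip past its uncommitted versions. */
static const toy_version*
newest_committed(const toy_version* v)
{
    while (v != NULL && !v->writer_committed) {
        v = v->older;
    }

    return(v);
}

int
main(void)
{
    toy_version committed   = {10, 1, NULL};
    toy_version uncommitted = {20, 0, &committed};

    const toy_version* seen = newest_committed(&uncommitted);

    printf("semi-consistent read sees value %d\n", seen ? seen->value : -1);
    return(0);
}
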
@@ -577,7 +577,7 @@ UNIV_INLINE
ibool
row_sel_test_end_conds(
/*===================*/
- /* out: TRUE if row passed the tests */
+ /* out: TRUE if row passed the tests */
plan_t* plan) /* in: plan for the table; the column values must
already have been retrieved and the right sides of
comparisons evaluated */
@@ -586,7 +586,7 @@ row_sel_test_end_conds(
/* All conditions in end_conds are comparisons of a column to an
expression */
-
+
cond = UT_LIST_GET_FIRST(plan->end_conds);
while (cond) {
@@ -619,7 +619,7 @@ row_sel_test_other_conds(
already have been retrieved */
{
func_node_t* cond;
-
+
cond = UT_LIST_GET_FIRST(plan->other_conds);
while (cond) {
@@ -670,11 +670,11 @@ row_sel_get_clust_rec(
offsets = rec_get_offsets(rec,
btr_pcur_get_btr_cur(&plan->pcur)->index,
offsets, ULINT_UNDEFINED, &heap);
-
+
row_build_row_ref_fast(plan->clust_ref, plan->clust_map, rec, offsets);
index = dict_table_get_first_index(plan->table);
-
+
btr_pcur_open_with_no_init(index, plan->clust_ref, PAGE_CUR_LE,
node->latch_mode, &(plan->clust_pcur),
0, mtr);
@@ -685,10 +685,11 @@ row_sel_get_clust_rec(
low_match value the real match to the search tuple */
if (!page_rec_is_user_rec(clust_rec)
- || btr_pcur_get_low_match(&(plan->clust_pcur))
- < dict_index_get_n_unique(index)) {
-
- ut_a(rec_get_deleted_flag(rec, plan->table->comp));
+ || btr_pcur_get_low_match(&(plan->clust_pcur))
+ < dict_index_get_n_unique(index)) {
+
+ ut_a(rec_get_deleted_flag(rec,
+ dict_table_is_comp(plan->table)));
ut_a(node->read_view);
/* In a rare case it is possible that no clust rec is found
@@ -708,8 +709,8 @@ row_sel_get_clust_rec(
if (!node->read_view) {
/* Try to place a lock on the index record */
-
- /* If innodb_locks_unsafe_for_binlog option is used,
+
+ /* If innodb_locks_unsafe_for_binlog option is used,
we lock only the record, i.e., next-key locking is
not used. */
ulint lock_type;
@@ -764,10 +765,11 @@ row_sel_get_clust_rec(
a wrong result if we would not drop rows which we come to
visit through secondary index records that would not really
exist in our snapshot. */
-
- if ((old_vers || rec_get_deleted_flag(rec, plan->table->comp))
- && !row_sel_sec_rec_is_for_clust_rec(rec, plan->index,
- clust_rec, index)) {
+
+ if ((old_vers || rec_get_deleted_flag(rec,
+ dict_table_is_comp(plan->table)))
+ && !row_sel_sec_rec_is_for_clust_rec(rec, plan->index,
+ clust_rec, index)) {
goto func_exit;
}
}
@@ -797,17 +799,17 @@ sel_set_rec_lock(
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
ulint mode, /* in: lock mode */
- ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or LOC_REC_NOT_GAP */
- que_thr_t* thr) /* in: query thread */
+ ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or LOCK_REC_NOT_GAP */
+ que_thr_t* thr) /* in: query thread */
{
trx_t* trx;
ulint err;
- trx = thr_get_trx(thr);
+ trx = thr_get_trx(thr);
if (UT_LIST_GET_LEN(trx->trx_locks) > 10000) {
if (buf_LRU_buf_pool_running_out()) {
-
+
return(DB_LOCK_TABLE_FULL);
}
}
@@ -841,7 +843,7 @@ row_sel_open_pcur(
func_node_t* cond;
que_node_t* exp;
ulint n_fields;
- ulint has_search_latch = 0; /* RW_S_LATCH or 0 */
+ ulint has_search_latch = 0; /* RW_S_LATCH or 0 */
ulint i;
if (search_latch_locked) {
@@ -858,29 +860,29 @@ row_sel_open_pcur(
while (cond) {
eval_exp(que_node_get_next(cond->args));
-
+
cond = UT_LIST_GET_NEXT(cond_list, cond);
}
-
+
if (plan->tuple) {
n_fields = dtuple_get_n_fields(plan->tuple);
-
+
if (plan->n_exact_match < n_fields) {
/* There is a non-exact match field which must be
evaluated separately */
-
+
eval_exp(plan->tuple_exps[n_fields - 1]);
}
-
+
for (i = 0; i < n_fields; i++) {
exp = plan->tuple_exps[i];
-
+
dfield_copy_data(dtuple_get_nth_field(plan->tuple, i),
que_node_get_val(exp));
}
-
+
/* Open pcur to the index */
-
+
btr_pcur_open_with_no_init(index, plan->tuple, plan->mode,
node->latch_mode, &(plan->pcur),
has_search_latch, mtr);
@@ -895,7 +897,7 @@ row_sel_open_pcur(
ut_ad(plan->n_rows_prefetched == 0);
ut_ad(plan->n_rows_fetched == 0);
ut_ad(plan->cursor_at_end == FALSE);
-
+
plan->pcur_is_open = TRUE;
}
@@ -918,14 +920,14 @@ row_sel_restore_pcur_pos(
ulint relative_position;
ut_ad(!plan->cursor_at_end);
-
+
relative_position = btr_pcur_get_rel_pos(&(plan->pcur));
equal_position = btr_pcur_restore_position(node->latch_mode,
&(plan->pcur), mtr);
/* If the cursor is traveling upwards, and relative_position is
-
+
(1) BTR_PCUR_BEFORE: this is not allowed, as we did not have a lock
yet on the successor of the page infimum;
(2) BTR_PCUR_AFTER: btr_pcur_restore_position placed the cursor on the
@@ -952,13 +954,13 @@ row_sel_restore_pcur_pos(
}
ut_ad(relative_position == BTR_PCUR_AFTER
- || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE);
+ || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE);
return(FALSE);
}
/* If the cursor is traveling downwards, and relative_position is
-
+
(1) BTR_PCUR_BEFORE: btr_pcur_restore_position placed the cursor on
the last record LESS than the successor of a page infimum; we have not
processed the cursor record: no need to move the cursor;
@@ -975,7 +977,7 @@ row_sel_restore_pcur_pos(
record, else there is no need to move the cursor. */
if (relative_position == BTR_PCUR_BEFORE
- || relative_position == BTR_PCUR_BEFORE_FIRST_IN_TREE) {
+ || relative_position == BTR_PCUR_BEFORE_FIRST_IN_TREE) {
return(FALSE);
}
@@ -991,7 +993,7 @@ row_sel_restore_pcur_pos(
}
ut_ad(relative_position == BTR_PCUR_AFTER
- || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE);
+ || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE);
return(TRUE);
}
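
The comment blocks above enumerate, per stored relative position and direction of travel, whether the restored persistent cursor must still be moved before processing. The exact cases are InnoDB-specific, but the underlying pattern of suspending and resuming an ordered scan can be sketched independently with made-up names:

#include <stdio.h>

/* A scan over a sorted array that can be suspended and resumed. Instead of
trusting a raw index across the suspension, we remember the last key that
was fully processed and, on resume, reposition to the first key greater
than it: the analogue of restoring a persistent cursor and then deciding
whether to step past an already-processed record. */
static int
toy_resume(const int* keys, int n, int last_processed_key)
{
    int i = 0;

    while (i < n && keys[i] <= last_processed_key) {
        i++;            /* skip records handled before suspending */
    }

    return(i);          /* index of the next record to process */
}

int
main(void)
{
    int keys[] = {3, 5, 8, 13};
    int next   = toy_resume(keys, 4, 5);

    if (next < 4) {
        printf("resume at index %d (key %d)\n", next, keys[next]);
    } else {
        printf("scan already exhausted\n");
    }
    return(0);
}
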
@@ -1003,13 +1005,13 @@ void
plan_reset_cursor(
/*==============*/
plan_t* plan) /* in: plan */
-{
+{
plan->pcur_is_open = FALSE;
- plan->cursor_at_end = FALSE;
+ plan->cursor_at_end = FALSE;
plan->n_rows_fetched = 0;
plan->n_rows_prefetched = 0;
}
-
+
/*************************************************************************
Tries to do a shortcut to fetch a clustered index record with a unique key,
using the hash index if possible (not always). */
@@ -1039,21 +1041,21 @@ row_sel_try_search_shortcut(
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
-
+
row_sel_open_pcur(node, plan, TRUE, mtr);
rec = btr_pcur_get_rec(&(plan->pcur));
-
+
if (!page_rec_is_user_rec(rec)) {
return(SEL_RETRY);
}
ut_ad(plan->mode == PAGE_CUR_GE);
-
+
/* As the cursor is now placed on a user record after a search with
the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
- fields in the user record matched to the search tuple */
+ fields in the user record matched to the search tuple */
if (btr_pcur_get_up_match(&(plan->pcur)) < plan->n_exact_match) {
@@ -1062,7 +1064,7 @@ row_sel_try_search_shortcut(
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
-
+
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
if (index->type & DICT_CLUSTERED) {
@@ -1082,14 +1084,14 @@ row_sel_try_search_shortcut(
row_sel_fetch_columns(index, rec, offsets,
UT_LIST_GET_FIRST(plan->columns));
- if (rec_get_deleted_flag(rec, plan->table->comp)) {
+ if (rec_get_deleted_flag(rec, dict_table_is_comp(plan->table))) {
ret = SEL_EXHAUSTED;
goto func_exit;
}
/* Test the rest of search conditions */
-
+
if (!row_sel_test_other_conds(plan)) {
ret = SEL_EXHAUSTED;
@@ -1125,7 +1127,7 @@ row_sel(
rec_t* clust_rec;
ibool search_latch_locked;
ibool consistent_read;
-
+
/* The following flag becomes TRUE when we are doing a
consistent read from a non-clustered index and we must look
at the clustered index to find out the previous delete mark
@@ -1135,14 +1137,14 @@ row_sel(
ulint cost_counter = 0;
ibool cursor_just_opened;
ibool must_go_to_next;
- ibool leaf_contains_updates = FALSE;
+ ibool leaf_contains_updates = FALSE;
/* TRUE if select_will_do_update is
TRUE and the current clustered index
leaf page has been updated during
the current mtr: mtr must be committed
at the same time as the leaf x-latch
is released */
- ibool mtr_has_extra_clust_latch = FALSE;
+ ibool mtr_has_extra_clust_latch = FALSE;
/* TRUE if the search was made using
a non-clustered index, and we had to
access the clustered record: now &mtr
@@ -1203,7 +1205,7 @@ table_loop:
}
/* Open a cursor to index, or restore an open cursor position */
-
+
mtr_start(&mtr);
if (consistent_read && plan->unique_search && !plan->pcur_is_open
@@ -1235,7 +1237,7 @@ table_loop:
goto table_exhausted;
}
-
+
ut_ad(found_flag == SEL_RETRY);
plan_reset_cursor(plan);
@@ -1270,11 +1272,11 @@ table_loop:
if (must_go_to_next) {
/* We have already processed the cursor record: move
to the next */
-
+
goto next_rec;
}
}
-
+
rec_loop:
/* RECORD LOOP
-----------
@@ -1287,11 +1289,11 @@ rec_loop:
NOTE that if cursor_just_opened is TRUE here, it means that we came
to this point right after row_sel_open_pcur. */
-
+
ut_ad(mtr_has_extra_clust_latch == FALSE);
rec = btr_pcur_get_rec(&(plan->pcur));
-
+
/* PHASE 1: Set a lock if specified */
if (!node->asc && cursor_just_opened
@@ -1302,7 +1304,7 @@ rec_loop:
be possible to insert new records next to the cursor position,
and it might be that these new records should appear in the
search result set, resulting in the phantom problem. */
-
+
if (!consistent_read) {
/* If innodb_locks_unsafe_for_binlog option is used,
@@ -1327,7 +1329,7 @@ rec_loop:
/* Note that in this case we will store in pcur
the PREDECESSOR of the record we are waiting
the lock for */
-
+
goto lock_wait_or_error;
}
}
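
This hunk and several later ones repeat the same rule: when innodb_locks_unsafe_for_binlog is set, only the record itself is locked, otherwise a next-key lock also covers the gap so no phantom row can be inserted next to the cursor position. A tiny sketch of that choice, with hypothetical constants standing in for LOCK_ORDINARY and LOCK_REC_NOT_GAP:

#include <stdio.h>

enum toy_lock_type {
    TOY_LOCK_NEXT_KEY,      /* record plus the gap before it */
    TOY_LOCK_REC_NOT_GAP    /* record only */
};

/* Gap locking is skipped in the "unsafe for binlog" mode, trading phantom
protection (serializability) for fewer lock conflicts. */
static enum toy_lock_type
choose_lock_type(int locks_unsafe_for_binlog)
{
    return(locks_unsafe_for_binlog
           ? TOY_LOCK_REC_NOT_GAP : TOY_LOCK_NEXT_KEY);
}

int
main(void)
{
    printf("%d %d\n", choose_lock_type(0), choose_lock_type(1));
    return(0);
}
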
@@ -1346,7 +1348,7 @@ rec_loop:
}
if (!consistent_read) {
- /* Try to place a lock on the index record */
+ /* Try to place a lock on the index record */
/* If innodb_locks_unsafe_for_binlog option is used,
we lock only the record, i.e., next-key locking is
@@ -1382,7 +1384,7 @@ rec_loop:
ut_ad(page_rec_is_user_rec(rec));
if (cost_counter > SEL_COST_LIMIT) {
-
+
/* Now that we have placed the necessary locks, we can stop
for a while and store the cursor position; NOTE that if we
would store the cursor position BEFORE placing a record lock,
@@ -1392,17 +1394,17 @@ rec_loop:
goto stop_for_a_while;
}
-
+
/* PHASE 2: Check a mixed index mix id if needed */
if (plan->unique_search && cursor_just_opened) {
ut_ad(plan->mode == PAGE_CUR_GE);
-
+
/* As the cursor is now placed on a user record after a search
with the mode PAGE_CUR_GE, the up_match field in the cursor
tells how many fields in the user record matched to the search
- tuple */
+ tuple */
if (btr_pcur_get_up_match(&(plan->pcur))
< plan->n_exact_match) {
@@ -1412,13 +1414,13 @@ rec_loop:
/* Ok, no need to test end_conds or mix id */
} else if (plan->mixed_index) {
- /* We have to check if the record in a mixed cluster belongs
- to this table */
+ /* We have to check if the record in a mixed cluster belongs
+ to this table */
- if (!dict_is_mixed_table_rec(plan->table, rec)) {
+ if (!dict_is_mixed_table_rec(plan->table, rec)) {
- goto next_rec;
- }
+ goto next_rec;
+ }
}
/* We are ready to look at a possible new index entry in the result
@@ -1434,7 +1436,7 @@ rec_loop:
a previous version of the record */
if (index->type & DICT_CLUSTERED) {
-
+
if (!lock_clust_rec_cons_read_sees(rec, index, offsets,
node->read_view)) {
@@ -1452,8 +1454,8 @@ rec_loop:
rec, index, offsets,
ULINT_UNDEFINED, &heap);
row_sel_fetch_columns(index, rec,
- offsets,
- UT_LIST_GET_FIRST(plan->columns));
+ offsets,
+ UT_LIST_GET_FIRST(plan->columns));
if (!row_sel_test_end_conds(plan)) {
@@ -1474,7 +1476,7 @@ rec_loop:
/* PHASE 4: Test search end conditions and deleted flag */
/* Fetch the columns needed in test conditions */
-
+
row_sel_fetch_columns(index, rec, offsets,
UT_LIST_GET_FIRST(plan->columns));
@@ -1491,7 +1493,7 @@ rec_loop:
goto table_exhausted;
}
- if (rec_get_deleted_flag(rec, plan->table->comp)
+ if (rec_get_deleted_flag(rec, dict_table_is_comp(plan->table))
&& !cons_read_requires_clust_rec) {
/* The record is delete marked: we can skip it if this is
@@ -1499,10 +1501,10 @@ rec_loop:
of a non-clustered index record */
if (plan->unique_search) {
-
+
goto table_exhausted;
}
-
+
goto next_rec;
}
@@ -1517,7 +1519,7 @@ rec_loop:
err = row_sel_get_clust_rec(node, plan, rec, thr, &clust_rec,
&mtr);
mtr_has_extra_clust_latch = TRUE;
-
+
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -1535,7 +1537,8 @@ rec_loop:
goto next_rec;
}
- if (rec_get_deleted_flag(clust_rec, plan->table->comp)) {
+ if (rec_get_deleted_flag(clust_rec,
+ dict_table_is_comp(plan->table))) {
/* The record is delete marked: we can skip it */
@@ -1546,14 +1549,14 @@ rec_loop:
btr_pcur_store_position(&(plan->clust_pcur), &mtr);
}
- }
+ }
/* PHASE 6: Test the rest of search conditions */
-
+
if (!row_sel_test_other_conds(plan)) {
if (plan->unique_search) {
-
+
goto table_exhausted;
}
@@ -1562,7 +1565,7 @@ rec_loop:
/* PHASE 7: We found a new qualifying row for the current table; push
the row if prefetch is on, or move to the next table in the join */
-
+
plan->n_rows_fetched++;
ut_ad(plan->pcur.latch_mode == node->latch_mode);
@@ -1578,22 +1581,22 @@ rec_loop:
/* When the database is in the online backup mode, the number
of log records for a single mtr should be small: increment the
cost counter to ensure it */
-
+
cost_counter += 1 + (SEL_COST_LIMIT / 8);
if (plan->unique_search) {
- goto table_exhausted;
+ goto table_exhausted;
}
goto next_rec;
- }
+ }
if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
|| plan->unique_search || plan->no_prefetch) {
/* No prefetch in operation: go to the next table */
-
+
goto next_table;
}
@@ -1602,13 +1605,13 @@ rec_loop:
if (plan->n_rows_prefetched == SEL_MAX_N_PREFETCH) {
/* The prefetch buffer is now full */
-
+
sel_pop_prefetched_row(plan);
goto next_table;
}
-next_rec:
+next_rec:
ut_ad(!search_latch_locked);
if (mtr_has_extra_clust_latch) {
@@ -1620,7 +1623,7 @@ next_rec:
goto commit_mtr_for_a_while;
}
-
+
if (leaf_contains_updates
&& btr_pcur_is_after_last_on_page(&(plan->pcur), &mtr)) {
@@ -1641,7 +1644,7 @@ next_rec:
}
if (!moved) {
-
+
goto table_exhausted;
}
@@ -1656,7 +1659,7 @@ next_table:
the next table or return a row in the result set */
ut_ad(btr_pcur_is_on_user_rec(&(plan->pcur), &mtr));
-
+
if (plan->unique_search && !node->can_get_updated) {
plan->cursor_at_end = TRUE;
@@ -1676,18 +1679,18 @@ next_table:
next_table_no_mtr:
/* If we use 'goto' to this label, it means that the row was popped
from the prefetched rows stack, and &mtr is already committed */
-
+
if (node->fetch_table + 1 == node->n_tables) {
sel_eval_select_list(node);
if (node->is_aggregate) {
- goto table_loop;
+ goto table_loop;
}
sel_assign_into_var_values(node->into_list, node);
-
+
thr->run_node = que_node_get_parent(node);
if (search_latch_locked) {
@@ -1702,14 +1705,14 @@ next_table_no_mtr:
/* When we move to the next table, we first reset the plan cursor:
we do not care about resetting it when we backtrack from a table */
-
+
plan_reset_cursor(sel_node_get_nth_plan(node, node->fetch_table));
-
+
goto table_loop;
table_exhausted:
/* The table cursor pcur reached the result set end: backtrack to the
- previous table in the join if we do not have cached prefetched rows */
+ previous table in the join if we do not have cached prefetched rows */
plan->cursor_at_end = TRUE;
@@ -1717,10 +1720,10 @@ table_exhausted:
leaf_contains_updates = FALSE;
mtr_has_extra_clust_latch = FALSE;
-
+
if (plan->n_rows_prefetched > 0) {
/* The table became exhausted during a prefetch */
-
+
sel_pop_prefetched_row(plan);
goto next_table_no_mtr;
@@ -1741,18 +1744,18 @@ table_exhausted_no_mtr:
if (search_latch_locked) {
rw_lock_s_unlock(&btr_search_latch);
}
-
+
goto func_exit;
}
node->state = SEL_NODE_NO_MORE_ROWS;
-
+
thr->run_node = que_node_get_parent(node);
if (search_latch_locked) {
rw_lock_s_unlock(&btr_search_latch);
}
-
+
goto func_exit;
}
@@ -1767,7 +1770,7 @@ stop_for_a_while:
record lock on the cursor record or its successor: when we reposition
the cursor, this record lock guarantees that nobody can meanwhile have
inserted new records which should have appeared in the result set,
- which would result in the phantom problem. */
+ which would result in the phantom problem. */
ut_ad(!search_latch_locked);
@@ -1775,7 +1778,7 @@ stop_for_a_while:
btr_pcur_store_position(&(plan->pcur), &mtr);
mtr_commit(&mtr);
-
+
ut_ad(sync_thread_levels_empty_gen(TRUE));
err = DB_SUCCESS;
goto func_exit;
@@ -1783,7 +1786,7 @@ stop_for_a_while:
commit_mtr_for_a_while:
/* Stores the cursor position and commits &mtr; this is used if
&mtr may contain latches which would break the latching order if
- &mtr would not be committed and the latches released. */
+ &mtr would not be committed and the latches released. */
plan->stored_cursor_rec_processed = TRUE;
@@ -1794,7 +1797,7 @@ commit_mtr_for_a_while:
leaf_contains_updates = FALSE;
mtr_has_extra_clust_latch = FALSE;
-
+
ut_ad(sync_thread_levels_empty_gen(TRUE));
goto table_loop;
@@ -1808,9 +1811,9 @@ lock_wait_or_error:
plan->stored_cursor_rec_processed = FALSE;
btr_pcur_store_position(&(plan->pcur), &mtr);
-
+
mtr_commit(&mtr);
-
+
ut_ad(sync_thread_levels_empty_gen(TRUE));
func_exit:
@@ -1836,7 +1839,7 @@ row_sel_step(
ulint err;
ut_ad(thr);
-
+
node = thr->run_node;
ut_ad(que_node_get_type(node) == QUE_NODE_SELECT);
@@ -1869,23 +1872,23 @@ row_sel_step(
} else {
i_lock_mode = LOCK_IS;
}
-
+
table_node = node->table_list;
-
+
while (table_node) {
err = lock_table(0, table_node->table,
i_lock_mode, thr);
if (err != DB_SUCCESS) {
-
+
que_thr_handle_error(thr, DB_ERROR,
NULL, 0);
return(NULL);
}
-
+
table_node = que_node_get_next(table_node);
}
}
-
+
/* If this is an explicit cursor, copy stored procedure
variable values, so that the values cannot change between
fetches (currently, we copy them also for non-explicit
@@ -1896,7 +1899,7 @@ row_sel_step(
row_sel_copy_input_variable_vals(node);
}
-
+
node->state = SEL_NODE_FETCH;
node->fetch_table = 0;
@@ -1911,7 +1914,7 @@ row_sel_step(
/* NOTE! if queries are parallelized, the following assignment may
have problems; the assignment should be made only if thr is the
only top-level thr in the graph: */
-
+
thr->graph->last_sel_node = node;
if (err == DB_SUCCESS) {
@@ -1930,7 +1933,7 @@ row_sel_step(
}
return(thr);
-}
+}
/**************************************************************************
Performs a fetch for a cursor. */
@@ -1945,16 +1948,16 @@ fetch_step(
fetch_node_t* node;
ut_ad(thr);
-
+
node = thr->run_node;
sel_node = node->cursor_def;
-
+
ut_ad(que_node_get_type(node) == QUE_NODE_FETCH);
if (thr->prev_node != que_node_get_parent(node)) {
if (sel_node->state != SEL_NODE_NO_MORE_ROWS) {
-
+
sel_assign_into_var_values(node->into_list, sel_node);
}
@@ -1967,9 +1970,9 @@ fetch_step(
the time of the fetch, so that execution knows to return to this
fetch node after a row has been selected or we know that there is
no row left */
-
+
sel_node->common.parent = node;
-
+
if (sel_node->state == SEL_NODE_CLOSED) {
/* SQL error detected */
fprintf(stderr, "SQL error %lu\n", (ulong)DB_ERROR);
@@ -1982,7 +1985,7 @@ fetch_step(
thr->run_node = sel_node;
return(thr);
-}
+}
/***************************************************************
Prints a row in a select result. */
@@ -1998,22 +2001,22 @@ row_printf_step(
que_node_t* arg;
ut_ad(thr);
-
+
node = thr->run_node;
-
+
sel_node = node->sel_node;
ut_ad(que_node_get_type(node) == QUE_NODE_ROW_PRINTF);
if (thr->prev_node == que_node_get_parent(node)) {
-
+
/* Reset the cursor */
sel_node->state = SEL_NODE_OPEN;
/* Fetch next row to print */
thr->run_node = sel_node;
-
+
return(thr);
}
@@ -2024,7 +2027,7 @@ row_printf_step(
/* No more rows to print */
thr->run_node = que_node_get_parent(node);
-
+
return(thr);
}
@@ -2045,7 +2048,7 @@ row_printf_step(
thr->run_node = sel_node;
return(thr);
-}
+}
/********************************************************************
Converts a key value stored in MySQL format to an Innobase dtuple. The last
@@ -2081,14 +2084,14 @@ row_sel_convert_mysql_key_to_innobase(
byte* key_end;
ulint n_fields = 0;
ulint type;
-
+
/* For documentation of the key value storage format in MySQL, see
ha_innobase::store_key_val_for_row() in ha_innodb.cc. */
key_end = key_ptr + key_len;
/* Permit us to access any field in the tuple (ULINT_MAX): */
-
+
dtuple_set_n_fields(tuple, ULINT_MAX);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -2104,7 +2107,7 @@ row_sel_convert_mysql_key_to_innobase(
ut_a(key_len == DATA_ROW_ID_LEN);
dfield_set_data(dfield, key_ptr, DATA_ROW_ID_LEN);
-
+
dtuple_set_n_fields(tuple, 1);
return;
@@ -2113,28 +2116,28 @@ row_sel_convert_mysql_key_to_innobase(
while (key_ptr < key_end) {
ut_a(dict_col_get_type(field->col)->mtype
- == dfield_get_type(dfield)->mtype);
+ == dfield_get_type(dfield)->mtype);
data_offset = 0;
is_null = FALSE;
- if (!(dfield_get_type(dfield)->prtype & DATA_NOT_NULL)) {
- /* The first byte in the field tells if this is
- an SQL NULL value */
-
+ if (!(dfield_get_type(dfield)->prtype & DATA_NOT_NULL)) {
+ /* The first byte in the field tells if this is
+ an SQL NULL value */
+
data_offset = 1;
- if (*key_ptr != 0) {
- dfield_set_data(dfield, NULL, UNIV_SQL_NULL);
+ if (*key_ptr != 0) {
+ dfield_set_data(dfield, NULL, UNIV_SQL_NULL);
is_null = TRUE;
- }
- }
+ }
+ }
type = dfield_get_type(dfield)->mtype;
/* Calculate data length and data field total length */
-
+
if (type == DATA_BLOB) {
/* The key field is a column prefix of a BLOB or
TEXT */
@@ -2171,16 +2174,16 @@ row_sel_convert_mysql_key_to_innobase(
with the full prefix_len bytes. How do characters
0xff in UTF-8 behave? */
- data_len = field->prefix_len;
+ data_len = field->prefix_len;
data_field_len = data_offset + data_len;
} else {
data_len = dfield_get_type(dfield)->len;
data_field_len = data_offset + data_len;
}
- if (dtype_get_mysql_type(dfield_get_type(dfield))
+ if (dtype_get_mysql_type(dfield_get_type(dfield))
== DATA_MYSQL_TRUE_VARCHAR
- && dfield_get_type(dfield)->mtype != DATA_INT) {
+ && dfield_get_type(dfield)->mtype != DATA_INT) {
/* In a MySQL key value format, a true VARCHAR is
always preceded by 2 bytes of a length field.
dfield_get_type(dfield)->len returns the maximum
@@ -2195,32 +2198,32 @@ row_sel_convert_mysql_key_to_innobase(
}
/* Storing may use at most data_len bytes of buf */
-
+
if (!is_null) {
- row_mysql_store_col_in_innobase_format(
+ row_mysql_store_col_in_innobase_format(
dfield,
buf,
FALSE, /* MySQL key value format col */
key_ptr + data_offset,
data_len,
- index->table->comp);
+ dict_table_is_comp(index->table));
buf += data_len;
}
- key_ptr += data_field_len;
+ key_ptr += data_field_len;
if (key_ptr > key_end) {
/* The last field in key was not a complete key field
but a prefix of it.
- Print a warning about this! HA_READ_PREFIX_LAST does
+ Print a warning about this! HA_READ_PREFIX_LAST does
not currently work in InnoDB with partial-field key
value prefixes. Since MySQL currently uses a padding
trick to calculate LIKE 'abc%' type queries there
should never be partial-field prefixes in searches. */
- ut_print_timestamp(stderr);
-
+ ut_print_timestamp(stderr);
+
fputs(
" InnoDB: Warning: using a partial-field key prefix in search.\n"
"InnoDB: ", stderr);
@@ -2235,21 +2238,21 @@ row_sel_convert_mysql_key_to_innobase(
fprintf(stderr, "\n");
if (!is_null) {
- dfield->len -= (ulint)(key_ptr - key_end);
+ dfield->len -= (ulint)(key_ptr - key_end);
}
}
- n_fields++;
- field++;
+ n_fields++;
+ field++;
dfield++;
- }
+ }
ut_a(buf <= original_buf + buf_len);
- /* We set the length of tuple to n_fields: we assume that the memory
+ /* We set the length of tuple to n_fields: we assume that the memory
area allocated for it is big enough (usually bigger than n_fields). */
-
- dtuple_set_n_fields(tuple, n_fields);
+
+ dtuple_set_n_fields(tuple, n_fields);
}
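
The loop above walks the MySQL key buffer field by field: for a nullable column the first byte says whether the value is SQL NULL, and a true VARCHAR value is preceded by a 2-byte length field before the data bytes. A simplified parser for that per-field layout is sketched below; the little-endian length and the made-up names are assumptions of the sketch, not taken from this file:

#include <stdint.h>
#include <stdio.h>

/* Decoded view of one key field. */
typedef struct {
    int             is_null;
    uint32_t        len;
    const uint8_t*  data;
} toy_key_field;

/* Parse one nullable VARCHAR-style field:
[1 byte NULL flag][2 byte length, assumed little-endian][data bytes].
Returns the number of bytes consumed. */
static size_t
toy_parse_field(const uint8_t* p, toy_key_field* out)
{
    size_t off = 0;

    out->is_null = (p[off++] != 0);     /* nonzero first byte = SQL NULL */
    out->len  = (uint32_t) p[off] | ((uint32_t) p[off + 1] << 8);
    off += 2;
    out->data = p + off;

    return(off + out->len);             /* data bytes follow the header */
}

int
main(void)
{
    /* not NULL, length 3, "abc" */
    const uint8_t buf[] = {0x00, 0x03, 0x00, 'a', 'b', 'c'};
    toy_key_field f;
    size_t used = toy_parse_field(buf, &f);

    printf("null=%d len=%u used=%zu first=%c\n",
           f.is_null, f.len, used, f.data[0]);
    return(0);
}
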
/******************************************************************
@@ -2273,12 +2276,13 @@ row_sel_store_row_id_to_prebuilt(
dict_index_get_sys_col_pos(index, DATA_ROW_ID), &len);
if (len != DATA_ROW_ID_LEN) {
- fprintf(stderr,
+ fprintf(stderr,
"InnoDB: Error: Row id field is wrong length %lu in ", (ulong) len);
dict_index_name_print(stderr, prebuilt->trx, index);
fprintf(stderr, "\n"
"InnoDB: Field number %lu, record:\n",
- (ulong) dict_index_get_sys_col_pos(index, DATA_ROW_ID));
+ (ulong) dict_index_get_sys_col_pos(index,
+ DATA_ROW_ID));
rec_print_new(stderr, index_rec, offsets);
putc('\n', stderr);
ut_error;
@@ -2331,7 +2335,7 @@ row_sel_field_store_in_mysql_format(
ut_ad(templ->mysql_col_len == len);
} else if (templ->type == DATA_VARCHAR
- || templ->type == DATA_VARMYSQL
+ || templ->type == DATA_VARMYSQL
|| templ->type == DATA_BINARY) {
field_end = dest + templ->mysql_col_len;
@@ -2340,18 +2344,18 @@ row_sel_field_store_in_mysql_format(
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
-
+
dest = row_mysql_store_true_var_len(dest, len,
templ->mysql_length_bytes);
}
/* Copy the actual data */
ut_memcpy(dest, data, len);
-
+
/* Pad with trailing spaces. We pad with spaces also the
unused end of a >= 5.0.3 true VARCHAR column, just in case
MySQL expects its contents to be deterministic. */
-
+
pad_ptr = dest + len;
ut_ad(templ->mbminlen <= templ->mbmaxlen);
@@ -2363,13 +2367,13 @@ row_sel_field_store_in_mysql_format(
if (len & 1) {
/* A 0x20 has been stripped from the column.
Pad it back. */
-
+
if (pad_ptr < field_end) {
*pad_ptr = 0x20;
pad_ptr++;
}
}
-
+
/* Pad the rest of the string with 0x0020 */
while (pad_ptr < field_end) {
@@ -2393,16 +2397,18 @@ row_sel_field_store_in_mysql_format(
} else if (templ->type == DATA_MYSQL) {
memcpy(dest, data, len);
+#if defined(UNIV_RELEASE_NOT_YET_STABLE) || defined(UNIV_DEBUG)
ut_a(templ->mysql_col_len >= len);
ut_a(templ->mbmaxlen >= templ->mbminlen);
ut_a(templ->mbmaxlen > templ->mbminlen
|| templ->mysql_col_len == len);
+ ut_a(len * templ->mbmaxlen >= templ->mysql_col_len);
+#endif /* UNIV_RELEASE_NOT_YET_STABLE || UNIV_DEBUG */
/* The following assertion would fail for old tables
containing UTF-8 ENUM columns due to Bug #9526. */
ut_ad(!templ->mbmaxlen
|| !(templ->mysql_col_len % templ->mbmaxlen));
- ut_a(len * templ->mbmaxlen >= templ->mysql_col_len);
if (templ->mbminlen != templ->mbmaxlen) {
/* Pad with spaces. This undoes the stripping
@@ -2412,6 +2418,7 @@ row_sel_field_store_in_mysql_format(
memset(dest + len, 0x20, templ->mysql_col_len - len);
}
} else {
+#if defined(UNIV_RELEASE_NOT_YET_STABLE) || defined(UNIV_DEBUG)
ut_a(templ->type == DATA_CHAR
|| templ->type == DATA_FIXBINARY
/*|| templ->type == DATA_SYS_CHILD
@@ -2419,6 +2426,7 @@ row_sel_field_store_in_mysql_format(
|| templ->type == DATA_FLOAT
|| templ->type == DATA_DOUBLE
|| templ->type == DATA_DECIMAL);
+#endif /* UNIV_RELEASE_NOT_YET_STABLE || UNIV_DEBUG */
ut_ad(templ->mysql_col_len == len);
memcpy(dest, data, len);
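
Earlier in this function the VARCHAR-family branch pads the unused tail of the column with spaces, and for a UCS2 column it first restores a stripped trailing 0x20 byte before filling whole 0x0020 units. A standalone sketch of that padding step, assuming the usual big-endian UCS2 layout where a space is the byte pair 0x00 0x20 (hypothetical helper, not the InnoDB routine):

#include <stdio.h>

/* Pad a fixed-width UCS2 column out to col_len bytes with spaces (0x0020).
If the stored value has an odd byte length, a trailing 0x20 was stripped
from the last space; restore it before padding whole units. */
static void
toy_pad_ucs2_spaces(unsigned char* buf, size_t len, size_t col_len)
{
    size_t i = len;

    if ((len & 1) && i < col_len) {
        buf[i++] = 0x20;        /* second half of a stripped 0x0020 */
    }

    while (i + 2 <= col_len) {
        buf[i++] = 0x00;        /* full 0x0020 space units */
        buf[i++] = 0x20;
    }
}

int
main(void)
{
    unsigned char col[6] = {0x00, 0x41, 0x00};  /* "A" plus half a space */
    size_t i;

    toy_pad_ucs2_spaces(col, 3, sizeof col);

    for (i = 0; i < sizeof col; i++) {
        printf("%02x ", col[i]);
    }
    putchar('\n');              /* prints: 00 41 00 20 00 20 */
    return(0);
}
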
@@ -2451,7 +2459,7 @@ row_sel_store_mysql_rec(
byte* data;
ulint len;
ulint i;
-
+
ut_ad(prebuilt->mysql_template);
ut_ad(rec_offs_validate(rec, NULL, offsets));
@@ -2499,8 +2507,8 @@ row_sel_store_mysql_rec(
CPU time, we do not use it for small BLOBs. */
if (UNIV_UNLIKELY(len > 2000000)
- && UNIV_UNLIKELY(!ut_test_malloc(
- len + 1000000))) {
+ && UNIV_UNLIKELY(!ut_test_malloc(
+ len + 1000000))) {
ut_print_timestamp(stderr);
fprintf(stderr,
@@ -2530,17 +2538,17 @@ row_sel_store_mysql_rec(
prebuilt->blob_heap, len),
data, len);
}
-
+
row_sel_field_store_in_mysql_format(
mysql_rec + templ->mysql_col_offset,
templ, data, len);
/* Cleanup */
if (extern_field_heap) {
- mem_heap_free(extern_field_heap);
+ mem_heap_free(extern_field_heap);
extern_field_heap = NULL;
- }
-
+ }
+
if (templ->mysql_null_bit_mask) {
/* It is a nullable column with a non-NULL
value */
@@ -2548,11 +2556,11 @@ row_sel_store_mysql_rec(
~(byte) (templ->mysql_null_bit_mask);
}
} else {
- /* MySQL seems to assume the field for an SQL NULL
- value is set to zero or space. Not taking this into
+ /* MySQL seems to assume the field for an SQL NULL
+ value is set to zero or space. Not taking this into
account caused seg faults with NULL BLOB fields, and
- bug number 154 in the MySQL bug database: GROUP BY
- and DISTINCT could treat NULL values inequal. */
+ bug number 154 in the MySQL bug database: GROUP BY
+ and DISTINCT could treat NULL values as unequal. */
int pad_char;
mysql_rec[templ->mysql_null_byte_offset] |=
@@ -2562,7 +2570,7 @@ row_sel_store_mysql_rec(
case DATA_BINARY:
case DATA_VARMYSQL:
if (templ->mysql_type
- == DATA_MYSQL_TRUE_VARCHAR) {
+ == DATA_MYSQL_TRUE_VARCHAR) {
/* This is a >= 5.0.3 type
true VARCHAR. Zero the field. */
pad_char = 0x00;
@@ -2572,7 +2580,7 @@ row_sel_store_mysql_rec(
case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_MYSQL:
- /* MySQL pads all string types (except
+ /* MySQL pads all string types (except
BLOB, TEXT and true VARCHAR) with space. */
if (UNIV_UNLIKELY(templ->mbminlen == 2)) {
/* Treat UCS2 as a special case. */
@@ -2601,7 +2609,7 @@ row_sel_store_mysql_rec(
memset(mysql_rec + templ->mysql_col_offset,
pad_char, templ->mysql_col_len);
}
- }
+ }
return(TRUE);
}
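
For a NULL value the code above sets the column's bit in the MySQL row's NULL bitmap and fills the field with a type-dependent pad byte; for a non-NULL value it clears that bit. The bitmap part reduces to two one-line bit operations, sketched here with made-up offsets:

#include <assert.h>
#include <string.h>

/* Set or clear one column's NULL bit in a MySQL-style NULL bitmap. */
static void
toy_set_null_bit(unsigned char* rec, unsigned byte_offset,
                 unsigned char bit_mask, int is_null)
{
    if (is_null) {
        rec[byte_offset] |= bit_mask;                   /* mark as NULL */
    } else {
        rec[byte_offset] &= (unsigned char) ~bit_mask;  /* non-NULL value */
    }
}

int
main(void)
{
    unsigned char rec[4];

    memset(rec, 0, sizeof rec);

    toy_set_null_bit(rec, 0, 0x02, 1);
    assert(rec[0] == 0x02);

    toy_set_null_bit(rec, 0, 0x02, 0);
    assert(rec[0] == 0x00);
    return(0);
}
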
@@ -2634,7 +2642,7 @@ row_sel_build_prev_vers_for_mysql(
} else {
prebuilt->old_vers_heap = mem_heap_create(200);
}
-
+
err = row_vers_build_for_consistent_read(rec, mtr, clust_index,
offsets, read_view, offset_heap,
prebuilt->old_vers_heap, old_vers);
@@ -2677,11 +2685,11 @@ row_sel_get_clust_rec_for_mysql(
*out_rec = NULL;
trx = thr_get_trx(thr);
-
+
row_build_row_ref_in_tuple(prebuilt->clust_ref, sec_index, rec, trx);
clust_index = dict_table_get_first_index(sec_index->table);
-
+
btr_pcur_open_with_no_init(clust_index, prebuilt->clust_ref,
PAGE_CUR_LE, BTR_SEARCH_LEAF,
prebuilt->clust_pcur, 0, mtr);
@@ -2694,9 +2702,9 @@ row_sel_get_clust_rec_for_mysql(
low_match value the real match to the search tuple */
if (!page_rec_is_user_rec(clust_rec)
- || btr_pcur_get_low_match(prebuilt->clust_pcur)
- < dict_index_get_n_unique(clust_index)) {
-
+ || btr_pcur_get_low_match(prebuilt->clust_pcur)
+ < dict_index_get_n_unique(clust_index)) {
+
/* In a rare case it is possible that no clust rec is found
for a delete-marked secondary index record: if in row0umod.c
in row_undo_mod_remove_clust_low() we have already removed
@@ -2706,9 +2714,10 @@ row_sel_get_clust_rec_for_mysql(
clustered index record did not exist in the read view of
trx. */
- if (!rec_get_deleted_flag(rec, sec_index->table->comp)
- || prebuilt->select_lock_type != LOCK_NONE) {
- ut_print_timestamp(stderr);
+ if (!rec_get_deleted_flag(rec,
+ dict_table_is_comp(sec_index->table))
+ || prebuilt->select_lock_type != LOCK_NONE) {
+ ut_print_timestamp(stderr);
fputs(" InnoDB: error clustered record"
" for sec rec not found\n"
"InnoDB: ", stderr);
@@ -2738,7 +2747,7 @@ row_sel_get_clust_rec_for_mysql(
/* Try to place a lock on the index record; we are searching
the clust rec with a unique condition, hence
we set a LOCK_REC_NOT_GAP type lock */
-
+
err = lock_clust_rec_read_check_and_lock(0, clust_rec,
clust_index, *offsets,
prebuilt->select_lock_type,
@@ -2757,9 +2766,9 @@ row_sel_get_clust_rec_for_mysql(
then we never look for an earlier version */
if (trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
- && !lock_clust_rec_cons_read_sees(clust_rec, clust_index,
- *offsets, trx->read_view)) {
-
+ && !lock_clust_rec_cons_read_sees(clust_rec,
+ clust_index, *offsets, trx->read_view)) {
+
/* The following call returns 'offsets' associated with
'old_vers' */
err = row_sel_build_prev_vers_for_mysql(
@@ -2767,7 +2776,7 @@ row_sel_get_clust_rec_for_mysql(
prebuilt, clust_rec,
offsets, offset_heap,
&old_vers, mtr);
-
+
if (err != DB_SUCCESS) {
goto err_exit;
@@ -2788,18 +2797,19 @@ row_sel_get_clust_rec_for_mysql(
a wrong result if we would not drop rows which we come to
visit through secondary index records that would not really
exist in our snapshot. */
-
+
if (clust_rec && (old_vers
- || rec_get_deleted_flag(rec, sec_index->table->comp))
- && !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
- clust_rec, clust_index)) {
+ || rec_get_deleted_flag(rec,
+ dict_table_is_comp(sec_index->table)))
+ && !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
+ clust_rec, clust_index)) {
clust_rec = NULL;
} else {
#ifdef UNIV_SEARCH_DEBUG
ut_a(clust_rec == NULL ||
- row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
- clust_rec, clust_index));
-#endif
+ row_sel_sec_rec_is_for_clust_rec(rec,
+ sec_index, clust_rec, clust_index));
+#endif
}
}
@@ -2808,7 +2818,7 @@ func_exit:
if (prebuilt->select_lock_type == LOCK_X) {
/* We may use the cursor in update: store its position */
-
+
btr_pcur_store_position(prebuilt->clust_pcur, mtr);
}
@@ -2846,7 +2856,7 @@ sel_restore_position_for_mysql(
ulint relative_position;
relative_position = pcur->rel_pos;
-
+
success = btr_pcur_restore_position(latch_mode, pcur, mtr);
*same_user_rec = success;
@@ -2864,12 +2874,12 @@ sel_restore_position_for_mysql(
}
if (relative_position == BTR_PCUR_AFTER
- || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE) {
+ || relative_position == BTR_PCUR_AFTER_LAST_IN_TREE) {
if (moves_up) {
return(TRUE);
}
-
+
if (btr_pcur_is_on_user_rec(pcur, mtr)) {
btr_pcur_move_to_prev(pcur, mtr);
}
@@ -2878,8 +2888,8 @@ sel_restore_position_for_mysql(
}
ut_ad(relative_position == BTR_PCUR_BEFORE
- || relative_position == BTR_PCUR_BEFORE_FIRST_IN_TREE);
-
+ || relative_position == BTR_PCUR_BEFORE_FIRST_IN_TREE);
+
if (moves_up && btr_pcur_is_on_user_rec(pcur, mtr)) {
btr_pcur_move_to_next(pcur, mtr);
}
@@ -2900,26 +2910,24 @@ row_sel_pop_cached_row_for_mysql(
ulint i;
mysql_row_templ_t* templ;
byte* cached_rec;
- ut_ad(prebuilt->n_fetch_cached > 0);
+ ut_ad(prebuilt->n_fetch_cached > 0);
ut_ad(prebuilt->mysql_prefix_len <= prebuilt->mysql_row_len);
-
- if (UNIV_UNLIKELY(prebuilt->keep_other_fields_on_keyread))
- {
- /* Copy cache record field by field, don't touch fields that
+
+ if (UNIV_UNLIKELY(prebuilt->keep_other_fields_on_keyread)) {
+ /* Copy cache record field by field, don't touch fields that
are not covered by current key */
- cached_rec =
+ cached_rec =
prebuilt->fetch_cache[prebuilt->fetch_cache_first];
for (i = 0; i < prebuilt->n_template; i++) {
templ = prebuilt->mysql_template + i;
ut_memcpy(
- buf + templ->mysql_col_offset,
+ buf + templ->mysql_col_offset,
cached_rec + templ->mysql_col_offset,
templ->mysql_col_len);
- /* Copy NULL bit of the current field from cached_rec
+ /* Copy NULL bit of the current field from cached_rec
to buf */
- if (templ->mysql_null_bit_mask)
- {
+ if (templ->mysql_null_bit_mask) {
buf[templ->mysql_null_byte_offset] ^=
(buf[templ->mysql_null_byte_offset] ^
cached_rec[templ->mysql_null_byte_offset]) &
@@ -2927,8 +2935,7 @@ row_sel_pop_cached_row_for_mysql(
}
}
}
- else
- {
+ else {
ut_memcpy(buf, prebuilt->fetch_cache[prebuilt->fetch_cache_first],
prebuilt->mysql_prefix_len);
}
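
The keyread branch above copies only the NULL bit of the current column from the cached record into buf, leaving the other bits of that byte alone, using the masked-merge idiom dst ^ ((dst ^ src) & mask). A standalone demonstration of the idiom:

#include <assert.h>

/* Copy exactly the bits selected by mask from src into dst, leaving the
remaining bits of dst unchanged. */
static unsigned char
merge_bits(unsigned char dst, unsigned char src, unsigned char mask)
{
    return((unsigned char) (dst ^ ((dst ^ src) & mask)));
}

int
main(void)
{
    /* Copy only bit 0x04 from src to dst. */
    assert(merge_bits(0xF0, 0x04, 0x04) == 0xF4);
    assert(merge_bits(0xF4, 0x00, 0x04) == 0xF0);
    /* Bits outside the mask are untouched. */
    assert(merge_bits(0xF0, 0xFF, 0x04) == 0xF4);
    return(0);
}
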
@@ -2965,11 +2972,11 @@ row_sel_push_cache_row_for_mysql(
/* A user has reported memory corruption in these
buffers in Linux. Put magic numbers there to help
to track a possible bug. */
-
+
buf = mem_alloc(prebuilt->mysql_row_len + 8);
prebuilt->fetch_cache[i] = buf + 4;
-
+
mach_write_to_4(buf, ROW_PREBUILT_FETCH_MAGIC_N);
mach_write_to_4(buf + 4 + prebuilt->mysql_row_len,
ROW_PREBUILT_FETCH_MAGIC_N);
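
The fetch cache allocation above over-allocates each row buffer by 8 bytes and writes ROW_PREBUILT_FETCH_MAGIC_N before and after the usable area, so a later check can detect an overrun of the cache. The same guard-band idea in a self-contained form, with a made-up magic value and names:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define TOY_MAGIC 0x465443AAu       /* arbitrary guard value */

/* Allocate len usable bytes with a 4-byte magic number on each side. */
static unsigned char*
toy_guarded_alloc(size_t len)
{
    unsigned char*  raw = malloc(len + 8);
    uint32_t        magic = TOY_MAGIC;

    assert(raw != NULL);
    memcpy(raw, &magic, 4);             /* guard before the buffer */
    memcpy(raw + 4 + len, &magic, 4);   /* guard after the buffer  */
    return(raw + 4);                    /* caller sees only the middle */
}

/* Returns nonzero if neither guard has been overwritten. */
static int
toy_guards_intact(const unsigned char* buf, size_t len)
{
    uint32_t before, after;

    memcpy(&before, buf - 4, 4);
    memcpy(&after, buf + len, 4);
    return(before == TOY_MAGIC && after == TOY_MAGIC);
}

int
main(void)
{
    unsigned char* buf = toy_guarded_alloc(16);

    memset(buf, 0xAB, 16);              /* normal use: guards survive */
    assert(toy_guards_intact(buf, 16));

    buf[16] = 0;                        /* simulate a one-byte overrun */
    assert(!toy_guards_intact(buf, 16));

    free(buf - 4);
    return(0);
}
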
@@ -3008,10 +3015,10 @@ row_sel_try_search_shortcut_for_mysql(
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
rec_t* rec;
-
+
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(!prebuilt->templ_contains_blob);
-
+
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, pcur,
#ifndef UNIV_SEARCH_DEBUG
@@ -3021,7 +3028,7 @@ row_sel_try_search_shortcut_for_mysql(
#endif
mtr);
rec = btr_pcur_get_rec(pcur);
-
+
if (!page_rec_is_user_rec(rec)) {
return(SEL_RETRY);
@@ -3029,7 +3036,7 @@ row_sel_try_search_shortcut_for_mysql(
/* As the cursor is now placed on a user record after a search with
the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
- fields in the user record matched to the search tuple */
+ fields in the user record matched to the search tuple */
if (btr_pcur_get_up_match(pcur) < dtuple_get_n_fields(search_tuple)) {
@@ -3048,13 +3055,13 @@ row_sel_try_search_shortcut_for_mysql(
return(SEL_RETRY);
}
- if (rec_get_deleted_flag(rec, index->table->comp)) {
+ if (rec_get_deleted_flag(rec, dict_table_is_comp(index->table))) {
return(SEL_EXHAUSTED);
}
*out_rec = rec;
-
+
return(SEL_FOUND);
}
@@ -3069,7 +3076,7 @@ ulint
row_search_for_mysql(
/*=================*/
/* out: DB_SUCCESS,
- DB_RECORD_NOT_FOUND,
+ DB_RECORD_NOT_FOUND,
DB_END_OF_INDEX, DB_DEADLOCK,
DB_LOCK_TABLE_FULL, DB_CORRUPTION,
or DB_TOO_BIG_RECORD */
@@ -3084,7 +3091,7 @@ row_search_for_mysql(
the end of the index, depending on
'mode' */
ulint match_mode, /* in: 0 or ROW_SEL_EXACT or
- ROW_SEL_EXACT_PREFIX */
+ ROW_SEL_EXACT_PREFIX */
ulint direction) /* in: 0 or ROW_SEL_NEXT or
ROW_SEL_PREV; NOTE: if this is != 0,
then prebuilt must have a pcur
@@ -3092,7 +3099,7 @@ row_search_for_mysql(
cursor 'direction' should be 0. */
{
dict_index_t* index = prebuilt->index;
- ibool comp = index->table->comp;
+ ibool comp = dict_table_is_comp(index->table);
dtuple_t* search_tuple = prebuilt->search_tuple;
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
@@ -3104,8 +3111,8 @@ row_search_for_mysql(
ulint err = DB_SUCCESS;
ibool unique_search = FALSE;
ibool unique_search_from_clust_index = FALSE;
- ibool mtr_has_extra_clust_latch = FALSE;
- ibool moves_up = FALSE;
+ ibool mtr_has_extra_clust_latch = FALSE;
+ ibool moves_up = FALSE;
ibool set_also_gap_locks = TRUE;
/* if the query is a plain
locking SELECT, and the isolation
@@ -3132,8 +3139,8 @@ row_search_for_mysql(
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
if (UNIV_UNLIKELY(prebuilt->table->ibd_file_missing)) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error:\n"
"InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
"InnoDB: table %s does not exist.\n"
"InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -3154,13 +3161,13 @@ row_search_for_mysql(
ut_print_name(stderr, trx, prebuilt->table->name);
putc('\n', stderr);
- mem_analyze_corruption((byte*)prebuilt);
+ mem_analyze_corruption(prebuilt);
ut_error;
}
if (trx->n_mysql_tables_in_use == 0
- && UNIV_UNLIKELY(prebuilt->select_lock_type == LOCK_NONE)) {
+ && UNIV_UNLIKELY(prebuilt->select_lock_type == LOCK_NONE)) {
/* Note that if MySQL uses an InnoDB temp table that it
created inside LOCK TABLES, then n_mysql_tables_in_use can
be zero; in that case select_lock_type is set to LOCK_X in
@@ -3172,16 +3179,16 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
fputs(
"InnoDB: Error: MySQL is trying to perform a SELECT\n"
"InnoDB: but it has not locked any tables in ::external_lock()!\n",
- stderr);
+stderr);
trx_print(stderr, trx, 600);
- fputc('\n', stderr);
+ fputc('\n', stderr);
*/
}
/* fprintf(stderr, "Match mode %lu\n search tuple ", (ulong) match_mode);
dtuple_print(search_tuple);
-
+
fprintf(stderr, "N tables locked %lu\n", trx->mysql_n_tables_locked);
*/
/*-------------------------------------------------------------*/
@@ -3189,7 +3196,7 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
adaptive hash index latch if there is someone waiting behind */
if (UNIV_UNLIKELY(btr_search_latch.writer != RW_LOCK_NOT_LOCKED)
- && trx->has_search_latch) {
+ && trx->has_search_latch) {
/* There is an x-latch request on the adaptive hash index:
release the s-latch to reduce starvation and wait for
@@ -3201,13 +3208,13 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
trx->search_latch_timeout = BTR_SEA_TIMEOUT;
}
-
+
/* Reset the new record lock info if srv_locks_unsafe_for_binlog
is set. Then we are able to remove the record locks set here on an
individual row. */
if (srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && prebuilt->select_lock_type != LOCK_NONE) {
trx_reset_new_rec_lock_info(trx);
}
@@ -3217,7 +3224,7 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
if (UNIV_UNLIKELY(direction == 0)) {
trx->op_info = "starting index read";
-
+
prebuilt->n_rows_fetched = 0;
prebuilt->n_fetch_cached = 0;
prebuilt->fetch_cache_first = 0;
@@ -3241,7 +3248,7 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
or better: prevent caching for a scroll
cursor! */
}
-
+
prebuilt->n_rows_fetched = 0;
prebuilt->n_fetch_cached = 0;
prebuilt->fetch_cache_first = 0;
@@ -3257,16 +3264,16 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
}
if (prebuilt->fetch_cache_first > 0
- && prebuilt->fetch_cache_first < MYSQL_FETCH_CACHE_SIZE) {
+ && prebuilt->fetch_cache_first < MYSQL_FETCH_CACHE_SIZE) {
- /* The previous returned row was popped from the fetch
- cache, but the cache was not full at the time of the
- popping: no more rows can exist in the result set */
+ /* The previous returned row was popped from the fetch
+ cache, but the cache was not full at the time of the
+ popping: no more rows can exist in the result set */
err = DB_RECORD_NOT_FOUND;
goto func_exit;
}
-
+
prebuilt->n_rows_fetched++;
if (prebuilt->n_rows_fetched > 1000000000) {
@@ -3285,13 +3292,13 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
marked versions of a record where only the primary key values differ:
thus in a secondary index we must use next-key locks when locking
delete-marked records. */
-
+
if (match_mode == ROW_SEL_EXACT
- && index->type & DICT_UNIQUE
- && dtuple_get_n_fields(search_tuple)
- == dict_index_get_n_unique(index)
- && (index->type & DICT_CLUSTERED
- || !dtuple_contains_null(search_tuple))) {
+ && index->type & DICT_UNIQUE
+ && dtuple_get_n_fields(search_tuple)
+ == dict_index_get_n_unique(index)
+ && (index->type & DICT_CLUSTERED
+ || !dtuple_contains_null(search_tuple))) {
/* Note above that a UNIQUE secondary index can contain many
rows with the same key value if one of the columns is the SQL
@@ -3308,7 +3315,7 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
if (UNIV_UNLIKELY(direction != 0 &&
!prebuilt->used_in_HANDLER)) {
-
+
err = DB_RECORD_NOT_FOUND;
goto func_exit;
}
@@ -3326,20 +3333,20 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
may be long and there may be externally stored fields */
if (UNIV_UNLIKELY(direction == 0)
- && unique_search
- && index->type & DICT_CLUSTERED
- && !prebuilt->templ_contains_blob
- && !prebuilt->used_in_HANDLER
- && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
+ && unique_search
+ && index->type & DICT_CLUSTERED
+ && !prebuilt->templ_contains_blob
+ && !prebuilt->used_in_HANDLER
+ && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
mode = PAGE_CUR_GE;
unique_search_from_clust_index = TRUE;
if (trx->mysql_n_tables_locked == 0
- && prebuilt->select_lock_type == LOCK_NONE
- && trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
- && trx->read_view) {
+ && prebuilt->select_lock_type == LOCK_NONE
+ && trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
+ && trx->read_view) {
/* This is a SELECT query done as a consistent read,
and the read view has already been allocated:
@@ -3353,7 +3360,7 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
and if we try that, we can deadlock on the adaptive
hash index semaphore! */
-#ifndef UNIV_SEARCH_DEBUG
+#ifndef UNIV_SEARCH_DEBUG
if (!trx->has_search_latch) {
rw_lock_s_lock(&btr_search_latch);
trx->has_search_latch = TRUE;
@@ -3365,49 +3372,49 @@ cursor lock count is done correctly. See bugs #12263 and #12456!
#ifdef UNIV_SEARCH_DEBUG
ut_a(0 == cmp_dtuple_rec(search_tuple,
rec, offsets));
-#endif
+#endif
if (!row_sel_store_mysql_rec(buf, prebuilt,
rec, offsets)) {
- err = DB_TOO_BIG_RECORD;
+ err = DB_TOO_BIG_RECORD;
/* We let the main loop to do the
error handling */
- goto shortcut_fails_too_big_rec;
+ goto shortcut_fails_too_big_rec;
}
-
- mtr_commit(&mtr);
+
+ mtr_commit(&mtr);
/* ut_print_name(stderr, index->name);
fputs(" shortcut\n", stderr); */
srv_n_rows_read++;
-
+
if (trx->search_latch_timeout > 0
- && trx->has_search_latch) {
+ && trx->has_search_latch) {
trx->search_latch_timeout--;
- rw_lock_s_unlock(&btr_search_latch);
+ rw_lock_s_unlock(&btr_search_latch);
trx->has_search_latch = FALSE;
- }
-
+ }
+
/* NOTE that we do NOT store the cursor
position */
err = DB_SUCCESS;
goto func_exit;
case SEL_EXHAUSTED:
- mtr_commit(&mtr);
+ mtr_commit(&mtr);
/* ut_print_name(stderr, index->name);
fputs(" record not found 2\n", stderr); */
if (trx->search_latch_timeout > 0
- && trx->has_search_latch) {
+ && trx->has_search_latch) {
trx->search_latch_timeout--;
- rw_lock_s_unlock(&btr_search_latch);
+ rw_lock_s_unlock(&btr_search_latch);
trx->has_search_latch = FALSE;
}
@@ -3429,16 +3436,16 @@ shortcut_fails_too_big_rec:
if (trx->has_search_latch) {
rw_lock_s_unlock(&btr_search_latch);
trx->has_search_latch = FALSE;
- }
+ }
trx_start_if_not_started(trx);
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
- && prebuilt->select_lock_type != LOCK_NONE
- && trx->mysql_query_str) {
+ && prebuilt->select_lock_type != LOCK_NONE
+ && trx->mysql_query_str) {
/* Scan the MySQL query string; check if SELECT is the first
- word there */
+ word there */
ibool success;
dict_accept(*trx->mysql_query_str, "SELECT", &success);
@@ -3450,11 +3457,11 @@ shortcut_fails_too_big_rec:
set_also_gap_locks = FALSE;
}
}
-
+
/* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order,
otherwise downward */
-
+
if (UNIV_UNLIKELY(direction == 0)) {
if (mode == PAGE_CUR_GE || mode == PAGE_CUR_G) {
moves_up = TRUE;
@@ -3516,24 +3523,24 @@ shortcut_fails_too_big_rec:
/* No need to set an intention lock or assign a read view */
if (trx->read_view == NULL
- && prebuilt->select_lock_type == LOCK_NONE) {
+ && prebuilt->select_lock_type == LOCK_NONE) {
fputs(
"InnoDB: Error: MySQL is trying to perform a consistent read\n"
"InnoDB: but the read view is not assigned!\n", stderr);
trx_print(stderr, trx, 600);
- fputc('\n', stderr);
+ fputc('\n', stderr);
ut_a(0);
}
} else if (prebuilt->select_lock_type == LOCK_NONE) {
- /* This is a consistent read */
+ /* This is a consistent read */
/* Assign a read view for the query */
trx_assign_read_view(trx);
prebuilt->sql_stat_start = FALSE;
} else {
ulint lock_mode;
- if (prebuilt->select_lock_type == LOCK_S) {
+ if (prebuilt->select_lock_type == LOCK_S) {
lock_mode = LOCK_IS;
} else {
lock_mode = LOCK_IX;
@@ -3550,7 +3557,7 @@ shortcut_fails_too_big_rec:
rec_loop:
/*-------------------------------------------------------------*/
/* PHASE 4: Look for matching records in a loop */
-
+
rec = btr_pcur_get_rec(pcur);
ut_ad(!!page_rec_is_comp(rec) == comp);
#ifdef UNIV_SEARCH_DEBUG
@@ -3575,15 +3582,15 @@ rec_loop:
if (page_rec_is_supremum(rec)) {
if (set_also_gap_locks
- && !srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && !srv_locks_unsafe_for_binlog
+ && prebuilt->select_lock_type != LOCK_NONE) {
/* Try to place a lock on the index record */
/* If innodb_locks_unsafe_for_binlog option is used,
we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
-
+
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(rec, index, offsets,
@@ -3597,14 +3604,14 @@ rec_loop:
}
/* A page supremum record cannot be in the result set: skip
it now that we have placed a possible lock on it */
-
+
goto next_rec;
}
/*-------------------------------------------------------------*/
/* Do sanity checks in case our cursor has bumped into page
corruption */
-
+
if (comp) {
next_offs = rec_get_next_offs(rec, TRUE);
if (UNIV_UNLIKELY(next_offs < PAGE_NEW_SUPREMUM)) {
@@ -3640,8 +3647,8 @@ wrong_offs:
dict_index_name_print(stderr, trx, index);
fputs(". Run CHECK TABLE. You may need to\n"
"InnoDB: restore from a backup, or dump + drop + reimport the table.\n",
- stderr);
-
+ stderr);
+
err = DB_CORRUPTION;
goto lock_wait_or_error;
@@ -3697,14 +3704,14 @@ wrong_offs:
in prebuilt: if not, then we return with DB_RECORD_NOT_FOUND */
/* fputs("Comparing rec and search tuple\n", stderr); */
-
+
if (0 != cmp_dtuple_rec(search_tuple, rec, offsets)) {
if (set_also_gap_locks
- && !srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && !srv_locks_unsafe_for_binlog
+ && prebuilt->select_lock_type != LOCK_NONE) {
- /* Try to place a gap lock on the index
+ /* Try to place a gap lock on the index
record only if innodb_locks_unsafe_for_binlog
option is not set */
@@ -3723,19 +3730,19 @@ wrong_offs:
err = DB_RECORD_NOT_FOUND;
/* ut_print_name(stderr, index->name);
fputs(" record not found 3\n", stderr); */
-
+
goto normal_return;
}
} else if (match_mode == ROW_SEL_EXACT_PREFIX) {
if (!cmp_dtuple_is_prefix_of_rec(search_tuple, rec, offsets)) {
-
+
if (set_also_gap_locks
- && !srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && !srv_locks_unsafe_for_binlog
+ && prebuilt->select_lock_type != LOCK_NONE) {
- /* Try to place a gap lock on the index
+ /* Try to place a gap lock on the index
record only if innodb_locks_unsafe_for_binlog
option is not set */
@@ -3775,9 +3782,9 @@ wrong_offs:
ulint lock_type;
if (!set_also_gap_locks
- || srv_locks_unsafe_for_binlog
- || (unique_search && !UNIV_UNLIKELY(rec_get_deleted_flag(
- rec, comp)))) {
+ || srv_locks_unsafe_for_binlog
+ || (unique_search && !UNIV_UNLIKELY(
+ rec_get_deleted_flag(rec, comp)))) {
goto no_gap_lock;
} else {
@@ -3796,11 +3803,11 @@ wrong_offs:
need to lock the gap before that record. */
if (index == clust_index
- && mode == PAGE_CUR_GE
- && direction == 0
- && dtuple_get_n_fields_cmp(search_tuple)
- == dict_index_get_n_unique(index)
- && 0 == cmp_dtuple_rec(search_tuple, rec, offsets)) {
+ && mode == PAGE_CUR_GE
+ && direction == 0
+ && dtuple_get_n_fields_cmp(search_tuple)
+ == dict_index_get_n_unique(index)
+ && 0 == cmp_dtuple_rec(search_tuple, rec, offsets)) {
no_gap_lock:
lock_type = LOCK_REC_NOT_GAP;
}
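
The two hunks above reflow the condition that chooses between a next-key lock and a record-only lock in row_search_for_mysql(): the gap in front of the record is left unlocked when gap locking is not wanted for this scan, when innodb_locks_unsafe_for_binlog is set, or when the search is an exact match on a complete unique key, so no phantom can appear in front of the row. A standalone paraphrase of that decision follows; the type, the constants and the merged unique_exact_match flag are stand-ins for this sketch, not the actual InnoDB declarations.

typedef int     sketch_bool;

enum { SK_LOCK_ORDINARY = 0, SK_LOCK_REC_NOT_GAP = 1024 };  /* stand-in values */

/* Paraphrase of the lock-type choice shown in the hunks above. */
static int
sketch_select_lock_type(
        sketch_bool     set_also_gap_locks,      /* gap locks wanted for this scan */
        sketch_bool     locks_unsafe_for_binlog, /* srv_locks_unsafe_for_binlog */
        sketch_bool     unique_exact_match)      /* unique search hit a live row, or
                                                 the tuple equals the full unique key
                                                 of the clustered index (PAGE_CUR_GE,
                                                 direction 0) */
{
        if (!set_also_gap_locks
            || locks_unsafe_for_binlog
            || unique_exact_match) {

                /* The row is pinned down uniquely: locking the gap in
                front of it would add nothing. */
                return(SK_LOCK_REC_NOT_GAP);
        }

        /* Otherwise take the ordinary next-key lock, which also covers
        the gap before the record. */
        return(SK_LOCK_ORDINARY);
}

int
main(void)
{
        return(sketch_select_lock_type(1, 0, 1) != SK_LOCK_REC_NOT_GAP);
}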
@@ -3815,8 +3822,8 @@ no_gap_lock:
break;
case DB_LOCK_WAIT:
if (UNIV_LIKELY(prebuilt->row_read_type
- != ROW_READ_TRY_SEMI_CONSISTENT)
- || index != clust_index) {
+ != ROW_READ_TRY_SEMI_CONSISTENT)
+ || index != clust_index) {
goto lock_wait_or_error;
}
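
The DB_LOCK_WAIT arm above only falls through to the ordinary lock-wait path when the prebuilt handle is not in ROW_READ_TRY_SEMI_CONSISTENT mode or the conflict is not on the clustered index; in the semi-consistent case the search instead builds the newest committed version of the clustered index record (the call whose argument list appears in a later hunk) and carries on without waiting. A compact control-flow sketch, with stand-in names rather than the real API:

enum sketch_read_type { SK_READ_ORDINARY, SK_READ_TRY_SEMI_CONSISTENT };

/* Returns 1 when the caller should read the newest committed version of
the clustered index record instead of queueing behind the lock. */
static int
sketch_use_semi_consistent(
        enum sketch_read_type   read_type,      /* prebuilt->row_read_type */
        int                     on_clust_index) /* conflict on the clustered index? */
{
        if (read_type != SK_READ_TRY_SEMI_CONSISTENT || !on_clust_index) {
                return(0);      /* ordinary path: wait for the lock */
        }

        return(1);              /* use the committed version; the caller then
                                evaluates its condition against that version */
}

int
main(void)
{
        return(!sketch_use_semi_consistent(SK_READ_TRY_SEMI_CONSISTENT, 1));
}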
@@ -3878,17 +3885,17 @@ no_gap_lock:
/* Do nothing: we let a non-locking SELECT read the
latest version of the record */
-
+
} else if (index == clust_index) {
-
+
/* Fetch a previous version of the row if the current
one is not visible in the snapshot; if we have a very
high force recovery level set, we try to avoid crashes
by skipping this lookup */
if (UNIV_LIKELY(srv_force_recovery < 5)
- && !lock_clust_rec_cons_read_sees(rec, index,
- offsets, trx->read_view)) {
+ && !lock_clust_rec_cons_read_sees(rec, index,
+ offsets, trx->read_view)) {
rec_t* old_vers;
/* The following call returns 'offsets'
@@ -3898,7 +3905,7 @@ no_gap_lock:
prebuilt, rec,
&offsets, &heap,
&old_vers, &mtr);
-
+
if (err != DB_SUCCESS) {
goto lock_wait_or_error;
@@ -3937,15 +3944,15 @@ no_gap_lock:
/* The record is delete-marked: we can skip it */
if (srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE
- && !did_semi_consistent_read) {
+ && prebuilt->select_lock_type != LOCK_NONE
+ && !did_semi_consistent_read) {
/* No need to keep a lock on a delete-marked record
if we do not want to use next-key locking. */
row_unlock_for_mysql(prebuilt, TRUE);
}
-
+
goto next_rec;
}
@@ -3969,7 +3976,7 @@ requires_clust_rec:
/* The following call returns 'offsets' associated with
'clust_rec'. Note that 'clust_rec' can be an old version
built for a consistent read. */
-
+
err = row_sel_get_clust_rec_for_mysql(prebuilt, index, rec,
thr, &clust_rec,
&offsets, &heap, &mtr);
@@ -3990,7 +3997,7 @@ requires_clust_rec:
/* The record is delete marked: we can skip it */
if (srv_locks_unsafe_for_binlog
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && prebuilt->select_lock_type != LOCK_NONE) {
/* No need to keep a lock on a delete-marked
record if we do not want to use next-key
@@ -4001,10 +4008,10 @@ requires_clust_rec:
goto next_rec;
}
-
+
if (prebuilt->need_to_access_clustered) {
- result_rec = clust_rec;
+ result_rec = clust_rec;
ut_ad(rec_offs_validate(result_rec, clust_index,
offsets));
@@ -4032,8 +4039,8 @@ requires_clust_rec:
&& !prebuilt->templ_contains_blob
&& !prebuilt->clust_index_was_generated
&& !prebuilt->used_in_HANDLER
- && prebuilt->template_type
- != ROW_MYSQL_DUMMY_TEMPLATE) {
+ && prebuilt->template_type
+ != ROW_MYSQL_DUMMY_TEMPLATE) {
/* Inside an update, for example, we do not cache rows,
since we may use the cursor position to do the actual
@@ -4047,7 +4054,7 @@ requires_clust_rec:
row_sel_push_cache_row_for_mysql(prebuilt, result_rec,
offsets);
if (prebuilt->n_fetch_cached == MYSQL_FETCH_CACHE_SIZE) {
-
+
goto got_row;
}
@@ -4085,13 +4092,13 @@ got_row:
/* We have an optimization to save CPU time: if this is a consistent
read on a unique condition on the clustered index, then we do not
store the pcur position, because any fetch next or prev will anyway
- return 'end of file'. An exception is the MySQL HANDLER command
- where the user can move the cursor with PREV or NEXT even after
- a unique search. */
+ return 'end of file'. Exceptions are locking reads and the MySQL
+ HANDLER command where the user can move the cursor with PREV or NEXT
+ even after a unique search. */
if (!unique_search_from_clust_index
- || prebuilt->select_lock_type != LOCK_NONE
- || prebuilt->used_in_HANDLER) {
+ || prebuilt->select_lock_type != LOCK_NONE
+ || prebuilt->used_in_HANDLER) {
/* Inside an update always store the cursor position */
@@ -4111,7 +4118,7 @@ next_rec:
did_semi_consistent_read = FALSE;
if (UNIV_UNLIKELY(srv_locks_unsafe_for_binlog)
- && prebuilt->select_lock_type != LOCK_NONE) {
+ && prebuilt->select_lock_type != LOCK_NONE) {
trx_reset_new_rec_lock_info(trx);
}
@@ -4129,7 +4136,7 @@ next_rec:
mtr_commit(&mtr);
mtr_has_extra_clust_latch = FALSE;
-
+
mtr_start(&mtr);
if (sel_restore_position_for_mysql(&same_user_rec,
BTR_SEARCH_LEAF,
@@ -4142,7 +4149,7 @@ next_rec:
}
}
- if (moves_up) {
+ if (moves_up) {
if (UNIV_UNLIKELY(!btr_pcur_move_to_next(pcur, &mtr))) {
not_moved:
btr_pcur_store_position(pcur, &mtr);
@@ -4176,11 +4183,12 @@ lock_wait_or_error:
did_semi_consistent_read = FALSE;
/*-------------------------------------------------------------*/
+
btr_pcur_store_position(pcur, &mtr);
mtr_commit(&mtr);
mtr_has_extra_clust_latch = FALSE;
-
+
trx->error_state = err;
/* The following is a patch for MySQL */
@@ -4217,7 +4225,7 @@ lock_wait_or_error:
trx_reset_new_rec_lock_info(trx);
}
-
+
mode = pcur->search_mode;
goto rec_loop;
@@ -4289,7 +4297,7 @@ row_search_check_if_query_cache_permitted(
'/' char, table name */
{
dict_table_t* table;
- ibool ret = FALSE;
+ ibool ret = FALSE;
table = dict_table_get(norm_name, trx);
@@ -4310,22 +4318,23 @@ row_search_check_if_query_cache_permitted(
IX type locks actually would require ret = FALSE. */
if (UT_LIST_GET_LEN(table->locks) == 0
- && ut_dulint_cmp(trx->id, table->query_cache_inv_trx_id) >= 0) {
+ && ut_dulint_cmp(trx->id,
+ table->query_cache_inv_trx_id) >= 0) {
ret = TRUE;
-
+
/* If the isolation level is high, assign a read view for the
transaction if it does not yet have one */
if (trx->isolation_level >= TRX_ISO_REPEATABLE_READ
- && !trx->read_view) {
+ && !trx->read_view) {
- trx->read_view = read_view_open_now(trx,
+ trx->read_view = read_view_open_now(trx->id,
trx->global_read_view_heap);
trx->global_read_view = trx->read_view;
}
}
-
+
mutex_exit(&kernel_mutex);
return(ret);
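
row_search_check_if_query_cache_permitted() above returns TRUE only while the table carries no locks and the transaction id is not older than the table's query_cache_inv_trx_id; at REPEATABLE READ and above it also assigns a read view to the transaction if it has none yet. A standalone paraphrase of the permission predicate, where uint64_t stands in for dulint and the lock count is passed in directly:

#include <stdint.h>

static int
sketch_query_cache_permitted(
        unsigned long   n_table_locks,          /* UT_LIST_GET_LEN(table->locks) */
        uint64_t        trx_id,                 /* trx->id */
        uint64_t        cache_inv_trx_id)       /* table->query_cache_inv_trx_id */
{
        return(n_table_locks == 0 && trx_id >= cache_inv_trx_id);
}

int
main(void)
{
        return(!sketch_query_cache_permitted(0, 10, 5));
}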
diff --git a/storage/innobase/row/row0uins.c b/storage/innobase/row/row0uins.c
index 9dc860d70b1..2bf4b0e5c65 100644
--- a/storage/innobase/row/row0uins.c
+++ b/storage/innobase/row/row0uins.c
@@ -39,14 +39,14 @@ row_undo_ins_remove_clust_rec(
/* out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
undo_node_t* node) /* in: undo node */
{
- btr_cur_t* btr_cur;
+ btr_cur_t* btr_cur;
ibool success;
ulint err;
ulint n_tries = 0;
mtr_t mtr;
-
+
mtr_start(&mtr);
-
+
success = btr_pcur_restore_position(BTR_MODIFY_LEAF, &(node->pcur),
&mtr);
ut_a(success);
@@ -55,7 +55,7 @@ row_undo_ins_remove_clust_rec(
/* Drop the index tree associated with the row in
SYS_INDEXES table: */
-
+
dict_drop_index_tree(btr_pcur_get_rec(&(node->pcur)), &mtr);
mtr_commit(&mtr);
@@ -66,9 +66,9 @@ row_undo_ins_remove_clust_rec(
&(node->pcur), &mtr);
ut_a(success);
}
-
+
btr_cur = btr_pcur_get_btr_cur(&(node->pcur));
-
+
success = btr_cur_optimistic_delete(btr_cur, &mtr);
btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);
@@ -81,7 +81,7 @@ row_undo_ins_remove_clust_rec(
retry:
/* If did not succeed, try pessimistic descent to tree */
mtr_start(&mtr);
-
+
success = btr_pcur_restore_position(BTR_MODIFY_TREE,
&(node->pcur), &mtr);
ut_a(success);
@@ -100,7 +100,7 @@ retry:
n_tries++;
os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
-
+
goto retry;
}
@@ -125,13 +125,13 @@ row_undo_ins_remove_sec_low(
dict_index_t* index, /* in: index */
dtuple_t* entry) /* in: index entry to remove */
{
- btr_pcur_t pcur;
+ btr_pcur_t pcur;
btr_cur_t* btr_cur;
ibool found;
ibool success;
ulint err;
mtr_t mtr;
-
+
log_free_check();
mtr_start(&mtr);
@@ -181,11 +181,11 @@ row_undo_ins_remove_sec(
{
ulint err;
ulint n_tries = 0;
-
+
/* Try first optimistic descent to the B-tree */
err = row_undo_ins_remove_sec_low(BTR_MODIFY_LEAF, index, entry);
-
+
if (err == DB_SUCCESS) {
return(err);
@@ -204,7 +204,7 @@ retry:
n_tries++;
os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
-
+
goto retry;
}
@@ -228,7 +228,7 @@ row_undo_ins_parse_undo_rec(
ibool dummy_extern;
ut_ad(node);
-
+
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &dummy,
&dummy_extern, &undo_no, &table_id);
ut_ad(type == TRX_UNDO_INSERT_REC);
@@ -249,11 +249,11 @@ row_undo_ins_parse_undo_rec(
}
clust_index = dict_table_get_first_index(node->table);
-
+
ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
node->heap);
}
-
+
/***************************************************************
Undoes a fresh insert of a row to a table. A fresh insert means that
the same clustered index unique key did not have any record, even delete
@@ -271,17 +271,17 @@ row_undo_ins(
ut_ad(node);
ut_ad(node->state == UNDO_NODE_INSERT);
-
+
row_undo_ins_parse_undo_rec(node);
if (node->table == NULL) {
- found = FALSE;
+ found = FALSE;
} else {
- found = row_undo_search_clust_to_pcur(node);
+ found = row_undo_search_clust_to_pcur(node);
}
if (!found) {
- trx_undo_rec_release(node->trx, node->undo_no);
+ trx_undo_rec_release(node->trx, node->undo_no);
return(DB_SUCCESS);
}
@@ -298,11 +298,11 @@ row_undo_ins(
return(err);
}
-
+
node->index = dict_table_get_next_index(node->index);
}
err = row_undo_ins_remove_clust_rec(node);
-
+
return(err);
}
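
The undo-insert functions above share one shape: try btr_cur_optimistic_delete(), which only touches the leaf page, and if that does not succeed, restore the cursor with BTR_MODIFY_TREE and run the pessimistic delete, sleeping and retrying when the failure was lack of file space. A minimal standalone sketch of that retry loop; the callbacks, error codes and retry bound are local stand-ins:

enum sketch_err { SK_SUCCESS, SK_OUT_OF_FILE_SPACE, SK_FAIL };

#define SKETCH_MAX_RETRIES      100     /* stand-in retry bound */

static enum sketch_err
sketch_remove_rec(
        int             (*optimistic_delete)(void),     /* nonzero on success */
        enum sketch_err (*pessimistic_delete)(void),
        void            (*sleep_a_while)(void))
{
        unsigned        n_tries = 0;

        if (optimistic_delete()) {
                return(SK_SUCCESS);     /* the leaf-only path was enough */
        }

        for (;;) {
                enum sketch_err err = pessimistic_delete();

                if (err != SK_OUT_OF_FILE_SPACE
                    || n_tries++ >= SKETCH_MAX_RETRIES) {

                        return(err);    /* success, hard error, or give up */
                }

                sleep_a_while();        /* wait for purge to free some space */
        }
}

static int              sk_opt_fails(void) { return(0); }
static enum sketch_err  sk_pess_ok(void)   { return(SK_SUCCESS); }
static void             sk_no_sleep(void)  { }

int
main(void)
{
        return(sketch_remove_rec(sk_opt_fails, sk_pess_ok, sk_no_sleep));
}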
diff --git a/storage/innobase/row/row0umod.c b/storage/innobase/row/row0umod.c
index f906027033f..9c871def661 100644
--- a/storage/innobase/row/row0umod.c
+++ b/storage/innobase/row/row0umod.c
@@ -51,14 +51,14 @@ row_undo_mod_undo_also_prev_vers(
/*=============================*/
/* out: TRUE if also previous modify or
insert of this row should be undone */
- undo_node_t* node, /* in: row undo node */
+ undo_node_t* node, /* in: row undo node */
dulint* undo_no)/* out: the undo number */
{
trx_undo_rec_t* undo_rec;
trx_t* trx;
trx = node->trx;
-
+
if (0 != ut_dulint_cmp(node->new_trx_id, trx->id)) {
*undo_no = ut_dulint_zero;
@@ -71,7 +71,7 @@ row_undo_mod_undo_also_prev_vers(
return(ut_dulint_cmp(trx->roll_limit, *undo_no) <= 0);
}
-
+
/***************************************************************
Undoes a modify in a clustered index record. */
static
@@ -117,7 +117,7 @@ row_undo_mod_clust_low(
return(err);
}
-
+
/***************************************************************
Removes a clustered index record after undo if possible. */
static
@@ -135,7 +135,7 @@ row_undo_mod_remove_clust_low(
btr_cur_t* btr_cur;
ulint err;
ibool success;
-
+
pcur = &(node->pcur);
btr_cur = btr_pcur_get_btr_cur(pcur);
@@ -149,13 +149,13 @@ row_undo_mod_remove_clust_low(
/* Find out if we can remove the whole clustered index record */
if (node->rec_type == TRX_UNDO_UPD_DEL_REC
- && !row_vers_must_preserve_del_marked(node->new_trx_id, mtr)) {
+ && !row_vers_must_preserve_del_marked(node->new_trx_id, mtr)) {
/* Ok, we can remove */
} else {
return(DB_SUCCESS);
}
-
+
if (mode == BTR_MODIFY_LEAF) {
success = btr_cur_optimistic_delete(btr_cur, mtr);
@@ -180,7 +180,7 @@ row_undo_mod_remove_clust_low(
return(err);
}
-
+
/***************************************************************
Undoes a modify in a clustered index record. Sets also the node state for the
next round of undo. */
@@ -199,7 +199,7 @@ row_undo_mod_clust(
ibool success;
ibool more_vers;
dulint new_undo_no;
-
+
ut_ad(node && thr);
/* Check if also the previous version of the clustered index record
@@ -230,7 +230,7 @@ row_undo_mod_clust(
btr_pcur_commit_specify_mtr(pcur, &mtr);
if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_UPD_DEL_REC) {
-
+
mtr_start(&mtr);
err = row_undo_mod_remove_clust_low(node, thr, &mtr,
@@ -251,8 +251,8 @@ row_undo_mod_clust(
}
node->state = UNDO_NODE_FETCH_NEXT;
-
- trx_undo_rec_release(node->trx, node->undo_no);
+
+ trx_undo_rec_release(node->trx, node->undo_no);
if (more_vers && err == DB_SUCCESS) {
@@ -284,7 +284,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry */
ulint mode) /* in: latch mode BTR_MODIFY_LEAF or
- BTR_MODIFY_TREE */
+ BTR_MODIFY_TREE */
{
ibool found;
btr_pcur_t pcur;
@@ -294,10 +294,10 @@ row_undo_mod_del_mark_or_remove_sec_low(
ulint err;
mtr_t mtr;
mtr_t mtr_vers;
-
+
log_free_check();
mtr_start(&mtr);
-
+
found = row_search_index_entry(index, entry, mode, &pcur, &mtr);
btr_cur = btr_pcur_get_btr_cur(&pcur);
@@ -316,11 +316,11 @@ row_undo_mod_del_mark_or_remove_sec_low(
we should delete mark the record. */
mtr_start(&mtr_vers);
-
+
success = btr_pcur_restore_position(BTR_SEARCH_LEAF, &(node->pcur),
&mtr_vers);
ut_a(success);
-
+
old_has = row_vers_old_has_index_entry(FALSE,
btr_pcur_get_rec(&(node->pcur)),
&mtr_vers, index, entry);
@@ -331,7 +331,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
} else {
/* Remove the index record */
- if (mode == BTR_MODIFY_LEAF) {
+ if (mode == BTR_MODIFY_LEAF) {
success = btr_cur_optimistic_delete(btr_cur, &mtr);
if (success) {
err = DB_SUCCESS;
@@ -376,17 +376,17 @@ row_undo_mod_del_mark_or_remove_sec(
dtuple_t* entry) /* in: index entry */
{
ulint err;
-
+
err = row_undo_mod_del_mark_or_remove_sec_low(node, thr, index,
- entry, BTR_MODIFY_LEAF);
+ entry, BTR_MODIFY_LEAF);
if (err == DB_SUCCESS) {
return(err);
}
err = row_undo_mod_del_mark_or_remove_sec_low(node, thr, index,
- entry, BTR_MODIFY_TREE);
- return(err);
+ entry, BTR_MODIFY_TREE);
+ return(err);
}
/***************************************************************
@@ -417,7 +417,7 @@ row_undo_mod_del_unmark_sec_and_undo_update(
log_free_check();
mtr_start(&mtr);
-
+
found = row_search_index_entry(index, entry, mode, &pcur, &mtr);
if (!found) {
@@ -437,34 +437,34 @@ row_undo_mod_del_unmark_sec_and_undo_update(
} else {
btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&pcur);
- err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
+ err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
btr_cur, FALSE, thr, &mtr);
- ut_a(err == DB_SUCCESS);
+ ut_a(err == DB_SUCCESS);
heap = mem_heap_create(100);
update = row_upd_build_sec_rec_difference_binary(index, entry,
btr_cur_get_rec(btr_cur), trx, heap);
- if (upd_get_n_fields(update) == 0) {
+ if (upd_get_n_fields(update) == 0) {
/* Do nothing */
-
+
} else if (mode == BTR_MODIFY_LEAF) {
- /* Try an optimistic updating of the record, keeping
+ /* Try an optimistic updating of the record, keeping
changes within the page */
- err = btr_cur_optimistic_update(BTR_KEEP_SYS_FLAG
+ err = btr_cur_optimistic_update(BTR_KEEP_SYS_FLAG
| BTR_NO_LOCKING_FLAG,
- btr_cur, update, 0, thr, &mtr);
- if (err == DB_OVERFLOW || err == DB_UNDERFLOW) {
- err = DB_FAIL;
- }
- } else {
- ut_a(mode == BTR_MODIFY_TREE);
- err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG
+ btr_cur, update, 0, thr, &mtr);
+ if (err == DB_OVERFLOW || err == DB_UNDERFLOW) {
+ err = DB_FAIL;
+ }
+ } else {
+ ut_a(mode == BTR_MODIFY_TREE);
+ err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG
| BTR_NO_LOCKING_FLAG,
btr_cur, &dummy_big_rec,
update, 0, thr, &mtr);
- }
+ }
mem_heap_free(heap);
}
@@ -489,7 +489,7 @@ row_undo_mod_upd_del_sec(
dtuple_t* entry;
dict_index_t* index;
ulint err;
-
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -505,7 +505,7 @@ row_undo_mod_upd_del_sec(
return(err);
}
-
+
node->index = dict_table_get_next_index(node->index);
}
@@ -535,7 +535,7 @@ row_undo_mod_del_mark_sec(
index = node->index;
entry = row_build_index_entry(node->row, index, heap);
-
+
err = row_undo_mod_del_unmark_sec_and_undo_update(
BTR_MODIFY_LEAF,
thr, index, entry);
@@ -555,7 +555,7 @@ row_undo_mod_del_mark_sec(
node->index = dict_table_get_next_index(node->index);
}
- mem_heap_free(heap);
+ mem_heap_free(heap);
return(DB_SUCCESS);
}
@@ -577,10 +577,10 @@ row_undo_mod_upd_exist_sec(
if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
/* No change in secondary indexes */
-
+
return(DB_SUCCESS);
}
-
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -670,15 +670,15 @@ row_undo_mod_parse_undo_rec(
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
&dummy_extern, &undo_no, &table_id);
node->rec_type = type;
-
+
node->table = dict_table_get_on_id(table_id, trx);
/* TODO: other fixes associated with DROP TABLE + rollback in the
same table by another user */
if (node->table == NULL) {
- /* Table was dropped */
- return;
+ /* Table was dropped */
+ return;
}
if (node->table->ibd_file_missing) {
@@ -703,7 +703,7 @@ row_undo_mod_parse_undo_rec(
node->new_trx_id = trx_id;
node->cmpl_info = cmpl_info;
}
-
+
/***************************************************************
Undoes a modify operation on a row of a table. */
@@ -716,7 +716,7 @@ row_undo_mod(
{
ibool found;
ulint err;
-
+
ut_ad(node && thr);
ut_ad(node->state == UNDO_NODE_MODIFY);
@@ -731,8 +731,8 @@ row_undo_mod(
if (!found) {
/* It is already undone, or will be undone by another query
thread, or table was dropped */
-
- trx_undo_rec_release(node->trx, node->undo_no);
+
+ trx_undo_rec_release(node->trx, node->undo_no);
node->state = UNDO_NODE_FETCH_NEXT;
return(DB_SUCCESS);
@@ -742,7 +742,7 @@ row_undo_mod(
dict_table_get_first_index(node->table));
if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
-
+
err = row_undo_mod_upd_exist_sec(node, thr);
} else if (node->rec_type == TRX_UNDO_DEL_MARK_REC) {
@@ -757,8 +757,8 @@ row_undo_mod(
return(err);
}
-
+
err = row_undo_mod_clust(node, thr);
-
+
return(err);
}
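
In row_undo_mod_del_unmark_sec_and_undo_update() above, the optimistic in-page update folds DB_OVERFLOW and DB_UNDERFLOW into DB_FAIL, so the caller simply retries the whole operation in BTR_MODIFY_TREE mode with the pessimistic update. The folding step in isolation, with stand-in error codes:

enum sk_db_err { SK_DB_SUCCESS, SK_DB_FAIL, SK_DB_OVERFLOW, SK_DB_UNDERFLOW };

/* Map the two "does not fit in the page" outcomes of the optimistic
update to one retryable code, as in the hunk above. */
static enum sk_db_err
sketch_fold_optimistic_result(enum sk_db_err err)
{
        if (err == SK_DB_OVERFLOW || err == SK_DB_UNDERFLOW) {
                return(SK_DB_FAIL);     /* caller retries with BTR_MODIFY_TREE */
        }

        return(err);
}

int
main(void)
{
        return(sketch_fold_optimistic_result(SK_DB_UNDERFLOW) != SK_DB_FAIL);
}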
diff --git a/storage/innobase/row/row0undo.c b/storage/innobase/row/row0undo.c
index 435c0279dbb..3454f6e3274 100644
--- a/storage/innobase/row/row0undo.c
+++ b/storage/innobase/row/row0undo.c
@@ -159,7 +159,7 @@ row_undo_search_clust_to_pcur(
mtr_start(&mtr);
clust_index = dict_table_get_first_index(node->table);
-
+
found = row_search_on_row_ref(&(node->pcur), BTR_MODIFY_LEAF,
node->table, node->ref, &mtr);
@@ -175,10 +175,10 @@ row_undo_search_clust_to_pcur(
BEFORE releasing the latch on the clustered index page: this
is to make sure that some thread will eventually undo the
modification corresponding to node->roll_ptr. */
-
+
/* fputs("--------------------undoing a previous version\n",
stderr); */
-
+
ret = FALSE;
} else {
node->row = row_build(ROW_COPY_DATA, clust_index, rec,
@@ -195,7 +195,7 @@ row_undo_search_clust_to_pcur(
}
return(ret);
}
-
+
/***************************************************************
Fetches an undo log record and does the undo for the recorded operation.
If none left, or a partial rollback completed, returns control to the
@@ -213,9 +213,9 @@ row_undo(
trx_t* trx;
dulint roll_ptr;
ibool froze_data_dict = FALSE;
-
+
ut_ad(node && thr);
-
+
trx = node->trx;
if (node->state == UNDO_NODE_FETCH_NEXT) {
@@ -248,12 +248,12 @@ row_undo(
again in this same rollback, restoring the previous version */
roll_ptr = node->new_roll_ptr;
-
+
node->undo_rec = trx_undo_get_undo_rec_low(roll_ptr,
node->heap);
node->roll_ptr = roll_ptr;
node->undo_no = trx_undo_rec_get_undo_no(node->undo_rec);
-
+
if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
node->state = UNDO_NODE_INSERT;
@@ -263,15 +263,15 @@ row_undo(
}
/* Prevent DROP TABLE etc. while we are rolling back this row.
- If we are doing a TABLE CREATE or some other dictionary operation,
- then we already have dict_operation_lock locked in x-mode. Do not
- try to lock again in s-mode, because that would cause a hang. */
+ If we are doing a TABLE CREATE or some other dictionary operation,
+ then we already have dict_operation_lock locked in x-mode. Do not
+ try to lock again in s-mode, because that would cause a hang. */
if (trx->dict_operation_lock_mode == 0) {
-
- row_mysql_freeze_data_dictionary(trx);
- froze_data_dict = TRUE;
+ row_mysql_freeze_data_dictionary(trx);
+
+ froze_data_dict = TRUE;
}
if (node->state == UNDO_NODE_INSERT) {
@@ -286,14 +286,14 @@ row_undo(
if (froze_data_dict) {
- row_mysql_unfreeze_data_dictionary(trx);
+ row_mysql_unfreeze_data_dictionary(trx);
}
/* Do some cleanup */
btr_pcur_close(&(node->pcur));
mem_heap_empty(node->heap);
-
+
thr->run_node = node;
return(err);
@@ -316,9 +316,9 @@ row_undo_step(
ut_ad(thr);
srv_activity_count++;
-
+
trx = thr_get_trx(thr);
-
+
node = thr->run_node;
ut_ad(que_node_get_type(node) == QUE_NODE_UNDO);
@@ -338,13 +338,13 @@ row_undo_step(
"InnoDB: Error 13 means out of tablespace.\n"
"InnoDB: Consider increasing your tablespace.\n");
- exit(1);
+ exit(1);
}
-
+
ut_error;
return(NULL);
}
return(thr);
-}
+}
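
row_undo() above takes the data dictionary latch in s-mode (row_mysql_freeze_data_dictionary()) only when trx->dict_operation_lock_mode is 0, and records in froze_data_dict that it must release the latch afterwards; if the transaction already holds dict_operation_lock in x-mode for a dictionary operation, s-locking it again would hang. A small sketch of the guard, with stand-in names:

static int      sk_dict_lock_mode = 0;  /* 0 = not held, 1 = S, 2 = X */

static void     sk_freeze(void)   { sk_dict_lock_mode = 1; }
static void     sk_unfreeze(void) { sk_dict_lock_mode = 0; }

static void
sketch_undo_one_row(void)
{
        int     froze = 0;

        if (sk_dict_lock_mode == 0) {
                /* Not holding the latch: take it in s-mode so DROP TABLE
                etc. cannot interfere, and remember to release it. */
                sk_freeze();
                froze = 1;
        }

        /* ... undo the insert or the modify here ... */

        if (froze) {
                sk_unfreeze();
        }
}

int
main(void)
{
        sketch_undo_one_row();
        return(sk_dict_lock_mode);      /* 0: latch released again */
}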
diff --git a/storage/innobase/row/row0upd.c b/storage/innobase/row/row0upd.c
index ff1ad1dfd05..23be601a17b 100644
--- a/storage/innobase/row/row0upd.c
+++ b/storage/innobase/row/row0upd.c
@@ -130,7 +130,7 @@ row_upd_index_is_referenced(
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
-
+
if (froze_data_dict) {
row_mysql_unfreeze_data_dictionary(trx);
}
@@ -176,16 +176,16 @@ row_upd_check_references_constraints(
entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
- mtr_commit(mtr);
+ mtr_commit(mtr);
+
+ mtr_start(mtr);
- mtr_start(mtr);
-
if (trx->dict_operation_lock_mode == 0) {
got_s_lock = TRUE;
row_mysql_freeze_data_dictionary(trx);
}
-
+
foreign = UT_LIST_GET_FIRST(table->referenced_list);
while (foreign) {
@@ -195,10 +195,11 @@ row_upd_check_references_constraints(
NOT break the constraint. */
if (foreign->referenced_index == index
- && (node->is_delete
- || row_upd_changes_first_fields_binary(entry, index,
- node->update, foreign->n_fields))) {
-
+ && (node->is_delete
+ || row_upd_changes_first_fields_binary(
+ entry, index, node->update,
+ foreign->n_fields))) {
+
if (foreign->foreign_table == NULL) {
dict_table_get(foreign->foreign_table_name,
trx);
@@ -217,7 +218,7 @@ row_upd_check_references_constraints(
we will release dict_operation_lock temporarily!
But the counter on the table protects 'foreign' from
being dropped while the check is running. */
-
+
err = row_ins_check_foreign_constraint(FALSE, foreign,
table, entry, thr);
@@ -253,7 +254,7 @@ row_upd_check_references_constraints(
}
mem_heap_free(heap);
-
+
return(DB_SUCCESS);
}
@@ -279,18 +280,18 @@ upd_node_create(
node->ext_vec = NULL;
node->index = NULL;
node->update = NULL;
-
+
node->foreign = NULL;
node->cascade_heap = NULL;
node->cascade_node = NULL;
-
+
node->select = NULL;
-
+
node->heap = mem_heap_create(128);
- node->magic_n = UPD_NODE_MAGIC_N;
+ node->magic_n = UPD_NODE_MAGIC_N;
node->cmpl_info = 0;
-
+
return(node);
}
@@ -396,7 +397,8 @@ row_upd_changes_field_size_or_external(
old_len = rec_offs_nth_size(offsets, upd_field->field_no);
if (rec_offs_comp(offsets)
- && rec_offs_nth_sql_null(offsets, upd_field->field_no)) {
+ && rec_offs_nth_sql_null(offsets,
+ upd_field->field_no)) {
/* Note that in the compact table format, for a
variable length field, an SQL NULL will use zero
bytes in the offset array at the start of the physical
@@ -411,7 +413,7 @@ row_upd_changes_field_size_or_external(
return(TRUE);
}
-
+
if (rec_offs_nth_extern(offsets, upd_field->field_no)) {
return(TRUE);
@@ -452,7 +454,7 @@ row_upd_rec_in_place(
for (i = 0; i < n_fields; i++) {
upd_field = upd_get_nth_field(update, i);
new_val = &(upd_field->new_val);
-
+
rec_set_nth_field(rec, offsets, upd_field->field_no,
dfield_get_data(new_val),
dfield_get_len(new_val));
@@ -481,7 +483,7 @@ row_upd_write_sys_vals_to_log(
dict_index_get_sys_col_pos(index, DATA_TRX_ID));
trx_write_roll_ptr(log_ptr, roll_ptr);
- log_ptr += DATA_ROLL_PTR_LEN;
+ log_ptr += DATA_ROLL_PTR_LEN;
log_ptr += mach_dulint_write_compressed(log_ptr, trx->id);
@@ -512,9 +514,9 @@ row_upd_parse_sys_vals(
return(NULL);
}
-
+
*roll_ptr = trx_read_roll_ptr(ptr);
- ptr += DATA_ROLL_PTR_LEN;
+ ptr += DATA_ROLL_PTR_LEN;
ptr = mach_dulint_parse_compressed(ptr, end_ptr, trx_id);
@@ -543,18 +545,20 @@ row_upd_index_write_log(
n_fields = upd_get_n_fields(update);
buf_end = log_ptr + MLOG_BUF_MARGIN;
-
+
mach_write_to_1(log_ptr, update->info_bits);
log_ptr++;
log_ptr += mach_write_compressed(log_ptr, n_fields);
-
+
for (i = 0; i < n_fields; i++) {
- ut_ad(MLOG_BUF_MARGIN > 30);
+#if MLOG_BUF_MARGIN <= 30
+# error "MLOG_BUF_MARGIN <= 30"
+#endif
if (log_ptr + 30 > buf_end) {
mlog_close(mtr, log_ptr);
-
+
log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
buf_end = log_ptr + MLOG_BUF_MARGIN;
}
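
The hunk above replaces the runtime assertion ut_ad(MLOG_BUF_MARGIN > 30) with a preprocessor check, so an unsuitably small margin now breaks the build instead of only tripping a debug assertion. The idiom in isolation, with a stand-in constant:

#define SKETCH_MLOG_BUF_MARGIN  256     /* stand-in value for the sketch */

#if SKETCH_MLOG_BUF_MARGIN <= 30
# error "SKETCH_MLOG_BUF_MARGIN <= 30"
#endif

int
main(void)
{
        return(0);      /* compiles only when the margin is large enough */
}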
@@ -575,7 +579,7 @@ row_upd_index_write_log(
log_ptr += len;
} else {
mlog_close(mtr, log_ptr);
-
+
mlog_catenate_string(mtr, new_val->data, len);
log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
@@ -694,7 +698,7 @@ upd_ext_vec_contains(
return(FALSE);
}
-
+
/*******************************************************************
Builds an update vector from those fields which in a secondary index entry
differ from a record that has the equal ordering fields. NOTE: we compare
@@ -828,10 +832,10 @@ row_upd_build_difference_binary(
}
extern_bit = upd_ext_vec_contains(ext_vec, n_ext_vec, i);
-
+
if (UNIV_UNLIKELY(extern_bit ==
- (ibool)!rec_offs_nth_extern(offsets, i))
- || !dfield_data_is_binary_equal(dfield, len, data)) {
+ (ibool)!rec_offs_nth_extern(offsets, i))
+ || !dfield_data_is_binary_equal(dfield, len, data)) {
upd_field = upd_get_nth_field(update, n_diff);
@@ -883,36 +887,36 @@ row_upd_index_replace_new_col_vals_index_pos(
for (j = 0; j < dict_index_get_n_fields(index); j++) {
- field = dict_index_get_nth_field(index, j);
+ field = dict_index_get_nth_field(index, j);
for (i = 0; i < upd_get_n_fields(update); i++) {
- upd_field = upd_get_nth_field(update, i);
+ upd_field = upd_get_nth_field(update, i);
if (upd_field->field_no == j) {
- dfield = dtuple_get_nth_field(entry, j);
+ dfield = dtuple_get_nth_field(entry, j);
new_val = &(upd_field->new_val);
dfield_set_data(dfield, new_val->data,
new_val->len);
if (heap && new_val->len != UNIV_SQL_NULL) {
- dfield->data = mem_heap_alloc(heap,
+ dfield->data = mem_heap_alloc(heap,
new_val->len);
ut_memcpy(dfield->data, new_val->data,
new_val->len);
}
if (field->prefix_len > 0
- && new_val->len != UNIV_SQL_NULL) {
+ && new_val->len != UNIV_SQL_NULL) {
- cur_type = dict_col_get_type(
+ cur_type = dict_col_get_type(
dict_field_get_col(field));
- dfield->len =
- dtype_get_at_most_n_mbchars(
- cur_type,
+ dfield->len =
+ dtype_get_at_most_n_mbchars(
+ cur_type,
field->prefix_len,
new_val->len,
new_val->data);
@@ -953,36 +957,36 @@ row_upd_index_replace_new_col_vals(
for (j = 0; j < dict_index_get_n_fields(index); j++) {
- field = dict_index_get_nth_field(index, j);
+ field = dict_index_get_nth_field(index, j);
for (i = 0; i < upd_get_n_fields(update); i++) {
- upd_field = upd_get_nth_field(update, i);
+ upd_field = upd_get_nth_field(update, i);
if (upd_field->field_no == field->col->clust_pos) {
- dfield = dtuple_get_nth_field(entry, j);
+ dfield = dtuple_get_nth_field(entry, j);
new_val = &(upd_field->new_val);
dfield_set_data(dfield, new_val->data,
new_val->len);
if (heap && new_val->len != UNIV_SQL_NULL) {
- dfield->data = mem_heap_alloc(heap,
+ dfield->data = mem_heap_alloc(heap,
new_val->len);
ut_memcpy(dfield->data, new_val->data,
new_val->len);
}
if (field->prefix_len > 0
- && new_val->len != UNIV_SQL_NULL) {
+ && new_val->len != UNIV_SQL_NULL) {
cur_type = dict_col_get_type(
dict_field_get_col(field));
- dfield->len =
- dtype_get_at_most_n_mbchars(
- cur_type,
+ dfield->len =
+ dtype_get_at_most_n_mbchars(
+ cur_type,
field->prefix_len,
new_val->len,
new_val->data);
@@ -1022,7 +1026,7 @@ row_upd_changes_ord_field_binary(
ulint col_pos;
ulint col_no;
ulint i, j;
-
+
ut_ad(update && index);
n_unique = dict_index_get_n_unique(index);
@@ -1045,10 +1049,11 @@ row_upd_changes_ord_field_binary(
the datas */
if (col_pos == upd_field->field_no
- && (row == NULL
- || ind_field->prefix_len > 0
- || !dfield_datas_are_binary_equal(
- dtuple_get_nth_field(row, col_no),
+ && (row == NULL
+ || ind_field->prefix_len > 0
+ || !dfield_datas_are_binary_equal(
+ dtuple_get_nth_field(row,
+ col_no),
&(upd_field->new_val)))) {
return(TRUE);
}
@@ -1073,21 +1078,21 @@ row_upd_changes_some_index_ord_field_binary(
upd_field_t* upd_field;
dict_index_t* index;
ulint i;
-
+
index = dict_table_get_first_index(table);
-
+
for (i = 0; i < upd_get_n_fields(update); i++) {
upd_field = upd_get_nth_field(update, i);
if (dict_field_get_col(dict_index_get_nth_field(index,
- upd_field->field_no))
- ->ord_part) {
+ upd_field->field_no))
+ ->ord_part) {
- return(TRUE);
+ return(TRUE);
}
}
-
+
return(FALSE);
}
@@ -1111,10 +1116,10 @@ row_upd_changes_first_fields_binary(
ulint n_upd_fields;
ulint col_pos;
ulint i, j;
-
+
ut_a(update && index);
ut_a(n <= dict_index_get_n_fields(index));
-
+
n_upd_fields = upd_get_n_fields(update);
for (i = 0; i < n; i++) {
@@ -1130,9 +1135,10 @@ row_upd_changes_first_fields_binary(
upd_field = upd_get_nth_field(update, j);
if (col_pos == upd_field->field_no
- && !dfield_datas_are_binary_equal(
- dtuple_get_nth_field(entry, i),
- &(upd_field->new_val))) {
+ && !dfield_datas_are_binary_equal(
+ dtuple_get_nth_field(entry, i),
+ &(upd_field->new_val))) {
+
return(TRUE);
}
}
@@ -1214,11 +1220,11 @@ row_upd_store_row(
mem_heap_empty(node->heap);
node->row = NULL;
}
-
+
clust_index = dict_table_get_first_index(node->table);
rec = btr_pcur_get_rec(node->pcur);
-
+
offsets = rec_get_offsets(rec, clust_index, offsets_,
ULINT_UNDEFINED, &heap);
node->row = row_build(ROW_COPY_DATA, clust_index, rec, offsets,
@@ -1230,7 +1236,7 @@ row_upd_store_row(
} else {
update = node->update;
}
-
+
node->n_ext_vec = btr_push_update_extern_fields(node->ext_vec,
offsets, update);
if (UNIV_LIKELY_NULL(heap)) {
@@ -1262,7 +1268,7 @@ row_upd_sec_index_entry(
trx_t* trx = thr_get_trx(thr);
index = node->index;
-
+
check_ref = row_upd_index_is_referenced(index, trx);
heap = mem_heap_create(1024);
@@ -1272,7 +1278,7 @@ row_upd_sec_index_entry(
log_free_check();
mtr_start(&mtr);
-
+
found = row_search_index_entry(index, entry, BTR_MODIFY_LEAF, &pcur,
&mtr);
btr_cur = btr_pcur_get_btr_cur(&pcur);
@@ -1296,15 +1302,16 @@ row_upd_sec_index_entry(
fputs("\n"
"InnoDB: Submit a detailed bug report to http://bugs.mysql.com\n", stderr);
} else {
- /* Delete mark the old index record; it can already be
- delete marked if we return after a lock wait in
- row_ins_index_entry below */
+ /* Delete mark the old index record; it can already be
+ delete marked if we return after a lock wait in
+ row_ins_index_entry below */
- if (!rec_get_deleted_flag(rec, index->table->comp)) {
+ if (!rec_get_deleted_flag(rec,
+ dict_table_is_comp(index->table))) {
err = btr_cur_del_mark_set_sec_rec(0, btr_cur, TRUE,
thr, &mtr);
if (err == DB_SUCCESS && check_ref) {
-
+
/* NOTE that the following call loses
the position of pcur ! */
err = row_upd_check_references_constraints(
@@ -1317,7 +1324,7 @@ row_upd_sec_index_entry(
}
}
- }
+ }
}
close_cur:
btr_pcur_close(&pcur);
@@ -1325,9 +1332,9 @@ close_cur:
if (node->is_delete || err != DB_SUCCESS) {
- mem_heap_free(heap);
+ mem_heap_free(heap);
- return(err);
+ return(err);
}
/* Build a new index entry */
@@ -1336,9 +1343,9 @@ close_cur:
/* Insert new index entry */
err = row_ins_index_entry(index, entry, NULL, 0, thr);
- mem_heap_free(heap);
+ mem_heap_free(heap);
- return(err);
+ return(err);
}
/***************************************************************
@@ -1358,10 +1365,10 @@ row_upd_sec_step(
ut_ad((node->state == UPD_NODE_UPDATE_ALL_SEC)
|| (node->state == UPD_NODE_UPDATE_SOME_SEC));
ut_ad(!(node->index->type & DICT_CLUSTERED));
-
+
if (node->state == UPD_NODE_UPDATE_ALL_SEC
- || row_upd_changes_ord_field_binary(node->row, node->index,
- node->update)) {
+ || row_upd_changes_ord_field_binary(node->row, node->index,
+ node->update)) {
err = row_upd_sec_index_entry(node, thr);
return(err);
@@ -1395,7 +1402,7 @@ row_upd_clust_rec_by_insert(
dict_table_t* table;
dtuple_t* entry;
ulint err;
-
+
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
@@ -1403,7 +1410,7 @@ row_upd_clust_rec_by_insert(
table = node->table;
pcur = node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
-
+
if (node->state != UPD_NODE_INSERT_CLUSTERED) {
ulint offsets_[REC_OFFS_NORMAL_SIZE];
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
@@ -1439,7 +1446,7 @@ row_upd_clust_rec_by_insert(
}
}
- }
+ }
mtr_commit(mtr);
@@ -1451,26 +1458,26 @@ row_upd_clust_rec_by_insert(
entry = row_build_index_entry(node->row, index, heap);
row_upd_index_replace_new_col_vals(entry, index, node->update, NULL);
-
+
row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);
-
+
/* If we return from a lock wait, for example, we may have
extern fields marked as not-owned in entry (marked in the
if-branch above). We must unmark them. */
-
+
btr_cur_unmark_dtuple_extern_fields(entry, node->ext_vec,
node->n_ext_vec);
/* We must mark non-updated extern fields in entry as inherited,
so that a possible rollback will not free them */
-
+
btr_cur_mark_dtuple_inherited_extern(entry, node->ext_vec,
node->n_ext_vec,
node->update);
-
+
err = row_ins_index_entry(index, entry, node->ext_vec,
node->n_ext_vec, thr);
mem_heap_free(heap);
-
+
return(err);
}
@@ -1492,7 +1499,7 @@ row_upd_clust_rec(
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
ulint err;
-
+
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
@@ -1500,8 +1507,8 @@ row_upd_clust_rec(
btr_cur = btr_pcur_get_btr_cur(pcur);
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
- index->table->comp));
-
+ dict_table_is_comp(index->table)));
+
/* Try optimistic updating of the record, keeping changes within
the page; we do not check locks because we assume the x-lock on the
record to update */
@@ -1517,7 +1524,7 @@ row_upd_clust_rec(
}
mtr_commit(mtr);
-
+
if (err == DB_SUCCESS) {
return(err);
@@ -1527,7 +1534,7 @@ row_upd_clust_rec(
down the index tree */
mtr_start(mtr);
-
+
/* NOTE: this transaction has an s-lock or x-lock on the record and
therefore other transactions cannot modify the record when we have no
latch on the page. In addition, we assume that other query threads of
@@ -1537,8 +1544,8 @@ row_upd_clust_rec(
ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr));
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
- index->table->comp));
-
+ dict_table_is_comp(index->table)));
+
err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur,
&big_rec, node->update,
node->cmpl_info, thr, mtr);
@@ -1567,7 +1574,7 @@ row_upd_clust_rec(
if (big_rec) {
dtuple_big_rec_free(big_rec);
}
-
+
return(err);
}
@@ -1589,7 +1596,7 @@ row_upd_del_mark_clust_rec(
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
ulint err;
-
+
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(node->is_delete);
@@ -1599,7 +1606,7 @@ row_upd_del_mark_clust_rec(
/* Store row because we have to build also the secondary index
entries */
-
+
row_upd_store_row(node);
/* Mark the clustered index record deleted; we do not have to check
@@ -1621,7 +1628,7 @@ row_upd_del_mark_clust_rec(
}
mtr_commit(mtr);
-
+
return(err);
}
@@ -1660,7 +1667,7 @@ row_upd_clust_step(
mtr = &mtr_buf;
mtr_start(mtr);
-
+
/* If the restoration does not succeed, then the same
transaction has deleted the record on which the cursor was,
and that is an SQL error. If the restoration succeeds, it may
@@ -1668,7 +1675,7 @@ row_upd_clust_step(
and inserted a record with the same ordering fields, but in
that case we know that the transaction has at least an
implicit x-lock on the record. */
-
+
ut_a(pcur->rel_pos == BTR_PCUR_ON);
success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr);
@@ -1686,7 +1693,7 @@ row_upd_clust_step(
with the index */
if (node->is_delete
- && ut_dulint_cmp(node->table->id, DICT_INDEXES_ID) == 0) {
+ && ut_dulint_cmp(node->table->id, DICT_INDEXES_ID) == 0) {
dict_drop_index_tree(btr_pcur_get_rec(pcur), mtr);
@@ -1703,7 +1710,7 @@ row_upd_clust_step(
return(err);
}
- }
+ }
rec = btr_pcur_get_rec(pcur);
offsets = rec_get_offsets(rec, index, offsets_,
@@ -1733,10 +1740,10 @@ row_upd_clust_step(
}
return(err);
}
-
+
/* If the update is made for MySQL, we already have the update vector
ready, else we have to do some evaluation: */
-
+
if (!node->in_mysql_interface) {
/* Copy the necessary columns from clust_rec and calculate the
new values to set */
@@ -1748,13 +1755,13 @@ row_upd_clust_step(
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
-
+
if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
err = row_upd_clust_rec(node, index, thr, mtr);
return(err);
}
-
+
row_upd_store_row(node);
if (row_upd_changes_ord_field_binary(node->row, index, node->update)) {
@@ -1808,18 +1815,18 @@ row_upd(
que_thr_t* thr) /* in: query thread */
{
ulint err = DB_SUCCESS;
-
+
ut_ad(node && thr);
if (node->in_mysql_interface) {
-
+
/* We do not get the cmpl_info value from the MySQL
interpreter: we must calculate it on the fly: */
-
+
if (node->is_delete ||
row_upd_changes_some_index_ord_field_binary(
node->table, node->update)) {
- node->cmpl_info = 0;
+ node->cmpl_info = 0;
} else {
node->cmpl_info = UPD_NODE_NO_ORD_CHANGE;
}
@@ -1829,7 +1836,7 @@ row_upd(
|| node->state == UPD_NODE_INSERT_CLUSTERED) {
err = row_upd_clust_step(node, thr);
-
+
if (err != DB_SUCCESS) {
goto function_exit;
@@ -1850,7 +1857,7 @@ row_upd(
}
node->index = dict_table_get_next_index(node->index);
- }
+ }
function_exit:
if (err == DB_SUCCESS) {
@@ -1865,7 +1872,7 @@ function_exit:
node->state = UPD_NODE_UPDATE_CLUSTERED;
}
- return(err);
+ return(err);
}
/***************************************************************
@@ -1885,19 +1892,19 @@ row_upd_step(
trx_t* trx;
ut_ad(thr);
-
+
trx = thr_get_trx(thr);
trx_start_if_not_started(trx);
node = thr->run_node;
-
+
sel_node = node->select;
parent = que_node_get_parent(node);
-
+
ut_ad(que_node_get_type(node) == QUE_NODE_UPDATE);
-
+
if (thr->prev_node == parent) {
node->state = UPD_NODE_SET_IX_LOCK;
}
@@ -1915,23 +1922,23 @@ row_upd_step(
goto error_handling;
}
}
-
+
node->state = UPD_NODE_UPDATE_CLUSTERED;
if (node->searched_update) {
/* Reset the cursor */
sel_node->state = SEL_NODE_OPEN;
-
+
/* Fetch a row to update */
-
+
thr->run_node = sel_node;
-
+
return(thr);
}
}
/* sel_node is NULL if we are in the MySQL interface */
-
+
if (sel_node && (sel_node->state != SEL_NODE_FETCH)) {
if (!node->searched_update) {
@@ -1939,7 +1946,7 @@ row_upd_step(
to update */
ut_error;
-
+
err = DB_ERROR;
goto error_handling;
@@ -1951,12 +1958,12 @@ row_upd_step(
updates directly in-place */
thr->run_node = parent;
-
+
return(thr);
}
/* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */
-
+
err = row_upd(node, thr);
error_handling:
@@ -1986,7 +1993,7 @@ error_handling:
node->state = UPD_NODE_UPDATE_CLUSTERED;
return(thr);
-}
+}
/*************************************************************************
Performs an in-place update for the current clustered index record in
@@ -2031,8 +2038,8 @@ row_upd_in_place_in_select(
row_upd_eval_new_vals(node->update);
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
- btr_cur->index->table->comp));
-
+ dict_table_is_comp(btr_cur->index->table)));
+
ut_ad(node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE);
ut_ad(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
ut_ad(node->select_will_do_update);
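
Several hunks in this file swap direct reads of index->table->comp for the dict_table_is_comp() accessor inside the debug assertions. As a generic illustration of hiding such a format flag behind an accessor (the struct and names below are stand-ins, not dict_table_t):

struct sketch_table {
        int     comp;           /* nonzero: compact row format */
};

/* Callers go through the accessor, so the flag's representation can be
changed later without touching every call site. */
static int
sketch_table_is_comp(const struct sketch_table* table)
{
        return(table->comp != 0);
}

int
main(void)
{
        struct sketch_table     t = { 1 };

        return(!sketch_table_is_comp(&t));
}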
diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c
index b32ab8822f4..ab3b6385146 100644
--- a/storage/innobase/row/row0vers.c
+++ b/storage/innobase/row/row0vers.c
@@ -62,7 +62,7 @@ row_vers_impl_x_locked_off_kernel(
ulint err;
mtr_t mtr;
ulint comp;
-
+
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -71,12 +71,12 @@ row_vers_impl_x_locked_off_kernel(
mutex_exit(&kernel_mutex);
mtr_start(&mtr);
-
+
/* Search for the clustered index record: this is a time-consuming
operation: therefore we release the kernel mutex; also, the release
is required by the latching order convention. The latch on the
clustered index locks the top of the stack of versions. We also
- reserve purge_latch to lock the bottom of the version stack. */
+ reserve purge_latch to lock the bottom of the version stack. */
clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
&clust_index, &mtr);
@@ -96,7 +96,7 @@ row_vers_impl_x_locked_off_kernel(
mutex_enter(&kernel_mutex);
mtr_commit(&mtr);
- return(NULL);
+ return(NULL);
}
heap = mem_heap_create(1024);
@@ -107,7 +107,7 @@ row_vers_impl_x_locked_off_kernel(
mtr_s_lock(&(purge_sys->latch), &mtr);
mutex_enter(&kernel_mutex);
-
+
trx = NULL;
if (!trx_is_active(trx_id)) {
/* The transaction that modified or inserted clust_rec is no
@@ -123,7 +123,7 @@ row_vers_impl_x_locked_off_kernel(
comp = page_rec_is_comp(rec);
ut_ad(index->table == clust_index->table);
- ut_ad(!!comp == index->table->comp);
+ ut_ad(!!comp == dict_table_is_comp(index->table));
ut_ad(!comp == !page_rec_is_comp(clust_rec));
/* We look up if some earlier version, which was modified by the trx_id
@@ -179,7 +179,7 @@ row_vers_impl_x_locked_off_kernel(
may assert the following: */
ut_ad(err == DB_SUCCESS);
-
+
if (prev_version == NULL) {
/* It was a freshly inserted version: there is an
implicit x-lock on rec */
@@ -273,7 +273,7 @@ row_vers_must_preserve_del_marked(
/* A purge operation is not yet allowed to remove this
delete marked record */
-
+
return(TRUE);
}
@@ -313,7 +313,7 @@ row_vers_old_has_index_entry(
ulint comp;
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
- || mtr_memo_contains(mtr, buf_block_align(rec),
+ || mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -323,7 +323,7 @@ row_vers_old_has_index_entry(
clust_index = dict_table_get_first_index(index->table);
comp = page_rec_is_comp(rec);
- ut_ad(!index->table->comp == !comp);
+ ut_ad(!dict_table_is_comp(index->table) == !comp);
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
ULINT_UNDEFINED, &heap);
@@ -333,7 +333,7 @@ row_vers_old_has_index_entry(
rec, clust_offsets, heap);
entry = row_build_index_entry(row, index, heap);
- /* NOTE that we cannot do the comparison as binary
+ /* NOTE that we cannot do the comparison as binary
fields because the row is maybe being modified so that
the clustered index record has already been updated
to a different binary value in a char field, but the
@@ -373,7 +373,7 @@ row_vers_old_has_index_entry(
prev_version, clust_offsets, heap);
entry = row_build_index_entry(row, index, heap);
- /* NOTE that we cannot do the comparison as binary
+ /* NOTE that we cannot do the comparison as binary
fields because maybe the secondary index record has
already been updated to a different binary value in
a char field, but the collation identifies the old
@@ -421,14 +421,14 @@ row_vers_build_for_consistent_read(
{
rec_t* version;
rec_t* prev_version;
- dulint prev_trx_id;
+ dulint trx_id;
mem_heap_t* heap = NULL;
byte* buf;
ulint err;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
- || mtr_memo_contains(mtr, buf_block_align(rec),
+ || mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
@@ -436,16 +436,48 @@ row_vers_build_for_consistent_read(
ut_ad(rec_offs_validate(rec, index, *offsets));
- ut_ad(!read_view_sees_trx_id(view,
- row_get_rec_trx_id(rec, index, *offsets)));
+ trx_id = row_get_rec_trx_id(rec, index, *offsets);
+
+ ut_ad(!read_view_sees_trx_id(view, trx_id));
rw_lock_s_lock(&(purge_sys->latch));
version = rec;
for (;;) {
mem_heap_t* heap2 = heap;
+ trx_undo_rec_t* undo_rec;
+ dulint roll_ptr;
+ dulint undo_no;
heap = mem_heap_create(1024);
+ /* If we have high-granularity consistent read view and
+ creating transaction of the view is the same as trx_id in
+ the record we see this record only in the case when
+ undo_no of the record is < undo_no in the view. */
+
+ if (view->type == VIEW_HIGH_GRANULARITY
+ && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {
+
+ roll_ptr = row_get_rec_roll_ptr(version, index,
+ *offsets);
+ undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
+ undo_no = trx_undo_rec_get_undo_no(undo_rec);
+ mem_heap_empty(heap);
+
+ if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
+ /* The view already sees this version: we can
+ copy it to in_heap and return */
+
+ buf = mem_heap_alloc(in_heap,
+ rec_offs_size(*offsets));
+ *old_vers = rec_copy(buf, version, *offsets);
+ rec_offs_make_valid(*old_vers, index, *offsets);
+ err = DB_SUCCESS;
+
+ break;
+ }
+ }
+
err = trx_undo_prev_version_build(rec, mtr, version, index,
*offsets, heap, &prev_version);
if (heap2) {
@@ -466,10 +498,10 @@ row_vers_build_for_consistent_read(
*offsets = rec_get_offsets(prev_version, index, *offsets,
ULINT_UNDEFINED, offset_heap);
- prev_trx_id = row_get_rec_trx_id(prev_version, index,
- *offsets);
- if (read_view_sees_trx_id(view, prev_trx_id)) {
+ trx_id = row_get_rec_trx_id(prev_version, index, *offsets);
+
+ if (read_view_sees_trx_id(view, trx_id)) {
/* The view already sees this version: we can copy
it to in_heap and return */
@@ -558,8 +590,8 @@ row_vers_build_for_semi_consistent_read(
mutex_exit(&kernel_mutex);
if (!version_trx
- || version_trx->conc_state == TRX_NOT_STARTED
- || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {
+ || version_trx->conc_state == TRX_NOT_STARTED
+ || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {
/* We found a version that belongs to a
committed transaction: return it. */
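
The row_vers_build_for_consistent_read() hunk above adds the cursor ("high granularity") case: when the record was written by the very transaction that created the read view, visibility is decided by comparing the record's undo number, fetched through its roll pointer, against the undo number stored in the view, so only changes made before the cursor was opened are seen. A standalone paraphrase of that visibility test, with uint64_t standing in for dulint and plain fields for the view:

#include <stdint.h>
#include <stdio.h>

enum sketch_view_type { SK_VIEW_NORMAL, SK_VIEW_HIGH_GRANULARITY };

struct sketch_read_view {
        enum sketch_view_type   type;
        uint64_t                creator_trx_id; /* trx that opened the view */
        uint64_t                undo_no;        /* creator's undo count when the
                                                cursor was opened */
};

/* Visibility of the creator's own record version under a cursor view:
the version is visible only if it was written before the cursor was
opened, i.e. its undo number is lower than the view's. */
static int
sketch_own_version_visible(
        const struct sketch_read_view*  view,
        uint64_t                        rec_trx_id,     /* trx id in the record */
        uint64_t                        rec_undo_no)    /* undo no of the record,
                                                        looked up via its roll ptr */
{
        if (view->type != SK_VIEW_HIGH_GRANULARITY
            || view->creator_trx_id != rec_trx_id) {

                return(-1);     /* not this case: fall back to the ordinary
                                trx-id based visibility check */
        }

        return(view->undo_no > rec_undo_no);
}

int
main(void)
{
        struct sketch_read_view v = { SK_VIEW_HIGH_GRANULARITY, 7, 100 };

        printf("%d\n", sketch_own_version_visible(&v, 7, 42));   /* 1: visible */
        printf("%d\n", sketch_own_version_visible(&v, 7, 150));  /* 0: too new */
        return(0);
}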