| author | Sergey Petrunia <sergefp@mysql.com> | 2008-07-15 18:13:21 +0400 |
|---|---|---|
| committer | Sergey Petrunia <sergefp@mysql.com> | 2008-07-15 18:13:21 +0400 |
| commit | 2951f00be4c2e332686ca713f77dab39615ea083 | |
| tree | e53c78a5b0f2f37f7558020f80392ae283a65bf3 /sql/records.cc | |
| parent | 52f510ef22d1ebb518467bdb0d29f3cb7a3e77b1 | |
BUG#35478: sort_union() returns bad data when sort_buffer_size is hit
- In QUICK_INDEX_MERGE_SELECT::read_keys_and_merge: when we get table->sort from Unique,
  tell init_read_record() not to use rr_from_cache(), because a) the rowids are already sorted
  and b) the data may later be used by filesort(), which will need record rowids
  (which rr_from_cache() cannot provide); see the caller sketch below.
- Fully de-initialize the table->sort read in QUICK_INDEX_MERGE_SELECT::get_next(). This fixes BUG#35477.
(bk trigger: file as fix for BUG#35478).
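For context, the caller-side change lives outside this diffstat (it is not in sql/records.cc). A minimal hypothetical sketch of how a caller such as QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() would pass the new flag once Unique has left a sorted rowid sequence in table->sort; the helper name and the catch-all include are assumptions for illustration only:

```cpp
#include "mysql_priv.h"   /* era-appropriate catch-all header for server internals (assumed) */

/*
  Hypothetical helper (not part of this commit's diff) showing how the new
  disable_rr_cache argument of init_read_record() would be used after the
  rowids have been merged and sorted by Unique.
*/
static void start_sorted_rowid_scan(THD *thd, TABLE *table, READ_RECORD *info)
{
  /*
    Pass TRUE for disable_rr_cache: the rowids are already sorted and
    filesort() may still need them, so rr_from_cache() must not be chosen
    as the read method.
  */
  init_read_record(info, thd, table, (SQL_SELECT*) 0,
                   1 /* use_record_cache */, 1 /* print_error */,
                   TRUE /* disable_rr_cache */);
}
```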
Diffstat (limited to 'sql/records.cc')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sql/records.cc | 43 |

1 file changed, 40 insertions(+), 3 deletions(-)
```diff
diff --git a/sql/records.cc b/sql/records.cc
index f61efc13034..d5c3a421cd9 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -72,11 +72,47 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
 }
 
 
-/* init struct for read with info->read_record */
+/*
+  init struct for read with info->read_record
+
+  SYNOPSIS
+    init_read_record()
+      info              OUT read structure
+      thd               Thread handle
+      table             Table the data [originally] comes from.
+      select            SQL_SELECT structure. We may select->quick or
+                        select->file as data source
+      use_record_cache  Call file->extra_opt(HA_EXTRA_CACHE,...)
+                        if we're going to do sequential read and some
+                        additional conditions are satisfied.
+      print_error       Copy this to info->print_error
+      disable_rr_cache  Don't use rr_from_cache (used by sort-union
+                        index-merge which produces rowid sequences that
+                        are already ordered)
+
+  DESCRIPTION
+    This function sets up reading data via one of the methods:
+
+    rr_unpack_from_tempfile  Unpack full records from sequential file
+    rr_unpack_from_buffer    ... or from buffer
+
+    rr_from_tempfile  Read rowids from tempfile and get full records
+                      with handler->rnd_pos() calls.
+    rr_from_pointers  ... or get rowids from buffer
+
+    rr_from_cache     Read a bunch of rowids from file, sort them,
+                      get records in rowid order, return, repeat.
+
+    rr_quick          Get data from QUICK_*_SELECT
+
+    rr_sequential     Sequentially scan the table using
+                      handler->rnd_next() calls
+*/
 
 void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
 		      SQL_SELECT *select,
-		      int use_record_cache, bool print_error)
+		      int use_record_cache, bool print_error,
+		      bool disable_rr_cache)
 {
   IO_CACHE *tempfile;
   DBUG_ENTER("init_read_record");
@@ -121,7 +157,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
     it doesn't make sense to use cache - we don't read from the table
     and table->sort.io_cache is read sequentially
   */
-  if (!table->sort.addon_field &&
+  if (!disable_rr_cache &&
+      !table->sort.addon_field &&
       ! (specialflag & SPECIAL_SAFE_MODE) &&
       thd->variables.read_rnd_buff_size &&
       !(table->file->table_flags() & HA_FAST_KEY_READ) &&
```
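The new function comment enumerates the rr_* read methods that init_read_record() can install into READ_RECORD::read_record. As a rough sketch of the loop that dispatch serves (assuming the same catch-all include and the existing end_read_record() counterpart; the helper name is invented for illustration):

```cpp
#include "mysql_priv.h"   /* assumed catch-all header, as used by sql/records.cc */

/*
  Sketch of the caller-side loop: whichever rr_* function init_read_record()
  selected is invoked through the read_record function pointer until it
  returns non-zero (end of data or error).
*/
static int scan_table_rows(THD *thd, TABLE *table)
{
  READ_RECORD info;
  int error;

  /* No SQL_SELECT here, so a plain sequential scan is set up. */
  init_read_record(&info, thd, table, (SQL_SELECT*) 0,
                   1 /* use_record_cache */, 1 /* print_error */,
                   FALSE /* disable_rr_cache */);

  while (!(error= info.read_record(&info)))
  {
    /* each successful call leaves the current row in table->record[0] */
  }

  end_read_record(&info);
  return error;
}
```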