diff options
author | Sachin <sachin.setiya@mariadb.com> | 2019-10-09 21:16:31 +0530 |
---|---|---|
committer | Sachin <sachin.setiya@mariadb.com> | 2020-02-03 12:44:31 +0530 |
commit | eed6d215f13cae8b84d9381918a3bd56dcf16188 (patch) | |
tree | eda4beb45c9f485f04662e4801011ddc1c728a51 | |
parent | b615d275b8c26ecec943003e1275ee19f94d9887 (diff) | |
download | mariadb-git-eed6d215f13cae8b84d9381918a3bd56dcf16188.tar.gz |
MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
Problem:-
When we do a bulk insert with more rows than
MI_MIN_ROWS_TO_DISABLE_INDEXES (100), we try to disable the indexes to
speed up the insert. However, the current logic also disables the long unique indexes.
Solution:- In ha_myisam::start_bulk_insert if we find long hash index
(HA_KEY_ALG_LONG_HASH) we will not disable the index.
This commit also refactors the mi_disable_indexes_for_rebuild function.
Since this function is called in only one place, it is inlined into
start_bulk_insert.
mi_clear_key_active is declared in myisamdef.h because it is now also
used in the ha_myisam.cc file.
(The same is done for the Aria storage engine.)
-rw-r--r-- | include/maria.h | 1 | ||||
-rw-r--r-- | include/myisam.h | 1 | ||||
-rw-r--r-- | mysql-test/main/long_unique_bugs.result | 3 | ||||
-rw-r--r-- | mysql-test/main/long_unique_bugs.test | 17 | ||||
-rw-r--r-- | storage/maria/ha_maria.cc | 27 | ||||
-rw-r--r-- | storage/maria/ma_check.c | 34 | ||||
-rw-r--r-- | storage/myisam/ha_myisam.cc | 30 | ||||
-rw-r--r-- | storage/myisam/mi_check.c | 34 | ||||
-rw-r--r-- | storage/myisam/myisamdef.h | 2 |
9 files changed, 79 insertions, 70 deletions
diff --git a/include/maria.h b/include/maria.h index a1396d3a4c7..a946363c57b 100644 --- a/include/maria.h +++ b/include/maria.h @@ -396,6 +396,7 @@ int maria_preload(MARIA_HA *info, ulonglong key_map, my_bool ignore_leaves); void maria_versioning(MARIA_HA *info, my_bool versioning); void maria_ignore_trids(MARIA_HA *info); uint maria_max_key_length(void); +my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows); #define maria_max_key_segments() HA_MAX_KEY_SEG /* fulltext functions */ diff --git a/include/myisam.h b/include/myisam.h index 216f041c8a9..f2e31bb9f60 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -430,6 +430,7 @@ int sort_ft_buf_flush(MI_SORT_PARAM *sort_param); int thr_write_keys(MI_SORT_PARAM *sort_param); int sort_write_record(MI_SORT_PARAM *sort_param); int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, ulonglong); +my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows); #ifdef __cplusplus } diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index d4b71e2bc46..c0ba4d0b87d 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -267,3 +267,6 @@ connection default; DROP TABLE t1, t2; CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; ERROR 42000: Specified key was too long; max key length is 1000 bytes +create table t1(a int, unique(a) using hash); +#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) +drop table t1; diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index 62c4076cee0..13a4e1367a0 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -323,3 +323,20 @@ DROP TABLE t1, t2; # --error ER_TOO_LONG_KEY CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; + +# +# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes +# +create table t1(a int, unique(a) using hash); +--let 
$count=150 +--let insert_stmt= insert into t1 values(200) +while ($count) +{ + --let $insert_stmt=$insert_stmt,($count) + --dec $count +} +--disable_query_log +--echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) +--eval $insert_stmt +--enable_query_log +drop table t1; diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 5dd40a7843a..dabacc5af53 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -2157,7 +2157,32 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags) else { my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT); - maria_disable_indexes_for_rebuild(file, rows, all_keys); + /* + Deactivate all indexes that can be recreated fast. + These include packed keys on which sorting will use more temporary + space than the max allowed file length or for which the unpacked keys + will take much more space than packed keys. + Note that 'rows' may be zero for the case when we don't know how many + rows we will put into the file. + */ + MARIA_SHARE *share= file->s; + MARIA_KEYDEF *key=share->keyinfo; + uint i; + + DBUG_ASSERT(share->state.state.records == 0 && + (!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES)); + for (i=0 ; i < share->base.keys ; i++,key++) + { + if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) && + ! 
maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 && + (all_keys || !(key->flag & HA_NOSAME)) && + table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH) + { + maria_clear_key_active(share->state.key_map, i); + file->update|= HA_STATE_CHANGED; + file->create_unique_index_by_sort= all_keys; + } + } } if (share->now_transactional) { diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 9094345c9c0..0a271a77a36 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -6441,7 +6441,7 @@ static ha_checksum maria_byte_checksum(const uchar *buf, uint length) return crc; } -static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows) +my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows) { uint key_maxlength=key->maxlength; if (key->flag & HA_FULLTEXT) @@ -6457,38 +6457,6 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows) } /* - Deactivate all indexes that can be recreated fast. - These include packed keys on which sorting will use more temporary - space than the max allowed file length or for which the unpacked keys - will take much more space than packed keys. - Note that 'rows' may be zero for the case when we don't know how many - rows we will put into the file. - */ - -void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows, - my_bool all_keys) -{ - MARIA_SHARE *share= info->s; - MARIA_KEYDEF *key=share->keyinfo; - uint i; - - DBUG_ASSERT(share->state.state.records == 0 && - (!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES)); - for (i=0 ; i < share->base.keys ; i++,key++) - { - if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) && - ! 
maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 && - (all_keys || !(key->flag & HA_NOSAME))) - { - maria_clear_key_active(share->state.key_map, i); - info->update|= HA_STATE_CHANGED; - info->create_unique_index_by_sort= all_keys; - } - } -} - - -/* Return TRUE if we can use repair by sorting One can set the force argument to force to use sorting even if the temporary file would be quite big! diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index c046572f0c8..f5f36266347 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1750,7 +1750,35 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags) else { my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT); - mi_disable_indexes_for_rebuild(file, rows, all_keys); + MYISAM_SHARE *share=file->s; + MI_KEYDEF *key=share->keyinfo; + uint i; + /* + Deactivate all indexes that can be recreated fast. + These include packed keys on which sorting will use more temporary + space than the max allowed file length or for which the unpacked keys + will take much more space than packed keys. + Note that 'rows' may be zero for the case when we don't know how many + rows we will put into the file. + Long Unique Index (HA_KEY_ALG_LONG_HASH) will not be disabled because + there unique property is enforced at the time of ha_write_row + (check_duplicate_long_entries). So we need active index at the time of + insert. + */ + DBUG_ASSERT(file->state->records == 0 && + (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES)); + for (i=0 ; i < share->base.keys ; i++,key++) + { + if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) && + ! 
mi_too_big_key_for_sort(key,rows) && file->s->base.auto_key != i+1 && + (all_keys || !(key->flag & HA_NOSAME)) && + table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH) + { + mi_clear_key_active(share->state.key_map, i); + file->update|= HA_STATE_CHANGED; + file->create_unique_index_by_sort= all_keys; + } + } } } else diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index f377028be52..3f3c60a4249 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -4667,7 +4667,7 @@ static ha_checksum mi_byte_checksum(const uchar *buf, uint length) return crc; } -static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows) +my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows) { uint key_maxlength=key->maxlength; if (key->flag & HA_FULLTEXT) @@ -4682,38 +4682,6 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows) } /* - Deactivate all indexes that can be recreated fast. - These include packed keys on which sorting will use more temporary - space than the max allowed file length or for which the unpacked keys - will take much more space than packed keys. - Note that 'rows' may be zero for the case when we don't know how many - rows we will put into the file. - */ - -void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows, - my_bool all_keys) -{ - MYISAM_SHARE *share=info->s; - MI_KEYDEF *key=share->keyinfo; - uint i; - - DBUG_ASSERT(info->state->records == 0 && - (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES)); - for (i=0 ; i < share->base.keys ; i++,key++) - { - if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) && - ! 
mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1 && - (all_keys || !(key->flag & HA_NOSAME))) - { - mi_clear_key_active(share->state.key_map, i); - info->update|= HA_STATE_CHANGED; - info->create_unique_index_by_sort= all_keys; - } - } -} - - -/* Return TRUE if we can use repair by sorting One can set the force argument to force to use sorting even if the temporary file would be quite big! diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index c6fa777774a..f7b61ae638c 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -715,8 +715,6 @@ void mi_restore_status(void *param); void mi_copy_status(void *to, void *from); my_bool mi_check_status(void *param); void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table); -void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows, - my_bool all_keys); extern MI_INFO *test_if_reopen(char *filename); my_bool check_table_is_closed(const char *name, const char *where); int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share); |