-rw-r--r--  mysql-test/main/long_unique.result                |  24
-rw-r--r--  mysql-test/main/long_unique.test                  |  29
-rw-r--r--  mysql-test/main/long_unique_delayed.result        |  19
-rw-r--r--  mysql-test/main/long_unique_delayed.test          |  22
-rw-r--r--  mysql-test/suite/versioning/r/long_unique.result  |   8
-rw-r--r--  mysql-test/suite/versioning/t/long_unique.test    |   9
-rw-r--r--  sql/ha_partition.cc                               |  12
-rw-r--r--  sql/handler.cc                                    | 136
-rw-r--r--  sql/handler.h                                     |   6
-rw-r--r--  sql/share/errmsg-utf8.txt                         |   2
-rw-r--r--  sql/sql_base.cc                                   |   7
-rw-r--r--  sql/sql_class.h                                   |   5
-rw-r--r--  sql/sql_delete.cc                                 |   7
-rw-r--r--  sql/sql_insert.cc                                 | 134
-rw-r--r--  sql/sql_load.cc                                   |  11
-rw-r--r--  sql/sql_select.cc                                 |   1
-rw-r--r--  sql/sql_table.cc                                  |  22
-rw-r--r--  sql/sql_update.cc                                 |   8
-rw-r--r--  sql/table.cc                                      |  74
-rw-r--r--  sql/table.h                                       |   4
-rw-r--r--  sql/temporary_tables.cc                           |   4
-rw-r--r--  storage/maria/ha_maria.cc                         |   2
-rw-r--r--  storage/myisam/ha_myisam.cc                       |   3
23 files changed, 409 insertions, 140 deletions
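
Every change below serves MariaDB's long unique keys: a UNIQUE constraint over a BLOB or an otherwise too-long key is stored as a hidden hash key (HA_KEY_ALG_LONG_HASH) and enforced at the SQL layer by probing the hash index before each write. A minimal sketch of the feature itself, assuming a MariaDB 10.4+ build (table name t is illustrative):

CREATE TABLE t (b BLOB, UNIQUE KEY uk (b)) ENGINE=MyISAM;
SHOW CREATE TABLE t;            # reports the key as: UNIQUE KEY `uk` (`b`) USING HASH
INSERT INTO t VALUES ('x');
INSERT INTO t VALUES ('x');     # ER_DUP_ENTRY, caught by the SQL-layer hash probe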
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index a4955b3e7b5..aa9c115e02c 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -1477,4 +1477,28 @@ id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 b
 drop table t1,t2;
+#
+# MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
+#
+CREATE TABLE t1 (a INT, b BLOB) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+CREATE TABLE t2 (c BIT, d BLOB, UNIQUE(d)) ENGINE=MyISAM;
+INSERT INTO t2 SELECT * FROM t1;
+Warnings:
+Warning	1264	Out of range value for column 'c' at row 2
+DROP TABLE t1, t2;
+#
+# MDEV-19338 Using AUTO_INCREMENT with long unique
+#
+CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
+ERROR HY000: AUTO_INCREMENT column `b` cannot be used in the UNIQUE index `a`
+#
+# MDEV-21819 Assertion `inited == NONE || update_handler != this'
+# failed in handler::ha_write_row
+#
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+ERROR 23000: Duplicate entry 'foo' for key 'b'
+DROP TABLE t1;
 set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index c0bd77ca5c9..b336b2d0b09 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -556,4 +556,33 @@ SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 SELECT t2.b FROM t1 JOIN t2 ON t1.d = t2.f WHERE t2.pk >= 20;
 drop table t1,t2;
 
+--echo #
+--echo # MDEV-21470 MyISAM start_bulk_insert doesn't work with long unique
+--echo #
+
+CREATE TABLE t1 (a INT, b BLOB) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+CREATE TABLE t2 (c BIT, d BLOB, UNIQUE(d)) ENGINE=MyISAM;
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-19338 Using AUTO_INCREMENT with long unique
+--echo #
+
+--error ER_NO_AUTOINCREMENT_WITH_UNIQUE
+CREATE TABLE t1 (pk INT, a TEXT NOT NULL DEFAULT '', PRIMARY KEY (pk), b INT AUTO_INCREMENT, UNIQUE(b), UNIQUE (a,b)) ENGINE=myisam;
+
+--echo #
+--echo # MDEV-21819 Assertion `inited == NONE || update_handler != this'
+--echo # failed in handler::ha_write_row
+--echo #
+
+CREATE OR REPLACE TABLE t1 (a INT, b BLOB, s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(b)) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01');
+--error ER_DUP_ENTRY
+DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01';
+DROP TABLE t1;
+
+# Cleanup
 set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique_delayed.result b/mysql-test/main/long_unique_delayed.result
new file mode 100644
index 00000000000..52365249097
--- /dev/null
+++ b/mysql-test/main/long_unique_delayed.result
@@ -0,0 +1,19 @@
+#
+# Test insert delayed with long unique keys
+#
+create table t1(a blob unique) engine=myisam;
+insert delayed into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890),('maria');
+insert delayed into t1 values(1),(9);
+flush tables t1;
+select count(*) from t1;
+count(*)
+11
+drop table t1;
+#
+# MDEV-19848
+# Server crashes in check_vcol_forward_refs upon INSERT DELAYED into
+# table with long blob key
+#
+CREATE TABLE t1 (a BLOB, UNIQUE(a)) ENGINE=MyISAM;
+INSERT DELAYED t1 () VALUES ();
+DROP TABLE t1;
diff --git a/mysql-test/main/long_unique_delayed.test b/mysql-test/main/long_unique_delayed.test
new file mode 100644
index 00000000000..c0de4a74913
--- /dev/null
+++ b/mysql-test/main/long_unique_delayed.test
@@ -0,0 +1,22 @@
+--source include/not_embedded.inc
+
+--echo #
+--echo # Test insert delayed with long unique keys
+--echo #
+
+create table t1(a blob unique) engine=myisam;
+insert delayed into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890),('maria');
+insert delayed into t1 values(1),(9);
+flush tables t1;
+select count(*) from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-19848
+--echo # Server crashes in check_vcol_forward_refs upon INSERT DELAYED into
+--echo # table with long blob key
+--echo #
+
+CREATE TABLE t1 (a BLOB, UNIQUE(a)) ENGINE=MyISAM;
+INSERT DELAYED t1 () VALUES ();
+DROP TABLE t1;
diff --git a/mysql-test/suite/versioning/r/long_unique.result b/mysql-test/suite/versioning/r/long_unique.result
new file mode 100644
index 00000000000..da07bc66e22
--- /dev/null
+++ b/mysql-test/suite/versioning/r/long_unique.result
@@ -0,0 +1,8 @@
+#
+# Assertion `inited == NONE || update_handler != this' failed in
+# handler::ha_write_row
+#
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
diff --git a/mysql-test/suite/versioning/t/long_unique.test b/mysql-test/suite/versioning/t/long_unique.test
new file mode 100644
index 00000000000..35be9315fa6
--- /dev/null
+++ b/mysql-test/suite/versioning/t/long_unique.test
@@ -0,0 +1,9 @@
+--echo #
+--echo # Assertion `inited == NONE || update_handler != this' failed in
+--echo # handler::ha_write_row
+--echo #
+
+CREATE TABLE t1 (f VARCHAR(4096), s DATE, e DATE, PERIOD FOR app(s,e), UNIQUE(f)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('foo', '2023-08-30', '2025-07-09'),('bar', '2021-01-01', '2021-12-31');
+DELETE FROM t1 FOR PORTION OF app FROM '2023-08-29' TO '2025-07-01';
+DROP TABLE t1;
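
The versioning and period tests above rely on the fact that DELETE ... FOR PORTION OF is not a pure delete: when the deleted interval lies strictly inside a row's period, the surviving head and tail portions are written back, so ha_write_row() runs while the delete scan is still open. A sketch of the row splitting, with hypothetical data (standard application-time period behavior):

CREATE TABLE p (f VARCHAR(10), s DATE, e DATE, PERIOD FOR app(s,e));
INSERT INTO p VALUES ('foo', '2021-01-01', '2026-01-01');
DELETE FROM p FOR PORTION OF app FROM '2022-01-01' TO '2023-01-01';
SELECT * FROM p ORDER BY s;
# foo  2021-01-01  2022-01-01    <- re-inserted head portion
# foo  2023-01-01  2026-01-01    <- re-inserted tail portion

With a long UNIQUE on f, the second of those re-inserts finds the first one and fails with ER_DUP_ENTRY, which is exactly what the MDEV-21819 test expects.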
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index b52c193ba13..41cf8bb2ccd 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4344,7 +4344,6 @@ int ha_partition::write_row(const uchar * buf)
       thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
     }
   }
-
   old_map= dbug_tmp_use_all_columns(table, table->read_set);
   error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
   dbug_tmp_restore_column_map(table->read_set, old_map);
@@ -4362,6 +4361,17 @@ int ha_partition::write_row(const uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %u", part_id));
+  /*
+    We have to call prepare_for_insert() if we have an update handler
+    in the underlying table (to clone the handler). This is because for
+    INSERTs, prepare_for_insert() is only called for the main table,
+    not for all partitions. This is to reduce the huge overhead of cloning
+    a possibly unneeded handler if there are many partitions.
+  */
+  if (table->s->long_unique_table &&
+      m_file[part_id]->update_handler == m_file[part_id] && inited == RND)
+    m_file[part_id]->prepare_for_insert(0);
+
   start_part_bulk_insert(thd, part_id);
 
   tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
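
The partition hunk above, and the handler.cc changes that follow, implement one idea: while a row is being written during a scan (inited == RND), the scanning cursor cannot also be used to probe the hidden hash index for duplicates, so a second handler instance — update_handler, cloned lazily and locked only for reading — does the probing. A statement that exercises the cloned handler, as a sketch (hypothetical table):

CREATE TABLE t (a INT, b BLOB, UNIQUE(b)) ENGINE=MyISAM;
INSERT INTO t VALUES (1,'x'),(2,'y');
# the UPDATE scans t with the main handler while each new value of b
# is checked against the hash index through the cloned update_handler:
UPDATE t SET b=CONCAT(b,'!');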
diff --git a/sql/handler.cc b/sql/handler.cc
index bcd7da1c711..9a3766c90de 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2753,6 +2753,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
                            HA_OPEN_IGNORE_IF_LOCKED, mem_root))
     goto err;
 
+  new_handler->update_handler= new_handler;
   return new_handler;
 
 err:
@@ -2760,6 +2761,40 @@ err:
   return NULL;
 }
 
+
+/**
+  Creates a clone of handler used in update for unique hash key.
+*/
+
+bool handler::clone_handler_for_update()
+{
+  handler *tmp;
+  DBUG_ASSERT(table->s->long_unique_table);
+
+  if (update_handler != this)
+    return 0;                                   // Already done
+  if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
+    return 1;
+  update_handler= tmp;
+  /* The update handler is only used to check if a row exists */
+  update_handler->ha_external_lock(table->in_use, F_RDLCK);
+  return 0;
+}
+
+
+/**
+  Delete the update handler object if one exists
+*/
+
+void handler::delete_update_handler()
+{
+  if (update_handler != this)
+  {
+    update_handler->ha_external_lock(table->in_use, F_UNLCK);
+    update_handler->ha_close();
+    delete update_handler;
+  }
+  update_handler= this;
+}
+
 LEX_CSTRING *handler::engine_name()
 {
   return hton_name(ht);
@@ -2917,7 +2952,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
   }
   reset_statistics();
   internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
-
+  update_handler= this;
   DBUG_RETURN(error);
 }
@@ -6622,6 +6657,8 @@ int handler::ha_reset()
   DBUG_ASSERT(inited == NONE);
   /* reset the bitmaps to point to defaults */
   table->default_column_bitmaps();
+  if (update_handler != this)
+    delete_update_handler();
   pushed_cond= NULL;
   tracker= NULL;
   mark_trx_read_write_done= 0;
@@ -6656,7 +6693,12 @@ static int wsrep_after_row(THD *thd)
 }
 #endif /* WITH_WSREP */
 
-static int check_duplicate_long_entry_key(TABLE *table, handler *h,
+
+/**
+  Check if there is a conflicting unique hash key
+*/
+
+static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
                                           const uchar *new_rec, uint key_no)
 {
   Field *hash_field;
@@ -6664,13 +6706,14 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
   KEY *key_info= table->key_info + key_no;
   hash_field= key_info->key_part->field;
   uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+  DBUG_ENTER("check_duplicate_long_entry_key");
 
   DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
-               key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
-              || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+               key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||
+              key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
 
   if (hash_field->is_real_null())
-    return 0;
+    DBUG_RETURN(0);
 
   key_copy(ptr, new_rec, key_info, key_info->key_length, false);
@@ -6678,11 +6721,11 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
     table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
                                                  table->s->reclength);
 
-  result= h->ha_index_init(key_no, 0);
+  result= handler->ha_index_init(key_no, 0);
   if (result)
-    return result;
+    DBUG_RETURN(result);
   store_record(table, check_unique_buf);
-  result= h->ha_index_read_map(table->record[0],
-                               ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+  result= handler->ha_index_read_map(table->record[0],
+                                     ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
   if (!result)
   {
@@ -6718,7 +6761,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
       }
     }
   }
-  while (!is_same && !(result= h->ha_index_next_same(table->record[0],
+  while (!is_same && !(result= handler->ha_index_next_same(table->record[0],
                        ptr, key_info->key_length)));
   if (is_same)
     error= HA_ERR_FOUND_DUPP_KEY;
@@ -6730,15 +6773,15 @@ exit:
   if (error == HA_ERR_FOUND_DUPP_KEY)
   {
     table->file->errkey= key_no;
-    if (h->ha_table_flags() & HA_DUPLICATE_POS)
+    if (handler->ha_table_flags() & HA_DUPLICATE_POS)
     {
-      h->position(table->record[0]);
-      memcpy(table->file->dup_ref, h->ref, h->ref_length);
+      handler->position(table->record[0]);
+      memcpy(table->file->dup_ref, handler->ref, handler->ref_length);
     }
   }
   restore_record(table, check_unique_buf);
-  h->ha_index_end();
-  return error;
+  handler->ha_index_end();
+  DBUG_RETURN(error);
 }
 
 /** @brief
@@ -6746,20 +6789,22 @@ exit:
     unique constraint on long columns.
     @returns 0 if no duplicate else returns error
   */
-static int check_duplicate_long_entries(TABLE *table, handler *h,
+
+static int check_duplicate_long_entries(TABLE *table, handler *handler,
                                         const uchar *new_rec)
 {
   table->file->errkey= -1;
-  int result;
   for (uint i= 0; i < table->s->keys; i++)
   {
+    int result;
    if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
-        (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
+        (result= check_duplicate_long_entry_key(table, handler, new_rec, i)))
      return result;
   }
   return 0;
 }
 
+
 /** @brief
     check whether updated records break the unique constraint
     on long columns.
@@ -6774,11 +6819,11 @@ static int check_duplicate_long_entries(TABLE *table, handler *h,
     key as a parameter in normal insert key should be -1
     @returns 0 if no duplicate else returns error
   */
-static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
+
+static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
 {
   Field *field;
   uint key_parts;
-  int error= 0;
   KEY *keyinfo;
   KEY_PART_INFO *keypart;
   /*
@@ -6786,7 +6831,7 @@ static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
     with respect to fields in hash_str
   */
   uint reclength= (uint) (table->record[1] - table->record[0]);
-  table->clone_handler_for_update();
+  table->file->clone_handler_for_update();
   for (uint i= 0; i < table->s->keys; i++)
   {
     keyinfo= table->key_info + i;
@@ -6796,13 +6841,15 @@ static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
       keypart= keyinfo->key_part - key_parts;
       for (uint j= 0; j < key_parts; j++, keypart++)
      {
+        int error;
        field= keypart->field;
-        /* Compare fields if they are different then check for duplicates*/
-        if(field->cmp_binary_offset(reclength))
+        /* If the fields differ, check whether the new value is a duplicate */
+        if (field->cmp_binary_offset(reclength))
        {
-          if((error= check_duplicate_long_entry_key(table, table->update_handler,
-                                                    new_rec, i)))
-            goto exit;
+          if ((error= (check_duplicate_long_entry_key(table,
+                                                      table->file->update_handler,
+                                                      new_rec, i))))
+            return error;
          /*
            break because check_duplicate_long_entries_key will take
            care of remaining fields
@@ -6812,10 +6859,35 @@ static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
        }
      }
    }
-  exit:
-  return error;
+  return 0;
 }
 
+
+/**
+  Do all initialization needed for insert
+
+  @param force_update_handler  Set to TRUE if we should always create an
+                               update handler. Needed if we don't know if we
+                               are going to do inserts while a scan is in
+                               progress.
+*/
+
+int handler::prepare_for_insert(bool force_update_handler)
+{
+  /* Preparation for unique handling of blobs */
+  if (table->s->long_unique_table && (inited == RND || force_update_handler))
+  {
+    /*
+      When doing a scan we can't use the same handler to check for
+      duplicate rows. Create a new temporary one.
+    */
+    if (clone_handler_for_update())
+      return 1;
+  }
+  return 0;
+}
+
+
 int handler::ha_write_row(const uchar *buf)
 {
   int error;
@@ -6831,10 +6903,8 @@ int handler::ha_write_row(const uchar *buf)
 
   if (table->s->long_unique_table && this == table->file)
   {
-    if (inited == RND)
-      table->clone_handler_for_update();
-    handler *h= table->update_handler ? table->update_handler : table->file;
-    if ((error= check_duplicate_long_entries(table, h, buf)))
+    DBUG_ASSERT(inited == NONE || update_handler != this);
+    if ((error= check_duplicate_long_entries(table, update_handler, buf)))
       DBUG_RETURN(error);
   }
   TABLE_IO_WAIT(tracker, PSI_TABLE_WRITE_ROW, MAX_KEY, error,
@@ -6877,10 +6947,8 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
   mark_trx_read_write();
   increment_statistics(&SSV::ha_update_count);
   if (table->s->long_unique_table &&
-      (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
-  {
+      (error= check_duplicate_long_entries_update(table, (uchar*) new_data)))
     return error;
-  }
 
   TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, active_index, error,
                 { error= update_row(old_data, new_data);})
diff --git a/sql/handler.h b/sql/handler.h
index a8bbc731964..176a67e2494 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -3065,6 +3065,7 @@ public:
   /** Length of ref (1-8 or the clustered key length) */
   uint ref_length;
   FT_INFO *ft_handler;
+  handler *update_handler;              /* Handler used in case of update */
   enum init_stat { NONE=0, INDEX, RND };
   init_stat inited, pre_inited;
 
@@ -3254,6 +3255,8 @@ public:
     DBUG_ASSERT(inited == NONE);
   }
   virtual handler *clone(const char *name, MEM_ROOT *mem_root);
+  bool clone_handler_for_update();
+  void delete_update_handler();
   /** This is called after create to allow us to set up cached variables */
   void init()
   {
@@ -4596,6 +4599,7 @@ protected:
 public:
   bool check_table_binlog_row_based(bool binlog_row);
+  int prepare_for_insert(bool force_update_handler= 0);
 
   inline void clear_cached_table_binlog_row_based_flag()
   {
@@ -4931,6 +4935,8 @@ public:
   {
     return false;
   }
+  /* If the table is using SQL-level unique constraints on some column */
+  inline bool has_long_unique();
 
   /* Used for ALTER TABLE.
      Some engines can handle some differences in indexes by themselves. */
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 2ff60a98c2b..d406c72d33c 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7957,3 +7957,5 @@ ER_SLAVE_IGNORED_SHARED_TABLE
         por "Slave SQL thread ignorado a consulta devido '%s'"
         spa "Slave SQL thread ignorado el query '%s'"
         swe "Slav SQL tråden ignorerade '%s' pga tabellen är delad"
+ER_NO_AUTOINCREMENT_WITH_UNIQUE
+        eng "AUTO_INCREMENT column %`s cannot be used in the UNIQUE index %`s"
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7a4926eb7c4..54ea259ee3b 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -889,9 +889,6 @@ void close_thread_tables(THD *thd)
 
   for (table= thd->open_tables; table; table= table->next)
   {
-    if (table->update_handler)
-      table->delete_update_handler();
-
     /* Table might be in use by some outer statement. */
     DBUG_PRINT("tcache", ("table: '%s'  query_id: %lu",
                           table->s->table_name.str, (ulong) table->query_id));
@@ -8725,8 +8722,8 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
 
     if (unlikely(field->invisible))
       continue;
-    else
-      value=v++;
+
+    value=v++;
 
     bool vers_sys_field= table->versioned() && field->vers_sys_field();
diff --git a/sql/sql_class.h b/sql/sql_class.h
index def9cc9a05d..85a99918a2f 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -6964,6 +6964,11 @@ inline int handler::ha_update_tmp_row(const uchar *old_data, uchar *new_data)
   return error;
 }
 
+inline bool handler::has_long_unique()
+{
+  return table->s->long_unique_table;
+}
+
 extern pthread_attr_t *get_connection_attrib(void);
 
 /**
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index bef77e1a2e9..da46a538a28 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -751,6 +751,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
                && !table->versioned()
                && table->file->has_transactions();
 
+  if (table->versioned(VERS_TIMESTAMP) ||
+      (table_list->has_period() && !portion_of_time_through_update))
+    table->file->prepare_for_insert(1);
+
   THD_STAGE_INFO(thd, stage_updating);
   while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
          likely(!thd->is_error()))
@@ -1237,6 +1241,9 @@ multi_delete::initialize_tables(JOIN *join)
       normal_tables= 1;
       tbl->prepare_triggers_for_delete_stmt_or_event();
       tbl->prepare_for_position();
+
+      if (tbl->versioned(VERS_TIMESTAMP))
+        tbl->file->prepare_for_insert(1);
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index d2331fd71ef..801871fcb5a 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -882,9 +882,12 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
   if (duplic != DUP_ERROR || ignore)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      goto abort;
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        goto abort;
+      table->file->prepare_for_insert();
+    }
   }
   /**
     This is a simple check for the case when the table has a trigger
@@ -2541,6 +2544,11 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   uchar *bitmap;
   char *copy_tmp;
   uint bitmaps_used;
+  KEY_PART_INFO *key_part, *end_part;
+  Field **default_fields, **virtual_fields;
+  KEY *keys;
+  KEY_PART_INFO *key_parts;
+  uchar *record;
   DBUG_ENTER("Delayed_insert::get_local_table");
 
   /* First request insert thread to get a lock */
@@ -2587,18 +2595,32 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   share= table->s;
 
   /*
-    Allocate memory for the TABLE object, the field pointers array, and
-    one record buffer of reclength size. Normally a table has three
-    record buffers of rec_buff_length size, which includes alignment
-    bytes. Since the table copy is used for creating one record only,
-    the other record buffers and alignment are unnecessary.
+    Allocate memory for the TABLE object, the field pointers array,
+    and one record buffer of reclength size.
+    Normally a table has three record buffers of rec_buff_length size,
+    which includes alignment bytes. Since the table copy is used for
+    creating one record only, the other record buffers and alignment
+    are unnecessary.
+    As the table will also need to calculate default values and
+    expressions, we have to allocate our own version of fields, keys and
+    key parts. The keys and key parts are needed as parse_vcol_defs()
+    changes them in case of long hash keys.
   */
   THD_STAGE_INFO(client_thd, stage_allocating_local_table);
-  copy_tmp= (char*) client_thd->alloc(sizeof(*copy)+
-                                      (share->fields+1)*sizeof(Field**)+
-                                      share->reclength +
-                                      share->column_bitmap_size*4);
-  if (!copy_tmp)
+  if (!multi_alloc_root(client_thd->mem_root,
+                        &copy_tmp, sizeof(*table),
+                        &field, (uint) (share->fields+1)*sizeof(Field**),
+                        &default_fields,
+                        (share->default_fields +
+                         share->default_expressions + 1) * sizeof(Field*),
+                        &virtual_fields,
+                        (share->virtual_fields + 1) * sizeof(Field*),
+                        &keys, share->keys * sizeof(KEY),
+                        &key_parts,
+                        share->ext_key_parts * sizeof(KEY_PART_INFO),
+                        &record, (uint) share->reclength,
+                        &bitmap, (uint) share->column_bitmap_size*4,
+                        NullS))
     goto error;
 
   /* Copy the TABLE object. */
@@ -2607,27 +2629,21 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
 
   /* We don't need to change the file handler here */
   /* Assign the pointers for the field pointers array and the record. */
-  field= copy->field= (Field**) (copy + 1);
-  bitmap= (uchar*) (field + share->fields + 1);
-  copy->record[0]= (bitmap + share->column_bitmap_size*4);
+  copy->field= field;
+  copy->record[0]= record;
   memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
 
   if (share->default_fields || share->default_expressions)
-  {
-    copy->default_field= (Field**)
-      client_thd->alloc((share->default_fields +
-                         share->default_expressions + 1)*
-                        sizeof(Field*));
-    if (!copy->default_field)
-      goto error;
-  }
-
+    copy->default_field= default_fields;
   if (share->virtual_fields)
-  {
-    copy->vfield= (Field **) client_thd->alloc((share->virtual_fields+1)*
-                                               sizeof(Field*));
-    if (!copy->vfield)
-      goto error;
-  }
+    copy->vfield= virtual_fields;
+
+  copy->key_info= keys;
+  copy->base_key_part= key_parts;
+
+  /* Copy key and key parts from the original table */
+  memcpy(keys, table->key_info, sizeof(KEY) * share->keys);
+  memcpy(key_parts, table->base_key_part,
+         sizeof(KEY_PART_INFO) * share->ext_key_parts);
+
   copy->expr_arena= NULL;
 
   /* Ensure we don't use the table list of the original table */
@@ -2649,6 +2665,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
     (*field)->unireg_check= (*org_field)->unireg_check;
     (*field)->orig_table= copy;                 // Remove connection
     (*field)->move_field_offset(adjust_ptrs);   // Point at copy->record[0]
+    (*field)->flags|= ((*org_field)->flags & LONG_UNIQUE_HASH_FIELD);
+    (*field)->invisible= (*org_field)->invisible;
     memdup_vcol(client_thd, (*field)->vcol_info);
     memdup_vcol(client_thd, (*field)->default_value);
     memdup_vcol(client_thd, (*field)->check_constraint);
@@ -2657,6 +2675,35 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   }
   *field=0;
 
+  /* The following is needed for long hash keys */
+  key_part= copy->base_key_part;
+  for (KEY *key= copy->key_info, *end_key= key + share->keys ;
+       key < end_key;
+       key++)
+  {
+    key->key_part= key_part;
+    key_part+= key->ext_key_parts;
+    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+      key_part++;
+  }
+
+  for (key_part= copy->base_key_part,
+         end_part= key_part + share->ext_key_parts ;
+       key_part < end_part ;
+       key_part++)
+  {
+    Field *field= key_part->field= copy->field[key_part->fieldnr - 1];
+
+    /* Fix partial fields, like in open_table_from_share() */
+    if (field->key_length() != key_part->length &&
+        !(field->flags & BLOB_FLAG))
+    {
+      field= key_part->field= field->make_new_field(client_thd->mem_root,
+                                                    copy, 0);
+      field->field_length= key_part->length;
+    }
+  }
+
   if (share->virtual_fields || share->default_expressions ||
       share->default_fields)
   {
@@ -3259,6 +3306,12 @@ pthread_handler_t handle_delayed_insert(void *arg)
       di->table->file->ha_release_auto_increment();
       mysql_unlock_tables(thd, lock);
       trans_commit_stmt(thd);
+      /*
+        We have to delete the update handler here, as we need to create
+        a new one for the next LOCK TABLES to ensure that both handlers
+        have the same read view.
+      */
+      di->table->file->delete_update_handler();
       di->group_count=0;
       mysql_audit_release(thd);
       mysql_mutex_lock(&di->mutex);
@@ -3390,6 +3443,7 @@ bool Delayed_insert::handle_inserts(void)
 
   if (table->file->ha_rnd_init_with_error(0))
     goto err;
+  table->file->prepare_for_insert();
 
   /*
     We can't use row caching when using the binary log because if
@@ -3876,9 +3930,12 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      DBUG_RETURN(1);
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        DBUG_RETURN(1);
+      table->file->prepare_for_insert();
+    }
   }
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
@@ -4628,9 +4685,12 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table->file->ha_table_flags() & HA_DUPLICATE_POS &&
-        table->file->ha_rnd_init_with_error(0))
-      DBUG_RETURN(1);
+    if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
+    {
+      if (table->file->ha_rnd_init_with_error(0))
+        DBUG_RETURN(1);
+      table->file->prepare_for_insert();
+    }
   }
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
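
INSERT DELAYED funnels rows through a per-table delayed thread that works on a private copy of the TABLE object; the reworked Delayed_insert::get_local_table() above now gives that copy its own KEY and KEY_PART_INFO arrays (plus the hidden hash fields), because parse_vcol_defs() rewires key parts for long hash keys. The user-visible contract, sketched after long_unique_delayed.test (values are illustrative):

CREATE TABLE t (a BLOB, UNIQUE(a)) ENGINE=MyISAM;
INSERT DELAYED INTO t VALUES ('x'),('x'),('y');
FLUSH TABLES t;               # wait for the delayed thread to drain its queue
SELECT COUNT(*) FROM t;       # 2: the duplicate was rejected inside the delayed thread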
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 6d3f9e540a7..efbbe28bcad 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -391,6 +391,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
     DBUG_RETURN(TRUE);
   if (thd->lex->handle_list_of_derived(table_list, DT_PREPARE))
     DBUG_RETURN(TRUE);
+
   if (setup_tables_and_check_access(thd,
                                     &thd->lex->first_select_lex()->context,
                                     &thd->lex->first_select_lex()->
@@ -647,10 +648,12 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
 
   thd->abort_on_warning= !ignore && thd->is_strict_mode();
 
-  if ((table_list->table->file->ha_table_flags() & HA_DUPLICATE_POS) &&
-      (error= table_list->table->file->ha_rnd_init_with_error(0)))
-    goto err;
-
+  if ((table_list->table->file->ha_table_flags() & HA_DUPLICATE_POS))
+  {
+    if ((error= table_list->table->file->ha_rnd_init_with_error(0)))
+      goto err;
+    table->file->prepare_for_insert();
+  }
   thd_progress_init(thd, 2);
   if (table_list->table->validate_default_values_of_unset_fields(thd))
   {
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 35b5665d1d0..40517a6f343 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -18316,7 +18316,6 @@ TABLE *Create_tmp_table::start(THD *thd,
   table->copy_blobs= 1;
   table->in_use= thd;
   table->no_rows_with_nulls= param->force_not_null_cols;
-  table->update_handler= NULL;
   table->check_unique_buf= NULL;
 
   table->s= share;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index d589fbfb32d..c5ce3a46499 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3820,6 +3820,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
   for (; (key=key_iterator++) ; key_number++)
   {
     uint key_length=0;
+    Create_field *auto_increment_key= 0;
     Key_part_spec *column;
 
     is_hash_field_needed= false;
@@ -4069,6 +4070,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         DBUG_ASSERT(key->type != Key::SPATIAL);
         if (column_nr == 0 || (file->ha_table_flags() & HA_AUTO_PART_KEY))
           auto_increment--;                     // Field is used
+        auto_increment_key= sql_field;
       }
 
       key_part_info->fieldnr= field;
@@ -4157,6 +4159,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         }
       }
     }
+
     /* We cannot store key_part_length of more than 2^16 - 1 in frm */
     if (is_hash_field_needed && column->length > UINT_MAX16)
     {
@@ -4223,11 +4226,22 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
       DBUG_RETURN(TRUE);
     }
 
-    if (is_hash_field_needed && key_info->algorithm != HA_KEY_ALG_UNDEF &&
-        key_info->algorithm != HA_KEY_ALG_HASH )
+    /* Check long unique keys */
+    if (is_hash_field_needed)
     {
-      my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
-      DBUG_RETURN(TRUE);
+      if (auto_increment_key)
+      {
+        my_error(ER_NO_AUTOINCREMENT_WITH_UNIQUE, MYF(0),
+                 sql_field->field_name.str,
+                 key_info->name.str);
+        DBUG_RETURN(TRUE);
+      }
+      if (key_info->algorithm != HA_KEY_ALG_UNDEF &&
+          key_info->algorithm != HA_KEY_ALG_HASH )
+      {
+        my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+        DBUG_RETURN(TRUE);
+      }
     }
     if (is_hash_field_needed ||
         (key_info->algorithm == HA_KEY_ALG_HASH &&
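
Note the scope of the new check: only an AUTO_INCREMENT column inside a hash-based unique key is rejected with ER_NO_AUTOINCREMENT_WITH_UNIQUE; the same column in an ordinary btree unique next to a separate long unique should still be accepted. A sketch, assuming this patch is applied:

CREATE TABLE ok (b INT AUTO_INCREMENT, a TEXT, UNIQUE(b), UNIQUE(a)) ENGINE=MyISAM;     # accepted: b's key is a btree
CREATE TABLE bad (b INT AUTO_INCREMENT, a TEXT, UNIQUE(b), UNIQUE(a,b)) ENGINE=MyISAM;  # rejected: b is part of the hash key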
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d0a920fd473..5a4bf5ac07f 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -968,6 +968,9 @@ update_begin:
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();
 
+  if (table->versioned(VERS_TIMESTAMP) || table_list->has_period())
+    table->file->prepare_for_insert(1);
+
   THD_STAGE_INFO(thd, stage_updating);
   while (!(error=info.read_record()) && !thd->killed)
   {
@@ -1848,9 +1851,8 @@ int mysql_multi_update_prepare(THD *thd)
   /* now lock and fill tables */
   if (!thd->stmt_arena->is_stmt_prepare() &&
       lock_tables(thd, table_list, table_count, 0))
-  {
     DBUG_RETURN(TRUE);
-  }
+
   (void) read_statistics_for_tables_if_needed(thd, table_list);
 
   /* @todo: downgrade the metadata locks here. */
@@ -2026,6 +2028,8 @@ int multi_update::prepare(List<Item> &not_used_values,
     {
       table->read_set= &table->def_read_set;
       bitmap_union(table->read_set, &table->tmp_set);
+      if (table->versioned(VERS_TIMESTAMP))
+        table->file->prepare_for_insert(1);
     }
   }
   if (unlikely(error))
+ */ + key= table->s->key_info + key_index; + if (key->algorithm == HA_KEY_ALG_LONG_HASH) + { + parts= key->user_defined_key_parts; + if (key_part[parts].fieldnr == field->field_index + 1) + break; + key_part++; + } + key_part+= key->ext_key_parts; } - if (!key || key->algorithm != HA_KEY_ALG_LONG_HASH) + if (key_index == table->s->keys) goto end; - KEY_PART_INFO *keypart; - for (uint i=0; i < parts; i++) + + /* Correct the key & key_parts if this function has been called before */ + key= table->key_info + key_index; + key->key_part= key_part; + + for (uint i=0; i < parts; i++, key_part++) { - keypart= key->key_part + i; - if (keypart->key_part_flag & HA_PART_KEY_SEG) + if (key_part->key_part_flag & HA_PART_KEY_SEG) { - int length= keypart->length/keypart->field->charset()->mbmaxlen; + int length= key_part->length/key_part->field->charset()->mbmaxlen; list_item= new (mem_root) Item_func_left(thd, - new (mem_root) Item_field(thd, keypart->field), + new (mem_root) Item_field(thd, key_part->field), new (mem_root) Item_int(thd, length)); list_item->fix_fields(thd, NULL); - keypart->field->vcol_info= - table->field[keypart->field->field_index]->vcol_info; + key_part->field->vcol_info= + table->field[key_part->field->field_index]->vcol_info; } else - list_item= new (mem_root) Item_field(thd, keypart->field); + list_item= new (mem_root) Item_field(thd, key_part->field); field_list->push_back(list_item, mem_root); } Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list); @@ -3871,6 +3886,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, goto err; outparam->key_info= key_info; key_part= (reinterpret_cast<KEY_PART_INFO*>(key_info+share->keys)); + outparam->base_key_part= key_part; memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys); memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) * @@ -5244,7 +5260,6 @@ void TABLE::init(THD *thd, TABLE_LIST *tl) range_rowid_filter_cost_info_elems= 0; range_rowid_filter_cost_info_ptr= NULL; range_rowid_filter_cost_info= NULL; - update_handler= NULL; check_unique_buf= NULL; vers_write= s->versioned; quick_condition_rows=0; @@ -9246,35 +9261,6 @@ void re_setup_keyinfo_hash(KEY *key_info) key_info->ext_key_parts= 1; key_info->flags&= ~HA_NOSAME; } -/** - @brief clone of current handler. - Creates a clone of handler used in update for - unique hash key. -*/ -void TABLE::clone_handler_for_update() -{ - if (this->update_handler) - return; - handler *update_handler= NULL; - if (!s->long_unique_table) - return; - update_handler= file->clone(s->normalized_path.str, - in_use->mem_root); - update_handler->ha_external_lock(in_use, F_RDLCK); - this->update_handler= update_handler; - return; -} - -/** - @brief Deletes update handler object -*/ -void TABLE::delete_update_handler() -{ - update_handler->ha_external_lock(in_use, F_UNLCK); - update_handler->ha_close(); - delete update_handler; - this->update_handler= NULL; -} LEX_CSTRING *fk_option_name(enum_fk_option opt) { diff --git a/sql/table.h b/sql/table.h index 9430ceb5fe3..fd7d23a13c8 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1153,7 +1153,6 @@ public: uchar *record[3]; /* Pointer to records */ /* record buf to resolve hash collisions for long UNIQUE constraints */ uchar *check_unique_buf; - handler *update_handler; /* Handler used in case of update */ uchar *write_row_record; /* Used as optimisation in THD::write_row */ uchar *insert_values; /* used by INSERT ... 
diff --git a/sql/table.h b/sql/table.h
index 9430ceb5fe3..fd7d23a13c8 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1153,7 +1153,6 @@ public:
   uchar *record[3];                     /* Pointer to records */
   /* record buf to resolve hash collisions for long UNIQUE constraints */
   uchar *check_unique_buf;
-  handler *update_handler;              /* Handler used in case of update */
   uchar *write_row_record;              /* Used as optimisation in
                                            THD::write_row */
   uchar *insert_values;                 /* used by INSERT ... UPDATE */
@@ -1182,6 +1181,7 @@ public:
   /* Map of keys dependent on some constraint */
   key_map constraint_dependent_keys;
   KEY  *key_info;                       /* data of keys in database */
+  KEY_PART_INFO *base_key_part;         /* Where key parts are stored */
 
   Field **field;                        /* Pointer to fields */
   Field **vfield;                       /* Pointer to virtual fields*/
@@ -1640,8 +1640,6 @@ public:
   void vers_update_fields();
   void vers_update_end();
   void find_constraint_correlated_indexes();
-  void clone_handler_for_update();
-  void delete_update_handler();
 
 /** Number of additional fields used in versioned tables */
 #define VERSIONING_FIELDS 2
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index c675b11741a..46465294893 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -748,11 +748,7 @@ void THD::mark_tmp_tables_as_free_for_reuse()
     while ((table= tables_it++))
     {
       if ((table->query_id == query_id) && !table->open_by_handler)
-      {
-        if (table->update_handler)
-          table->delete_update_handler();
         mark_tmp_table_as_free_for_reuse(table);
-      }
     }
   }
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 0ccc651cc0e..14178d146b8 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -2605,6 +2605,8 @@ int ha_maria::extra(enum ha_extra_function operation)
   if (operation == HA_EXTRA_MMAP && !opt_maria_use_mmap)
     return 0;
 #endif
+  if (operation == HA_EXTRA_WRITE_CACHE && has_long_unique())
+    return 0;
 
   /*
     We have to set file->trn here because in some cases we call
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 0d3c28fff56..69406177e1c 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -2120,7 +2120,8 @@ int ha_myisam::info(uint flag)
 
 int ha_myisam::extra(enum ha_extra_function operation)
 {
-  if (operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap)
+  if ((operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap) ||
+      (operation == HA_EXTRA_WRITE_CACHE && has_long_unique()))
     return 0;
   return mi_extra(file, operation, 0);
 }
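
The two engine hunks are the storage-side half of MDEV-21470: MyISAM and Aria now ignore HA_EXTRA_WRITE_CACHE when the table has a long unique, because rows buffered in the write cache would not yet be visible to the SQL-layer duplicate probe. The path that used to trigger it, per the test above:

CREATE TABLE src (a INT, b BLOB) ENGINE=MyISAM;
INSERT INTO src VALUES (1,'foo'),(2,'bar');
CREATE TABLE dst (c BIT, d BLOB, UNIQUE(d)) ENGINE=MyISAM;
INSERT INTO dst SELECT * FROM src;     # INSERT ... SELECT is what enables the write cache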