author     Sergei Golubchik <serg@mariadb.org>       2020-03-05 19:19:57 +0100
committer  Sergei Golubchik <serg@mariadb.org>       2020-03-31 17:42:34 +0200
commit     0515577d128318e1b62511846d88d0c56226168d (patch)
tree       6339b3083879f29ce04fd77f10b7c4d6050e49e9 /sql
parent     045510cb92448ab54ff8832b1947707287975bae (diff)
download   mariadb-git-0515577d128318e1b62511846d88d0c56226168d.tar.gz
cleanup: prepare "update_handler" for WITHOUT OVERLAPS
* rename to a generic name, lookup_handler (see the sketch after this list)
* move remaining initializations from query exec to prepare time
* simplify/unify key handling in open_table_from_share and delayed
* remove dead code
* move tests where they belong
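
Since the bullet list compresses a lot, here is a minimal, self-contained C++ sketch of the lifecycle the patch converges on. The Handler type below is a simplified stand-in, not the real MariaDB handler class, and locking is reduced to comments: the handler points to itself until a duplicate-check clone is actually needed, creation is idempotent, and tear-down now happens when the table lock is released instead of in ha_reset().

#include <iostream>

// Simplified stand-in for MariaDB's handler class; illustrates only the
// lookup_handler lifecycle introduced by this patch.
struct Handler
{
  Handler *lookup_handler;              // points to self until a clone is made
  Handler() : lookup_handler(this) {}

  Handler *clone() { return new Handler(); }  // stand-in for handler::clone()

  // Lazily create the clone used for duplicate-key lookups
  // (create_lookup_handler in the patch).
  int create_lookup_handler()
  {
    if (lookup_handler != this)
      return 0;                         // already cloned, nothing to do
    Handler *tmp= clone();
    if (!tmp)
      return 1;
    lookup_handler= tmp;
    return 0;                           // real code also read-locks the clone
  }

  // Tear-down is now hooked into external unlock
  // (ha_external_lock(F_UNLCK) in the patch).
  void external_unlock()
  {
    if (lookup_handler != this)
    {
      delete lookup_handler;            // real code unlocks and closes it first
      lookup_handler= this;
    }
  }
};

int main()
{
  Handler h;
  h.create_lookup_handler();            // first call clones
  h.create_lookup_handler();            // second call is a no-op
  std::cout << (h.lookup_handler != &h) << '\n';  // 1: clone exists
  h.external_unlock();                  // clone destroyed with the lock
  std::cout << (h.lookup_handler == &h) << '\n';  // 1: back to self
  return 0;
}

Hooking the tear-down into the unlock path (rather than ha_reset()) is presumably what lets the patch drop the explicit delete_update_handler() call from the delayed-insert thread: the clone now dies with the table lock, so each LOCK TABLES round starts with a fresh one and the read views stay in sync.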
Diffstat (limited to 'sql')
-rw-r--r--   sql/ha_partition.cc |  19
-rw-r--r--   sql/handler.cc      | 141
-rw-r--r--   sql/handler.h       |  21
-rw-r--r--   sql/sql_delete.cc   |   4
-rw-r--r--   sql/sql_insert.cc   |  55
-rw-r--r--   sql/sql_load.cc     |   2
-rw-r--r--   sql/sql_select.cc   |   1
-rw-r--r--   sql/sql_table.cc    |   2
-rw-r--r--   sql/sql_update.cc   |   6
-rw-r--r--   sql/table.cc        | 142
-rw-r--r--   sql/table.h         |   4

11 files changed, 160 insertions, 237 deletions
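
One reading aid before the diff itself: the subtlest behavioral change is the errkey split. Below is a hedged sketch of the new reporting flow, using stand-in types rather than the real handler API — duplicates found through the lookup handler land in lookup_errkey, and get_dup_key() prefers that value because the storage engine itself never saw the conflict.

#include <iostream>

// Simplified sketch of the errkey/lookup_errkey split (stand-in type,
// not the real MariaDB handler API).
struct DupKeyReporter
{
  static const unsigned NO_KEY= (unsigned)-1;
  unsigned errkey= NO_KEY;         // duplicate key reported by the engine
  unsigned lookup_errkey= NO_KEY;  // duplicate found by the long-unique lookup

  // Called when check_duplicate_long_entry_key() finds a hash collision match.
  void report_long_unique_dup(unsigned key_no) { lookup_errkey= key_no; }

  // get_dup_key(): a long-unique duplicate takes precedence, because the
  // engine never saw that conflict -- it lives only in the lookup handler.
  unsigned get_dup_key(unsigned engine_errkey)
  {
    if (lookup_errkey != NO_KEY)
      return errkey= lookup_errkey;
    return errkey= engine_errkey;
  }
};

int main()
{
  DupKeyReporter r;
  r.report_long_unique_dup(2);            // hash index 2 collided
  std::cout << r.get_dup_key(0) << '\n';  // prints 2, not the engine's 0
  return 0;
}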
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index be2e665d7e2..0ec1f2138ab 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4352,16 +4352,6 @@ int ha_partition::write_row(const uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %u", part_id));
-  /*
-    We have to call prepare_for_insert() if we have an update handler
-    in the underlying table (to clone the handler). This is because for
-    INSERT's prepare_for_insert() is only called for the main table,
-    not for all partitions. This is to reduce the huge overhead of cloning
-    a possible not needed handler if there are many partitions.
-  */
-  if (table->s->long_unique_table &&
-      m_file[part_id]->update_handler == m_file[part_id] && inited == RND)
-    m_file[part_id]->prepare_for_insert(0);
 
   start_part_bulk_insert(thd, part_id);
 
@@ -9940,8 +9930,13 @@ void ha_partition::print_error(int error, myf errflag)
       /* fall through to generic error handling. */
     }
 
-  /* In case m_file has not been initialized, like in bug#42438 */
-  if (m_file)
+  /*
+    We choose a main handler's print_error if:
+    * m_file has not been initialized, like in bug#42438
+    * lookup_errkey is set, which means that an error has occured in the
+      main handler, not in individual partitions
+  */
+  if (m_file && lookup_errkey == (uint)-1)
   {
     if (m_last_part >= m_tot_parts)
     {
diff --git a/sql/handler.cc b/sql/handler.cc
index b1b7a60a413..c76234f52a1 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2753,7 +2753,6 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
                                          HA_OPEN_IGNORE_IF_LOCKED, mem_root))
     goto err;
 
-  new_handler->update_handler= new_handler;
   return new_handler;
 
 err:
@@ -2763,36 +2762,20 @@ err:
 
 
 /**
-  Creates a clone of handler used in update for unique hash key.
-*/
+  clone of current handler.
 
-bool handler::clone_handler_for_update()
+  Creates a clone of handler used for unique hash key and WITHOUT OVERLAPS.
+
+  @return error code
+*/
+int handler::create_lookup_handler()
 {
   handler *tmp;
-  DBUG_ASSERT(table->s->long_unique_table);
-
-  if (update_handler != this)
-    return 0;                                   // Already done
+  if (lookup_handler != this)
+    return 0;
   if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root)))
     return 1;
-  update_handler= tmp;
-  /* The update handler is only used to check if a row exists */
-  update_handler->ha_external_lock(table->in_use, F_RDLCK);
-  return 0;
-}
-
-/**
-  Delete update handler object if it exists
-*/
-void handler::delete_update_handler()
-{
-  if (update_handler != this)
-  {
-    update_handler->ha_external_lock(table->in_use, F_UNLCK);
-    update_handler->ha_close();
-    delete update_handler;
-  }
-  update_handler= this;
+  lookup_handler= tmp;
+  return lookup_handler->ha_external_lock(table->in_use, F_RDLCK);
 }
 
 LEX_CSTRING *handler::engine_name()
@@ -2952,7 +2935,6 @@
   }
   reset_statistics();
   internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
-  update_handler= this;
 
   DBUG_RETURN(error);
 }
@@ -4349,15 +4331,17 @@
 uint handler::get_dup_key(int error)
 {
   DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
   DBUG_ENTER("handler::get_dup_key");
-  if (table->s->long_unique_table && table->file->errkey < table->s->keys)
-    DBUG_RETURN(table->file->errkey);
-  table->file->errkey = (uint) -1;
+
+  if (lookup_errkey != (uint)-1)
+    DBUG_RETURN(errkey= lookup_errkey);
+
+  errkey= (uint)-1;
   if (error == HA_ERR_FOUND_DUPP_KEY ||
       error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
       error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL ||
       error == HA_ERR_DROP_INDEX_FK)
-    table->file->info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
-  DBUG_RETURN(table->file->errkey);
+    info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
+  DBUG_RETURN(errkey);
 }
@@ -6497,6 +6481,14 @@ int handler::ha_external_lock(THD *thd, int lock_type)
     mysql_audit_external_lock(thd, table_share, lock_type);
   }
 
+  if (lock_type == F_UNLCK && lookup_handler != this)
+  {
+    lookup_handler->ha_external_lock(table->in_use, F_UNLCK);
+    lookup_handler->close();
+    delete lookup_handler;
+    lookup_handler= this;
+  }
+
   if (MYSQL_HANDLER_RDLOCK_DONE_ENABLED() ||
       MYSQL_HANDLER_WRLOCK_DONE_ENABLED() ||
       MYSQL_HANDLER_UNLOCK_DONE_ENABLED())
@@ -6535,8 +6527,6 @@ int handler::ha_reset()
   DBUG_ASSERT(inited == NONE);
   /* reset the bitmaps to point to defaults */
   table->default_column_bitmaps();
-  if (update_handler != this)
-    delete_update_handler();
   pushed_cond= NULL;
   tracker= NULL;
   mark_trx_read_write_done= 0;
@@ -6580,15 +6570,13 @@ static int wsrep_after_row(THD *thd)
   Check if there is a conflicting unique hash key
 */
 
-static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
-                                          const uchar *new_rec, uint key_no)
+int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
 {
-  Field *hash_field;
   int result, error= 0;
   KEY *key_info= table->key_info + key_no;
-  hash_field= key_info->key_part->field;
+  Field *hash_field= key_info->key_part->field;
   uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
-  DBUG_ENTER("check_duplicate_long_entry_key");
+  DBUG_ENTER("handler::check_duplicate_long_entry_key");
 
   DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
                key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) ||
@@ -6599,15 +6587,11 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *handler,
 
   key_copy(ptr, new_rec, key_info, key_info->key_length, false);
 
-  if (!table->check_unique_buf)
-    table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
-                                                 table->s->reclength);
-
-  result= handler->ha_index_init(key_no, 0);
+  result= lookup_handler->ha_index_init(key_no, 0);
   if (result)
    DBUG_RETURN(result);
-  store_record(table, check_unique_buf);
-  result= handler->ha_index_read_map(table->record[0],
+  store_record(table, file->lookup_buffer);
+  result= lookup_handler->ha_index_read_map(table->record[0],
                                      ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
   if (!result)
   {
@@ -6618,7 +6602,7 @@
       uint arg_count= temp->argument_count();
       do
       {
-        my_ptrdiff_t diff= table->check_unique_buf - new_rec;
+        my_ptrdiff_t diff= table->file->lookup_buffer - new_rec;
         is_same= true;
         for (uint j=0; is_same && j < arg_count; j++)
         {
@@ -6643,8 +6627,9 @@
         }
       }
     }
-    while (!is_same && !(result= handler->ha_index_next_same(table->record[0],
-                         ptr, key_info->key_length)));
+    while (!is_same &&
+           !(result= lookup_handler->ha_index_next_same(table->record[0],
+                                         ptr, key_info->key_length)));
     if (is_same)
       error= HA_ERR_FOUND_DUPP_KEY;
     goto exit;
@@ -6654,33 +6639,40 @@
 exit:
   if (error == HA_ERR_FOUND_DUPP_KEY)
   {
-    table->file->errkey= key_no;
-    if (handler->ha_table_flags() & HA_DUPLICATE_POS)
+    table->file->lookup_errkey= key_no;
+    if (ha_table_flags() & HA_DUPLICATE_POS)
     {
-      handler->position(table->record[0]);
-      memcpy(table->file->dup_ref, handler->ref, handler->ref_length);
+      lookup_handler->position(table->record[0]);
+      memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
    }
  }
-  restore_record(table, check_unique_buf);
-  handler->ha_index_end();
+  restore_record(table, file->lookup_buffer);
+  lookup_handler->ha_index_end();
   DBUG_RETURN(error);
 }
 
+void handler::alloc_lookup_buffer()
+{
+  if (!lookup_buffer)
+    lookup_buffer= (uchar*)alloc_root(&table->mem_root,
+                                      table_share->max_unique_length +
+                                      table_share->null_fields +
+                                      table_share->reclength);
+}
+
 /** @brief
     check whether inserted records breaks the
     unique constraint on long columns.
     @returns 0 if no duplicate else returns error
   */
-
-static int check_duplicate_long_entries(TABLE *table, handler *handler,
-                                        const uchar *new_rec)
+int handler::check_duplicate_long_entries(const uchar *new_rec)
 {
-  table->file->errkey= -1;
+  lookup_errkey= (uint)-1;
   for (uint i= 0; i < table->s->keys; i++)
   {
     int result;
     if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
-        (result= check_duplicate_long_entry_key(table, handler, new_rec, i)))
+        (result= check_duplicate_long_entry_key(new_rec, i)))
       return result;
   }
   return 0;
@@ -6701,8 +6693,7 @@
     key as a parameter in normal insert key should be -1
     @returns 0 if no duplicate else returns error
   */
-
-static int check_duplicate_long_entries_update(TABLE *table, uchar *new_rec)
+int handler::check_duplicate_long_entries_update(const uchar *new_rec)
 {
   Field *field;
   uint key_parts;
@@ -6713,7 +6704,7 @@
     with respect to fields in hash_str
   */
   uint reclength= (uint) (table->record[1] - table->record[0]);
-  table->file->clone_handler_for_update();
+
   for (uint i= 0; i < table->s->keys; i++)
   {
     keyinfo= table->key_info + i;
@@ -6728,9 +6719,7 @@
       /* Compare fields if they are different then check for duplicates */
       if (field->cmp_binary_offset(reclength))
       {
-        if ((error= (check_duplicate_long_entry_key(table,
-                                                    table->file->update_handler,
-                                                    new_rec, i))))
+        if((error= check_duplicate_long_entry_key(new_rec, i)))
          return error;
        /*
          break because check_duplicate_long_entries_key will
@@ -6815,24 +6804,20 @@ bool handler::prepare_for_row_logging()
 
 /*
   Do all initialization needed for insert
-
-  @param force_update_handler Set to TRUE if we should always create an
-                              update handler. Needed if we don't know if we
-                              are going to do inserts while a scan is in
-                              progress.
 */
 
-int handler::prepare_for_insert(bool force_update_handler)
+int handler::prepare_for_insert()
 {
   /* Preparation for unique of blob's */
-  if (table->s->long_unique_table && (inited == RND || force_update_handler))
+  if (table->s->long_unique_table)
   {
     /*
      When doing a scan we can't use the same handler to check duplicate rows.
      Create a new temporary one
    */
-    if (clone_handler_for_update())
+    if (inited != NONE && create_lookup_handler())
      return 1;
+    alloc_lookup_buffer();
  }
  return 0;
}
@@ -6852,8 +6837,8 @@
 
   if (table->s->long_unique_table && this == table->file)
   {
-    DBUG_ASSERT(inited == NONE || update_handler != this);
-    if ((error= check_duplicate_long_entries(table, update_handler, buf)))
+    DBUG_ASSERT(inited == NONE || lookup_handler != this);
+    if ((error= check_duplicate_long_entries(buf)))
       DBUG_RETURN(error);
   }
   TABLE_IO_WAIT(tracker, PSI_TABLE_WRITE_ROW, MAX_KEY, error,
@@ -6898,8 +6883,10 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
   mark_trx_read_write();
   increment_statistics(&SSV::ha_update_count);
   if (table->s->long_unique_table &&
-      (error= check_duplicate_long_entries_update(table, (uchar*) new_data)))
+      (error= check_duplicate_long_entries_update(new_data)))
+  {
     return error;
+  }
   TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, active_index, 0,
                 { error= update_row(old_data, new_data);})
diff --git a/sql/handler.h b/sql/handler.h
index 8a946ae3518..ed2f68d8ea6 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -3025,10 +3025,12 @@ protected:
   Table_flags cached_table_flags;       /* Set on init() and open() */
 
   ha_rows estimation_rows_to_insert;
+  handler *lookup_handler;
 public:
   handlerton *ht;                 /* storage engine of this handler */
   uchar *ref;                     /* Pointer to current row */
   uchar *dup_ref;                 /* Pointer to duplicate row */
+  uchar *lookup_buffer;
 
   ha_statistics stats;
 
@@ -3061,6 +3063,7 @@
   */
   bool in_range_check_pushed_down;
 
+  uint lookup_errkey;
   uint errkey;                    /* Last dup key */
   uint key_used_on_scan;
   uint active_index, keyread;
@@ -3068,7 +3071,6 @@
   /** Length of ref (1-8 or the clustered key length) */
   uint ref_length;
   FT_INFO *ft_handler;
-  handler *update_handler;        /* Handler used in case of update */
 
   enum init_stat { NONE=0, INDEX, RND };
   init_stat inited, pre_inited;
@@ -3225,13 +3227,14 @@
 public:
   handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
     :table_share(share_arg), table(0),
-    estimation_rows_to_insert(0), ht(ht_arg),
-    ref(0), end_range(NULL),
+    estimation_rows_to_insert(0),
+    lookup_handler(this),
+    ht(ht_arg), ref(0), lookup_buffer(NULL), end_range(NULL),
     implicit_emptied(0),
     mark_trx_read_write_done(0),
     check_table_binlog_row_based_done(0),
     check_table_binlog_row_based_result(0),
-    in_range_check_pushed_down(FALSE), errkey(-1),
+    in_range_check_pushed_down(FALSE), lookup_errkey(-1), errkey(-1),
     key_used_on_scan(MAX_KEY),
     active_index(MAX_KEY), keyread(MAX_KEY),
     ref_length(sizeof(my_off_t)),
@@ -3268,8 +3271,6 @@
     return ref != 0;
   }
   virtual handler *clone(const char *name, MEM_ROOT *mem_root);
-  bool clone_handler_for_update();
-  void delete_update_handler();
 
   /** This is called after create to allow us to set up cached variables */
   void init()
   {
@@ -4646,7 +4647,7 @@ protected:
 public:
   bool check_table_binlog_row_based();
   bool prepare_for_row_logging();
-  int prepare_for_insert(bool force_update_handler= 0);
+  int prepare_for_insert();
   int binlog_log_row(TABLE *table,
                      const uchar *before_record,
                      const uchar *after_record,
@@ -4671,6 +4672,12 @@ private:
   void mark_trx_read_write_internal();
   bool check_table_binlog_row_based_internal();
 
+  int create_lookup_handler();
+  void alloc_lookup_buffer();
+  int check_duplicate_long_entries(const uchar *new_rec);
+  int check_duplicate_long_entries_update(const uchar *new_rec);
+  int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no);
+
 protected:
   /*
     These are intended to be used only by handler::ha_xxxx() functions
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index da46a538a28..19eabbb053c 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -753,7 +753,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
 
   if (table->versioned(VERS_TIMESTAMP) ||
       (table_list->has_period() && !portion_of_time_through_update))
-    table->file->prepare_for_insert(1);
+    table->file->prepare_for_insert();
 
   THD_STAGE_INFO(thd, stage_updating);
   while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
@@ -1243,7 +1243,7 @@ multi_delete::initialize_tables(JOIN *join)
         tbl->prepare_for_position();
 
         if (tbl->versioned(VERS_TIMESTAMP))
-          tbl->file->prepare_for_insert(1);
+          tbl->file->prepare_for_insert();
       }
       else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
                walk == delete_tables)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index e546f3cfeca..5de2cc6d539 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -886,9 +886,9 @@
     {
       if (table->file->ha_rnd_init_with_error(0))
         goto abort;
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   /**
      This is a simple check for the case when the table has a trigger
      that reads from it, or when the statement invokes a stored function
@@ -2544,10 +2544,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
   uchar *bitmap;
   char *copy_tmp;
   uint bitmaps_used;
-  KEY_PART_INFO *key_part, *end_part;
   Field **default_fields, **virtual_fields;
-  KEY *keys;
-  KEY_PART_INFO *key_parts;
   uchar *record;
   DBUG_ENTER("Delayed_insert::get_local_table");
 
@@ -2615,9 +2612,6 @@
                         share->default_expressions + 1) * sizeof(Field*),
                         &virtual_fields,
                         (share->virtual_fields + 1) * sizeof(Field*),
-                        &keys, share->keys * sizeof(KEY),
-                        &key_parts,
-                        share->ext_key_parts * sizeof(KEY_PART_INFO),
                         &record, (uint) share->reclength,
                         &bitmap, (uint) share->column_bitmap_size*4,
                         NullS))
@@ -2636,13 +2630,6 @@
     copy->default_field= default_fields;
   if (share->virtual_fields)
     copy->vfield= virtual_fields;
-  copy->key_info= keys;
-  copy->base_key_part= key_parts;
-
-  /* Copy key and key parts from original table */
-  memcpy(keys, table->key_info, sizeof(KEY) * share->keys);
-  memcpy(key_parts, table->base_key_part,
-         sizeof(KEY_PART_INFO) *share->ext_key_parts);
 
   copy->expr_arena= NULL;
 
@@ -2675,34 +2662,8 @@
   }
   *field=0;
 
-  /* The following is needed for long hash key */
-  key_part= copy->base_key_part;
-  for (KEY *key= copy->key_info, *end_key= key + share->keys ;
-       key < end_key;
-       key++)
-  {
-    key->key_part= key_part;
-    key_part+= key->ext_key_parts;
-    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
-      key_part++;
-  }
-
-  for (key_part= copy->base_key_part,
-       end_part= key_part + share->ext_key_parts ;
-       key_part < end_part ;
-       key_part++)
-  {
-    Field *field= key_part->field= copy->field[key_part->fieldnr - 1];
-
-    /* Fix partial fields, like in open_table_from_share() */
-    if (field->key_length() != key_part->length &&
-        !(field->flags & BLOB_FLAG))
-    {
-      field= key_part->field= field->make_new_field(client_thd->mem_root,
-                                                    copy, 0);
-      field->field_length= key_part->length;
-    }
-  }
+  if (copy_keys_from_share(copy, client_thd->mem_root))
+    goto error;
 
   if (share->virtual_fields || share->default_expressions ||
       share->default_fields)
@@ -3310,12 +3271,6 @@ pthread_handler_t handle_delayed_insert(void *arg)
       di->table->file->ha_release_auto_increment();
       mysql_unlock_tables(thd, lock);
       trans_commit_stmt(thd);
-      /*
-        We have to delete update handler as we need to create a new one
-        for the next lock table to ensure they have both the same read
-        view.
-      */
-      di->table->file->delete_update_handler();
       di->group_count=0;
       mysql_audit_release(thd);
       /*
@@ -3953,9 +3908,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
     {
       if (table->file->ha_rnd_init_with_error(0))
         DBUG_RETURN(1);
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
@@ -4720,9 +4675,9 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
     {
       if (table->file->ha_rnd_init_with_error(0))
         DBUG_RETURN(1);
-      table->file->prepare_for_insert();
     }
   }
+  table->file->prepare_for_insert();
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index efbbe28bcad..b52846c1390 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -652,8 +652,8 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
   {
     if ((error= table_list->table->file->ha_rnd_init_with_error(0)))
       goto err;
-    table->file->prepare_for_insert();
   }
+  table->file->prepare_for_insert();
   thd_progress_init(thd, 2);
   if (table_list->table->validate_default_values_of_unset_fields(thd))
   {
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index f78d2ac544c..9f4ddb4f357 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -18377,7 +18377,6 @@ TABLE *Create_tmp_table::start(THD *thd,
   table->copy_blobs= 1;
   table->in_use= thd;
   table->no_rows_with_nulls= param->force_not_null_cols;
-  table->check_unique_buf= NULL;
 
   table->s= share;
   init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 8dcc4e238d3..4750a302017 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -11013,6 +11013,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
 
   from->file->column_bitmaps_signal();
 
+  to->file->prepare_for_insert();
+
   /* Tell handler that we have values for all columns in the to table */
   to->use_all_columns();
   /* Add virtual columns to vcol_set to ensure they are updated */
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c2afaaef13c..c3ade4288d2 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -968,8 +968,7 @@ update_begin:
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();
 
-  if (table->versioned(VERS_TIMESTAMP) || table_list->has_period())
-    table->file->prepare_for_insert(1);
+  table->file->prepare_for_insert();
 
   THD_STAGE_INFO(thd, stage_updating);
   while (!(error=info.read_record()) && !thd->killed)
@@ -2028,8 +2027,7 @@ int multi_update::prepare(List<Item> &not_used_values,
       {
         table->read_set= &table->def_read_set;
         bitmap_union(table->read_set, &table->tmp_set);
-        if (table->versioned(VERS_TIMESTAMP))
-          table->file->prepare_for_insert(1);
+        table->file->prepare_for_insert();
       }
     }
   if (unlikely(error))
diff --git a/sql/table.cc b/sql/table.cc
index cf0642a780f..a31db77cfff 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1243,46 +1243,31 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
     Item *list_item;
     KEY *key= 0;
     uint key_index, parts= 0;
-    KEY_PART_INFO *key_part= table->base_key_part;
-
     for (key_index= 0; key_index < table->s->keys; key_index++)
     {
-      /*
-        We have to use key from share as this function may have changed
-        table->key_info if it was ever invoked before. This could happen
-        in case of INSERT DELAYED.
-      */
-      key= table->s->key_info + key_index;
-      if (key->algorithm == HA_KEY_ALG_LONG_HASH)
-      {
+      key=table->key_info + key_index;
       parts= key->user_defined_key_parts;
-      if (key_part[parts].fieldnr == field->field_index + 1)
+      if (key->key_part[parts].fieldnr == field->field_index + 1)
        break;
-        key_part++;
     }
-      key_part+= key->ext_key_parts;
-    }
-    if (key_index == table->s->keys)
+    if (!key || key->algorithm != HA_KEY_ALG_LONG_HASH)
       goto end;
-
-    /* Correct the key & key_parts if this function has been called before */
-    key= table->key_info + key_index;
-    key->key_part= key_part;
-
-    for (uint i=0; i < parts; i++, key_part++)
+    KEY_PART_INFO *keypart;
+    for (uint i=0; i < parts; i++)
     {
-      if (key_part->key_part_flag & HA_PART_KEY_SEG)
+      keypart= key->key_part + i;
+      if (keypart->key_part_flag & HA_PART_KEY_SEG)
      {
-        int length= key_part->length/key_part->field->charset()->mbmaxlen;
+        int length= keypart->length/keypart->field->charset()->mbmaxlen;
        list_item= new (mem_root) Item_func_left(thd,
-                     new (mem_root) Item_field(thd, key_part->field),
+                     new (mem_root) Item_field(thd, keypart->field),
                      new (mem_root) Item_int(thd, length));
        list_item->fix_fields(thd, NULL);
-        key_part->field->vcol_info=
-          table->field[key_part->field->field_index]->vcol_info;
+        keypart->field->vcol_info=
+          table->field[keypart->field->field_index]->vcol_info;
      }
      else
-        list_item= new (mem_root) Item_field(thd, key_part->field);
+        list_item= new (mem_root) Item_field(thd, keypart->field);
      field_list->push_back(list_item, mem_root);
    }
    Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
@@ -3709,6 +3694,54 @@ static void print_long_unique_table(TABLE *table)
 }
 #endif
 
+bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
+{
+  TABLE_SHARE *share= outparam->s;
+  if (share->key_parts)
+  {
+    KEY *key_info, *key_info_end;
+    KEY_PART_INFO *key_part;
+
+    if (!multi_alloc_root(root, &key_info, share->keys*sizeof(KEY),
+                          &key_part, share->ext_key_parts*sizeof(KEY_PART_INFO),
+                          NullS))
+      return 1;
+
+    outparam->key_info= key_info;
+
+    memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
+    memcpy(key_part, key_info->key_part, sizeof(*key_part)*share->ext_key_parts);
+
+    my_ptrdiff_t adjust_ptrs= PTR_BYTE_DIFF(key_part, key_info->key_part);
+    for (key_info_end= key_info + share->keys ;
+         key_info < key_info_end ;
+         key_info++)
+    {
+      key_info->table= outparam;
+      (uchar*&)(key_info->key_part)+= adjust_ptrs;
+      if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+        key_info->flags&= ~HA_NOSAME;
+    }
+    for (KEY_PART_INFO *key_part_end= key_part+share->ext_key_parts;
+         key_part < key_part_end;
+         key_part++)
+    {
+      Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
+      if (field->key_length() != key_part->length &&
+          !(field->flags & BLOB_FLAG))
+      {
+        /*
+          We are using only a prefix of the column as a key:
+          Create a new field for the key part that matches the index
+        */
+        field= key_part->field=field->make_new_field(root, outparam, 0);
+        field->field_length= key_part->length;
+      }
+    }
+  }
+  return 0;
+}
+
 /*
   Open a table based on a TABLE_SHARE
 
@@ -3871,58 +3904,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
     outparam->found_next_number_field= outparam->field[(uint)
      (share->found_next_number_field - share->field)];
 
-  /* Fix key->name and key_part->field */
-  if (share->key_parts)
-  {
-    KEY *key_info, *key_info_end;
-    KEY_PART_INFO *key_part;
-    uint n_length;
-    n_length= share->keys*sizeof(KEY) + share->ext_key_parts*sizeof(KEY_PART_INFO);
-    if (!(key_info= (KEY*) alloc_root(&outparam->mem_root, n_length)))
-      goto err;
-    outparam->key_info= key_info;
-    key_part= (reinterpret_cast<KEY_PART_INFO*>(key_info+share->keys));
-    outparam->base_key_part= key_part;
-
-    memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
-    memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) *
-                                                   share->ext_key_parts));
-
-    for (key_info_end= key_info + share->keys ;
-         key_info < key_info_end ;
-         key_info++)
-    {
-      KEY_PART_INFO *key_part_end;
-
-      key_info->table= outparam;
-      key_info->key_part= key_part;
-
-      key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
                                key_info->user_defined_key_parts) ;
-      if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
-      {
-        key_part_end++;
-        key_info->flags&= ~HA_NOSAME;
-      }
-      for ( ; key_part < key_part_end; key_part++)
-      {
-        Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
-        if (field->key_length() != key_part->length &&
-            !(field->flags & BLOB_FLAG))
-        {
-          /*
-            We are using only a prefix of the column as a key:
-            Create a new field for the key part that matches the index
-          */
-          field= key_part->field=field->make_new_field(&outparam->mem_root,
-                                                       outparam, 0);
-          const_cast<uint32_t&>(field->field_length)= key_part->length;
-        }
-      }
-      if (!share->use_ext_keys)
-        key_part+= key_info->ext_key_parts - key_info->user_defined_key_parts;
-    }
-  }
+  if (copy_keys_from_share(outparam, &outparam->mem_root))
+    goto err;
 
   /*
     Process virtual and default columns, if any.
@@ -5256,7 +5239,6 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
   range_rowid_filter_cost_info_elems= 0;
   range_rowid_filter_cost_info_ptr= NULL;
   range_rowid_filter_cost_info= NULL;
-  check_unique_buf= NULL;
   vers_write= s->versioned;
   quick_condition_rows=0;
   no_cache= false;
diff --git a/sql/table.h b/sql/table.h
index 4d18d6b1c77..2ccc59f9282 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1151,8 +1151,6 @@ public:
   THD *in_use;                          /* Which thread uses this */
 
   uchar *record[3];                     /* Pointer to records */
-  /* record buf to resolve hash collisions for long UNIQUE constraints */
-  uchar *check_unique_buf;
   uchar *write_row_record;              /* Used as optimisation in
                                            THD::write_row */
   uchar *insert_values;                 /* used by INSERT ... UPDATE */
@@ -1181,7 +1179,6 @@ public:
   /* Map of keys dependent on some constraint */
   key_map constraint_dependent_keys;
   KEY  *key_info;                       /* data of keys in database */
-  KEY_PART_INFO *base_key_part;         /* Where key parts are stored */
 
   Field **field;                        /* Pointer to fields */
   Field **vfield;                       /* Pointer to virtual fields*/
@@ -2979,6 +2976,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
                                        uint ha_open_flags, TABLE *outparam,
                                        bool is_create_table,
                                        List<String> *partitions_to_open= NULL);
+bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root);
 bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol);
 bool fix_session_vcol_expr_for_read(THD *thd, Field *field,
                                     Virtual_column_info *vcol);
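
A closing annotation on the new copy_keys_from_share(): after the two memcpy calls, each copied KEY still holds a key_part pointer into the share's array, so the function rebases every pointer by the byte distance between the new and old storage ((uchar*&)(key_info->key_part)+= adjust_ptrs). Below is a generic sketch of that rebasing idiom, with hypothetical Key/Part structs standing in for the real KEY/KEY_PART_INFO:

#include <cstring>
#include <cstddef>
#include <iostream>

// Generic sketch of the pointer-rebasing idiom used by copy_keys_from_share():
// copy an array of records whose 'parts' pointers aim into one shared parts
// array, then shift each pointer by the byte distance between the two arrays.
struct Part { int fieldnr; };
struct Key  { Part *parts; int n_parts; };

void copy_keys(Key *dst_keys, Part *dst_parts,
               const Key *src_keys, int n_keys, int n_parts_total)
{
  std::memcpy(dst_keys, src_keys, sizeof(Key) * n_keys);
  std::memcpy(dst_parts, src_keys[0].parts, sizeof(Part) * n_parts_total);

  // Byte distance between the new and old parts arrays; adding it to each
  // copied pointer makes it point at the corresponding new element.
  std::ptrdiff_t adjust= reinterpret_cast<char*>(dst_parts) -
                         reinterpret_cast<char*>(src_keys[0].parts);
  for (int i= 0; i < n_keys; i++)
    dst_keys[i].parts= reinterpret_cast<Part*>(
        reinterpret_cast<char*>(dst_keys[i].parts) + adjust);
}

int main()
{
  Part src_parts[3]= {{1}, {2}, {3}};
  Key  src_keys[2]= {{src_parts, 2}, {src_parts + 2, 1}};

  Part dst_parts[3];
  Key  dst_keys[2];
  copy_keys(dst_keys, dst_parts, src_keys, 2, 3);

  // The second key's parts now point into dst_parts, at the right offset.
  std::cout << (dst_keys[1].parts == dst_parts + 2) << '\n';  // prints 1
  return 0;
}

The trick is only sound because all key parts live in a single contiguous allocation, which is exactly what the share layout (and the multi_alloc_root() call in the patch) guarantees.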