Diffstat (limited to 'sql/sql_update.cc')
-rw-r--r--  sql/sql_update.cc  642
1 file changed, 445 insertions(+), 197 deletions(-)
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 9c82bde9497..22631ee3342 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -26,7 +26,7 @@
 
 /* Return 0 if row hasn't changed */
 
-bool compare_record(TABLE *table, query_id_t query_id)
+bool compare_record(TABLE *table)
 {
   if (table->s->blob_fields + table->s->varchar_fields == 0)
     return cmp_record(table,record[1]);
@@ -36,9 +36,9 @@ bool compare_record(TABLE *table, query_id_t query_id)
                   table->s->null_bytes))
     return TRUE;                                // Diff in NULL value
   /* Compare updated fields */
-  for (Field **ptr=table->field ; *ptr ; ptr++)
+  for (Field **ptr= table->field ; *ptr ; ptr++)
   {
-    if ((*ptr)->query_id == query_id &&
+    if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
         (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
       return TRUE;
   }
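The two hunks above are the core of this change: compare_record() no longer consults per-field query_id markers but tests the table's write_set bitmap. The standalone sketch below models that change-detection idea; Row, row_changed() and the use of std::bitset are invented stand-ins for MySQL's record buffers and MY_BITMAP, not server APIs:

    // Sketch: bitmap-driven change detection, modelled on compare_record().
    #include <array>
    #include <bitset>
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kMaxFields = 8;

    struct Row {
      std::array<long, kMaxFields> values{};   // stand-in for record[0]/[1]
    };

    // Return true if any column marked in write_set differs between the
    // old and new row images, i.e. the row has really changed.
    bool row_changed(const Row &old_row, const Row &new_row,
                     const std::bitset<kMaxFields> &write_set) {
      for (std::size_t i = 0; i < kMaxFields; ++i)
        if (write_set.test(i) && old_row.values[i] != new_row.values[i])
          return true;
      return false;
    }

    int main() {
      Row before, after;
      after.values[2] = 42;              // column 2 receives a new value

      std::bitset<kMaxFields> write_set;
      write_set.set(2);                  // only column 2 is being updated

      std::printf("changed: %d\n", row_changed(before, after, write_set));
      return 0;
    }

Only columns actually named in SET are compared, so an UPDATE that rewrites a row with identical values can be skipped entirely.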
@@ -83,6 +83,75 @@ static bool check_fields(THD *thd, List<Item> &items)
 }
 
 
+/**
+  Re-read record if more columns are needed for error message.
+
+  If we got a duplicate key error, we want to write an error
+  message containing the value of the duplicate key. If we do not have
+  all fields of the key value in record[0], we need to re-read the
+  record with a proper read_set.
+
+  @param[in] error   error number
+  @param[in] table   table
+*/
+
+static void prepare_record_for_error_message(int error, TABLE *table)
+{
+  Field **field_p;
+  Field *field;
+  uint keynr;
+  MY_BITMAP unique_map; /* Fields in offended unique. */
+  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
+  DBUG_ENTER("prepare_record_for_error_message");
+
+  /*
+    Only duplicate key errors print the key value.
+    If the storage engine always reads all columns, we have the value already.
+  */
+  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
+      !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
+    DBUG_VOID_RETURN;
+
+  /*
+    Get the number of the offended index.
+    We will see MAX_KEY if the engine cannot determine the affected index.
+  */
+  if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
+    DBUG_VOID_RETURN;
+
+  /* Create unique_map with all fields used by that index. */
+  bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
+  table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
+
+  /* Subtract read_set and write_set. */
+  bitmap_subtract(&unique_map, table->read_set);
+  bitmap_subtract(&unique_map, table->write_set);
+
+  /*
+    If the unique index uses columns that are neither in read_set
+    nor in write_set, we must re-read the record.
+    Otherwise no need to do anything.
+  */
+  if (bitmap_is_clear_all(&unique_map))
+    DBUG_VOID_RETURN;
+
+  /* Get identifier of last read record into table->file->ref. */
+  table->file->position(table->record[0]);
+  /* Add all fields used by unique index to read_set. */
+  bitmap_union(table->read_set, &unique_map);
+  /* Tell the engine about the new set. */
+  table->file->column_bitmaps_signal();
+  /* Read record that is identified by table->file->ref. */
+  (void) table->file->rnd_pos(table->record[1], table->file->ref);
+  /* Copy the newly read columns into the new record. */
+  for (field_p= table->field; (field= *field_p); field_p++)
+    if (bitmap_is_set(&unique_map, field->field_index))
+      field->copy_from_tmp(table->s->rec_buff_length);
+
+  DBUG_VOID_RETURN;
+}
+
+
 /*
   Process usual UPDATE
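The new helper exists because, with HA_PARTIAL_COLUMN_READ engines, the columns of the violated unique key may not be in memory when a duplicate-key error has to be printed. Its central computation, finding key columns that sit in neither read_set nor write_set, looks roughly like this sketch (FieldSet and missing_key_columns() are invented names, with std::bitset standing in for MY_BITMAP):

    // Sketch of the decision inside prepare_record_for_error_message():
    // only pay for a re-read when the key uses columns we never fetched.
    #include <bitset>
    #include <cstdio>

    constexpr std::size_t kMaxFields = 8;
    using FieldSet = std::bitset<kMaxFields>;

    FieldSet missing_key_columns(const FieldSet &key_cols,
                                 const FieldSet &read_set,
                                 const FieldSet &write_set) {
      // unique_map= key_cols; then subtract read_set and write_set
      return key_cols & ~read_set & ~write_set;
    }

    int main() {
      FieldSet key_cols, read_set, write_set;
      key_cols.set(0); key_cols.set(3);   // unique key covers columns 0 and 3
      read_set.set(0);                    // column 0 was already read
      write_set.set(1);                   // column 1 is being written

      FieldSet missing = missing_key_columns(key_cols, read_set, write_set);
      if (missing.any())
        std::printf("re-read needed for %zu column(s)\n", missing.count());
      return 0;
    }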
@@ -115,30 +184,28 @@ int mysql_update(THD *thd,
 {
   bool          using_limit= limit != HA_POS_ERROR;
   bool          safe_update= test(thd->options & OPTION_SAFE_UPDATES);
-  bool          used_key_is_modified, transactional_table;
+  bool          used_key_is_modified, transactional_table, will_batch;
   bool          can_compare_record;
   int           res;
-  int           error;
-  uint          used_index= MAX_KEY;
+  int           error, loc_error;
+  uint          used_index= MAX_KEY, dup_key_found;
   bool          need_sort= TRUE;
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
   uint          want_privilege;
 #endif
   uint          table_count= 0;
-  query_id_t    query_id=thd->query_id, timestamp_query_id;
   ha_rows       updated, found;
-  key_map       old_used_keys;
+  key_map       old_covering_keys;
   TABLE         *table;
   SQL_SELECT    *select;
   READ_RECORD   info;
   SELECT_LEX    *select_lex= &thd->lex->select_lex;
-  bool need_reopen;
+  bool          need_reopen;
+  ulonglong     id;
   List<Item> all_fields;
   THD::killed_state killed_status= THD::NOT_KILLED;
   DBUG_ENTER("mysql_update");
 
-  LINT_INIT(timestamp_query_id);
-
   for ( ; ; )
   {
     if (open_tables(thd, &table_list, &table_count, 0))
@@ -165,12 +232,11 @@ int mysql_update(THD *thd,
       mysql_handle_derived(thd->lex, &mysql_derived_filling)))
     DBUG_RETURN(1);
 
-  thd->proc_info="init";
+  thd_proc_info(thd, "init");
   table= table_list->table;
-  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
 
-  /* Calculate "table->used_keys" based on the WHERE */
-  table->used_keys= table->s->keys_in_use;
+  /* Calculate "table->covering_keys" based on the WHERE */
+  table->covering_keys= table->s->keys_in_use;
   table->quick_keys.clear_all();
 
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -181,23 +247,13 @@ int mysql_update(THD *thd,
   if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
     DBUG_RETURN(1);
 
-  old_used_keys= table->used_keys;              // Keys used in WHERE
-  /*
-    Change the query_id for the timestamp column so that we can
-    check if this is modified directly
-  */
-  if (table->timestamp_field)
-  {
-    timestamp_query_id=table->timestamp_field->query_id;
-    table->timestamp_field->query_id=thd->query_id-1;
-  }
-
+  old_covering_keys= table->covering_keys;      // Keys used in WHERE
   /* Check the fields we are going to modify */
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
   table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
   table_list->register_want_access(want_privilege);
 #endif
-  if (setup_fields_with_no_wrap(thd, 0, fields, 1, 0, 0))
+  if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
     DBUG_RETURN(1);                     /* purecov: inspected */
   if (table_list->view && check_fields(thd, fields))
   {
@@ -211,10 +267,16 @@ int mysql_update(THD *thd,
   if (table->timestamp_field)
   {
     // Don't set timestamp column if this is modified
-    if (table->timestamp_field->query_id == thd->query_id)
+    if (bitmap_is_set(table->write_set,
+                      table->timestamp_field->field_index))
       table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
     else
-      table->timestamp_field->query_id=timestamp_query_id;
+    {
+      if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
+          table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
+        bitmap_set_bit(table->write_set,
+                       table->timestamp_field->field_index);
+    }
   }
 
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -222,7 +284,7 @@ int mysql_update(THD *thd,
   table_list->grant.want_privilege= table->grant.want_privilege=
     (SELECT_ACL & ~table->grant.privilege);
 #endif
-  if (setup_fields(thd, 0, values, 1, 0, 0))
+  if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
   {
     free_underlaid_joins(thd, select_lex);
     DBUG_RETURN(1);                             /* purecov: inspected */
@@ -239,8 +301,31 @@ int mysql_update(THD *thd,
     if (cond_value == Item::COND_FALSE)
       limit= 0;                                 // Impossible WHERE
   }
+
+  /*
+    If a timestamp field settable on UPDATE is present, then to avoid a
+    wrong update force the table handler to retrieve write-only fields as
+    well, so that we can compare records and detect data changes.
+  */
+  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ &&
+      table->timestamp_field &&
+      (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
+       table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
+    bitmap_union(table->read_set, table->write_set);
+
   // Don't count on usage of 'only index' when calculating which key to use
-  table->used_keys.clear_all();
+  table->covering_keys.clear_all();
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  if (prune_partitions(thd, table, conds))
+  {
+    free_underlaid_joins(thd, select_lex);
+    send_ok(thd);                               // No matching records
+    DBUG_RETURN(0);
+  }
+#endif
+  /* Update the table->file->stats.records number */
+  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+
   select= make_select(table, 0, 0, conds, 0, &error);
   if (error || !limit ||
       (select && select->check_quick(thd, safe_update, limit)))
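Several hunks above switch setup_fields()/setup_fields_with_no_wrap() from a bare numeric flag to the MARK_COLUMNS_READ / MARK_COLUMNS_WRITE modes, so column resolution itself maintains the two bitmaps: SET targets are marked for writing, right-hand-side expressions for reading. A toy model of that split, with invented types, could look like:

    // Toy model of the MARK_COLUMNS_READ / MARK_COLUMNS_WRITE split.
    #include <bitset>
    #include <cstdio>
    #include <string>

    constexpr std::size_t kMaxFields = 8;

    enum class MarkMode { kNone, kRead, kWrite };

    struct TableSets {
      std::bitset<kMaxFields> read_set, write_set;
    };

    void mark_column(TableSets &sets, std::size_t field_index, MarkMode mode) {
      if (mode == MarkMode::kRead)
        sets.read_set.set(field_index);
      else if (mode == MarkMode::kWrite)
        sets.write_set.set(field_index);
    }

    int main() {
      TableSets sets;
      // UPDATE t SET c1 = c2 + 1  =>  c1 is written, c2 is read.
      mark_column(sets, /*c1*/ 1, MarkMode::kWrite);
      mark_column(sets, /*c2*/ 2, MarkMode::kRead);
      std::printf("read=%s write=%s\n",
                  sets.read_set.to_string().c_str(),
                  sets.write_set.to_string().c_str());
      return 0;
    }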
@@ -271,13 +356,16 @@ int mysql_update(THD *thd,
     }
   }
   init_ftfuncs(thd, select_lex, 1);
+
+  table->mark_columns_needed_for_update();
+
   /* Check if we are modifying a key that we use to search with */
 
   if (select && select->quick)
   {
     used_index= select->quick->index;
     used_key_is_modified= (!select->quick->unique_key_range() &&
-                           select->quick->is_keys_used(&fields));
+                           select->quick->is_keys_used(table->write_set));
   }
   else
   {
@@ -285,28 +373,38 @@ int mysql_update(THD *thd,
     if (used_index == MAX_KEY)                  // no index for sort order
       used_index= table->file->key_used_on_scan;
     if (used_index != MAX_KEY)
-      used_key_is_modified= is_key_used(table, used_index, fields);
+      used_key_is_modified= is_key_used(table, used_index, table->write_set);
   }
 
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  if (used_key_is_modified || order ||
+      partition_key_modified(table, table->write_set))
+#else
   if (used_key_is_modified || order)
+#endif
   {
     /*
      We can't update table directly;  We must first find all matching rows
      before updating the table!
    */
-    table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
-    if (used_index < MAX_KEY && old_used_keys.is_set(used_index))
+    if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
     {
       table->key_read=1;
-      table->file->extra(HA_EXTRA_KEYREAD);
+      table->mark_columns_used_by_index(used_index);
+    }
+    else
+    {
+      table->use_all_columns();
     }
-    /* note: can actually avoid sorting below.. */
+
+    /* note: We avoid sorting if we sort on the used index */
    if (order && (need_sort || used_key_is_modified))
    {
      /*
        Doing an ORDER BY;  Let filesort find and sort the rows we are going
        to update
+        NOTE: filesort will call table->prepare_for_position()
      */
      uint         length= 0;
      SORT_FIELD  *sortorder;
@@ -315,12 +413,11 @@ int mysql_update(THD *thd,
       table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
                                                     MYF(MY_FAE | MY_ZEROFILL));
       if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
-          (table->sort.found_records = filesort(thd, table, sortorder, length,
-                                                select, limit,
-                                                &examined_rows))
+          (table->sort.found_records= filesort(thd, table, sortorder, length,
+                                               select, limit, 1,
+                                               &examined_rows))
          == HA_POS_ERROR)
      {
-       free_io_cache(table);
        goto err;
      }
      /*
@@ -346,6 +443,7 @@ int mysql_update(THD *thd,
       /* If quick select is used, initialize it before retrieving rows. */
       if (select && select->quick && select->quick->reset())
         goto err;
+      table->file->try_semi_consistent_read(1);
 
       /*
         When we get here, we have one of the following options:
@@ -357,18 +455,22 @@ int mysql_update(THD *thd,
         B.2 quick select is not used, this is full index scan (with LIMIT)
             Full index scan must be started with init_read_record_idx
       */
+
       if (used_index == MAX_KEY || (select && select->quick))
         init_read_record(&info,thd,table,select,0,1);
       else
         init_read_record_idx(&info, thd, table, 1, used_index);
 
-      thd->proc_info="Searching rows for update";
+      thd_proc_info(thd, "Searching rows for update");
       ha_rows tmp_limit= limit;
 
       while (!(error=info.read_record(&info)) && !thd->killed)
       {
         if (!(select && select->skip_record()))
         {
+          if (table->file->was_semi_consistent_read())
+            continue;  /* repeat the read of the same row if it still exists */
+
           table->file->position(table->record[0]);
           if (my_b_write(&tempfile,table->file->ref,
                          table->file->ref_length))
@@ -388,6 +490,7 @@ int mysql_update(THD *thd,
     if (thd->killed && !error)
       error= 1;                         // Aborted
     limit= tmp_limit;
+    table->file->try_semi_consistent_read(0);
     end_read_record(&info);
 
     /* Change select to use tempfile */
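The try_semi_consistent_read()/was_semi_consistent_read() pair added above lets an engine return a possibly stale, unlocked row and have the SQL layer re-read it only when it matters. A minimal, self-contained model of that retry loop follows; Cursor is a made-up type, not the handler interface:

    // Model of the semi-consistent-read retry loop: the first read of a
    // contended row is not locked, so the statement goes around and reads
    // the same row again before updating it.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Cursor {
      std::vector<int> rows{1, 2, 3};
      std::size_t pos = 0;
      bool last_was_retry = false;
      bool retry_pending = false;

      bool read(int &out) {
        last_was_retry = retry_pending;
        retry_pending = false;
        if (last_was_retry) { out = rows[pos - 1]; return true; }  // same row
        if (pos >= rows.size()) return false;
        out = rows[pos++];
        return true;
      }
    };

    int main() {
      Cursor c;
      int row;
      while (c.read(row)) {
        // Pretend the first pass over row 2 was a semi-consistent read.
        if (row == 2 && !c.last_was_retry) {
          c.retry_pending = true;   // repeat the read of the same row
          continue;
        }
        std::printf("updating row %d\n", row);
      }
      return 0;
    }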
@@ -411,56 +514,67 @@ int mysql_update(THD *thd,
       goto err;
     }
     if (table->key_read)
-    {
-      table->key_read=0;
-      table->file->extra(HA_EXTRA_NO_KEYREAD);
-    }
+      table->restore_column_maps_after_mark_index();
   }
 
   if (ignore)
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
 
   if (select && select->quick && select->quick->reset())
-    goto err;
+    goto err;
+
+  table->file->try_semi_consistent_read(1);
   init_read_record(&info,thd,table,select,0,1);
 
   updated= found= 0;
-  thd->count_cuted_fields= CHECK_FIELD_WARN;    /* calc cuted fields */
+  /* Generate an error when trying to set a NOT NULL field to NULL. */
+  thd->count_cuted_fields= ignore ? CHECK_FIELD_WARN
+                                  : CHECK_FIELD_ERROR_FOR_NULL;
   thd->cuted_fields=0L;
-  thd->proc_info="Updating";
-  query_id=thd->query_id;
+  thd_proc_info(thd, "Updating");
 
   transactional_table= table->file->has_transactions();
   thd->abort_on_warning= test(!ignore &&
                               (thd->variables.sql_mode &
                                (MODE_STRICT_TRANS_TABLES |
                                 MODE_STRICT_ALL_TABLES)));
-
-  if (table->triggers)
+  if (table->triggers &&
+      table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                    TRG_ACTION_AFTER))
   {
-    table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
-    if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
-                                      TRG_ACTION_AFTER))
-    {
-      /*
-        The table has AFTER UPDATE triggers that might access to subject
-        table and therefore might need update to be done immediately.
-        So we turn-off the batching.
-      */
-      (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
-    }
+    /*
+      The table has AFTER UPDATE triggers that might access the subject
+      table and therefore might need the update to be done immediately.
+      So we turn off the batching.
+    */
+    (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
+    will_batch= FALSE;
   }
+  else
+    will_batch= !table->file->start_bulk_update();
+
+  /*
+    Ensure that we can use position()
+    if we need to create an error message.
+  */
+  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
+    table->prepare_for_position();
 
   /*
     We can use compare_record() to optimize away updates if
-    the table handler is returning all columns
+    the table handler is returning all columns OR
+    if all updated columns are read
   */
-  can_compare_record= !(table->file->table_flags() &
-                        HA_PARTIAL_COLUMN_READ);
+  can_compare_record= (!(table->file->ha_table_flags() &
+                         HA_PARTIAL_COLUMN_READ) ||
+                       bitmap_is_subset(table->write_set, table->read_set));
+
   while (!(error=info.read_record(&info)) && !thd->killed)
   {
     if (!(select && select->skip_record()))
     {
+      if (table->file->was_semi_consistent_read())
+        continue;  /* repeat the read of the same row if it still exists */
+
      store_record(table,record[1]);
      if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
                                               table->triggers,
@@ -469,7 +583,7 @@ int mysql_update(THD *thd,
 
       found++;
 
-      if (!can_compare_record || compare_record(table, query_id))
+      if (!can_compare_record || compare_record(table))
       {
         if ((res= table_list->view_check_option(thd, ignore)) !=
             VIEW_CHECK_OK)
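The new can_compare_record expression is worth restating: skipping a write based on compare_record() is only safe when the engine returned full rows, or when every column in write_set was also read. A sketch of that subset test, using std::bitset in place of bitmap_is_subset():

    // can_compare_record, restated: the optimization is allowed when
    // write_set is a subset of read_set or the engine reads all columns.
    #include <bitset>
    #include <cstdio>

    constexpr std::size_t kMaxFields = 8;
    using FieldSet = std::bitset<kMaxFields>;

    bool is_subset(const FieldSet &inner, const FieldSet &outer) {
      return (inner & ~outer).none();   // bitmap_is_subset equivalent
    }

    int main() {
      FieldSet read_set, write_set;
      read_set.set(1); read_set.set(2);
      write_set.set(2);

      bool partial_column_read = true;  // engine may return partial rows
      bool can_compare_record =
          !partial_column_read || is_subset(write_set, read_set);
      std::printf("can_compare_record=%d\n", can_compare_record);
      return 0;
    }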
@@ -483,11 +597,53 @@ int mysql_update(THD *thd,
           break;
         }
       }
-      if (!(error=table->file->update_row((byte*) table->record[1],
-                                          (byte*) table->record[0])))
+      if (will_batch)
       {
-        updated++;
-
+        /*
+          Typically a batched handler can execute the batched jobs when:
+          1) It is specifically told to do so
+          2) It is no longer a good idea to batch
+          3) It is necessary to send the batch for other reasons
+             (One such reason is when READs must be performed)
+
+          1) is covered by exec_bulk_update calls.
+          2) and 3) are handled by the bulk_update_row method.
+
+          bulk_update_row can execute the updates with or without
+          including the row passed in the call. This is up to the handler
+          implementation and can vary from call to call.
+
+          dup_key_found reports the number of duplicate keys found in
+          those updates actually executed. It only reports those if the
+          extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued. If it
+          hasn't been issued, an error code is returned instead and this
+          number can be ignored. Thus any handler that implements batching
+          for UPDATE IGNORE must also handle this extra call properly.
+
+          If a duplicate key is found on the record included in this call
+          then it should be included in the count of dup_key_found and
+          error should be set to 0 (only if these errors are ignored).
+        */
+        error= table->file->ha_bulk_update_row(table->record[1],
+                                               table->record[0],
+                                               &dup_key_found);
+        limit+= dup_key_found;
+        updated-= dup_key_found;
+      }
+      else
+      {
+        /* Non-batched update */
+        error= table->file->ha_update_row(table->record[1],
+                                          table->record[0]);
+      }
+      if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
+      {
+        if (error != HA_ERR_RECORD_IS_THE_SAME)
+          updated++;
+        else
+          error= 0;
         if (table->triggers &&
             table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                               TRG_ACTION_AFTER, TRUE))
@@ -495,31 +651,72 @@ int mysql_update(THD *thd,
           error= 1;
           break;
         }
-      }
-      else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
-      {
+      }
+      else if (!ignore ||
+               table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
+      {
         /*
-          If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+          If (ignore && the error is ignorable) we don't have to
           do anything; otherwise...
         */
-        if (error != HA_ERR_FOUND_DUPP_KEY)
+        if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
           thd->fatal_error(); /* Other handler errors are fatal */
-        table->file->print_error(error,MYF(0));
-        error= 1;
-        break;
-      }
+
+        prepare_record_for_error_message(error, table);
+        table->file->print_error(error,MYF(0));
+        error= 1;
+        break;
+      }
     }
-
+
     if (!--limit && using_limit)
     {
-      error= -1;                                // Simulate end of file
-      break;
+      /*
+        We have reached end-of-file in the most common situations: where
+        no batching has occurred, where batching was supposed to occur but
+        no updates were made, and where the batch execution was performed
+        without error and without finding any duplicate keys.
+        If the batched updates were performed with errors we need to
+        check, and if there was no error but duplicate keys were found we
+        need to continue, since those are not counted in limit.
+      */
+      if (will_batch &&
+          ((error= table->file->exec_bulk_update(&dup_key_found)) ||
+           dup_key_found))
+      {
+        if (error)
+        {
+          /* purecov: begin inspected */
+          /*
+            The handler should not report error of duplicate keys if they
+            are ignored. This is a requirement on batching handlers.
+          */
+          prepare_record_for_error_message(error, table);
+          table->file->print_error(error,MYF(0));
+          error= 1;
+          break;
+          /* purecov: end */
+        }
+        /*
+          Either an error was found and we are ignoring errors or there
+          were duplicate keys found. In both cases we need to correct
+          the counters and continue the loop.
+        */
+        limit= dup_key_found;  // limit is 0 when we get here, so add back
+        updated-= dup_key_found;
+      }
+      else
+      {
+        error= -1;                              // Simulate end of file
+        break;
+      }
     }
   }
   else
     table->file->unlock_row();
   thd->row_count++;
 }
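The batching path above drives the handler protocol named in the diff: start_bulk_update(), ha_bulk_update_row(), exec_bulk_update() and end_bulk_update(). The sketch below shows the intended call order with a toy queueing handler; BatchingHandler and its behavior are invented for illustration and say nothing about how a real engine batches:

    // Sketch of the bulk-update protocol: rows are queued by
    // bulk_update_row() and flushed by exec_bulk_update(), which also
    // reports ignored duplicate keys so the caller can fix its counters.
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct BatchingHandler {
      std::vector<std::pair<int, int>> queue;   // (old value, new value)

      bool start_bulk_update() { return false; }  // false => batching enabled

      int bulk_update_row(int old_v, int new_v, unsigned *dup_key_found) {
        *dup_key_found = 0;
        queue.emplace_back(old_v, new_v);       // defer the actual write
        return 0;
      }

      int exec_bulk_update(unsigned *dup_key_found) {
        *dup_key_found = 0;
        for (auto &u : queue) {
          if (u.first == u.second) ++*dup_key_found;  // pretend: ignored dup
          else std::printf("update %d -> %d\n", u.first, u.second);
        }
        queue.clear();
        return 0;
      }

      void end_bulk_update() {}
    };

    int main() {
      BatchingHandler h;
      unsigned dups = 0;
      bool will_batch = !h.start_bulk_update();
      if (will_batch) {
        h.bulk_update_row(1, 10, &dups);
        h.bulk_update_row(7, 7, &dups);
        h.exec_bulk_update(&dups);   // flush; dups now counts ignored keys
        h.end_bulk_update();
      }
      std::printf("dup_key_found=%u\n", dups);
      return 0;
    }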
+  dup_key_found= 0;
   /*
     Caching the killed status to pass as the arg to query event constructor;
     The cached value can not change whereas the killed status can
     (externally) since this point and change of the latter won't affect
     binlogging.
   */
   killed_status= thd->killed;
   DBUG_EXECUTE_IF("simulate_kill_bug27571",
                   {
                     thd->killed= THD::KILL_QUERY;
                   };);
   error= (killed_status == THD::NOT_KILLED)? error : 1;
 
+  if (error &&
+      will_batch &&
+      (loc_error= table->file->exec_bulk_update(&dup_key_found)))
+    /*
+      An error has occurred when a batched update was performed and
+      returned an error indication. It cannot be an allowed duplicate key
+      error, since we require the batching handler to treat that as
+      normal behavior.
+
+      Otherwise we simply remove the number of duplicate key records
+      found in the batched update.
+    */
+  {
+    /* purecov: begin inspected */
+    thd->fatal_error();
+    prepare_record_for_error_message(loc_error, table);
+    table->file->print_error(loc_error,MYF(0));
+    error= 1;
+    /* purecov: end */
+  }
+  else
+    updated-= dup_key_found;
+  if (will_batch)
+    table->file->end_bulk_update();
+  table->file->try_semi_consistent_read(0);
   if (!transactional_table && updated > 0)
     thd->transaction.stmt.modified_non_trans_table= TRUE;
 
   end_read_record(&info);
-  free_io_cache(table);                         // If ORDER BY
   delete select;
-  thd->proc_info="end";
+  thd_proc_info(thd, "end");
   VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
 
   /*
@@ -570,10 +790,13 @@ int mysql_update(THD *thd,
   {
     if (error < 0)
       thd->clear_error();
-    Query_log_event qinfo(thd, thd->query, thd->query_length,
-                          transactional_table, FALSE, killed_status);
-    if (mysql_bin_log.write(&qinfo) && transactional_table)
-      error=1;                          // Rollback update
+    if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+                          thd->query, thd->query_length,
+                          transactional_table, FALSE, killed_status) &&
+        transactional_table)
+    {
+      error=1;                          // Rollback update
+    }
   }
   if (thd->transaction.stmt.modified_non_trans_table)
     thd->transaction.all.modified_non_trans_table= TRUE;
@@ -592,6 +815,10 @@ int mysql_update(THD *thd,
     thd->lock=0;
   }
 
+  /* If LAST_INSERT_ID(X) was used, report X */
+  id= thd->arg_of_last_insert_id_function ?
+    thd->first_successful_insert_id_in_prev_stmt : 0;
+
   if (error < 0)
   {
     char buff[STRING_BUFFER_USUAL_SIZE];
@@ -599,14 +826,12 @@ int mysql_update(THD *thd,
             (ulong) thd->cuted_fields);
     thd->row_count_func=
       (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
-    send_ok(thd, (ulong) thd->row_count_func,
-            thd->insert_id_used ? thd->last_insert_id : 0L,buff);
+    send_ok(thd, (ulong) thd->row_count_func, id, buff);
     DBUG_PRINT("info",("%ld records updated", (long) updated));
   }
   thd->count_cuted_fields= CHECK_FIELD_IGNORE;  /* calc cuted fields */
   thd->abort_on_warning= 0;
-  free_io_cache(table);
-  DBUG_RETURN((error >= 0 || thd->net.report_error) ? 1 : 0);
+  DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
 
 err:
   delete select;
@@ -658,7 +883,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
 
   if (setup_tables_and_check_access(thd, &select_lex->context,
                                     &select_lex->top_join_list,
-                                    table_list, conds,
+                                    table_list,
                                     &select_lex->leaf_tables, FALSE,
                                     UPDATE_ACL, SELECT_ACL) ||
       setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
@@ -753,12 +978,12 @@ reopen_tables:
 
   if (setup_tables_and_check_access(thd, &lex->select_lex.context,
                                     &lex->select_lex.top_join_list,
-                                    table_list, &lex->select_lex.where,
+                                    table_list,
                                     &lex->select_lex.leaf_tables, FALSE,
                                     UPDATE_ACL, SELECT_ACL))
     DBUG_RETURN(TRUE);
 
-  if (setup_fields_with_no_wrap(thd, 0, *fields, 1, 0, 0))
+  if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
     DBUG_RETURN(TRUE);
 
   for (tl= table_list; tl ; tl= tl->next_local)
@@ -786,7 +1011,8 @@ reopen_tables:
       TABLE *table= tl->table;
       /* Only set timestamp column if this is not modified */
       if (table->timestamp_field &&
-          table->timestamp_field->query_id == thd->query_id)
+          bitmap_is_set(table->write_set,
+                        table->timestamp_field->field_index))
         table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
 
       /* if table will be updated then check that it is unique */
@@ -798,9 +1024,7 @@ reopen_tables:
         DBUG_RETURN(TRUE);
       }
 
-      if (table->triggers)
-        table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
-
+      table->mark_columns_needed_for_update();
       DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
       /*
         If table will be updated we should not downgrade lock for it and
@@ -831,7 +1055,7 @@ reopen_tables:
       if (check_access(thd, want_privilege,
                        tl->db, &tl->grant.privilege, 0, 0,
                        test(tl->schema_table)) ||
-          (grant_option && check_grant(thd, want_privilege, tl, 0, 1, 0)))
+          check_grant(thd, want_privilege, tl, 0, 1, 0))
         DBUG_RETURN(TRUE);
     }
   }
@@ -956,8 +1180,8 @@ bool mysql_multi_update(THD *thd,
                       OPTION_SETUP_TABLES_DONE,
                       result, unit, select_lex);
   DBUG_PRINT("info",("res: %d  report_error: %d", res,
-                     thd->net.report_error));
-  res|= thd->net.report_error;
+                     (int) thd->is_error()));
+  res|= thd->is_error();
   if (unlikely(res))
   {
     /* If we had another error reported earlier then this will be ignored */
@@ -1002,7 +1226,7 @@ int multi_update::prepare(List<Item> &not_used_values,
   thd->count_cuted_fields= CHECK_FIELD_WARN;
   thd->cuted_fields=0L;
-  thd->proc_info="updating main table";
+  thd_proc_info(thd, "updating main table");
 
   tables_to_update= get_table_map(fields);
 
@@ -1013,11 +1237,11 @@ int multi_update::prepare(List<Item> &not_used_values,
   }
 
   /*
-    We have to check values after setup_tables to get used_keys right in
+    We have to check values after setup_tables to get covering_keys right in
     reference tables
   */
 
-  if (setup_fields(thd, 0, *values, 1, 0, 0))
+  if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
     DBUG_RETURN(1);
 
   /*
@@ -1038,24 +1262,21 @@ int multi_update::prepare(List<Item> &not_used_values,
                                                 sizeof(*tl));
       if (!tl)
         DBUG_RETURN(1);
-      update.link_in_list((byte*) tl, (byte**) &tl->next_local);
+      update.link_in_list((uchar*) tl, (uchar**) &tl->next_local);
       tl->shared= table_count++;
       table->no_keyread=1;
-      table->used_keys.clear_all();
+      table->covering_keys.clear_all();
       table->pos_in_table_list= tl;
-      if (table->triggers)
+      if (table->triggers &&
+          table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                        TRG_ACTION_AFTER))
       {
-        table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
-        if (table->triggers->has_triggers(TRG_EVENT_UPDATE,
-                                          TRG_ACTION_AFTER))
-        {
-          /*
-            The table has AFTER UPDATE triggers that might access to subject
-            table and therefore might need update to be done immediately.
-            So we turn-off the batching.
-          */
-          (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
-        }
+        /*
+          The table has AFTER UPDATE triggers that might access the subject
+          table and therefore might need the update to be done immediately.
+          So we turn off the batching.
+        */
+        (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
       }
     }
   }
@@ -1110,7 +1331,6 @@
     thd			Thread handler
     join_tab		How table is used in join
     all_tables		List of tables
-    fields		Fields that are updated
 
   NOTES
     We can update the first table in join on the fly if we know that
     - Table is not joined to itself.
 
-    When checking for above cases we also should take into account that
-    BEFORE UPDATE trigger potentially may change value of any field in row
-    being updated.
+    This function gets information about fields to be updated from
+    the TABLE::write_set bitmap.
 
   WARNING
     This code is a bit dependent of how make_join_readinfo() works.
@@ -1138,8 +1357,7 @@
 */
 
 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
-                               TABLE_LIST *table_ref, TABLE_LIST *all_tables,
-                               List<Item> *fields)
+                               TABLE_LIST *table_ref, TABLE_LIST *all_tables)
 {
   TABLE *table= join_tab->table;
   if (unique_table(thd, table_ref, all_tables, 0))
@@ -1151,20 +1369,21 @@ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
     return TRUE;                                // At most one matching row
   case JT_REF:
   case JT_REF_OR_NULL:
-    return !is_key_used(table, join_tab->ref.key, *fields);
+    return !is_key_used(table, join_tab->ref.key, table->write_set);
   case JT_ALL:
     /* If range search on index */
     if (join_tab->quick)
-      return !join_tab->quick->is_keys_used(fields);
+      return !join_tab->quick->is_keys_used(table->write_set);
     /* If scanning in clustered key */
-    if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
         table->s->primary_key < MAX_KEY)
-      return !is_key_used(table, table->s->primary_key, *fields);
+      return !is_key_used(table, table->s->primary_key, table->write_set);
     return TRUE;
   default:
    break;                                       // Avoid compiler warning
  }
  return FALSE;
+
 }
@@ -1205,18 +1424,20 @@ multi_update::initialize_tables(JOIN *join)
     uint cnt= table_ref->shared;
     List<Item> temp_fields;
     ORDER     group;
+    TMP_TABLE_PARAM *tmp_param;
 
+    table->mark_columns_needed_for_update();
     if (ignore)
       table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
     if (table == main_table)                    // First table in join
     {
-      if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables,
-                             fields_for_table[cnt]))
+      if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
       {
         table_to_update= main_table;            // Update table on the fly
         continue;
       }
     }
+    table->prepare_for_position();
 
     if (table == first_table_for_update && table_ref->check_option)
     {
@@ -1235,7 +1456,7 @@ multi_update::initialize_tables(JOIN *join)
       }
     }
 
-    TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
+    tmp_param= tmp_table_param+cnt;
 
     /*
       Create a temporary table to store all fields that are changed for this
@@ -1250,10 +1471,10 @@ multi_update::initialize_tables(JOIN *join)
       do
       {
         Field_string *field= new Field_string(tbl->file->ref_length, 0,
-                                              tbl->alias,
-                                              tbl, &my_charset_bin);
+                                              tbl->alias, &my_charset_bin);
         if (!field)
           DBUG_RETURN(1);
+        field->init(tbl);
        /*
          The field will be converted to varstring when creating tmp table
          if table to be updated was created by mysql 4.1. Deny this.
        */
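safe_update_on_fly() now answers its question purely from bitmaps: updating on the fly is unsafe when the key used for the scan overlaps the columns being written, since the update could move the row within the scan. The heart of that test, sketched with invented names:

    // Core of the safe_update_on_fly() decision: does the scan key
    // intersect the set of columns being written?
    #include <bitset>
    #include <cstdio>

    constexpr std::size_t kMaxFields = 8;
    using FieldSet = std::bitset<kMaxFields>;

    bool is_key_used(const FieldSet &key_cols, const FieldSet &write_set) {
      return (key_cols & write_set).any();
    }

    int main() {
      FieldSet key_cols, write_set;
      key_cols.set(0);                  // scanning an index on column 0
      write_set.set(0);                 // ...while also updating column 0
      std::printf("safe on the fly: %d\n", !is_key_used(key_cols, write_set));
      return 0;
    }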
@@ -1329,6 +1550,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
   {
     TABLE *table= cur_table->table;
+    uint offset= cur_table->shared;
     /*
       Check if we are using outer join and we didn't find the row
       or if we have already updated this row in the previous call to this
@@ -1344,17 +1566,18 @@ bool multi_update::send_data(List<Item> &not_used_values)
     if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
       continue;
 
-    uint offset= cur_table->shared;
-    table->file->position(table->record[0]);
     /*
       We can use compare_record() to optimize away updates if
-      the table handler is returning all columns
+      the table handler is returning all columns OR
+      if all updated columns are read
     */
     if (table == table_to_update)
     {
       bool can_compare_record;
-      can_compare_record= !(table->file->table_flags() &
-                            HA_PARTIAL_COLUMN_READ);
+      can_compare_record= (!(table->file->ha_table_flags() &
+                             HA_PARTIAL_COLUMN_READ) ||
+                           bitmap_is_subset(table->write_set,
+                                            table->read_set));
       table->status|= STATUS_UPDATED;
       store_record(table,record[1]);
       if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1364,7 +1587,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
         DBUG_RETURN(1);
 
       found++;
-      if (!can_compare_record || compare_record(table, thd->query_id))
+      if (!can_compare_record || compare_record(table))
       {
         int error;
         if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1376,33 +1599,42 @@ bool multi_update::send_data(List<Item> &not_used_values)
         else if (error == VIEW_CHECK_ERROR)
           DBUG_RETURN(1);
        }
-       if (!updated++)
-       {
-         /*
-           Inform the main table that we are going to update the table even
-           while we may be scanning it.  This will flush the read cache
-           if it's used.
-         */
-         main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
-       }
-       if ((error=table->file->update_row(table->record[1],
-                                          table->record[0])))
-       {
-         updated--;
-         if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
-         {
+        if (!updated++)
+        {
+          /*
+            Inform the main table that we are going to update the table even
+            while we may be scanning it. This will flush the read cache
+            if it's used.
+          */
+          main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
+        }
+        if ((error=table->file->ha_update_row(table->record[1],
+                                              table->record[0])) &&
+            error != HA_ERR_RECORD_IS_THE_SAME)
+        {
+          updated--;
+          if (!ignore ||
+              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
+          {
            /*
-             If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+             If (ignore && the error is ignorable) we don't have to
             do anything; otherwise...
           */
-          if (error != HA_ERR_FOUND_DUPP_KEY)
+          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
             thd->fatal_error(); /* Other handler errors are fatal */
-           table->file->print_error(error,MYF(0));
-           DBUG_RETURN(1);
-         }
-       }
+
+            prepare_record_for_error_message(error, table);
+            table->file->print_error(error,MYF(0));
+            DBUG_RETURN(1);
+          }
+        }
+        else
+        {
+          if (error == HA_ERR_RECORD_IS_THE_SAME)
+          {
+            error= 0;
+            updated--;
+          }
          /* non-transactional or transactional table got modified */
          /* either multi_update class's flag is raised in its branch */
          if (table->file->has_transactions())
@@ -1415,7 +1647,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
           if (table->triggers &&
               table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                                 TRG_ACTION_AFTER, TRUE))
-            DBUG_RETURN(1);
+            DBUG_RETURN(1);
         }
       }
     }
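The HA_ERR_RECORD_IS_THE_SAME handling introduced here treats an engine's "nothing changed" status as success that must not bump the updated counter. A compact model of that bookkeeping (the error constants below are placeholders, not the real values):

    // How a "row unchanged" status is folded into success without
    // counting it as an update.
    #include <cstdio>
    #include <initializer_list>

    constexpr int kOk = 0;
    constexpr int kRecordIsTheSame = 169;   // placeholder value

    int apply_update(bool identical) {
      return identical ? kRecordIsTheSame : kOk;
    }

    int main() {
      unsigned long updated = 0;
      for (bool identical : {false, true, false}) {
        int error = apply_update(identical);
        if (!error || error == kRecordIsTheSame) {
          if (error != kRecordIsTheSame)
            ++updated;                    // only count real changes
          error = 0;                      // a no-op update is not an error
        }
      }
      std::printf("updated=%lu\n", updated);   // prints updated=2
      return 0;
    }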
@@ -1423,10 +1655,6 @@ bool multi_update::send_data(List<Item> &not_used_values)
     {
       int error;
       TABLE *tmp_table= tmp_tables[offset];
-      /* Store regular updated fields in the row. */
-      fill_record(thd,
-                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
-                  *values_for_table[offset], 1);
       /*
        For updatable VIEW store rowid of the updated table and
        rowids of tables used in the CHECK OPTION condition.
@@ -1436,15 +1664,19 @@ bool multi_update::send_data(List<Item> &not_used_values)
       TABLE *tbl= table;
       do
       {
-        if (tbl != table)
-          tbl->file->position(tbl->record[0]);
+        tbl->file->position(tbl->record[0]);
         memcpy((char*) tmp_table->field[field_num]->ptr,
                (char*) tbl->file->ref, tbl->file->ref_length);
         field_num++;
       } while ((tbl= tbl_it++));
 
+      /* Store regular updated fields in the row. */
+      fill_record(thd,
+                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
+                  *values_for_table[offset], 1);
+
       /* Write row, ignoring duplicated updates to a row */
-      error= tmp_table->file->write_row(tmp_table->record[0]);
+      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
       if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
       {
         if (error &&
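This is the deferred path of multi-table UPDATE: for rows that cannot be changed on the fly, each matching row's handler ref (rowid) and new values are written into a per-table temporary table, and do_updates() later re-positions on each rowid and applies the change. A simplified model, with invented types standing in for the temporary table and handler:

    // Model of the deferred-update path: remember (rowid, new value)
    // pairs during the scan, replay them afterwards.
    #include <cstdio>
    #include <map>
    #include <vector>

    using RowId = long;                 // stands in for table->file->ref

    struct PendingUpdate {
      RowId rowid;
      int new_value;
    };

    int main() {
      std::map<RowId, int> table = {{100, 1}, {200, 2}};
      std::vector<PendingUpdate> tmp;   // the "temporary table"

      // send_data(): record positions and new values, don't touch the table.
      tmp.push_back({100, 11});
      tmp.push_back({200, 22});

      // do_updates(): re-position by rowid and apply each stored change.
      for (const PendingUpdate &u : tmp)
        table[u.rowid] = u.new_value;

      for (const auto &kv : table)
        std::printf("row %ld = %d\n", kv.first, kv.second);
      return 0;
    }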
@@ -1466,7 +1698,11 @@ void multi_update::send_error(uint errcode,const char *err)
 {
   /* First send error what ever it is ... */
   my_error(errcode, MYF(0), err);
+}
+
+void multi_update::abort()
+{
   /* the error was handled or nothing deleted and no side effects: return */
   if (error_handled ||
       !thd->transaction.stmt.modified_non_trans_table && !updated)
@@ -1495,7 +1731,7 @@ void multi_update::send_error(uint errcode,const char *err)
         todo/fixme: do_update() is never called with the arg 1.
         should it change the signature to become argless?
       */
-      VOID(do_updates(0));
+      VOID(do_updates());
     }
   }
   if (thd->transaction.stmt.modified_non_trans_table)
@@ -1511,9 +1747,9 @@ void multi_update::send_error(uint errcode,const char *err)
         got caught and if it happens later the killed error is written
         into the repl event.
       */
-      Query_log_event qinfo(thd, thd->query, thd->query_length,
-                            transactional_tables, FALSE);
-      mysql_bin_log.write(&qinfo);
+      thd->binlog_query(THD::ROW_QUERY_TYPE,
+                        thd->query, thd->query_length,
+                        transactional_tables, FALSE);
     }
     thd->transaction.all.modified_non_trans_table= TRUE;
   }
@@ -1526,14 +1762,14 @@
 }
 
 
-int multi_update::do_updates(bool from_send_error)
+int multi_update::do_updates()
 {
   TABLE_LIST *cur_table;
   int local_error= 0;
   ha_rows org_updated;
   TABLE *table, *tmp_table;
   List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
-  DBUG_ENTER("do_updates");
+  DBUG_ENTER("multi_update::do_updates");
 
   do_update= 0;                                 // Don't retry this function
   if (!found)
@@ -1577,8 +1813,10 @@ int multi_update::do_updates()
     if ((local_error = tmp_table->file->ha_rnd_init(1)))
       goto err;
 
-    can_compare_record= !(table->file->table_flags() &
-                          HA_PARTIAL_COLUMN_READ);
+    can_compare_record= (!(table->file->ha_table_flags() &
+                           HA_PARTIAL_COLUMN_READ) ||
+                         bitmap_is_subset(table->write_set,
+                                          table->read_set));
 
     for (;;)
     {
@@ -1601,7 +1839,7 @@ int multi_update::do_updates()
       {
         if((local_error=
               tbl->file->rnd_pos(tbl->record[0],
-                                 (byte *) tmp_table->field[field_num]->ptr)))
+                                 (uchar *) tmp_table->field[field_num]->ptr)))
           goto err;
         field_num++;
       } while((tbl= check_opt_it++));
@@ -1620,7 +1858,7 @@ int multi_update::do_updates()
                                             TRG_ACTION_BEFORE, TRUE))
         goto err2;
 
-      if (!can_compare_record || compare_record(table, thd->query_id))
+      if (!can_compare_record || compare_record(table))
       {
         int error;
         if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1631,13 +1869,18 @@ int multi_update::do_updates()
          else if (error == VIEW_CHECK_ERROR)
            goto err;
        }
-        if ((local_error=table->file->update_row(table->record[1],
-                                                 table->record[0])))
+        if ((local_error=table->file->ha_update_row(table->record[1],
+                                                    table->record[0])) &&
+            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
-          if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY)
+          if (!ignore ||
+              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
-        updated++;
+        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
+          updated++;
+        else
+          local_error= 0;
 
        if (table->triggers &&
            table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
@@ -1666,9 +1909,9 @@ int multi_update::do_updates()
   DBUG_RETURN(0);
 
 err:
-  if (!from_send_error)
   {
     thd->fatal_error();
+    prepare_record_for_error_message(local_error, table);
     table->file->print_error(local_error,MYF(0));
   }
@@ -1698,20 +1941,22 @@
 bool multi_update::send_eof()
 {
   char buff[STRING_BUFFER_USUAL_SIZE];
+  ulonglong id;
   THD::killed_state killed_status= THD::NOT_KILLED;
-  thd->proc_info="updating reference tables";
+  DBUG_ENTER("multi_update::send_eof");
+  thd_proc_info(thd, "updating reference tables");
 
   /*
     Does updates for the last n - 1 tables, returns 0 if ok;
     error takes into account killed status gained in do_updates()
   */
-  int local_error = (table_count) ? do_updates(0) : 0;
+  int local_error = (table_count) ? do_updates() : 0;
   /*
     If local_error is not set until after do_updates(), then a kill
     arriving later should not affect binlogging.
   */
   killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
-  thd->proc_info= "end";
+  thd_proc_info(thd, "end");
 
   /*
     We must invalidate the query cache before binlog writing and
     ha_autocommit_...
   */
@@ -1737,10 +1982,13 @@ bool multi_update::send_eof()
   {
     if (local_error == 0)
       thd->clear_error();
-    Query_log_event qinfo(thd, thd->query, thd->query_length,
-                          transactional_tables, FALSE, killed_status);
-    if (mysql_bin_log.write(&qinfo) && trans_safe)
-      local_error= 1;                           // Rollback update
+    if (thd->binlog_query(THD::ROW_QUERY_TYPE,
+                          thd->query, thd->query_length,
+                          transactional_tables, FALSE, killed_status) &&
+        trans_safe)
+    {
+      local_error= 1;                           // Rollback update
+    }
   }
   if (thd->transaction.stmt.modified_non_trans_table)
     thd->transaction.all.modified_non_trans_table= TRUE;
@@ -1759,15 +2007,15 @@ bool multi_update::send_eof()
     /* Safety: If we haven't got an error before (can happen in do_updates) */
     my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
                MYF(0));
-    return TRUE;
+    DBUG_RETURN(TRUE);
   }
 
-
+  id= thd->arg_of_last_insert_id_function ?
+    thd->first_successful_insert_id_in_prev_stmt : 0;
   sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
           (ulong) thd->cuted_fields);
   thd->row_count_func=
     (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
-  ::send_ok(thd, (ulong) thd->row_count_func,
-            thd->insert_id_used ? thd->last_insert_id : 0L,buff);
-  return FALSE;
+  ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
+  DBUG_RETURN(FALSE);
 }