Diffstat (limited to 'sql')
-rw-r--r-- | sql/field.cc | 25
-rw-r--r-- | sql/filesort.cc | 5
-rw-r--r-- | sql/ha_berkeley.cc | 2
-rw-r--r-- | sql/ha_innodb.cc | 9
-rw-r--r-- | sql/item_timefunc.cc | 30
-rw-r--r-- | sql/log_event.cc | 24
-rw-r--r-- | sql/log_event.h | 4
-rw-r--r-- | sql/mysql_priv.h | 9
-rw-r--r-- | sql/sql_analyse.cc | 15
-rw-r--r-- | sql/sql_base.cc | 28
-rw-r--r-- | sql/sql_class.h | 21
-rw-r--r-- | sql/sql_delete.cc | 3
-rw-r--r-- | sql/sql_insert.cc | 35
-rw-r--r-- | sql/sql_lex.cc | 1
-rw-r--r-- | sql/sql_lex.h | 2
-rw-r--r-- | sql/sql_load.cc | 7
-rw-r--r-- | sql/sql_parse.cc | 27
-rw-r--r-- | sql/sql_prepare.cc | 11
-rw-r--r-- | sql/sql_repl.cc | 4
-rw-r--r-- | sql/sql_repl.h | 2
-rw-r--r-- | sql/sql_select.cc | 2
-rw-r--r-- | sql/sql_show.cc | 8
-rw-r--r-- | sql/sql_table.cc | 12
-rw-r--r-- | sql/sql_union.cc | 4
-rw-r--r-- | sql/sql_update.cc | 22
-rw-r--r-- | sql/sql_yacc.yy | 30 |
26 files changed, 196 insertions, 146 deletions
diff --git a/sql/field.cc b/sql/field.cc index e2c11cc7372..7357bc06f11 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4301,13 +4301,20 @@ int Field_str::store(double nr) char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; uint length; bool use_scientific_notation= TRUE; - use_scientific_notation= TRUE; - if (field_length < 32 && fabs(nr) < log_10[field_length]-1) + /* + Check fabs(nr) against longest value that can be stored in field, + which depends on whether the value is < 1 or not, and negative or not + */ + double anr= fabs(nr); + int neg= (nr < 0.0) ? 1 : 0; + if (field_length > 4 && field_length < 32 && + (anr < 1.0 ? anr > 1/(log_10[max(0,field_length-neg-2)]) /* -2 for "0." */ + : anr < log_10[field_length-neg]-1)) use_scientific_notation= FALSE; length= (uint) my_sprintf(buff, (buff, "%-.*g", (use_scientific_notation ? - max(0, (int)field_length-5) : + max(0, (int)field_length-neg-5) : field_length), nr)); /* @@ -4393,8 +4400,7 @@ void Field_string::sort_string(char *to,uint length) uint tmp=my_strnxfrm(field_charset, (unsigned char *) to, length, (unsigned char *) ptr, field_length); - if (tmp < length) - field_charset->cset->fill(field_charset, to + tmp, length - tmp, ' '); + DBUG_ASSERT(tmp == length); } @@ -4596,9 +4602,7 @@ void Field_varstring::sort_string(char *to,uint length) (uchar*) to, length, (uchar*) ptr+HA_KEY_BLOB_LENGTH, tot_length); - if (tot_length < length) - field_charset->cset->fill(field_charset, to+tot_length,length-tot_length, - binary() ? (char) 0 : ' '); + DBUG_ASSERT(tot_length == length); } @@ -5116,10 +5120,7 @@ void Field_blob::sort_string(char *to,uint length) blob_length=my_strnxfrm(field_charset, (uchar*) to, length, (uchar*) blob, blob_length); - if (blob_length < length) - field_charset->cset->fill(field_charset, to+blob_length, - length-blob_length, - binary() ? (char) 0 : ' '); + DBUG_ASSERT(blob_length == length); } } diff --git a/sql/filesort.cc b/sql/filesort.cc index bd0de022fd4..cb377070aaa 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -622,10 +622,7 @@ static void make_sortkey(register SORTPARAM *param, } uint tmp_length=my_strnxfrm(cs,to,sort_field->length, (unsigned char *) from, length); - if (tmp_length < sort_field->length) - cs->cset->fill(cs, (char*) to+tmp_length, - sort_field->length-tmp_length, - fill_char); + DBUG_ASSERT(tmp_length == sort_field->length); } else { diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 2c7cce4bcd0..e33cd3fca1b 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -165,11 +165,13 @@ bool berkeley_init(void) { db_env->close(db_env,0); /* purecov: inspected */ db_env=0; /* purecov: inspected */ + goto err; } (void) hash_init(&bdb_open_tables,system_charset_info,32,0,0, (hash_get_key) bdb_get_key,0,0); pthread_mutex_init(&bdb_mutex,MY_MUTEX_INIT_FAST); +err: DBUG_RETURN(db_env == 0); } diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index f82b9c03505..502f8eb92f3 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -5267,7 +5267,9 @@ ha_innobase::store_lock( if ((lock_type == TL_READ && thd->in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) || lock_type == TL_READ_WITH_SHARED_LOCKS || - lock_type == TL_READ_NO_INSERT) { + lock_type == TL_READ_NO_INSERT || + thd->lex->sql_command != SQLCOM_SELECT) { + /* The OR cases above are in this order: 1) MySQL is doing LOCK TABLES ... 
READ LOCAL, or 2) (we do not know when TL_READ_HIGH_PRIORITY is used), or @@ -5275,7 +5277,10 @@ ha_innobase::store_lock( 4) we are doing a complex SQL statement like INSERT INTO ... SELECT ... and the logical logging (MySQL binlog) requires the use of a locking read, or - MySQL is doing LOCK TABLES ... READ. */ + MySQL is doing LOCK TABLES ... READ. + 5) we let InnoDB do locking reads for all SQL statements that + are not simple SELECTs; note that select_lock_type in this + case may get strengthened in ::external_lock() to LOCK_X. */ prebuilt->select_lock_type = LOCK_S; prebuilt->stored_select_lock_type = LOCK_S; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 0d652a431cb..f9c9d1f013d 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2432,8 +2432,7 @@ void Item_func_add_time::print(String *str) String *Item_func_timediff::val_str(String *str) { DBUG_ASSERT(fixed == 1); - longlong seconds; - long microseconds; + longlong microseconds; long days; int l_sign= 1; TIME l_time1 ,l_time2, l_time3; @@ -2457,32 +2456,23 @@ String *Item_func_timediff::val_str(String *str) (uint) l_time2.month, (uint) l_time2.day)); - microseconds= l_time1.second_part - l_sign*l_time2.second_part; - seconds= ((longlong) days*86400L + l_time1.hour*3600L + - l_time1.minute*60L + l_time1.second + microseconds/1000000L - - (longlong)l_sign*(l_time2.hour*3600L+l_time2.minute*60L+ - l_time2.second)); + microseconds= ((longlong)days*86400L + + l_time1.hour*3600L + l_time1.minute*60L + l_time1.second - + (longlong)l_sign*(l_time2.hour*3600L + l_time2.minute*60L + + l_time2.second))*1000000 + + l_time1.second_part - l_sign*l_time2.second_part; l_time3.neg= 0; - if (seconds < 0) - { - seconds= -seconds; - l_time3.neg= 1; - } - else if (seconds == 0 && microseconds < 0) + if (microseconds < 0) { microseconds= -microseconds; l_time3.neg= 1; } - if (microseconds < 0) - { - microseconds+= 1000000L; - seconds--; - } - if ((l_time2.neg == l_time1.neg) && l_time1.neg) + if ((l_time2.neg == l_time1.neg) && l_time1.neg && microseconds) l_time3.neg= l_time3.neg ? 0 : 1; - calc_time_from_sec(&l_time3, (long) seconds, microseconds); + calc_time_from_sec(&l_time3, (long)(microseconds/1000000), + (long)(microseconds%1000000)); if (!make_datetime(l_time1.second_part || l_time2.second_part ? TIME_MICROSECOND : TIME_ONLY, diff --git a/sql/log_event.cc b/sql/log_event.cc index c027c3a8ee4..04395f121fd 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1418,7 +1418,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, const char *db_arg, const char *table_name_arg, List<Item> &fields_arg, enum enum_duplicates handle_dup, - bool using_trans) + bool ignore, bool using_trans) :Log_event(thd_arg, !thd_arg->tmp_table_used ? 
0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans), thread_id(thd_arg->thread_id), @@ -1456,9 +1456,6 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, sql_ex.empty_flags= 0; switch (handle_dup) { - case DUP_IGNORE: - sql_ex.opt_flags|= IGNORE_FLAG; - break; case DUP_REPLACE: sql_ex.opt_flags|= REPLACE_FLAG; break; @@ -1466,6 +1463,8 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, case DUP_ERROR: break; } + if (ignore) + sql_ex.opt_flags|= IGNORE_FLAG; if (!ex->field_term->length()) sql_ex.empty_flags |= FIELD_TERM_EMPTY; @@ -1791,6 +1790,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, { char llbuff[22]; enum enum_duplicates handle_dup; + bool ignore= 0; /* Make a simplified LOAD DATA INFILE query, for the information of the user in SHOW PROCESSLIST. Note that db is known in the 'db' column. @@ -1807,21 +1807,24 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, if (sql_ex.opt_flags & REPLACE_FLAG) handle_dup= DUP_REPLACE; else if (sql_ex.opt_flags & IGNORE_FLAG) - handle_dup= DUP_IGNORE; + { + ignore= 1; + handle_dup= DUP_ERROR; + } else { /* When replication is running fine, if it was DUP_ERROR on the - master then we could choose DUP_IGNORE here, because if DUP_ERROR + master then we could choose IGNORE here, because if DUP_ERROR suceeded on master, and data is identical on the master and slave, - then there should be no uniqueness errors on slave, so DUP_IGNORE is + then there should be no uniqueness errors on slave, so IGNORE is the same as DUP_ERROR. But in the unlikely case of uniqueness errors (because the data on the master and slave happen to be different (user error or bug), we want LOAD DATA to print an error message on the slave to discover the problem. If reading from net (a 3.23 master), mysql_load() will change this - to DUP_IGNORE. + to IGNORE. 
*/ handle_dup= DUP_ERROR; } @@ -1855,7 +1858,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, */ thd->net.pkt_nr = net->pkt_nr; } - if (mysql_load(thd, &ex, &tables, field_list, handle_dup, net != 0, + if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, net != 0, TL_WRITE)) thd->query_error = 1; if (thd->cuted_fields) @@ -2747,8 +2750,9 @@ Create_file_log_event:: Create_file_log_event(THD* thd_arg, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, enum enum_duplicates handle_dup, + bool ignore, char* block_arg, uint block_len_arg, bool using_trans) - :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, + :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, ignore, using_trans), fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg), file_id(thd_arg->file_id = mysql_bin_log.next_file_id()) diff --git a/sql/log_event.h b/sql/log_event.h index 8a2334e8574..f848f2ae1b9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -599,7 +599,7 @@ public: Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, - List<Item>& fields_arg, enum enum_duplicates handle_dup, + List<Item>& fields_arg, enum enum_duplicates handle_dup, bool ignore, bool using_trans); void set_fields(const char* db, List<Item> &fields_arg); const char* get_db() { return db; } @@ -908,7 +908,7 @@ public: Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, - enum enum_duplicates handle_dup, + enum enum_duplicates handle_dup, bool ignore, char* block_arg, uint block_len_arg, bool using_trans); #ifdef HAVE_REPLICATION diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c300ea2885e..4b785aafc5f 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -538,6 +538,7 @@ int mysql_alter_table(THD *thd, char *new_db, char *new_name, List<Key> &keys, uint order_num, ORDER *order, enum enum_duplicates handle_duplicates, + bool ignore, ALTER_INFO *alter_info, bool do_send_ok=1); int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); int mysql_create_like_table(THD *thd, TABLE_LIST *table, @@ -557,11 +558,11 @@ int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields, List<Item> &values,COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates); + enum enum_duplicates handle_duplicates, bool ignore); int mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields, List<Item> *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE_LIST *insert_table_list, TABLE *table, @@ -570,7 +571,7 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, List<Item> &update_values, enum_duplicates duplic); int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, List<List_item> &values, List<Item> &update_fields, - List<Item> &update_values, enum_duplicates flag); + List<Item> &update_values, enum_duplicates flag, bool ignore); int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, ha_rows rows, ulong options); @@ -724,6 +725,7 @@ void wait_for_refresh(THD *thd); int open_tables(THD *thd, 
TABLE_LIST *tables, uint *counter); int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables); int open_and_lock_tables(THD *thd,TABLE_LIST *tables); +int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables); void relink_tables_for_derived(THD *thd); int lock_tables(THD *thd, TABLE_LIST *tables, uint counter); TABLE *open_temporary_table(THD *thd, const char *path, const char *db, @@ -759,6 +761,7 @@ bool eval_const_cond(COND *cond); /* sql_load.cc */ int mysql_load(THD *thd,sql_exchange *ex, TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, + bool ignore, bool local_file,thr_lock_type lock_type); int write_record(TABLE *table,COPY_INFO *info); diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 5265857f3b1..33ce62bc5cf 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -1029,20 +1029,19 @@ uint check_ulonglong(const char *str, uint length) } /* check_ulonlong */ - /* - FUNCTION: append_escaped() - + Quote special characters in a string. + + SYNOPSIS + append_escaped(to_str, from_str) + to_str (in) A pointer to a String. + from_str (to) A pointer to an allocated string + DESCRIPTION append_escaped() takes a String type variable, where it appends escaped the second argument. Only characters that require escaping will be escaped. - ARGUMENTS - A pointer to a String variable, where results will be appended - A pointer to a String variable, which is appended to the result - String, escaping those characters that require it. - RETURN VALUES 0 Success 1 Out of memory diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 2f2d9b290ac..263c68a82b7 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1703,6 +1703,34 @@ int open_and_lock_tables(THD *thd, TABLE_LIST *tables) /* + Open all tables in list and process derived tables + + SYNOPSIS + open_normal_and_derived_tables + thd - thread handler + tables - list of tables for open&locking + + RETURN + FALSE - ok + TRUE - error + + NOTE + This is to be used on prepare stage when you don't read any + data from the tables. +*/ + +int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables) +{ + uint counter; + DBUG_ENTER("open_normal_and_derived_tables"); + if (open_tables(thd, tables, &counter)) + DBUG_RETURN(-1); /* purecov: inspected */ + relink_tables_for_derived(thd); + DBUG_RETURN(mysql_handle_derived(thd->lex)); +} + + +/* Let us propagate pointers to open tables from global table list to table lists in particular selects if needed. */ diff --git a/sql/sql_class.h b/sql/sql_class.h index 7978aec8f1d..ce60ed06cfd 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -29,7 +29,7 @@ class Slave_log_event; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; -enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_IGNORE, DUP_UPDATE }; +enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE }; enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN}; enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON, DELAY_KEY_WRITE_ALL }; @@ -201,7 +201,8 @@ typedef struct st_copy_info { ha_rows error_count; enum enum_duplicates handle_duplicates; int escape_char, last_errno; -/* for INSERT ... UPDATE */ + bool ignore; + /* for INSERT ... 
UPDATE */ List<Item> *update_fields; List<Item> *update_values; } COPY_INFO; @@ -1232,19 +1233,21 @@ class select_insert :public select_result_interceptor { COPY_INFO info; select_insert(TABLE *table_par, List<Item> *fields_par, - enum_duplicates duplic) + enum_duplicates duplic, bool ignore) :table(table_par), fields(fields_par), last_insert_id(0) { bzero((char*) &info,sizeof(info)); + info.ignore= ignore; info.handle_duplicates=duplic; } select_insert(TABLE *table_par, List<Item> *fields_par, List<Item> *update_fields, List<Item> *update_values, - enum_duplicates duplic) + enum_duplicates duplic, bool ignore) :table(table_par), fields(fields_par), last_insert_id(0) { bzero((char*) &info,sizeof(info)); - info.handle_duplicates=duplic; + info.ignore= ignore; + info.handle_duplicates= duplic; info.update_fields= update_fields; info.update_values= update_values; } @@ -1273,8 +1276,8 @@ public: HA_CREATE_INFO *create_info_par, List<create_field> &fields_par, List<Key> &keys_par, - List<Item> &select_fields,enum_duplicates duplic) - :select_insert (NULL, &select_fields, duplic), db(db_name), + List<Item> &select_fields,enum_duplicates duplic, bool ignore) + :select_insert (NULL, &select_fields, duplic, ignore), db(db_name), name(table_name), extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par), lock(0) {} @@ -1525,11 +1528,11 @@ class multi_update :public select_result_interceptor uint table_count; Copy_field *copy_field; enum enum_duplicates handle_duplicates; - bool do_update, trans_safe, transactional_tables, log_delayed; + bool do_update, trans_safe, transactional_tables, log_delayed, ignore; public: multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields, - List<Item> *values, enum_duplicates handle_duplicates); + List<Item> *values, enum_duplicates handle_duplicates, bool ignore); ~multi_update(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 29d86a99ff3..8b4a0f0f6d0 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -57,8 +57,7 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, DBUG_RETURN(1); } - if (thd->lex->duplicates == DUP_IGNORE) - thd->lex->select_lex.no_error= 1; + thd->lex->select_lex.no_error= thd->lex->ignore; /* Test if the user wants to delete all rows */ if (!using_limit && const_cond && (!conds || conds->val_int()) && diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 622176e22cc..92c429bcfde 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -22,7 +22,7 @@ static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); -static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, +static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, char *query, uint query_length, int log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); @@ -111,7 +111,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<List_item> &values_list, List<Item> &update_fields, List<Item> &update_values, - enum_duplicates duplic) + enum_duplicates duplic, + bool ignore) { int error, res; /* @@ -222,9 +223,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, */ info.records= info.deleted= info.copied= info.updated= 0; + info.ignore= ignore; info.handle_duplicates=duplic; - info.update_fields=&update_fields; - info.update_values=&update_values; + 
info.update_fields= &update_fields; + info.update_values= &update_values; /* Count warnings for all inserts. For single line insert, generate an error if try to set a NOT NULL field @@ -289,7 +291,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { - error=write_delayed(thd,table,duplic,query, thd->query_length, log_on); + error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on); query=0; } else @@ -395,7 +397,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else { char buff[160]; - if (duplic == DUP_IGNORE) + if (ignore) sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (lock_type == TL_WRITE_DELAYED) ? (ulong) 0 : (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); @@ -591,7 +593,7 @@ int write_record(TABLE *table,COPY_INFO *info) } else if ((error=table->file->write_row(table->record[0]))) { - if (info->handle_duplicates != DUP_IGNORE || + if (!info->ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; } @@ -647,14 +649,14 @@ public: char *record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used; + bool query_start_used,last_insert_id_used,insert_id_used, ignore; int log_query; ulonglong last_insert_id; timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, int log_query_arg) - :record(0),query(0),dup(dup_arg),log_query(log_query_arg) {} + delayed_row(enum_duplicates dup_arg, bool ignore_arg, int log_query_arg) + :record(0),query(0),dup(dup_arg),ignore(ignore_arg),log_query(log_query_arg) {} ~delayed_row() { x_free(record); @@ -966,7 +968,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ -static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, +static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore, char *query, uint query_length, int log_on) { delayed_row *row=0; @@ -979,7 +981,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, pthread_cond_wait(&di->cond_client,&di->mutex); thd->proc_info="storing row into queue"; - if (thd->killed || !(row= new delayed_row(duplic, log_on))) + if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on))) goto err; if (!query) @@ -1340,8 +1342,9 @@ bool delayed_insert::handle_inserts(void) thd.insert_id_used=row->insert_id_used; table->timestamp_field_type= row->timestamp_field_type; + info.ignore= row->ignore; info.handle_duplicates= row->dup; - if (info.handle_duplicates == DUP_IGNORE || + if (info.ignore || info.handle_duplicates == DUP_REPLACE) { table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); @@ -1459,7 +1462,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) restore_record(table,default_values); // Get empty record table->next_number_field=table->found_next_number_field; thd->cuted_fields=0; - if (info.handle_duplicates == DUP_IGNORE || + if (info.ignore || info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); @@ -1601,7 +1604,7 @@ bool select_insert::send_eof() DBUG_RETURN(1); } char buff[160]; - if (info.handle_duplicates == DUP_IGNORE) + if (info.ignore) sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); else @@ -1645,7 +1648,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) 
restore_record(table,default_values); // Get empty record thd->cuted_fields=0; - if (info.handle_duplicates == DUP_IGNORE || + if (info.ignore || info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index d2ac0df1472..5730073bd35 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -158,6 +158,7 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); lex->sql_command=SQLCOM_END; lex->duplicates= DUP_ERROR; + lex->ignore= 0; lex->proc_list.first= 0; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 8421be7e735..e2e0bc61c23 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -618,7 +618,7 @@ typedef struct st_lex bool in_comment, ignore_space, verbose, no_write_to_binlog; bool derived_tables; bool safe_to_cache_query; - bool subqueries; + bool subqueries, ignore; ALTER_INFO alter_info; /* Prepared statements SQL syntax:*/ LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */ diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 17ab472c87b..c4f5b1427af 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -80,6 +80,7 @@ static int read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, + bool ignore, bool read_file_from_client,thr_lock_type lock_type) { char name[FN_REFLEN]; @@ -165,7 +166,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, /* We can't give an error in the middle when using LOCAL files */ if (read_file_from_client && handle_duplicates == DUP_ERROR) - handle_duplicates=DUP_IGNORE; + ignore= 1; #ifndef EMBEDDED_LIBRARY if (read_file_from_client) @@ -216,6 +217,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, COPY_INFO info; bzero((char*) &info,sizeof(info)); + info.ignore= ignore; info.handle_duplicates=handle_duplicates; info.escape_char=escaped->length() ? 
(*escaped)[0] : INT_MAX; @@ -237,6 +239,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, lf_info.db = db; lf_info.table_name = table_list->real_name; lf_info.fields = &fields; + lf_info.ignore= ignore; lf_info.handle_dup = handle_duplicates; lf_info.wrote_create_file = 0; lf_info.last_pos_in_file = HA_POS_ERROR; @@ -267,7 +270,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; table->next_number_field=table->found_next_number_field; - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); ha_enable_transaction(thd, FALSE); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index cd8e73c446d..c81aefc9cea 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -64,7 +64,7 @@ const char *command_name[]={ "Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist", "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", "Binlog Dump","Table Dump", "Connect Out", "Register Slave", - "Prepare", "Prepare Execute", "Long Data", "Close stmt", + "Prepare", "Execute", "Long Data", "Close stmt", "Reset stmt", "Set option", "Error" // Last command number }; @@ -1547,7 +1547,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, packet, (uint) (pend-packet), thd->charset()); table_list.alias= table_list.real_name= conv_name.str; packet= pend+1; - // command not cachable => no gap for data base name + thd->query_length= strlen(packet); // for simplicity: don't optimize if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; mysql_log.write(thd,command,"%s %s",table_list.real_name,fields); @@ -2393,7 +2393,8 @@ mysql_execute_command(THD *thd) &lex->create_info, lex->create_list, lex->key_list, - select_lex->item_list,lex->duplicates))) + select_lex->item_list, lex->duplicates, + lex->ignore))) { /* CREATE from SELECT give its SELECT_LEX for SELECT, @@ -2532,7 +2533,7 @@ unsent_create_error: lex->key_list, select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, - lex->duplicates, &lex->alter_info); + lex->duplicates, lex->ignore, &lex->alter_info); } break; } @@ -2694,7 +2695,7 @@ unsent_create_error: select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, select_lex->select_limit, - lex->duplicates); + lex->duplicates, lex->ignore); if (thd->net.report_error) res= -1; break; @@ -2707,7 +2708,7 @@ unsent_create_error: &lex->value_list, select_lex->where, select_lex->options, - lex->duplicates, unit, select_lex); + lex->duplicates, lex->ignore, unit, select_lex); break; } case SQLCOM_REPLACE: @@ -2715,9 +2716,9 @@ unsent_create_error: { if ((res= insert_precheck(thd, tables))) break; - res = mysql_insert(thd,tables,lex->field_list,lex->many_values, - lex->update_list, lex->value_list, - lex->duplicates); + res= mysql_insert(thd,tables,lex->field_list,lex->many_values, + lex->update_list, lex->value_list, + lex->duplicates, lex->ignore); if (thd->net.report_error) res= -1; break; @@ -2755,7 +2756,7 @@ unsent_create_error: lex->duplicates)) && (result= new select_insert(tables->table, &lex->field_list, &lex->update_list, &lex->value_list, - lex->duplicates))) + lex->duplicates, lex->ignore))) { TABLE *table= tables->table; /* Skip first table, which is the table we are inserting in */ @@ -3065,7 +3066,7 @@ unsent_create_error: goto error; } res=mysql_load(thd, lex->exchange, tables, lex->field_list, - lex->duplicates, (bool) lex->local_file, lex->lock_option); 
+ lex->duplicates, lex->ignore, (bool) lex->local_file, lex->lock_option); break; } @@ -5118,7 +5119,7 @@ int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, &alter_info)); + DUP_ERROR, 0, &alter_info)); } @@ -5137,7 +5138,7 @@ int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, alter_info)); + DUP_ERROR, 0, alter_info)); } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 20ebc23e240..1dc46aef4da 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -897,8 +897,12 @@ static int mysql_test_insert(Prepared_statement *stmt, /* open temporary memory pool for temporary data allocated by derived tables & preparation procedure + Note that this is done without locks (should not be needed as we will not + access any data here) + If we would use locks, then we have to ensure we are not using + TL_WRITE_DELAYED as having two such locks can cause table corruption. */ - if (open_and_lock_tables(thd, table_list)) + if (open_normal_and_derived_tables(thd, table_list)) { DBUG_RETURN(-1); } @@ -1592,7 +1596,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, DBUG_RETURN(1); } - mysql_log.write(thd, COM_PREPARE, "%s", packet); + mysql_log.write(thd, COM_PREPARE, "[%lu] %s", stmt->id, packet); thd->current_arena= stmt; mysql_init_query(thd, (uchar *) thd->query, thd->query_length); @@ -1792,6 +1796,9 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query)) goto set_params_data_err; #endif + mysql_log.write(thd, COM_EXECUTE, "[%lu] %s", stmt->id, + expanded_query.length() ? 
expanded_query.c_ptr() : + stmt->query); thd->protocol= &thd->protocol_prep; // Switch to binary protocol execute_stmt(thd, stmt, &expanded_query, TRUE); thd->protocol= &thd->protocol_simple; // Use normal protocol diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 2f0d8d3aa0d..fd165ad1fa5 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1380,7 +1380,7 @@ err: int log_loaded_block(IO_CACHE* file) { - LOAD_FILE_INFO* lf_info; + LOAD_FILE_INFO *lf_info; uint block_len ; /* file->request_pos contains position where we started last read */ @@ -1402,7 +1402,7 @@ int log_loaded_block(IO_CACHE* file) { Create_file_log_event c(lf_info->thd,lf_info->ex,lf_info->db, lf_info->table_name, *lf_info->fields, - lf_info->handle_dup, buffer, + lf_info->handle_dup, lf_info->ignore, buffer, block_len, lf_info->log_delayed); mysql_bin_log.write(&c); lf_info->wrote_create_file = 1; diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 3c17540b664..21b3d2955f7 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -67,7 +67,7 @@ typedef struct st_load_file_info enum enum_duplicates handle_dup; char* db; char* table_name; - bool wrote_create_file, log_delayed; + bool wrote_create_file, log_delayed, ignore; } LOAD_FILE_INFO; int log_loaded_block(IO_CACHE* file); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 610a98d1983..03e322d28ee 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -468,7 +468,7 @@ JOIN::optimize() optimized= 1; // Ignore errors of execution if option IGNORE present - if (thd->lex->duplicates == DUP_IGNORE) + if (thd->lex->ignore) thd->lex->current_select->no_error= 1; #ifdef HAVE_REF_TO_FIELDS // Not done yet /* Add HAVING to WHERE if possible */ diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 88a56d26e35..8929872c466 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1404,14 +1404,18 @@ store_create_info(THD *thd, TABLE *table, String *packet) if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) && !limited_mysql_mode && !foreign_db_mode) { - if (table->db_type == DB_TYPE_HEAP && - key_info->algorithm == HA_KEY_ALG_BTREE) + if (key_info->algorithm == HA_KEY_ALG_BTREE) packet->append(" TYPE BTREE", 11); + if (key_info->algorithm == HA_KEY_ALG_HASH) + packet->append(" TYPE HASH", 10); + // +BAR: send USING only in non-default case: non-spatial rtree if ((key_info->algorithm == HA_KEY_ALG_RTREE) && !(key_info->flags & HA_SPATIAL)) packet->append(" TYPE RTREE", 11); + + // No need to send TYPE FULLTEXT, it is sent as FULLTEXT KEY } packet->append(" (", 2); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f9f635081cb..c3bfbe086f1 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -36,6 +36,7 @@ static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); static int copy_data_between_tables(TABLE *from,TABLE *to, List<create_field> &create, enum enum_duplicates handle_duplicates, + bool ignore, uint order_num, ORDER *order, ha_rows *copied,ha_rows *deleted); @@ -2686,7 +2687,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, TABLE_LIST *table_list, List<create_field> &fields, List<Key> &keys, uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, ALTER_INFO *alter_info, bool do_send_ok) { TABLE *table,*new_table; @@ -3205,7 +3206,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, copied=deleted=0; if (!new_table->is_view) error=copy_data_between_tables(table,new_table,create_list, - handle_duplicates, + handle_duplicates, ignore, order_num, 
order, &copied, &deleted); thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -3425,6 +3426,7 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, List<create_field> &create, enum enum_duplicates handle_duplicates, + bool ignore, uint order_num, ORDER *order, ha_rows *copied, ha_rows *deleted) @@ -3517,7 +3519,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, current query id */ from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); thd->row_count= 0; @@ -3543,7 +3545,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, } if ((error=to->file->write_row((byte*) to->record[0]))) { - if ((handle_duplicates != DUP_IGNORE && + if ((!ignore && handle_duplicates != DUP_REPLACE) || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) @@ -3619,7 +3621,7 @@ int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, lex->create_list, lex->key_list, 0, (ORDER *) 0, - DUP_ERROR, &lex->alter_info, do_send_ok)); + DUP_ERROR, 0, &lex->alter_info, do_send_ok)); } diff --git a/sql/sql_union.cc b/sql/sql_union.cc index b35209faeb2..f89b234f5b0 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -45,10 +45,10 @@ select_union::select_union(TABLE *table_par) { bzero((char*) &info,sizeof(info)); /* - We can always use DUP_IGNORE because the temporary table will only + We can always use IGNORE because the temporary table will only contain a unique key if we are using not using UNION ALL */ - info.handle_duplicates= DUP_IGNORE; + info.ignore= 1; } select_union::~select_union() diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 2b27cff13e2..761e8d6de8b 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -54,7 +54,8 @@ int mysql_update(THD *thd, COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates) + enum enum_duplicates handle_duplicates, + bool ignore) { bool using_limit=limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; @@ -274,7 +275,7 @@ int mysql_update(THD *thd, } } - if (handle_duplicates == DUP_IGNORE) + if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); init_read_record(&info,thd,table,select,0,1); @@ -299,8 +300,7 @@ int mysql_update(THD *thd, { updated++; } - else if (handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); @@ -476,7 +476,7 @@ int mysql_multi_update(THD *thd, List<Item> *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { int res; @@ -667,7 +667,7 @@ int mysql_multi_update(THD *thd, } if (!(result=new multi_update(thd, update_list, fields, values, - handle_duplicates))) + handle_duplicates, ignore))) DBUG_RETURN(-1); res= mysql_select(thd, &select_lex->ref_pointer_array, @@ -684,11 +684,11 @@ int mysql_multi_update(THD *thd, multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, List<Item> *field_list, List<Item> *value_list, - enum enum_duplicates handle_duplicates_arg) + enum enum_duplicates handle_duplicates_arg, bool ignore_arg) :all_tables(table_list), 
update_tables(0), thd(thd_arg), tmp_tables(0), updated(0), found(0), fields(field_list), values(value_list), table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg), - do_update(1), trans_safe(0), transactional_tables(1) + do_update(1), trans_safe(0), transactional_tables(1), ignore(ignore_arg) {} @@ -1021,8 +1021,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) table->record[0]))) { updated--; - if (handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); @@ -1154,8 +1153,7 @@ int multi_update::do_updates(bool from_send_error) if ((local_error=table->file->update_row(table->record[1], table->record[0]))) { - if (local_error != HA_ERR_FOUND_DUPP_KEY || - handle_duplicates != DUP_IGNORE) + if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY) goto err; } updated++; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a09694ee1e6..4fcc72bc90e 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1415,7 +1415,7 @@ type: | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; } | DATE_SYM { $$=FIELD_TYPE_DATE; } | TIME_SYM { $$=FIELD_TYPE_TIME; } - | TIMESTAMP + | TIMESTAMP opt_len { if (YYTHD->variables.sql_mode & MODE_MAXDB) $$=FIELD_TYPE_DATETIME; @@ -1428,13 +1428,6 @@ type: $$=FIELD_TYPE_TIMESTAMP; } } - | TIMESTAMP '(' NUM ')' - { - LEX *lex= Lex; - lex->length= $3.str; - lex->type|= NOT_NULL_FLAG; - $$= FIELD_TYPE_TIMESTAMP; - } | DATETIME { $$=FIELD_TYPE_DATETIME; } | TINYBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_TINY_BLOB; } @@ -1849,8 +1842,9 @@ alter: { THD *thd= YYTHD; LEX *lex= thd->lex; - lex->sql_command = SQLCOM_ALTER_TABLE; - lex->name=0; + lex->sql_command= SQLCOM_ALTER_TABLE; + lex->name= 0; + lex->duplicates= DUP_ERROR; if (!lex->select_lex.add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING)) YYABORT; @@ -2035,8 +2029,9 @@ opt_column: | COLUMN_SYM {}; opt_ignore: - /* empty */ { Lex->duplicates=DUP_ERROR; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + /* empty */ { Lex->ignore= 0;} + | IGNORE_SYM { Lex->ignore= 1;} + ; opt_restrict: /* empty */ {} @@ -4012,7 +4007,8 @@ insert: INSERT { LEX *lex= Lex; - lex->sql_command = SQLCOM_INSERT; + lex->sql_command= SQLCOM_INSERT; + lex->duplicates= DUP_ERROR; /* for subselects */ lex->lock_option= (using_update_log) ? 
TL_READ_NO_INSERT : TL_READ; lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; @@ -4174,6 +4170,7 @@ update: mysql_init_select(lex); lex->sql_command= SQLCOM_UPDATE; lex->lock_option= TL_UNLOCK; /* Will be set later */ + lex->duplicates= DUP_ERROR; } opt_low_priority opt_ignore join_table_list SET update_list @@ -4233,6 +4230,7 @@ delete: LEX *lex= Lex; lex->sql_command= SQLCOM_DELETE; lex->lock_option= lex->thd->update_lock_default; + lex->ignore= 0; lex->select_lex.init_order(); } opt_delete_options single_multi {} @@ -4289,7 +4287,7 @@ opt_delete_options: opt_delete_option: QUICK { Select->options|= OPTION_QUICK; } | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; } - | IGNORE_SYM { Lex->duplicates= DUP_IGNORE; }; + | IGNORE_SYM { Lex->ignore= 1; }; truncate: TRUNCATE_SYM opt_table_sym table_name @@ -4698,6 +4696,8 @@ load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING_sys lex->sql_command= SQLCOM_LOAD; lex->lock_option= $3; lex->local_file= $4; + lex->duplicates= DUP_ERROR; + lex->ignore= 0; if (!(lex->exchange= new sql_exchange($6.str,0))) YYABORT; lex->field_list.empty(); @@ -4735,7 +4735,7 @@ load_data_lock: opt_duplicate: /* empty */ { Lex->duplicates=DUP_ERROR; } | REPLACE { Lex->duplicates=DUP_REPLACE; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + | IGNORE_SYM { Lex->ignore= 1; }; opt_field_term: /* empty */ |
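
The sql/field.cc hunk replaces the old blanket "field_length < 32" test in Field_str::store() with one that accounts for the sign character and, for values below 1, the leading "0.". Below is a self-contained sketch of that decision, using pow() in place of the server's precomputed log_10[] table so it compiles on its own; fits_without_scientific() is a hypothetical name for illustration, while the -neg and -2 corrections are exactly the ones introduced by the patch.

  #include <cmath>

  /* true  -> the value fits in plain %g form within field_length characters
     false -> fall back to scientific notation, as before the patch        */
  static bool fits_without_scientific(double nr, unsigned field_length)
  {
    double anr= fabs(nr);
    int neg= (nr < 0.0) ? 1 : 0;           /* a leading '-' costs one character */
    if (field_length <= 4 || field_length >= 32)
      return false;                        /* outside the range the test covers */
    if (anr < 1.0)                         /* "0." prefix costs two more chars  */
      return anr > 1.0 / pow(10.0, (double) (field_length - neg - 2));
    return anr < pow(10.0, (double) (field_length - neg)) - 1;
  }

For a CHAR(6) column and nr = -0.01 this returns true (0.01 > 1/10^(6-1-2) = 0.001), so the value is stored as the five characters "-0.01" instead of being forced through scientific notation.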
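In the sql/item_timefunc.cc hunk the separate seconds/microseconds bookkeeping in Item_func_timediff::val_str(), with its own sign and borrow fix-ups, collapses into a single signed 64-bit microsecond value. A self-contained sketch of that arithmetic follows; time_parts and diff_in_microseconds() are illustrative stand-ins for the server's TIME struct and the inline code in the hunk, with days and l_sign computed beforehand exactly as the patch does.

  #include <cstdint>

  struct time_parts { long hour, minute, second, second_part; };

  static int64_t diff_in_microseconds(long days, const time_parts &t1,
                                      const time_parts &t2, int l_sign)
  {
    int64_t seconds=
        (int64_t) days * 86400
        + t1.hour * 3600L + t1.minute * 60L + t1.second
        - (int64_t) l_sign * (t2.hour * 3600L + t2.minute * 60L + t2.second);
    /* fold the fractional parts in while everything is still signed */
    return seconds * 1000000 + t1.second_part - (int64_t) l_sign * t2.second_part;
  }

The caller then needs a single sign test: if the result is negative, negate it and set l_time3.neg, then split it back with /1000000 and %1000000 for calc_time_from_sec() — the same steps the removed borrow-handling code was emulating with separate seconds and microseconds variables.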
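The common thread through the sql_class.h, sql_insert.cc, sql_load.cc, sql_update.cc, sql_table.cc and sql_yacc.yy hunks is that IGNORE stops being a value of enum_duplicates (DUP_IGNORE is removed) and becomes a separate bool carried in LEX, COPY_INFO and the Load/Create_file log events. The condensed, self-contained sketch below shows the duplicate-key decision those call sites converge on; on_write_error() and copy_info_sketch are hypothetical names used only for illustration, while enum_duplicates, handle_duplicates and ignore are the fields from the patch.

  enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };  /* no DUP_IGNORE any more */

  struct copy_info_sketch
  {
    enum_duplicates handle_duplicates;  /* REPLACE / ON DUPLICATE KEY UPDATE / plain */
    bool ignore;                        /* the new, orthogonal IGNORE flag */
  };

  /* What to do when the handler reports an error from write_row()/update_row() */
  static int on_write_error(int error, bool duplicate_key_error,
                            const copy_info_sketch &info)
  {
    if (!duplicate_key_error)
      return error;                      /* unrelated engine error: always fatal */
    if (info.handle_duplicates == DUP_REPLACE ||
        info.handle_duplicates == DUP_UPDATE)
      return 0;                          /* resolved by the REPLACE/UPDATE path */
    return info.ignore ? 0 : error;      /* IGNORE skips the row, DUP_ERROR reports it */
  }

Keeping the flag orthogonal to the enum is what lets LOAD DATA ... IGNORE, UPDATE IGNORE and ALTER IGNORE TABLE pass it through mysql_load(), mysql_update() and mysql_alter_table() alongside the unrelated REPLACE / ON DUPLICATE KEY UPDATE choice, instead of overloading enum_duplicates.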