summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
Diffstat (limited to 'sql')
-rw-r--r--sql/ha_ndbcluster.cc2
-rw-r--r--sql/item_timefunc.cc19
-rw-r--r--sql/log_event.cc24
-rw-r--r--sql/log_event.h4
-rw-r--r--sql/mysql_priv.h11
-rw-r--r--sql/set_var.cc2
-rw-r--r--sql/sql_class.h16
-rw-r--r--sql/sql_delete.cc3
-rw-r--r--sql/sql_insert.cc40
-rw-r--r--sql/sql_lex.cc1
-rw-r--r--sql/sql_lex.h2
-rw-r--r--sql/sql_load.cc24
-rw-r--r--sql/sql_parse.cc23
-rw-r--r--sql/sql_repl.cc4
-rw-r--r--sql/sql_repl.h2
-rw-r--r--sql/sql_select.cc2
-rw-r--r--sql/sql_table.cc12
-rw-r--r--sql/sql_union.cc4
-rw-r--r--sql/sql_update.cc21
-rw-r--r--sql/sql_yacc.yy21
20 files changed, 129 insertions, 108 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 4514974c289..84e285fa360 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2683,7 +2683,7 @@ void ha_ndbcluster::info(uint flag)
if ((my_errno= check_ndb_connection()))
DBUG_VOID_RETURN;
Ndb *ndb= get_ndb();
- Uint64 rows;
+ Uint64 rows= 100;
if (current_thd->variables.ndb_use_exact_count)
ndb_get_table_statistics(ndb, m_tabname, &rows, 0);
records= rows;
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 27c000138d8..2d0e5d7632f 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1603,6 +1603,7 @@ void Item_func_from_unixtime::fix_length_and_dec()
collation.set(&my_charset_bin);
decimals=0;
max_length=MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+ maybe_null= 1;
thd->time_zone_used= 1;
}
@@ -1642,11 +1643,12 @@ longlong Item_func_from_unixtime::val_int()
bool Item_func_from_unixtime::get_date(TIME *ltime,
uint fuzzy_date __attribute__((unused)))
{
- longlong tmp= args[0]->val_int();
-
- if ((null_value= (args[0]->null_value ||
- tmp < TIMESTAMP_MIN_VALUE ||
- tmp > TIMESTAMP_MAX_VALUE)))
+ ulonglong tmp= (ulonglong)(args[0]->val_int());
+ /*
+ "tmp > TIMESTAMP_MAX_VALUE" check also covers case of negative
+ from_unixtime() argument since tmp is unsigned.
+ */
+ if ((null_value= (args[0]->null_value || tmp > TIMESTAMP_MAX_VALUE)))
return 1;
thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)tmp);
@@ -2202,6 +2204,12 @@ String *Item_datetime_typecast::val_str(String *str)
bool Item_time_typecast::get_time(TIME *ltime)
{
bool res= get_arg0_time(ltime);
+ /*
+ For MYSQL_TIMESTAMP_TIME value we can have non-zero day part,
+ which we should not lose.
+ */
+ if (ltime->time_type == MYSQL_TIMESTAMP_DATETIME)
+ ltime->year= ltime->month= ltime->day= 0;
ltime->time_type= MYSQL_TIMESTAMP_TIME;
return res;
}
@@ -2225,6 +2233,7 @@ String *Item_time_typecast::val_str(String *str)
bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date)
{
bool res= get_arg0_date(ltime, TIME_FUZZY_DATE);
+ ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
ltime->time_type= MYSQL_TIMESTAMP_DATE;
return res;
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 581d3ef0d21..2791f482ad6 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -2128,7 +2128,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex,
const char *db_arg, const char *table_name_arg,
List<Item> &fields_arg,
enum enum_duplicates handle_dup,
- bool using_trans)
+ bool ignore, bool using_trans)
:Log_event(thd_arg, !thd_arg->tmp_table_used ?
0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans),
thread_id(thd_arg->thread_id),
@@ -2166,9 +2166,6 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex,
sql_ex.empty_flags= 0;
switch (handle_dup) {
- case DUP_IGNORE:
- sql_ex.opt_flags|= IGNORE_FLAG;
- break;
case DUP_REPLACE:
sql_ex.opt_flags|= REPLACE_FLAG;
break;
@@ -2176,6 +2173,8 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex,
case DUP_ERROR:
break;
}
+ if (ignore)
+ sql_ex.opt_flags|= IGNORE_FLAG;
if (!ex->field_term->length())
sql_ex.empty_flags |= FIELD_TERM_EMPTY;
@@ -2511,6 +2510,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
{
char llbuff[22];
enum enum_duplicates handle_dup;
+ bool ignore= 0;
/*
Make a simplified LOAD DATA INFILE query, for the information of the
user in SHOW PROCESSLIST. Note that db is known in the 'db' column.
@@ -2527,21 +2527,24 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
if (sql_ex.opt_flags & REPLACE_FLAG)
handle_dup= DUP_REPLACE;
else if (sql_ex.opt_flags & IGNORE_FLAG)
- handle_dup= DUP_IGNORE;
+ {
+ ignore= 1;
+ handle_dup= DUP_ERROR;
+ }
else
{
/*
When replication is running fine, if it was DUP_ERROR on the
- master then we could choose DUP_IGNORE here, because if DUP_ERROR
+ master then we could choose IGNORE here, because if DUP_ERROR
succeeded on master, and data is identical on the master and slave,
- then there should be no uniqueness errors on slave, so DUP_IGNORE is
+ then there should be no uniqueness errors on slave, so IGNORE is
the same as DUP_ERROR. But in the unlikely case of uniqueness errors
(because the data on the master and slave happen to be different
(user error or bug), we want LOAD DATA to print an error message on
the slave to discover the problem.
If reading from net (a 3.23 master), mysql_load() will change this
- to DUP_IGNORE.
+ to IGNORE.
*/
handle_dup= DUP_ERROR;
}
@@ -2575,7 +2578,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
*/
thd->net.pkt_nr = net->pkt_nr;
}
- if (mysql_load(thd, &ex, &tables, field_list, handle_dup, net != 0,
+ if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, net != 0,
TL_WRITE, 0))
thd->query_error = 1;
if (thd->cuted_fields)
@@ -3495,8 +3498,9 @@ Create_file_log_event::
Create_file_log_event(THD* thd_arg, sql_exchange* ex,
const char* db_arg, const char* table_name_arg,
List<Item>& fields_arg, enum enum_duplicates handle_dup,
+ bool ignore,
char* block_arg, uint block_len_arg, bool using_trans)
- :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup,
+ :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, ignore,
using_trans),
fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg),
file_id(thd_arg->file_id = mysql_bin_log.next_file_id())
diff --git a/sql/log_event.h b/sql/log_event.h
index 390a8c8070d..64bb9d502e9 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -783,7 +783,7 @@ public:
Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
const char* table_name_arg,
- List<Item>& fields_arg, enum enum_duplicates handle_dup,
+ List<Item>& fields_arg, enum enum_duplicates handle_dup, bool ignore,
bool using_trans);
void set_fields(const char* db, List<Item> &fields_arg);
const char* get_db() { return db; }
@@ -1170,7 +1170,7 @@ public:
Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
const char* table_name_arg,
List<Item>& fields_arg,
- enum enum_duplicates handle_dup,
+ enum enum_duplicates handle_dup, bool ignore,
char* block_arg, uint block_len_arg,
bool using_trans);
#ifdef HAVE_REPLICATION
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c5568bf52a4..01e2510328d 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -613,6 +613,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates,
+ bool ignore,
ALTER_INFO *alter_info, bool do_send_ok=1);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
@@ -631,11 +632,11 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields,
List<Item> &values,COND *conds,
uint order_num, ORDER *order, ha_rows limit,
- enum enum_duplicates handle_duplicates);
+ enum enum_duplicates handle_duplicates, bool ignore);
bool mysql_multi_update(THD *thd, TABLE_LIST *table_list,
List<Item> *fields, List<Item> *values,
COND *conds, ulong options,
- enum enum_duplicates handle_duplicates,
+ enum enum_duplicates handle_duplicates, bool ignore,
SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex);
bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table,
List<Item> &fields, List_item *values,
@@ -644,7 +645,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table,
COND **where, bool select_insert);
bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
List<List_item> &values, List<Item> &update_fields,
- List<Item> &update_values, enum_duplicates flag);
+ List<Item> &update_values, enum_duplicates flag,
+ bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order,
@@ -882,8 +884,7 @@ bool eval_const_cond(COND *cond);
/* sql_load.cc */
bool mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list,
List<Item> &fields, enum enum_duplicates handle_duplicates,
- bool local_file, thr_lock_type lock_type,
- bool ignore_check_option_errors);
+ bool ignore, bool local_file, thr_lock_type lock_type);
int write_record(THD *thd, TABLE *table, COPY_INFO *info);
/* sql_manager.cc */
diff --git a/sql/set_var.cc b/sql/set_var.cc
index fbbe209eea4..d710be2bb2e 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -3054,7 +3054,7 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var)
enum db_type db_type;
if (!(res=var->value->val_str(&str)) ||
!(var->save_result.ulong_value=
- (ulong) db_type= ha_resolve_by_name(res->ptr(), res->length())) ||
+ (ulong) (db_type= ha_resolve_by_name(res->ptr(), res->length()))) ||
ha_checktype(db_type) != db_type)
{
value= res ? res->c_ptr() : "NULL";
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2a0e0aac8b4..1cf9f5b3f48 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -32,7 +32,7 @@ class sp_cache;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
-enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_IGNORE, DUP_UPDATE };
+enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN};
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
@@ -225,7 +225,8 @@ typedef struct st_copy_info {
ha_rows error_count;
enum enum_duplicates handle_duplicates;
int escape_char, last_errno;
-/* for INSERT ... UPDATE */
+ bool ignore;
+ /* for INSERT ... UPDATE */
List<Item> *update_fields;
List<Item> *update_values;
/* for VIEW ... WITH CHECK OPTION */
@@ -1375,8 +1376,7 @@ class select_insert :public select_result_interceptor {
select_insert(TABLE_LIST *table_list_par,
TABLE *table_par, List<Item> *fields_par,
List<Item> *update_fields, List<Item> *update_values,
- enum_duplicates duplic,
- bool ignore_check_option_errors);
+ enum_duplicates duplic, bool ignore);
~select_insert();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
@@ -1401,8 +1401,8 @@ public:
HA_CREATE_INFO *create_info_par,
List<create_field> &fields_par,
List<Key> &keys_par,
- List<Item> &select_fields,enum_duplicates duplic)
- :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, 0), create_table(table),
+ List<Item> &select_fields,enum_duplicates duplic, bool ignore)
+ :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table),
extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par),
lock(0)
{}
@@ -1673,12 +1673,12 @@ class multi_update :public select_result_interceptor
uint table_count;
Copy_field *copy_field;
enum enum_duplicates handle_duplicates;
- bool do_update, trans_safe, transactional_tables, log_delayed;
+ bool do_update, trans_safe, transactional_tables, log_delayed, ignore;
public:
multi_update(THD *thd_arg, TABLE_LIST *ut, TABLE_LIST *leaves_list,
List<Item> *fields, List<Item> *values,
- enum_duplicates handle_duplicates);
+ enum_duplicates handle_duplicates, bool ignore);
~multi_update();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index f4c5b0f8b59..8edfc08fa82 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -68,8 +68,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
DBUG_RETURN(TRUE);
}
- if (thd->lex->duplicates == DUP_IGNORE)
- select_lex->no_error= 1;
+ select_lex->no_error= thd->lex->ignore;
/*
Test if the user wants to delete all rows and deletion doesn't have
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 701ffe60cb3..2a6e772db32 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -25,7 +25,7 @@
static int check_null_fields(THD *thd,TABLE *entry);
#ifndef EMBEDDED_LIBRARY
static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list);
-static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup,
+static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore,
char *query, uint query_length, bool log_on);
static void end_delayed_insert(THD *thd);
extern "C" pthread_handler_decl(handle_delayed_insert,arg);
@@ -158,7 +158,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<List_item> &values_list,
List<Item> &update_fields,
List<Item> &update_values,
- enum_duplicates duplic)
+ enum_duplicates duplic,
+ bool ignore)
{
int error, res;
/*
@@ -168,7 +169,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
*/
bool log_on= (thd->options & OPTION_BIN_LOG) || (!(thd->master_access & SUPER_ACL));
bool transactional_table, log_delayed;
- bool ignore_err= (thd->lex->duplicates == DUP_IGNORE);
uint value_count;
ulong counter = 1;
ulonglong id;
@@ -271,11 +271,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
*/
info.records= info.deleted= info.copied= info.updated= 0;
+ info.ignore= ignore;
info.handle_duplicates=duplic;
- info.update_fields=&update_fields;
- info.update_values=&update_values;
+ info.update_fields= &update_fields;
+ info.update_values= &update_values;
info.view= (table_list->view ? table_list : 0);
- info.ignore= ignore_err;
+
/*
Count warnings for all inserts.
For single line insert, generate an error if try to set a NOT NULL field
@@ -366,7 +367,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if ((res= table_list->view_check_option(thd,
(values_list.elements == 1 ?
0 :
- ignore_err))) ==
+ ignore))) ==
VIEW_CHECK_SKIP)
continue;
else if (res == VIEW_CHECK_ERROR)
@@ -377,7 +378,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (lock_type == TL_WRITE_DELAYED)
{
- error=write_delayed(thd,table,duplic,query, thd->query_length, log_on);
+ error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on);
query=0;
}
else
@@ -490,7 +491,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
else
{
char buff[160];
- if (duplic == DUP_IGNORE)
+ if (ignore)
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(lock_type == TL_WRITE_DELAYED) ? (ulong) 0 :
(ulong) (info.records - info.copied), (ulong) thd->cuted_fields);
@@ -851,7 +852,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
else if ((error=table->file->write_row(table->record[0])))
{
- if (info->handle_duplicates != DUP_IGNORE ||
+ if (!info->ignore ||
(error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE))
goto err;
table->file->restore_auto_increment();
@@ -906,13 +907,13 @@ public:
char *record,*query;
enum_duplicates dup;
time_t start_time;
- bool query_start_used,last_insert_id_used,insert_id_used, log_query;
+ bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query;
ulonglong last_insert_id;
timestamp_auto_set_type timestamp_field_type;
uint query_length;
- delayed_row(enum_duplicates dup_arg, bool log_query_arg)
- :record(0),query(0),dup(dup_arg),log_query(log_query_arg) {}
+ delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg)
+ :record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {}
~delayed_row()
{
x_free(record);
@@ -1224,7 +1225,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
/* Put a question in queue */
-static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
+static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore,
char *query, uint query_length, bool log_on)
{
delayed_row *row=0;
@@ -1237,7 +1238,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
pthread_cond_wait(&di->cond_client,&di->mutex);
thd->proc_info="storing row into queue";
- if (thd->killed || !(row= new delayed_row(duplic, log_on)))
+ if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on)))
goto err;
if (!query)
@@ -1600,8 +1601,9 @@ bool delayed_insert::handle_inserts(void)
thd.insert_id_used=row->insert_id_used;
table->timestamp_field_type= row->timestamp_field_type;
+ info.ignore= row->ignore;
info.handle_duplicates= row->dup;
- if (info.handle_duplicates == DUP_IGNORE ||
+ if (info.ignore ||
info.handle_duplicates == DUP_REPLACE)
{
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
@@ -1803,7 +1805,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
thd->cuted_fields=0;
- if (info.handle_duplicates == DUP_IGNORE ||
+ if (info.ignore ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->start_bulk_insert((ha_rows) 0);
@@ -1965,7 +1967,7 @@ bool select_insert::send_eof()
DBUG_RETURN(1);
}
char buff[160];
- if (info.handle_duplicates == DUP_IGNORE)
+ if (info.ignore)
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.records - info.copied), (ulong) thd->cuted_fields);
else
@@ -2008,7 +2010,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,default_values); // Get empty record
thd->cuted_fields=0;
- if (info.handle_duplicates == DUP_IGNORE ||
+ if (info.ignore ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->start_bulk_insert((ha_rows) 0);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 1081246c9e3..5929ad5c14b 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -166,6 +166,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE);
lex->sql_command=SQLCOM_END;
lex->duplicates= DUP_ERROR;
+ lex->ignore= 0;
lex->sphead= NULL;
lex->spcont= NULL;
lex->proc_list.first= 0;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 007a4601338..6ed5fb247dc 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -721,7 +721,7 @@ typedef struct st_lex
/* special JOIN::prepare mode: changing of query is prohibited */
bool view_prepare_mode;
bool safe_to_cache_query;
- bool subqueries;
+ bool subqueries, ignore;
bool variables_used;
ALTER_INFO alter_info;
/* Prepared statements SQL syntax:*/
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 21dd2318504..eed3aee791a 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -82,8 +82,8 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
List<Item> &fields, enum enum_duplicates handle_duplicates,
- bool read_file_from_client,thr_lock_type lock_type,
- bool ignore_check_option_errors)
+ bool ignore,
+ bool read_file_from_client,thr_lock_type lock_type)
{
char name[FN_REFLEN];
File file;
@@ -186,7 +186,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
/* We can't give an error in the middle when using LOCAL files */
if (read_file_from_client && handle_duplicates == DUP_ERROR)
- handle_duplicates=DUP_IGNORE;
+ ignore= 1;
#ifndef EMBEDDED_LIBRARY
if (read_file_from_client)
@@ -237,6 +237,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
COPY_INFO info;
bzero((char*) &info,sizeof(info));
+ info.ignore= ignore;
info.handle_duplicates=handle_duplicates;
info.escape_char=escaped->length() ? (*escaped)[0] : INT_MAX;
@@ -258,6 +259,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
lf_info.db = db;
lf_info.table_name = table_list->real_name;
lf_info.fields = &fields;
+ lf_info.ignore= ignore;
lf_info.handle_dup = handle_duplicates;
lf_info.wrote_create_file = 0;
lf_info.last_pos_in_file = HA_POS_ERROR;
@@ -288,7 +290,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
table->next_number_field=table->found_next_number_field;
- if (handle_duplicates == DUP_IGNORE ||
+ if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
ha_enable_transaction(thd, FALSE);
@@ -303,11 +305,11 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (!field_term->length() && !enclosed->length())
error= read_fixed_length(thd, info, table_list, fields,read_info,
- skip_lines, ignore_check_option_errors);
+ skip_lines, ignore);
else
error= read_sep_field(thd, info, table_list, fields, read_info,
*enclosed, skip_lines,
- ignore_check_option_errors);
+ ignore);
if (table->file->end_bulk_insert())
error=1; /* purecov: inspected */
ha_enable_transaction(thd, TRUE);
@@ -485,9 +487,8 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count);
}
- switch(table_list->view_check_option(thd,
- ignore_check_option_errors))
- {
+ switch (table_list->view_check_option(thd,
+ ignore_check_option_errors)) {
case VIEW_CHECK_SKIP:
read_info.next_line();
goto continue_loop;
@@ -607,9 +608,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
}
}
- switch(table_list->view_check_option(thd,
- ignore_check_option_errors))
- {
+ switch (table_list->view_check_option(thd,
+ ignore_check_option_errors)) {
case VIEW_CHECK_SKIP:
read_info.next_line();
goto continue_loop;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 9ebeb9fe06d..442d398889f 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2628,7 +2628,8 @@ mysql_execute_command(THD *thd)
lex->create_list,
lex->key_list,
select_lex->item_list,
- lex->duplicates)))
+ lex->duplicates,
+ lex->ignore)))
{
/*
CREATE from SELECT give its SELECT_LEX for SELECT,
@@ -2768,7 +2769,7 @@ create_error:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
- lex->duplicates, &lex->alter_info);
+ lex->duplicates, lex->ignore, &lex->alter_info);
}
break;
}
@@ -2933,7 +2934,7 @@ create_error:
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
select_lex->select_limit,
- lex->duplicates));
+ lex->duplicates, lex->ignore));
/* mysql_update return 2 if we need to switch to multi-update */
if (result != 2)
break;
@@ -2954,7 +2955,7 @@ create_error:
&lex->value_list,
select_lex->where,
select_lex->options,
- lex->duplicates, unit, select_lex);
+ lex->duplicates, lex->ignore, unit, select_lex);
break;
}
case SQLCOM_REPLACE:
@@ -2965,8 +2966,7 @@ create_error:
break;
res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values,
lex->update_list, lex->value_list,
- (lex->value_list.elements ?
- DUP_UPDATE : lex->duplicates));
+ lex->duplicates, lex->ignore);
if (first_table->view && !first_table->contain_auto_increment)
thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it
break;
@@ -2997,8 +2997,7 @@ create_error:
if (!res && (result= new select_insert(first_table, first_table->table,
&lex->field_list,
&lex->update_list, &lex->value_list,
- lex->duplicates,
- lex->duplicates == DUP_IGNORE)))
+ lex->duplicates, lex->ignore)))
{
/*
insert/replace from SELECT give its SELECT_LEX for SELECT,
@@ -3194,8 +3193,8 @@ create_error:
goto error;
}
res= mysql_load(thd, lex->exchange, first_table, lex->field_list,
- lex->duplicates, (bool) lex->local_file,
- lex->lock_option, lex->duplicates == DUP_IGNORE);
+ lex->duplicates, lex->ignore, (bool) lex->local_file,
+ lex->lock_option);
break;
}
@@ -6043,7 +6042,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, &alter_info));
+ DUP_ERROR, 0, &alter_info));
}
@@ -6061,7 +6060,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, alter_info));
+ DUP_ERROR, 0, alter_info));
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 9f083e19146..8ba92015535 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1510,7 +1510,7 @@ err:
int log_loaded_block(IO_CACHE* file)
{
- LOAD_FILE_INFO* lf_info;
+ LOAD_FILE_INFO *lf_info;
uint block_len ;
/* file->request_pos contains position where we started last read */
@@ -1532,7 +1532,7 @@ int log_loaded_block(IO_CACHE* file)
{
Create_file_log_event c(lf_info->thd,lf_info->ex,lf_info->db,
lf_info->table_name, *lf_info->fields,
- lf_info->handle_dup, buffer,
+ lf_info->handle_dup, lf_info->ignore, buffer,
block_len, lf_info->log_delayed);
mysql_bin_log.write(&c);
lf_info->wrote_create_file = 1;
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 71b25548da4..e8497fee343 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -67,7 +67,7 @@ typedef struct st_load_file_info
enum enum_duplicates handle_dup;
char* db;
char* table_name;
- bool wrote_create_file, log_delayed;
+ bool wrote_create_file, log_delayed, ignore;
} LOAD_FILE_INFO;
int log_loaded_block(IO_CACHE* file);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 6d0504802db..dce7c754f03 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -529,7 +529,7 @@ JOIN::optimize()
optimized= 1;
// Ignore errors of execution if option IGNORE present
- if (thd->lex->duplicates == DUP_IGNORE)
+ if (thd->lex->ignore)
thd->lex->current_select->no_error= 1;
#ifdef HAVE_REF_TO_FIELDS // Not done yet
/* Add HAVING to WHERE if possible */
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index e8655a2a304..c3aac59b98b 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -36,6 +36,7 @@ static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
+ bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
@@ -2806,7 +2807,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
uint order_num, ORDER *order,
- enum enum_duplicates handle_duplicates,
+ enum enum_duplicates handle_duplicates, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@@ -3348,7 +3349,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
error=copy_data_between_tables(table,new_table,create_list,
- handle_duplicates,
+ handle_duplicates, ignore,
order_num, order, &copied, &deleted);
}
thd->last_insert_id=next_insert_id; // Needed for correct log
@@ -3567,6 +3568,7 @@ static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
+ bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
ha_rows *deleted)
@@ -3660,7 +3662,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
current query id */
from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
- if (handle_duplicates == DUP_IGNORE ||
+ if (ignore ||
handle_duplicates == DUP_REPLACE)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
@@ -3686,7 +3688,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->write_row((byte*) to->record[0])))
{
- if ((handle_duplicates != DUP_IGNORE &&
+ if ((!ignore &&
handle_duplicates != DUP_REPLACE) ||
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
@@ -3764,7 +3766,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
- DUP_ERROR, &lex->alter_info, do_send_ok));
+ DUP_ERROR, 0, &lex->alter_info, do_send_ok));
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 012a26a6f4d..901cb0ba496 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -51,10 +51,10 @@ select_union::select_union(TABLE *table_par)
{
bzero((char*) &info,sizeof(info));
/*
- We can always use DUP_IGNORE because the temporary table will only
+ We can always use IGNORE because the temporary table will only
contain a unique key if we are not using UNION ALL
*/
- info.handle_duplicates= DUP_IGNORE;
+ info.ignore= 1;
}
select_union::~select_union()
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 4d30ffbda9b..636d265d256 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -113,7 +113,7 @@ int mysql_update(THD *thd,
COND *conds,
uint order_num, ORDER *order,
ha_rows limit,
- enum enum_duplicates handle_duplicates)
+ enum enum_duplicates handle_duplicates, bool ignore)
{
bool using_limit= limit != HA_POS_ERROR;
bool safe_update= thd->options & OPTION_SAFE_UPDATES;
@@ -380,7 +380,7 @@ int mysql_update(THD *thd,
}
}
- if (handle_duplicates == DUP_IGNORE)
+ if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (select && select->quick && select->quick->reset())
@@ -433,8 +433,7 @@ int mysql_update(THD *thd,
updated++;
thd->no_trans_update= !transactional_table;
}
- else if (handle_duplicates != DUP_IGNORE ||
- error != HA_ERR_FOUND_DUPP_KEY)
+ else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
{
thd->fatal_error(); // Force error message
table->file->print_error(error,MYF(0));
@@ -812,7 +811,7 @@ bool mysql_multi_update(THD *thd,
List<Item> *values,
COND *conds,
ulong options,
- enum enum_duplicates handle_duplicates,
+ enum enum_duplicates handle_duplicates, bool ignore,
SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
bool res= FALSE;
@@ -825,7 +824,7 @@ bool mysql_multi_update(THD *thd,
if (!(result= new multi_update(thd, table_list,
thd->lex->select_lex.leaf_tables,
fields, values,
- handle_duplicates)))
+ handle_duplicates, ignore)))
DBUG_RETURN(TRUE);
thd->no_trans_update= 0;
@@ -851,12 +850,12 @@ bool mysql_multi_update(THD *thd,
multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
TABLE_LIST *leaves_list,
List<Item> *field_list, List<Item> *value_list,
- enum enum_duplicates handle_duplicates_arg)
+ enum enum_duplicates handle_duplicates_arg, bool ignore_arg)
:all_tables(table_list), leaves(leaves_list), update_tables(0),
thd(thd_arg), tmp_tables(0), updated(0), found(0), fields(field_list),
values(value_list), table_count(0), copy_field(0),
handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0),
- transactional_tables(1)
+ transactional_tables(1), ignore(ignore_arg)
{}
@@ -1201,8 +1200,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
table->record[0])))
{
updated--;
- if (handle_duplicates != DUP_IGNORE ||
- error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
{
thd->fatal_error(); // Force error message
table->file->print_error(error,MYF(0));
@@ -1336,8 +1334,7 @@ int multi_update::do_updates(bool from_send_error)
if ((local_error=table->file->update_row(table->record[1],
table->record[0])))
{
- if (local_error != HA_ERR_FOUND_DUPP_KEY ||
- handle_duplicates != DUP_IGNORE)
+ if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY)
goto err;
}
updated++;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 2095a76d0c7..289b96dff51 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3220,8 +3220,9 @@ alter:
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
- lex->sql_command = SQLCOM_ALTER_TABLE;
- lex->name=0;
+ lex->sql_command= SQLCOM_ALTER_TABLE;
+ lex->name= 0;
+ lex->duplicates= DUP_ERROR;
if (!lex->select_lex.add_table_to_list(thd, $4, NULL,
TL_OPTION_UPDATING))
YYABORT;
@@ -3449,8 +3450,9 @@ opt_column:
| COLUMN_SYM {};
opt_ignore:
- /* empty */ { Lex->duplicates=DUP_ERROR; }
- | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; };
+ /* empty */ { Lex->ignore= 0;}
+ | IGNORE_SYM { Lex->ignore= 1;}
+ ;
opt_restrict:
/* empty */ { Lex->drop_mode= DROP_DEFAULT; }
@@ -5573,7 +5575,8 @@ insert:
INSERT
{
LEX *lex= Lex;
- lex->sql_command = SQLCOM_INSERT;
+ lex->sql_command= SQLCOM_INSERT;
+ lex->duplicates= DUP_ERROR;
/* for subselects */
lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE;
@@ -5734,6 +5737,7 @@ update:
mysql_init_select(lex);
lex->sql_command= SQLCOM_UPDATE;
lex->lock_option= TL_UNLOCK; /* Will be set later */
+ lex->duplicates= DUP_ERROR;
}
opt_low_priority opt_ignore join_table_list
SET update_list
@@ -5793,6 +5797,7 @@ delete:
LEX *lex= Lex;
lex->sql_command= SQLCOM_DELETE;
lex->lock_option= lex->thd->update_lock_default;
+ lex->ignore= 0;
lex->select_lex.init_order();
}
opt_delete_options single_multi {}
@@ -5849,7 +5854,7 @@ opt_delete_options:
opt_delete_option:
QUICK { Select->options|= OPTION_QUICK; }
| LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
- | IGNORE_SYM { Lex->duplicates= DUP_IGNORE; };
+ | IGNORE_SYM { Lex->ignore= 1; };
truncate:
TRUNCATE_SYM opt_table_sym table_name
@@ -6357,6 +6362,8 @@ load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING_sys
lex->sql_command= SQLCOM_LOAD;
lex->lock_option= $3;
lex->local_file= $4;
+ lex->duplicates= DUP_ERROR;
+ lex->ignore= 0;
if (!(lex->exchange= new sql_exchange($6.str,0)))
YYABORT;
lex->field_list.empty();
@@ -6394,7 +6401,7 @@ load_data_lock:
opt_duplicate:
/* empty */ { Lex->duplicates=DUP_ERROR; }
| REPLACE { Lex->duplicates=DUP_REPLACE; }
- | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; };
+ | IGNORE_SYM { Lex->ignore= 1; };
opt_field_term:
/* empty */