author     unknown <antony@ppcg5.local>  2007-06-28 13:36:26 -0700
committer  unknown <antony@ppcg5.local>  2007-06-28 13:36:26 -0700
commit     94beb7cd8db07e816ecced8112945d9df9db67e4 (patch)
tree       da2f7e222273c732b24ff4cef082448d41d7eb73
parent     0e5e884b118baeea9d47520acc5da2caa905dca8 (diff)
download   mariadb-git-94beb7cd8db07e816ecced8112945d9df9db67e4.tar.gz
Bug#25511 "Federated INSERT failures"

Federated does not correctly handle "INSERT ... ON DUPLICATE KEY UPDATE".
However, implementing such support is not reasonably possible without
increasing the complexity of the storage engine: it would have to check
that constraints on the remote server match those on the local server,
and parse remote error messages. Instead, this patch causes
'ON DUPLICATE KEY' to fail with an ER_DUP_KEY message if a conflict
occurs, rather than failing silently.

include/my_base.h:
  bug25511
  new storage engine hint: HA_EXTRA_INSERT_WITH_UPDATE
mysql-test/r/federated.result:
  test for bug25511
mysql-test/t/federated.test:
  test for bug25511
sql/ha_federated.cc:
  bug25511
  implement support for handling the HA_EXTRA_INSERT_WITH_UPDATE hint
sql/ha_federated.h:
  bug25511
  new property: insert_dup_update
sql/sql_insert.cc:
  bug25511
  implement support for HA_EXTRA_INSERT_WITH_UPDATE: when checking the
  duplicates flag, if it is DUP_UPDATE, send the hint to the storage
  engine.
-rw-r--r--  include/my_base.h               8
-rw-r--r--  mysql-test/r/federated.result  15
-rw-r--r--  mysql-test/t/federated.test    26
-rw-r--r--  sql/ha_federated.cc            10
-rw-r--r--  sql/ha_federated.h              1
-rw-r--r--  sql/sql_insert.cc               8
6 files changed, 66 insertions, 2 deletions
diff --git a/include/my_base.h b/include/my_base.h
index d07a4de8e6a..a26217f8050 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -168,7 +168,13 @@ enum ha_extra_function {
These flags are reset by the handler::extra(HA_EXTRA_RESET) call.
*/
HA_EXTRA_DELETE_CANNOT_BATCH,
- HA_EXTRA_UPDATE_CANNOT_BATCH
+ HA_EXTRA_UPDATE_CANNOT_BATCH,
+ /*
+  Inform the handler that write_row() should immediately report constraint
+  violations because an INSERT ... ON DUPLICATE KEY UPDATE is being
+  performed.
+ */
+ HA_EXTRA_INSERT_WITH_UPDATE
};
/* The following is parameter to ha_panic() */
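To see why the federated engine needs this hint, consider how its
write_row() chooses the verb for the SQL it sends to the remote server.
This is a simplified sketch of the logic after the patch: the flag names
follow the patch, everything else is illustrative. The key point is that
ON DUPLICATE KEY UPDATE also raises ignore_duplicates (the server wants
to handle duplicate keys itself), so without insert_dup_update the
statement would be silently downgraded to INSERT IGNORE on the remote
side:

#include <string>
#include <cstdio>

/* Simplified sketch of the verb choice in ha_federated::write_row(). */
static std::string insert_verb(bool replace_duplicates,
                               bool ignore_duplicates,
                               bool insert_dup_update)
{
  if (replace_duplicates)
    return "REPLACE INTO ";
  /*
    Without the insert_dup_update flag, ON DUPLICATE KEY UPDATE would
    take this branch and the remote duplicate would never be reported.
  */
  if (ignore_duplicates && !insert_dup_update)
    return "INSERT IGNORE INTO ";
  return "INSERT INTO ";
}

int main()
{
  /* Plain INSERT IGNORE: stays an INSERT IGNORE on the remote server. */
  std::printf("%s\n", insert_verb(false, true, false).c_str());
  /*
    INSERT ... ON DUPLICATE KEY UPDATE: must stay a plain INSERT so the
    remote duplicate-key error comes back and surfaces as ER_DUP_KEY.
  */
  std::printf("%s\n", insert_verb(false, true, true).c_str());
  return 0;
}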
diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result
index 52304de7609..4bef92319fb 100644
--- a/mysql-test/r/federated.result
+++ b/mysql-test/r/federated.result
@@ -1867,6 +1867,21 @@ a b
3 Curly
drop table federated.t1;
drop table federated.t1;
+create table federated.t1 (a int primary key, b varchar(64))
+DEFAULT CHARSET=utf8;
+create table federated.t1 (a int primary key, b varchar(64))
+ENGINE=FEDERATED
+connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'
+ DEFAULT CHARSET=utf8;
+insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe")
+on duplicate key update a=a+100;
+ERROR 23000: Can't write; duplicate key in table 't1'
+select * from federated.t1;
+a b
+1 Larry
+2 Curly
+drop table federated.t1;
+drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test
index bdd6076064a..bedb6b36d61 100644
--- a/mysql-test/t/federated.test
+++ b/mysql-test/t/federated.test
@@ -1603,4 +1603,30 @@ drop table federated.t1;
connection slave;
drop table federated.t1;
+#
+# BUG#25511 Federated INSERT failures.
+#
+# When the user performs an INSERT ... ON DUPLICATE KEY UPDATE, we want
+# it to fail if a duplicate key exists instead of ignoring it.
+#
+connection slave;
+create table federated.t1 (a int primary key, b varchar(64))
+ DEFAULT CHARSET=utf8;
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (a int primary key, b varchar(64))
+ ENGINE=FEDERATED
+ connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'
+ DEFAULT CHARSET=utf8;
+
+--error ER_DUP_KEY
+insert into federated.t1 values (1,"Larry"), (2,"Curly"), (1,"Moe")
+on duplicate key update a=a+100;
+select * from federated.t1;
+
+drop table federated.t1;
+connection slave;
+drop table federated.t1;
+
+
source include/federated_cleanup.inc;
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 9412db4b6a6..e691831bbc9 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -1627,7 +1627,7 @@ int ha_federated::write_row(byte *buf)
*/
if (replace_duplicates)
insert_string.append(STRING_WITH_LEN("REPLACE INTO "));
- else if (ignore_duplicates)
+ else if (ignore_duplicates && !insert_dup_update)
insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
else
insert_string.append(STRING_WITH_LEN("INSERT INTO "));
@@ -2548,6 +2548,7 @@ int ha_federated::extra(ha_extra_function operation)
ignore_duplicates= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ insert_dup_update= FALSE;
ignore_duplicates= FALSE;
break;
case HA_EXTRA_WRITE_CAN_REPLACE:
@@ -2556,7 +2557,11 @@ int ha_federated::extra(ha_extra_function operation)
case HA_EXTRA_WRITE_CANNOT_REPLACE:
replace_duplicates= FALSE;
break;
+ case HA_EXTRA_INSERT_WITH_UPDATE:
+ insert_dup_update= TRUE;
+ break;
case HA_EXTRA_RESET:
+ insert_dup_update= FALSE;
ignore_duplicates= FALSE;
replace_duplicates= FALSE;
break;
@@ -2699,6 +2704,9 @@ int ha_federated::stash_remote_error()
DBUG_ENTER("ha_federated::stash_remote_error()");
remote_error_number= mysql_errno(mysql);
strmake(remote_error_buf, mysql_error(mysql), sizeof(remote_error_buf)-1);
+ if (remote_error_number == ER_DUP_ENTRY ||
+ remote_error_number == ER_DUP_KEY)
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
DBUG_RETURN(HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM);
}
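The stash_remote_error() change above is what turns the remote failure
into the local ER_DUP_KEY seen in the test: duplicate-key errors from the
remote server are mapped onto the handler-level duplicate-key code instead
of the engine's generic remote-error code. A standalone sketch of that
mapping follows; ER_DUP_KEY, ER_DUP_ENTRY and HA_ERR_FOUND_DUPP_KEY carry
the real MySQL values, while the generic remote-error constant is a
made-up stand-in:

#include <cstdio>

/* Real MySQL error numbers for the two duplicate-key errors. */
static const int ER_DUP_KEY= 1022;
static const int ER_DUP_ENTRY= 1062;
/* Handler-level duplicate-key error code. */
static const int HA_ERR_FOUND_DUPP_KEY= 121;
/* Stand-in for the engine's generic remote-error code (illustrative). */
static const int TOY_FEDERATED_REMOTE_ERROR= 10000;

/*
  Simplified version of the stash_remote_error() change: surface remote
  duplicate-key failures as a local duplicate-key error.
*/
static int map_remote_error(int remote_errno)
{
  if (remote_errno == ER_DUP_ENTRY || remote_errno == ER_DUP_KEY)
    return HA_ERR_FOUND_DUPP_KEY;
  return TOY_FEDERATED_REMOTE_ERROR;
}

int main()
{
  std::printf("%d\n", map_remote_error(ER_DUP_ENTRY)); /* 121 */
  std::printf("%d\n", map_remote_error(2013));         /* 10000 */
  return 0;
}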
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index 11a7547880a..28c89561b2c 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -158,6 +158,7 @@ class ha_federated: public handler
int remote_error_number;
char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE];
bool ignore_duplicates, replace_duplicates;
+ bool insert_dup_update;
private:
/*
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 228fc8860ae..3bfd84b324b 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -715,6 +715,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
*/
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
+ if (duplic == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
/*
let's *try* to start bulk inserts. It won't necessarily
start them as values_list.elements should be greater than
@@ -2434,6 +2436,8 @@ bool Delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -2761,6 +2765,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd->no_trans_update.stmt= FALSE;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -3218,6 +3224,8 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
+ if (info.handle_duplicates == DUP_UPDATE)
+ table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update.stmt= FALSE;