author     unknown <mikael@dator5.(none)>   2006-06-21 21:24:04 -0400
committer  unknown <mikael@dator5.(none)>   2006-06-21 21:24:04 -0400
commit     9ee380cbd3043d83d74c17853f418578d69338f7 (patch)
tree       3a8ff6c71d3b7b21b9c49239f43829176b5e0f51
parent     767cb0b4a6ed04113da94848f20442b84f73d61c (diff)
parent     532915484940221bc3c9a3090667c0b441183517 (diff)
download   mariadb-git-9ee380cbd3043d83d74c17853f418578d69338f7.tar.gz
Merge dator5.(none):/home/pappa/clean-mysql-5.1
into dator5.(none):/home/pappa/bug17138

sql/ha_ndbcluster.h:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/sql_table.cc:
  Auto merged
-rw-r--r--   mysql-test/r/partition.result   15
-rw-r--r--   mysql-test/t/partition.test     27
-rw-r--r--   sql/ha_ndbcluster.h              8
-rw-r--r--   sql/ha_partition.h               8
-rw-r--r--   sql/handler.h                   32
-rw-r--r--   sql/item_sum.cc                  3
-rw-r--r--   sql/sql_acl.cc                   9
-rw-r--r--   sql/sql_insert.cc               13
-rw-r--r--   sql/sql_select.cc                9
-rw-r--r--   sql/sql_table.cc                 9
-rw-r--r--   sql/sql_union.cc                 2
-rw-r--r--   sql/sql_update.cc               20
12 files changed, 117 insertions, 38 deletions
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index fa1baaec07e..b51436e6747 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -1043,4 +1043,19 @@ alter table t1 add partition (partition p2 values in (3));
alter table t1 drop partition p2;
use test;
drop database db99;
+create table t1 (a int)
+partition by list (a)
+(partition p0 values in (0));
+insert into t1 values (0);
+create procedure po ()
+begin
+begin
+declare continue handler for sqlexception begin end;
+update ignore t1 set a = 1 where a = 0;
+end;
+prepare stmt1 from 'alter table t1';
+execute stmt1;
+end//
+call po()//
+drop table t1;
End of 5.1 tests
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index f62bb2dcd01..8cfcc059920 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -1199,4 +1199,31 @@ alter table t1 drop partition p2;
use test;
drop database db99;
+#
+# BUG 17138 Problem with stored procedure and analyze partition
+#
+drop procedure if exists mysqltest_1;
+
+create table t1 (a int)
+partition by list (a)
+(partition p0 values in (0));
+
+insert into t1 values (0);
+delimiter //;
+
+create procedure mysqltest_1 ()
+begin
+ begin
+ declare continue handler for sqlexception begin end;
+ update ignore t1 set a = 1 where a = 0;
+ end;
+ prepare stmt1 from 'alter table t1';
+ execute stmt1;
+end//
+
+call mysqltest_1()//
+delimiter ;//
+drop table t1;
+drop procedure mysqltest_1;
+
--echo End of 5.1 tests
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index df3c5791713..d5ad299607d 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -654,6 +654,14 @@ class ha_ndbcluster: public handler
int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags))
+ return FALSE;
+ if (error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index b52c8d92164..4b85ddd2def 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -302,6 +302,14 @@ public:
virtual void start_bulk_insert(ha_rows rows);
virtual int end_bulk_insert();
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags))
+ return FALSE;
+ if (error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
/*
-------------------------------------------------------------------------
MODULE full table scan
diff --git a/sql/handler.h b/sql/handler.h
index 52843b78266..3e42938b5a3 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -216,11 +216,6 @@
#define HA_BLOCK_LOCK 256 /* unlock when reading some records */
#define HA_OPEN_TEMPORARY 512
- /* Errors on write which is recoverable (Key exist) */
-#define HA_WRITE_SKIP 121 /* Duplicate key on write */
-#define HA_READ_CHECK 123 /* Update with is recoverable */
-#define HA_CANT_DO_THAT 131 /* Databasehandler can't do it */
-
/* Some key definitions */
#define HA_KEY_NULL_LENGTH 1
#define HA_KEY_BLOB_LENGTH 2
@@ -970,7 +965,30 @@ public:
bool has_transactions()
{ return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
virtual uint extra_rec_buf_length() const { return 0; }
-
+
+ /*
+ This method is used to analyse the error to see whether the error
+ is ignorable or not; certain handlers can have more errors that are
+ ignorable than others. E.g. the partition handler can get inserts
+ into a range where there is no partition and this is an ignorable
+ error.
+ HA_ERR_FOUND_DUPP_UNIQUE is a special case in MyISAM that means the
+ same thing as HA_ERR_FOUND_DUPP_KEY but can in some cases lead to
+ a slightly different error message.
+ */
+#define HA_CHECK_DUPP_KEY 1
+#define HA_CHECK_DUPP_UNIQUE 2
+#define HA_CHECK_DUPP (HA_CHECK_DUPP_KEY + HA_CHECK_DUPP_UNIQUE)
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!error ||
+ ((flags & HA_CHECK_DUPP_KEY) &&
+ (error == HA_ERR_FOUND_DUPP_KEY ||
+ error == HA_ERR_FOUND_DUPP_UNIQUE)))
+ return FALSE;
+ return TRUE;
+ }
+
/*
Number of rows in table. It will only be called if
(table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
@@ -1022,7 +1040,7 @@ public:
DBUG_RETURN(rnd_end());
}
int ha_reset();
-
+
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
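
The handler.h hunk above is the heart of the change: handler::is_fatal_error() gives every storage engine a single place to say which write errors are ignorable, and the HA_CHECK_DUPP_KEY / HA_CHECK_DUPP_UNIQUE flags let each call site opt in to treating duplicate keys as non-fatal. Below is a minimal sketch of the calling pattern the rest of this patch converts the server to; write_one_row() is a hypothetical helper for illustration, not code from this commit.

  /*
    Sketch only, against the handler API as defined in the hunk above.
    write_one_row() is hypothetical; the real call sites are write_record(),
    end_write(), select_union::send_data(), etc. further down in this diff.
  */
  static int write_one_row(handler *file, byte *record, bool ignore_errors)
  {
    int error= file->ha_write_row(record);
    if (!error)
      return 0;                                   /* row stored */
    /*
      Duplicate keys, plus engine-specific cases such as
      HA_ERR_NO_PARTITION_FOUND in ha_partition/ha_ndbcluster, are
      classified as non-fatal here.
    */
    if (!file->is_fatal_error(error, HA_CHECK_DUPP))
    {
      if (ignore_errors)
        return 0;                                 /* INSERT/UPDATE IGNORE: skip row */
      file->print_error(error, MYF(0));           /* report, but not fatal */
      return error;
    }
    file->print_error(error, MYF(0));             /* truly fatal: abort statement */
    return error;
  }
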
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index d6bc2c326d6..caaa111645a 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -2663,8 +2663,7 @@ bool Item_sum_count_distinct::add()
return tree->unique_add(table->record[0] + table->s->null_bytes);
}
if ((error= table->file->ha_write_row(table->record[0])) &&
- error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE)
+ table->file->is_fatal_error(error, HA_CHECK_DUPP))
return TRUE;
return FALSE;
}
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 8066c41fd10..18591052a1f 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2049,8 +2049,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
}
else if ((error=table->file->ha_write_row(table->record[0]))) // insert
{ // This should never happen
- if (error && error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP))
{
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
error= -1; /* purecov: deadcode */
@@ -2172,7 +2171,7 @@ static int replace_db_table(TABLE *table, const char *db,
}
else if (rights && (error= table->file->ha_write_row(table->record[0])))
{
- if (error && error != HA_ERR_FOUND_DUPP_KEY) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2744,7 +2743,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
else
{
error=table->file->ha_write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2862,7 +2861,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
else
{
error=table->file->ha_write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
goto table_error;
}
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 4c2c0099908..8b0bd655360 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -976,12 +976,16 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
while ((error=table->file->ha_write_row(table->record[0])))
{
uint key_nr;
- if (error != HA_WRITE_SKIP)
+ bool is_duplicate_key_error;
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP))
goto err;
table->file->restore_auto_increment(); // it's too early here! BUG#20188
+ is_duplicate_key_error= table->file->is_fatal_error(error, 0);
+ if (info->ignore && !is_duplicate_key_error)
+ goto ok_or_after_trg_err;
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
- error=HA_WRITE_SKIP; /* Database can't find key */
+ error=HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */
goto err;
}
/* Read all columns for the row we are going to replace */
@@ -1062,7 +1066,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
- if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
+ if (info->ignore &&
+ !table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
goto ok_or_after_trg_err;
goto err;
}
@@ -1145,7 +1150,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
else if ((error=table->file->ha_write_row(table->record[0])))
{
if (!info->ignore ||
- (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE))
+ table->file->is_fatal_error(error, HA_CHECK_DUPP))
goto err;
table->file->restore_auto_increment();
goto ok_or_after_trg_err;
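
In write_record() above the error is now classified twice: first with HA_CHECK_DUPP to decide whether the statement must abort at once, then with flags 0 so that duplicate keys (which still go through the REPLACE / ON DUPLICATE KEY UPDATE path) can be told apart from engine-ignorable errors such as HA_ERR_NO_PARTITION_FOUND, which INSERT IGNORE now simply skips. A hedged sketch of the resulting decision follows; action_for_write_error() is a hypothetical helper used only for illustration.

  /*
    Sketch only: the effective decision table of write_record() after this
    change; action_for_write_error() is not part of the commit.
  */
  enum write_action { WRITE_OK, WRITE_SKIP_ROW, WRITE_HANDLE_DUP, WRITE_FAIL };

  static write_action action_for_write_error(handler *file, int error, bool ignore)
  {
    if (!error)
      return WRITE_OK;
    if (file->is_fatal_error(error, HA_CHECK_DUPP))
      return WRITE_FAIL;                 /* genuinely fatal: goto err */
    if (!file->is_fatal_error(error, 0)) /* not a duplicate key */
      return ignore ? WRITE_SKIP_ROW     /* e.g. HA_ERR_NO_PARTITION_FOUND under IGNORE */
                    : WRITE_FAIL;        /* no duplicate key to resolve: goto err */
    return WRITE_HANDLE_DUP;             /* duplicate key: REPLACE / ON DUP KEY UPDATE */
  }
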
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e9e2a4ed1e0..89ca6c7dc6c 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -9354,9 +9354,9 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
/* copy row that filled HEAP table */
if ((write_err=new_table.file->write_row(table->record[0])))
{
- if (write_err != HA_ERR_FOUND_DUPP_KEY &&
- write_err != HA_ERR_FOUND_DUPP_UNIQUE || !ignore_last_dupp_key_error)
- goto err;
+ if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUPP) ||
+ !ignore_last_dupp_key_error)
+ goto err;
}
/* remove heap table and change to use myisam table */
@@ -10777,8 +10777,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->found_records++;
if ((error=table->file->write_row(table->record[0])))
{
- if (error == HA_ERR_FOUND_DUPP_KEY ||
- error == HA_ERR_FOUND_DUPP_UNIQUE)
+ if (!table->file->is_fatal_error(error, HA_CHECK_DUPP))
goto end;
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error,1))
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index bf0535567b4..3bf4c0bd99b 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6268,12 +6268,11 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->ha_write_row((byte*) to->record[0])))
{
- if ((!ignore &&
- handle_duplicates != DUP_REPLACE) ||
- (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE))
+ if (!ignore ||
+ handle_duplicates != DUP_REPLACE || /* Currently always false */
+ to->file->is_fatal_error(error, HA_CHECK_DUPP))
{
- if (error == HA_ERR_FOUND_DUPP_KEY)
+ if (!to->file->is_fatal_error(error, HA_CHECK_DUPP))
{
uint key_nr= to->file->get_dup_key(error);
if ((int) key_nr >= 0)
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index bf93f0d3bea..fd4529090d4 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -65,7 +65,7 @@ bool select_union::send_data(List<Item> &values)
if ((error= table->file->ha_write_row(table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
- if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP) &&
create_myisam_from_heap(thd, table, &tmp_table_param, error, 1))
return 1;
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index da529cc0070..414c2b353b3 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -541,13 +541,14 @@ int mysql_update(THD *thd,
break;
}
}
- else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ else if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
error= 1;
@@ -1422,13 +1423,14 @@ bool multi_update::send_data(List<Item> &not_used_values)
table->record[0])))
{
updated--;
- if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
@@ -1457,8 +1459,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
/* Write row, ignoring duplicated updates to a row */
if ((error= tmp_table->file->ha_write_row(tmp_table->record[0])))
{
- if (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (tmp_table->file->is_fatal_error(error, HA_CHECK_DUPP) &&
create_myisam_from_heap(thd, tmp_table,
tmp_table_param + offset, error, 1))
{
@@ -1581,7 +1582,8 @@ int multi_update::do_updates(bool from_send_error)
if ((local_error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
- if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(local_error, HA_CHECK_DUPP_KEY))
goto err;
}
updated++;
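
The sql_update.cc hunks are what the new test case exercises: with LIST partitioning, UPDATE IGNORE used to see HA_ERR_NO_PARTITION_FOUND, and since that is not HA_ERR_FOUND_DUPP_KEY the old code called thd->fatal_error(), which (judging from the test added above) then broke the rest of the stored procedure. After this patch only genuinely fatal errors mark the THD, and ignorable ones are skipped under IGNORE. A minimal sketch of the corrected flow follows; handle_update_error() is a hypothetical helper, not code from the commit.

  /*
    Sketch only: corrected UPDATE [IGNORE] error handling per the
    mysql_update()/multi_update hunks above.
  */
  static int handle_update_error(THD *thd, handler *file, int error, bool ignore)
  {
    if (ignore && !file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
      return 0;                          /* UPDATE IGNORE: skip this row, keep going */
    if (file->is_fatal_error(error, HA_CHECK_DUPP_KEY))
      thd->fatal_error();                /* only truly fatal errors set THD::is_fatal_error */
    file->print_error(error, MYF(0));
    return 1;                            /* statement fails */
  }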