author     unknown <monty@mashka.mysql.fi>  2003-01-14 11:27:26 +0200
committer  unknown <monty@mashka.mysql.fi>  2003-01-14 11:27:26 +0200
commit     ef6c36c0c262ada284647cdf91d4cc2c599b7fdf (patch)
tree       d53ba7e1a43cc73daf4f75fa37f9467d8792fd64 /sql
parent     da1ff072c28eb58197892ba28c281e6ad0487c17 (diff)
download   mariadb-git-ef6c36c0c262ada284647cdf91d4cc2c599b7fdf.tar.gz
Guard against compiling without -fno-exceptions
Allocate bigger default thread stack because of problems with glibc
Fixed bug in UPDATE ... not_null_field=expression_that_returns_null
Fixed bug in replication when using auto_increment and LOAD DATA INFILE

include/my_global.h:
  Guard against compiling without -fno-exceptions
include/my_pthread.h:
  Allocate bigger default thread stack because of problems with glibc
mysql-test/r/null.result:
  Updated result
mysql-test/t/null.test:
  Test of using UPDATE/INSERT with NULL on not null fields.
sql/field_conv.cc:
  Fixed bug in UPDATE ... not_null_field=expression_that_returns_null
sql/sql_load.cc:
  Fixed bug in replication when using auto_increment and LOAD DATA INFILE
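The include/ and mysql-test/ parts of the commit live outside the 'sql' tree this page is limited to, so only the last two items appear in the diff below. For the -fno-exceptions item, the actual my_global.h hunk is not shown here; a minimal sketch of the idea, assuming the usual GNU toolchain behaviour (g++ predefines __EXCEPTIONS whenever C++ exceptions are enabled), would look like this:

    /* Illustrative guard only -- not the real my_global.h hunk.
       g++ predefines __EXCEPTIONS when exceptions are enabled, so a header
       can refuse to build unless -fno-exceptions was passed. */
    #if defined(__GNUC__) && defined(__cplusplus) && defined(__EXCEPTIONS)
    #error "Please compile the server with -fno-exceptions"
    #endif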
Diffstat (limited to 'sql')
-rw-r--r--  sql/field_conv.cc   9
-rw-r--r--  sql/sql_load.cc    26
2 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index ffc93f3e871..42272dd616f 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -118,6 +118,15 @@ set_field_to_null(Field *field)
field->reset();
return 0;
}
+ field->reset();
+ if (current_thd->count_cuted_fields)
+ {
+ current_thd->cuted_fields++; // Increment error counter
+ return 0;
+ }
+ if (!current_thd->no_errors)
+ my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0),
+ field->field_name);
return 1;
}
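Before this change the NOT NULL case of set_field_to_null() fell straight through to "return 1" without resetting the field, counting the value as cut, or reporting anything; the added lines are the fix for UPDATE ... not_null_field=expression_that_returns_null named in the commit message. The new tail gives three outcomes: nullable columns still receive a real NULL (handled in the elided head of the function), non-strict bulk paths such as LOAD DATA absorb the NULL as a default value plus a cuted_fields warning, and an ordinary UPDATE/INSERT now raises ER_BAD_NULL_ERROR. A self-contained model of that decision order, using stand-in structs rather than the server's Field/THD classes (the maybe_null head is assumed from the surrounding context, not shown in this hunk):

    // Stand-in model of set_field_to_null() after this patch; FakeField and
    // FakeThd are illustrative types, not the server's Field/THD.
    #include <cstdio>

    struct FakeField {
      const char *field_name;
      bool        maybe_null;
      void set_null() { std::printf("%s: stored NULL\n", field_name); }
      void reset()    { std::printf("%s: reset to default value\n", field_name); }
    };

    struct FakeThd {
      bool          count_cuted_fields;   // set on non-strict bulk paths (e.g. LOAD DATA)
      bool          no_errors;
      unsigned long cuted_fields;
    };

    // Returns 0 if the NULL assignment was absorbed, 1 if it is an error.
    static int model_set_field_to_null(FakeThd *thd, FakeField *field)
    {
      if (field->maybe_null)              // nullable column: store a real NULL
      {
        field->set_null();
        field->reset();
        return 0;
      }
      field->reset();                     // NOT NULL column: fall back to the default
      if (thd->count_cuted_fields)        // bulk path: count a warning and continue
      {
        thd->cuted_fields++;
        return 0;
      }
      if (!thd->no_errors)                // normal statement: report ER_BAD_NULL_ERROR
        std::printf("ER_BAD_NULL_ERROR: Column '%s' cannot be null\n",
                    field->field_name);
      return 1;
    }

    int main()
    {
      FakeField f   = { "not_null_field", false };
      FakeThd   thd = { false, false, 0 };
      return model_set_field_to_null(&thd, &f);   // reports the error, returns 1
    }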
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 908ff8c6361..c1c6267879e 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -357,8 +357,10 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
{
List_iterator_fast<Item> it(fields);
Item_field *sql_field;
+ ulonglong id;
DBUG_ENTER("read_fixed_length");
+ id=0;
/* No fields can be null in this format. mark all fields as not null */
while ((sql_field= (Item_field*) it++))
sql_field->field->set_notnull();
@@ -401,6 +403,14 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
thd->cuted_fields++; /* To long row */
if (write_record(table,&info))
DBUG_RETURN(1);
+ /*
+ If auto_increment values are used, save the first one
+ for LAST_INSERT_ID() and for the binary/update log.
+ We can't use insert_id() as we don't want to touch the
+ last_insert_id_used flag.
+ */
+ if (!id && thd->insert_id_used)
+ id= thd->last_insert_id;
if (table->next_number_field)
table->next_number_field->reset(); // Clear for next record
if (read_info.next_line()) // Skip to next line
@@ -408,6 +418,8 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
if (read_info.line_cuted)
thd->cuted_fields++; /* To long row */
}
+ if (id && !read_info.error)
+ thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
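The bookkeeping added around write_record() is the heart of the replication fix: remember the first auto_increment value the load generates, then re-install it once the rows have been read, so that LAST_INSERT_ID() and the statement written to the binary/update log carry the id of the first loaded row rather than whatever the last row happened to produce. The same lines are added to read_sep_field() in the next hunk. A compact stand-in model of the pattern (FakeThd approximates the THD members the patch touches; it is not the real class):

    // Stand-in model of the "save the first generated insert id" pattern
    // added to read_fixed_length()/read_sep_field().
    #include <cstdio>

    struct FakeThd {
      bool               insert_id_used;   // a row generated an auto_increment value
      unsigned long long last_insert_id;
      // Rough stand-in for THD::insert_id(ulonglong): force the id the log will carry.
      void insert_id(unsigned long long v) { last_insert_id = v; insert_id_used = true; }
    };

    // Each element of rows[] is an explicit id; 0 means "let auto_increment pick".
    static unsigned long long load_rows(FakeThd *thd, const unsigned long long *rows,
                                        int count, unsigned long long next_auto)
    {
      unsigned long long id = 0;            // first generated id, as in the patch
      for (int i = 0; i < count; i++)
      {
        thd->insert_id_used = false;
        if (rows[i] == 0)                   // auto_increment assigns a value for this row
        {
          thd->last_insert_id = next_auto++;
          thd->insert_id_used = true;
        }
        // Save the first generated id for LAST_INSERT_ID() and the log --
        // the guard the patch adds right after write_record().
        if (!id && thd->insert_id_used)
          id = thd->last_insert_id;
      }
      if (id)
        thd->insert_id(id);                 // re-install the first id for the log
      return id;
    }

    int main()
    {
      FakeThd thd = { false, 0 };
      const unsigned long long rows[] = { 0, 0, 42, 0 };  // three generated ids, one explicit
      unsigned long long first = load_rows(&thd, rows, 4, 100);
      std::printf("first generated id = %llu, id left for the log = %llu\n",
                  first, thd.last_insert_id);
      return 0;   // prints: first generated id = 100, id left for the log = 100
    }

The real code additionally skips the restore when read_info.error is set, so a failed load does not touch the recorded id.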
@@ -421,10 +433,12 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
List_iterator_fast<Item> it(fields);
Item_field *sql_field;
uint enclosed_length;
+ ulonglong id;
DBUG_ENTER("read_sep_field");
enclosed_length=enclosed.length();
-
+ id=0;
+
for (;;it.rewind())
{
if (thd->killed)
@@ -477,6 +491,14 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
}
if (write_record(table,&info))
DBUG_RETURN(1);
+ /*
+ If auto_increment values are used, save the first one
+ for LAST_INSERT_ID() and for the binary/update log.
+ We can't use insert_id() as we don't want to touch the
+ last_insert_id_used flag.
+ */
+ if (!id && thd->insert_id_used)
+ id= thd->last_insert_id;
if (table->next_number_field)
table->next_number_field->reset(); // Clear for next record
if (read_info.next_line()) // Skip to next line
@@ -484,6 +506,8 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
if (read_info.line_cuted)
thd->cuted_fields++; /* To long row */
}
+ if (id && !read_info.error)
+ thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
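read_sep_field() gets the identical treatment for the separator/enclosure based row format: both input loops call write_record() per row and reset next_number_field afterwards, so both have to capture the first generated id inside the loop and, provided read_info reported no error, hand it back via insert_id() once reading stops.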