From da1ff072c28eb58197892ba28c281e6ad0487c17 Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 10 Jan 2003 01:55:05 +0200
Subject: Fixed core dump bug in str LIKE "%other_str%" where strings contained
 characters >= 128.

Fixed problem with replication LOAD DATA INFILE when using --old-rpl-compat.
When LOAD DATA was executed on the master and InnoDB failed with a 'table
full' error, the binary log was corrupted.

sql/item_cmpfunc.cc:
  Fixed core dump bug in str LIKE "%other_str%" where strings contained
  characters >= 128.
sql/log_event.cc:
  Fixed problem with replication LOAD DATA INFILE when using --old-rpl-compat
sql/sql_load.cc:
  When LOAD DATA was executed on the master and InnoDB failed with a 'table
  full' error, the binary log was corrupted.
---
 sql/sql_load.cc | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'sql/sql_load.cc')

diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 6375ba46fd7..908ff8c6361 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -283,6 +283,20 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
     {
       if (lf_info.wrote_create_file)
       {
+        /*
+          Make sure last block (the one which caused the error) gets logged.
+          This is needed because otherwise, after the Delete_file_log_event is
+          written (to the binlog, not to read_info, which is a cache), the bad
+          block will remain in read_info.
+          At the end of mysql_load(), the destructor of read_info will call
+          end_io_cache() which will flush read_info, so we will finally have
+          this in the binlog:
+          Append_block # The last successful block
+          Delete_file
+          Append_block # The failing block
+          which is nonsense.
+        */
+        read_info.end_io_cache();
         Delete_file_log_event d(thd, log_delayed);
         mysql_bin_log.write(&d);
       }
--
cgit v1.2.1


From ef6c36c0c262ada284647cdf91d4cc2c599b7fdf Mon Sep 17 00:00:00 2001
From: unknown
Date: Tue, 14 Jan 2003 11:27:26 +0200
Subject: Guard against compiling without -fno-exceptions

Allocate bigger default thread stack because of problems with glibc
Fixed bug in UPDATE ... not_null_field=expression_that_returns_null
Fixed bug in replication when using auto_increment and LOAD DATA INFILE

include/my_global.h:
  Guard against compiling without -fno-exceptions
include/my_pthread.h:
  Allocate bigger default thread stack because of problems with glibc
mysql-test/r/null.result:
  Updated result
mysql-test/t/null.test:
  Test of using UPDATE/INSERT with NULL on NOT NULL fields.
sql/field_conv.cc:
  Fixed bug in UPDATE ... not_null_field=expression_that_returns_null
sql/sql_load.cc:
  Fixed bug in replication when using auto_increment and LOAD DATA INFILE
---
 sql/sql_load.cc | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

(limited to 'sql/sql_load.cc')

diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 908ff8c6361..c1c6267879e 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -357,8 +357,10 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
 {
   List_iterator_fast<Item> it(fields);
   Item_field *sql_field;
+  ulonglong id;
   DBUG_ENTER("read_fixed_length");
 
+  id=0;
   /* No fields can be null in this format. mark all fields as not null */
   while ((sql_field= (Item_field*) it++))
     sql_field->field->set_notnull();
@@ -401,6 +403,14 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
       thd->cuted_fields++;			/* Too long row */
     if (write_record(table,&info))
       DBUG_RETURN(1);
+    /*
+      If auto_increment values are used, save the first one
+      for LAST_INSERT_ID() and for the binary/update log.
+      We can't use insert_id() as we don't want to touch the
+      last_insert_id_used flag.
+    */
+    if (!id && thd->insert_id_used)
+      id= thd->last_insert_id;
     if (table->next_number_field)
       table->next_number_field->reset();	// Clear for next record
     if (read_info.next_line())			// Skip to next line
@@ -408,6 +418,8 @@
     if (read_info.line_cuted)
       thd->cuted_fields++;			/* Too long row */
   }
+  if (id && !read_info.error)
+    thd->insert_id(id);			// For binary/update log
   DBUG_RETURN(test(read_info.error));
 }
 
@@ -421,10 +433,12 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
   List_iterator_fast<Item> it(fields);
   Item_field *sql_field;
   uint enclosed_length;
+  ulonglong id;
   DBUG_ENTER("read_sep_field");
 
   enclosed_length=enclosed.length();
-
+  id=0;
+
   for (;;it.rewind())
   {
     if (thd->killed)
@@ -477,6 +491,14 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
     }
     if (write_record(table,&info))
       DBUG_RETURN(1);
+    /*
+      If auto_increment values are used, save the first one
+      for LAST_INSERT_ID() and for the binary/update log.
+      We can't use insert_id() as we don't want to touch the
+      last_insert_id_used flag.
+    */
+    if (!id && thd->insert_id_used)
+      id= thd->last_insert_id;
     if (table->next_number_field)
       table->next_number_field->reset();	// Clear for next record
     if (read_info.next_line())			// Skip to next line
@@ -484,6 +506,8 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
     }
     if (read_info.line_cuted)
       thd->cuted_fields++;			/* Too long row */
   }
+  if (id && !read_info.error)
+    thd->insert_id(id);			// For binary/update log
   DBUG_RETURN(test(read_info.error));
 }
--
cgit v1.2.1
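
The first patch is about event ordering rather than about the load itself: the block that caused the failure is still buffered in read_info (an IO_CACHE), and unless it is flushed before the Delete_file_log_event is written, the destructor of read_info flushes it afterwards and the binlog ends with Append_block / Delete_file / Append_block. The standalone sketch below models only that ordering question; FakeBinlog, FakeCache and abort_load are invented stand-ins for illustration, not MySQL types, and the actual fix is the single read_info.end_io_cache() call shown in the diff above.

#include <cstdio>
#include <string>
#include <vector>

// Invented stand-in for the binary log: an ordered list of event names.
struct FakeBinlog { std::vector<std::string> events; };

// Invented stand-in for read_info: an IO_CACHE-like buffer still holding
// the block that was being appended when the error happened.
struct FakeCache {
  FakeBinlog  *log;
  std::string  pending_block;

  // Like end_io_cache(): flush whatever is still buffered into the log.
  void end_io_cache() {
    if (!pending_block.empty()) {
      log->events.push_back("Append_block  # " + pending_block);
      pending_block.clear();
    }
  }
  ~FakeCache() { end_io_cache(); }    // mysql_load() relies on this destructor
};

// Error path of the load. With flush_first == true (the fix), the failing
// block is logged before Delete_file; with false, it shows up after it.
static void abort_load(FakeBinlog &binlog, bool flush_first) {
  // Blocks logged while the load was still succeeding:
  binlog.events.push_back("Append_block  # The last successful block");
  // The block that triggered the 'table full' error is still in the cache:
  FakeCache cache{&binlog, "The failing block"};
  if (flush_first)
    cache.end_io_cache();             // the early flush added by the patch
  binlog.events.push_back("Delete_file");
}                                     // cache's destructor flushes what is left

int main() {
  for (int pass = 0; pass < 2; ++pass) {
    bool fixed = (pass == 1);
    FakeBinlog binlog;
    abort_load(binlog, fixed);
    std::printf("%s:\n", fixed ? "with early flush (the fix)" : "without early flush");
    for (const std::string &event : binlog.events)
      std::printf("  %s\n", event.c_str());
  }
  return 0;
}

Tying the flush to the cache's destructor mirrors how mysql_load() relies on read_info's destructor calling end_io_cache(), which is exactly why the explicit early flush is needed on this error path.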
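
The second patch changes read_fixed_length() and read_sep_field() with one pattern: remember only the first auto_increment value generated during the load, and record it at the end only if the read finished without error. Below is a minimal standalone sketch of that pattern; Session, Row and load_rows are invented stand-ins rather than the real THD and READ_INFO, and the real code works through thd->insert_id_used, thd->last_insert_id and thd->insert_id() as shown in the diff.

#include <cstdint>
#include <cstdio>
#include <vector>

// Invented stand-in for the per-connection state (THD in the server).
struct Session {
  bool     insert_id_used   = false; // set when a row consumed an auto_increment value
  uint64_t last_insert_id   = 0;     // the value generated for that row
  uint64_t logged_insert_id = 0;     // what would go to the binary/update log

  // Stand-in for THD::insert_id(): record the id for logging without
  // touching insert_id_used (which is per-row bookkeeping).
  void insert_id(uint64_t id) { logged_insert_id = id; }
};

struct Row { bool needs_auto_id; };

// Load every row; remember only the FIRST generated auto_increment value and
// record it at the end, and only if no read error occurred.
static void load_rows(Session &thd, const std::vector<Row> &rows, uint64_t &next_auto_id)
{
  uint64_t id = 0;
  bool read_error = false;           // stand-in for read_info.error
  for (const Row &row : rows)
  {
    thd.insert_id_used = false;
    if (row.needs_auto_id)           // write_record() handing out an auto value
    {
      thd.last_insert_id = next_auto_id++;
      thd.insert_id_used = true;
    }
    if (!id && thd.insert_id_used)   // keep only the first one
      id = thd.last_insert_id;
  }
  if (id && !read_error)
    thd.insert_id(id);               // for the binary/update log
}

int main()
{
  Session thd;
  uint64_t next_id = 10;
  std::vector<Row> rows = { {true}, {true}, {false}, {true} };
  load_rows(thd, rows, next_id);
  std::printf("id recorded for the log: %llu\n",
              (unsigned long long) thd.logged_insert_id);  // prints 10, not 12
  return 0;
}

Keeping the first value rather than the last matches what LAST_INSERT_ID() reports for multi-row inserts, and writing it to the binary/update log presumably lets the slave seed the same auto_increment sequence before replaying the LOAD, so master and slave generate identical values.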