From 173802140785148cc9e68cb149d9d6501b54a04f Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Thu, 4 Jun 2009 23:36:34 +0500 Subject: Bug#43733 Select on processlist let the embedded server crash (concurrent_innodb_safelog) the thread->mysys_var parameter should be empty for the idle embedded-server threads so that working threads can safely free this memory. per-file comments: libmysqld/lib_sql.cc Bug#43733 Select on processlist let the embedded server crash (concurrent_innodb_safelog) set thread->mysys_var= 0 after the query is handled mysql-test/include/concurrent.inc Bug#43733 Select on processlist let the embedded server crash (concurrent_innodb_safelog) enable these for the embedded-server mode sql/sql_show.cc Bug#43733 Select on processlist let the embedded server crash (concurrent_innodb_safelog) show thread lock status in the query result --- sql/sql_show.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_show.cc b/sql/sql_show.cc index d08b3a248c4..e9ae8fea3ed 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1888,7 +1888,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) tmp->mysys_var->current_cond ? "Waiting on cond" : NullS); #else - val= (char *) "Writing to net"; + val= (char *) (tmp->proc_info ? tmp->proc_info : NullS); #endif if (val) { -- cgit v1.2.1 From 74deaae946f060318900b93249e1d48f4d43c9af Mon Sep 17 00:00:00 2001 From: "Tatiana A. Nurnberg" Date: Wed, 24 Jun 2009 13:02:20 +0200 Subject: Bug#43508: Renaming timestamp or date column triggers table copy Altering a table to update a column with types DATE or TIMESTAMP would incorrectly be seen as a significant change that necessitates a slow copy+rename operation instead of a fast update. There were two problems: The character set is magically set for TIMESTAMP to be "binary", but that was done too deep in field use code for ALTER TABLE to know of it. Now, put that in the constructor for Field_timestamp. Also, when we set the character set for the new replacement/ comparison field, also raise the "binary" field flag that tells us we should compare it exactly. That is necessary to match the old stored definition. Next is the problem that the default length for TIMESTAMP and DATE fields is different than the length read from the .frm . The compressed size is written to the file, but the human-readable, part-delimited length is used as default length. IIRC, for timestamp it was 19!=14, and for date it was 8!=10. Length mismatch causes a table copy. Also, clean up a place where a comparison function alters one of its parameters and replace it with an assertion of the condition it mutates. --- sql/field.cc | 47 +++++++++++++++++++---------------------------- sql/field.h | 22 ++++++++++++++++------ sql/sql_table.cc | 29 ++++++++++++++++------------- 3 files changed, 51 insertions(+), 47 deletions(-) (limited to 'sql') diff --git a/sql/field.cc b/sql/field.cc index ed085de1db3..ea80e809c4e 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4723,6 +4723,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg, { /* For 4.0 MYD and 4.0 InnoDB compatibility */ flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + field_charset= &my_charset_bin; if (!share->timestamp_field && unireg_check != NONE) { /* This timestamp has auto-update */ @@ -4743,6 +4744,7 @@ Field_timestamp::Field_timestamp(bool maybe_null_arg, { /* For 4.0 MYD and 4.0 InnoDB compatibility */ flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + field_charset= &my_charset_bin; if (unireg_check != TIMESTAMP_DN_FIELD) flags|= ON_UPDATE_NOW_FLAG; } @@ -6504,20 +6506,9 @@ uint Field::is_equal(Create_field *new_field) } -/* If one of the fields is binary and the other one isn't return 1 else 0 */ - -bool Field_str::compare_str_field_flags(Create_field *new_field, uint32 flag_arg) -{ - return (((new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) && - !(flag_arg & (BINCMP_FLAG | BINARY_FLAG))) || - (!(new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) && - (flag_arg & (BINCMP_FLAG | BINARY_FLAG)))); -} - - uint Field_str::is_equal(Create_field *new_field) { - if (compare_str_field_flags(new_field, flags)) + if (field_flags_are_binary() != new_field->field_flags_are_binary()) return 0; return ((new_field->sql_type == real_type()) && @@ -8283,7 +8274,7 @@ uint Field_blob::max_packed_col_length(uint max_length) uint Field_blob::is_equal(Create_field *new_field) { - if (compare_str_field_flags(new_field, flags)) + if (field_flags_are_binary() != new_field->field_flags_are_binary()) return 0; return ((new_field->sql_type == get_blob_type_from_length(max_data_length())) @@ -9569,7 +9560,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, } if (length == 0) - fld_length= 0; /* purecov: inspected */ + fld_length= NULL; /* purecov: inspected */ } sign_len= fld_type_modifier & UNSIGNED_FLAG ? 0 : 1; @@ -9721,8 +9712,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, case MYSQL_TYPE_TIMESTAMP: if (fld_length == NULL) { - /* Compressed date YYYYMMDDHHMMSS */ - length= MAX_DATETIME_COMPRESSED_WIDTH; + length= MAX_DATETIME_WIDTH; } else if (length != MAX_DATETIME_WIDTH) { @@ -9787,7 +9777,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, sql_type= MYSQL_TYPE_NEWDATE; /* fall trough */ case MYSQL_TYPE_NEWDATE: - length= 10; + length= MAX_DATE_WIDTH; break; case MYSQL_TYPE_TIME: length= 10; @@ -9868,6 +9858,17 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, DBUG_RETURN(TRUE); } + switch (fld_type) { + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_TIMESTAMP: + charset= &my_charset_bin; + flags|= BINCMP_FLAG; + default: break; + } + DBUG_RETURN(FALSE); /* success */ } @@ -9976,16 +9977,6 @@ Field *make_field(TABLE_SHARE *share, uchar *ptr, uint32 field_length, null_bit= ((uchar) 1) << null_bit; } - switch (field_type) { - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_TIMESTAMP: - field_charset= &my_charset_bin; - default: break; - } - if (f_is_alpha(pack_flag)) { if (!f_is_packed(pack_flag)) diff --git a/sql/field.h b/sql/field.h index 5136f760fc1..421411e8593 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. 
+/* Copyright 2000-2008 MySQL AB, 2008, 2009 Sun Microsystems, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -603,6 +603,12 @@ protected: handle_int64(to, from, low_byte_first_from, table->s->db_low_byte_first); return from + sizeof(int64); } + + bool field_flags_are_binary() + { + return (flags & (BINCMP_FLAG | BINARY_FLAG)) != 0; + } + }; @@ -658,7 +664,6 @@ public: friend class Create_field; my_decimal *val_decimal(my_decimal *); virtual bool str_needs_quotes() { return TRUE; } - bool compare_str_field_flags(Create_field *new_field, uint32 flags); uint is_equal(Create_field *new_field); }; @@ -1268,12 +1273,12 @@ public: Field_date(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, CHARSET_INFO *cs) - :Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg, + :Field_str(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, cs) {} Field_date(bool maybe_null_arg, const char *field_name_arg, CHARSET_INFO *cs) - :Field_str((uchar*) 0,10, maybe_null_arg ? (uchar*) "": 0,0, + :Field_str((uchar*) 0, MAX_DATE_WIDTH, maybe_null_arg ? (uchar*) "": 0,0, NONE, field_name_arg, cs) {} enum_field_types type() const { return MYSQL_TYPE_DATE;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; } @@ -1383,12 +1388,12 @@ public: Field_datetime(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, CHARSET_INFO *cs) - :Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg, + :Field_str(ptr_arg, MAX_DATETIME_WIDTH, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, cs) {} Field_datetime(bool maybe_null_arg, const char *field_name_arg, CHARSET_INFO *cs) - :Field_str((uchar*) 0,19, maybe_null_arg ? (uchar*) "": 0,0, + :Field_str((uchar*) 0, MAX_DATETIME_WIDTH, maybe_null_arg ? (uchar*) "": 0,0, NONE, field_name_arg, cs) {} enum_field_types type() const { return MYSQL_TYPE_DATETIME;} #ifdef HAVE_LONG_LONG @@ -2061,6 +2066,11 @@ public: Item *on_update_value, LEX_STRING *comment, char *change, List *interval_list, CHARSET_INFO *cs, uint uint_geom_type); + + bool field_flags_are_binary() + { + return (flags & (BINCMP_FLAG | BINARY_FLAG)) != 0; + } }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 1c88bef7c5a..ab0b453ff9e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2468,8 +2468,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, executing a prepared statement for the second time. */ sql_field->length= sql_field->char_length; - if (!sql_field->charset) + + if (sql_field->charset == NULL) sql_field->charset= create_info->default_table_charset; + /* table_charset is set in ALTER TABLE if we want change character set for all varchar/char columns. @@ -5470,8 +5472,8 @@ compare_tables(TABLE *table, Field **f_ptr, *field; uint changes= 0, tmp; uint key_count; - List_iterator_fast new_field_it, tmp_new_field_it; - Create_field *new_field, *tmp_new_field; + List_iterator_fast tmp_new_field_it; + Create_field *tmp_new_field; KEY_PART_INFO *key_part; KEY_PART_INFO *end; THD *thd= table->in_use; @@ -5497,6 +5499,9 @@ compare_tables(TABLE *table, pass it to mysql_prepare_create_table, then use the result to evaluate possibility of fast ALTER TABLE, and then destroy the copy. + + We shouldn't need to refer later in the function to alter_info + after this step. 
*/ Alter_info tmp_alter_info(*alter_info, thd->mem_root); uint db_options= 0; /* not used */ @@ -5508,6 +5513,7 @@ compare_tables(TABLE *table, table->file, key_info_buffer, &key_count, 0)) DBUG_RETURN(1); + /* Allocate result buffers. */ if (! (*index_drop_buffer= (uint*) thd->alloc(sizeof(uint) * table->s->keys)) || @@ -5541,7 +5547,7 @@ compare_tables(TABLE *table, prior to 5.0 branch. See BUG#6236. */ - if (table->s->fields != alter_info->create_list.elements || + if (table->s->fields != tmp_alter_info.create_list.elements || table->s->db_type() != create_info->db_type || table->s->tmp_table || create_info->used_fields & HA_CREATE_USED_ENGINE || @@ -5550,7 +5556,7 @@ compare_tables(TABLE *table, (table->s->row_type != create_info->row_type) || create_info->used_fields & HA_CREATE_USED_PACK_KEYS || create_info->used_fields & HA_CREATE_USED_MAX_ROWS || - (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) || + (tmp_alter_info.flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) || order_num || !table->s->mysql_version || (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar)) @@ -5563,22 +5569,19 @@ compare_tables(TABLE *table, Use transformed info to evaluate possibility of fast ALTER TABLE but use the preserved field to persist modifications. */ - new_field_it.init(alter_info->create_list); tmp_new_field_it.init(tmp_alter_info.create_list); /* Go through fields and check if the original ones are compatible with new table. */ - for (f_ptr= table->field, new_field= new_field_it++, - tmp_new_field= tmp_new_field_it++; + + + for (f_ptr= table->field, tmp_new_field= tmp_new_field_it++; (field= *f_ptr); - f_ptr++, new_field= new_field_it++, - tmp_new_field= tmp_new_field_it++) + f_ptr++, tmp_new_field= tmp_new_field_it++) { - /* Make sure we have at least the default charset in use. 
*/ - if (!new_field->charset) - new_field->charset= create_info->default_table_charset; + DBUG_ASSERT(tmp_new_field->charset != NULL); /* Check that NULL behavior is same for old and new fields */ if ((tmp_new_field->flags & NOT_NULL_FLAG) != -- cgit v1.2.1 From a80bd0b5d7267d420aa876ed7d7edaed2c7e898d Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Thu, 23 Jul 2009 19:01:24 +0200 Subject: Bug #46212 safe_process: FATAL ERROR, Unknown option: --nocore Also fixed mysqld.cc to avoid popup-boxes --- sql/mysqld.cc | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 080f78993a1..fa8b4a971d7 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2103,15 +2103,14 @@ static void init_signals(void) win_install_sigabrt_handler(); if(opt_console) SetConsoleCtrlHandler(console_event_handler,TRUE); - else - { + /* Avoid MessageBox()es*/ - _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); - _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); - _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); - _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); - _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); - _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); + _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); + _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); + _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); + _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); + _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); + _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); /* Do not use SEM_NOGPFAULTERRORBOX in the following SetErrorMode (), @@ -2120,8 +2119,8 @@ static void init_signals(void) exception filter is not guaranteed to work in all situation (like heap corruption or stack overflow) */ - SetErrorMode(SetErrorMode(0)|SEM_FAILCRITICALERRORS|SEM_NOOPENFILEERRORBOX); - } + SetErrorMode(SetErrorMode(0) | SEM_FAILCRITICALERRORS + | SEM_NOOPENFILEERRORBOX); SetUnhandledExceptionFilter(my_unhandler_exception_filter); } -- cgit v1.2.1 From 32c7efa6b420b09e00d42b99f29a790064694d3b Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 2 Sep 2009 16:19:28 +0500 Subject: BUG#46483 - drop table of partitioned table may leave extraneous file Online/fast ALTER TABLE of a partitioned table may leave temporary file in database directory. Fixed by removing unnecessary call to handler::ha_create_handler_files(), which was creating partitioning definition file. mysql-test/r/partition_innodb.result: A test case for BUG#46483. mysql-test/t/partition_innodb.test: A test case for BUG#46483. sql/unireg.cc: Do not call ha_create_handler_files() when we were requested to create only dot-frm file. 
--- sql/unireg.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/unireg.cc b/sql/unireg.cc index 68a352e4a44..60674b8390b 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -412,10 +412,10 @@ int rea_create_table(THD *thd, const char *path, DBUG_ASSERT(*fn_rext(frm_name)); if (thd->variables.keep_files_on_create) create_info->options|= HA_CREATE_KEEP_FILES; - if (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info)) - goto err_handler; - if (!create_info->frm_only && ha_create_table(thd, path, db, table_name, - create_info,0)) + if (!create_info->frm_only && + (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG, + create_info) || + ha_create_table(thd, path, db, table_name, create_info, 0))) goto err_handler; DBUG_RETURN(0); -- cgit v1.2.1 From 76b8bd35898ea74734b4c57c10bdb2928dd7dd55 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Thu, 3 Sep 2009 12:08:55 +0200 Subject: Bug#46486 warnings produced when running mysql_install_db During start up some plugins are disabled by default. This caused an additional warning level message to be emitted as a result of a previous bug patch. Since there is risk of unnecessary confusion regarding the operation level of the server the redundant information is removed. --- sql/sql_plugin.cc | 3 --- 1 file changed, 3 deletions(-) (limited to 'sql') diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index da168d36429..f00e1f72390 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1565,9 +1565,6 @@ void plugin_shutdown(void) } } - if (count > free_slots) - sql_print_warning("Forcing shutdown of %d plugins", count - free_slots); - plugins= (struct st_plugin_int **) my_alloca(sizeof(void*) * (count+1)); /* -- cgit v1.2.1 From 2fc9c5d19993f4af526b97153ee7363ac108831b Mon Sep 17 00:00:00 2001 From: Satya B Date: Thu, 3 Sep 2009 16:02:03 +0530 Subject: Fix for BUG#46591 - .frm file isn't sync'd with sync_frm enabled for CREATE TABLE...LIKE... The mysql server option 'sync_frm' is ignored when table is created with syntax CREATE TABLE .. LIKE.. Fixed by adding the MY_SYNC flag and calling my_sync() from my_copy() when the flag is set. In mysql_create_table(), when the 'sync_frm' is set, MY_SYNC flag is passed to my_copy(). Note: TestCase is not attached and can be tested manually using debugger. client/Makefile.am: BUG#46591 - .frm file isn't sync'd with sync_frm enabled for CREATE TABLE...LIKE... add my_sync to sources as it is used in my_copy() method include/my_sys.h: BUG#46591 - .frm file isn't sync'd with sync_frm enabled for CREATE TABLE...LIKE... MY_SYNC flag is added to call my_sync() method mysys/my_copy.c: BUG#46591 - .frm file isn't sync'd with sync_frm enabled for CREATE TABLE...LIKE... my_sync() is method is called when MY_SYNC is set in my_copy() sql/sql_table.cc: BUG#46591 - .frm file isn't sync'd with sync_frm enabled for CREATE TABLE...LIKE... 
Fixed mysql_create_like_table() to call my_sync() when opt_sync_frm variable is set --- sql/sql_table.cc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 84370873054..5a3100685e0 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2773,6 +2773,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST *src_table, int err; bool res= TRUE; db_type not_used; + myf flags= MY_DONT_OVERWRITE_FILE; DBUG_ENTER("mysql_create_like_table"); /* @@ -2859,10 +2860,14 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST *src_table, DBUG_EXECUTE_IF("sleep_create_like_before_copy", my_sleep(6000000);); + if (opt_sync_frm && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) + flags|= MY_SYNC; + /* Create a new table by copying from source table + and sync the new table if the flag MY_SYNC is set */ - if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE))) + if (my_copy(src_path, dst_path, flags)) { if (my_errno == ENOENT) my_error(ER_BAD_DB_ERROR,MYF(0),db); -- cgit v1.2.1 From 629557ff13e28e3422dfa1c354c44ef2fd62e4d0 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 3 Sep 2009 18:03:46 +0300 Subject: Bug #46791: Assertion failed:(table->key_read==0),function unknown function,file sql_base.cc When uncacheable queries are written to a temp table the optimizer must preserve the original JOIN structure, because it is re-using the JOIN structure to read from the resulting temporary table. This was done only for uncacheable sub-queries. But top level queries can also benefit from this mechanism, specially if they're using index access and need a reset. Fixed by not limiting the saving of JOIN structure to subqueries exclusively. Added a new test file to extend the existing (large) subquery.test. --- sql/sql_select.cc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b670e6e3637..9d5e67c9532 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1474,12 +1474,8 @@ JOIN::optimize() } } - /* - If this join belongs to an uncacheable subquery save - the original join - */ - if (select_lex->uncacheable && !is_top_level_join() && - init_save_join_tab()) + /* If this join belongs to an uncacheable query save the original join */ + if (select_lex->uncacheable && init_save_join_tab()) DBUG_RETURN(-1); /* purecov: inspected */ } -- cgit v1.2.1 From 9776b6f9cd4fc4d8c84ae373fd0a44c486c7c2e2 Mon Sep 17 00:00:00 2001 From: V Narayanan Date: Fri, 4 Sep 2009 09:27:11 +0530 Subject: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Inserting a negative value in the autoincrement column of a partitioned innodb table was causing the value of the auto increment counter to wrap around into a very large positive value. The consequences are the same as if a very large positive value was inserted into a column, e.g. reduced autoincrement range, failure to read autoincrement counter. The current patch ensures that before calculating the next auto increment value, the current value is within the positive maximum allowed limit. mysql-test/suite/parts/inc/partition_auto_increment.inc: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Adds tests for performing insert,update and delete on a partition table with negative auto_increment values. 
mysql-test/suite/parts/r/partition_auto_increment_innodb.result: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Result file for the innodb engine. mysql-test/suite/parts/r/partition_auto_increment_memory.result: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Result file for the memory engine. mysql-test/suite/parts/r/partition_auto_increment_myisam.result: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Result file for the myisam engine. mysql-test/suite/parts/r/partition_auto_increment_ndb.result: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Result file for the ndb engine. mysql-test/suite/parts/t/partition_auto_increment_archive.test: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Adds a variable that allows the Archive engine to skip tests that involve insertion of negative auto increment values. mysql-test/suite/parts/t/partition_auto_increment_blackhole.test: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Adds a variable that allows the Blackhole engine to skip tests that involve insertion of negative auto increment values. sql/ha_partition.cc: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Ensures that the current value is lesser than the upper limit for the type of the field before setting the next auto increment value to be calculated. sql/ha_partition.h: Bug#45823 Assertion failure in file row/row0mysql.c line 1386 Modifies the set_auto_increment_if_higher function, to take into account negative auto increment values when doing a comparison. --- sql/ha_partition.cc | 4 ++-- sql/ha_partition.h | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index ac8c46ec4e3..63f8271cca1 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3024,7 +3024,7 @@ int ha_partition::write_row(uchar * buf) tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ error= m_file[part_id]->ha_write_row(buf); if (have_auto_increment && !table->s->next_number_keypart) - set_auto_increment_if_higher(table->next_number_field->val_int()); + set_auto_increment_if_higher(table->next_number_field); reenable_binlog(thd); exit: table->timestamp_field_type= orig_timestamp_type; @@ -3128,7 +3128,7 @@ exit: HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data; if (!ha_data->auto_inc_initialized) info(HA_STATUS_AUTO); - set_auto_increment_if_higher(table->found_next_number_field->val_int()); + set_auto_increment_if_higher(table->found_next_number_field); } table->timestamp_field_type= orig_timestamp_type; DBUG_RETURN(error); diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 8a81a759e2a..d0301e24a93 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -936,9 +936,11 @@ private: auto_increment_lock= FALSE; } } - virtual void set_auto_increment_if_higher(const ulonglong nr) + virtual void set_auto_increment_if_higher(Field *field) { HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data; + ulonglong nr= (((Field_num*) field)->unsigned_flag || + field->val_int() > 0) ? 
field->val_int() : 0; lock_auto_increment(); DBUG_ASSERT(ha_data->auto_inc_initialized == TRUE); /* must check when the mutex is taken */ -- cgit v1.2.1 From 6e27ef435e3863382295f1a1d41424c9b8d4c197 Mon Sep 17 00:00:00 2001 From: Satya B Date: Fri, 4 Sep 2009 12:21:54 +0530 Subject: Fix for BUG#46384 - mysqld segfault when trying to create table with same name as existing view When trying to create a table with the same name as an existing view with a join, the mysql server crashes. The problem is that when create table is issued with the same name as a view, while verifying against the existing tables, we always assume that a base table object has been created. In this case, since it is a view over multiple tables, we don't have the mysql derived table object. Fixed the logic which checks if there is an existing table so that it does not assume a table object has been created when the base table is a view over multiple tables. mysql-test/r/create.result: BUG#46384 - mysqld segfault when trying to create table with same name as existing view Testcase for the bug mysql-test/t/create.test: BUG#46384 - mysqld segfault when trying to create table with same name as existing view Testcase for the bug sql/sql_insert.cc: BUG#46384 - mysqld segfault when trying to create table with same name as existing view Fixed the create_table_from_items() method to properly check if the base table is a view over multiple tables. --- sql/sql_insert.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d2a0f47f1a9..b0958201795 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3217,7 +3217,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, DBUG_EXECUTE_IF("sleep_create_select_before_check_if_exists", my_sleep(6000000);); if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE) && - create_table->table->db_stat) + (create_table->table && create_table->table->db_stat)) { /* Table already exists and was open at open_and_lock_tables() stage. */ if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) -- cgit v1.2.1 From 643fbe4234a06e51746c8912223652a3b41fe133 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Fri, 4 Sep 2009 12:20:53 +0500 Subject: Bug#45989 memory leak after explain encounters an error in the query Memory allocated in TMP_TABLE_PARAM::copy_field is not cleaned up. The fix is to clean up the TMP_TABLE_PARAM::copy_field array in JOIN::destroy. mysql-test/r/explain.result: test result mysql-test/t/explain.test: test case sql/sql_select.cc: Memory allocated in TMP_TABLE_PARAM::copy_field is not cleaned up. The fix is to clean up the TMP_TABLE_PARAM::copy_field array in JOIN::destroy. --- sql/sql_select.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ff179c432a8..b670e6e3637 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2176,7 +2176,7 @@ JOIN::destroy() } } tmp_join->tmp_join= 0; - tmp_table_param.copy_field=0; + tmp_table_param.cleanup(); DBUG_RETURN(tmp_join->destroy()); } cond_equal= 0; -- cgit v1.2.1 From 2a6ac469fc7cd940d7864babafb72798b7643bf3 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Fri, 4 Sep 2009 13:14:54 +0500 Subject: Fix for bug#46629: Item_in_subselect::val_int(): Assertion `0' on subquery inside a SP Problem: repeated calls of an SP containing an incorrect query with a subselect may lead to a failed ASSERT(). Fix: set the proper subselect state if an error occurred during subquery transformation.
mysql-test/r/sp.result: Fix for bug#46629: Item_in_subselect::val_int(): Assertion `0' on subquery inside a SP - test result. mysql-test/t/sp.test: Fix for bug#46629: Item_in_subselect::val_int(): Assertion `0' on subquery inside a SP - test case. sql/item_subselect.cc: Fix for bug#46629: Item_in_subselect::val_int(): Assertion `0' on subquery inside a SP - don't set Item_subselect::changed in the Item_subselect::fix_fields() if an error occured during subquery transformation. That prevents us of further processing incorrect subqueries after Item_in_subselect::select_in_like_transformer(). --- sql/item_subselect.cc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index cdb091fa07e..da651cec70c 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -155,13 +155,11 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref) if (check_stack_overrun(thd, STACK_MIN_SIZE, (uchar*)&res)) return TRUE; - res= engine->prepare(); - - // all transformation is done (used by prepared statements) - changed= 1; - - if (!res) + if (!(res= engine->prepare())) { + // all transformation is done (used by prepared statements) + changed= 1; + if (substitution) { int ret= 0; -- cgit v1.2.1 From 0c4c2d7b8db1fead32aa48b12d538b5a8b522c59 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Fri, 4 Sep 2009 12:32:21 +0200 Subject: Bug#46486 warnings produced when running mysql_install_db Incremental patch part 2 Removing dead code and changing a note level message to a warning. sql/sql_plugin.cc: * Remove free_slots. The only purpose for this variable was to trigger a redundant warning message and it failed. * Change the note level message about shutting down plugins which didn't end nicely to a warning level message. (If this shutdown fails and there still are reference counts in the plugin an additional error level message is emitted) --- sql/sql_plugin.cc | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'sql') diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index f00e1f72390..025c8a8248d 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1520,7 +1520,7 @@ error: void plugin_shutdown(void) { - uint i, count= plugin_array.elements, free_slots= 0; + uint i, count= plugin_array.elements; struct st_plugin_int **plugins, *plugin; struct st_plugin_dl **dl; DBUG_ENTER("plugin_shutdown"); @@ -1541,18 +1541,13 @@ void plugin_shutdown(void) while (reap_needed && (count= plugin_array.elements)) { reap_plugins(); - for (i= free_slots= 0; i < count; i++) + for (i= 0; i < count; i++) { plugin= *dynamic_element(&plugin_array, i, struct st_plugin_int **); - switch (plugin->state) { - case PLUGIN_IS_READY: + if (plugin->state == PLUGIN_IS_READY) + { plugin->state= PLUGIN_IS_DELETED; reap_needed= true; - break; - case PLUGIN_IS_FREED: - case PLUGIN_IS_UNINITIALIZED: - free_slots++; - break; } } if (!reap_needed) @@ -1586,8 +1581,8 @@ void plugin_shutdown(void) if (!(plugins[i]->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_FREED | PLUGIN_IS_DISABLED))) { - sql_print_information("Plugin '%s' will be forced to shutdown", - plugins[i]->name.str); + sql_print_warning("Plugin '%s' will be forced to shutdown", + plugins[i]->name.str); /* We are forcing deinit on plugins so we don't want to do a ref_count check until we have processed all the plugins. 
-- cgit v1.2.1 From 35d6911b283b3cf7720af9a7edb8c7ed3b3726db Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 4 Sep 2009 15:02:15 +0200 Subject: Bug#35845: unnecessary call to ha_start_bulk_insert for not used partitions (Backport) The problem is that an insert (ha_start_bulk_insert) into a partitioned table will call ha_start_bulk_insert for every partition, used or not. The solution is to delay the call to a partition's ha_start_bulk_insert until the first row is to be inserted into that partition. sql/ha_partition.cc: Bug#35845: unnecessary call to ha_start_bulk_insert for not used partitions Use a bitmap to keep track of the partitions for which ha_start_bulk_insert has been called, and check against it whether it should be called before continuing with the insert/update, or whether it has already been called. This way it will only call ha_start_bulk_insert for the used partitions. There is also a small prediction of how many rows will be inserted into the current partition: it guesses an equal distribution of the records across all partitions, except for the first used partition, which is guessed at 50% of the given estimate if the partitioning function is monotonic. sql/ha_partition.h: Bug#35845: unnecessary call to ha_start_bulk_insert for not used partitions Added helper variables and a function for delaying ha_bulk_insert until it has to be called. Fixed a comment. --- sql/ha_partition.cc | 95 +++++++++++++++++++++++++++++++++++++++++++++-------- sql/ha_partition.h | 10 +++++- 2 files changed, 91 insertions(+), 14 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 63f8271cca1..c849bbd12c5 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -239,6 +239,7 @@ void ha_partition::init_handler_variables() m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; is_clone= FALSE, + m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; /* @@ -2464,11 +2465,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) } } + /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ + if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) + DBUG_RETURN(1); + bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ if (!is_clone) { if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) + { + bitmap_free(&m_bulk_insert_started); DBUG_RETURN(1); + } bitmap_set_all(&(m_part_info->used_partitions)); } @@ -2552,12 +2560,18 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) calling open on all individual handlers.
*/ m_handler_status= handler_opened; + if (m_part_info->part_expr) + m_part_func_monotonicity_info= + m_part_info->part_expr->get_monotonicity_info(); + else if (m_part_info->list_of_part_fields) + m_part_func_monotonicity_info= MONOTONIC_STRICT_INCREASING; info(HA_STATUS_VARIABLE | HA_STATUS_CONST); DBUG_RETURN(0); err_handler: while (file-- != m_file) (*file)->close(); + bitmap_free(&m_bulk_insert_started); if (!is_clone) bitmap_free(&(m_part_info->used_partitions)); @@ -2605,6 +2619,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); + bitmap_free(&m_bulk_insert_started); if (!is_clone) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -3021,6 +3036,8 @@ int ha_partition::write_row(uchar * buf) } m_last_part= part_id; DBUG_PRINT("info", ("Insert in partition %d", part_id)); + start_part_bulk_insert(part_id); + tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ error= m_file[part_id]->ha_write_row(buf); if (have_auto_increment && !table->s->next_number_keypart) @@ -3083,6 +3100,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) } m_last_part= new_part_id; + start_part_bulk_insert(new_part_id); if (new_part_id == old_part_id) { DBUG_PRINT("info", ("Update in partition %d", new_part_id)); @@ -3247,22 +3265,65 @@ int ha_partition::delete_all_rows() DESCRIPTION rows == 0 means we will probably insert many rows */ - void ha_partition::start_bulk_insert(ha_rows rows) { - handler **file; DBUG_ENTER("ha_partition::start_bulk_insert"); - rows= rows ? rows/m_tot_parts + 1 : 0; - file= m_file; - do - { - (*file)->ha_start_bulk_insert(rows); - } while (*(++file)); + m_bulk_inserted_rows= 0; + bitmap_clear_all(&m_bulk_insert_started); + /* use the last bit for marking if bulk_insert_started was called */ + bitmap_set_bit(&m_bulk_insert_started, m_tot_parts); DBUG_VOID_RETURN; } +/* + Check if start_bulk_insert has been called for this partition, + if not, call it and mark it called +*/ +void ha_partition::start_part_bulk_insert(uint part_id) +{ + if (!bitmap_is_set(&m_bulk_insert_started, part_id) && + bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) + { + m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows()); + bitmap_set_bit(&m_bulk_insert_started, part_id); + } + m_bulk_inserted_rows++; +} + + +/* + Try to predict the number of inserts into this partition. + + If less than 10 rows (including 0 which means Unknown) + just give that as a guess + If monotonic partitioning function was used + guess that 50 % of the inserts goes to the first partition + For all other cases, guess on equal distribution between the partitions +*/ +ha_rows ha_partition::guess_bulk_insert_rows() +{ + DBUG_ENTER("guess_bulk_insert_rows"); + + if (estimation_rows_to_insert < 10) + DBUG_RETURN(estimation_rows_to_insert); + + /* If first insert/partition and monotonic partition function, guess 50%. 
*/ + if (!m_bulk_inserted_rows && + m_part_func_monotonicity_info != NON_MONOTONIC && + m_tot_parts > 1) + DBUG_RETURN(estimation_rows_to_insert / 2); + + /* Else guess on equal distribution (+1 is to avoid returning 0/Unknown) */ + if (m_bulk_inserted_rows < estimation_rows_to_insert) + DBUG_RETURN(((estimation_rows_to_insert - m_bulk_inserted_rows) + / m_tot_parts) + 1); + /* The estimation was wrong, must say 'Unknown' */ + DBUG_RETURN(0); +} + + /* Finish a large batch of insert rows @@ -3272,21 +3333,29 @@ void ha_partition::start_bulk_insert(ha_rows rows) RETURN VALUE >0 Error code 0 Success + + Note: end_bulk_insert can be called without start_bulk_insert + being called, see bugĀ¤44108. + */ int ha_partition::end_bulk_insert() { int error= 0; - handler **file; + uint i; DBUG_ENTER("ha_partition::end_bulk_insert"); - file= m_file; - do + if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) + DBUG_RETURN(error); + + for (i= 0; i < m_tot_parts; i++) { int tmp; - if ((tmp= (*file)->ha_end_bulk_insert())) + if (bitmap_is_set(&m_bulk_insert_started, i) && + (tmp= m_file[i]->ha_end_bulk_insert())) error= tmp; - } while (*(++file)); + } + bitmap_clear_all(&m_bulk_insert_started); DBUG_RETURN(error); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index d0301e24a93..f47dfe8f621 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -176,6 +176,11 @@ private: This to ensure it will work with statement based replication. */ bool auto_increment_safe_stmt_log_lock; + /** For optimizing ha_start_bulk_insert calls */ + MY_BITMAP m_bulk_insert_started; + ha_rows m_bulk_inserted_rows; + /** used for prediction of start_bulk_insert rows */ + enum_monotonicity_info m_part_func_monotonicity_info; public: handler *clone(MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) @@ -353,7 +358,6 @@ public: Bulk inserts are supported if all underlying handlers support it. start_bulk_insert and end_bulk_insert is called before and after a number of calls to write_row. - Not yet though. */ virtual int write_row(uchar * buf); virtual int update_row(const uchar * old_data, uchar * new_data); @@ -361,6 +365,10 @@ public: virtual int delete_all_rows(void); virtual void start_bulk_insert(ha_rows rows); virtual int end_bulk_insert(); +private: + ha_rows guess_bulk_insert_rows(); + void start_part_bulk_insert(uint part_id); +public: virtual bool is_fatal_error(int error, uint flags) { -- cgit v1.2.1 From 505346028f975d26f1353c46bdb3db618b1e306c Mon Sep 17 00:00:00 2001 From: Alexey Kopytov Date: Sun, 6 Sep 2009 00:42:17 +0400 Subject: Bug #46159: simple query that never returns The external 'for' loop in remove_dup_with_compare() handled HA_ERR_RECORD_DELETED by just starting over without advancing to the next record which caused an infinite loop. This condition could be triggered on certain data by a SELECT query containing DISTINCT, GROUP BY and HAVING clauses. Fixed remove_dup_with_compare() so that we always advance to the next record when receiving HA_ERR_RECORD_DELETED from rnd_next(). mysql-test/r/distinct.result: Added a test case for bug #46159. mysql-test/t/distinct.test: Added a test case for bug #46159. sql/sql_select.cc: Fixed remove_dup_with_compare() so that we always advance to the next record when receiving HA_ERR_RECORD_DELETED from rnd_next(). 
--- sql/sql_select.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5cb0de1ba5c..a02430446d1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -13665,7 +13665,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, if (error) { if (error == HA_ERR_RECORD_DELETED) - continue; + { + error= file->rnd_next(record); + continue; + } if (error == HA_ERR_END_OF_FILE) break; goto err; -- cgit v1.2.1 From dd407c5228df80a7ef6086c412940084e7498627 Mon Sep 17 00:00:00 2001 From: Mikael Ronstrom Date: Mon, 7 Sep 2009 10:37:54 +0200 Subject: Fix to ensure that all subpartitions gets deleted before renaming starts, BUG#47029 --- sql/ha_partition.cc | 1 + 1 file changed, 1 insertion(+) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index ac8c46ec4e3..2e303744c1a 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -705,6 +705,7 @@ int ha_partition::rename_partitions(const char *path) if (m_is_sub_partitioned) { List_iterator sub_it(part_elem->subpartitions); + j= 0; do { sub_elem= sub_it++; -- cgit v1.2.1 From 2cb3a131f47a8bcf0398a28e4a6a16d1c30c9708 Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Mon, 7 Sep 2009 11:57:22 +0200 Subject: Bug#46259: 5.0.83 -> 5.1.36, query doesn't work The parser rule for expressions in a udf parameter list contains two hacks: First, the parser input stream is read verbatim, bypassing the lexer. Second, the Item::name field is overwritten. If the argument to a udf was a field, the field's name as seen by name resolution was overwritten this way. If the field name was quoted or escaped, it would appear as e.g. "`field`". Fixed by not overwriting field names. mysql-test/r/udf.result: Bug#46259: Test result. mysql-test/t/udf.test: Bug#46259: Test case. sql/sql_yacc.yy: Bug#46259: Fix. --- sql/sql_yacc.yy | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a18f57bf9cf..db97e77bbd0 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -7921,7 +7921,13 @@ udf_expr: $2->is_autogenerated_name= FALSE; $2->set_name($4.str, $4.length, system_charset_info); } - else + /* + A field has to have its proper name in order for name + resolution to work, something we are only guaranteed if we + parse it out. If we hijack the input stream with + remember_name we may get quoted or escaped names. + */ + else if ($2->type() != Item::FIELD_ITEM) $2->set_name($1, (uint) ($3 - $1), YYTHD->charset()); $$= $2; } -- cgit v1.2.1 From 540b2dc0041c292220d389bbb5b87a8410aef3f4 Mon Sep 17 00:00:00 2001 From: Ingo Struewing Date: Mon, 7 Sep 2009 18:35:37 +0200 Subject: Bug#17332 - changing key_buffer_size on a running server can crash under load Backport from 5.1. Does also include key cache fixes from: Bug 44068 (RESTORE can disable the MyISAM Key Cache) Bug 40944 (Backup: crash after myisampack) include/keycache.h: Bug#17332 - changing key_buffer_size on a running server can crash under load Added KEY_CACHE components in_resize and waiting_for_resize_cnt. myisam/mi_preload.c: Bug#17332 - changing key_buffer_size on a running server can crash under load Added code to allow LOAD INDEX to load indexes of different block size. mysys/mf_keycache.c: Bug#17332 - changing key_buffer_size on a running server can crash under load . Changed resize_key_cache() to not disable the key cache after the flush phase. Changed queue handling to use standard functions. 
Wake all threads waiting on resize_queue. We can now have read/write threads waiting there (see below). . Combined add_to_queue() and the wait loops that were always following it to the new function wait_on_queue(). Combined release_queue() and the condition that was always preceding it to the new function release_whole_queue(). . Added code to flag and respect the exceptional situation BLOCK_IN_EVICTION. . Rewrote the resize branch of find_key_block(). . Added code to the eviction handling in find_key_block() to catch more exceptional cases. . Changed key_cache_read(), key_cache_insert() and key_cache_write() so that they lock keycache->cache_lock whenever the key cache is initialized. Checking for a disabled cache and incrementing and decrementing the "resize counter" is always done within the lock. Locking and unlocking as well as counting the "resize counter" is now done once outside the loop. All three functions can now handle a NULL return from find_key_block. This happens in the flush phase of a resize and demands direct file I/O. Care is taken for secondary requests (PAGE_WAIT_TO_BE_READ) to wait in any case. Moved block status changes behind the copying of buffer data. key_cache_insert() does now read the block if the caller did supply less data than a full cache block. key_cache_write() does now take care of parallel running flushes (BLOCK_FOR_UPDATE, BLOCK_IN_FLUSHWRITE). . Changed free_block() to un-initialize block variables in the correct order and respect an exceptional BLOCK_IN_EVICTION state. . Changed flushing to take care for parallel running writes. Changed flushing to avoid freeing blocks in eviction. Changed flushing to consider that parallel writes can move blocks from the file_blocks hash to the changed_blocks hash. Changed flushing to take care for other parallel flushes. Changed flushing to assure that it ends with everything flushed. Optimized normal flush at end of statement (FLUSH_KEEP), but let other flush types be stringent. . Added some comments and debugging statements. mysys/my_static.c: Bug#17332 - changing key_buffer_size on a running server can crash under load Removed an unused global variable. sql/ha_myisam.cc: Bug#17332 - changing key_buffer_size on a running server can crash under load Moved an automatic (stack) variable to the scope where it is used. sql/sql_table.cc: Bug#17332 - changing key_buffer_size on a running server can crash under load Changed TL_READ to TL_READ_NO_INSERT in mysql_preload_keys. 
--- sql/ha_myisam.cc | 2 +- sql/sql_table.cc | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index dfd739f6db8..a92e7bbb9fd 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1189,6 +1189,7 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) ulonglong map= ~(ulonglong) 0; TABLE_LIST *table_list= table->pos_in_table_list; my_bool ignore_leaves= table_list->ignore_leaves; + char buf[ERRMSGSIZE+20]; DBUG_ENTER("ha_myisam::preload_keys"); @@ -1216,7 +1217,6 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) errmsg= "Failed to allocate buffer"; break; default: - char buf[ERRMSGSIZE+20]; my_snprintf(buf, ERRMSGSIZE, "Failed to read from index file (errno: %d)", my_errno); errmsg= buf; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 5a3100685e0..e32c17bc678 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2741,8 +2741,13 @@ int reassign_keycache_tables(THD *thd, KEY_CACHE *src_cache, bool mysql_preload_keys(THD* thd, TABLE_LIST* tables) { DBUG_ENTER("mysql_preload_keys"); + /* + We cannot allow concurrent inserts. The storage engine reads + directly from the index file, bypassing the cache. It could read + outdated information if parallel inserts into cache blocks happen. + */ DBUG_RETURN(mysql_admin_table(thd, tables, 0, - "preload_keys", TL_READ, 0, 0, 0, 0, + "preload_keys", TL_READ_NO_INSERT, 0, 0, 0, 0, &handler::preload_keys, 0)); } -- cgit v1.2.1 From 3228a2be669f5d92a607b8212c9c3260a02fd47f Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 9 Sep 2009 14:38:50 +0500 Subject: BUG#45638 - Create temporary table with engine innodb fails Create temporary InnoDB table fails on case insensitive filesystems, when lower_case_table_names is 2 (e.g. OS X) and temporary directory path contains upper case letters. The problem was that tmpdir prefix was converted to lower case when table was created, but was passed as is when table was opened. Fixed by leaving tmpdir prefix part intact. mysql-test/r/lowercase_mixed_tmpdir_innodb.result: A test case for BUG#45638. mysql-test/t/lowercase_mixed_tmpdir_innodb-master.opt: A test case for BUG#45638. mysql-test/t/lowercase_mixed_tmpdir_innodb-master.sh: A test case for BUG#45638. mysql-test/t/lowercase_mixed_tmpdir_innodb.test: A test case for BUG#45638. sql/handler.cc: Fixed get_canonical_filename() to not lowercase filesystem path prefix for temporary tables. --- sql/handler.cc | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'sql') diff --git a/sql/handler.cc b/sql/handler.cc index e5c64452aaf..a4d88e84f4c 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1885,12 +1885,42 @@ bool ha_flush_logs(handlerton *db_type) return FALSE; } + +/** + @brief make canonical filename + + @param[in] file table handler + @param[in] path original path + @param[out] tmp_path buffer for canonized path + + @details Lower case db name and table name path parts for + non file based tables when lower_case_table_names + is 2 (store as is, compare in lower case). + Filesystem path prefix (mysql_data_home or tmpdir) + is left intact. + + @note tmp_path may be left intact if no conversion was + performed. + + @retval canonized path + + @todo This may be done more efficiently when table path + gets built. Convert this function to something like + ASSERT_CANONICAL_FILENAME. 
+*/ const char *get_canonical_filename(handler *file, const char *path, char *tmp_path) { + uint i; if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED)) return path; + for (i= 0; i <= mysql_tmpdir_list.max; i++) + { + if (is_prefix(path, mysql_tmpdir_list.list[i])) + return path; + } + /* Ensure that table handler get path in lower case */ if (tmp_path != path) strmov(tmp_path, path); -- cgit v1.2.1 From 70972926ab8970267fa8e3f06086282c4b9e747d Mon Sep 17 00:00:00 2001 From: Alexander Nozdrin Date: Thu, 10 Sep 2009 11:40:57 +0400 Subject: A patch for Bug#45118 (mysqld.exe crashed in debug mode on Windows in dbug.c) -- part 2: a patch for the DBUG subsystem to detect misuse of DBUG_ENTER / DBUG_RETURN macros. 5.1 version. --- sql/mysqld.cc | 4 +-- sql/rpl_filter.cc | 2 ++ sql/set_var.cc | 1 + sql/sql_insert.cc | 85 ++++++++++++++++++++++++++++++------------------------- sql/sql_parse.cc | 49 +++++++++++++++++++------------- 5 files changed, 82 insertions(+), 59 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3e2f8eabd39..3f536f01094 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4789,10 +4789,10 @@ static bool read_init_file(char *file_name) DBUG_ENTER("read_init_file"); DBUG_PRINT("enter",("name: %s",file_name)); if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME)))) - return(1); + DBUG_RETURN(TRUE); bootstrap(file); (void) my_fclose(file,MYF(MY_WME)); - return 0; + DBUG_RETURN(FALSE); } diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 3004a3905e5..68272c58bb1 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -350,6 +350,7 @@ Rpl_filter::add_do_db(const char* table_spec) DBUG_ENTER("Rpl_filter::add_do_db"); i_string *db = new i_string(table_spec); do_db.push_back(db); + DBUG_VOID_RETURN; } @@ -359,6 +360,7 @@ Rpl_filter::add_ignore_db(const char* table_spec) DBUG_ENTER("Rpl_filter::add_ignore_db"); i_string *db = new i_string(table_spec); ignore_db.push_back(db); + DBUG_VOID_RETURN; } extern "C" uchar *get_table_key(const uchar *, size_t *, my_bool); diff --git a/sql/set_var.cc b/sql/set_var.cc index 0b89333ce03..b64b54fdd29 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1238,6 +1238,7 @@ void fix_slave_exec_mode(enum_var_type type) } if (bit_is_set(slave_exec_mode_options, SLAVE_EXEC_MODE_IDEMPOTENT) == 0) bit_do_set(slave_exec_mode_options, SLAVE_EXEC_MODE_STRICT); + DBUG_VOID_RETURN; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index b79b9b1ae9e..3ac40ae825a 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2274,44 +2274,9 @@ void kill_delayed_threads(void) } -/* - * Create a new delayed insert thread -*/ - -pthread_handler_t handle_delayed_insert(void *arg) +static void handle_delayed_insert_impl(THD *thd, Delayed_insert *di) { - Delayed_insert *di=(Delayed_insert*) arg; - THD *thd= &di->thd; - - pthread_detach_this_thread(); - /* Add thread to THD list so that's it's visible in 'show processlist' */ - pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; - thd->set_current_time(); - threads.append(thd); - thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED; - pthread_mutex_unlock(&LOCK_thread_count); - - /* - Wait until the client runs into pthread_cond_wait(), - where we free it after the table is opened and di linked in the list. - If we did not wait here, the client might detect the opened table - before it is linked to the list. 
It would release LOCK_delayed_create - and allow another thread to create another handler for the same table, - since it does not find one in the list. - */ - pthread_mutex_lock(&di->mutex); -#if !defined( __WIN__) /* Win32 calls this in pthread_create */ - if (my_thread_init()) - { - /* Can't use my_error since store_globals has not yet been called */ - thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES, - ER(ER_OUT_OF_RESOURCES)); - goto end; - } -#endif - - DBUG_ENTER("handle_delayed_insert"); + DBUG_ENTER("handle_delayed_insert_impl"); thd->thread_stack= (char*) &thd; if (init_thr_lock() || thd->store_globals()) { @@ -2500,6 +2465,49 @@ err: */ ha_autocommit_or_rollback(thd, 1); + DBUG_VOID_RETURN; +} + + +/* + * Create a new delayed insert thread +*/ + +pthread_handler_t handle_delayed_insert(void *arg) +{ + Delayed_insert *di=(Delayed_insert*) arg; + THD *thd= &di->thd; + + pthread_detach_this_thread(); + /* Add thread to THD list so that's it's visible in 'show processlist' */ + pthread_mutex_lock(&LOCK_thread_count); + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; + thd->set_current_time(); + threads.append(thd); + thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED; + pthread_mutex_unlock(&LOCK_thread_count); + + /* + Wait until the client runs into pthread_cond_wait(), + where we free it after the table is opened and di linked in the list. + If we did not wait here, the client might detect the opened table + before it is linked to the list. It would release LOCK_delayed_create + and allow another thread to create another handler for the same table, + since it does not find one in the list. + */ + pthread_mutex_lock(&di->mutex); +#if !defined( __WIN__) /* Win32 calls this in pthread_create */ + if (my_thread_init()) + { + /* Can't use my_error since store_globals has not yet been called */ + thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES, + ER(ER_OUT_OF_RESOURCES)); + goto end; + } +#endif + + handle_delayed_insert_impl(thd, di); + #ifndef __WIN__ end: #endif @@ -2523,7 +2531,8 @@ end: my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); + + return 0; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ca27d476213..a977740ebe2 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -408,29 +408,12 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var, } -/** - Execute commands from bootstrap_file. - - Used when creating the initial grant tables. -*/ - -pthread_handler_t handle_bootstrap(void *arg) +static void handle_bootstrap_impl(THD *thd) { - THD *thd=(THD*) arg; FILE *file=bootstrap_file; char *buff; const char* found_semicolon= NULL; - /* The following must be called before DBUG_ENTER */ - thd->thread_stack= (char*) &thd; - if (my_thread_init() || thd->store_globals()) - { -#ifndef EMBEDDED_LIBRARY - close_connection(thd, ER_OUT_OF_RESOURCES, 1); -#endif - thd->fatal_error(); - goto end; - } DBUG_ENTER("handle_bootstrap"); #ifndef EMBEDDED_LIBRARY @@ -525,6 +508,33 @@ pthread_handler_t handle_bootstrap(void *arg) #endif } + DBUG_VOID_RETURN; +} + + +/** + Execute commands from bootstrap_file. + + Used when creating the initial grant tables. 
+*/ + +pthread_handler_t handle_bootstrap(void *arg) +{ + THD *thd=(THD*) arg; + + /* The following must be called before DBUG_ENTER */ + thd->thread_stack= (char*) &thd; + if (my_thread_init() || thd->store_globals()) + { +#ifndef EMBEDDED_LIBRARY + close_connection(thd, ER_OUT_OF_RESOURCES, 1); +#endif + thd->fatal_error(); + goto end; + } + + handle_bootstrap_impl(thd); + end: net_end(&thd->net); thd->cleanup(); @@ -539,7 +549,8 @@ end: my_thread_end(); pthread_exit(0); #endif - DBUG_RETURN(0); + + return 0; } -- cgit v1.2.1 From 104d9ce76a68fd3da2f9217514a4c61454802c1d Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 10 Sep 2009 13:49:49 +0500 Subject: Bug#42364 SHOW ERRORS returns empty resultset after dropping non existent table partial backport of bug43138 fix mysql-test/r/warnings.result: test result mysql-test/t/warnings.test: test case sql/sql_class.cc: partial backport of bug43138 fix sql/sql_class.h: partial backport of bug43138 fix sql/sql_table.cc: partial backport of bug43138 fix --- sql/sql_class.cc | 25 +++++++++++++++++++++++++ sql/sql_class.h | 25 +++++++++++++++++++++++++ sql/sql_table.cc | 8 ++++---- 3 files changed, 54 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 3f568566c89..daef5a26742 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -399,6 +399,31 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, return buffer; } + +/** + Implementation of Drop_table_error_handler::handle_error(). + The reason in having this implementation is to silence technical low-level + warnings during DROP TABLE operation. Currently we don't want to expose + the following warnings during DROP TABLE: + - Some of table files are missed or invalid (the table is going to be + deleted anyway, so why bother that something was missed); + - A trigger associated with the table does not have DEFINER (One of the + MySQL specifics now is that triggers are loaded for the table being + dropped. So, we may have a warning that trigger does not have DEFINER + attribute during DROP TABLE operation). + + @return TRUE if the condition is handled. +*/ +bool Drop_table_error_handler::handle_error(uint sql_errno, + const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *thd) +{ + return ((sql_errno == EE_DELETE && my_errno == ENOENT) || + sql_errno == ER_TRG_NO_DEFINER); +} + + /** Clear this diagnostics area. diff --git a/sql/sql_class.h b/sql/sql_class.h index f52d5fae76f..c38eb17f191 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1091,6 +1091,31 @@ public: }; +/** + This class is an internal error handler implementation for + DROP TABLE statements. The thing is that there may be warnings during + execution of these statements, which should not be exposed to the user. + This class is intended to silence such warnings. +*/ + +class Drop_table_error_handler : public Internal_error_handler +{ +public: + Drop_table_error_handler(Internal_error_handler *err_handler) + :m_err_handler(err_handler) + { } + +public: + bool handle_error(uint sql_errno, + const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *thd); + +private: + Internal_error_handler *m_err_handler; +}; + + /** Stores status of the currently executed statement. 
Cleared at the beginning of the statement, and then diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 89a84ebc1fe..41e76211dd8 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1772,6 +1772,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, my_bool drop_temporary) { bool error= FALSE, need_start_waiters= FALSE; + Drop_table_error_handler err_handler(thd->get_internal_handler()); DBUG_ENTER("mysql_rm_table"); /* mark for close and remove all cached entries */ @@ -1792,7 +1793,10 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, LOCK_open during wait_if_global_read_lock(), other threads could not close their tables. This would make a pretty deadlock. */ + thd->push_internal_handler(&err_handler); error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); + thd->pop_internal_handler(); + if (need_start_waiters) start_waiting_global_read_lock(thd); @@ -1894,9 +1898,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, DBUG_RETURN(1); } - /* Don't give warnings for not found errors, as we already generate notes */ - thd->no_warnings_for_error= 1; - for (table= tables; table; table= table->next_local) { char *db=table->db; @@ -2145,7 +2146,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, err_with_placeholders: unlock_table_names(thd, tables, (TABLE_LIST*) 0); pthread_mutex_unlock(&LOCK_open); - thd->no_warnings_for_error= 0; DBUG_RETURN(error); } -- cgit v1.2.1 From e436b8866be640b2160f0b756f85559437cff67d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 10 Sep 2009 18:05:53 +0800 Subject: BUG#45999 Row based replication fails when auto_increment field = 0 In RBR, There is an inconsistency between slaves and master. When INSERT statement which includes an auto_increment field is executed, Store engine of master will check the value of the auto_increment field. It will generate a sequence number and then replace the value, if its value is NULL or empty. if the field's value is 0, the store engine will do like encountering the NULL values unless NO_AUTO_VALUE_ON_ZERO is set into SQL_MODE. In contrast, if the field's value is 0, Store engine of slave always generates a new sequence number whether or not NO_AUTO_VALUE_ON_ZERO is set into SQL_MODE. SQL MODE of slave sql thread is always consistency with master's. Another variable is related to this bug. If generateing a sequence number is decided by the values of table->auto_increment_field_not_null and SQL_MODE(if includes MODE_NO_AUTO_VALUE_ON_ZERO) The table->auto_increment_is_not_null is FALSE, which causes this bug to appear. .. --- sql/log_event.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index 0cda724b698..08fe3aba8ed 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8312,6 +8312,16 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability /* Honor next number column if present */ m_table->next_number_field= m_table->found_next_number_field; + /* + * Fixed Bug#45999, In RBR, Store engine of Slave auto-generates new + * sequence numbers for auto_increment fields if the values of them are 0. + * If generateing a sequence number is decided by the values of + * table->auto_increment_field_not_null and SQL_MODE(if includes + * MODE_NO_AUTO_VALUE_ON_ZERO) in update_auto_increment function. + * SQL_MODE of slave sql thread is always consistency with master's. + * In RBR, auto_increment fields never are NULL. 
+ */ + m_table->auto_increment_field_not_null= TRUE; return error; } @@ -8321,6 +8331,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability * { int local_error= 0; m_table->next_number_field=0; + m_table->auto_increment_field_not_null= FALSE; if (bit_is_set(slave_exec_mode, SLAVE_EXEC_MODE_IDEMPOTENT) == 1 || m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER) { -- cgit v1.2.1 From 10406ae65871de074e807e626f9ede686e9321d4 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 10 Sep 2009 15:24:07 +0500 Subject: Bug#46815 CONCAT_WS returning wrong data The problem is that argument buffer can be used as result buffer and it leads to argument value change. The fix is to use 'old buffer' as result buffer only if first argument is not constant item. mysql-test/r/func_str.result: test result mysql-test/t/func_str.test: test case sql/item_strfunc.cc: The problem is that argument buffer can be used as result buffer and it leads to argument value change. The fix is to use 'old buffer' as result buffer only if first argument is not constant item. --- sql/item_strfunc.cc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index e3fe67f4324..6f697a1665a 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -600,6 +600,7 @@ String *Item_func_concat_ws::val_str(String *str) String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info), *sep_str, *res, *res2,*use_as_buff; uint i; + bool is_const= 0; null_value=0; if (!(sep_str= args[0]->val_str(&tmp_sep_str))) @@ -613,7 +614,11 @@ String *Item_func_concat_ws::val_str(String *str) // If not, return the empty string for (i=1; i < arg_count; i++) if ((res= args[i]->val_str(str))) + { + is_const= args[i]->const_item() || !args[i]->used_tables(); break; + } + if (i == arg_count) return &my_empty_string; @@ -631,7 +636,7 @@ String *Item_func_concat_ws::val_str(String *str) current_thd->variables.max_allowed_packet); goto null; } - if (res->alloced_length() >= + if (!is_const && res->alloced_length() >= res->length() + sep_str->length() + res2->length()) { // Use old buffer res->append(*sep_str); // res->length() > 0 always -- cgit v1.2.1 From b6c16b329a861b4b7da9cda2ede9921af912fd38 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Fri, 11 Sep 2009 22:06:27 +0500 Subject: Fix for bug#47130: misplaced or redundant check for null pointer? Problem: LOGGER::general_log_write() relied on valid "thd" parameter passed but had inconsistent "if (thd)" check. Fix: as we always pass a valid "thd" parameter to the method, redundant check removed. sql/log.cc: Fix for bug#47130: misplaced or redundant check for null pointer? - code clean-up, as we rely on the "thd" parameter in the LOGGER::general_log_write(), redundant "if (thd)" check removed, added assert(thd) instead. 
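    For illustration, a minimal sketch of the pattern applied by this change, using placeholder
    names rather than the real server types: once every caller is known to pass a valid session
    pointer, the defensive branch can be replaced by an assertion that documents the contract.

        #include <cassert>

        struct Session { unsigned long thread_id; };

        /* Before: a runtime check that callers can never trigger. */
        unsigned long log_id_checked(Session *thd)
        {
          unsigned long id;
          if (thd)
            id= thd->thread_id;   /* normal thread */
          else
            id= 0;                /* unreachable: thd is always valid */
          return id;
        }

        /* After: assert the precondition and use the pointer directly. */
        unsigned long log_id_asserted(Session *thd)
        {
          assert(thd);            /* contract: a valid session is required */
          return thd->thread_id;
        }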
--- sql/log.cc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index 1af2f3a4ddc..feaa5499912 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1024,14 +1024,10 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command, Log_event_handler **current_handler= general_log_handler_list; char user_host_buff[MAX_USER_HOST_SIZE + 1]; Security_context *sctx= thd->security_ctx; - ulong id; uint user_host_len= 0; time_t current_time; - if (thd) - id= thd->thread_id; /* Normal thread */ - else - id= 0; /* Log from connect handler */ + DBUG_ASSERT(thd); lock_shared(); if (!opt_log) @@ -1050,7 +1046,7 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command, while (*current_handler) error|= (*current_handler++)-> log_general(thd, current_time, user_host_buff, - user_host_len, id, + user_host_len, thd->thread_id, command_name[(uint) command].str, command_name[(uint) command].length, query, query_length, -- cgit v1.2.1 From 31809edc24b2d6ee6b4a9932d5af0b2247ecf8dc Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 17 Sep 2009 14:25:07 +0300 Subject: Bug #46917: mysqd-nt installs wrong When parsing the service installation parameter in default_service_handling() make sure the value of the optional parameter doesn't overwrite it's name. --- sql/mysqld.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index afbbf753813..ce1d562d0ca 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4022,7 +4022,7 @@ default_service_handling(char **argv, if (opt_delim= strchr(extra_opt, '=')) { size_t length= ++opt_delim - extra_opt; - strnmov(pos, extra_opt, length); + pos= strnmov(pos, extra_opt, length); } else opt_delim= extra_opt; -- cgit v1.2.1 From 2535ede7138cb4d1e8986d18b4154c377ba37f3f Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 17 Sep 2009 16:33:23 +0500 Subject: Bug#42364 SHOW ERRORS returns empty resultset after dropping non existent table additional backport of of bug43138 fix mysql-test/t/myisam-system.test: additional backport of of bug43138 fix sql/sql_db.cc: additional backport of of bug43138 fix --- sql/sql_db.cc | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'sql') diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 3fca5bd7df6..bcc8fcf54fc 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -907,6 +907,9 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) remove_db_from_cache(db); pthread_mutex_unlock(&LOCK_open); + Drop_table_error_handler err_handler(thd->get_internal_handler()); + thd->push_internal_handler(&err_handler); + error= -1; /* We temporarily disable the binary log while dropping the objects @@ -939,6 +942,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) error = 0; reenable_binlog(thd); } + thd->pop_internal_handler(); } if (!silent && deleted>=0) { -- cgit v1.2.1 From 5999113f3323f520213b809e11fb439805bc514f Mon Sep 17 00:00:00 2001 From: Anurag Shekhar Date: Thu, 17 Sep 2009 17:35:43 +0530 Subject: Bug #45840 read_buffer_size allocated for each partition when "insert into.. select * from" When inserting into a partitioned table using 'insert into select * from ', read_buffer_size bytes of memory are allocated for each partition in the target table. This resulted in large memory consumption when the number of partitions are high. 
This patch introduces a new method which tries to estimate the buffer size required for each partition and limits the maximum buffer size used to maximum of 10 * read_buffer_size, 11 * read_buffer_size in case of monotonic partition functions. sql/ha_partition.cc: Introduced a method ha_partition::estimate_read_buffer_size to estimate buffer size required for each partition. Method ha_partition::start_part_bulk_insert updated to update the read_buffer_size before calling bulk upload in storage engines. Added thd in ha_partition::start_part_bulk_insert method signature. sql/ha_partition.h: Introduced a method ha_partition::estimate_read_buffer_size. Added thd in ha_partition::start_part_bulk_insert method signature. --- sql/ha_partition.cc | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- sql/ha_partition.h | 3 ++- 2 files changed, 51 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 5b053ab9cac..b27f493b80a 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3037,7 +3037,7 @@ int ha_partition::write_row(uchar * buf) } m_last_part= part_id; DBUG_PRINT("info", ("Insert in partition %d", part_id)); - start_part_bulk_insert(part_id); + start_part_bulk_insert(thd, part_id); tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ error= m_file[part_id]->ha_write_row(buf); @@ -3101,7 +3101,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) } m_last_part= new_part_id; - start_part_bulk_insert(new_part_id); + start_part_bulk_insert(thd, new_part_id); if (new_part_id == old_part_id) { DBUG_PRINT("info", ("Update in partition %d", new_part_id)); @@ -3282,17 +3282,63 @@ void ha_partition::start_bulk_insert(ha_rows rows) Check if start_bulk_insert has been called for this partition, if not, call it and mark it called */ -void ha_partition::start_part_bulk_insert(uint part_id) +void ha_partition::start_part_bulk_insert(THD *thd, uint part_id) { + long old_buffer_size; if (!bitmap_is_set(&m_bulk_insert_started, part_id) && bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) { + old_buffer_size= thd->variables.read_buff_size; + /* Update read_buffer_size for this partition */ + thd->variables.read_buff_size= estimate_read_buffer_size(old_buffer_size); m_file[part_id]->ha_start_bulk_insert(guess_bulk_insert_rows()); bitmap_set_bit(&m_bulk_insert_started, part_id); + thd->variables.read_buff_size= old_buffer_size; } m_bulk_inserted_rows++; } +/* + Estimate the read buffer size for each partition. + SYNOPSIS + ha_partition::estimate_read_buffer_size() + original_size read buffer size originally set for the server + RETURN VALUE + estimated buffer size. + DESCRIPTION + If the estimated number of rows to insert is less than 10 (but not 0) + the new buffer size is same as original buffer size. + In case of first partition of when partition function is monotonic + new buffer size is same as the original buffer size. + For rest of the partition total buffer of 10*original_size is divided + equally if number of partition is more than 10 other wise each partition + will be allowed to use original buffer size. +*/ +long ha_partition::estimate_read_buffer_size(long original_size) +{ + /* + If number of rows to insert is less than 10, but not 0, + return original buffer size. + */ + if (estimation_rows_to_insert && (estimation_rows_to_insert < 10)) + return (original_size); + /* + If first insert/partition and monotonic partition function, + allow using buffer size originally set. 
+ */ + if (!m_bulk_inserted_rows && + m_part_func_monotonicity_info != NON_MONOTONIC && + m_tot_parts > 1) + return original_size; + /* + Allow total buffer used in all partition to go up to 10*read_buffer_size. + 11*read_buffer_size in case of monotonic partition function. + */ + + if (m_tot_parts < 10) + return original_size; + return (original_size * 10 / m_tot_parts); +} /* Try to predict the number of inserts into this partition. diff --git a/sql/ha_partition.h b/sql/ha_partition.h index f47dfe8f621..1c863d6c294 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -367,7 +367,8 @@ public: virtual int end_bulk_insert(); private: ha_rows guess_bulk_insert_rows(); - void start_part_bulk_insert(uint part_id); + void start_part_bulk_insert(THD *thd, uint part_id); + long estimate_read_buffer_size(long original_size); public: virtual bool is_fatal_error(int error, uint flags) -- cgit v1.2.1 From e5888b16afcef42e8127a815cc5a95abc283c6d9 Mon Sep 17 00:00:00 2001 From: Staale Smedseng Date: Thu, 17 Sep 2009 17:10:30 +0200 Subject: Bug #43414 Parenthesis (and other) warnings compiling MySQL with gcc 4.3.2 This is the fifth patch cleaning up more GCC warnings about variables used before initialized using the new macro UNINIT_VAR(). --- sql/item_func.cc | 6 ++---- sql/item_timefunc.cc | 11 +++-------- sql/sql_handler.cc | 7 ++----- sql/sql_lex.cc | 3 +-- sql/sql_select.cc | 3 +-- sql/udf_example.c | 4 ++-- 6 files changed, 11 insertions(+), 23 deletions(-) (limited to 'sql') diff --git a/sql/item_func.cc b/sql/item_func.cc index 3c1e2126008..c6dbdb1d9a7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -428,8 +428,7 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const Field *Item_func::tmp_table_field(TABLE *t_arg) { - Field *res; - LINT_INIT(res); + Field *res= NULL; switch (result_type()) { case INT_RESULT: @@ -4202,9 +4201,8 @@ void Item_func_set_user_var::save_item_result(Item *item) bool Item_func_set_user_var::update() { - bool res; + bool res= NULL; DBUG_ENTER("Item_func_set_user_var::update"); - LINT_INIT(res); switch (cached_result_type) { case REAL_RESULT: diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 39f869106b6..de76f821795 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -271,9 +271,9 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, int strict_week_number_year= -1; int frac_part; bool usa_time= 0; - bool sunday_first_n_first_week_non_iso; - bool strict_week_number; - bool strict_week_number_year_type; + bool UNINIT_VAR(sunday_first_n_first_week_non_iso); + bool UNINIT_VAR(strict_week_number); + bool UNINIT_VAR(strict_week_number_year_type); const char *val_begin= val; const char *val_end= val + length; const char *ptr= format->format.str; @@ -281,11 +281,6 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, CHARSET_INFO *cs= &my_charset_bin; DBUG_ENTER("extract_date_time"); - LINT_INIT(strict_week_number); - /* Remove valgrind varnings when using gcc 3.3 and -O1 */ - PURIFY_OR_LINT_INIT(strict_week_number_year_type); - PURIFY_OR_LINT_INIT(sunday_first_n_first_week_non_iso); - if (!sub_pattern_end) bzero((char*) l_time, sizeof(*l_time)); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 721b365a7b9..5bd28add428 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -385,16 +385,13 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, String buffer(buff, sizeof(buff), system_charset_info); int error, keyno= -1; uint num_rows; - byte *key; - uint key_len; + byte *UNINIT_VAR(key); + uint 
UNINIT_VAR(key_len); bool need_reopen; DBUG_ENTER("mysql_ha_read"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", tables->db, tables->table_name, tables->alias)); - LINT_INIT(key); - LINT_INIT(key_len); - thd->lex->select_lex.context.resolve_in_table_list_only(tables); list.push_front(new Item_field(&thd->lex->select_lex.context, NULL, NULL, "*")); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index b0b4256184c..17ee53d446b 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -530,7 +530,7 @@ static inline uint int_token(const char *str,uint length) int MYSQLlex(void *arg, void *yythd) { - reg1 uchar c; + reg1 uchar UNINIT_VAR(c); bool comment_closed; int tokval, result_state; uint length; @@ -550,7 +550,6 @@ int MYSQLlex(void *arg, void *yythd) lip->tok_start=lip->tok_end=lip->ptr; state=lip->next_state; lip->next_state=MY_LEX_OPERATOR_OR_IDENT; - LINT_INIT(c); for (;;) { switch (state) { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9d5e67c9532..84b5b61c941 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -10480,9 +10480,8 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) { int rc= 0; enum_nested_loop_state error= NESTED_LOOP_OK; - JOIN_TAB *join_tab; + JOIN_TAB *UNINIT_VAR(join_tab); DBUG_ENTER("do_select"); - LINT_INIT(join_tab); join->procedure=procedure; join->tmp_table= table; /* Save for easy recursion */ diff --git a/sql/udf_example.c b/sql/udf_example.c index db48984eed8..019d4d834dd 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -139,10 +139,10 @@ typedef long long longlong; #include #include -static pthread_mutex_t LOCK_hostname; - #ifdef HAVE_DLOPEN +static pthread_mutex_t LOCK_hostname; + /* These must be right or mysqld will not find the symbol! */ my_bool metaphon_init(UDF_INIT *initid, UDF_ARGS *args, char *message); -- cgit v1.2.1 From 64badb5f269850d0111aec29788aff7e181c195d Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 18 Sep 2009 16:20:29 +0800 Subject: Bug #42914 Log event that larger than max_allowed_packet results in stop of slave I/O thread, But there is no Last_IO_Error reported. On the master, if a binary log event is larger than max_allowed_packet, ER_MASTER_FATAL_ERROR_READING_BINLOG and the specific reason of this error is sent to a slave when it requests a dump from the master, thus leading the I/O thread to stop. On a slave, the I/O thread stops when receiving a packet larger than max_allowed_packet. In both cases, however, there was no Last_IO_Error reported. This patch adds code to report the Last_IO_Error and exact reason before stopping the I/O thread and also reports the case the out memory pops up while handling packets from the master. sql/sql_repl.cc: The master send the Specific reasons instead of "error reading log entry" to the slave which is requesting a dump. if an fatal error is returned by read_log_event function. 
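    As a rough illustration of the reporting pattern introduced here (simplified stand-ins, not
    the real Master_info API): before stopping, the I/O thread should record the failure where
    status queries can see it, not only write it to the server log.

        #include <cstdio>

        struct MasterInfo
        {
          char last_io_error[256];
          void report(int /*level*/, int code, const char *msg)
          {
            /* Make the failure visible to SHOW SLAVE STATUS, not just the log. */
            std::snprintf(last_io_error, sizeof(last_io_error), "%d: %s", code, msg);
          }
        };

        void stop_io_thread_on_error(MasterInfo &mi, int code, const char *msg)
        {
          std::fprintf(stderr, "%s\n", msg);          /* error log, as before */
          mi.report(1 /* ERROR_LEVEL */, code, msg);  /* added: populate Last_IO_Error */
          /* ... clean up and stop the thread ... */
        }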
--- sql/share/errmsg.txt | 2 +- sql/slave.cc | 12 ++++++++---- sql/sql_repl.cc | 14 ++++---------- 3 files changed, 13 insertions(+), 15 deletions(-) (limited to 'sql') diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 3aba434b284..fdad2a44b68 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -4702,7 +4702,7 @@ ER_NOT_SUPPORTED_YET 42000 swe "Denna version av MySQL kan ännu inte utföra '%s'" ER_MASTER_FATAL_ERROR_READING_BINLOG nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log" - eng "Got fatal error %d: '%-.128s' from master when reading data from binary log" + eng "Got fatal error %d from master when reading data from binary log: '%-.128s'" ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs" ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario" por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log" diff --git a/sql/slave.cc b/sql/slave.cc index fac9ee214c5..b489eb3b45f 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -2678,15 +2678,19 @@ Log entry on master is longer than max_allowed_packet (%ld) on \ slave. If the entry is correct, restart the server with a higher value of \ max_allowed_packet", thd->variables.max_allowed_packet); + mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, + ER(ER_NET_PACKET_TOO_LARGE)); goto err; case ER_MASTER_FATAL_ERROR_READING_BINLOG: - sql_print_error(ER(mysql_error_number), mysql_error_number, - mysql_error(mysql)); + mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, + ER(ER_MASTER_FATAL_ERROR_READING_BINLOG), + mysql_error_number, mysql_error(mysql)); goto err; - case EE_OUTOFMEMORY: - case ER_OUTOFMEMORY: + case ER_OUT_OF_RESOURCES: sql_print_error("\ Stopping slave I/O thread due to out-of-memory error from master"); + mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, + ER(ER_OUT_OF_RESOURCES)); goto err; } if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings, diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 0ec8d91214c..b8f2e1e39bf 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -623,7 +623,7 @@ impossible position"; */ { log.error=0; - bool read_packet = 0, fatal_error = 0; + bool read_packet = 0; #ifndef DBUG_OFF if (max_binlog_dump_events && !left_events--) @@ -645,7 +645,7 @@ impossible position"; */ pthread_mutex_lock(log_lock); - switch (Log_event::read_log_event(&log, packet, (pthread_mutex_t*)0)) { + switch (error= Log_event::read_log_event(&log, packet, (pthread_mutex_t*) 0)) { case 0: /* we read successfully, so we'll need to send it to the slave */ pthread_mutex_unlock(log_lock); @@ -671,8 +671,8 @@ impossible position"; default: pthread_mutex_unlock(log_lock); - fatal_error = 1; - break; + test_for_non_eof_log_read_errors(error, &errmsg); + goto err; } if (read_packet) @@ -701,12 +701,6 @@ impossible position"; */ } - if (fatal_error) - { - errmsg = "error reading log entry"; - my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; - goto err; - } log.error=0; } } -- cgit v1.2.1 From 5dda6c18cdb6b549681f76b6158baa3afd2816bb Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 18 Sep 2009 12:34:08 +0300 Subject: Bug #47106: Crash / segfault on adding EXPLAIN to a non-crashing query The fix for bug 46749 removed the check for OUTER_REF_TABLE_BIT and substituted it for a check on the presence of Item_ident::depended_from. 
Removing it altogether was wrong : OUTER_REF_TABLE_BIT should still be checked in addition to depended_from (because it's not set in all cases and doesn't contradict to the check of depended_from). Fixed by returning the old condition back as a compliment to the new one. --- sql/sql_select.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 84b5b61c941..76d6833de5c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3216,12 +3216,12 @@ add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, @retval FALSE it's something else */ -inline static bool +static bool is_local_field (Item *field) { - field= field->real_item(); - return field->type() == Item::FIELD_ITEM && - !((Item_field *)field)->depended_from; + return field->real_item()->type() == Item::FIELD_ITEM + && !(field->used_tables() & OUTER_REF_TABLE_BIT) + && !((Item_field *)field->real_item())->depended_from; } -- cgit v1.2.1 From 01e5bc703d91e5d39b9f00b8eb31d563a2cf6b40 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 18 Sep 2009 16:01:18 +0300 Subject: Bug#46760: Fast ALTER TABLE no longer works for InnoDB Despite copying the value of the old table's row type we don't always have to mark row type as being specified. Innodb uses this to check if it can do fast ALTER TABLE or not. Fixed by correctly flagging the presence of row_type only when it's actually changed. Added a test case for 39200. --- sql/handler.h | 9 +++++++++ sql/sql_table.cc | 10 ++++++++++ 2 files changed, 19 insertions(+) (limited to 'sql') diff --git a/sql/handler.h b/sql/handler.h index f759239d66e..fe8f7c437ff 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -913,6 +913,15 @@ typedef struct st_ha_create_information ulong key_block_size; SQL_LIST merge_list; handlerton *db_type; + /** + Row type of the table definition. + + Defaults to ROW_TYPE_DEFAULT for all non-ALTER statements. + For ALTER TABLE defaults to ROW_TYPE_NOT_USED (means "keep the current"). + + Can be changed either explicitly by the parser. + If nothing speficied inherits the value of the original table (if present). + */ enum row_type row_type; uint null_bits; /* NULL bits at start of record */ uint options; /* OR of HA_CREATE_ options */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 41e76211dd8..9d929c0d1a3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6656,9 +6656,19 @@ view_err: goto err; } + /* + If this is an ALTER TABLE and no explicit row type specified reuse + the table's row type. + Note : this is the same as if the row type was specified explicitly. + */ if (create_info->row_type == ROW_TYPE_NOT_USED) { + /* ALTER TABLE without explicit row type */ create_info->row_type= table->s->row_type; + } + else + { + /* ALTER TABLE with specific row type */ create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT; } -- cgit v1.2.1 From 5ec6043ac37380fac2cbcbc15e4f2e8b76653f15 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Mon, 21 Sep 2009 11:58:15 +0200 Subject: Fix for BUG#35570 "CHECKSUM TABLE unreliable if LINESTRING field (same content/ differen checksum)" The problem was that checksum of GEOMETRY type used memory addresses in the computation, making it un-repeatable thus useless. 
(This patch is a backport from 6.0 branch) mysql-test/r/myisam.result: test case for bug35570 that same tables give same checksums mysql-test/t/myisam.test: test case for bug35570 that same tables give same checksums sql/sql_table.cc: Type GEOMETRY is implemented on top of type BLOB, so, just like for BLOB, its 'field' contains pointers which it does not make sense to include in the checksum; it rather has to be converted to a string and then we can compute the checksum. --- sql/sql_table.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9d929c0d1a3..08f3311be9d 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -7897,8 +7897,14 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, for (uint i= 0; i < t->s->fields; i++ ) { Field *f= t->field[i]; - if ((f->type() == MYSQL_TYPE_BLOB) || - (f->type() == MYSQL_TYPE_VARCHAR)) + enum_field_types field_type= f->type(); + /* + BLOB and VARCHAR have pointers in their field, we must convert + to string; GEOMETRY is implemented on top of BLOB. + */ + if ((field_type == MYSQL_TYPE_BLOB) || + (field_type == MYSQL_TYPE_VARCHAR) || + (field_type == MYSQL_TYPE_GEOMETRY)) { String tmp; f->val_str(&tmp); -- cgit v1.2.1 From beb519e3caa387b715a6093251ec0b7e5ce61da0 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 23 Sep 2009 13:40:33 +0500 Subject: Bug#45989 memory leak after explain encounters an error in the query the fix is reverted from 5.1, mysql-pe as unnecessary(no valgrind warnings there). sql/sql_select.cc: the fix is reverted from 5.1, mysql-pe as unnecessary(no valgrind warnings there). --- sql/sql_select.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8b1e0ae365b..1ff068c8881 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2248,7 +2248,7 @@ JOIN::destroy() tab->cleanup(); } tmp_join->tmp_join= 0; - tmp_table_param.cleanup(); + tmp_table_param.copy_field= 0; DBUG_RETURN(tmp_join->destroy()); } cond_equal= 0; -- cgit v1.2.1 From 83bc7980ce8df92efac754923500b59574055ad0 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Tue, 22 Sep 2009 08:22:07 -0300 Subject: Bug#45498: Socket variable not available on Windows The "socket" variable is not available on Windows even though the --socket option can be used to specify the pipe name for local connections that use a named pipe. The solution is to ensure that the variable is always defined. mysql-test/r/windows.result: Add test case result for Bug#45498 mysql-test/t/windows.test: Add test case for Bug#45498 sql/set_var.cc: socket variable must always be present. 
--- sql/set_var.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/set_var.cc b/sql/set_var.cc index b64b54fdd29..51c6ca219c6 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -529,11 +529,11 @@ static sys_var_const sys_skip_networking(&vars, "skip_networking", static sys_var_const sys_skip_show_database(&vars, "skip_show_database", OPT_GLOBAL, SHOW_BOOL, (uchar*) &opt_skip_show_db); -#ifdef HAVE_SYS_UN_H + static sys_var_const sys_socket(&vars, "socket", OPT_GLOBAL, SHOW_CHAR_PTR, (uchar*) &mysqld_unix_port); -#endif + #ifdef HAVE_THR_SETCONCURRENCY /* purecov: begin tested */ static sys_var_const sys_thread_concurrency(&vars, "thread_concurrency", -- cgit v1.2.1 From d49913877064778d8301c7bbd610238b3f5422cd Mon Sep 17 00:00:00 2001 From: Alexander Nozdrin Date: Wed, 23 Sep 2009 17:10:23 +0400 Subject: A patch for Bug#47474 (mysqld hits Dbug_violation_helper assert when compiled with Sun Studio compiler). The thing is that Sun Studio compiler calls destructor of stack objects when pthread_exit() is called. That triggered an assertion in DBUG_ENTER()/DBUG_RETURN() validation logic (if DBUG_ENTER() is used in the beginning of function, all returns should be replaced by DBUG_RETURN/DBUG_VOID_RETURN macros). A fix is to explicitly use DBUG_LEAVE macro. --- sql/ha_ndbcluster.cc | 4 +++- sql/ha_ndbcluster_binlog.cc | 9 ++++++--- sql/mysqld.cc | 23 ++++++++++++++++++----- sql/repl_failsafe.cc | 4 +++- sql/slave.cc | 9 ++++++--- 5 files changed, 36 insertions(+), 13 deletions(-) (limited to 'sql') diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index ddb90427fdc..dfd7ddc4d6c 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9426,9 +9426,11 @@ ndb_util_thread_fail: pthread_cond_signal(&COND_ndb_util_ready); pthread_mutex_unlock(&LOCK_ndb_util_thread); DBUG_PRINT("exit", ("ndb_util_thread")); + + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); - DBUG_RETURN(NULL); + return NULL; // Avoid compiler warnings } /* diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index d9a9738ce72..2a87697c7fa 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3663,9 +3663,11 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) ndb_binlog_thread_running= -1; pthread_mutex_unlock(&injector_mutex); pthread_cond_signal(&injector_cond); + + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); - DBUG_RETURN(NULL); + return NULL; // Avoid compiler warnings } lex_start(thd); @@ -4376,10 +4378,11 @@ err: (void) pthread_cond_signal(&injector_cond); DBUG_PRINT("exit", ("ndb_binlog_thread")); - my_thread_end(); + DBUG_LEAVE; // Must match DBUG_ENTER() + my_thread_end(); pthread_exit(0); - DBUG_RETURN(NULL); + return NULL; // Avoid compiler warnings } bool diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 430592e7688..5778e6ab4aa 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1107,13 +1107,13 @@ void kill_mysql(void) #if defined(__NETWARE__) extern "C" void kill_server(int sig_ptr) -#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN +#define RETURN_FROM_KILL_SERVER return #elif !defined(__WIN__) static void *kill_server(void *sig_ptr) -#define RETURN_FROM_KILL_SERVER DBUG_RETURN(0) +#define RETURN_FROM_KILL_SERVER return 0 #else static void __cdecl kill_server(int sig_ptr) -#define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN +#define RETURN_FROM_KILL_SERVER return #endif { DBUG_ENTER("kill_server"); @@ -1121,7 +1121,10 @@ static void __cdecl kill_server(int sig_ptr) 
int sig=(int) (long) sig_ptr; // This is passed a int // if there is a signal during the kill in progress, ignore the other if (kill_in_progress) // Safety + { + DBUG_LEAVE; RETURN_FROM_KILL_SERVER; + } kill_in_progress=TRUE; abort_loop=1; // This should be set if (sig != 0) // 0 is not a valid signal number @@ -1156,12 +1159,19 @@ static void __cdecl kill_server(int sig_ptr) pthread_join(select_thread, NULL); // wait for main thread #endif /* __NETWARE__ */ + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); /* purecov: end */ -#endif /* EMBEDDED_LIBRARY */ + RETURN_FROM_KILL_SERVER; // Avoid compiler warnings + +#else /* EMBEDDED_LIBRARY*/ + + DBUG_LEAVE; RETURN_FROM_KILL_SERVER; + +#endif /* EMBEDDED_LIBRARY */ } @@ -1935,8 +1945,9 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache) my_thread_end(); (void) pthread_cond_broadcast(&COND_thread_count); + DBUG_LEAVE; // Must match DBUG_ENTER() pthread_exit(0); - DBUG_RETURN(0); // Impossible + return 0; // Avoid compiler warnings } @@ -2756,7 +2767,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) DBUG_PRINT("quit",("signal_handler: calling my_thread_end()")); my_thread_end(); signal_thread_in_use= 0; + DBUG_LEAVE; // Must match DBUG_ENTER() pthread_exit(0); // Safety + return 0; // Avoid compiler warnings } switch (sig) { case SIGTERM: diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 582348608de..312499bb4ee 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -638,9 +638,11 @@ err: if (recovery_captain) mysql_close(recovery_captain); delete thd; + + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); + return 0; // Avoid compiler warnings } #endif diff --git a/sql/slave.cc b/sql/slave.cc index b489eb3b45f..8bf126ce2c2 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -2799,9 +2799,11 @@ err: pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done DBUG_EXECUTE_IF("simulate_slave_delay_at_terminate_bug38694", sleep(5);); pthread_mutex_unlock(&mi->run_lock); + + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); // Can't return anything here + return 0; // Avoid compiler warnings } /* @@ -3157,10 +3159,11 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ pthread_cond_broadcast(&rli->stop_cond); DBUG_EXECUTE_IF("simulate_slave_delay_at_terminate_bug38694", sleep(5);); pthread_mutex_unlock(&rli->run_lock); // tell the world we are done - + + DBUG_LEAVE; // Must match DBUG_ENTER() my_thread_end(); pthread_exit(0); - DBUG_RETURN(0); // Can't return anything here + return 0; // Avoid compiler warnings } -- cgit v1.2.1 From 6a89842e3642729fbac20a92a1655452f4d45487 Mon Sep 17 00:00:00 2001 From: Staale Smedseng Date: Wed, 23 Sep 2009 15:21:29 +0200 Subject: Bug #43414 Parenthesis (and other) warnings compiling MySQL with gcc 4.3.2 Cleaning up warnings not present in 5.0. 
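The hunks below fix several recurring warning patterns; a condensed, hypothetical illustration
of the main ones (not the actual server code) is:

    #include <cstdio>

    void warning_examples(const char *user_msg, int a, int b, int c)
    {
      /* Format-string warning: pass variable text as an argument,
         never as the format itself. */
      std::fprintf(stderr, "%s", user_msg);   /* not fprintf(stderr, user_msg) */

      /* "Suggest parentheses around && within ||" */
      bool ok= (a && b) || c;

      /* "Suggest parentheses around assignment used as truth value" */
      int result;
      if ((result= a + b))
        ok= !ok;

      /* Empty loop body made explicit with a lone ';' */
      const char *p= user_msg;
      for (; *p && *p != '=' ; p++) ;

      (void) ok;
      (void) result;
    }

The UNINIT_VAR() macro seen in the hunks addresses the related "may be used uninitialized"
warning by giving such variables a harmless initial value where the compiler cannot follow the
actual control flow.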
--- sql/event_db_repository.cc | 2 +- sql/ha_partition.cc | 2 +- sql/ha_partition.h | 8 ++++---- sql/handler.cc | 12 ++++-------- sql/item_timefunc.cc | 10 +++------- sql/item_xmlfunc.cc | 6 +++--- sql/log_event_old.cc | 6 +++--- sql/mysqld.cc | 3 ++- sql/partition_info.cc | 6 ++---- sql/rpl_rli.cc | 4 ++-- sql/set_var.cc | 9 ++++++--- sql/set_var.h | 3 ++- sql/slave.cc | 14 +++++++------- sql/sql_cache.cc | 2 +- sql/sql_connect.cc | 2 +- sql/sql_db.cc | 6 +++--- sql/sql_partition.cc | 4 ++-- sql/sql_plugin.cc | 2 +- sql/sql_show.cc | 2 ++ sql/strfunc.cc | 2 +- 20 files changed, 51 insertions(+), 54 deletions(-) (limited to 'sql') diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 9a253d74546..8faab5023da 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -711,7 +711,7 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, DBUG_ENTER("Event_db_repository::update_event"); /* None or both must be set */ - DBUG_ASSERT(new_dbname && new_name || new_dbname == new_name); + DBUG_ASSERT((new_dbname && new_name) || new_dbname == new_name); /* Reset sql_mode during data dictionary operations. */ thd->variables.sql_mode= 0; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index b27f493b80a..df5badccb1e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1001,7 +1001,7 @@ static bool print_admin_msg(THD* thd, const char* msg_type, if (!thd->vio_ok()) { - sql_print_error(msgbuf); + sql_print_error("%s", msgbuf); return TRUE; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1c863d6c294..c08b1f77eca 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -773,10 +773,10 @@ public: if (m_handler_status < handler_initialized || m_handler_status >= handler_closed) DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS); - else - DBUG_RETURN((m_file[0]->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); + + DBUG_RETURN((m_file[0]->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); } /* diff --git a/sql/handler.cc b/sql/handler.cc index a4d88e84f4c..d07ebed8ab9 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -3501,14 +3501,10 @@ int handler::index_next_same(uchar *buf, const uchar *key, uint keylen) if (!(error=index_next(buf))) { my_ptrdiff_t ptrdiff= buf - table->record[0]; - uchar *save_record_0; - KEY *key_info; - KEY_PART_INFO *key_part; - KEY_PART_INFO *key_part_end; - LINT_INIT(save_record_0); - LINT_INIT(key_info); - LINT_INIT(key_part); - LINT_INIT(key_part_end); + uchar *UNINIT_VAR(save_record_0); + KEY *UNINIT_VAR(key_info); + KEY_PART_INFO *UNINIT_VAR(key_part); + KEY_PART_INFO *UNINIT_VAR(key_part_end); /* key_cmp_if_same() compares table->record[0] against 'key'. 
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index cf38a294e55..b5037c08b3c 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1344,15 +1344,11 @@ bool get_interval_value(Item *args,interval_type int_type, String *str_value, INTERVAL *interval) { ulonglong array[5]; - longlong value; - const char *str; - size_t length; + longlong UNINIT_VAR(value); + const char *UNINIT_VAR(str); + size_t UNINIT_VAR(length); CHARSET_INFO *cs=str_value->charset(); - LINT_INIT(value); - LINT_INIT(str); - LINT_INIT(length); - bzero((char*) interval,sizeof(*interval)); if ((int) int_type <= INTERVAL_MICROSECOND) { diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 6320873e4d3..1eff00027f2 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -1354,7 +1354,7 @@ my_xpath_lex_scan(MY_XPATH *xpath, MY_XPATH_LEX *lex, const char *beg, const char *end) { int ch, ctype, length; - for ( ; beg < end && *beg == ' ' ; beg++); // skip leading spaces + for ( ; beg < end && *beg == ' ' ; beg++) ; // skip leading spaces lex->beg= beg; if (beg >= end) @@ -1423,7 +1423,7 @@ my_xpath_lex_scan(MY_XPATH *xpath, if (my_xdigit(ch)) // a sequence of digits { - for ( ; beg < end && my_xdigit(*beg) ; beg++); + for ( ; beg < end && my_xdigit(*beg) ; beg++) ; lex->end= beg; lex->term= MY_XPATH_LEX_DIGITS; return; @@ -1431,7 +1431,7 @@ my_xpath_lex_scan(MY_XPATH *xpath, if (ch == '"' || ch == '\'') // a string: either '...' or "..." { - for ( ; beg < end && *beg != ch ; beg++); + for ( ; beg < end && *beg != ch ; beg++) ; if (beg < end) { lex->end= beg+1; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 68cd2bf4673..f14ebe49706 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1245,8 +1245,8 @@ Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, solution, to be able to terminate a started statement in the binary log: the extraneous events will be removed in the future. 
*/ - DBUG_ASSERT(tbl_arg && tbl_arg->s && tid != ~0UL || - !tbl_arg && !cols && tid == ~0UL); + DBUG_ASSERT((tbl_arg && tbl_arg->s && tid != ~0UL) || + (!tbl_arg && !cols && tid == ~0UL)); if (thd_arg->options & OPTION_NO_FOREIGN_KEY_CHECKS) set_flags(NO_FOREIGN_KEY_CHECKS_F); @@ -1409,7 +1409,7 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length) #endif DBUG_ASSERT(m_rows_buf <= m_rows_cur); - DBUG_ASSERT(!m_rows_buf || m_rows_end && m_rows_buf < m_rows_end); + DBUG_ASSERT(!m_rows_buf || (m_rows_end && m_rows_buf < m_rows_end)); DBUG_ASSERT(m_rows_cur <= m_rows_end); /* The cast will always work since m_rows_cur <= m_rows_end */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5778e6ab4aa..3433b54cb29 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3719,6 +3719,7 @@ static void end_ssl() static int init_server_components() { + FILE* reopen; DBUG_ENTER("init_server_components"); /* We need to call each of these following functions to ensure that @@ -3761,7 +3762,7 @@ static int init_server_components() if (freopen(log_error_file, "a+", stdout)) #endif { - freopen(log_error_file, "a+", stderr); + reopen= freopen(log_error_file, "a+", stderr); setbuf(stderr, NULL); } } diff --git a/sql/partition_info.cc b/sql/partition_info.cc index e2027d3571e..ba9ea0e876e 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -483,10 +483,8 @@ static bool check_engine_condition(partition_element *p_elem, { DBUG_RETURN(TRUE); } - else - { - DBUG_RETURN(FALSE); - } + + DBUG_RETURN(FALSE); } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 18fbae9bb9d..0c6cc15297f 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -33,10 +33,10 @@ Relay_log_info::Relay_log_info() :Slave_reporting_capability("SQL"), no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id), info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), + cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0), #if HAVE_purify is_fake(FALSE), #endif - cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0), group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), sql_thd(0), @@ -300,7 +300,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", DBUG_RETURN(error); err: - sql_print_error(msg); + sql_print_error("%s", msg); end_io_cache(&rli->info_file); if (info_fd >= 0) my_close(info_fd, MYF(0)); diff --git a/sql/set_var.cc b/sql/set_var.cc index 51c6ca219c6..cd01f6c8624 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3349,7 +3349,7 @@ int set_var_init() uint count= 0; DBUG_ENTER("set_var_init"); - for (sys_var *var=vars.first; var; var= var->next, count++); + for (sys_var *var=vars.first; var; var= var->next, count++) ; if (hash_init(&system_variable_hash, system_charset_info, count, 0, 0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE)) @@ -4184,10 +4184,10 @@ bool sys_var_opt_readonly::update(THD *thd, set_var *var) can cause to wait on a read lock, it's required for the client application to unlock everything, and acceptable for the server to wait on all locks. 
*/ - if (result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE)) + if ((result= close_cached_tables(thd, NULL, FALSE, TRUE, TRUE))) goto end_with_read_lock; - if (result= make_global_read_lock_block_commit(thd)) + if ((result= make_global_read_lock_block_commit(thd))) goto end_with_read_lock; /* Change the opt_readonly system variable, safe because the lock is held */ @@ -4200,6 +4200,7 @@ end_with_read_lock: } +#ifndef DBUG_OFF /* even session variable here requires SUPER, because of -#o,file */ bool sys_var_thd_dbug::check(THD *thd, set_var *var) { @@ -4226,6 +4227,8 @@ uchar *sys_var_thd_dbug::value_ptr(THD *thd, enum_var_type type, LEX_STRING *b) DBUG_EXPLAIN(buf, sizeof(buf)); return (uchar*) thd->strdup(buf); } +#endif /* DBUG_OFF */ + #ifdef HAVE_EVENT_SCHEDULER bool sys_var_event_scheduler::check(THD *thd, set_var *var) diff --git a/sql/set_var.h b/sql/set_var.h index 10e6e0f9c35..a54591b0fd6 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -621,6 +621,7 @@ public: uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; +#ifndef DBUG_OFF class sys_var_thd_dbug :public sys_var_thd { public: @@ -634,7 +635,7 @@ public: void set_default(THD *thd, enum_var_type type) { DBUG_POP(); } uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b); }; - +#endif /* DBUG_OFF */ /* some variables that require special handling */ diff --git a/sql/slave.cc b/sql/slave.cc index 8bf126ce2c2..66cbef1029a 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1195,7 +1195,7 @@ err: if (master_res) mysql_free_result(master_res); DBUG_ASSERT(err_code != 0); - mi->report(ERROR_LEVEL, err_code, err_buff); + mi->report(ERROR_LEVEL, err_code, "%s", err_buff); DBUG_RETURN(1); } @@ -2386,7 +2386,7 @@ static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info) if (io_slave_killed(thd, mi)) { if (info && global_system_variables.log_warnings) - sql_print_information(info); + sql_print_information("%s", info); return TRUE; } return FALSE; @@ -2456,13 +2456,13 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi, } else { - sql_print_information(buf); + sql_print_information("%s", buf); } } if (safe_reconnect(thd, mysql, mi, 1) || io_slave_killed(thd, mi)) { if (global_system_variables.log_warnings) - sql_print_information(messages[SLAVE_RECON_MSG_KILLED_AFTER]); + sql_print_information("%s", messages[SLAVE_RECON_MSG_KILLED_AFTER]); return 1; } return 0; @@ -2679,7 +2679,7 @@ slave. If the entry is correct, restart the server with a higher value of \ max_allowed_packet", thd->variables.max_allowed_packet); mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, - ER(ER_NET_PACKET_TOO_LARGE)); + "%s", ER(ER_NET_PACKET_TOO_LARGE)); goto err; case ER_MASTER_FATAL_ERROR_READING_BINLOG: mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, @@ -2690,7 +2690,7 @@ max_allowed_packet", sql_print_error("\ Stopping slave I/O thread due to out-of-memory error from master"); mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, - ER(ER_OUT_OF_RESOURCES)); + "%s", ER(ER_OUT_OF_RESOURCES)); goto err; } if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings, @@ -3050,7 +3050,7 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME, This function is reporting an error which was not reported while executing exec_relay_log_event(). 
*/ - rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), errmsg); + rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), "%s", errmsg); } else if (last_errno != thd->main_da.sql_errno()) { diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index df19fd05417..a41b7bd40bb 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -383,7 +383,7 @@ static void debug_wait_for_kill(const char *info) thd= current_thd; prev_info= thd->proc_info; thd->proc_info= info; - sql_print_information(info); + sql_print_information("%s", info); while(!thd->killed) my_sleep(1000); thd->killed= THD::NOT_KILLED; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 98574a07a4e..bb4ba2bf21b 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -985,7 +985,7 @@ static void end_connection(THD *thd) if (thd->user_connect) decrease_user_connections(thd->user_connect); - if (thd->killed || net->error && net->vio != 0) + if (thd->killed || (net->error && net->vio != 0)) { statistic_increment(aborted_threads,&LOCK_status); } diff --git a/sql/sql_db.cc b/sql/sql_db.cc index bcc8fcf54fc..c19bfba9fc1 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1450,11 +1450,11 @@ cmp_db_names(const char *db1_name, { return /* db1 is NULL and db2 is NULL */ - !db1_name && !db2_name || + (!db1_name && !db2_name) || /* db1 is not-NULL, db2 is not-NULL, db1 == db2. */ - db1_name && db2_name && - my_strcasecmp(system_charset_info, db1_name, db2_name) == 0; + (db1_name && db2_name && + my_strcasecmp(system_charset_info, db1_name, db2_name) == 0); } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 08ff2daacb9..2fb09ab7566 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -7098,9 +7098,9 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter) longlong dummy; field->store(part_iter->field_vals.cur++, ((Field_num*)field)->unsigned_flag); - if (part_iter->part_info->is_sub_partitioned() && + if ((part_iter->part_info->is_sub_partitioned() && !part_iter->part_info->get_part_partition_id(part_iter->part_info, - &part_id, &dummy) || + &part_id, &dummy)) || !part_iter->part_info->get_partition_id(part_iter->part_info, &part_id, &dummy)) return part_id; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 025c8a8248d..b411d5e3095 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -368,7 +368,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report) if (report & REPORT_TO_USER) my_error(ER_UDF_NO_PATHS, MYF(0)); if (report & REPORT_TO_LOG) - sql_print_error(ER(ER_UDF_NO_PATHS)); + sql_print_error("%s", ER(ER_UDF_NO_PATHS)); DBUG_RETURN(0); } /* If this dll is already loaded just increase ref_count. 
*/ diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5c2c351652b..bb377e676e2 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3529,7 +3529,9 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, TABLE_SHARE *share= show_table->s; handler *file= show_table->file; handlerton *tmp_db_type= share->db_type(); +#ifdef WITH_PARTITION_STORAGE_ENGINE bool is_partitioned= FALSE; +#endif if (share->tmp_table == SYSTEM_TMP_TABLE) table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs); else if (share->tmp_table) diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 1fb9c1a8451..56fa4a380ea 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -147,7 +147,7 @@ static uint parse_name(TYPELIB *lib, const char **strpos, const char *end, } } else - for (; pos != end && *pos != '=' && *pos !=',' ; pos++); + for (; pos != end && *pos != '=' && *pos !=',' ; pos++) ; uint var_len= (uint) (pos - start); /* Determine which flag it is */ -- cgit v1.2.1 From 45c70a2ec50e81d3eeb1699fae939e113bf69ba1 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 24 Sep 2009 16:21:46 +0300 Subject: fixed compilation warnings --- sql/item_func.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/item_func.cc b/sql/item_func.cc index c6dbdb1d9a7..d6f315fda50 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -4201,7 +4201,7 @@ void Item_func_set_user_var::save_item_result(Item *item) bool Item_func_set_user_var::update() { - bool res= NULL; + bool res= 0; DBUG_ENTER("Item_func_set_user_var::update"); switch (cached_result_type) { -- cgit v1.2.1 From 9ae9f84ef45b57e29c328f9449004c07f8a62e3a Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Thu, 24 Sep 2009 15:52:52 +0100 Subject: BUG#42829: binlogging enabled for all schemas regardless of binlog-db-db / binlog-ignore-db InnoDB will return an error if statement based replication is used along with transaction isolation level READ-COMMITTED (or weaker), even if the statement in question is filtered out according to the binlog-do-db rules set. In this case, an error should not be printed. This patch addresses this issue by extending the existing check in external_lock to take into account the filter rules before deciding to print an error. Furthermore, it also changes decide_logging_format to take into consideration whether the statement is filtered out from binlog before decision is made. sql/sql_base.cc: Changed the check on decide_logging_format to take into account whether statement is filtered or not in SBR. sql/sql_class.cc: Added the thd_binlog_filter_ok to INNODB_COMPATIBILITY_HOOKS set. storage/innobase/handler/ha_innodb.cc: Extended check in external_lock to take into consideration the filtering when deciding to throw an error. storage/innobase/handler/ha_innodb.h: Added declaration of new hook. storage/innodb_plugin/handler/ha_innodb.cc: Extended check in external_lock to take into consideration the filtering when deciding to throw an error. storage/innodb_plugin/handler/ha_innodb.h: Added declaration of new hook. 
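A simplified restatement of the added condition, with plain booleans standing in for the THD
fields and the global binlog_filter object:

    /* Sketch only: parameters replace the real THD/replication objects. */
    bool statement_will_be_binlogged(bool binlog_open,
                                     bool option_bin_log,
                                     bool statement_based_format,
                                     bool db_passes_filter_rules)
    {
      /*
        In SBR, only treat the statement as one that will reach the binary
        log when the --binlog-do-db / --binlog-ignore-db rules will not
        filter it out later; otherwise a spurious error would be raised
        under READ COMMITTED for statements that are never logged.
      */
      return binlog_open && option_bin_log &&
             (!statement_based_format || db_passes_filter_rules);
    }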
--- sql/sql_base.cc | 12 +++++++++++- sql/sql_class.cc | 6 ++++++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_base.cc b/sql/sql_base.cc index f55d3fc5006..7de345ba0e7 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -24,6 +24,7 @@ #include #include #include +#include "rpl_filter.h" #ifdef __WIN__ #include #endif @@ -5094,7 +5095,16 @@ static void mark_real_tables_as_free_for_reuse(TABLE_LIST *table) int decide_logging_format(THD *thd, TABLE_LIST *tables) { - if (mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG)) + /* + In SBR mode, we are only proceeding if we are binlogging this + statement, ie, the filtering rules won't later filter this out. + + This check here is needed to prevent some spurious error to be + raised in some cases (See BUG#42829). + */ + if (mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG) && + (thd->variables.binlog_format != BINLOG_FORMAT_STMT || + binlog_filter->db_ok(thd->db))) { /* Compute the starting vectors for the computations by creating a diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 3f568566c89..9269e97cc4c 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -42,6 +42,7 @@ #include "sp_rcontext.h" #include "sp_cache.h" +#include "rpl_filter.h" /* The following is used to initialise Table_ident with a internal @@ -2980,6 +2981,11 @@ extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all) { mark_transaction_to_rollback(thd, all); } + +extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd) +{ + return binlog_filter->db_ok(thd->db); +} #endif // INNODB_COMPATIBILITY_HOOKS */ /**************************************************************************** -- cgit v1.2.1 From 43e6919d56561f5d2de89bbd1a6c03003e3dcf8d Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 25 Sep 2009 11:26:49 +0200 Subject: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Problem was still not completely fixed, due to qouting. This is the server side only fix (in explain_filename), the change from filename_to_tablename to use explain_filename in the InnoDB code must be done before the bug is fixed. mysql-test/include/have_not_innodb_plugin.inc: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Added include file to allow test for only the 'old' built-in innodb engine mysql-test/r/not_true.require: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Added require to match 'not' TRUE mysql-test/r/partition_innodb_builtin.result: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs New result file for partitioning specific to the 'old' built-in innodb engine mysql-test/r/partition_innodb_plugin.result: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs New result file for partitioning specific to the new plugin innodb engine mysql-test/t/disabled.def: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Disabling the new test until the fix is included in the InnoDB source too. mysql-test/t/partition_innodb_builtin.test: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs New test file for partitioning specific to the 'old' built-in innodb engine mysql-test/t/partition_innodb_plugin.test: Bug#32430: 'show innodb status' causes errors Invalid (old?) 
table or database name in logs New test file for partitioning specific to the new plugin innodb engine sql/mysql_priv.h: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Added thd as a parameter to explain_filename to be able to use the correct quote character sql/sql_table.cc: Bug#32430: 'show innodb status' causes errors Invalid (old?) table or database name in logs Changed explain_filename, so that it does qouting correctly according to the sessions qoute char. --- sql/mysql_priv.h | 5 ++--- sql/sql_table.cc | 58 +++++++++++++++++++++++++++++--------------------------- 2 files changed, 32 insertions(+), 31 deletions(-) (limited to 'sql') diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 381a0313add..6fde16d3049 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -2277,10 +2277,9 @@ enum enum_explain_filename_mode { EXPLAIN_ALL_VERBOSE= 0, EXPLAIN_PARTITIONS_VERBOSE, - EXPLAIN_PARTITIONS_AS_COMMENT, - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING + EXPLAIN_PARTITIONS_AS_COMMENT }; -uint explain_filename(const char *from, char *to, uint to_length, +uint explain_filename(THD* thd, const char *from, char *to, uint to_length, enum_explain_filename_mode explain_mode); uint filename_to_tablename(const char *from, char *to, uint to_length); uint tablename_to_filename(const char *from, char *to, uint to_length); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 41e76211dd8..5f718b25d60 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -70,15 +70,21 @@ static void wait_for_kill_signal(THD *thd) /** @brief Helper function for explain_filename + @param thd Thread handle + @param to_p Explained name in system_charset_info + @param end_p End of the to_p buffer + @param name Name to be converted + @param name_len Length of the name, in bytes */ -static char* add_identifier(char *to_p, const char * end_p, - const char* name, uint name_len, bool add_quotes) +static char* add_identifier(THD* thd, char *to_p, const char * end_p, + const char* name, uint name_len) { uint res; uint errors; const char *conv_name; char tmp_name[FN_REFLEN]; char conv_string[FN_REFLEN]; + int quote; DBUG_ENTER("add_identifier"); if (!name[name_len]) @@ -102,19 +108,21 @@ static char* add_identifier(char *to_p, const char * end_p, conv_name= conv_string; } - if (add_quotes && (end_p - to_p > 2)) + quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '"'; + + if (quote != EOF && (end_p - to_p > 2)) { - *(to_p++)= '`'; + *(to_p++)= (char) quote; while (*conv_name && (end_p - to_p - 1) > 0) { uint length= my_mbcharlen(system_charset_info, *conv_name); if (!length) length= 1; - if (length == 1 && *conv_name == '`') + if (length == 1 && *conv_name == (char) quote) { if ((end_p - to_p) < 3) break; - *(to_p++)= '`'; + *(to_p++)= (char) quote; *(to_p++)= *(conv_name++); } else if (((long) length) < (end_p - to_p)) @@ -125,7 +133,11 @@ static char* add_identifier(char *to_p, const char * end_p, else break; /* string already filled */ } - to_p= strnmov(to_p, "`", end_p - to_p); + if (end_p > to_p) { + *(to_p++)= (char) quote; + if (end_p > to_p) + *to_p= 0; /* terminate by NUL, but do not include it in the count */ + } } else to_p= strnmov(to_p, conv_name, end_p - to_p); @@ -145,6 +157,7 @@ static char* add_identifier(char *to_p, const char * end_p, diagnostic, error etc. when it would be useful to know what a particular file [and directory] means. Such as SHOW ENGINE STATUS, error messages etc. 
+ @param thd Thread handle @param from Path name in my_charset_filename Null terminated in my_charset_filename, normalized to use '/' as directory separation character. @@ -161,13 +174,12 @@ static char* add_identifier(char *to_p, const char * end_p, [,[ Temporary| Renamed] Partition `p` [, Subpartition `sp`]] *| (| is really a /, and it is all in one line) - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING -> - same as above but no quotes are added. @retval Length of returned string */ -uint explain_filename(const char *from, +uint explain_filename(THD* thd, + const char *from, char *to, uint to_length, enum_explain_filename_mode explain_mode) @@ -281,14 +293,12 @@ uint explain_filename(const char *from, { to_p= strnmov(to_p, ER(ER_DATABASE_NAME), end_p - to_p); *(to_p++)= ' '; - to_p= add_identifier(to_p, end_p, db_name, db_name_len, 1); + to_p= add_identifier(thd, to_p, end_p, db_name, db_name_len); to_p= strnmov(to_p, ", ", end_p - to_p); } else { - to_p= add_identifier(to_p, end_p, db_name, db_name_len, - (explain_mode != - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)); + to_p= add_identifier(thd, to_p, end_p, db_name, db_name_len); to_p= strnmov(to_p, ".", end_p - to_p); } } @@ -296,16 +306,13 @@ uint explain_filename(const char *from, { to_p= strnmov(to_p, ER(ER_TABLE_NAME), end_p - to_p); *(to_p++)= ' '; - to_p= add_identifier(to_p, end_p, table_name, table_name_len, 1); + to_p= add_identifier(thd, to_p, end_p, table_name, table_name_len); } else - to_p= add_identifier(to_p, end_p, table_name, table_name_len, - (explain_mode != - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)); + to_p= add_identifier(thd, to_p, end_p, table_name, table_name_len); if (part_name) { - if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT || - explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING) + if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT) to_p= strnmov(to_p, " /* ", end_p - to_p); else if (explain_mode == EXPLAIN_PARTITIONS_VERBOSE) to_p= strnmov(to_p, " ", end_p - to_p); @@ -321,20 +328,15 @@ uint explain_filename(const char *from, } to_p= strnmov(to_p, ER(ER_PARTITION_NAME), end_p - to_p); *(to_p++)= ' '; - to_p= add_identifier(to_p, end_p, part_name, part_name_len, - (explain_mode != - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)); + to_p= add_identifier(thd, to_p, end_p, part_name, part_name_len); if (subpart_name) { to_p= strnmov(to_p, ", ", end_p - to_p); to_p= strnmov(to_p, ER(ER_SUBPARTITION_NAME), end_p - to_p); *(to_p++)= ' '; - to_p= add_identifier(to_p, end_p, subpart_name, subpart_name_len, - (explain_mode != - EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)); + to_p= add_identifier(thd, to_p, end_p, subpart_name, subpart_name_len); } - if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT || - explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING) + if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT) to_p= strnmov(to_p, " */", end_p - to_p); } DBUG_PRINT("exit", ("to '%s'", to)); -- cgit v1.2.1 From 3d211f39819db045c4992dcf1aab1c2473b56c66 Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Sun, 27 Sep 2009 22:02:47 +0100 Subject: BUG#47312: RBR: Disabling key on slave breaks replication: HA_ERR_WRONG_INDEX In RBR, disabling keys on slave table will break replication when updating or deleting a record. When the slave thread tries to find the row, by searching in the storage engine, it checks whether the table has a key or not. If it has one, then the slave thread uses it to search the record. Nonetheless, the slave only checks whether the key exists or not, it does not verify if it is active. 
Should the key be disabled (eg, DBA has issued an ALTER TABLE ... DISABLE KEYS) then it will result in error: HA_ERR_WRONG_INDEX. This patch addresses this issue by making the slave thread also check whether the key is active or not before actually using it. --- sql/log_event.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index 08fe3aba8ed..3c584011bfb 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8835,11 +8835,11 @@ int Rows_log_event::find_row(const Relay_log_info *rli) */ store_record(table,record[1]); - if (table->s->keys > 0) + if (table->s->keys > 0 && table->s->keys_in_use.is_set(0)) { DBUG_PRINT("info",("locating record using primary key (index_read)")); - /* We have a key: search the table using the index */ + /* The 0th key is active: search the table using the index */ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) { DBUG_PRINT("info",("ha_index_init returns error %d",error)); -- cgit v1.2.1 From c6186a2500390f5958d29b8d64a93140b74754a6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 28 Sep 2009 10:23:06 +0800 Subject: BUG #46572 DROP TEMPORARY table IF EXISTS does not have a consistent behavior in ROW mode In RBR, 'DROP TEMPORARY TABLE IF EXISTS...' statement is binlogged when the table does not exist. In fact, 'DROP TEMPORARY TABLE ...' statement should never be binlogged in RBR no matter if the table exists or not. This patch addresses this by checking whether we are dropping a temporary table or not, when building the custom drop statement. --- sql/sql_table.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 08f3311be9d..6dba2f02071 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1949,7 +1949,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, being built. The string always end in a comma and the comma will be chopped off before being written to the binary log. */ - if (thd->current_stmt_binlog_row_based && !dont_log_query) + if (!drop_temporary && thd->current_stmt_binlog_row_based && !dont_log_query) { non_temp_tables_count++; /* -- cgit v1.2.1 From 2dbe095c3aec3d77ee759f17e04b2039be3f9b4c Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Mon, 28 Sep 2009 12:48:52 +0200 Subject: Bug#46958: Assertion in Diagnostics_area::set_ok_status, trigger, merge table The problem with break statements is that they have very local effects. Hence a break statement within the inner loop of a nested-loops join caused execution to proceed to the next table even though a serious error occurred. The problem was fixed by breaking out the inner loop into its own method. The change empowers all errors to terminate the execution. The errors that will now halt multi-DELETE execution altogether are - triggers returning errors - handler errors - server being killed mysql-test/r/delete.result: Bug#46958: Test result. mysql-test/t/delete.test: Bug#46958: Test case. sql/sql_class.h: Bug#46958: New method declaration. sql/sql_delete.cc: Bug#46958: New method implementation. 
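For illustration only, a minimal stand-alone sketch of the refactoring pattern behind this fix follows. It uses a hypothetical Table type and invented function names, not the actual multi_delete classes: the point is merely that once the per-table row loop lives in its own function, its return value can stop the outer per-table loop, whereas a break inside one big nested loop only leaves the innermost level.

  /* Illustrative only: hypothetical Table type, not the server's classes. */
  #include <vector>

  struct Table { const char *name; int rows_left; bool fail_on_row; };

  /* Inner loop in its own function: any error ends this table at once
     and is reported to the caller. */
  static int delete_rows_from_table(Table &t)
  {
    while (t.rows_left > 0)
    {
      if (t.fail_on_row)
        return 1;                     /* trigger/handler error */
      t.rows_left--;                  /* "delete" one row */
    }
    return -1;                        /* end of file for this table */
  }

  /* Outer loop: the inner function's return value can now terminate the
     whole multi-table operation. */
  static int delete_from_all_tables(std::vector<Table> &tables)
  {
    for (Table &t : tables)
    {
      int error= delete_rows_from_table(t);
      if (error == -1)                /* end of file is not an error */
        error= 0;
      if (error)
        return error;                 /* halt remaining tables too */
    }
    return 0;
  }

  int main()
  {
    std::vector<Table> tables= {{"t1", 3, false}, {"t2", 2, true}, {"t3", 5, false}};
    return delete_from_all_tables(tables);  /* 1: t2 failed, t3 never touched */
  }

The real fix mirrors this shape: do_deletes() iterates over the tables and do_table_deletes() handles one table's rows.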
--- sql/sql_class.h | 3 +- sql/sql_delete.cc | 148 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 89 insertions(+), 62 deletions(-) (limited to 'sql') diff --git a/sql/sql_class.h b/sql/sql_class.h index c38eb17f191..b0128244030 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2907,7 +2907,8 @@ public: bool send_data(List &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); - int do_deletes(); + int do_deletes(); + int do_table_deletes(TABLE *table, bool ignore); bool send_eof(); virtual void abort(); }; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index d2f90fa9288..a81a5f4641f 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -860,22 +860,19 @@ void multi_delete::abort() -/* +/** Do delete from other tables. - Returns values: - 0 ok - 1 error + + @retval 0 ok + @retval 1 error + + @todo Is there any reason not use the normal nested-loops join? If not, and + there is no documentation supporting it, this method and callee should be + removed and there should be hooks within normal execution. */ int multi_delete::do_deletes() { - int local_error= 0, counter= 0, tmp_error; - bool will_batch; - /* - If the IGNORE option is used all non fatal errors will be translated - to warnings and we should not break the row-by-row iteration - */ - bool ignore= thd->lex->current_select->no_error; DBUG_ENTER("do_deletes"); DBUG_ASSERT(do_delete); @@ -886,79 +883,108 @@ int multi_delete::do_deletes() table_being_deleted= (delete_while_scanning ? delete_tables->next_local : delete_tables); - for (; table_being_deleted; + for (uint counter= 0; table_being_deleted; table_being_deleted= table_being_deleted->next_local, counter++) { - ha_rows last_deleted= deleted; TABLE *table = table_being_deleted->table; if (tempfiles[counter]->get(table)) + DBUG_RETURN(1); + + int local_error= + do_table_deletes(table, thd->lex->current_select->no_error); + + if (thd->killed && !local_error) + DBUG_RETURN(1); + + if (local_error == -1) // End of file + local_error = 0; + + if (local_error) + DBUG_RETURN(local_error); + } + DBUG_RETURN(0); +} + + +/** + Implements the inner loop of nested-loops join within multi-DELETE + execution. + + @param table The table from which to delete. + + @param ignore If used, all non fatal errors will be translated + to warnings and we should not break the row-by-row iteration. + + @return Status code + + @retval 0 All ok. + @retval 1 Triggers or handler reported error. + @retval -1 End of file from handler. 
+*/ +int multi_delete::do_table_deletes(TABLE *table, bool ignore) +{ + int local_error= 0; + READ_RECORD info; + ha_rows last_deleted= deleted; + DBUG_ENTER("do_deletes_for_table"); + init_read_record(&info, thd, table, NULL, 0, 1, FALSE); + /* + Ignore any rows not found in reference tables as they may already have + been deleted by foreign key handling + */ + info.ignore_not_found_rows= 1; + bool will_batch= !table->file->start_bulk_delete(); + while (!(local_error= info.read_record(&info)) && !thd->killed) + { + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE)) { - local_error=1; + local_error= 1; break; } - - READ_RECORD info; - init_read_record(&info, thd, table, NULL, 0, 1, FALSE); + + local_error= table->file->ha_delete_row(table->record[0]); + if (local_error && !ignore) + { + table->file->print_error(local_error, MYF(0)); + break; + } + /* - Ignore any rows not found in reference tables as they may already have - been deleted by foreign key handling + Increase the reported number of deleted rows only if no error occurred + during ha_delete_row. + Also, don't execute the AFTER trigger if the row operation failed. */ - info.ignore_not_found_rows= 1; - will_batch= !table->file->start_bulk_delete(); - while (!(local_error=info.read_record(&info)) && !thd->killed) + if (!local_error) { + deleted++; if (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_DELETE, - TRG_ACTION_BEFORE, FALSE)) + TRG_ACTION_AFTER, FALSE)) { local_error= 1; break; } - - local_error= table->file->ha_delete_row(table->record[0]); - if (local_error && !ignore) - { - table->file->print_error(local_error,MYF(0)); - break; - } - - /* - Increase the reported number of deleted rows only if no error occurred - during ha_delete_row. - Also, don't execute the AFTER trigger if the row operation failed. - */ - if (!local_error) - { - deleted++; - if (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_DELETE, - TRG_ACTION_AFTER, FALSE)) - { - local_error= 1; - break; - } - } } - if (will_batch && (tmp_error= table->file->end_bulk_delete())) + } + if (will_batch) + { + int tmp_error= table->file->end_bulk_delete(); + if (tmp_error && !local_error) { - if (!local_error) - { - local_error= tmp_error; - table->file->print_error(local_error,MYF(0)); - } + local_error= tmp_error; + table->file->print_error(local_error, MYF(0)); } - if (last_deleted != deleted && !table->file->has_transactions()) - thd->transaction.stmt.modified_non_trans_table= TRUE; - end_read_record(&info); - if (thd->killed && !local_error) - local_error= 1; - if (local_error == -1) // End of file - local_error = 0; } + if (last_deleted != deleted && !table->file->has_transactions()) + thd->transaction.stmt.modified_non_trans_table= TRUE; + + end_read_record(&info); + DBUG_RETURN(local_error); } - /* Send ok to the client -- cgit v1.2.1 From e86f08d054e4de694071e0eef92ca4ca2b27cf2b Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Mon, 28 Sep 2009 13:25:47 +0200 Subject: Bug#35996: SELECT + SHOW VIEW should be enough to display view definition During SHOW CREATE VIEW there is no reason to 'anonymize' errors that name objects that a user does not have access to. Moreover it was inconsistently implemented. For example base tables being referenced from a view appear to be ok, but not views. The manual on the other hand is clear: If a user has the privileges SELECT and SHOW VIEW, the view definition is available to that user, period. 
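The fix described next relies on the server's pushable error-handler mechanism. As a rough stand-alone sketch of that push/pop pattern, with simplified hypothetical types standing in for the real Internal_error_handler interface and THD:

  /* Simplified model of a pushable error handler; hypothetical types. */
  #include <cstdio>
  #include <vector>

  struct Error_handler
  {
    virtual bool handle(int errcode)= 0;      /* true = error consumed */
    virtual ~Error_handler() {}
  };

  struct Session
  {
    std::vector<Error_handler*> handlers;
    void push_handler(Error_handler *h) { handlers.push_back(h); }
    void pop_handler() { handlers.pop_back(); }
    bool raise(int errcode)                   /* true = statement fails */
    {
      if (!handlers.empty() && handlers.back()->handle(errcode))
        return false;                         /* downgraded, keep executing */
      std::printf("ERROR %d\n", errcode);
      return true;
    }
  };

  /* Handler that hides errors about a view's underlying objects and turns
     them into a warning, which is what SHOW CREATE VIEW wants. */
  struct Show_create_suppressor : Error_handler
  {
    bool handle(int errcode)
    {
      const int NO_SUCH_TABLE= 1146, ACCESS_DENIED= 1142;   /* example codes */
      if (errcode == NO_SUCH_TABLE || errcode == ACCESS_DENIED)
      {
        std::printf("Warning: view references an invalid or restricted object\n");
        return true;
      }
      return false;
    }
  };

  int main()
  {
    Session s;
    Show_create_suppressor h;
    s.push_handler(&h);
    s.raise(1146);            /* suppressed while the handler is pushed */
    s.pop_handler();
    s.raise(1146);            /* fatal again afterwards */
  }

While such a handler is pushed, the selected errors are reported as warnings and the statement keeps running; popping it restores the normal fatal behaviour.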
The fix changes the behavior to support the manual. mysql-test/r/information_schema_db.result: Bug#35996: Changed warnings. mysql-test/r/view_grant.result: Bug#35996: Changed warnings, test result. mysql-test/t/information_schema_db.test: Bug#35996: Changed test case to reflect new behavior. mysql-test/t/view_grant.test: Bug#35996: Test case. sql/sql_acl.cc: Bug#35996: Code no longer necessary, we may as well exempt SHOW CREATE VIEW from this check. sql/sql_show.cc: Bug#35996: The fix: An Internal_error_handler that hides most errors raised by access checking as they are not relevant to SHOW CREATE VIEW. sql/table.cc: Bug#35996: Restricting this hack to act only when there is no Internal_error_handler. --- sql/sql_acl.cc | 3 +- sql/sql_show.cc | 143 +++++++++++++++++++++++++++++++++++++++++++++++++------- sql/table.cc | 9 +++- 3 files changed, 134 insertions(+), 21 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ab18a2d1d04..d7d662f912d 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -4072,8 +4072,7 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, db_name= table_ref->view_db.str; table_name= table_ref->view_name.str; if (table_ref->belong_to_view && - (thd->lex->sql_command == SQLCOM_SHOW_FIELDS || - thd->lex->sql_command == SQLCOM_SHOW_CREATE)) + thd->lex->sql_command == SQLCOM_SHOW_FIELDS) { view_privs= get_column_grant(thd, grant, db_name, table_name, name); if (view_privs & VIEW_ANY_ACL) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index bb377e676e2..a065d953b67 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -581,6 +581,126 @@ find_files(THD *thd, List *files, const char *db, } +/** + An Internal_error_handler that suppresses errors regarding views' + underlying tables that occur during privilege checking within SHOW CREATE + VIEW commands. This happens in the cases when + + - A view's underlying table (e.g. referenced in its SELECT list) does not + exist. There should not be an error as no attempt was made to access it + per se. + + - Access is denied for some table, column, function or stored procedure + such as mentioned above. This error gets raised automatically, since we + can't untangle its access checking from that of the view itself. + */ +class Show_create_error_handler : public Internal_error_handler { + + TABLE_LIST *m_top_view; + bool m_handling; + Security_context *m_sctx; + + char m_view_access_denied_message[MYSQL_ERRMSG_SIZE]; + char *m_view_access_denied_message_ptr; + +public: + + /** + Creates a new Show_create_error_handler for the particular security + context and view. + + @thd Thread context, used for security context information if needed. + @top_view The view. We do not verify at this point that top_view is in + fact a view since, alas, these things do not stay constant. + */ + explicit Show_create_error_handler(THD *thd, TABLE_LIST *top_view) : + m_top_view(top_view), m_handling(FALSE), + m_view_access_denied_message_ptr(NULL) + { + + m_sctx = test(m_top_view->security_ctx) ? + m_top_view->security_ctx : thd->security_ctx; + } + + /** + Lazy instantiation of 'view access denied' message. The purpose of the + Show_create_error_handler is to hide details of underlying tables for + which we have no privileges behind ER_VIEW_INVALID messages. But this + obviously does not apply if we lack privileges on the view itself. + Unfortunately the information about for which table privilege checking + failed is not available at this point. 
The only way for us to check is by + reconstructing the actual error message and see if it's the same. + */ + char* get_view_access_denied_message() + { + if (!m_view_access_denied_message_ptr) + { + m_view_access_denied_message_ptr= m_view_access_denied_message; + my_snprintf(m_view_access_denied_message, MYSQL_ERRMSG_SIZE, + ER(ER_TABLEACCESS_DENIED_ERROR), "SHOW VIEW", + m_sctx->priv_user, + m_sctx->host_or_ip, m_top_view->get_table_name()); + } + return m_view_access_denied_message_ptr; + } + + bool handle_error(uint sql_errno, const char *message, + MYSQL_ERROR::enum_warning_level level, THD *thd) { + /* + The handler does not handle the errors raised by itself. + At this point we know if top_view is really a view. + */ + if (m_handling || !m_top_view->view) + return FALSE; + + m_handling= TRUE; + + bool is_handled; + + switch (sql_errno) + { + case ER_TABLEACCESS_DENIED_ERROR: + if (!strcmp(get_view_access_denied_message(), message)) + { + /* Access to top view is not granted, don't interfere. */ + is_handled= FALSE; + break; + } + case ER_COLUMNACCESS_DENIED_ERROR: + case ER_VIEW_NO_EXPLAIN: /* Error was anonymized, ignore all the same. */ + case ER_PROCACCESS_DENIED_ERROR: + is_handled= TRUE; + break; + + case ER_NO_SUCH_TABLE: + /* Established behavior: warn if underlying tables are missing. */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_VIEW_INVALID, + ER(ER_VIEW_INVALID), + m_top_view->get_db_name(), + m_top_view->get_table_name()); + is_handled= TRUE; + break; + + case ER_SP_DOES_NOT_EXIST: + /* Established behavior: warn if underlying functions are missing. */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_VIEW_INVALID, + ER(ER_VIEW_INVALID), + m_top_view->get_db_name(), + m_top_view->get_table_name()); + is_handled= TRUE; + break; + default: + is_handled= FALSE; + } + + m_handling= FALSE; + return is_handled; + } +}; + + bool mysqld_show_create(THD *thd, TABLE_LIST *table_list) { @@ -594,26 +714,13 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) /* We want to preserve the tree for views. */ thd->lex->view_prepare_mode= TRUE; - /* Only one table for now, but VIEW can involve several tables */ - if (open_normal_and_derived_tables(thd, table_list, 0)) { - if (!table_list->view || - (thd->is_error() && thd->main_da.sql_errno() != ER_VIEW_INVALID)) + Show_create_error_handler view_error_suppressor(thd, table_list); + thd->push_internal_handler(&view_error_suppressor); + bool error= open_normal_and_derived_tables(thd, table_list, 0); + thd->pop_internal_handler(); + if (error && thd->main_da.is_error()) DBUG_RETURN(TRUE); - - /* - Clear all messages with 'error' level status and - issue a warning with 'warning' level status in - case of invalid view and last error is ER_VIEW_INVALID - */ - mysql_reset_errors(thd, true); - thd->clear_error(); - - push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_WARN, - ER_VIEW_INVALID, - ER(ER_VIEW_INVALID), - table_list->view_db.str, - table_list->view_name.str); } /* TODO: add environment variables show when it become possible */ diff --git a/sql/table.cc b/sql/table.cc index 4f647618979..04f2a3fbcf8 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3338,7 +3338,12 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type) /** - Hide errors which show view underlying table information + Hide errors which show view underlying table information. 
+ There are currently two mechanisms at work that handle errors for views, + this one and a more general mechanism based on an Internal_error_handler, + see Show_create_error_handler. The latter handles errors encountered during + execution of SHOW CREATE VIEW, while the machanism using this method is + handles SELECT from views. The two methods should not clash. @param[in,out] thd thread handler @@ -3347,6 +3352,8 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type) void TABLE_LIST::hide_view_error(THD *thd) { + if (thd->get_internal_handler()) + return; /* Hide "Unknown column" or "Unknown function" error */ DBUG_ASSERT(thd->is_error()); -- cgit v1.2.1 From 4102363f370f834a6c641a6adec560408c2d619d Mon Sep 17 00:00:00 2001 From: "Tatiana A. Nurnberg" Date: Mon, 28 Sep 2009 05:41:10 -0700 Subject: Bug#43746: YACC return wrong query string when parse 'load data infile' sql statement "load data" statements were written to the binlog as a mix of the original statement and bits recreated from parse-info. This relied on implementation details and broke with IGNORE_SPACES and versioned comments. We now completely resynthesize the query for LOAD DATA for binlog (which among other things normalizes them somewhat with regard to case, spaces, etc.). We have already parsed the query properly, so we make use of that rather than mix-and-match string literals and parsed items. This should make us safe with regard to versioned comments, even those spanning multiple tokens. Also no longer affected by IGNORE_SPACES. mysql-test/r/mysqlbinlog.result: LOAD DATA INFILE normalized mysql-test/suite/binlog/r/binlog_killed_simulate.result: LOAD DATA INFILE normalized mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result: LOAD DATA INFILE normalized mysql-test/suite/binlog/r/binlog_stm_blackhole.result: LOAD DATA INFILE normalized mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result: LOAD DATA INFILE normalized mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result: LOAD DATA INFILE normalized mysql-test/suite/rpl/r/rpl_loaddata.result: LOAD DATA INFILE normalized mysql-test/suite/rpl/r/rpl_loaddata_fatal.result: LOAD DATA INFILE normalized; offsets adjusted to reflect that mysql-test/suite/rpl/r/rpl_loaddata_map.result: LOAD DATA INFILE normalized mysql-test/suite/rpl/r/rpl_loaddatalocal.result: test for #43746 - trying to break LOAD DATA part of parser mysql-test/suite/rpl/r/rpl_stm_log.result: LOAD DATA INFILE normalized mysql-test/suite/rpl/t/rpl_loaddatalocal.test: try to break the LOAD DATA part of the parser (test for #43746) mysql-test/t/mysqlbinlog.test: LOAD DATA INFILE normalized; adjust offsets to reflect that sql/log_event.cc: clean up Load_log_event::print_query and friends so they don't print excess spaces. add support for printing charset names to print_query. sql/log_event.h: We already have three places where we synthesize LOAD DATA queries. Better use one of those! sql/sql_lex.h: When binlogging LOAD DATA statements, we make up the statement to be logged (from the parse-info, rather than substrings of the original query) now. Consequently, we no longer need (string-) pointers into the original query. sql/sql_load.cc: Completely rewrote write_execute_load_query_log_event() to synthesize the LOAD DATA statement wholesale, rather than piece it together from synthesized bits and literal excerpts from the original query. 
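As a rough stand-alone illustration of the resynthesis approach (hypothetical, simplified structures; the real code rebuilds the statement from the parsed LEX/sql_exchange data), the statement written to the binlog is assembled entirely from parsed components rather than from substrings of the raw query:

  /* Hypothetical sketch only, not the server's implementation. */
  #include <cstdio>
  #include <string>
  #include <vector>

  struct Parsed_load
  {
    bool local;
    std::string file_name;
    enum { DUP_ERROR, DUP_REPLACE, DUP_IGNORE } dup;
    std::string table_name;
    std::string field_term;
    std::vector<std::string> fields;
  };

  static std::string synthesize_load_data(const Parsed_load &p)
  {
    std::string q= "LOAD DATA ";
    if (p.local)
      q+= "LOCAL ";
    q+= "INFILE '" + p.file_name + "' ";
    if (p.dup == Parsed_load::DUP_REPLACE)
      q+= "REPLACE ";
    else if (p.dup == Parsed_load::DUP_IGNORE)
      q+= "IGNORE ";
    q+= "INTO TABLE `" + p.table_name + "`";
    q+= " FIELDS TERMINATED BY '" + p.field_term + "'";
    if (!p.fields.empty())
    {
      q+= " (";
      for (size_t i= 0; i < p.fields.size(); i++)
      {
        if (i)
          q+= ", ";
        q+= p.fields[i];
      }
      q+= ")";
    }
    return q;
  }

  int main()
  {
    /* However the original query was written (case, spacing, comments),
       the rebuilt statement always comes out in this canonical form. */
    Parsed_load p= {true, "/tmp/data.txt", Parsed_load::DUP_IGNORE, "t1", "\t", {"a", "b"}};
    std::printf("%s\n", synthesize_load_data(p).c_str());
    return 0;
  }

The server's write_execute_load_query_log_event() follows the same idea using the real parse structures.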
This will not only give us a nice, normalized statement (all uppercase, no excess spaces, etc.), it will also handle comments, including versioned comments right, which is certainly more than we can say about the previous incarnation. sql/sql_yacc.yy: We're no longer assembling LOAD DATA statements from bodyparts of the original query, so some bookkeeping in the parser can go. --- sql/log_event.cc | 29 ++++++++------ sql/log_event.h | 6 +-- sql/sql_lex.h | 7 ---- sql/sql_load.cc | 114 ++++++++++++++++++++++++++++++++++++++++++++++++------- sql/sql_yacc.yy | 12 ++---- 5 files changed, 125 insertions(+), 43 deletions(-) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index 08fe3aba8ed..d91b09d90c9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -4010,7 +4010,7 @@ uint Load_log_event::get_query_buffer_length() } -void Load_log_event::print_query(bool need_db, char *buf, +void Load_log_event::print_query(bool need_db, const char *cs, char *buf, char **end, char **fn_start, char **fn_end) { char *pos= buf; @@ -4034,9 +4034,9 @@ void Load_log_event::print_query(bool need_db, char *buf, pos= strmov(pos+fname_len, "' "); if (sql_ex.opt_flags & REPLACE_FLAG) - pos= strmov(pos, " REPLACE "); + pos= strmov(pos, "REPLACE "); else if (sql_ex.opt_flags & IGNORE_FLAG) - pos= strmov(pos, " IGNORE "); + pos= strmov(pos, "IGNORE "); pos= strmov(pos ,"INTO"); @@ -4047,8 +4047,16 @@ void Load_log_event::print_query(bool need_db, char *buf, memcpy(pos, table_name, table_name_len); pos+= table_name_len; - /* We have to create all optinal fields as the default is not empty */ - pos= strmov(pos, "` FIELDS TERMINATED BY "); + if (cs != NULL) + { + pos= strmov(pos ,"` CHARACTER SET "); + pos= strmov(pos , cs); + } + else + pos= strmov(pos, "`"); + + /* We have to create all optional fields as the default is not empty */ + pos= strmov(pos, " FIELDS TERMINATED BY "); pos= pretty_print_str(pos, sql_ex.field_term, sql_ex.field_term_len); if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG) pos= strmov(pos, " OPTIONALLY "); @@ -4102,7 +4110,7 @@ void Load_log_event::pack_info(Protocol *protocol) if (!(buf= (char*) my_malloc(get_query_buffer_length(), MYF(MY_WME)))) return; - print_query(TRUE, buf, &end, 0, 0); + print_query(TRUE, NULL, buf, &end, 0, 0); protocol->store(buf, end-buf, &my_charset_bin); my_free(buf, MYF(0)); } @@ -4367,9 +4375,9 @@ void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info, my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname); if (sql_ex.opt_flags & REPLACE_FLAG) - my_b_printf(&cache," REPLACE "); + my_b_printf(&cache,"REPLACE "); else if (sql_ex.opt_flags & IGNORE_FLAG) - my_b_printf(&cache," IGNORE "); + my_b_printf(&cache,"IGNORE "); my_b_printf(&cache, "INTO TABLE `%s`", table_name); my_b_printf(&cache, " FIELDS TERMINATED BY "); @@ -4577,8 +4585,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, goto error; } - print_query(FALSE, load_data_query, &end, (char **)&thd->lex->fname_start, - (char **)&thd->lex->fname_end); + print_query(FALSE, NULL, load_data_query, &end, NULL, NULL); *end= 0; thd->set_query(load_data_query, (uint) (end - load_data_query)); @@ -6732,7 +6739,7 @@ void Execute_load_query_log_event::print(FILE* file, my_b_printf(&cache, "\'"); if (dup_handling == LOAD_DUP_REPLACE) my_b_printf(&cache, " REPLACE"); - my_b_printf(&cache, " INTO "); + my_b_printf(&cache, " INTO"); my_b_write(&cache, (uchar*) query + fn_pos_end, q_len-fn_pos_end); my_b_printf(&cache, "\n%s\n", print_event_info->delimiter); } diff 
--git a/sql/log_event.h b/sql/log_event.h index 8202dddcc76..31d4a7480c2 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1960,15 +1960,15 @@ private: class Load_log_event: public Log_event { private: - uint get_query_buffer_length(); - void print_query(bool need_db, char *buf, char **end, - char **fn_start, char **fn_end); protected: int copy_log_event(const char *buf, ulong event_len, int body_offset, const Format_description_log_event* description_event); public: + uint get_query_buffer_length(); + void print_query(bool need_db, const char *cs, char *buf, char **end, + char **fn_start, char **fn_end); ulong thread_id; ulong slave_proxy_id; uint32 table_name_len; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 76fd5354c51..4e4794ef2cf 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1735,13 +1735,6 @@ typedef struct st_lex : public Query_tables_list const char *stmt_definition_end; - /* - Pointers to part of LOAD DATA statement that should be rewritten - during replication ("LOCAL 'filename' REPLACE INTO" part). - */ - const char *fname_start; - const char *fname_end; - /** During name resolution search only in the table list given by Name_resolution_context::first_name_resolution_table and diff --git a/sql/sql_load.cc b/sql/sql_load.cc index b7f33d51335..e830e29176b 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -83,10 +83,13 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, String &enclosed, ulong skip_lines, bool ignore_check_option_errors); #ifndef EMBEDDED_LIBRARY -static bool write_execute_load_query_log_event(THD *thd, - bool duplicates, bool ignore, - bool transactional_table, - int errcode); +static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, + const char* db_arg, + const char* table_name_arg, + enum enum_duplicates duplicates, + bool ignore, + bool transactional_table, + int errocode); #endif /* EMBEDDED_LIBRARY */ /* @@ -497,8 +500,10 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); if (thd->transaction.stmt.modified_non_trans_table) - write_execute_load_query_log_event(thd, handle_duplicates, - ignore, transactional_table, + write_execute_load_query_log_event(thd, ex, + tdb, table_list->table_name, + handle_duplicates, ignore, + transactional_table, errcode); else { @@ -542,8 +547,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (lf_info.wrote_create_file) { int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); - write_execute_load_query_log_event(thd, handle_duplicates, ignore, - transactional_table, errcode); + write_execute_load_query_log_event(thd, ex, + tdb, table_list->table_name, + handle_duplicates, ignore, + transactional_table, + errcode); } } } @@ -564,15 +572,95 @@ err: #ifndef EMBEDDED_LIBRARY /* Not a very useful function; just to avoid duplication of code */ -static bool write_execute_load_query_log_event(THD *thd, - bool duplicates, bool ignore, - bool transactional_table, +static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, + const char* db_arg, + const char* table_name_arg, + enum enum_duplicates duplicates, + bool ignore, + bool transactional_table, int errcode) { + char *load_data_query, + *end, + *fname_start, + *fname_end, + *p= NULL; + size_t pl= 0; + List fv; + Item *item, *val; + String pfield, pfields; + int n; + + Load_log_event lle(thd, ex, db_arg, table_name_arg, fv, duplicates, + ignore, transactional_table); + + /* + force in 
a LOCAL if there was one in the original. + */ + if (thd->lex->local_file) + lle.set_fname_outside_temp_buf(ex->file_name, strlen(ex->file_name)); + + /* + prepare fields-list and SET if needed; print_query won't do that for us. + */ + if (!thd->lex->field_list.is_empty()) + { + List_iterator li(thd->lex->field_list); + + pfields.append(" ("); + n= 0; + + while ((item= li++)) + { + if (n++) + pfields.append(", "); + if (item->name) + pfields.append(item->name); + else + item->print(&pfields, QT_ORDINARY); + } + pfields.append(")"); + } + + if (!thd->lex->update_list.is_empty()) + { + List_iterator lu(thd->lex->update_list); + List_iterator lv(thd->lex->value_list); + + pfields.append(" SET "); + n= 0; + + while ((item= lu++)) + { + val= lv++; + if (n++) + pfields.append(", "); + pfields.append(item->name); + pfields.append("="); + val->print(&pfields, QT_ORDINARY); + } + } + + p= pfields.c_ptr_safe(); + pl= strlen(p); + + if (!(load_data_query= (char *)thd->alloc(lle.get_query_buffer_length() + 1 + pl))) + return TRUE; + + lle.print_query(FALSE, (const char *) ex->cs?ex->cs->csname:NULL, + load_data_query, &end, + (char **)&fname_start, (char **)&fname_end); + + strcpy(end, p); + end += pl; + + thd->query_length= end - load_data_query; + thd->query= load_data_query; + Execute_load_query_log_event e(thd, thd->query, thd->query_length, - (uint) ((char*)thd->lex->fname_start - (char*)thd->query), - (uint) ((char*)thd->lex->fname_end - (char*)thd->query), + (uint) ((char*)fname_start - (char*)thd->query - 1), + (uint) ((char*)fname_end - (char*)thd->query), (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE : (ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR), transactional_table, FALSE, errcode); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index db97e77bbd0..12e124230e5 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -10425,14 +10425,12 @@ load: { THD *thd= YYTHD; LEX *lex= thd->lex; - Lex_input_stream *lip= YYLIP; if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA"); MYSQL_YYABORT; } - lex->fname_start= lip->get_ptr(); } load_data {} @@ -10464,14 +10462,10 @@ load_data: if (!(lex->exchange= new sql_exchange($4.str, 0))) MYSQL_YYABORT; } - opt_duplicate INTO - { - Lex->fname_end= YYLIP->get_ptr(); - } - TABLE_SYM table_ident + opt_duplicate INTO TABLE_SYM table_ident { LEX *lex=Lex; - if (!Select->add_table_to_list(YYTHD, $10, NULL, TL_OPTION_UPDATING, + if (!Select->add_table_to_list(YYTHD, $9, NULL, TL_OPTION_UPDATING, lex->lock_option)) MYSQL_YYABORT; lex->field_list.empty(); @@ -10479,7 +10473,7 @@ load_data: lex->value_list.empty(); } opt_load_data_charset - { Lex->exchange->cs= $12; } + { Lex->exchange->cs= $11; } opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec opt_load_data_set_spec {} -- cgit v1.2.1 From 5f8bb5c507631a56a267f42972c809d51dacd84b Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Tue, 29 Sep 2009 07:23:38 +0500 Subject: Bug#47150 Assertion in Field_long::val_int() on MERGE + TRIGGER + multi-table UPDATE The bug is not related to MERGE table or TRIGGER. More correct description would be 'assertion on multi-table UPDATE + NATURAL JOIN + MERGEABLE VIEW'. On PREPARE stage(see test case) we call mark_common_columns() func which creates ON condition for NATURAL JOIN and sets appropriate table read_set bitmaps for fields which are used in ON condition. On EXECUTE stage mark_common_columns() is not called, we set necessary read_set bitmaps in setup_conds(). 
But 'B.f1' field is already processed and related item alredy fixed before setup_conds() as updated field and setup_conds can not set read_set bitmap because of that. The fix is to set read_set bitmap for appropriate table field even if Item_direct_view_ref item which represents a refernce to this field is fixed. mysql-test/r/join.result: test result mysql-test/t/join.test: test case sql/item.cc: The bug is not related to MERGE table or TRIGGER. More correct description would be 'assertion on multi-table UPDATE + NATURAL JOIN + MERGEABLE VIEW'. On PREPARE stage(see test case) we call mark_common_columns() func which creates ON condition for NATURAL JOIN and sets appropriate table read_set bitmaps for fields which are used in ON condition. On EXECUTE stage mark_common_columns() is not called, we set necessary read_set bitmaps in setup_conds(). But 'B.f1' field is already processed and related item alredy fixed before setup_conds() as updated field and setup_conds can not set read_set bitmap because of that. The fix is to set read_set bitmap for appropriate table field even if Item_direct_view_ref item which represents a refernce to this field is fixed. --- sql/item.cc | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 26df3a45971..86e4551e55b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6331,9 +6331,26 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) /* view fild reference must be defined */ DBUG_ASSERT(*ref); /* (*ref)->check_cols() will be made in Item_direct_ref::fix_fields */ - if (!(*ref)->fixed && - ((*ref)->fix_fields(thd, ref))) + if ((*ref)->fixed) + { + Item *ref_item= (*ref)->real_item(); + if (ref_item->type() == Item::FIELD_ITEM) + { + /* + In some cases we need to update table read set(see bug#47150). + If ref item is FIELD_ITEM and fixed then field and table + have proper values. So we can use them for update. + */ + Field *fld= ((Item_field*) ref_item)->field; + DBUG_ASSERT(fld && fld->table); + if (thd->mark_used_columns == MARK_COLUMNS_READ) + bitmap_set_bit(fld->table->read_set, fld->field_index); + } + } + else if (!(*ref)->fixed && + ((*ref)->fix_fields(thd, ref))) return TRUE; + return Item_direct_ref::fix_fields(thd, reference); } -- cgit v1.2.1 From 8d3d35ea5715739ac94785b8c9ff1a185699a84a Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Tue, 29 Sep 2009 07:58:42 -0300 Subject: Bug#45567: Fast ALTER TABLE broken for enum and set The problem was that appending values to the end of an existing ENUM or SET column was being treated as table data modification, preventing a immediately (fast) table alteration that occurs when only table metadata is being modified. The cause was twofold: adding a enumeration or set members to the end of the list of valid member values was not being considered a "compatible" table alteration, and for SET columns, the check was being done upon the max display length and not the underlying (pack) length of the field. The solution is to augment the function that checks wether two ENUM or SET fields are compatible -- by comparing the pack lengths and performing a limited comparison of the member values. mysql-test/r/alter_table.result: Add test case result for Bug#45567 mysql-test/t/alter_table.test: Add test case for Bug#45567 sql/field.cc: Check whether two fields can be considered 'equal' for table alteration purposes. Fields are equal if they retain the same pack length and if new members are added to the end of the list. 
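For illustration, the compatibility rule can be sketched as a stand-alone check (hypothetical types; the real code compares the fields' pack lengths and their TYPELIB member lists):

  /* Illustrative sketch of the "fast ALTER is possible" rule. */
  #include <string>
  #include <vector>

  struct Enum_def { std::vector<std::string> members; unsigned pack_length; };

  /* A fast ALTER is possible when the storage size is unchanged and every
     old member keeps its position, i.e. new members appear only at the end. */
  static bool fast_alter_possible(const Enum_def &old_def, const Enum_def &new_def)
  {
    if (old_def.pack_length != new_def.pack_length)
      return false;
    if (new_def.members.size() < old_def.members.size())
      return false;
    for (size_t i= 0; i < old_def.members.size(); i++)
      if (old_def.members[i] != new_def.members[i])
        return false;
    return true;
  }

  int main()
  {
    Enum_def old_def=   {{"small", "medium"}, 1};
    Enum_def appended=  {{"small", "medium", "large"}, 1};  /* ok: added at end */
    Enum_def reordered= {{"medium", "small"}, 1};           /* not ok: positions change */
    return fast_alter_possible(old_def, appended) &&
           !fast_alter_possible(old_def, reordered) ? 0 : 1;
  }
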
sql/field.h: Add comment and remove method. --- sql/field.cc | 75 +++++++++++++++++++++++++++++++++++++++++++++++------------- sql/field.h | 8 ++++++- 2 files changed, 66 insertions(+), 17 deletions(-) (limited to 'sql') diff --git a/sql/field.cc b/sql/field.cc index 9f46552d5bd..0df9b0fc2e4 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8832,6 +8832,24 @@ bool Field::eq_def(Field *field) } +/** + Compare the first t1::count type names. + + @return TRUE if the type names of t1 match those of t2. FALSE otherwise. +*/ + +static bool compare_type_names(CHARSET_INFO *charset, TYPELIB *t1, TYPELIB *t2) +{ + for (uint i= 0; i < t1->count; i++) + if (my_strnncoll(charset, + (const uchar*) t1->type_names[i], + t1->type_lengths[i], + (const uchar*) t2->type_names[i], + t2->type_lengths[i])) + return FALSE; + return TRUE; +} + /** @return returns 1 if the fields are equally defined @@ -8839,32 +8857,57 @@ bool Field::eq_def(Field *field) bool Field_enum::eq_def(Field *field) { + TYPELIB *values; + if (!Field::eq_def(field)) - return 0; - return compare_enum_values(((Field_enum*) field)->typelib); -} + return FALSE; + values= ((Field_enum*) field)->typelib; -bool Field_enum::compare_enum_values(TYPELIB *values) -{ + /* Definition must be strictly equal. */ if (typelib->count != values->count) return FALSE; - for (uint i= 0; i < typelib->count; i++) - if (my_strnncoll(field_charset, - (const uchar*) typelib->type_names[i], - typelib->type_lengths[i], - (const uchar*) values->type_names[i], - values->type_lengths[i])) - return FALSE; - return TRUE; + + return compare_type_names(field_charset, typelib, values); } +/** + Check whether two fields can be considered 'equal' for table + alteration purposes. Fields are equal if they retain the same + pack length and if new members are added to the end of the list. + + @return IS_EQUAL_YES if fields are compatible. + IS_EQUAL_NO otherwise. +*/ + uint Field_enum::is_equal(Create_field *new_field) { - if (!Field_str::is_equal(new_field)) - return 0; - return compare_enum_values(new_field->interval); + TYPELIB *values= new_field->interval; + + /* + The fields are compatible if they have the same flags, + type, charset and have the same underlying length. + */ + if (compare_str_field_flags(new_field, flags) || + new_field->sql_type != real_type() || + new_field->charset != field_charset || + new_field->pack_length != pack_length()) + return IS_EQUAL_NO; + + /* + Changing the definition of an ENUM or SET column by adding a new + enumeration or set members to the end of the list of valid member + values only alters table metadata and not table data. + */ + if (typelib->count > values->count) + return IS_EQUAL_NO; + + /* Check whether there are modification before the end. */ + if (! compare_type_names(field_charset, typelib, new_field->interval)) + return IS_EQUAL_NO; + + return IS_EQUAL_YES; } diff --git a/sql/field.h b/sql/field.h index a9299256f88..7a9b69eff40 100644 --- a/sql/field.h +++ b/sql/field.h @@ -472,6 +472,13 @@ public: /* maximum possible display length */ virtual uint32 max_display_length()= 0; + /** + Whether a field being created is compatible with a existing one. + + Used by the ALTER TABLE code to evaluate whether the new definition + of a table is compatible with the old definition so that it can + determine if data needs to be copied over (table data change). 
+ */ virtual uint is_equal(Create_field *new_field); /* convert decimal to longlong with overflow check */ longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag, @@ -1862,7 +1869,6 @@ public: CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; } private: int do_save_field_metadata(uchar *first_byte); - bool compare_enum_values(TYPELIB *values); uint is_equal(Create_field *new_field); }; -- cgit v1.2.1 From da9a5ef62283286c0fff70ddab65de6d514f9586 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Tue, 29 Sep 2009 17:06:51 +0200 Subject: Bug#42108 Wrong locking for UPDATE with subqueries leads to broken statement replication MySQL server uses wrong lock type (always TL_READ instead of TL_READ_NO_INSERT when appropriate) for tables used in subqueries of UPDATE statement. This leads in some cases to a broken replication as statements are written in the wrong order to the binlog. sql/sql_yacc.yy: * Set lock_option to either TL_READ_NO_INSERT or TL_READ for any sub-SELECT following UPDATE. * Changed line adjusted for parser identation rules; code begins at column 13. --- sql/sql_yacc.yy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 09a0a4b2f12..0aa6308ab93 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -7810,7 +7810,7 @@ update: LEX *lex= Lex; mysql_init_select(lex); lex->sql_command= SQLCOM_UPDATE; - lex->lock_option= TL_UNLOCK; /* Will be set later */ + lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ; lex->duplicates= DUP_ERROR; } opt_low_priority opt_ignore join_table_list -- cgit v1.2.1 From 4d57b851a0335ac098d46dad5b9f698ff2cd3e43 Mon Sep 17 00:00:00 2001 From: Ingo Struewing Date: Tue, 29 Sep 2009 17:38:40 +0200 Subject: WL#4259 - Debug Sync Facility Backport from 6.0 to 5.1. Only those sync points are included, which are used in debug_sync.test. The Debug Sync Facility allows to place synchronization points in the code: open_tables(...) DEBUG_SYNC(thd, "after_open_tables"); lock_tables(...) When activated, a sync point can - Send a signal and/or - Wait for a signal Nomenclature: - signal: A value of a global variable that persists until overwritten by a new signal. The global variable can also be seen as a "signal post" or "flag mast". Then the signal is what is attached to the "signal post" or "flag mast". - send a signal: Assign the value (the signal) to the global variable ("set a flag") and broadcast a global condition to wake those waiting for a signal. - wait for a signal: Loop over waiting for the global condition until the global value matches the wait-for signal. Please find more information in the top comment in debug_sync.cc or in the worklog entry. .bzrignore: WL#4259 - Debug Sync Facility Added the symbolic link libmysqld/debug_sync.cc. CMakeLists.txt: WL#4259 - Debug Sync Facility Added definition for ENABLED_DEBUG_SYNC. configure.in: WL#4259 - Debug Sync Facility Added definition for ENABLED_DEBUG_SYNC. include/my_sys.h: WL#4259 - Debug Sync Facility Added definition for the DEBUG_SYNC_C macro. libmysqld/CMakeLists.txt: WL#4259 - Debug Sync Facility Added sql/debug_sync.cc. libmysqld/Makefile.am: WL#4259 - Debug Sync Facility Added sql/debug_sync.cc. mysql-test/include/have_debug_sync.inc: WL#4259 - Debug Sync Facility New include file. mysql-test/mysql-test-run.pl: WL#4259 - Debug Sync Facility Added option --debug_sync_timeout. mysql-test/r/debug_sync.result: WL#4259 - Debug Sync Facility New test result. 
mysql-test/r/have_debug_sync.require: WL#4259 - Debug Sync Facility New require file. mysql-test/t/debug_sync.test: WL#4259 - Debug Sync Facility New test file. mysys/my_static.c: WL#4259 - Debug Sync Facility Added definition for debug_sync_C_callback_ptr. mysys/thr_lock.c: WL#4259 - Debug Sync Facility Added sync point "wait_for_lock". sql/CMakeLists.txt: WL#4259 - Debug Sync Facility Added debug_sync.cc and debug_sync.h. sql/Makefile.am: WL#4259 - Debug Sync Facility Added debug_sync.cc and debug_sync.h. sql/debug_sync.cc: WL#4259 - Debug Sync Facility New source file. sql/debug_sync.h: WL#4259 - Debug Sync Facility New header file. sql/mysqld.cc: WL#4259 - Debug Sync Facility Added opt_debug_sync_timeout. Added calls to debug_sync_init() and debug_sync_end(). Fixed a purecov comment (unrelated). sql/set_var.cc: WL#4259 - Debug Sync Facility Added server variable "debug_sync". sql/set_var.h: WL#4259 - Debug Sync Facility Added declaration for server variable "debug_sync". sql/share/errmsg.txt: WL#4259 - Debug Sync Facility Added error messages ER_DEBUG_SYNC_TIMEOUT and ER_DEBUG_SYNC_HIT_LIMIT. sql/sql_base.cc: WL#4259 - Debug Sync Facility Added sync points "after_flush_unlock" and "before_lock_tables_takes_lock". sql/sql_class.cc: WL#4259 - Debug Sync Facility Added initialization for debug_sync_control to THD::THD. Added calls to debug_sync_init_thread() and debug_sync_end_thread(). sql/sql_class.h: WL#4259 - Debug Sync Facility Added element debug_sync_control to THD. storage/myisam/myisamchk.c: Fixed a typo in an error message string (unrelated). --- sql/CMakeLists.txt | 1 + sql/Makefile.am | 2 + sql/debug_sync.cc | 1906 ++++++++++++++++++++++++++++++++++++++++++++++++++ sql/debug_sync.h | 60 ++ sql/mysqld.cc | 46 +- sql/set_var.cc | 6 + sql/set_var.h | 15 + sql/share/errmsg.txt | 7 + sql/sql_base.cc | 4 + sql/sql_class.cc | 15 + sql/sql_class.h | 5 + 11 files changed, 2066 insertions(+), 1 deletion(-) create mode 100644 sql/debug_sync.cc create mode 100644 sql/debug_sync.h (limited to 'sql') diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 6f162f4d84d..7f6074c903c 100755 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -64,6 +64,7 @@ SET (SQL_SOURCE sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc sql_list.cc sql_load.cc sql_manager.cc sql_map.cc sql_parse.cc sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc + debug_sync.cc debug_sync.h sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc diff --git a/sql/Makefile.am b/sql/Makefile.am index 2a12de2eaf6..0b22481f850 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -58,6 +58,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ ha_ndbcluster.h ha_ndbcluster_cond.h \ ha_ndbcluster_binlog.h ha_ndbcluster_tables.h \ ha_partition.h rpl_constants.h \ + debug_sync.h \ opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ rpl_reporting.h \ log.h sql_show.h rpl_rli.h rpl_mi.h \ @@ -102,6 +103,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ discover.cc time.cc opt_range.cc opt_sum.cc \ records.cc filesort.cc handler.cc \ ha_partition.cc \ + debug_sync.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc new file mode 100644 index 00000000000..58ec4af7841 --- /dev/null +++ 
b/sql/debug_sync.cc @@ -0,0 +1,1906 @@ +/* Copyright (C) 2008 MySQL AB, 2008 - 2009 Sun Microsystems, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/** + == Debug Sync Facility == + + The Debug Sync Facility allows placement of synchronization points in + the server code by using the DEBUG_SYNC macro: + + open_tables(...) + + DEBUG_SYNC(thd, "after_open_tables"); + + lock_tables(...) + + When activated, a sync point can + + - Emit a signal and/or + - Wait for a signal + + Nomenclature: + + - signal: A value of a global variable that persists + until overwritten by a new signal. The global + variable can also be seen as a "signal post" + or "flag mast". Then the signal is what is + attached to the "signal post" or "flag mast". + + - emit a signal: Assign the value (the signal) to the global + variable ("set a flag") and broadcast a + global condition to wake those waiting for + a signal. + + - wait for a signal: Loop over waiting for the global condition until + the global value matches the wait-for signal. + + By default, all sync points are inactive. They do nothing (except to + burn a couple of CPU cycles for checking if they are active). + + A sync point becomes active when an action is requested for it. + To do so, put a line like this in the test case file: + + SET DEBUG_SYNC= 'after_open_tables SIGNAL opened WAIT_FOR flushed'; + + This activates the sync point 'after_open_tables'. It requests it to + emit the signal 'opened' and wait for another thread to emit the signal + 'flushed' when the thread's execution runs through the sync point. + + For every sync point there can be one action per thread only. Every + thread can request multiple actions, but only one per sync point. In + other words, a thread can activate multiple sync points. + + Here is an example how to activate and use the sync points: + + --connection conn1 + SET DEBUG_SYNC= 'after_open_tables SIGNAL opened WAIT_FOR flushed'; + send INSERT INTO t1 VALUES(1); + --connection conn2 + SET DEBUG_SYNC= 'now WAIT_FOR opened'; + SET DEBUG_SYNC= 'after_abort_locks SIGNAL flushed'; + FLUSH TABLE t1; + + When conn1 runs through the INSERT statement, it hits the sync point + 'after_open_tables'. It notices that it is active and executes its + action. It emits the signal 'opened' and waits for another thread to + emit the signal 'flushed'. + + conn2 waits immediately at the special sync point 'now' for another + thread to emit the 'opened' signal. + + A signal remains in effect until it is overwritten. If conn1 signals + 'opened' before conn2 reaches 'now', conn2 will still find the 'opened' + signal. It does not wait in this case. + + When conn2 reaches 'after_abort_locks', it signals 'flushed', which lets + conn1 awake. + + Normally the activation of a sync point is cleared when it has been + executed. Sometimes it is necessary to keep the sync point active for + another execution. 
You can add an execute count to the action: + + SET DEBUG_SYNC= 'name SIGNAL sig EXECUTE 3'; + + This sets the signal point's activation counter to 3. Each execution + decrements the counter. After the third execution the sync point + becomes inactive. + + One of the primary goals of this facility is to eliminate sleeps from + the test suite. In most cases it should be possible to rewrite test + cases so that they do not need to sleep. (But this facility cannot + synchronize multiple processes.) However, to support test development, + and as a last resort, sync point waiting times out. There is a default + timeout, but it can be overridden: + + SET DEBUG_SYNC= 'name WAIT_FOR sig TIMEOUT 10 EXECUTE 2'; + + TIMEOUT 0 is special: If the signal is not present, the wait times out + immediately. + + When a wait timed out (even on TIMEOUT 0), a warning is generated so + that it shows up in the test result. + + You can throw an error message and kill the query when a synchronization + point is hit a certain number of times: + + SET DEBUG_SYNC= 'name HIT_LIMIT 3'; + + Or combine it with signal and/or wait: + + SET DEBUG_SYNC= 'name SIGNAL sig EXECUTE 2 HIT_LIMIT 3'; + + Here the first two hits emit the signal, the third hit returns the error + message and kills the query. + + For cases where you are not sure that an action is taken and thus + cleared in any case, you can force to clear (deactivate) a sync point: + + SET DEBUG_SYNC= 'name CLEAR'; + + If you want to clear all actions and clear the global signal, use: + + SET DEBUG_SYNC= 'RESET'; + + This is the only way to reset the global signal to an empty string. + + For testing of the facility itself you can execute a sync point just + as if it had been hit: + + SET DEBUG_SYNC= 'name TEST'; + + + === Formal Syntax === + + The string to "assign" to the DEBUG_SYNC variable can contain: + + {RESET | + TEST | + CLEAR | + {{SIGNAL | + WAIT_FOR [TIMEOUT ]} + [EXECUTE ] &| HIT_LIMIT } + + Here '&|' means 'and/or'. This means that one of the sections + separated by '&|' must be present or both of them. + + + === Activation/Deactivation === + + The facility is an optional part of the MySQL server. + It is enabled in a debug server by default. + + ./configure --enable-debug-sync + + The Debug Sync Facility, when compiled in, is disabled by default. It + can be enabled by a mysqld command line option: + + --debug-sync-timeout[=default_wait_timeout_value_in_seconds] + + 'default_wait_timeout_value_in_seconds' is the default timeout for the + WAIT_FOR action. If set to zero, the facility stays disabled. + + The facility is enabled by default in the test suite, but can be + disabled with: + + mysql-test-run.pl ... --debug-sync-timeout=0 ... + + Likewise the default wait timeout can be set: + + mysql-test-run.pl ... --debug-sync-timeout=10 ... + + The command line option influences the readable value of the system + variable 'debug_sync'. + + * If the facility is not compiled in, the system variable does not exist. + + * If --debug-sync-timeout=0 the value of the variable reads as "OFF". + + * Otherwise the value reads as "ON - current signal: " followed by the + current signal string, which can be empty. + + The readable variable value is the same, regardless if read as global + or session value. + + Setting the 'debug-sync' system variable requires 'SUPER' privilege. + You can never read back the string that you assigned to the variable, + unless you assign the value that the variable does already have. But + that would give a parse error. 
A syntactically correct string is + parsed into a debug sync action and stored apart from the variable value. + + + === Implementation === + + Pseudo code for a sync point: + + #define DEBUG_SYNC(thd, sync_point_name) + if (unlikely(opt_debug_sync_timeout)) + debug_sync(thd, STRING_WITH_LEN(sync_point_name)) + + The sync point performs a binary search in a sorted array of actions + for this thread. + + The SET DEBUG_SYNC statement adds a requested action to the array or + overwrites an existing action for the same sync point. When it adds a + new action, the array is sorted again. + + + === A typical synchronization pattern === + + There are quite a few places in MySQL, where we use a synchronization + pattern like this: + + pthread_mutex_lock(&mutex); + thd->enter_cond(&condition_variable, &mutex, new_message); + #if defined(ENABLE_DEBUG_SYNC) + if (!thd->killed && !end_of_wait_condition) + DEBUG_SYNC(thd, "sync_point_name"); + #endif + while (!thd->killed && !end_of_wait_condition) + pthread_cond_wait(&condition_variable, &mutex); + thd->exit_cond(old_message); + + Here some explanations: + + thd->enter_cond() is used to register the condition variable and the + mutex in thd->mysys_var. This is done to allow the thread to be + interrupted (killed) from its sleep. Another thread can find the + condition variable to signal and mutex to use for synchronization in + this thread's THD::mysys_var. + + thd->enter_cond() requires the mutex to be acquired in advance. + + thd->exit_cond() unregisters the condition variable and mutex and + releases the mutex. + + If you want to have a Debug Sync point with the wait, please place it + behind enter_cond(). Only then you can safely decide, if the wait will + be taken. Also you will have THD::proc_info correct when the sync + point emits a signal. DEBUG_SYNC sets its own proc_info, but restores + the previous one before releasing its internal mutex. As soon as + another thread sees the signal, it does also see the proc_info from + before entering the sync point. In this case it will be "new_message", + which is associated with the wait that is to be synchronized. + + In the example above, the wait condition is repeated before the sync + point. This is done to skip the sync point, if no wait takes place. + The sync point is before the loop (not inside the loop) to have it hit + once only. It is possible that the condition variable is signaled + multiple times without the wait condition to be true. + + A bit off-topic: At some places, the loop is taken around the whole + synchronization pattern: + + while (!thd->killed && !end_of_wait_condition) + { + pthread_mutex_lock(&mutex); + thd->enter_cond(&condition_variable, &mutex, new_message); + if (!thd->killed [&& !end_of_wait_condition]) + { + [DEBUG_SYNC(thd, "sync_point_name");] + pthread_cond_wait(&condition_variable, &mutex); + } + thd->exit_cond(old_message); + } + + Note that it is important to repeat the test for thd->killed after + enter_cond(). Otherwise the killing thread may kill this thread after + it tested thd->killed in the loop condition and before it registered + the condition variable and mutex in enter_cond(). In this case, the + killing thread does not know that this thread is going to wait on a + condition variable. It would just set THD::killed. But if we would not + test it again, we would go asleep though we are killed. If the killing + thread would kill us when we are after the second test, but still + before sleeping, we hold the mutex, which is registered in mysys_var. 
+ The killing thread would try to acquire the mutex before signaling + the condition variable. Since the mutex is only released implicitly in + pthread_cond_wait(), the signaling happens at the right place. We + have a safe synchronization. + + === Co-work with the DBUG facility === + + When running the MySQL test suite with the --debug command line + option, the Debug Sync Facility writes trace messages to the DBUG + trace. The following shell commands proved very useful in extracting + relevant information: + + egrep 'query:|debug_sync_exec:' mysql-test/var/log/mysqld.1.trace + + It shows all executed SQL statements and all actions executed by + synchronization points. + + Sometimes it is also useful to see, which synchronization points have + been run through (hit) with or without executing actions. Then add + "|debug_sync_point:" to the egrep pattern. + + === Further reading === + + For a discussion of other methods to synchronize threads see + http://forge.mysql.com/wiki/MySQL_Internals_Test_Synchronization + + For complete syntax tests, functional tests, and examples see the test + case debug_sync.test. + + See also worklog entry WL#4259 - Test Synchronization Facility +*/ + +#include "debug_sync.h" + +#if defined(ENABLED_DEBUG_SYNC) + +/* + Due to weaknesses in our include files, we need to include + mysql_priv.h here. To have THD declared, we need to include + sql_class.h. This includes log_event.h, which in turn requires + declarations from mysql_priv.h (e.g. OPTION_AUTO_IS_NULL). + mysql_priv.h includes almost everything, so is sufficient here. +*/ +#include "mysql_priv.h" + +/* + Action to perform at a synchronization point. + NOTE: This structure is moved around in memory by realloc(), qsort(), + and memmove(). Do not add objects with non-trivial constuctors + or destructors, which might prevent moving of this structure + with these functions. +*/ +struct st_debug_sync_action +{ + ulong activation_count; /* max(hit_limit, execute) */ + ulong hit_limit; /* hits before kill query */ + ulong execute; /* executes before self-clear */ + ulong timeout; /* wait_for timeout */ + String signal; /* signal to emit */ + String wait_for; /* signal to wait for */ + String sync_point; /* sync point name */ + bool need_sort; /* if new action, array needs sort */ +}; + +/* Debug sync control. Referenced by THD. */ +struct st_debug_sync_control +{ + st_debug_sync_action *ds_action; /* array of actions */ + uint ds_active; /* # active actions */ + uint ds_allocated; /* # allocated actions */ + ulonglong dsp_hits; /* statistics */ + ulonglong dsp_executed; /* statistics */ + ulonglong dsp_max_active; /* statistics */ + /* + thd->proc_info points at unsynchronized memory. + It must not go away as long as the thread exists. + */ + char ds_proc_info[80]; /* proc_info string */ +}; + + +/** + Definitions for the debug sync facility. + 1. Global string variable to hold a "signal" ("signal post", "flag mast"). + 2. Global condition variable for signaling and waiting. + 3. Global mutex to synchronize access to the above. +*/ +struct st_debug_sync_globals +{ + String ds_signal; /* signal variable */ + pthread_cond_t ds_cond; /* condition variable */ + pthread_mutex_t ds_mutex; /* mutex variable */ + ulonglong dsp_hits; /* statistics */ + ulonglong dsp_executed; /* statistics */ + ulonglong dsp_max_active; /* statistics */ +}; +static st_debug_sync_globals debug_sync_global; /* All globals in one object */ + +/** + Callback pointer for C files. 
+*/ +extern "C" void (*debug_sync_C_callback_ptr)(const char *, size_t); + + +/** + Callback for debug sync, to be used by C files. See thr_lock.c for example. + + @description + + We cannot place a sync point directly in C files (like those in mysys or + certain storage engines written mostly in C like MyISAM or Maria). Because + they are C code and do not include mysql_priv.h. So they do not know the + macro DEBUG_SYNC(thd, sync_point_name). The macro needs a 'thd' argument. + Hence it cannot be used in files outside of the sql/ directory. + + The workaround is to call back simple functions like this one from + non-sql/ files. + + We want to allow modules like thr_lock to be used without sql/ and + especially without Debug Sync. So we cannot just do a simple call + of the callback function. Instead we provide a global pointer in + the other file, which is to be set to the callback by Debug Sync. + If the pointer is not set, no call back will be done. If Debug + Sync sets the pointer to a callback function like this one, it will + be called. That way thr_lock.c does not have an undefined reference + to Debug Sync and can be used without it. Debug Sync, in contrast, + has an undefined reference to that pointer and thus requires + thr_lock to be linked too. But this is not a problem as it is part + of the MySQL server anyway. + + @note + The callback pointer in C files is set only if debug sync is + initialized. And this is done only if opt_debug_sync_timeout is set. +*/ + +static void debug_sync_C_callback(const char *sync_point_name, + size_t name_len) +{ + if (unlikely(opt_debug_sync_timeout)) + debug_sync(current_thd, sync_point_name, name_len); +} + + +/** + Initialize the debug sync facility at server start. + + @return status + @retval 0 ok + @retval != 0 error +*/ + +int debug_sync_init(void) +{ + DBUG_ENTER("debug_sync_init"); + + if (opt_debug_sync_timeout) + { + int rc; + + /* Initialize the global variables. */ + debug_sync_global.ds_signal.length(0); + if ((rc= pthread_cond_init(&debug_sync_global.ds_cond, NULL)) || + (rc= pthread_mutex_init(&debug_sync_global.ds_mutex, + MY_MUTEX_INIT_FAST))) + DBUG_RETURN(rc); /* purecov: inspected */ + + /* Set the call back pointer in C files. */ + debug_sync_C_callback_ptr= debug_sync_C_callback; + } + + DBUG_RETURN(0); +} + + +/** + End the debug sync facility. + + @description + This is called at server shutdown or after a thread initialization error. +*/ + +void debug_sync_end(void) +{ + DBUG_ENTER("debug_sync_end"); + + /* End the facility only if it had been initialized. */ + if (debug_sync_C_callback_ptr) + { + /* Clear the call back pointer in C files. */ + debug_sync_C_callback_ptr= NULL; + + /* Destroy the global variables. */ + debug_sync_global.ds_signal.free(); + (void) pthread_cond_destroy(&debug_sync_global.ds_cond); + (void) pthread_mutex_destroy(&debug_sync_global.ds_mutex); + + /* Print statistics. */ + { + char llbuff[22]; + sql_print_information("Debug sync points hit: %22s", + llstr(debug_sync_global.dsp_hits, llbuff)); + sql_print_information("Debug sync points executed: %22s", + llstr(debug_sync_global.dsp_executed, llbuff)); + sql_print_information("Debug sync points max active per thread: %22s", + llstr(debug_sync_global.dsp_max_active, llbuff)); + } + } + + DBUG_VOID_RETURN; +} + + +/* purecov: begin tested */ + +/** + Disable the facility after lack of memory if no error can be returned. + + @note + Do not end the facility here because the global variables can + be in use by other threads. 
+*/ + +static void debug_sync_emergency_disable(void) +{ + DBUG_ENTER("debug_sync_emergency_disable"); + + opt_debug_sync_timeout= 0; + + DBUG_PRINT("debug_sync", + ("Debug Sync Facility disabled due to lack of memory.")); + sql_print_error("Debug Sync Facility disabled due to lack of memory."); + + DBUG_VOID_RETURN; +} + +/* purecov: end */ + + +/** + Initialize the debug sync facility at thread start. + + @param[in] thd thread handle +*/ + +void debug_sync_init_thread(THD *thd) +{ + DBUG_ENTER("debug_sync_init_thread"); + DBUG_ASSERT(thd); + + if (opt_debug_sync_timeout) + { + thd->debug_sync_control= (st_debug_sync_control*) + my_malloc(sizeof(st_debug_sync_control), MYF(MY_WME | MY_ZEROFILL)); + if (!thd->debug_sync_control) + { + /* + Error is reported by my_malloc(). + We must disable the facility. We have no way to return an error. + */ + debug_sync_emergency_disable(); /* purecov: tested */ + } + } + + DBUG_VOID_RETURN; +} + + +/** + End the debug sync facility at thread end. + + @param[in] thd thread handle +*/ + +void debug_sync_end_thread(THD *thd) +{ + DBUG_ENTER("debug_sync_end_thread"); + DBUG_ASSERT(thd); + + if (thd->debug_sync_control) + { + st_debug_sync_control *ds_control= thd->debug_sync_control; + + /* + This synchronization point can be used to synchronize on thread end. + This is the latest point in a THD's life, where this can be done. + */ + DEBUG_SYNC(thd, "thread_end"); + + if (ds_control->ds_action) + { + st_debug_sync_action *action= ds_control->ds_action; + st_debug_sync_action *action_end= action + ds_control->ds_allocated; + for (; action < action_end; action++) + { + action->signal.free(); + action->wait_for.free(); + action->sync_point.free(); + } + my_free(ds_control->ds_action, MYF(0)); + } + + /* Statistics. */ + pthread_mutex_lock(&debug_sync_global.ds_mutex); + debug_sync_global.dsp_hits+= ds_control->dsp_hits; + debug_sync_global.dsp_executed+= ds_control->dsp_executed; + if (debug_sync_global.dsp_max_active < ds_control->dsp_max_active) + debug_sync_global.dsp_max_active= ds_control->dsp_max_active; + pthread_mutex_unlock(&debug_sync_global.ds_mutex); + + my_free(ds_control, MYF(0)); + thd->debug_sync_control= NULL; + } + + DBUG_VOID_RETURN; +} + + +/** + Move a string by length. + + @param[out] to buffer for the resulting string + @param[in] to_end end of buffer + @param[in] from source string + @param[in] length number of bytes to copy + + @return pointer to end of copied string +*/ + +static char *debug_sync_bmove_len(char *to, char *to_end, + const char *from, size_t length) +{ + DBUG_ASSERT(to); + DBUG_ASSERT(to_end); + DBUG_ASSERT(!length || from); + set_if_smaller(length, (size_t) (to_end - to)); + memcpy(to, from, length); + return (to + length); +} + + +#if !defined(DBUG_OFF) + +/** + Create a string that describes an action. + + @param[out] result buffer for the resulting string + @param[in] size size of result buffer + @param[in] action action to describe +*/ + +static void debug_sync_action_string(char *result, uint size, + st_debug_sync_action *action) +{ + char *wtxt= result; + char *wend= wtxt + size - 1; /* Allow emergency '\0'. */ + DBUG_ASSERT(result); + DBUG_ASSERT(action); + + /* If an execute count is present, signal or wait_for are needed too. 
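+    (The parser guarantees this: EXECUTE is only accepted after a SIGNAL
+    or WAIT_FOR keyword, see debug_sync_eval_action().)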
*/ + DBUG_ASSERT(!action->execute || + action->signal.length() || action->wait_for.length()); + + if (action->execute) + { + if (action->signal.length()) + { + wtxt= debug_sync_bmove_len(wtxt, wend, STRING_WITH_LEN("SIGNAL ")); + wtxt= debug_sync_bmove_len(wtxt, wend, action->signal.ptr(), + action->signal.length()); + } + if (action->wait_for.length()) + { + if ((wtxt == result) && (wtxt < wend)) + *(wtxt++)= ' '; + wtxt= debug_sync_bmove_len(wtxt, wend, STRING_WITH_LEN(" WAIT_FOR ")); + wtxt= debug_sync_bmove_len(wtxt, wend, action->wait_for.ptr(), + action->wait_for.length()); + + if (action->timeout != opt_debug_sync_timeout) + { + wtxt+= my_snprintf(wtxt, wend - wtxt, " TIMEOUT %lu", action->timeout); + } + } + if (action->execute != 1) + { + wtxt+= my_snprintf(wtxt, wend - wtxt, " EXECUTE %lu", action->execute); + } + } + if (action->hit_limit) + { + wtxt+= my_snprintf(wtxt, wend - wtxt, "%sHIT_LIMIT %lu", + (wtxt == result) ? "" : " ", action->hit_limit); + } + + /* + If (wtxt == wend) string may not be terminated. + There is one byte left for an emergency termination. + */ + *wtxt= '\0'; +} + + +/** + Print actions. + + @param[in] thd thread handle +*/ + +static void debug_sync_print_actions(THD *thd) +{ + st_debug_sync_control *ds_control= thd->debug_sync_control; + uint idx; + DBUG_ENTER("debug_sync_print_actions"); + DBUG_ASSERT(thd); + + if (!ds_control) + return; + + for (idx= 0; idx < ds_control->ds_active; idx++) + { + const char *dsp_name= ds_control->ds_action[idx].sync_point.c_ptr(); + char action_string[256]; + + debug_sync_action_string(action_string, sizeof(action_string), + ds_control->ds_action + idx); + DBUG_PRINT("debug_sync_list", ("%s %s", dsp_name, action_string)); + } + + DBUG_VOID_RETURN; +} + +#endif /* !defined(DBUG_OFF) */ + + +/** + Compare two actions by sync point name length, string. + + @param[in] arg1 reference to action1 + @param[in] arg2 reference to action2 + + @return difference + @retval == 0 length1/string1 is same as length2/string2 + @retval < 0 length1/string1 is smaller + @retval > 0 length1/string1 is bigger +*/ + +static int debug_sync_qsort_cmp(const void* arg1, const void* arg2) +{ + st_debug_sync_action *action1= (st_debug_sync_action*) arg1; + st_debug_sync_action *action2= (st_debug_sync_action*) arg2; + int diff; + DBUG_ASSERT(action1); + DBUG_ASSERT(action2); + + if (!(diff= action1->sync_point.length() - action2->sync_point.length())) + diff= memcmp(action1->sync_point.ptr(), action2->sync_point.ptr(), + action1->sync_point.length()); + + return diff; +} + + +/** + Find a debug sync action. + + @param[in] actionarr array of debug sync actions + @param[in] quantity number of actions in array + @param[in] dsp_name name of debug sync point to find + @param[in] name_len length of name of debug sync point + + @return action + @retval != NULL found sync point in array + @retval NULL not found + + @description + Binary search. Array needs to be sorted by length, sync point name. 
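+
+    For illustration (sync point names invented): 'go' sorts before
+    'abc', and 'abc' before 'now', because shorter names compare as
+    smaller and names of equal length are ordered by memcmp().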
+*/ + +static st_debug_sync_action *debug_sync_find(st_debug_sync_action *actionarr, + int quantity, + const char *dsp_name, + uint name_len) +{ + st_debug_sync_action *action; + int low ; + int high ; + int mid ; + int diff ; + DBUG_ASSERT(actionarr); + DBUG_ASSERT(dsp_name); + DBUG_ASSERT(name_len); + + low= 0; + high= quantity; + + while (low < high) + { + mid= (low + high) / 2; + action= actionarr + mid; + if (!(diff= name_len - action->sync_point.length()) && + !(diff= memcmp(dsp_name, action->sync_point.ptr(), name_len))) + return action; + if (diff > 0) + low= mid + 1; + else + high= mid - 1; + } + + if (low < quantity) + { + action= actionarr + low; + if ((name_len == action->sync_point.length()) && + !memcmp(dsp_name, action->sync_point.ptr(), name_len)) + return action; + } + + return NULL; +} + + +/** + Reset the debug sync facility. + + @param[in] thd thread handle + + @description + Remove all actions of this thread. + Clear the global signal. +*/ + +static void debug_sync_reset(THD *thd) +{ + st_debug_sync_control *ds_control= thd->debug_sync_control; + DBUG_ENTER("debug_sync_reset"); + DBUG_ASSERT(thd); + DBUG_ASSERT(ds_control); + + /* Remove all actions of this thread. */ + ds_control->ds_active= 0; + + /* Clear the global signal. */ + pthread_mutex_lock(&debug_sync_global.ds_mutex); + debug_sync_global.ds_signal.length(0); + pthread_mutex_unlock(&debug_sync_global.ds_mutex); + + DBUG_VOID_RETURN; +} + + +/** + Remove a debug sync action. + + @param[in] ds_control control object + @param[in] action action to be removed + + @description + Removing an action mainly means to decrement the ds_active counter. + But if the action is between other active action in the array, then + the array needs to be shrinked. The active actions above the one to + be removed have to be moved down by one slot. +*/ + +static void debug_sync_remove_action(st_debug_sync_control *ds_control, + st_debug_sync_action *action) +{ + uint dsp_idx= action - ds_control->ds_action; + DBUG_ENTER("debug_sync_remove_action"); + DBUG_ASSERT(ds_control); + DBUG_ASSERT(ds_control == current_thd->debug_sync_control); + DBUG_ASSERT(action); + DBUG_ASSERT(dsp_idx < ds_control->ds_active); + + /* Decrement the number of currently active actions. */ + ds_control->ds_active--; + + /* + If this was not the last active action in the array, we need to + shift remaining active actions down to keep the array gap-free. + Otherwise binary search might fail or take longer than necessary at + least. Also new actions are always put to the end of the array. + */ + if (ds_control->ds_active > dsp_idx) + { + /* + Do not make save_action an object of class st_debug_sync_action. + Its destructor would tamper with the String pointers. + */ + uchar save_action[sizeof(st_debug_sync_action)]; + + /* + Copy the to-be-removed action object to temporary storage before + the shift copies the string pointers over. Do not use assignment + because it would use assignment operator methods for the Strings. + This would copy the strings. The shift below overwrite the string + pointers without freeing them first. By using memmove() we save + the pointers, which are overwritten by the shift. + */ + memmove(save_action, action, sizeof(st_debug_sync_action)); + + /* Move actions down. */ + memmove(ds_control->ds_action + dsp_idx, + ds_control->ds_action + dsp_idx + 1, + (ds_control->ds_active - dsp_idx) * + sizeof(st_debug_sync_action)); + + /* + Copy back the saved action object to the now free array slot. 
This + replaces the double references of String pointers that have been + produced by the shift. Again do not use an assignment operator to + avoid string allocation/copy. + */ + memmove(ds_control->ds_action + ds_control->ds_active, save_action, + sizeof(st_debug_sync_action)); + } + + DBUG_VOID_RETURN; +} + + +/** + Get a debug sync action. + + @param[in] thd thread handle + @param[in] dsp_name debug sync point name + @param[in] name_len length of sync point name + + @return action + @retval != NULL ok + @retval NULL error + + @description + Find the debug sync action for a debug sync point or make a new one. +*/ + +static st_debug_sync_action *debug_sync_get_action(THD *thd, + const char *dsp_name, + uint name_len) +{ + st_debug_sync_control *ds_control= thd->debug_sync_control; + st_debug_sync_action *action; + DBUG_ENTER("debug_sync_get_action"); + DBUG_ASSERT(thd); + DBUG_ASSERT(dsp_name); + DBUG_ASSERT(name_len); + DBUG_ASSERT(ds_control); + DBUG_PRINT("debug_sync", ("sync_point: '%.*s'", (int) name_len, dsp_name)); + DBUG_PRINT("debug_sync", ("active: %u allocated: %u", + ds_control->ds_active, ds_control->ds_allocated)); + + /* There cannot be more active actions than allocated. */ + DBUG_ASSERT(ds_control->ds_active <= ds_control->ds_allocated); + /* If there are active actions, the action array must be present. */ + DBUG_ASSERT(!ds_control->ds_active || ds_control->ds_action); + + /* Try to reuse existing action if there is one for this sync point. */ + if (ds_control->ds_active && + (action= debug_sync_find(ds_control->ds_action, ds_control->ds_active, + dsp_name, name_len))) + { + /* Reuse an already active sync point action. */ + DBUG_ASSERT((uint)(action - ds_control->ds_action) < ds_control->ds_active); + DBUG_PRINT("debug_sync", ("reuse action idx: %ld", + (long) (action - ds_control->ds_action))); + } + else + { + /* Create a new action. */ + int dsp_idx= ds_control->ds_active++; + set_if_bigger(ds_control->dsp_max_active, ds_control->ds_active); + if (ds_control->ds_active > ds_control->ds_allocated) + { + uint new_alloc= ds_control->ds_active + 3; + void *new_action= my_realloc(ds_control->ds_action, + new_alloc * sizeof(st_debug_sync_action), + MYF(MY_WME | MY_ALLOW_ZERO_PTR)); + if (!new_action) + { + /* Error is reported by my_malloc(). */ + goto err; /* purecov: tested */ + } + ds_control->ds_action= (st_debug_sync_action*) new_action; + ds_control->ds_allocated= new_alloc; + /* Clear memory as we do not run string constructors here. */ + bzero((uchar*) (ds_control->ds_action + dsp_idx), + (new_alloc - dsp_idx) * sizeof(st_debug_sync_action)); + } + DBUG_PRINT("debug_sync", ("added action idx: %u", dsp_idx)); + action= ds_control->ds_action + dsp_idx; + if (action->sync_point.copy(dsp_name, name_len, system_charset_info)) + { + /* Error is reported by my_malloc(). */ + goto err; /* purecov: tested */ + } + action->need_sort= TRUE; + } + DBUG_ASSERT(action >= ds_control->ds_action); + DBUG_ASSERT(action < ds_control->ds_action + ds_control->ds_active); + DBUG_PRINT("debug_sync", ("action: 0x%lx array: 0x%lx count: %u", + (long) action, (long) ds_control->ds_action, + ds_control->ds_active)); + + DBUG_RETURN(action); + + /* purecov: begin tested */ + err: + DBUG_RETURN(NULL); + /* purecov: end */ +} + + +/** + Set a debug sync action. + + @param[in] thd thread handle + @param[in] action synchronization action + + @return status + @retval FALSE ok + @retval TRUE error + + @description + This is called from the debug sync parser. 
It arms the action for + the requested sync point. If the action parsed into an empty action, + it is removed instead. + + Setting an action for a sync point means to make the sync point + active. When it is hit it will execute this action. + + Before parsing, we "get" an action object. This is placed at the + end of the thread's action array unless the requested sync point + has an action already. + + Then the parser fills the action object from the request string. + + Finally the action is "set" for the sync point. If it was parsed + to be empty, it is removed from the array. If it did belong to a + sync point before, the sync point becomes inactive. If the action + became non-empty and it did not belong to a sync point before (it + was added at the end of the action array), the action array needs + to be sorted by sync point. + + If the sync point name is "now", it is executed immediately. +*/ + +static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action) +{ + st_debug_sync_control *ds_control= thd->debug_sync_control; + bool is_dsp_now= FALSE; + DBUG_ENTER("debug_sync_set_action"); + DBUG_ASSERT(thd); + DBUG_ASSERT(action); + DBUG_ASSERT(ds_control); + + action->activation_count= max(action->hit_limit, action->execute); + if (!action->activation_count) + { + debug_sync_remove_action(ds_control, action); + DBUG_PRINT("debug_sync", ("action cleared")); + } + else + { + const char *dsp_name= action->sync_point.c_ptr(); + DBUG_EXECUTE("debug_sync", { + /* Functions as DBUG_PRINT args can change keyword and line nr. */ + const char *sig_emit= action->signal.c_ptr(); + const char *sig_wait= action->wait_for.c_ptr(); + DBUG_PRINT("debug_sync", + ("sync_point: '%s' activation_count: %lu hit_limit: %lu " + "execute: %lu timeout: %lu signal: '%s' wait_for: '%s'", + dsp_name, action->activation_count, + action->hit_limit, action->execute, action->timeout, + sig_emit, sig_wait));}); + + /* Check this before sorting the array. action may move. */ + is_dsp_now= !my_strcasecmp(system_charset_info, dsp_name, "now"); + + if (action->need_sort) + { + action->need_sort= FALSE; + /* Sort actions by (name_len, name). */ + my_qsort(ds_control->ds_action, ds_control->ds_active, + sizeof(st_debug_sync_action), debug_sync_qsort_cmp); + } + } + DBUG_EXECUTE("debug_sync_list", debug_sync_print_actions(thd);); + + /* Execute the special sync point 'now' if activated above. */ + if (is_dsp_now) + { + DEBUG_SYNC(thd, "now"); + /* + If HIT_LIMIT for sync point "now" was 1, the execution of the sync + point decremented it to 0. In this case the following happened: + + - an error message was reported with my_error() and + - the statement was killed with thd->killed= THD::KILL_QUERY. + + If a statement reports an error, it must not call send_ok(). + The calling functions will not call send_ok(), if we return TRUE + from this function. + + thd->killed is also set if the wait is interrupted from a + KILL or KILL QUERY statement. In this case, no error is reported + and shall not be reported as a result of SET DEBUG_SYNC. + Hence, we check for the first condition above. + */ + if (thd->is_error()) + DBUG_RETURN(TRUE); + } + + DBUG_RETURN(FALSE); +} + + +/** + Extract a token from a string. 
+ + @param[out] token_p returns start of token + @param[out] token_length_p returns length of token + @param[in,out] ptr current string pointer, adds '\0' terminators + + @return string pointer or NULL + @retval != NULL ptr behind token terminator or at string end + @retval NULL no token found in remainder of string + + @note + This function assumes that the string is in system_charset_info, + that this charset is single byte for ASCII NUL ('\0'), that no + character except of ASCII NUL ('\0') contains a byte with value 0, + and that ASCII NUL ('\0') is used as the string terminator. + + This function needs to return tokens that are terminated with ASCII + NUL ('\0'). The tokens are used in my_strcasecmp(). Unfortunately + there is no my_strncasecmp(). + + To return the last token without copying it, we require the input + string to be nul terminated. + + @description + This function skips space characters at string begin. + + It returns a pointer to the first non-space character in *token_p. + + If no non-space character is found before the string terminator + ASCII NUL ('\0'), the function returns NULL. *token_p and + *token_length_p remain unchanged in this case (they are not set). + + The function takes a space character or an ASCII NUL ('\0') as a + terminator of the token. The space character could be multi-byte. + + It returns the length of the token in bytes, excluding the + terminator, in *token_length_p. + + If the terminator of the token is ASCII NUL ('\0'), it returns a + pointer to the terminator (string end). + + If the terminator is a space character, it replaces the the first + byte of the terminator character by ASCII NUL ('\0'), skips the (now + corrupted) terminator character, and skips all following space + characters. It returns a pointer to the next non-space character or + to the string terminator ASCII NUL ('\0'). +*/ + +static char *debug_sync_token(char **token_p, uint *token_length_p, char *ptr) +{ + DBUG_ASSERT(token_p); + DBUG_ASSERT(token_length_p); + DBUG_ASSERT(ptr); + + /* Skip leading space */ + while (my_isspace(system_charset_info, *ptr)) + ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr); + + if (!*ptr) + { + ptr= NULL; + goto end; + } + + /* Get token start. */ + *token_p= ptr; + + /* Find token end. */ + while (*ptr && !my_isspace(system_charset_info, *ptr)) + ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr); + + /* Get token length. */ + *token_length_p= ptr - *token_p; + + /* If necessary, terminate token. */ + if (*ptr) + { + /* Get terminator character length. */ + uint mbspacelen= my_mbcharlen(system_charset_info, (uchar) *ptr); + + /* Terminate token. */ + *ptr= '\0'; + + /* Skip the terminator. */ + ptr+= mbspacelen; + + /* Skip trailing space */ + while (my_isspace(system_charset_info, *ptr)) + ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr); + } + + end: + return ptr; +} + + +/** + Extract a number from a string. + + @param[out] number_p returns number + @param[in] actstrptr current pointer in action string + + @return string pointer or NULL + @retval != NULL ptr behind token terminator or at string end + @retval NULL no token found or token is not valid number + + @note + The same assumptions about charset apply as for debug_sync_token(). + + @description + This function fetches a token from the string and converts it + into a number. + + If there is no token left in the string, or the token is not a valid + decimal number, NULL is returned. The result in *number_p is + undefined in this case. 
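+
+    For illustration (the input is invented): called on the remainder
+    "10 EXECUTE 2" it sets *number_p to 10 and returns a pointer to the
+    'E' of EXECUTE; called on "abc" it returns NULL because the token is
+    not a valid decimal number.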
+*/ + +static char *debug_sync_number(ulong *number_p, char *actstrptr) +{ + char *ptr; + char *ept; + char *token; + uint token_length; + DBUG_ASSERT(number_p); + DBUG_ASSERT(actstrptr); + + /* Get token from string. */ + if (!(ptr= debug_sync_token(&token, &token_length, actstrptr))) + goto end; + + *number_p= strtoul(token, &ept, 10); + if (*ept) + ptr= NULL; + + end: + return ptr; +} + + +/** + Evaluate a debug sync action string. + + @param[in] thd thread handle + @param[in,out] action_str action string to receive '\0' terminators + + @return status + @retval FALSE ok + @retval TRUE error + + @description + This is called when the DEBUG_SYNC system variable is set. + Parse action string, build a debug sync action, activate it. + + Before parsing, we "get" an action object. This is placed at the + end of the thread's action array unless the requested sync point + has an action already. + + Then the parser fills the action object from the request string. + + Finally the action is "set" for the sync point. This means that the + sync point becomes active or inactive, depending on the action + values. + + @note + The input string needs to be ASCII NUL ('\0') terminated. We split + nul-terminated tokens in it without copy. + + @see the function comment of debug_sync_token() for more constraints + for the string. +*/ + +static bool debug_sync_eval_action(THD *thd, char *action_str) +{ + st_debug_sync_action *action= NULL; + const char *errmsg; + char *ptr; + char *token; + uint token_length; + DBUG_ENTER("debug_sync_eval_action"); + DBUG_ASSERT(thd); + DBUG_ASSERT(action_str); + + /* + Get debug sync point name. Or a special command. + */ + if (!(ptr= debug_sync_token(&token, &token_length, action_str))) + { + errmsg= "Missing synchronization point name"; + goto err; + } + + /* + If there is a second token, the first one is the sync point name. + */ + if (*ptr) + { + /* Get an action object to collect the requested action parameters. */ + action= debug_sync_get_action(thd, token, token_length); + if (!action) + { + /* Error message is sent. */ + DBUG_RETURN(TRUE); /* purecov: tested */ + } + } + + /* + Get kind of action to be taken at sync point. + */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + { + /* No action present. Try special commands. Token unchanged. */ + + /* + Try RESET. + */ + if (!my_strcasecmp(system_charset_info, token, "RESET")) + { + /* It is RESET. Reset all actions and global signal. */ + debug_sync_reset(thd); + goto end; + } + + /* Token unchanged. It still contains sync point name. */ + errmsg= "Missing action after synchronization point name '%.*s'"; + goto err; + } + + /* + Check for pseudo actions first. Start with actions that work on + an existing action. + */ + DBUG_ASSERT(action); + + /* + Try TEST. + */ + if (!my_strcasecmp(system_charset_info, token, "TEST")) + { + /* It is TEST. Nothing must follow it. */ + if (*ptr) + { + errmsg= "Nothing must follow action TEST"; + goto err; + } + + /* Execute sync point. */ + debug_sync(thd, action->sync_point.ptr(), action->sync_point.length()); + /* Fix statistics. This was not a real hit of the sync point. */ + thd->debug_sync_control->dsp_hits--; + goto end; + } + + /* + Now check for actions that define a new action. + Initialize action. Do not use bzero(). Strings may have malloced. + */ + action->activation_count= 0; + action->hit_limit= 0; + action->execute= 0; + action->timeout= 0; + action->signal.length(0); + action->wait_for.length(0); + + /* + Try CLEAR. 
+ */ + if (!my_strcasecmp(system_charset_info, token, "CLEAR")) + { + /* It is CLEAR. Nothing must follow it. */ + if (*ptr) + { + errmsg= "Nothing must follow action CLEAR"; + goto err; + } + + /* Set (clear/remove) action. */ + goto set_action; + } + + /* + Now check for real sync point actions. + */ + + /* + Try SIGNAL. + */ + if (!my_strcasecmp(system_charset_info, token, "SIGNAL")) + { + /* It is SIGNAL. Signal name must follow. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + { + errmsg= "Missing signal name after action SIGNAL"; + goto err; + } + if (action->signal.copy(token, token_length, system_charset_info)) + { + /* Error is reported by my_malloc(). */ + /* purecov: begin tested */ + errmsg= NULL; + goto err; + /* purecov: end */ + } + + /* Set default for EXECUTE option. */ + action->execute= 1; + + /* Get next token. If none follows, set action. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + goto set_action; + } + + /* + Try WAIT_FOR. + */ + if (!my_strcasecmp(system_charset_info, token, "WAIT_FOR")) + { + /* It is WAIT_FOR. Wait_for signal name must follow. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + { + errmsg= "Missing signal name after action WAIT_FOR"; + goto err; + } + if (action->wait_for.copy(token, token_length, system_charset_info)) + { + /* Error is reported by my_malloc(). */ + /* purecov: begin tested */ + errmsg= NULL; + goto err; + /* purecov: end */ + } + + /* Set default for EXECUTE and TIMEOUT options. */ + action->execute= 1; + action->timeout= opt_debug_sync_timeout; + + /* Get next token. If none follows, set action. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + goto set_action; + + /* + Try TIMEOUT. + */ + if (!my_strcasecmp(system_charset_info, token, "TIMEOUT")) + { + /* It is TIMEOUT. Number must follow. */ + if (!(ptr= debug_sync_number(&action->timeout, ptr))) + { + errmsg= "Missing valid number after TIMEOUT"; + goto err; + } + + /* Get next token. If none follows, set action. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + goto set_action; + } + } + + /* + Try EXECUTE. + */ + if (!my_strcasecmp(system_charset_info, token, "EXECUTE")) + { + /* + EXECUTE requires either SIGNAL and/or WAIT_FOR to be present. + In this case action->execute has been preset to 1. + */ + if (!action->execute) + { + errmsg= "Missing action before EXECUTE"; + goto err; + } + + /* Number must follow. */ + if (!(ptr= debug_sync_number(&action->execute, ptr))) + { + errmsg= "Missing valid number after EXECUTE"; + goto err; + } + + /* Get next token. If none follows, set action. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + goto set_action; + } + + /* + Try HIT_LIMIT. + */ + if (!my_strcasecmp(system_charset_info, token, "HIT_LIMIT")) + { + /* Number must follow. */ + if (!(ptr= debug_sync_number(&action->hit_limit, ptr))) + { + errmsg= "Missing valid number after HIT_LIMIT"; + goto err; + } + + /* Get next token. If none follows, set action. */ + if (!(ptr= debug_sync_token(&token, &token_length, ptr))) + goto set_action; + } + + errmsg= "Illegal or out of order stuff: '%.*s'"; + + err: + if (errmsg) + { + /* + NOTE: errmsg must either have %.*s or none % at all. + It can be NULL if an error message is already reported + (e.g. by my_malloc()). + */ + set_if_smaller(token_length, 64); /* Limit error message length. 
*/ + my_printf_error(ER_PARSE_ERROR, errmsg, MYF(0), token_length, token); + } + if (action) + debug_sync_remove_action(thd->debug_sync_control, action); + DBUG_RETURN(TRUE); + + set_action: + DBUG_RETURN(debug_sync_set_action(thd, action)); + + end: + DBUG_RETURN(FALSE); +} + + +/** + Check if the system variable 'debug_sync' can be set. + + @param[in] thd thread handle + @param[in] var set variable request + + @return status + @retval FALSE ok, variable can be set + @retval TRUE error, variable cannot be set +*/ + +bool sys_var_debug_sync::check(THD *thd, set_var *var) +{ + DBUG_ENTER("sys_var_debug_sync::check"); + DBUG_ASSERT(thd); + DBUG_ASSERT(var); + + /* + Variable can be set for the session only. + + This could be changed later. Then we need to have a global array of + actions in addition to the thread local ones. SET GLOBAL would + manage the global array, SET [SESSION] the local array. A sync point + would need to look for a local and a global action. Setting and + executing of global actions need to be protected by a mutex. + + The purpose of global actions could be to allow synchronizing with + connectionless threads that cannot execute SET statements. + */ + if (var->type == OPT_GLOBAL) + { + my_error(ER_LOCAL_VARIABLE, MYF(0), name); + DBUG_RETURN(TRUE); + } + + /* + Do not check for disabled facility. Test result should not + unnecessarily differ from enabled facility. + */ + + /* + Facility requires SUPER privilege. Sync points could be inside + global mutexes (e.g. LOCK_open). Waiting there forever would + stall the whole server. + */ + DBUG_RETURN(check_global_access(thd, SUPER_ACL)); +} + + +/** + Set the system variable 'debug_sync'. + + @param[in] thd thread handle + @param[in] var set variable request + + @return status + @retval FALSE ok, variable is set + @retval TRUE error, variable could not be set + + @note + "Setting" of the system variable 'debug_sync' does not mean to + assign a value to it as usual. Instead a debug sync action is parsed + from the input string and stored apart from the variable value. + + @note + For efficiency reasons, the action string parser places '\0' + terminators in the string. So we need to take a copy here. +*/ + +bool sys_var_debug_sync::update(THD *thd, set_var *var) +{ + char *val_str; + String *val_ptr; + String val_buf; + DBUG_ENTER("sys_var_debug_sync::update"); + DBUG_ASSERT(thd); + + /* + Depending on the value type (string literal, user variable, ...) + val_buf receives a copy of the value or not. But we always need + a copy. So we take a copy, if it is not done by val_str(). + If val_str() puts a copy into val_buf, then it returns &val_buf, + otherwise it returns a pointer to the string object that we need + to copy. + */ + val_ptr= var ? var->value->val_str(&val_buf) : &val_buf; + if (val_ptr != &val_buf) + { + val_buf.copy(*val_ptr); + } + val_str= val_buf.c_ptr(); + DBUG_PRINT("debug_sync", ("set action: '%s'", val_str)); + + /* + debug_sync_eval_action() places '\0' in the string, which itself + must be '\0' terminated. + */ + DBUG_RETURN(opt_debug_sync_timeout ? + debug_sync_eval_action(thd, val_str) : + FALSE); +} + + +/** + Retrieve the value of the system variable 'debug_sync'. + + @param[in] thd thread handle + @param[in] type variable type, unused + @param[in] base variable base, unused + + @return string + @retval != NULL ok, string pointer + @retval NULL memory allocation error + + @note + The value of the system variable 'debug_sync' reflects if + the facility is enabled ("ON") or disabled (default, "OFF"). 
+ + When "ON", the current signal is added. +*/ + +uchar *sys_var_debug_sync::value_ptr(THD *thd, + enum_var_type type __attribute__((unused)), + LEX_STRING *base __attribute__((unused))) +{ + char *value; + DBUG_ENTER("sys_var_debug_sync::value_ptr"); + DBUG_ASSERT(thd); + + if (opt_debug_sync_timeout) + { + static char on[]= "ON - current signal: '"; + + // Ensure exclusive access to debug_sync_global.ds_signal + pthread_mutex_lock(&debug_sync_global.ds_mutex); + + size_t lgt= (sizeof(on) /* includes '\0' */ + + debug_sync_global.ds_signal.length() + 1 /* for '\'' */); + char *vend; + char *vptr; + + if ((value= (char*) alloc_root(thd->mem_root, lgt))) + { + vend= value + lgt - 1; /* reserve space for '\0'. */ + vptr= debug_sync_bmove_len(value, vend, STRING_WITH_LEN(on)); + vptr= debug_sync_bmove_len(vptr, vend, debug_sync_global.ds_signal.ptr(), + debug_sync_global.ds_signal.length()); + if (vptr < vend) + *(vptr++)= '\''; + *vptr= '\0'; /* We have one byte reserved for the worst case. */ + } + pthread_mutex_unlock(&debug_sync_global.ds_mutex); + } + else + { + /* purecov: begin tested */ + value= strmake_root(thd->mem_root, STRING_WITH_LEN("OFF")); + /* purecov: end */ + } + + DBUG_RETURN((uchar*) value); +} + + +/** + Execute requested action at a synchronization point. + + @param[in] thd thread handle + @param[in] action action to be executed + + @note + This is to be called only if activation count > 0. +*/ + +static void debug_sync_execute(THD *thd, st_debug_sync_action *action) +{ + IF_DBUG(const char *dsp_name= action->sync_point.c_ptr()); + IF_DBUG(const char *sig_emit= action->signal.c_ptr()); + IF_DBUG(const char *sig_wait= action->wait_for.c_ptr()); + DBUG_ENTER("debug_sync_execute"); + DBUG_ASSERT(thd); + DBUG_ASSERT(action); + DBUG_PRINT("debug_sync", + ("sync_point: '%s' activation_count: %lu hit_limit: %lu " + "execute: %lu timeout: %lu signal: '%s' wait_for: '%s'", + dsp_name, action->activation_count, action->hit_limit, + action->execute, action->timeout, sig_emit, sig_wait)); + + DBUG_ASSERT(action->activation_count); + action->activation_count--; + + if (action->execute) + { + const char *old_proc_info; + + action->execute--; + + /* + If we will be going to wait, set proc_info for the PROCESSLIST table. + Do this before emitting the signal, so other threads can see it + if they awake before we enter_cond() below. + */ + if (action->wait_for.length()) + { + st_debug_sync_control *ds_control= thd->debug_sync_control; + strxnmov(ds_control->ds_proc_info, sizeof(ds_control->ds_proc_info)-1, + "debug sync point: ", action->sync_point.c_ptr(), NullS); + old_proc_info= thd->proc_info; + thd_proc_info(thd, ds_control->ds_proc_info); + } + + /* + Take mutex to ensure that only one thread access + debug_sync_global.ds_signal at a time. Need to take mutex for + read access too, to create a memory barrier in order to avoid that + threads just reads an old cached version of the signal. + */ + pthread_mutex_lock(&debug_sync_global.ds_mutex); + + if (action->signal.length()) + { + /* Copy the signal to the global variable. */ + if (debug_sync_global.ds_signal.copy(action->signal)) + { + /* + Error is reported by my_malloc(). + We must disable the facility. We have no way to return an error. + */ + debug_sync_emergency_disable(); /* purecov: tested */ + } + /* Wake threads waiting in a sync point. 
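+         A broadcast is needed because all waiting threads share this one
+         condition variable while waiting for different signal strings;
+         each waiter re-checks its own wait_for string after waking up.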
*/ + pthread_cond_broadcast(&debug_sync_global.ds_cond); + DBUG_PRINT("debug_sync_exec", ("signal '%s' at: '%s'", + sig_emit, dsp_name)); + } /* end if (action->signal.length()) */ + + if (action->wait_for.length()) + { + pthread_mutex_t *old_mutex; + pthread_cond_t *old_cond; + int error= 0; + struct timespec abstime; + + /* + We don't use enter_cond()/exit_cond(). They do not save old + mutex and cond. This would prohibit the use of DEBUG_SYNC + between other places of enter_cond() and exit_cond(). + */ + old_mutex= thd->mysys_var->current_mutex; + old_cond= thd->mysys_var->current_cond; + thd->mysys_var->current_mutex= &debug_sync_global.ds_mutex; + thd->mysys_var->current_cond= &debug_sync_global.ds_cond; + + set_timespec(abstime, action->timeout); + DBUG_EXECUTE("debug_sync_exec", { + /* Functions as DBUG_PRINT args can change keyword and line nr. */ + const char *sig_glob= debug_sync_global.ds_signal.c_ptr(); + DBUG_PRINT("debug_sync_exec", + ("wait for '%s' at: '%s' curr: '%s'", + sig_wait, dsp_name, sig_glob));}); + + /* + Wait until global signal string matches the wait_for string. + Interrupt when thread or query is killed or facility disabled. + The facility can become disabled when some thread cannot get + the required dynamic memory allocated. + */ + while (stringcmp(&debug_sync_global.ds_signal, &action->wait_for) && + !thd->killed && opt_debug_sync_timeout) + { + error= pthread_cond_timedwait(&debug_sync_global.ds_cond, + &debug_sync_global.ds_mutex, + &abstime); + DBUG_EXECUTE("debug_sync", { + /* Functions as DBUG_PRINT args can change keyword and line nr. */ + const char *sig_glob= debug_sync_global.ds_signal.c_ptr(); + DBUG_PRINT("debug_sync", + ("awoke from %s global: %s error: %d", + sig_wait, sig_glob, error));}); + if (error == ETIMEDOUT || error == ETIME) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_DEBUG_SYNC_TIMEOUT, ER(ER_DEBUG_SYNC_TIMEOUT)); + break; + } + error= 0; + } + DBUG_EXECUTE("debug_sync_exec", + if (thd->killed) + DBUG_PRINT("debug_sync_exec", + ("killed %d from '%s' at: '%s'", + thd->killed, sig_wait, dsp_name)); + else + DBUG_PRINT("debug_sync_exec", + ("%s from '%s' at: '%s'", + error ? "timeout" : "resume", + sig_wait, dsp_name));); + + /* + We don't use enter_cond()/exit_cond(). They do not save old + mutex and cond. This would prohibit the use of DEBUG_SYNC + between other places of enter_cond() and exit_cond(). The + protected mutex must always unlocked _before_ mysys_var->mutex + is locked. (See comment in THD::exit_cond().) + */ + pthread_mutex_unlock(&debug_sync_global.ds_mutex); + pthread_mutex_lock(&thd->mysys_var->mutex); + thd->mysys_var->current_mutex= old_mutex; + thd->mysys_var->current_cond= old_cond; + thd_proc_info(thd, old_proc_info); + pthread_mutex_unlock(&thd->mysys_var->mutex); + + } + else + { + /* In case we don't wait, we just release the mutex. */ + pthread_mutex_unlock(&debug_sync_global.ds_mutex); + } /* end if (action->wait_for.length()) */ + + } /* end if (action->execute) */ + + /* hit_limit is zero for infinite. Don't decrement unconditionally. */ + if (action->hit_limit) + { + if (!--action->hit_limit) + { + thd->killed= THD::KILL_QUERY; + my_error(ER_DEBUG_SYNC_HIT_LIMIT, MYF(0)); + } + DBUG_PRINT("debug_sync_exec", ("hit_limit: %lu at: '%s'", + action->hit_limit, dsp_name)); + } + + DBUG_VOID_RETURN; +} + + +/** + Execute requested action at a synchronization point. 
+ + @param[in] thd thread handle + @param[in] sync_point_name name of synchronization point + @param[in] name_len length of sync point name +*/ + +void debug_sync(THD *thd, const char *sync_point_name, size_t name_len) +{ + st_debug_sync_control *ds_control= thd->debug_sync_control; + st_debug_sync_action *action; + DBUG_ENTER("debug_sync"); + DBUG_ASSERT(thd); + DBUG_ASSERT(sync_point_name); + DBUG_ASSERT(name_len); + DBUG_ASSERT(ds_control); + DBUG_PRINT("debug_sync_point", ("hit: '%s'", sync_point_name)); + + /* Statistics. */ + ds_control->dsp_hits++; + + if (ds_control->ds_active && + (action= debug_sync_find(ds_control->ds_action, ds_control->ds_active, + sync_point_name, name_len)) && + action->activation_count) + { + /* Sync point is active (action exists). */ + debug_sync_execute(thd, action); + + /* Statistics. */ + ds_control->dsp_executed++; + + /* If action became inactive, remove it to shrink the search array. */ + if (!action->activation_count) + debug_sync_remove_action(ds_control, action); + } + + DBUG_VOID_RETURN; +} + +#endif /* defined(ENABLED_DEBUG_SYNC) */ diff --git a/sql/debug_sync.h b/sql/debug_sync.h new file mode 100644 index 00000000000..f4cd0b364cf --- /dev/null +++ b/sql/debug_sync.h @@ -0,0 +1,60 @@ +#ifndef DEBUG_SYNC_INCLUDED +#define DEBUG_SYNC_INCLUDED + +/* Copyright (C) 2008 Sun Microsystems, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/** + @file + + Declarations for the Debug Sync Facility. See debug_sync.cc for details. +*/ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +#include + +class THD; + +#if defined(ENABLED_DEBUG_SYNC) + +/* Macro to be put in the code at synchronization points. */ +#define DEBUG_SYNC(_thd_, _sync_point_name_) \ + do { if (unlikely(opt_debug_sync_timeout)) \ + debug_sync(_thd_, STRING_WITH_LEN(_sync_point_name_)); \ + } while (0) + +/* Command line option --debug-sync-timeout. See mysqld.cc. */ +extern uint opt_debug_sync_timeout; + +/* Default WAIT_FOR timeout if command line option is given without argument. */ +#define DEBUG_SYNC_DEFAULT_WAIT_TIMEOUT 300 + +/* Debug Sync prototypes. See debug_sync.cc. 
*/ +extern int debug_sync_init(void); +extern void debug_sync_end(void); +extern void debug_sync_init_thread(THD *thd); +extern void debug_sync_end_thread(THD *thd); +extern void debug_sync(THD *thd, const char *sync_point_name, size_t name_len); + +#else /* defined(ENABLED_DEBUG_SYNC) */ + +#define DEBUG_SYNC(_thd_, _sync_point_name_) /* disabled DEBUG_SYNC */ + +#endif /* defined(ENABLED_DEBUG_SYNC) */ + +#endif /* DEBUG_SYNC_INCLUDED */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3433b54cb29..896be4a7f19 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -26,6 +26,7 @@ #include "mysqld_suffix.h" #include "mysys_err.h" #include "events.h" +#include "debug_sync.h" #include "../storage/myisam/ha_myisam.h" @@ -486,6 +487,9 @@ my_bool lower_case_file_system= 0; my_bool opt_large_pages= 0; my_bool opt_myisam_use_mmap= 0; uint opt_large_page_size= 0; +#if defined(ENABLED_DEBUG_SYNC) +uint opt_debug_sync_timeout= 0; +#endif /* defined(ENABLED_DEBUG_SYNC) */ my_bool opt_old_style_user_limits= 0, trust_function_creators= 0; /* True if there is at least one per-hour limit for some user, so we should @@ -1333,6 +1337,10 @@ void clean_up(bool print_message) #ifdef USE_REGEX my_regex_end(); #endif +#if defined(ENABLED_DEBUG_SYNC) + /* End the debug sync facility. See debug_sync.cc. */ + debug_sync_end(); +#endif /* defined(ENABLED_DEBUG_SYNC) */ #if !defined(EMBEDDED_LIBRARY) if (!opt_bootstrap) @@ -3464,6 +3472,12 @@ static int init_common_variables(const char *conf_file_name, int argc, sys_var_slow_log_path.value= my_strdup(s, MYF(0)); sys_var_slow_log_path.value_length= strlen(s); +#if defined(ENABLED_DEBUG_SYNC) + /* Initialize the debug sync facility. See debug_sync.cc. */ + if (debug_sync_init()) + return 1; /* purecov: tested */ +#endif /* defined(ENABLED_DEBUG_SYNC) */ + #if (ENABLE_TEMP_POOL) if (use_temp_pool && bitmap_init(&temp_pool,0,1024,1)) return 1; @@ -4858,7 +4872,7 @@ void create_thread_to_handle_connection(THD *thd) handle_one_connection, (void*) thd))) { - /* purify: begin inspected */ + /* purecov: begin inspected */ DBUG_PRINT("error", ("Can't create thread to handle request (error %d)", error)); @@ -5671,6 +5685,9 @@ enum options_mysqld OPT_SECURE_FILE_PRIV, OPT_MIN_EXAMINED_ROW_LIMIT, OPT_LOG_SLOW_SLAVE_STATEMENTS, +#if defined(ENABLED_DEBUG_SYNC) + OPT_DEBUG_SYNC_TIMEOUT, +#endif /* defined(ENABLED_DEBUG_SYNC) */ OPT_OLD_MODE, OPT_SLAVE_EXEC_MODE, OPT_GENERAL_LOG_FILE, @@ -6442,6 +6459,14 @@ log and this option does nothing anymore.", "Decision to use in heuristic recover process. Possible values are COMMIT or ROLLBACK.", (uchar**) &opt_tc_heuristic_recover, (uchar**) &opt_tc_heuristic_recover, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#if defined(ENABLED_DEBUG_SYNC) + {"debug-sync-timeout", OPT_DEBUG_SYNC_TIMEOUT, + "Enable the debug sync facility " + "and optionally specify a default wait timeout in seconds. 
" + "A zero value keeps the facility disabled.", + (uchar**) &opt_debug_sync_timeout, 0, + 0, GET_UINT, OPT_ARG, 0, 0, UINT_MAX, 0, 0, 0}, +#endif /* defined(ENABLED_DEBUG_SYNC) */ {"temp-pool", OPT_TEMP_POOL, #if (ENABLE_TEMP_POOL) "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", @@ -7626,6 +7651,9 @@ static int mysql_init_variables(void) bzero((uchar*) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); bzero((char *) &global_status_var, sizeof(global_status_var)); opt_large_pages= 0; +#if defined(ENABLED_DEBUG_SYNC) + opt_debug_sync_timeout= 0; +#endif /* defined(ENABLED_DEBUG_SYNC) */ key_map_full.set_all(); /* Character sets */ @@ -8360,6 +8388,22 @@ mysqld_get_one_option(int optid, lower_case_table_names= argument ? atoi(argument) : 1; lower_case_table_names_used= 1; break; +#if defined(ENABLED_DEBUG_SYNC) + case OPT_DEBUG_SYNC_TIMEOUT: + /* + Debug Sync Facility. See debug_sync.cc. + Default timeout for WAIT_FOR action. + Default value is zero (facility disabled). + If option is given without an argument, supply a non-zero value. + */ + if (!argument) + { + /* purecov: begin tested */ + opt_debug_sync_timeout= DEBUG_SYNC_DEFAULT_WAIT_TIMEOUT; + /* purecov: end */ + } + break; +#endif /* defined(ENABLED_DEBUG_SYNC) */ } return 0; } diff --git a/sql/set_var.cc b/sql/set_var.cc index cd01f6c8624..b80bdde9670 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -625,6 +625,12 @@ static sys_var_long_ptr sys_table_cache_size(&vars, "table_open_cache", &table_cache_size); static sys_var_long_ptr sys_table_lock_wait_timeout(&vars, "table_lock_wait_timeout", &table_lock_wait_timeout); + +#if defined(ENABLED_DEBUG_SYNC) +/* Debug Sync Facility. Implemented in debug_sync.cc. */ +static sys_var_debug_sync sys_debug_sync(&vars, "debug_sync"); +#endif /* defined(ENABLED_DEBUG_SYNC) */ + static sys_var_long_ptr sys_thread_cache_size(&vars, "thread_cache_size", &thread_cache_size); #if HAVE_POOL_OF_THREADS == 1 diff --git a/sql/set_var.h b/sql/set_var.h index a54591b0fd6..fa747107870 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -637,6 +637,21 @@ public: }; #endif /* DBUG_OFF */ +#if defined(ENABLED_DEBUG_SYNC) +/* Debug Sync Facility. Implemented in debug_sync.cc. 
*/ +class sys_var_debug_sync :public sys_var_thd +{ +public: + sys_var_debug_sync(sys_var_chain *chain, const char *name_arg) + :sys_var_thd(name_arg) + { chain_sys_var(chain); } + bool check(THD *thd, set_var *var); + bool update(THD *thd, set_var *var); + SHOW_TYPE show_type() { return SHOW_CHAR; } + bool check_update_type(Item_result type) { return type != STRING_RESULT; } + uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; +#endif /* defined(ENABLED_DEBUG_SYNC) */ /* some variables that require special handling */ diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index fdad2a44b68..a17ad94ba82 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6206,3 +6206,10 @@ ER_TOO_MANY_CONCURRENT_TRXS WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED eng "Non-ASCII separator arguments are not fully supported" + +ER_DEBUG_SYNC_TIMEOUT + eng "debug sync point wait timed out" + ger "Debug Sync Point Wartezeit überschritten" +ER_DEBUG_SYNC_HIT_LIMIT + eng "debug sync point hit limit reached" + ger "Debug Sync Point Hit Limit erreicht" diff --git a/sql/sql_base.cc b/sql/sql_base.cc index f55d3fc5006..af3ae03424e 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -17,6 +17,7 @@ /* Basic functions needed by many modules */ #include "mysql_priv.h" +#include "debug_sync.h" #include "sql_select.h" #include "sp_head.h" #include "sp.h" @@ -950,6 +951,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock, close_old_data_files(thd,thd->open_tables,1,1); mysql_ha_flush(thd); + DEBUG_SYNC(thd, "after_flush_unlock"); bool found=1; /* Wait until all threads has closed all the tables we had locked */ @@ -5289,6 +5291,8 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) } } + DEBUG_SYNC(thd, "before_lock_tables_takes_lock"); + if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start), lock_flag, need_reopen))) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index daef5a26742..f75dc2cb88a 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -42,6 +42,7 @@ #include "sp_rcontext.h" #include "sp_cache.h" +#include "debug_sync.h" /* The following is used to initialise Table_ident with a internal @@ -584,6 +585,9 @@ THD::THD() derived_tables_processing(FALSE), spcont(NULL), m_parser_state(NULL) +#if defined(ENABLED_DEBUG_SYNC) + , debug_sync_control(0) +#endif /* defined(ENABLED_DEBUG_SYNC) */ { ulong tmp; @@ -832,6 +836,11 @@ void THD::init(void) reset_current_stmt_binlog_row_based(); bzero((char *) &status_var, sizeof(status_var)); sql_log_bin_toplevel= options & OPTION_BIN_LOG; + +#if defined(ENABLED_DEBUG_SYNC) + /* Initialize the Debug Sync Facility. See debug_sync.cc. */ + debug_sync_init_thread(this); +#endif /* defined(ENABLED_DEBUG_SYNC) */ } @@ -911,6 +920,12 @@ void THD::cleanup(void) lock=locked_tables; locked_tables=0; close_thread_tables(this); } + +#if defined(ENABLED_DEBUG_SYNC) + /* End the Debug Sync Facility. See debug_sync.cc. */ + debug_sync_end_thread(this); +#endif /* defined(ENABLED_DEBUG_SYNC) */ + mysql_ha_cleanup(this); delete_dynamic(&user_var_events); hash_free(&user_vars); diff --git a/sql/sql_class.h b/sql/sql_class.h index c38eb17f191..27b59bde7be 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1896,6 +1896,11 @@ public: partition_info *work_part_info; #endif +#if defined(ENABLED_DEBUG_SYNC) + /* Debug Sync facility. See debug_sync.cc. 
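+     Allocated in debug_sync_init_thread() (called from THD::init()) and
+     released in debug_sync_end_thread() (called from THD::cleanup());
+     remains NULL while the facility is disabled.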
*/ + struct st_debug_sync_control *debug_sync_control; +#endif /* defined(ENABLED_DEBUG_SYNC) */ + THD(); ~THD(); -- cgit v1.2.1 From df2122a262cc421fff27f5e2265d7723a6e48829 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Wed, 30 Sep 2009 14:50:25 +0200 Subject: Bug#34895 'show procedure status' or 'show function status' + 'flush tables' crashes The server crashes when 'show procedure status' and 'flush tables' are run concurrently. This is caused by the way mysql.proc table is added twice to the list of table to lock although the requirements on the current locking API assumes differently. No test case is submitted because of the nature of the crash which is currently difficult to reproduce in a deterministic way. This is a backport from 5.1 myisam/mi_dbug.c: * check_table_is_closed is only used in EXTRA_DEBUG mode but since it is iterating over myisam shared data it still needs to be protected by an appropriate mutex. sql/sql_yacc.yy: * Since the I_S mechanism is already handling the open and close of mysql.proc there is no need for the method sp_add_to_query_tables. --- sql/sql_yacc.yy | 4 ---- 1 file changed, 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 0aa6308ab93..c0247fd8d56 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -8276,8 +8276,6 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SELECT; lex->orig_sql_command= SQLCOM_SHOW_STATUS_PROC; - if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) - MYSQL_YYABORT; if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) MYSQL_YYABORT; } @@ -8286,8 +8284,6 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SELECT; lex->orig_sql_command= SQLCOM_SHOW_STATUS_FUNC; - if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) - MYSQL_YYABORT; if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) MYSQL_YYABORT; } -- cgit v1.2.1 From 565f1bc4a1ccd9a7b853980d9aca5861d75ed104 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Wed, 30 Sep 2009 18:38:02 -0300 Subject: Bug#47525: MySQL crashed (Federated) On Mac OS X or Windows, sending a SIGHUP to the server or a asynchronous flush (triggered by flush_time), would cause the server to crash. The problem was that a hook used to detach client API handles wasn't prepared to handle cases where the thread does not have a associated session. The solution is to verify whether the thread has a associated session before trying to detach a handle. mysql-test/r/federated_debug.result: Add test case result for Bug#47525 mysql-test/t/federated_debug-master.opt: Debug point. mysql-test/t/federated_debug.test: Add test case for Bug#47525 sql/slave.cc: Check whether a the thread has a associated session. sql/sql_parse.cc: Add debug code to simulate a reload without thread session. 
--- sql/slave.cc | 2 +- sql/sql_parse.cc | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/slave.cc b/sql/slave.cc index 17855ed04e5..dd29a3f45c9 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4756,7 +4756,7 @@ extern "C" void slave_io_thread_detach_vio() { #ifdef SIGNAL_WITH_VIO_CLOSE THD *thd= current_thd; - if (thd->slave_thread) + if (thd && thd->slave_thread) thd->clear_active_vio(); #endif } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 65b2a9d60b0..62cca71ee97 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2161,6 +2161,26 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (check_global_access(thd,RELOAD_ACL)) break; mysql_log.write(thd,command,NullS); +#ifndef DBUG_OFF + DBUG_EXECUTE_IF("simulate_detached_thread_refresh", + { + /* + Simulate a reload without a attached thread session. + Provides a environment similar to that of when the + server receives a SIGHUP signal and reloads caches + and flushes tables. + */ + bool res; + my_pthread_setspecific_ptr(THR_THD, NULL); + res= reload_acl_and_cache(NULL, options | REFRESH_FAST, + NULL, ¬_used); + my_pthread_setspecific_ptr(THR_THD, thd); + if (!res) + send_ok(thd); + break; + } + ); +#endif if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, ¬_used)) send_ok(thd); break; -- cgit v1.2.1 From e218ac06ed3721e6b5496e1954fc9afd9d95cf24 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Wed, 30 Sep 2009 19:14:55 -0300 Subject: Post-merge fix: DBUG macros are wrapped inside a loop. sql/sql_parse.cc: DBUG macros are wrapped inside a loop. Allow to break the command switch from within a DBUG macro. --- sql/sql_parse.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 62cca71ee97..88e70a129c4 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2177,7 +2177,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, my_pthread_setspecific_ptr(THR_THD, thd); if (!res) send_ok(thd); - break; + goto end; } ); #endif @@ -2318,6 +2318,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } + + /* Break the switch for DBUG wrapped code. */ +#ifndef DBUG_OFF +end: +#endif + if (thd->lock || thd->open_tables || thd->derived_tables || thd->prelocked_mode) { -- cgit v1.2.1 From 3c5d9f4272f484fa71426581962c31413f47d984 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Wed, 30 Sep 2009 19:59:30 -0300 Subject: Post-merge cleanup: Reorganize code for better comprehensibility. Removes the need of a hack (the jump to label). --- sql/sql_parse.cc | 44 ++++++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 24 deletions(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 88e70a129c4..f34aa3c3bad 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2162,26 +2162,27 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; mysql_log.write(thd,command,NullS); #ifndef DBUG_OFF - DBUG_EXECUTE_IF("simulate_detached_thread_refresh", - { - /* - Simulate a reload without a attached thread session. - Provides a environment similar to that of when the - server receives a SIGHUP signal and reloads caches - and flushes tables. 
- */ - bool res; - my_pthread_setspecific_ptr(THR_THD, NULL); - res= reload_acl_and_cache(NULL, options | REFRESH_FAST, - NULL, ¬_used); - my_pthread_setspecific_ptr(THR_THD, thd); - if (!res) - send_ok(thd); - goto end; - } - ); + bool debug_simulate= FALSE; + DBUG_EXECUTE_IF("simulate_detached_thread_refresh", debug_simulate= TRUE;); + if (debug_simulate) + { + /* + Simulate a reload without a attached thread session. + Provides a environment similar to that of when the + server receives a SIGHUP signal and reloads caches + and flushes tables. + */ + bool res; + my_pthread_setspecific_ptr(THR_THD, NULL); + res= reload_acl_and_cache(NULL, options | REFRESH_FAST, + NULL, ¬_used); + my_pthread_setspecific_ptr(THR_THD, thd); + if (!res) + send_ok(thd); + break; + } #endif - if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, ¬_used)) + if (!reload_acl_and_cache(thd, options, NULL, ¬_used)) send_ok(thd); break; } @@ -2319,11 +2320,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } - /* Break the switch for DBUG wrapped code. */ -#ifndef DBUG_OFF -end: -#endif - if (thd->lock || thd->open_tables || thd->derived_tables || thd->prelocked_mode) { -- cgit v1.2.1 From 41e0d0a3ab76fcd78b7375ab28cd5e8c4e71bdf9 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 1 Oct 2009 07:19:36 +0800 Subject: Bug #45677 Slave stops with Duplicate entry for key PRIMARY when using trigger The problem is that there is only one autoinc value associated with the query when binlogging. If more than one autoinc values are used in the query, the autoinc values after the first one can be inserted wrongly on slave. So these autoinc values can become inconsistent on master and slave. The problem is resolved by marking all the statements that invoke a trigger or call a function that updated autoinc fields as unsafe, and will switch to row-format in Mixed mode. Actually, the statement is safe if just one autoinc value is used in sub-statement, but it's impossible to check how many autoinc values are used in sub-statement.) mysql-test/suite/rpl/r/rpl_auto_increment_update_failure.result: Test result for bug#45677 mysql-test/suite/rpl/t/rpl_auto_increment_update_failure.test: Added test to verify the following two properties: P1) insert/update in an autoinc column causes statement to be logged in row format if binlog_format=mixed P2) if binlog_format=mixed, and a trigger or function contains two or more inserts/updates in a table that has an autoinc column, then the slave should not go out of sync, even if there are concurrent transactions. sql/sql_base.cc: Added function 'has_write_table_with_auto_increment' to check if one (or more) write tables have auto_increment columns. Removed function 'has_two_write_locked_tables_with_auto_increment', because the function is included in function 'has_write_table_with_auto_increment'. 
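For illustration, a minimal sketch of the scenario described above; the table, column and trigger names are invented here and are not taken from rpl_auto_increment_update_failure.test:

  CREATE TABLE t1 (a INT) ENGINE=InnoDB;
  CREATE TABLE t2 (id INT AUTO_INCREMENT PRIMARY KEY, a INT) ENGINE=InnoDB;
  CREATE TRIGGER t1_ai AFTER INSERT ON t1
    FOR EACH ROW INSERT INTO t2 (a) VALUES (NEW.a);

  -- With binlog_format=MIXED, the INSERT below fires the trigger, which
  -- generates an auto-increment value for t2 inside a sub-statement. The
  -- statement is now marked unsafe and is logged in row format, so the
  -- auto-increment values stay consistent between master and slave.
  INSERT INTO t1 VALUES (1);
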
--- sql/sql_base.cc | 48 +++++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 31 deletions(-) (limited to 'sql') diff --git a/sql/sql_base.cc b/sql/sql_base.cc index f55d3fc5006..89fe6dc4b41 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -106,7 +106,7 @@ static bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias, static void close_old_data_files(THD *thd, TABLE *table, bool morph_locks, bool send_refresh); static bool -has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables); +has_write_table_with_auto_increment(TABLE_LIST *tables); extern "C" uchar *table_cache_key(const uchar *record, size_t *length, @@ -5277,15 +5277,17 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) thd->in_lock_tables=1; thd->options|= OPTION_TABLE_LOCK; /* - If we have >= 2 different tables to update with auto_inc columns, - statement-based binlogging won't work. We can solve this problem in - mixed mode by switching to row-based binlogging: + A query that modifies autoinc column in sub-statement can make the + master and slave inconsistent. + We can solve these problems in mixed mode by switching to binlogging + if at least one updated table is used by sub-statement */ - if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED && - has_two_write_locked_tables_with_auto_increment(tables)) + /* The BINLOG_FORMAT_MIXED judgement is saved for suppressing + warnings, but it will be removed by fixing bug#45827 */ + if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED && tables && + has_write_table_with_auto_increment(thd->lex->first_not_own_table())) { thd->lex->set_stmt_unsafe(); - thd->set_current_stmt_binlog_row_based_if_mixed(); } } @@ -8815,47 +8817,31 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table /* - Tells if two (or more) tables have auto_increment columns and we want to - lock those tables with a write lock. + Check if one (or more) write tables have auto_increment columns. - SYNOPSIS - has_two_write_locked_tables_with_auto_increment - tables Table list + @param[in] tables Table list + + @retval 0 if at least one write tables has an auto_increment column + @retval 1 otherwise NOTES: Call this function only when you have established the list of all tables which you'll want to update (including stored functions, triggers, views inside your statement). 
- - RETURN - 0 No - 1 Yes */ static bool -has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables) +has_write_table_with_auto_increment(TABLE_LIST *tables) { - char *first_table_name= NULL, *first_db; - LINT_INIT(first_db); - for (TABLE_LIST *table= tables; table; table= table->next_global) { /* we must do preliminary checks as table->table may be NULL */ if (!table->placeholder() && table->table->found_next_number_field && (table->lock_type >= TL_WRITE_ALLOW_WRITE)) - { - if (first_table_name == NULL) - { - first_table_name= table->table_name; - first_db= table->db; - DBUG_ASSERT(first_db); - } - else if (strcmp(first_db, table->db) || - strcmp(first_table_name, table->table_name)) - return 1; - } + return 1; } + return 0; } -- cgit v1.2.1 From a0c2ee6454c5b545259e350217423b652e498478 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Sun, 4 Oct 2009 12:53:02 +0300 Subject: Fixed a valgrind error in debug_sync --- sql/debug_sync.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 9e7141b775b..2580d526b52 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -1276,7 +1276,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str) const char *errmsg; char *ptr; char *token; - uint token_length; + uint token_length= 0; DBUG_ENTER("debug_sync_eval_action"); DBUG_ASSERT(thd); DBUG_ASSERT(action_str); -- cgit v1.2.1 From 33cd911a16f667a1381c5d3de6e526169ba50d1e Mon Sep 17 00:00:00 2001 From: Gleb Shchepa Date: Mon, 5 Oct 2009 10:27:36 +0500 Subject: Bug #44139: Table scan when NULL appears in IN clause SELECT ... WHERE ... IN (NULL, ...) does full table scan, even if the same query without the NULL uses efficient range scan. The bugfix for the bug 18360 introduced an optimization: if 1) all right-hand arguments of the IN function are constants 2) result types of all right argument items are compatible enough to use the same single comparison function to compare all of them to the left argument, then we can convert the right-hand list of constant items to an array of equally-typed constant values for the further QUICK index access etc. (see Item_func_in::fix_length_and_dec()). The Item_null constant item objects have STRING_RESULT result types, so, as far as Item_func_in::fix_length_and_dec() is aware of NULLs in the right list, this improvement efficiently optimizes IN function calls with a mixed right list of NULLs and string constants. However, the optimization doesn't affect mixed lists of NULLs and integers, floats etc., because there is no unique common comparator. New optimization has been added to ignore the result type of NULL constants in the static analysis of mixed right-hand lists. This is safe, because at the execution phase we care about presence of NULLs anyway. 1. The collect_cmp_types() function has been modified to optionally ignore NULL constants in the item list. 2. NULL-skipping code of the Item_func_in::fix_length_and_dec() function has been modified to work not only with in_string vectors but with in_vectors of other types. mysql-test/r/func_in.result: Added test case for the bug #44139. mysql-test/t/func_in.test: Added test case for the bug #44139. sql/item_cmpfunc.cc: Bug #44139: Table scan when NULL appears in IN clause 1. The collect_cmp_types() function has been modified to optionally ignore NULL constants in the item list. 2. 
NULL-skipping code of the Item_func_in::fix_length_and_dec() function has been modified to work not only with in_string vectors but with in_vectors of other types. --- sql/item_cmpfunc.cc | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index d229453b795..c29031d25b5 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -189,6 +189,7 @@ enum_field_types agg_field_type(Item **items, uint nitems) collect_cmp_types() items Array of items to collect types from nitems Number of items in the array + skip_nulls Don't collect types of NULL items if TRUE DESCRIPTION This function collects different result types for comparison of the first @@ -199,7 +200,7 @@ enum_field_types agg_field_type(Item **items, uint nitems) Bitmap of collected types - otherwise */ -static uint collect_cmp_types(Item **items, uint nitems) +static uint collect_cmp_types(Item **items, uint nitems, bool skip_nulls= FALSE) { uint i; uint found_types; @@ -208,6 +209,8 @@ static uint collect_cmp_types(Item **items, uint nitems) found_types= 0; for (i= 1; i < nitems ; i++) { + if (skip_nulls && items[i]->type() == Item::NULL_ITEM) + continue; // Skip NULL constant items if ((left_result == ROW_RESULT || items[i]->result_type() == ROW_RESULT) && cmp_row_type(items[0], items[i])) @@ -215,6 +218,12 @@ static uint collect_cmp_types(Item **items, uint nitems) found_types|= 1<< (uint)item_cmp_type(left_result, items[i]->result_type()); } + /* + Even if all right-hand items are NULLs and we are skipping them all, we need + at least one type bit in the found_type bitmask. + */ + if (skip_nulls && !found_types) + found_types= 1 << (uint)left_result; return found_types; } @@ -3515,7 +3524,7 @@ void Item_func_in::fix_length_and_dec() uint type_cnt= 0, i; Item_result cmp_type= STRING_RESULT; left_result_type= args[0]->result_type(); - if (!(found_types= collect_cmp_types(args, arg_count))) + if (!(found_types= collect_cmp_types(args, arg_count, true))) return; for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++) @@ -3693,9 +3702,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - array->set(j,args[i]); if (!args[i]->null_value) // Skip NULL values + { + array->set(j,args[i]); j++; + } else have_null= 1; } -- cgit v1.2.1 From 14d1909440ab14ae839df974ce92c12c4220456e Mon Sep 17 00:00:00 2001 From: Alfranio Correia Date: Tue, 6 Oct 2009 01:38:58 +0100 Subject: BUG#47287 RBR: replication diff on basic case with txn- and non-txn tables in a statement Let - T be a transactional table and N non-transactional table. - B be begin, C commit and R rollback. - M be a mixed statement, i.e. a statement that updates both T and N. - M* be a mixed statement that fails while updating either T or N. This patch restore the behavior presented in 5.1.37 for rows either produced in the RBR or MIXED modes, when a M* statement that happened early in a transaction had their changes written to the binary log outside the boundaries of the transaction and wrapped in a BEGIN/ROLLBACK. This was done to keep the slave consistent with with the master as the rollback would keep the changes on N and undo them on T. In particular, we do what follows: . B M* T C would log - B M* R B T C. Note that, we are not preserving history from the master as we are introducing a rollback that never happened. However, this seems to be more acceptable than making the slave diverge. We do not fix the following case: . 
B T M* C would log B T M* C. The slave will diverge as the changes on T tables that originated from the M statement are rolled back on the master but not on the slave. Unfortunately, we cannot simply rollback the transaction as this would undo any uncommitted changes on T tables. SBR is not considered in this patch because a failing statement is written to the binary along with the error code and a slave executes and then rolls back the statement when it has an associated error code, thus undoing the effects on T. In RBR and MBR, a full-fledged fix will be pushed after the WL 2687. --- sql/log.cc | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index feaa5499912..9c733636bad 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -153,7 +153,7 @@ private: class binlog_trx_data { public: binlog_trx_data() - : at_least_one_stmt(0), incident(FALSE), m_pending(0), + : at_least_one_stmt_committed(0), incident(FALSE), m_pending(0), before_stmt_pos(MY_OFF_T_UNDEF) { trans_log.end_of_file= max_binlog_cache_size; @@ -182,7 +182,10 @@ public: { DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos)); DBUG_PRINT("info", ("before_stmt_pos=%lu", (ulong) pos)); - delete pending(); + if (pending()) + { + delete pending(); + } set_pending(0); reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0); trans_log.end_of_file= max_binlog_cache_size; @@ -192,12 +195,12 @@ public: /* The only valid positions that can be truncated to are at the beginning of a statement. We are relying on this fact to be able - to set the at_least_one_stmt flag correctly. In other word, if + to set the at_least_one_stmt_committed flag correctly. In other word, if we are truncating to the beginning of the transaction cache, there will be no statements in the cache, otherwhise, we will have at least one statement in the transaction cache. */ - at_least_one_stmt= (pos > 0); + at_least_one_stmt_committed= (pos > 0); } /* @@ -239,7 +242,7 @@ public: Boolean that is true if there is at least one statement in the transaction cache. */ - bool at_least_one_stmt; + bool at_least_one_stmt_committed; bool incident; private: @@ -1539,9 +1542,10 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) { Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, TRUE, 0); error= binlog_end_trans(thd, trx_data, &qev, all); - goto end; } + trx_data->at_least_one_stmt_committed = my_b_tell(&trx_data->trans_log) > 0; + end: if (!all) trx_data->before_stmt_pos = MY_OFF_T_UNDEF; // part of the stmt commit @@ -1608,15 +1612,18 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) { /* We flush the cache with a rollback, wrapped in a beging/rollback if: - . aborting a transcation that modified a non-transactional table or; + . aborting a transaction that modified a non-transactional table; . aborting a statement that modified both transactional and - non-transctional tables but which is not in the boundaries of any - transaction; + non-transactional tables but which is not in the boundaries of any + transaction or there was no early change; . the OPTION_KEEP_LOG is activate. 
*/ if ((all && thd->transaction.all.modified_non_trans_table) || (!all && thd->transaction.stmt.modified_non_trans_table && !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT))) || + (!all && thd->transaction.stmt.modified_non_trans_table && + !trx_data->at_least_one_stmt_committed && + thd->current_stmt_binlog_row_based) || ((thd->options & OPTION_KEEP_LOG))) { Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, TRUE, 0); -- cgit v1.2.1 From 7e0da4352c9e85242f8fac0816b88358f4ba255e Mon Sep 17 00:00:00 2001 From: Alfranio Correia Date: Tue, 6 Oct 2009 01:54:00 +0100 Subject: BUG#47678 Changes to n-tables that happen early in a trans. are only flushed upon commit Let - T be a transactional table and N non-transactional table. - B be begin, C commit and R rollback. - N be a statement that accesses and changes only N-tables. - T be a statement that accesses and changes only T-tables. In RBR, changes to N-tables that happen early in a transaction are not immediately flushed upon committing a statement. This behavior may, however, break consistency in the presence of concurrency since changes done to N-tables become immediately visible to other connections. To fix this problem, we do the following: . B N N T C would log - B N C B N C B T C. . B N N T R would log - B N C B N C B T R. Note that we are not preserving history from the master as we are introducing a commit that never happened. However, this seems to be more acceptable than the possibility of breaking consistency in the presence of concurrency. --- sql/log.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index 9c733636bad..e8366c47863 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1275,7 +1275,7 @@ static bool stmt_has_updated_trans_table(THD *thd) { Ha_trx_info *ha_info; - for (ha_info= thd->transaction.stmt.ha_list; ha_info; ha_info= ha_info->next()) + for (ha_info= thd->transaction.stmt.ha_list; ha_info && ha_info->is_started(); ha_info= ha_info->next()) { if (ha_info->is_trx_read_write() && ha_info->ht() != binlog_hton) return (TRUE); @@ -1538,7 +1538,10 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) YESNO(in_transaction), YESNO(thd->transaction.all.modified_non_trans_table), YESNO(thd->transaction.stmt.modified_non_trans_table))); - if (!in_transaction || all) + if (!in_transaction || all || + (!all && !trx_data->at_least_one_stmt_committed && + !stmt_has_updated_trans_table(thd) && + thd->transaction.stmt.modified_non_trans_table)) { Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, TRUE, 0); error= binlog_end_trans(thd, trx_data, &qev, all); -- cgit v1.2.1 From 1ec86b21f9328b6ef472fcae1b78c2e3757f73e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Magnus=20Bl=C3=A5udd?= Date: Tue, 6 Oct 2009 13:04:51 +0200 Subject: Bug#47857 strip_sp function in mysys/mf_strip.c never used and cause name clash - Remove mf_strip.c and the declaration of 'strip_sp' --- sql/mysql_priv.h.pp | 1 - 1 file changed, 1 deletion(-) (limited to 'sql') diff --git a/sql/mysql_priv.h.pp b/sql/mysql_priv.h.pp index 8bb31f64587..d874a2591d1 100644 --- a/sql/mysql_priv.h.pp +++ b/sql/mysql_priv.h.pp @@ -773,7 +773,6 @@ extern int wild_compare(const char *str,const char *wildstr, extern WF_PACK *wf_comp(char * str); extern int wf_test(struct wild_file_pack *wf_pack,const char *name); extern void wf_end(struct wild_file_pack *buffer); -extern size_t strip_sp(char * str); extern my_bool array_append_string_unique(const char *str, const char **array, size_t size); 
extern void get_date(char * to,int timeflag,time_t use_time); -- cgit v1.2.1 From 1a48dd4e2bf0c6353bbf76b1f9b768620e0aa090 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Wed, 7 Oct 2009 18:03:42 +0300 Subject: Bug #43029: FORCE INDEX FOR ORDER BY is ignored when join buffering is used FORCE INDEX FOR ORDER BY now prevents the optimizer from using join buffering. As a result the optimizer can use indexed access on the first table and doesn't need to sort the complete resultset at the end of the statement. --- sql/sql_base.cc | 6 ++++-- sql/sql_parse.cc | 1 + sql/sql_select.cc | 19 ++++++++++++++----- sql/sql_select.h | 2 ++ sql/table.cc | 25 ++++++++++++++++++++----- sql/table.h | 12 ++++++++++++ 6 files changed, 53 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/sql_base.cc b/sql/sql_base.cc index d1e96fcdbb3..e706bd04ea6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2305,7 +2305,8 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list, bool link_in) table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= 0; + table->force_index= table->force_index_order= table->force_index_group= 0; table->status=STATUS_NO_RECORD; DBUG_RETURN(FALSE); } @@ -2963,7 +2964,8 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= 0; + table->force_index= table->force_index_order= table->force_index_group= 0; table->status=STATUS_NO_RECORD; table->insert_values= 0; table->fulltext_searched= 0; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 83ef525e3eb..27688e41d4f 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6258,6 +6258,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->table_name_length=table->table.length; ptr->lock_type= lock_type; ptr->updating= test(table_options & TL_OPTION_UPDATING); + /* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */ ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX); ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES); ptr->derived= table->sel; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3f1432914a0..9dacb2c2ce4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1231,13 +1231,22 @@ JOIN::optimize() (!group_list && tmp_table_param.sum_func_count)) order=0; - // Can't use sort on head table if using row cache + // Can't use sort on head table if using join buffering if (full_join) { - if (group_list) - simple_group=0; - if (order) - simple_order=0; + TABLE *stable= (sort_by_table == (TABLE *) 1 ? + join_tab[const_tables].table : sort_by_table); + /* + FORCE INDEX FOR ORDER BY can be used to prevent join buffering when + sorting on the first table. + */ + if (!stable || !stable->force_index_order) + { + if (group_list) + simple_group= 0; + if (order) + simple_order= 0; + } } /* diff --git a/sql/sql_select.h b/sql/sql_select.h index a0366d47149..3f06b402638 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -357,6 +357,8 @@ public: simple_xxxxx is set if ORDER/GROUP BY doesn't include any references to other tables than the first non-constant table in the JOIN. It's also set if ORDER/GROUP BY is empty. + Used for deciding for or against using a temporary table to compute + GROUP/ORDER BY. 
*/ bool simple_order, simple_group; /** diff --git a/sql/table.cc b/sql/table.cc index 04f2a3fbcf8..d2538eb4d59 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -4637,7 +4637,8 @@ Item_subselect *TABLE_LIST::containing_subselect() (TABLE_LIST::index_hints). Using the information in this tagged list this function sets the members st_table::keys_in_use_for_query, st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, - st_table::force_index and st_table::covering_keys. + st_table::force_index, st_table::force_index_order, + st_table::force_index_group and st_table::covering_keys. Current implementation of the runtime does not allow mixing FORCE INDEX and USE INDEX, so this is checked here. Then the FORCE INDEX list @@ -4765,14 +4766,28 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl) } /* process FORCE INDEX as USE INDEX with a flag */ + if (!index_order[INDEX_HINT_FORCE].is_clear_all()) + { + tbl->force_index_order= TRUE; + index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]); + } + + if (!index_group[INDEX_HINT_FORCE].is_clear_all()) + { + tbl->force_index_group= TRUE; + index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]); + } + + /* + TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified) and + create tbl->force_index_join instead. + Then use the correct force_index_XX instead of the global one. + */ if (!index_join[INDEX_HINT_FORCE].is_clear_all() || - !index_order[INDEX_HINT_FORCE].is_clear_all() || - !index_group[INDEX_HINT_FORCE].is_clear_all()) + tbl->force_index_group || tbl->force_index_order) { tbl->force_index= TRUE; index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]); - index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]); - index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]); } /* apply USE INDEX */ diff --git a/sql/table.h b/sql/table.h index 40372fa91cf..e4a382c799f 100644 --- a/sql/table.h +++ b/sql/table.h @@ -752,6 +752,18 @@ struct st_table { bytes, it would take up 4. */ my_bool force_index; + + /** + Flag set when the statement contains FORCE INDEX FOR ORDER BY + See TABLE_LIST::process_index_hints(). + */ + my_bool force_index_order; + + /** + Flag set when the statement contains FORCE INDEX FOR GROUP BY + See TABLE_LIST::process_index_hints(). + */ + my_bool force_index_group; my_bool distinct,const_table,no_rows; /** -- cgit v1.2.1 From 3185118e1a8d7599b7b5238b4b320f1a97a47bd5 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Thu, 8 Oct 2009 16:56:31 +0500 Subject: Fix for bug #42803: Field_bit does not have unsigned_flag field, can lead to bad memory access Problem: Field_bit is the only field which returns INT_RESULT and doesn't have unsigned flag. As it's not a descendant of the Field_num, so using ((Field_num *) field_bit)->unsigned_flag may lead to unpredictable results. Fix: check the field type before casting. mysql-test/r/type_bit.result: Fix for bug #42803: Field_bit does not have unsigned_flag field, can lead to bad memory access - test result. mysql-test/t/type_bit.test: Fix for bug #42803: Field_bit does not have unsigned_flag field, can lead to bad memory access - test case. sql/opt_range.cc: Fix for bug #42803: Field_bit does not have unsigned_flag field, can lead to bad memory access - don't cast to (Field_num *) Field_bit, as it's not a Field_num descendant and is always unsigned by nature. 
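A sketch of the kind of query that reaches this code path; the table definition is illustrative only and is not the actual type_bit.test case:

  CREATE TABLE t1 (a BIT(11), KEY(a));
  INSERT INTO t1 VALUES (0), (1), (2);

  -- The negative constant makes the range optimizer inspect the field's
  -- signedness. Before the fix, Field_bit was cast to Field_num in order to
  -- read unsigned_flag, touching memory outside the object; with the fix the
  -- field type is checked first and BIT is always treated as unsigned.
  SELECT a+0 FROM t1 WHERE a > -1;
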
--- sql/opt_range.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index fdf6cc03a44..355317fe280 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -4536,6 +4536,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, if (type == Item_func::LT_FUNC && (value->val_int() > 0)) type = Item_func::LE_FUNC; else if (type == Item_func::GT_FUNC && + (field->type() != FIELD_TYPE_BIT) && !((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag && (value->val_int() < 0)) @@ -4572,7 +4573,9 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, */ if (field->result_type() == INT_RESULT && value->result_type() == INT_RESULT && - ((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag) + ((field->type() == FIELD_TYPE_BIT || + ((Field_num *) field)->unsigned_flag) && + !((Item_int*) value)->unsigned_flag)) { longlong item_val= value->val_int(); if (item_val < 0) -- cgit v1.2.1 From ec42ccfdfb8c7f5c8c2ef13183690279566bc3c3 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 8 Oct 2009 16:21:07 +0300 Subject: Addendum to the fix for bug 43029 --- sql/mysql_priv.h | 1 + 1 file changed, 1 insertion(+) (limited to 'sql') diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6fde16d3049..c04b1f5ae38 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -2433,6 +2433,7 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; + table->force_index_order= table->force_index_group= 0; table->covering_keys= table->s->keys_for_keyread; table->merge_keys.clear_all(); } -- cgit v1.2.1 From 0186334ae86380935124c4e71f5887ce6dab59e9 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Thu, 8 Oct 2009 15:36:43 +0200 Subject: Bug#46922: crash when adding partitions and open_files_limit is reached Problem was bad error handling, leaving some new temporary partitions locked and initialized and some not yet initialized and locked, leading to a crash when trying to unlock the not yet initialized and locked partitions Solution was to unlock the already locked partitions, and not include any of the new temporary partitions in later unlocks mysql-test/r/partition_open_files_limit.result: Bug#46922: crash when adding partitions and open_files_limit is reached New test result mysql-test/t/partition_open_files_limit-master.opt: Bug#46922: crash when adding partitions and open_files_limit is reached New test opt-file for testing when open_files_limit is reached mysql-test/t/partition_open_files_limit.test: Bug#46922: crash when adding partitions and open_files_limit is reached New test case testing when open_files_limit is reached sql/ha_partition.cc: Bug#46922: crash when adding partitions and open_files_limit is reached When cleaning up the partitions already locked need to be unlocked, and not be unlocked/closed after cleaning up. 
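The failure can be sketched roughly as follows; the partition counts are arbitrary, and the real test lowers the limit through partition_open_files_limit-master.opt rather than relying on the server default:

  -- Assumes the server was started with a low --open-files-limit, so that
  -- opening the new partitions runs out of file descriptors part way through.
  CREATE TABLE t1 (a INT) ENGINE=MyISAM PARTITION BY HASH (a) PARTITIONS 1;

  -- Before the fix the failed ALTER crashed while cleaning up the partially
  -- initialized temporary partitions; with the fix it fails with an
  -- "out of resources" error and leaves t1 unchanged.
  ALTER TABLE t1 ADD PARTITION PARTITIONS 97;
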
--- sql/ha_partition.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 5b053ab9cac..6b5350a82cd 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1280,10 +1280,10 @@ void ha_partition::cleanup_new_partition(uint part_count) m_file= m_added_file; m_added_file= NULL; + external_lock(ha_thd(), F_UNLCK); /* delete_table also needed, a bit more complex */ close(); - m_added_file= m_file; m_file= save_m_file; } DBUG_VOID_RETURN; -- cgit v1.2.1 From 62395e6ffa34316ebe4b5297459f8742361f9143 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Thu, 8 Oct 2009 15:58:17 +0200 Subject: Bug#44059: Incorrect cardinality of indexes on a partitioned table backport for bug#44059 from mysql-pe to mysql-5.1-bugteam Using the partition with most rows instead of first partition to estimate the cardinality of indexes. mysql-test/r/partition.result: Bug#44059: Incorrect cardinality of indexes on a partitioned table Added test result mysql-test/t/partition.test: Bug#44059: Incorrect cardinality of indexes on a partitioned table Added test case sql/ha_partition.cc: Bug#44059: Incorrect cardinality of indexes on a partitioned table Checking which partition that has the most rows, and using that partition for HA_STATUS_CONST instead of first partition --- sql/ha_partition.cc | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index df5badccb1e..ac55c4e718e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -5011,8 +5011,9 @@ int ha_partition::info(uint flag) If the handler doesn't support statistics, it should set all of the above to 0. - We will allow the first handler to set the rec_per_key and use - this as an estimate on the total table. + We first scans through all partitions to get the one holding most rows. + We will then allow the handler with the most rows to set + the rec_per_key and use this as an estimate on the total table. max_data_file_length: Maximum data file length We ignore it, is only used in @@ -5024,14 +5025,33 @@ int ha_partition::info(uint flag) ref_length: We set this to the value calculated and stored in local object create_time: Creation time of table - Set by first handler - So we calculate these constants by using the variables on the first - handler. + So we calculate these constants by using the variables from the + handler with most rows. */ - handler *file; + handler *file, **file_array; + ulonglong max_records= 0; + uint32 i= 0; + uint32 handler_instance= 0; + + file_array= m_file; + do + { + file= *file_array; + /* Get variables if not already done */ + if (!(flag & HA_STATUS_VARIABLE) || + !bitmap_is_set(&(m_part_info->used_partitions), + (file_array - m_file))) + file->info(HA_STATUS_VARIABLE); + if (file->stats.records > max_records) + { + max_records= file->stats.records; + handler_instance= i; + } + i++; + } while (*(++file_array)); - file= m_file[0]; + file= m_file[handler_instance]; file->info(HA_STATUS_CONST); stats.create_time= file->stats.create_time; ref_length= m_ref_length; -- cgit v1.2.1 From fa4006bc8ac7c2c773be2acb1303840c372c3e11 Mon Sep 17 00:00:00 2001 From: He Zhenxing Date: Fri, 9 Oct 2009 16:54:48 +0800 Subject: Bug#47323 : mysqlbinlog --verbose displays bad output when events contain subset of columns Commit the non-NDB specific part (originated by frazer) to 5.1 mainline. 
--- sql/log_event.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index d7921ad3c27..ae7c4335f59 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1852,6 +1852,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, { const uchar *value0= value; const uchar *null_bits= value; + uint null_bit_index= 0; char typestr[64]= ""; value+= (m_width + 7) / 8; @@ -1860,7 +1861,8 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, for (size_t i= 0; i < td->size(); i ++) { - int is_null= (null_bits[i / 8] >> (i % 8)) & 0x01; + int is_null= (null_bits[null_bit_index / 8] + >> (null_bit_index % 8)) & 0x01; if (bitmap_is_set(cols_bitmap, i) == 0) continue; @@ -1897,6 +1899,8 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, } my_b_printf(file, "\n"); + + null_bit_index++; } return value - value0; } -- cgit v1.2.1 From 5ef9ec9d9e8457bbc850a1b66f41445a5a4adb44 Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Fri, 9 Oct 2009 11:30:40 +0200 Subject: Bug#42846: wrong result returned for range scan when using covering index When two range predicates were combined under an OR predicate, the algorithm tried to merge overlapping ranges into one. But the case when a range overlapped several other ranges was not handled. This lead to 1) ranges overlapping, which gave repeated results and 2) a range that overlapped several other ranges was cut off. Fixed by 1) Making sure that a range got an upper bound equal to the next range with a greater minimum. 2) Removing a continue statement mysql-test/r/group_min_max.result: Bug#42846: Changed query plans mysql-test/r/range.result: Bug#42846: Test result. mysql-test/t/range.test: Bug#42846: Test case. sql/opt_range.cc: Bug#42846: The fix. Part1: Previously, both endpoints from key2 were copied, which is not safe. Since ranges are processed in ascending order of minimum endpoints, it is safe to copy the minimum endpoint from key2 but not the maximum. The maximum may only be copied if there is no other range or the other range's minimum is greater than key2's maximum. --- sql/opt_range.cc | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 72 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 1b1d948b3b9..119f90bc97a 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -6512,6 +6512,63 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1) } +/** + Combine two range expression under a common OR. On a logical level, the + transformation is key_or( expr1, expr2 ) => expr1 OR expr2. + + Both expressions are assumed to be in the SEL_ARG format. In a logic sense, + theformat is reminiscent of DNF, since an expression such as the following + + ( 1 < kp1 < 10 AND p1 ) OR ( 10 <= kp2 < 20 AND p2 ) + + where there is a key consisting of keyparts ( kp1, kp2, ..., kpn ) and p1 + and p2 are valid SEL_ARG expressions over keyparts kp2 ... kpn, is a valid + SEL_ARG condition. The disjuncts appear ordered by the minimum endpoint of + the first range and ranges must not overlap. It follows that they are also + ordered by maximum endpoints. Thus + + ( 1 < kp1 <= 2 AND ( kp2 = 2 OR kp2 = 3 ) ) OR kp1 = 3 + + Is a a valid SER_ARG expression for a key of at least 2 keyparts. + + For simplicity, we will assume that expr2 is a single range predicate, + i.e. on the form ( a < x < b AND ... ). 
It is easy to generalize to a + disjunction of several predicates by subsequently call key_or for each + disjunct. + + The algorithm iterates over each disjunct of expr1, and for each disjunct + where the first keypart's range overlaps with the first keypart's range in + expr2: + + If the predicates are equal for the rest of the keyparts, or if there are + no more, the range in expr2 has its endpoints copied in, and the SEL_ARG + node in expr2 is deallocated. If more ranges became connected in expr1, the + surplus is also dealocated. If they differ, two ranges are created. + + - The range leading up to the overlap. Empty if endpoints are equal. + + - The overlapping sub-range. May be the entire range if they are equal. + + Finally, there may be one more range if expr2's first keypart's range has a + greater maximum endpoint than the last range in expr1. + + For the overlapping sub-range, we recursively call key_or. Thus in order to + compute key_or of + + (1) ( 1 < kp1 < 10 AND 1 < kp2 < 10 ) + + (2) ( 2 < kp1 < 20 AND 4 < kp2 < 20 ) + + We create the ranges 1 < kp <= 2, 2 < kp1 < 10, 10 <= kp1 < 20. For the + first one, we simply hook on the condition for the second keypart from (1) + : 1 < kp2 < 10. For the second range 2 < kp1 < 10, key_or( 1 < kp2 < 10, 4 + < kp2 < 20 ) is called, yielding 1 < kp2 < 20. For the last range, we reuse + the range 4 < kp2 < 20 from (2) for the second keypart. The result is thus + + ( 1 < kp1 <= 2 AND 1 < kp2 < 10 ) OR + ( 2 < kp1 < 10 AND 1 < kp2 < 20 ) OR + ( 10 <= kp1 < 20 AND 4 < kp2 < 20 ) +*/ static SEL_ARG * key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) { @@ -6663,7 +6720,21 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) key1=key1->tree_delete(save); } last->copy_min(tmp); - if (last->copy_min(key2) || last->copy_max(key2)) + bool full_range= last->copy_min(key2); + if (!full_range) + { + if (last->next && key2->cmp_max_to_min(last->next) >= 0) + { + last->max_value= last->next->min_value; + if (last->next->min_flag & NEAR_MIN) + last->max_flag&= ~NEAR_MAX; + else + last->max_flag|= NEAR_MAX; + } + else + full_range= last->copy_max(key2); + } + if (full_range) { // Full range key1->free_tree(); for (; key2 ; key2=key2->next) @@ -6673,8 +6744,6 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) return 0; } } - key2=key2->next; - continue; } if (cmp >= 0 && tmp->cmp_min_to_min(key2) < 0) -- cgit v1.2.1 From 8e9ec6a5796535ad77ccc9e5cfdb561bbe7f8da4 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Mon, 12 Oct 2009 14:46:00 +0200 Subject: Bug#46944 Internal prepared XA transction XIDs are not removed if server_id changes When MySQL crashes (or a snapshot is taken which simulates a crash), then it is possible that internal XA transactions (used to sync the binary log and InnoDB) can be left in a PREPARED state, whereas they should be rolled back. This is done when the server_id changes before the restart occurs. This patch releases he restriction that the server_id should be consistent if the XID is to be considerred valid. The rollback phase should then be able to clean up all pending XA transactions. 
--- sql/handler.h | 1 - 1 file changed, 1 deletion(-) (limited to 'sql') diff --git a/sql/handler.h b/sql/handler.h index fe8f7c437ff..7fc2bf2fece 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -398,7 +398,6 @@ struct xid_t { my_xid get_my_xid() { return gtrid_length == MYSQL_XID_GTRID_LEN && bqual_length == 0 && - !memcmp(data+MYSQL_XID_PREFIX_LEN, &server_id, sizeof(server_id)) && !memcmp(data, MYSQL_XID_PREFIX, MYSQL_XID_PREFIX_LEN) ? quick_get_my_xid() : 0; } -- cgit v1.2.1 From 662d8367440f7de655500990469754c7b538ce73 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Tue, 13 Oct 2009 09:43:27 +0500 Subject: Fix for bug#47963: Wrong results when index is used Problem: using null microsecond part (e.g. "YYYY-MM-DD HH:MM:SS.0000") in a WHERE condition may lead to wrong results due to improper DATETIMEs comparison in some cases. Fix: as we compare DATETIMEs as strings we must trim trailing 0's in such cases. mysql-test/r/innodb_mysql.result: Fix for bug#47963: Wrong results when index is used - test result. mysql-test/t/innodb_mysql.test: Fix for bug#47963: Wrong results when index is used - test case. sql/item.cc: Fix for bug#47963: Wrong results when index is used - comparing DATETIMEs trim trailing 0's in the microsecond part. --- sql/item.cc | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 86e4551e55b..f637f9ffaea 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6900,17 +6900,37 @@ int stored_field_cmp_to_item(Field *field, Item *item) /* If comparing DATE with DATETIME, append the time-part to the DATE. So that the strings are equally formatted. - A DATE converted to string is 10 characters, and a DATETIME converted - to string is 19 characters. + A DATE converted to string is 10 (MAX_DATE_WIDTH) characters, + and a DATETIME converted to string is 19 (MAX_DATETIME_WIDTH) characters. */ field_type= field->type(); + uint32 item_length= item_result->length(); if (field_type == MYSQL_TYPE_DATE && - item_result->length() == 19) + item_length == MAX_DATETIME_WIDTH) field_tmp.append(" 00:00:00"); - else if (field_type == MYSQL_TYPE_DATETIME && - item_result->length() == 10) - item_result->append(" 00:00:00"); - + else if (field_type == MYSQL_TYPE_DATETIME) + { + if (item_length == MAX_DATE_WIDTH) + item_result->append(" 00:00:00"); + else if (item_length > MAX_DATETIME_WIDTH) + { + /* + We don't store microsecond part of DATETIME in field + but item_result contains it. As we compare DATETIMEs as strings + we must trim trailing 0's in item_result's microsecond part + to ensure "YYYY-MM-DD HH:MM:SS" == "YYYY-MM-DD HH:MM:SS.0000" + */ + char *end= (char *) item_result->ptr() + item_length - 1; + /* Trim trailing 0's */ + while (*end == '0') + end--; + /* Trim '.' if no microseconds */ + if (*end == '.') + end--; + DBUG_ASSERT(end - item_result->ptr() + 1 >= MAX_DATETIME_WIDTH); + item_result->length(end - item_result->ptr() + 1); + } + } return stringcmp(&field_tmp,item_result); } if (res_type == INT_RESULT) -- cgit v1.2.1 From bc9f56a6c2b121e3a5de277585322068afbe1887 Mon Sep 17 00:00:00 2001 From: Alexey Kopytov Date: Tue, 13 Oct 2009 19:49:32 +0400 Subject: Bug #47123: Endless 100% CPU loop with STRAIGHT_JOIN The problem was in incorrect handling of predicates involving NULL as a constant value by the range optimizer. 
For example, when creating a SEL_ARG node from a condition of the form "field < const" (which would normally result in the "NULL < field < const" SEL_ARG), the special case when "const" is NULL was not taken into account, so "NULL < field < NULL" was produced for the "field < NULL" condition. As a result, SEL_ARG structures of this form could not be further optimized which in turn could lead to incorrectly constructed SEL_ARG trees. In particular, code assuming SEL_ARG structures to always form a sequence of ordered disjoint intervals could enter an infinite loop under some circumstances. Fixed by changing get_mm_leaf() so that for any sargable predicate except "<=>" involving NULL as a constant, "empty" SEL_ARG is returned, since such a predicate is always false. mysql-test/r/range.result: Added a test case for bug #47123. mysql-test/t/range.test: Added a test case for bug #47123. sql/opt_range.cc: Fixed get_mm_leaf() so that for any sargable predicate except "<=>" involving NULL as a constant, "empty" SEL_ARG is returned, since such a predicate is always false. --- sql/opt_range.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2239aafbeec..cfaff76b96b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -5887,6 +5887,17 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, goto end; } field->table->in_use->variables.sql_mode= orig_sql_mode; + + /* + Any sargable predicate except "<=>" involving NULL as a constant is always + FALSE + */ + if (type != Item_func::EQUAL_FUNC && field->is_real_null()) + { + tree= &null_element; + goto end; + } + str= (uchar*) alloc_root(alloc, key_part->store_length+1); if (!str) goto end; -- cgit v1.2.1 From f9c6730258a9f5776608967fcc1f10183710c76f Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Oct 2009 09:39:05 +0800 Subject: Bug#46640: output from mysqlbinlog command in 5.1 breaks replication The BINLOG statement was sharing too much code with the slave SQL thread, introduced with the patch for Bug#32407. This caused statements to be logged with the wrong server_id, the id stored inside the events of the BINLOG statement rather than the id of the running server. Fix by rearranging code a bit so that only relevant parts of the code are executed by the BINLOG statement, and the server_id of the server executing the statements will not be overrided by the server_id stored in the 'format description BINLOG statement'. mysql-test/extra/binlog_tests/binlog.test: Added test to verify if the server_id stored in the 'format description BINLOG statement' will override the server_id of the server executing the statements. mysql-test/suite/binlog/r/binlog_row_binlog.result: Test result for bug#46640 mysql-test/suite/binlog/r/binlog_stm_binlog.result: Test result for bug#46640 sql/log_event.cc: Moved rows_event_stmt_clean() call from update_pos() to apply_event(). This in any case makes more sense, and is needed as update_pos() is no longer called when executing BINLOG statements. Moved setting of rli->relay_log.description_event_for_exec from Format_description_log_event::do_update_pos() to Format_description_log_event::do_apply_event() sql/log_event_old.cc: Moved rows_event_stmt_clean() call from update_pos() to apply_event(). This in any case makes more sense, and is needed as update_pos() is no longer called when executing BINLOG statements. sql/slave.cc: The skip flag is no longer needed, as the code path for BINLOG statement has been cleaned up. 
sql/sql_binlog.cc: Don't invoke the update_pos() code path for the BINLOG statement, as it contains code that is redundant and/or harmful (especially setting thd->server_id). --- sql/log_event.cc | 66 ++++++++++++++++---------------- sql/log_event_old.cc | 104 +++++++++++++++++++++++++-------------------------- sql/slave.cc | 57 +++++++++++++--------------- sql/slave.h | 3 +- sql/sql_binlog.cc | 51 +++++++++++++------------ 5 files changed, 137 insertions(+), 144 deletions(-) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index ae7c4335f59..0e1500dac39 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3859,6 +3859,7 @@ bool Format_description_log_event::write(IO_CACHE* file) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Format_description_log_event::do_apply_event(Relay_log_info const *rli) { + int ret= 0; DBUG_ENTER("Format_description_log_event::do_apply_event"); #ifdef USING_TRANSACTIONS @@ -3900,17 +3901,21 @@ int Format_description_log_event::do_apply_event(Relay_log_info const *rli) 0, then 96, then jump to first really asked event (which is >96). So this is ok. */ - DBUG_RETURN(Start_log_event_v3::do_apply_event(rli)); + ret= Start_log_event_v3::do_apply_event(rli); } - DBUG_RETURN(0); + + if (!ret) + { + /* Save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + const_cast(rli)->relay_log.description_event_for_exec= this; + } + + DBUG_RETURN(ret); } int Format_description_log_event::do_update_pos(Relay_log_info *rli) { - /* save the information describing this binlog */ - delete rli->relay_log.description_event_for_exec; - rli->relay_log.description_event_for_exec= this; - if (server_id == (uint32) ::server_id) { /* @@ -7506,6 +7511,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) thd->reset_current_stmt_binlog_row_based(); const_cast(rli)->cleanup_context(thd, error); thd->is_slave_error= 1; + DBUG_RETURN(error); } /* This code would ideally be placed in do_update_pos() instead, but @@ -7534,6 +7540,14 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) const_cast(rli)->last_event_start_time= my_time(0); } + if (get_flags(STMT_END_F)) + if (error= rows_event_stmt_cleanup(rli, thd)) + rli->report(ERROR_LEVEL, error, + "Error in %s event: commit of row events failed, " + "table `%s`.`%s`", + get_type_str(), m_table->s->db.str, + m_table->s->table_name.str); + DBUG_RETURN(error); } @@ -7632,33 +7646,19 @@ Rows_log_event::do_update_pos(Relay_log_info *rli) if (get_flags(STMT_END_F)) { - if ((error= rows_event_stmt_cleanup(rli, thd)) == 0) - { - /* - Indicate that a statement is finished. - Step the group log position if we are not in a transaction, - otherwise increase the event log position. - */ - rli->stmt_done(log_pos, when); - - /* - Clear any errors pushed in thd->net.last_err* if for example "no key - found" (as this is allowed). This is a safety measure; apparently - those errors (e.g. when executing a Delete_rows_log_event of a - non-existing row, like in rpl_row_mystery22.test, - thd->net.last_error = "Can't find record in 't1'" and last_errno=1032) - do not become visible. We still prefer to wipe them out. - */ - thd->clear_error(); - } - else - { - rli->report(ERROR_LEVEL, error, - "Error in %s event: commit of row events failed, " - "table `%s`.`%s`", - get_type_str(), m_table->s->db.str, - m_table->s->table_name.str); - } + /* + Indicate that a statement is finished. 
+ Step the group log position if we are not in a transaction, + otherwise increase the event log position. + */ + rli->stmt_done(log_pos, when); + /* + Clear any errors in thd->net.last_err*. It is not known if this is + needed or not. It is believed that any errors that may exist in + thd->net.last_err* are allowed. Examples of errors are "key not + found", which is produced in the test case rpl_row_conflicts.test + */ + thd->clear_error(); } else { diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index f14ebe49706..3389821a718 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1814,33 +1814,6 @@ int Old_rows_log_event::do_apply_event(Relay_log_info const *rli) const_cast(rli)->last_event_start_time= my_time(0); } - DBUG_RETURN(0); -} - - -Log_event::enum_skip_reason -Old_rows_log_event::do_shall_skip(Relay_log_info *rli) -{ - /* - If the slave skip counter is 1 and this event does not end a - statement, then we should not start executing on the next event. - Otherwise, we defer the decision to the normal skipping logic. - */ - if (rli->slave_skip_counter == 1 && !get_flags(STMT_END_F)) - return Log_event::EVENT_SKIP_IGNORE; - else - return Log_event::do_shall_skip(rli); -} - -int -Old_rows_log_event::do_update_pos(Relay_log_info *rli) -{ - DBUG_ENTER("Old_rows_log_event::do_update_pos"); - int error= 0; - - DBUG_PRINT("info", ("flags: %s", - get_flags(STMT_END_F) ? "STMT_END_F " : "")); - if (get_flags(STMT_END_F)) { /* @@ -1869,7 +1842,12 @@ Old_rows_log_event::do_update_pos(Relay_log_info *rli) are involved, commit the transaction and flush the pending event to the binlog. */ - error= ha_autocommit_or_rollback(thd, 0); + if (error= ha_autocommit_or_rollback(thd, 0)) + rli->report(ERROR_LEVEL, error, + "Error in %s event: commit of row events failed, " + "table `%s`.`%s`", + get_type_str(), m_table->s->db.str, + m_table->s->table_name.str); /* Now what if this is not a transactional engine? we still need to @@ -1882,33 +1860,51 @@ Old_rows_log_event::do_update_pos(Relay_log_info *rli) */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); - if (error == 0) - { - /* - Indicate that a statement is finished. - Step the group log position if we are not in a transaction, - otherwise increase the event log position. - */ - rli->stmt_done(log_pos, when); + const_cast(rli)->cleanup_context(thd, 0); + } - /* - Clear any errors pushed in thd->net.client_last_err* if for - example "no key found" (as this is allowed). This is a safety - measure; apparently those errors (e.g. when executing a - Delete_rows_log_event_old of a non-existing row, like in - rpl_row_mystery22.test, thd->net.last_error = "Can't - find record in 't1'" and last_errno=1032) do not become - visible. We still prefer to wipe them out. - */ - thd->clear_error(); - } - else - rli->report(ERROR_LEVEL, error, - "Error in %s event: commit of row events failed, " - "table `%s`.`%s`", - get_type_str(), m_table->s->db.str, - m_table->s->table_name.str); + DBUG_RETURN(error); +} + + +Log_event::enum_skip_reason +Old_rows_log_event::do_shall_skip(Relay_log_info *rli) +{ + /* + If the slave skip counter is 1 and this event does not end a + statement, then we should not start executing on the next event. + Otherwise, we defer the decision to the normal skipping logic. 
+ */ + if (rli->slave_skip_counter == 1 && !get_flags(STMT_END_F)) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + +int +Old_rows_log_event::do_update_pos(Relay_log_info *rli) +{ + DBUG_ENTER("Old_rows_log_event::do_update_pos"); + int error= 0; + + DBUG_PRINT("info", ("flags: %s", + get_flags(STMT_END_F) ? "STMT_END_F " : "")); + + if (get_flags(STMT_END_F)) + { + /* + Indicate that a statement is finished. + Step the group log position if we are not in a transaction, + otherwise increase the event log position. + */ + rli->stmt_done(log_pos, when); + /* + Clear any errors in thd->net.last_err*. It is not known if this is + needed or not. It is believed that any errors that may exist in + thd->net.last_err* are allowed. Examples of errors are "key not + found", which is produced in the test case rpl_row_conflicts.test + */ + thd->clear_error(); } else { diff --git a/sql/slave.cc b/sql/slave.cc index 82c9d035fd2..9718b54ea9e 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -2082,8 +2082,7 @@ static int has_temporary_error(THD *thd) @retval 2 No error calling ev->apply_event(), but error calling ev->update_pos(). */ -int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli, - bool skip) +int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli) { int exec_res= 0; @@ -2128,37 +2127,33 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli, ev->when= my_time(0); ev->thd = thd; // because up to this point, ev->thd == 0 - if (skip) - { - int reason= ev->shall_skip(rli); - if (reason == Log_event::EVENT_SKIP_COUNT) - --rli->slave_skip_counter; - pthread_mutex_unlock(&rli->data_lock); - if (reason == Log_event::EVENT_SKIP_NOT) - exec_res= ev->apply_event(rli); + int reason= ev->shall_skip(rli); + if (reason == Log_event::EVENT_SKIP_COUNT) + --rli->slave_skip_counter; + pthread_mutex_unlock(&rli->data_lock); + if (reason == Log_event::EVENT_SKIP_NOT) + exec_res= ev->apply_event(rli); + #ifndef DBUG_OFF - /* - This only prints information to the debug trace. + /* + This only prints information to the debug trace. - TODO: Print an informational message to the error log? - */ - static const char *const explain[] = { - // EVENT_SKIP_NOT, - "not skipped", - // EVENT_SKIP_IGNORE, - "skipped because event should be ignored", - // EVENT_SKIP_COUNT - "skipped because event skip counter was non-zero" - }; - DBUG_PRINT("info", ("OPTION_BEGIN: %d; IN_STMT: %d", - thd->options & OPTION_BEGIN ? 1 : 0, - rli->get_flag(Relay_log_info::IN_STMT))); - DBUG_PRINT("skip_event", ("%s event was %s", - ev->get_type_str(), explain[reason])); + TODO: Print an informational message to the error log? + */ + static const char *const explain[] = { + // EVENT_SKIP_NOT, + "not skipped", + // EVENT_SKIP_IGNORE, + "skipped because event should be ignored", + // EVENT_SKIP_COUNT + "skipped because event skip counter was non-zero" + }; + DBUG_PRINT("info", ("OPTION_BEGIN: %d; IN_STMT: %d", + thd->options & OPTION_BEGIN ? 
1 : 0, + rli->get_flag(Relay_log_info::IN_STMT))); + DBUG_PRINT("skip_event", ("%s event was %s", + ev->get_type_str(), explain[reason])); #endif - } - else - exec_res= ev->apply_event(rli); DBUG_PRINT("info", ("apply_event error = %d", exec_res)); if (exec_res == 0) @@ -2278,7 +2273,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli) delete ev; DBUG_RETURN(1); } - exec_res= apply_event_and_update_pos(ev, thd, rli, TRUE); + exec_res= apply_event_and_update_pos(ev, thd, rli); /* Format_description_log_event should not be deleted because it will be diff --git a/sql/slave.h b/sql/slave.h index a44a7eed83e..f356d28b626 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -190,8 +190,7 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, void set_slave_thread_options(THD* thd); void set_slave_thread_default_charset(THD *thd, Relay_log_info const *rli); void rotate_relay_log(Master_info* mi); -int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli, - bool skip); +int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli); pthread_handler_t handle_slave_io(void *arg); pthread_handler_t handle_slave_sql(void *arg); diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 96e99b57e3c..ee51480411b 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -56,17 +56,20 @@ void mysql_client_binlog_statement(THD* thd) Format_description_event. */ my_bool have_fd_event= TRUE; - if (!thd->rli_fake) + int err; + Relay_log_info *rli; + rli= thd->rli_fake; + if (!rli) { - thd->rli_fake= new Relay_log_info; + rli= thd->rli_fake= new Relay_log_info; #ifdef HAVE_purify - thd->rli_fake->is_fake= TRUE; + rli->is_fake= TRUE; #endif have_fd_event= FALSE; } - if (thd->rli_fake && !thd->rli_fake->relay_log.description_event_for_exec) + if (rli && !rli->relay_log.description_event_for_exec) { - thd->rli_fake->relay_log.description_event_for_exec= + rli->relay_log.description_event_for_exec= new Format_description_log_event(4); have_fd_event= FALSE; } @@ -78,16 +81,16 @@ void mysql_client_binlog_statement(THD* thd) /* Out of memory check */ - if (!(thd->rli_fake && - thd->rli_fake->relay_log.description_event_for_exec && + if (!(rli && + rli->relay_log.description_event_for_exec && buf)) { my_error(ER_OUTOFMEMORY, MYF(0), 1); /* needed 1 bytes */ goto end; } - thd->rli_fake->sql_thd= thd; - thd->rli_fake->no_storage= TRUE; + rli->sql_thd= thd; + rli->no_storage= TRUE; for (char const *strptr= thd->lex->comment.str ; strptr < thd->lex->comment.str + thd->lex->comment.length ; ) @@ -170,8 +173,7 @@ void mysql_client_binlog_statement(THD* thd) } ev= Log_event::read_log_event(bufptr, event_len, &error, - thd->rli_fake->relay_log. - description_event_for_exec); + rli->relay_log.description_event_for_exec); DBUG_PRINT("info",("binlog base64 err=%s", error)); if (!ev) @@ -209,18 +211,10 @@ void mysql_client_binlog_statement(THD* thd) reporting. */ #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - if (apply_event_and_update_pos(ev, thd, thd->rli_fake, FALSE)) - { - delete ev; - /* - TODO: Maybe a better error message since the BINLOG statement - now contains several events. - */ - my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement"); - goto end; - } + err= ev->apply_event(rli); +#else + err= 0; #endif - /* Format_description_log_event should not be deleted because it will be used to read info about the relay log's format; it @@ -228,8 +222,17 @@ void mysql_client_binlog_statement(THD* thd) i.e. when this thread terminates. 
*/ if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) - delete ev; + delete ev; ev= 0; + if (err) + { + /* + TODO: Maybe a better error message since the BINLOG statement + now contains several events. + */ + my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement"); + goto end; + } } } @@ -238,7 +241,7 @@ void mysql_client_binlog_statement(THD* thd) my_ok(thd); end: - thd->rli_fake->clear_tables_to_lock(); + rli->clear_tables_to_lock(); my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_VOID_RETURN; } -- cgit v1.2.1 From 6da93b223b4b929402b432f90b2585d38f2f1e5a Mon Sep 17 00:00:00 2001 From: Jorgen Loland Date: Wed, 14 Oct 2009 10:46:50 +0200 Subject: Bug#47280 - strange results from count(*) with order by multiple columns without where/group Simple SELECT with implicit grouping used to return many rows if the query was ordered by the aggregated column in the SELECT list. This was incorrect because queries with implicit grouping should only return a single record. The problem was that when JOIN:exec() decided if execution needed to handle grouping, it was assumed that sum_func_count==0 meant that there were no aggregate functions in the query. This assumption was not correct in JOIN::exec() because the aggregate functions might have been optimized away during JOIN::optimize(). The reason why queries without ordering behaved correctly was that sum_func_count is only recalculated if the optimizer chooses to use temporary tables (which it does in the ordered case). Hence, non-ordered queries were correctly treated as grouped. The fix for this bug was to remove the assumption that sum_func_count==0 means that there is no need for grouping. This was done by introducing variable "bool implicit_grouping" in the JOIN object. mysql-test/r/func_group.result: Add test for BUG#47280 mysql-test/t/func_group.test: Add test for BUG#47280 sql/opt_sum.cc: Improve comment for opt_sum_query() sql/sql_class.h: Add comment for variables in TMP_TABLE_PARAM sql/sql_select.cc: Introduce and use variable implicit_grouping instead of (!group_list && sum_func_count) in places that need to test if grouping is required. Also added comments for: optimization of aggregate fields for implicitly grouped queries (JOIN::optimize) and choice of end_select method (JOIN::execute) sql/sql_select.h: Add variable implicit_grouping, which will be TRUE for queries that contain aggregate functions but no GROUP BY clause. Also added comment to sort_and_group variable. --- sql/opt_sum.cc | 3 ++- sql/sql_class.h | 27 ++++++++++++++++++++++++++- sql/sql_select.cc | 27 ++++++++++++++++++++++----- sql/sql_select.h | 15 ++++++++++++++- 4 files changed, 64 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 8e7265ba1ad..e009cf1ca9f 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -97,7 +97,8 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables) @note This function is only called for queries with sum functions and no - GROUP BY part. + GROUP BY part. This means that the result set shall contain a single + row only @retval 0 no errors diff --git a/sql/sql_class.h b/sql/sql_class.h index 024a6587b43..922e960c805 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2647,7 +2647,32 @@ public: MI_COLUMNDEF *recinfo,*start_recinfo; KEY *keyinfo; ha_rows end_write_records; - uint field_count,sum_func_count,func_count; + /** + Number of normal fields in the query, including those referred to + from aggregate functions. 
Hence, "SELECT `field1`, + SUM(`field2`) from t1" sets this counter to 2. + + @see count_field_types + */ + uint field_count; + /** + Number of fields in the query that have functions. Includes both + aggregate functions (e.g., SUM) and non-aggregates (e.g., RAND). + Also counts functions referred to from aggregate functions, i.e., + "SELECT SUM(RAND())" sets this counter to 2. + + @see count_field_types + */ + uint func_count; + /** + Number of fields in the query that have aggregate functions. Note + that the optimizer may choose to optimize away these fields by + replacing them with constants, in which case sum_func_count will + need to be updated. + + @see opt_sum_query, count_field_types + */ + uint sum_func_count; uint hidden_field_count; uint group_parts,group_length,group_null_parts; uint quick_group; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9dacb2c2ce4..02e52e0c518 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -644,8 +644,11 @@ JOIN::prepare(Item ***rref_pointer_array, this->group= group_list != 0; unit= unit_arg; + if (tmp_table_param.sum_func_count && !group_list) + implicit_grouping= TRUE; + #ifdef RESTRICTED_GROUP - if (sum_func_count && !group_list && (func_count || field_count)) + if (implicit_grouping) { my_message(ER_WRONG_SUM_SELECT,ER(ER_WRONG_SUM_SELECT),MYF(0)); goto err; @@ -881,15 +884,23 @@ JOIN::optimize() } #endif - /* Optimize count(*), min() and max() */ - if (tables_list && tmp_table_param.sum_func_count && ! group_list) + /* + Try to optimize count(*), min() and max() to const fields if + there is implicit grouping (aggregate functions but no + group_list). In this case, the result set shall only contain one + row. + */ + if (tables_list && implicit_grouping) { int res; /* opt_sum_query() returns HA_ERR_KEY_NOT_FOUND if no rows match to the WHERE conditions, - or 1 if all items were resolved, + or 1 if all items were resolved (optimized away), or 0, or an error number HA_ERR_... + + If all items were resolved by opt_sum_query, there is no need to + open any tables. */ if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) { @@ -2024,7 +2035,7 @@ JOIN::exec() count_field_types(select_lex, &curr_join->tmp_table_param, *curr_all_fields, 0); - if (curr_join->group || curr_join->tmp_table_param.sum_func_count || + if (curr_join->group || curr_join->implicit_grouping || (procedure && (procedure->flags & PROC_GROUP))) { if (make_group_fields(this, curr_join)) @@ -10811,6 +10822,12 @@ Next_select_func setup_end_select_func(JOIN *join) } else { + /* + Choose method for presenting result to user. Use end_send_group + if the query requires grouping (has a GROUP BY clause and/or one or + more aggregate functions). Use end_send if the query should not + be grouped. + */ if ((join->sort_and_group || (join->procedure && join->procedure->flags & PROC_GROUP)) && !tmp_tbl->precomputed_group_by) diff --git a/sql/sql_select.h b/sql/sql_select.h index 3f06b402638..92356a4c96f 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -278,7 +278,14 @@ public: TABLE **table,**all_tables,*sort_by_table; uint tables,const_tables; uint send_group_parts; - bool sort_and_group,first_record,full_join,group, no_field_update; + /** + Indicates that grouping will be performed on the result set during + query execution. This field belongs to query execution. 
+ + @see make_group_fields, alloc_group_fields, JOIN::exec + */ + bool sort_and_group; + bool first_record,full_join,group, no_field_update; bool do_send_rows; /** TRUE when we want to resume nested loop iterations when @@ -428,6 +435,7 @@ public: tables= 0; const_tables= 0; join_list= 0; + implicit_grouping= FALSE; sort_and_group= 0; first_record= 0; do_send_rows= 1; @@ -533,6 +541,11 @@ public: select_lex == unit->fake_select_lex)); } private: + /** + TRUE if the query contains an aggregate function but has no GROUP + BY clause. + */ + bool implicit_grouping; bool make_simple_join(JOIN *join, TABLE *tmp_table); }; -- cgit v1.2.1 From bf14598c9903067474a7cdf1c49bca9f52d0c55a Mon Sep 17 00:00:00 2001 From: Jorgen Loland Date: Wed, 14 Oct 2009 18:20:01 +0200 Subject: Followup patch for BUG#47280 Temporary tables may set join->group to 0 even though there is grouping. Also need to test if sum_func_count>0 when JOIN::exec() decides whether to present results in a grouped manner. sql/sql_select.cc: Temporary tables may set join->group to 0 even though there is grouping. Also need to test if sum_func_count>0 when JOIN::exec() decides whether to present results in a grouped manner. --- sql/sql_select.cc | 1 + 1 file changed, 1 insertion(+) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 02e52e0c518..c5e0bce498d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2036,6 +2036,7 @@ JOIN::exec() *curr_all_fields, 0); if (curr_join->group || curr_join->implicit_grouping || + curr_join->tmp_table_param.sum_func_count || (procedure && (procedure->flags & PROC_GROUP))) { if (make_group_fields(this, curr_join)) -- cgit v1.2.1 From d80e35f26f814bd8c9f976e510e5b2dbda654d12 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 16 Oct 2009 11:42:16 +0300 Subject: Revert the fix for bug #47123 until test suite failures are resolved. --- sql/opt_range.cc | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 04dae4fd815..119f90bc97a 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -5891,17 +5891,6 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, goto end; } field->table->in_use->variables.sql_mode= orig_sql_mode; - - /* - Any sargable predicate except "<=>" involving NULL as a constant is always - FALSE - */ - if (type != Item_func::EQUAL_FUNC && field->is_real_null()) - { - tree= &null_element; - goto end; - } - str= (uchar*) alloc_root(alloc, key_part->store_length+1); if (!str) goto end; -- cgit v1.2.1 From 7b4ef910f7830e85e2bc240b0803b994d6e0446b Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 16 Oct 2009 13:29:42 +0300 Subject: Bug #40877: multi statement execution fails in 5.1.30 Implemented the server infrastructure for the fix: 1. Added a function LEX_STRING *thd_query_string(THD) to return a LEX_STRING structure instead of char *. This is the function that must be called in innodb instead of thd_query() 2. Did some encapsulation in THD : aggregated thd_query and thd_query_length into a LEX_STRING and made accessor and mutator methods for easy code updating. 3. Updated the server code to use the new methods where applicable. 
--- sql/event_data_objects.cc | 2 +- sql/events.cc | 10 +++++----- sql/ha_ndbcluster.cc | 13 +++++++------ sql/ha_ndbcluster_binlog.cc | 8 ++++---- sql/log.cc | 4 ++-- sql/log_event.cc | 14 +++++++------- sql/mysqld.cc | 2 +- sql/slave.cc | 3 ++- sql/sp.cc | 4 ++-- sql/sp_head.cc | 9 +++++---- sql/sql_acl.cc | 14 +++++++------- sql/sql_cache.cc | 15 ++++++++------- sql/sql_class.cc | 44 +++++++++++++++++++++++++++++++++----------- sql/sql_class.h | 7 +++++-- sql/sql_db.cc | 18 +++++++++--------- sql/sql_delete.cc | 8 ++++---- sql/sql_insert.cc | 17 +++++++++-------- sql/sql_load.cc | 9 ++++----- sql/sql_parse.cc | 36 +++++++++++++++++++----------------- sql/sql_partition.cc | 8 ++++---- sql/sql_prepare.cc | 25 ++++++++++++------------- sql/sql_rename.cc | 2 +- sql/sql_show.cc | 12 ++++++------ sql/sql_table.cc | 20 ++++++++++---------- sql/sql_tablespace.cc | 2 +- sql/sql_trigger.cc | 4 ++-- sql/sql_udf.cc | 4 ++-- sql/sql_update.cc | 6 +++--- sql/sql_view.cc | 2 +- sql/sql_yacc.yy | 2 +- 30 files changed, 177 insertions(+), 147 deletions(-) (limited to 'sql') diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index dba32cac6b2..f2ec0e8cf64 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -1433,7 +1433,7 @@ Event_job_data::execute(THD *thd, bool drop) thd->set_query(sp_sql.c_ptr_safe(), sp_sql.length()); { - Parser_state parser_state(thd, thd->query, thd->query_length); + Parser_state parser_state(thd, thd->query(), thd->query_length()); lex_start(thd); if (parse_sql(thd, & parser_state, creation_ctx)) diff --git a/sql/events.cc b/sql/events.cc index 34da0e185b7..458ad61718d 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -465,7 +465,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, if (!dropped) { /* Binlog the create event. */ - DBUG_ASSERT(thd->query && thd->query_length); + DBUG_ASSERT(thd->query() && thd->query_length()); String log_query; if (create_query_string(thd, &log_query)) { @@ -595,8 +595,8 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, event_queue->update_event(thd, parse_data->dbname, parse_data->name, new_element); /* Binlog the alter event. */ - DBUG_ASSERT(thd->query && thd->query_length); - write_bin_log(thd, TRUE, thd->query, thd->query_length); + DBUG_ASSERT(thd->query() && thd->query_length()); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -670,8 +670,8 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) if (event_queue) event_queue->drop_event(thd, dbname, name); /* Binlog the drop event. 
*/ - DBUG_ASSERT(thd->query && thd->query_length); - write_bin_log(thd, TRUE, thd->query, thd->query_length); + DBUG_ASSERT(thd->query() && thd->query_length()); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index dfd7ddc4d6c..9fdb8b628ca 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -5525,7 +5525,7 @@ int ha_ndbcluster::create(const char *name, if (share && !do_event_op) share->flags|= NSF_NO_BINLOG; ndbcluster_log_schema_op(thd, share, - thd->query, thd->query_length, + thd->query(), thd->query_length(), share->db, share->table_name, m_table->getObjectId(), m_table->getObjectVersion(), @@ -5967,7 +5967,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) */ if (!is_old_table_tmpfile) ndbcluster_log_schema_op(current_thd, share, - current_thd->query, current_thd->query_length, + current_thd->query(), + current_thd->query_length(), old_dbname, m_tabname, ndb_table_id, ndb_table_version, SOT_RENAME_TABLE, @@ -6162,7 +6163,7 @@ retry_temporary_error1: current_thd->lex->sql_command != SQLCOM_TRUNCATE) { ndbcluster_log_schema_op(thd, share, - thd->query, thd->query_length, + thd->query(), thd->query_length(), share->db, share->table_name, ndb_table_id, ndb_table_version, SOT_DROP_TABLE, 0, 0, 1); @@ -6884,7 +6885,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path) THD *thd= current_thd; ha_ndbcluster::set_dbname(path, db); ndbcluster_log_schema_op(thd, 0, - thd->query, thd->query_length, + thd->query(), thd->query_length(), db, "", 0, 0, SOT_DROP_DB, 0, 0, 0); #endif DBUG_VOID_RETURN; @@ -10251,13 +10252,13 @@ int ndbcluster_alter_tablespace(handlerton *hton, #ifdef HAVE_NDB_BINLOG if (is_tablespace) ndbcluster_log_schema_op(thd, 0, - thd->query, thd->query_length, + thd->query(), thd->query_length(), "", alter_info->tablespace_name, 0, 0, SOT_TABLESPACE, 0, 0, 0); else ndbcluster_log_schema_op(thd, 0, - thd->query, thd->query_length, + thd->query(), thd->query_length(), "", alter_info->logfile_group_name, 0, 0, SOT_LOGFILE_GROUP, 0, 0, 0); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 2a87697c7fa..27af3f2cf2f 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -241,8 +241,8 @@ static void dbug_print_table(const char *info, TABLE *table) static void run_query(THD *thd, char *buf, char *end, const int *no_print_error, my_bool disable_binlog) { - ulong save_thd_query_length= thd->query_length; - char *save_thd_query= thd->query; + ulong save_thd_query_length= thd->query_length(); + char *save_thd_query= thd->query(); ulong save_thread_id= thd->variables.pseudo_thread_id; struct system_status_var save_thd_status_var= thd->status_var; THD_TRANS save_thd_transaction_all= thd->transaction.all; @@ -259,12 +259,12 @@ static void run_query(THD *thd, char *buf, char *end, if (disable_binlog) thd->options&= ~OPTION_BIN_LOG; - DBUG_PRINT("query", ("%s", thd->query)); + DBUG_PRINT("query", ("%s", thd->query())); DBUG_ASSERT(!thd->in_sub_stmt); DBUG_ASSERT(!thd->prelocked_mode); - mysql_parse(thd, thd->query, thd->query_length, &found_semicolon); + mysql_parse(thd, thd->query(), thd->query_length(), &found_semicolon); if (no_print_error && thd->is_slave_error) { diff --git a/sql/log.cc b/sql/log.cc index e8366c47863..e1c3944392f 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1717,7 +1717,7 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) 
int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED); int const error= thd->binlog_query(THD::STMT_QUERY_TYPE, - thd->query, thd->query_length, TRUE, FALSE, errcode); + thd->query(), thd->query_length(), TRUE, FALSE, errcode); DBUG_RETURN(error); } @@ -1736,7 +1736,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED); int error= thd->binlog_query(THD::STMT_QUERY_TYPE, - thd->query, thd->query_length, TRUE, FALSE, errcode); + thd->query(), thd->query_length(), TRUE, FALSE, errcode); DBUG_RETURN(error); } binlog_trans_log_truncate(thd, *(my_off_t*)sv); diff --git a/sql/log_event.cc b/sql/log_event.cc index 0e1500dac39..76532836b73 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3043,7 +3043,7 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli, thd->query_id = next_query_id(); VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->variables.pseudo_thread_id= thread_id; // for temp tables - DBUG_PRINT("query",("%s",thd->query)); + DBUG_PRINT("query",("%s", thd->query())); if (ignored_error_code((expected_error= error_code)) || !unexpected_error_code(expected_error)) @@ -3137,7 +3137,7 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli, /* Execute the query (note that we bypass dispatch_command()) */ const char* found_semicolon= NULL; - mysql_parse(thd, thd->query, thd->query_length, &found_semicolon); + mysql_parse(thd, thd->query(), thd->query_length(), &found_semicolon); log_slow_statement(thd); } else @@ -3149,7 +3149,7 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli, we exit gracefully; otherwise we warn about the bad error and tell DBA to check/fix it. */ - if (mysql_test_parse_for_slave(thd, thd->query, thd->query_length)) + if (mysql_test_parse_for_slave(thd, thd->query(), thd->query_length())) clear_all_errors(thd, const_cast(rli)); /* Can ignore query */ else { @@ -3159,7 +3159,7 @@ Query partially completed on the master (error on master: %d) \ and was aborted. There is a chance that your master is inconsistent at this \ point. If you are sure that your master is ok, run this query manually on the \ slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; \ -START SLAVE; . Query: '%s'", expected_error, thd->query); +START SLAVE; . Query: '%s'", expected_error, thd->query()); thd->is_slave_error= 1; } goto end; @@ -3167,7 +3167,7 @@ START SLAVE; . 
Query: '%s'", expected_error, thd->query); /* If the query was not ignored, it is printed to the general log */ if (!thd->is_error() || thd->main_da.sql_errno() != ER_SLAVE_IGNORED_TABLE) - general_log_write(thd, COM_QUERY, thd->query, thd->query_length); + general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); compare_errors: @@ -4498,8 +4498,8 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, new_db.length= db_len; new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); thd->set_db(new_db.str, new_db.length); - DBUG_ASSERT(thd->query == 0); - thd->query_length= 0; // Should not be needed + DBUG_ASSERT(thd->query() == 0); + thd->set_query_inner(NULL, 0); // Should not be needed thd->is_slave_error= 0; clear_all_errors(thd, const_cast(rli)); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 896be4a7f19..722fe4872de 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2520,7 +2520,7 @@ terribly wrong...\n"); } fprintf(stderr, "Trying to get some variables.\n\ Some pointers may be invalid and cause the dump to abort...\n"); - my_safe_print_str("thd->query", thd->query, 1024); + my_safe_print_str("thd->query", thd->query(), 1024); fprintf(stderr, "thd->thread_id=%lu\n", (ulong) thd->thread_id); fprintf(stderr, "thd->killed=%s\n", kreason); } diff --git a/sql/slave.cc b/sql/slave.cc index 9718b54ea9e..f6660e5a5c8 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1288,7 +1288,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, thd->db = (char*)db; DBUG_ASSERT(thd->db != 0); thd->db_length= strlen(thd->db); - mysql_parse(thd, thd->query, packet_len, &found_semicolon); // run create table + /* run create table */ + mysql_parse(thd, thd->query(), packet_len, &found_semicolon); thd->db = save_db; // leave things the way the were before thd->db_length= save_db_length; thd->options = save_options; diff --git a/sql/sp.cc b/sql/sp.cc index 4d840f53e2f..fd420732628 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -997,7 +997,7 @@ sp_drop_routine(THD *thd, int type, sp_name *name) if (ret == SP_OK) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); sp_cache_invalidate(); } @@ -1067,7 +1067,7 @@ sp_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics) if (ret == SP_OK) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); sp_cache_invalidate(); } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 0736e5fc2a8..78b3df525f5 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2826,8 +2826,8 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) DBUG_ENTER("sp_instr_stmt::execute"); DBUG_PRINT("info", ("command: %d", m_lex_keeper.sql_command())); - query= thd->query; - query_length= thd->query_length; + query= thd->query(); + query_length= thd->query_length(); #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) /* This s-p instr is profilable and will be captured. 
*/ thd->profiling.set_query_source(m_query.str, m_query.length); @@ -2840,10 +2840,11 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) queries with SP vars can't be cached) */ if (unlikely((thd->options & OPTION_LOG_OFF)==0)) - general_log_write(thd, COM_QUERY, thd->query, thd->query_length); + general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); if (query_cache_send_result_to_client(thd, - thd->query, thd->query_length) <= 0) + thd->query(), + thd->query_length()) <= 0) { res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this); diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index d7d662f912d..6ebdae49bd4 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -3184,7 +3184,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (!result) /* success */ { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } rw_unlock(&LOCK_grant); @@ -3349,7 +3349,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc, if (write_to_binlog) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } rw_unlock(&LOCK_grant); @@ -3461,7 +3461,7 @@ bool mysql_grant(THD *thd, const char *db, List &list, if (!result) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } rw_unlock(&LOCK_grant); @@ -5663,7 +5663,7 @@ bool mysql_create_user(THD *thd, List &list) my_error(ER_CANNOT_USER, MYF(0), "CREATE USER", wrong_users.c_ptr_safe()); if (some_users_created) - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); rw_unlock(&LOCK_grant); close_thread_tables(thd); @@ -5736,7 +5736,7 @@ bool mysql_drop_user(THD *thd, List &list) my_error(ER_CANNOT_USER, MYF(0), "DROP USER", wrong_users.c_ptr_safe()); if (some_users_deleted) - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); rw_unlock(&LOCK_grant); close_thread_tables(thd); @@ -5821,7 +5821,7 @@ bool mysql_rename_user(THD *thd, List &list) my_error(ER_CANNOT_USER, MYF(0), "RENAME USER", wrong_users.c_ptr_safe()); if (some_users_renamed && mysql_bin_log.is_open()) - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); rw_unlock(&LOCK_grant); close_thread_tables(thd); @@ -6003,7 +6003,7 @@ bool mysql_revoke_all(THD *thd, List &list) VOID(pthread_mutex_unlock(&acl_cache->lock)); - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); rw_unlock(&LOCK_grant); close_thread_tables(thd); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index a41b7bd40bb..861bd97928d 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1119,8 +1119,8 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) DBUG_VOID_RETURN; uint8 tables_type= 0; - if ((local_tables= is_cacheable(thd, thd->query_length, - thd->query, thd->lex, tables_used, + if ((local_tables= is_cacheable(thd, thd->query_length(), + thd->query(), thd->lex, tables_used, &tables_type))) { NET *net= &thd->net; @@ -1210,7 +1210,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", /* Key is query + database + flag */ if (thd->db_length) { - memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length); + memcpy(thd->query() + thd->query_length() + 1, thd->db, + thd->db_length); 
DBUG_PRINT("qcache", ("database: %s length: %u", thd->db, (unsigned) thd->db_length)); } @@ -1218,24 +1219,24 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", { DBUG_PRINT("qcache", ("No active database")); } - tot_length= thd->query_length + thd->db_length + 1 + + tot_length= thd->query_length() + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE; /* We should only copy structure (don't use it location directly) because of alignment issue */ - memcpy((void *)(thd->query + (tot_length - QUERY_CACHE_FLAGS_SIZE)), + memcpy((void*) (thd->query() + (tot_length - QUERY_CACHE_FLAGS_SIZE)), &flags, QUERY_CACHE_FLAGS_SIZE); /* Check if another thread is processing the same query? */ Query_cache_block *competitor = (Query_cache_block *) - hash_search(&queries, (uchar*) thd->query, tot_length); + hash_search(&queries, (uchar*) thd->query(), tot_length); DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor)); if (competitor == 0) { /* Query is not in cache and no one is working with it; Store it */ Query_cache_block *query_block; - query_block= write_block_data(tot_length, (uchar*) thd->query, + query_block= write_block_data(tot_length, (uchar*) thd->query(), ALIGN_SIZE(sizeof(Query_cache_query)), Query_cache_block::QUERY, local_tables); if (query_block != 0) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index f75dc2cb88a..aa556810402 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -376,14 +376,14 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, str.append(proc_info); } - if (thd->query) + if (thd->query()) { if (max_query_len < 1) - len= thd->query_length; + len= thd->query_length(); else - len= min(thd->query_length, max_query_len); + len= min(thd->query_length(), max_query_len); str.append('\n'); - str.append(thd->query, len); + str.append(thd->query(), len); } if (str.c_ptr_safe() == buffer) return buffer; @@ -2434,12 +2434,12 @@ Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg, id(id_arg), mark_used_columns(MARK_COLUMNS_READ), lex(lex_arg), - query(0), - query_length(0), cursor(0), db(NULL), db_length(0) { + query_string.length= 0; + query_string.str= NULL; name.str= NULL; } @@ -2455,8 +2455,7 @@ void Statement::set_statement(Statement *stmt) id= stmt->id; mark_used_columns= stmt->mark_used_columns; lex= stmt->lex; - query= stmt->query; - query_length= stmt->query_length; + query_string= stmt->query_string; cursor= stmt->cursor; } @@ -2480,6 +2479,15 @@ void Statement::restore_backup_statement(Statement *stmt, Statement *backup) } +/** Assign a new value to thd->query. */ + +void Statement::set_query_inner(char *query_arg, uint32 query_length_arg) +{ + query_string.str= query_arg; + query_string.length= query_length_arg; +} + + void THD::end_statement() { /* Cleanup SQL processing state to reuse this statement in next query. */ @@ -2993,9 +3001,24 @@ extern "C" struct charset_info_st *thd_charset(MYSQL_THD thd) return(thd->charset()); } +/** + OBSOLETE : there's no way to ensure the string is null terminated. + Use thd_query_string instead() +*/ extern "C" char **thd_query(MYSQL_THD thd) { - return(&thd->query); + return(&thd->query_string.str); +} + +/** + Get the current query string for the thread. + + @param The MySQL internal thread pointer + @return query string and length. May be non-null-terminated. 
+*/ +extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd) +{ + return(&thd->query_string); } extern "C" int thd_slave_thread(const MYSQL_THD thd) @@ -3174,8 +3197,7 @@ void THD::set_statement(Statement *stmt) void THD::set_query(char *query_arg, uint32 query_length_arg) { pthread_mutex_lock(&LOCK_thd_data); - query= query_arg; - query_length= query_length_arg; + set_query_inner(query_arg, query_length_arg); pthread_mutex_unlock(&LOCK_thd_data); } diff --git a/sql/sql_class.h b/sql/sql_class.h index 922e960c805..865fc834c68 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -642,10 +642,13 @@ public: This printing is needed at least in SHOW PROCESSLIST and SHOW ENGINE INNODB STATUS. */ - char *query; - uint32 query_length; // current query length + LEX_STRING query_string; Server_side_cursor *cursor; + inline char *query() { return query_string.str; } + inline uint32 query_length() { return query_string.length; } + void set_query_inner(char *query_arg, uint32 query_length_arg); + /** Name of the current (default) database. diff --git a/sql/sql_db.cc b/sql/sql_db.cc index c19bfba9fc1..e6ccd9aa594 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -703,7 +703,7 @@ not_silent: char *query; uint query_length; - if (!thd->query) // Only in replication + if (!thd->query()) // Only in replication { query= tmp_query; query_length= (uint) (strxmov(tmp_query,"create database `", @@ -711,8 +711,8 @@ not_silent: } else { - query= thd->query; - query_length= thd->query_length; + query= thd->query(); + query_length= thd->query_length(); } ha_binlog_log_query(thd, 0, LOGCOM_CREATE_DB, @@ -805,13 +805,13 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) } ha_binlog_log_query(thd, 0, LOGCOM_ALTER_DB, - thd->query, thd->query_length, + thd->query(), thd->query_length(), db, ""); if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, TRUE); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, + Query_log_event qinfo(thd, thd->query(), thd->query_length(), 0, /* suppress_use */ TRUE, errcode); /* @@ -948,7 +948,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { const char *query; ulong query_length; - if (!thd->query) + if (!thd->query()) { /* The client used the old obsolete mysql_drop_db() call */ query= path; @@ -957,8 +957,8 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } else { - query =thd->query; - query_length= thd->query_length; + query= thd->query(); + query_length= thd->query_length(); } if (mysql_bin_log.is_open()) { @@ -1964,7 +1964,7 @@ bool mysql_upgrade_db(THD *thd, LEX_STRING *old_db) if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, TRUE); - Query_log_event qinfo(thd, thd->query, thd->query_length, + Query_log_event qinfo(thd, thd->query(), thd->query_length(), 0, TRUE, errcode); thd->clear_error(); mysql_bin_log.write(&qinfo); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index a81a5f4641f..b80138af7f2 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -413,7 +413,7 @@ cleanup: therefore be treated as a DDL. 
*/ int log_result= thd->binlog_query(query_type, - thd->query, thd->query_length, + thd->query(), thd->query_length(), is_trans, FALSE, errcode); if (log_result) @@ -850,7 +850,7 @@ void multi_delete::abort() { int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED); thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_tables, FALSE, errcode); } thd->transaction.all.modified_non_trans_table= true; @@ -1024,7 +1024,7 @@ bool multi_delete::send_eof() else errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); if (thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_tables, FALSE, errcode) && !normal_tables) { @@ -1166,7 +1166,7 @@ end: TRUNCATE must always be statement-based binlogged (not row-based) so we don't test current_stmt_binlog_row_based. */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); my_ok(thd); // This should return record count } VOID(pthread_mutex_lock(&LOCK_open)); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 3ac40ae825a..6e2b3903b52 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -567,7 +567,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, Name_resolution_context *context; Name_resolution_context_state ctx_state; #ifndef EMBEDDED_LIBRARY - char *query= thd->query; + char *query= thd->query(); /* log_on is about delayed inserts only. By default, both logs are enabled (this won't cause problems if the server @@ -801,7 +801,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { - LEX_STRING const st_query = { query, thd->query_length }; + LEX_STRING const st_query = { query, thd->query_length() }; error=write_delayed(thd, table, duplic, st_query, ignore, log_on); query=0; } @@ -894,7 +894,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0); if (thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_table, FALSE, errcode)) { @@ -1764,7 +1764,7 @@ public: pthread_cond_destroy(&cond); pthread_cond_destroy(&cond_client); thd.unlink(); // Must be unlinked under lock - x_free(thd.query); + x_free(thd.query()); thd.security_ctx->user= thd.security_ctx->host=0; thread_count--; delayed_insert_threads--; @@ -1910,7 +1910,7 @@ bool delayed_get_table(THD *thd, TABLE_LIST *table_list) pthread_mutex_unlock(&LOCK_thread_count); di->thd.set_db(table_list->db, (uint) strlen(table_list->db)); di->thd.set_query(my_strdup(table_list->table_name, MYF(MY_WME)), 0); - if (di->thd.db == NULL || di->thd.query == NULL) + if (di->thd.db == NULL || di->thd.query() == NULL) { /* The error is reported */ delete di; @@ -1919,7 +1919,7 @@ bool delayed_get_table(THD *thd, TABLE_LIST *table_list) } di->table_list= *table_list; // Needed to open table /* Replace volatile strings with local copies */ - di->table_list.alias= di->table_list.table_name= di->thd.query; + di->table_list.alias= di->table_list.table_name= di->thd.query(); di->table_list.db= di->thd.db; di->lock(); pthread_mutex_lock(&di->mutex); @@ -3250,7 +3250,7 @@ bool select_insert::send_eof() else errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), trans_table, FALSE, 
errcode); } table->file->ha_release_auto_increment(); @@ -3320,7 +3320,8 @@ void select_insert::abort() { if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED); - thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length, + thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query(), + thd->query_length(), transactional_table, FALSE, errcode); } if (!thd->current_stmt_binlog_row_based && !can_rollback_data()) diff --git a/sql/sql_load.cc b/sql/sql_load.cc index e830e29176b..59ba7b16d62 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -654,13 +654,12 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, strcpy(end, p); end += pl; - thd->query_length= end - load_data_query; - thd->query= load_data_query; + thd->set_query_inner(load_data_query, end - load_data_query); Execute_load_query_log_event - e(thd, thd->query, thd->query_length, - (uint) ((char*)fname_start - (char*)thd->query - 1), - (uint) ((char*)fname_end - (char*)thd->query), + e(thd, thd->query(), thd->query_length(), + (uint) ((char*) fname_start - (char*) thd->query() - 1), + (uint) ((char*) fname_end - (char*) thd->query()), (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE : (ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR), transactional_table, FALSE, errcode); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 27688e41d4f..1779680f350 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -477,10 +477,10 @@ static void handle_bootstrap_impl(THD *thd) thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE); thd->set_query(query, length); - DBUG_PRINT("query",("%-.4096s",thd->query)); + DBUG_PRINT("query",("%-.4096s", thd->query())); #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) thd->profiling.start_new_query(); - thd->profiling.set_query_source(thd->query, length); + thd->profiling.set_query_source(thd->query(), length); #endif /* @@ -489,7 +489,7 @@ static void handle_bootstrap_impl(THD *thd) */ thd->query_id=next_query_id(); thd->set_time(); - mysql_parse(thd, thd->query, length, & found_semicolon); + mysql_parse(thd, thd->query(), length, & found_semicolon); close_thread_tables(thd); // Free tables bootstrap_error= thd->is_error(); @@ -1208,20 +1208,20 @@ bool dispatch_command(enum enum_server_command command, THD *thd, { if (alloc_query(thd, packet, packet_length)) break; // fatal error is set - char *packet_end= thd->query + thd->query_length; + char *packet_end= thd->query() + thd->query_length(); /* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */ const char* end_of_stmt= NULL; - general_log_write(thd, command, thd->query, thd->query_length); - DBUG_PRINT("query",("%-.4096s",thd->query)); + general_log_write(thd, command, thd->query(), thd->query_length()); + DBUG_PRINT("query",("%-.4096s",thd->query())); #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) - thd->profiling.set_query_source(thd->query, thd->query_length); + thd->profiling.set_query_source(thd->query(), thd->query_length()); #endif if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),QUERY_PRIOR); - mysql_parse(thd, thd->query, thd->query_length, &end_of_stmt); + mysql_parse(thd, thd->query(), thd->query_length(), &end_of_stmt); while (!thd->killed && (end_of_stmt != NULL) && ! 
thd->is_error()) { @@ -1664,7 +1664,8 @@ void log_slow_statement(THD *thd) { thd_proc_info(thd, "logging slow query"); thd->status_var.long_query_count++; - slow_log_print(thd, thd->query, thd->query_length, end_utime_of_query); + slow_log_print(thd, thd->query(), thd->query_length(), + end_utime_of_query); } } DBUG_VOID_RETURN; @@ -2975,7 +2976,7 @@ end_with_restore_list: /* Presumably, REPAIR and binlog writing doesn't require synchronization */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } select_lex->table_list.first= (uchar*) first_table; lex->query_tables=all_tables; @@ -3007,7 +3008,7 @@ end_with_restore_list: /* Presumably, ANALYZE and binlog writing doesn't require synchronization */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } select_lex->table_list.first= (uchar*) first_table; lex->query_tables=all_tables; @@ -3030,7 +3031,7 @@ end_with_restore_list: /* Presumably, OPTIMIZE and binlog writing doesn't require synchronization */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } select_lex->table_list.first= (uchar*) first_table; lex->query_tables=all_tables; @@ -3982,7 +3983,7 @@ end_with_restore_list: */ if (!lex->no_write_to_binlog && write_to_binlog) { - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); } my_ok(thd); } @@ -4559,7 +4560,7 @@ create_sp_error: case SP_KEY_NOT_FOUND: if (lex->drop_if_exists) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), SP_COM_STRING(lex), lex->spname->m_name.str); @@ -5950,9 +5951,10 @@ void mysql_parse(THD *thd, const char *inBuf, uint length, PROCESSLIST. Note that we don't need LOCK_thread_count to modify query_length. 
*/ - if (*found_semicolon && - (thd->query_length= (ulong)(*found_semicolon - thd->query))) - thd->query_length--; + if (*found_semicolon && (ulong) (*found_semicolon - thd->query())) + thd->set_query_inner(thd->query(), + (uint32) (*found_semicolon - + thd->query() - 1)); /* Actually execute the query */ if (*found_semicolon) { diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 2fb09ab7566..04d22174c8e 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -4077,7 +4077,7 @@ static int fast_end_partition(THD *thd, ulonglong copied, if ((!is_empty) && (!written_bin_log) && (!thd->lex->no_write_to_binlog)) - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO), (ulong) (copied + deleted), @@ -6235,7 +6235,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_CRASH("crash_drop_partition_5") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, - thd->query, thd->query_length), FALSE)) || + thd->query(), thd->query_length()), FALSE)) || ERROR_INJECT_CRASH("crash_drop_partition_6") || ((frm_install= TRUE), FALSE) || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || @@ -6302,7 +6302,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_CRASH("crash_add_partition_5") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, - thd->query, thd->query_length), FALSE)) || + thd->query(), thd->query_length()), FALSE)) || ERROR_INJECT_CRASH("crash_add_partition_6") || write_log_rename_frm(lpt) || (not_completed= FALSE) || @@ -6392,7 +6392,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_CRASH("crash_change_partition_6") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, - thd->query, thd->query_length), FALSE)) || + thd->query(), thd->query_length()), FALSE)) || ERROR_INJECT_CRASH("crash_change_partition_7") || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || ERROR_INJECT_CRASH("crash_change_partition_8") || diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index c1839b7220f..3f1727ccb39 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -752,7 +752,7 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array, const String *res; DBUG_ENTER("insert_params_with_log"); - if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + if (query->copy(stmt->query(), stmt->query_length(), default_charset_info)) DBUG_RETURN(1); for (Item_param **it= begin; it < end; ++it) @@ -914,7 +914,7 @@ static bool emb_insert_params_with_log(Prepared_statement *stmt, DBUG_ENTER("emb_insert_params_with_log"); - if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + if (query->copy(stmt->query(), stmt->query_length(), default_charset_info)) DBUG_RETURN(1); for (; it < end; ++it, ++client_param) @@ -1065,7 +1065,7 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, DBUG_ENTER("insert_params_from_vars"); - if (query->copy(stmt->query, stmt->query_length, default_charset_info)) + if (query->copy(stmt->query(), stmt->query_length(), default_charset_info)) DBUG_RETURN(1); for (Item_param **it= begin; it < end; ++it) @@ -2457,9 +2457,9 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length) } #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) - thd->profiling.set_query_source(stmt->query, stmt->query_length); + thd->profiling.set_query_source(stmt->query(), stmt->query_length()); 
#endif - DBUG_PRINT("exec_query", ("%s", stmt->query)); + DBUG_PRINT("exec_query", ("%s", stmt->query())); DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt)); sp_cache_flush_obsolete(&thd->sp_proc_cache); @@ -3029,7 +3029,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) old_stmt_arena= thd->stmt_arena; thd->stmt_arena= this; - Parser_state parser_state(thd, thd->query, thd->query_length); + Parser_state parser_state(thd, thd->query(), thd->query_length()); parser_state.m_lip.stmt_prepare_mode= TRUE; lex_start(thd); @@ -3118,7 +3118,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) the general log. */ if (thd->spcont == NULL) - general_log_write(thd, COM_STMT_PREPARE, query, query_length); + general_log_write(thd, COM_STMT_PREPARE, query(), query_length()); } DBUG_RETURN(error); } @@ -3309,7 +3309,7 @@ Prepared_statement::reprepare() return TRUE; error= ((name.str && copy.set_name(&name)) || - copy.prepare(query, query_length) || + copy.prepare(query(), query_length()) || validate_metadata(©)); if (cur_db_changed) @@ -3547,8 +3547,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) to point at it even after we restore from backup. This is ok, as expanded query was allocated in thd->mem_root. */ - stmt_backup.query= thd->query; - stmt_backup.query_length= thd->query_length; + stmt_backup.set_query_inner(thd->query(), thd->query_length()); /* At first execution of prepared statement we may perform logical @@ -3573,8 +3572,8 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) Note that multi-statements cannot exist here (they are not supported in prepared statements). */ - if (query_cache_send_result_to_client(thd, thd->query, - thd->query_length) <= 0) + if (query_cache_send_result_to_client(thd, thd->query(), + thd->query_length()) <= 0) { error= mysql_execute_command(thd); } @@ -3619,7 +3618,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) the general log. */ if (error == 0 && thd->spcont == NULL) - general_log_write(thd, COM_STMT_EXECUTE, thd->query, thd->query_length); + general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length()); error: flags&= ~ (uint) IS_IN_USE; diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 0e0b8eb60b9..dac96f2e9c4 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -177,7 +177,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) /* Lets hope this doesn't fail as the result will be messy */ if (!silent && !error) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); my_ok(thd); } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 75c9ea6c524..2c1f360104b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1864,10 +1864,10 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) thd_info->query=0; /* Lock THD mutex that protects its data when looking at it. 
*/ pthread_mutex_lock(&tmp->LOCK_thd_data); - if (tmp->query) + if (tmp->query()) { - uint length= min(max_query_length, tmp->query_length); - thd_info->query=(char*) thd->strmake(tmp->query,length); + uint length= min(max_query_length, tmp->query_length()); + thd_info->query= (char*) thd->strmake(tmp->query(),length); } pthread_mutex_unlock(&tmp->LOCK_thd_data); thread_infos.append(thd_info); @@ -1992,11 +1992,11 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) pthread_mutex_unlock(&mysys_var->mutex); /* INFO */ - if (tmp->query) + if (tmp->query()) { - table->field[7]->store(tmp->query, + table->field[7]->store(tmp->query(), min(PROCESS_LIST_INFO_WIDTH, - tmp->query_length), cs); + tmp->query_length()), cs); table->field[7]->set_notnull(); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 1d147c54059..ec50c4ec1a5 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2089,7 +2089,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, tables). In this case, we can write the original query into the binary log. */ - write_bin_log(thd, !error, thd->query, thd->query_length); + write_bin_log(thd, !error, thd->query(), thd->query_length()); } else if (thd->current_stmt_binlog_row_based && tmp_table_deleted) @@ -3554,7 +3554,7 @@ static inline void write_create_table_bin_log(THD *thd, (!thd->current_stmt_binlog_row_based || (thd->current_stmt_binlog_row_based && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)))) - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } @@ -5427,14 +5427,14 @@ binlog: write_bin_log(thd, TRUE, query.ptr(), query.length()); } else // Case 1 - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } /* Case 3 and 4 does nothing under RBR */ } else - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); res= FALSE; @@ -5522,7 +5522,7 @@ mysql_discard_or_import_tablespace(THD *thd, error=1; if (error) goto err; - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); err: ha_autocommit_or_rollback(thd, error); @@ -6531,7 +6531,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, + Query_log_event qinfo(thd, thd->query(), thd->query_length(), 0, FALSE, 0); mysql_bin_log.write(&qinfo); } @@ -6785,7 +6785,7 @@ view_err: if (!error) { - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); my_ok(thd); } else if (error > 0) @@ -7275,7 +7275,7 @@ view_err: goto err1; /* We don't replicate alter table statement on temporary tables */ if (!thd->current_stmt_binlog_row_based) - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); goto end_temporary; } @@ -7432,13 +7432,13 @@ view_err: DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000);); ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), db, table_name); DBUG_ASSERT(!(mysql_bin_log.is_open() && thd->current_stmt_binlog_row_based && (create_info->options & HA_LEX_CREATE_TMP_TABLE))); - write_bin_log(thd, TRUE, thd->query, thd->query_length); + 
write_bin_log(thd, TRUE, thd->query(), thd->query_length()); if (ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME)) { diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc index 9fec0e3bc63..fcc442a8f9a 100644 --- a/sql/sql_tablespace.cc +++ b/sql/sql_tablespace.cc @@ -66,6 +66,6 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) ha_resolve_storage_engine_name(hton), "TABLESPACE or LOGFILE GROUP"); } - write_bin_log(thd, FALSE, thd->query, thd->query_length); + write_bin_log(thd, FALSE, thd->query(), thd->query_length()); DBUG_RETURN(FALSE); } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index c055268ecca..a251a533622 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -409,7 +409,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) */ result= FALSE; /* Still, we need to log the query ... */ - stmt_query.append(thd->query, thd->query_length); + stmt_query.append(thd->query(), thd->query_length()); goto end; } } @@ -918,7 +918,7 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables, List_iterator it_connection_cl_name(connection_cl_names); List_iterator it_db_cl_name(db_cl_names); - stmt_query->append(thd->query, thd->query_length); + stmt_query->append(thd->query(), thd->query_length()); while ((name= it_name++)) { diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index c60dac42fb8..c6b41b59a3f 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -506,7 +506,7 @@ int mysql_create_function(THD *thd,udf_func *udf) rw_unlock(&THR_LOCK_udf); /* Binlog the create function. */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); DBUG_RETURN(0); @@ -581,7 +581,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) rw_unlock(&THR_LOCK_udf); /* Binlog the drop function. */ - write_bin_log(thd, TRUE, thd->query, thd->query_length); + write_bin_log(thd, TRUE, thd->query(), thd->query_length()); DBUG_RETURN(0); err: diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 152613c0009..5fc7f07c6e1 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -810,7 +810,7 @@ int mysql_update(THD *thd, errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); if (thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_table, FALSE, errcode)) { error=1; // Rollback update @@ -1860,7 +1860,7 @@ void multi_update::abort() */ int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED); thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_tables, FALSE, errcode); } thd->transaction.all.modified_non_trans_table= TRUE; @@ -2093,7 +2093,7 @@ bool multi_update::send_eof() else errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); if (thd->binlog_query(THD::ROW_QUERY_TYPE, - thd->query, thd->query_length, + thd->query(), thd->query_length(), transactional_tables, FALSE, errcode)) { local_error= 1; // Rollback update diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 43d0b9fade0..ae3af0640a3 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1652,7 +1652,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) /* if something goes wrong, bin-log with possible error code, otherwise bin-log with error code cleared. 
*/ - write_bin_log(thd, !something_wrong, thd->query, thd->query_length); + write_bin_log(thd, !something_wrong, thd->query(), thd->query_length()); } VOID(pthread_mutex_unlock(&LOCK_open)); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 12e124230e5..37c583ad572 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -10724,7 +10724,7 @@ param_marker: my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); MYSQL_YYABORT; } - item= new (thd->mem_root) Item_param((uint) (lip->get_tok_start() - thd->query)); + item= new (thd->mem_root) Item_param((uint) (lip->get_tok_start() - thd->query())); if (!($$= item) || lex->param_list.push_back(item)) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); -- cgit v1.2.1 From 3bd24616684258cc45915e7f0a550dc20c7385bd Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Fri, 16 Oct 2009 13:12:21 +0200 Subject: Bug#46019: ERROR 1356 When selecting from within another view that has Group By When SELECT'ing from a view that mentions another, materialized, view, access was being denied. The issue was resolved by lifting a special case which avoided such access checking in check_single_table_access. In the past, this was necessary since if such a check were performed, the error message would be downgraded to a warning in the case of SHOW CREATE VIEW. The downgrading of errors was meant to handle only that scenario, but could not distinguish the two as it read only the error messages. The special case was needed in the fix of bug no 36086. Before that, views were confused with derived tables. After bug no 35996 was fixed, the manipulation of errors during SHOW CREATE VIEW execution is not dependent on the actual error messages in the queue, it rather looks at the actual cause of the error and takes appropriate action. Hence the aforementioned special case is now superfluous and the bug is fixed. mysql-test/r/view_grant.result: Bug#46019: Test result. mysql-test/t/view_grant.test: Bug#46019: Test case. sql/sql_parse.cc: Bug#46019: fix. --- sql/sql_parse.cc | 2 -- 1 file changed, 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 27688e41d4f..50c738662d2 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5089,8 +5089,6 @@ bool check_single_table_access(THD *thd, ulong privilege, /* Show only 1 table for check_grant */ if (!(all_tables->belong_to_view && (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) && - !(all_tables->view && - all_tables->effective_algorithm == VIEW_ALGORITHM_TMPTABLE) && check_grant(thd, privilege, all_tables, 0, 1, no_errors)) goto deny; -- cgit v1.2.1 From f6868a4eb42f39a5f4e8179b91fe38f0ff299ebe Mon Sep 17 00:00:00 2001 From: Alexey Kopytov Date: Sat, 17 Oct 2009 00:19:51 +0400 Subject: Bug #47123: Endless 100% CPU loop with STRAIGHT_JOIN The problem was in incorrect handling of predicates involving NULL as a constant value by the range optimizer. For example, when creating a SEL_ARG node from a condition of the form "field < const" (which would normally result in the "NULL < field < const" SEL_ARG), the special case when "const" is NULL was not taken into account, so "NULL < field < NULL" was produced for the "field < NULL" condition. As a result, SEL_ARG structures of this form could not be further optimized which in turn could lead to incorrectly constructed SEL_ARG trees. In particular, code assuming SEL_ARG structures to always form a sequence of ordered disjoint intervals could enter an infinite loop under some circumstances. 
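For illustration, a hypothetical query shape matching the condition described above (table and column names are invented, not taken from the bug report):

  SELECT 1
  FROM t1 STRAIGHT_JOIN t2 ON t1.a = t2.a
  WHERE t2.key_col < NULL;

Before the fix, the "t2.key_col < NULL" predicate was turned into the interval "NULL < key_col < NULL" instead of an empty interval, producing exactly the kind of malformed SEL_ARG that could send the optimizer into the endless loop.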
Fixed by changing get_mm_leaf() so that for any sargable predicate except "<=>" involving NULL as a constant, "empty" SEL_ARG is returned, since such a predicate is always false. mysql-test/r/partition_pruning.result: Fixed a broken test case. mysql-test/r/range.result: Added a test case for bug #47123. mysql-test/r/subselect.result: Fixed a broken test cases. mysql-test/t/range.test: Added a test case for bug #47123. sql/opt_range.cc: Fixed get_mm_leaf() so that for any sargable predicate except "<=>" involving NULL as a constant, "empty" SEL_ARG is returned, since such a predicate is always false. --- sql/opt_range.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 119f90bc97a..04dae4fd815 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -5891,6 +5891,17 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, goto end; } field->table->in_use->variables.sql_mode= orig_sql_mode; + + /* + Any sargable predicate except "<=>" involving NULL as a constant is always + FALSE + */ + if (type != Item_func::EQUAL_FUNC && field->is_real_null()) + { + tree= &null_element; + goto end; + } + str= (uchar*) alloc_root(alloc, key_part->store_length+1); if (!str) goto end; -- cgit v1.2.1 From 834ba32293173639880423839bbd245c601c0d50 Mon Sep 17 00:00:00 2001 From: Kristofer Pettersson Date: Mon, 19 Oct 2009 09:43:33 +0200 Subject: Bug#47627 SET @@{global.session}.local_variable in stored routine causes crash Adding @@session and @@global prefixes to a declared variable in a stored procedure the server would lead to a crash. The reason was that during the parsing of the syntactic rule 'option_value' an uninitialized set_var object was pushed to the parameter stack of the SET statement. The parent rule 'option_type_value' interpreted the existence of variables on the parameter stack as an assignment and wrapped it in a sp_instr_set object. As the procedure later was executed an attempt was made to run the method 'check()' on an uninitialized member object (NULL value) belonging to the previously created but uninitialized object. sql/sql_yacc.yy: * Assign the option_type at once since it is needed by the next parsing rule (internal_variable_name) * Rearranged the if statement to reduce negations and gain more clarity of code. * Added check for option_type to better detect if current variable is a SP local variable or a system variable. --- sql/sql_yacc.yy | 61 ++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 14 deletions(-) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 12e124230e5..ceebde4f7b3 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -11783,8 +11783,17 @@ option_type: ; option_type2: - /* empty */ { $$= OPT_DEFAULT; } - | ONE_SHOT_SYM { Lex->one_shot_set= 1; $$= OPT_SESSION; } + /* empty */ + { + $$= OPT_DEFAULT; + Lex->option_type= OPT_DEFAULT; + } + | ONE_SHOT_SYM + { + Lex->one_shot_set= 1; + $$= OPT_SESSION; + Lex->option_type= OPT_SESSION; + } ; opt_var_type: @@ -11795,10 +11804,26 @@ opt_var_type: ; opt_var_ident_type: - /* empty */ { $$=OPT_DEFAULT; } - | GLOBAL_SYM '.' { $$=OPT_GLOBAL; } - | LOCAL_SYM '.' { $$=OPT_SESSION; } - | SESSION_SYM '.' { $$=OPT_SESSION; } + /* empty */ + { + $$=OPT_DEFAULT; + Lex->option_type= OPT_DEFAULT; + } + | GLOBAL_SYM '.' + { + $$=OPT_GLOBAL; + Lex->option_type= OPT_GLOBAL; + } + | LOCAL_SYM '.' + { + $$=OPT_SESSION; + Lex->option_type= OPT_SESSION; + } + | SESSION_SYM '.' 
+ { + $$=OPT_SESSION; + Lex->option_type= OPT_SESSION; + } ; ext_option_value: @@ -12038,8 +12063,22 @@ internal_variable_name: sp_pcontext *spc= lex->spcont; sp_variable_t *spv; - /* We have to lookup here since local vars can shadow sysvars */ - if (!spc || !(spv = spc->find_variable(&$1))) + /* + We have to lookup here since local vars can shadow sysvars. + + We also have to inspect the option_type first since the variable + identifier might have been prefixed with @@session or @@global + prefixes. Without this check we would wrongly identify them + as SP local variables. + */ + if (lex->option_type == OPT_DEFAULT && spc && + (spv= spc->find_variable(&$1))) + { + /* An SP local variable */ + $$.var= NULL; + $$.base_name= $1; + } + else { /* Not an SP local variable */ sys_var *tmp=find_sys_var(thd, $1.str, $1.length); @@ -12056,12 +12095,6 @@ internal_variable_name: lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; } } - else - { - /* An SP local variable */ - $$.var= NULL; - $$.base_name= $1; - } } | ident '.' ident { -- cgit v1.2.1 From e426eb2a3c7f501437b533d323959fe1928950df Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 19 Oct 2009 13:41:52 +0500 Subject: Bug#43207 wrong LC_TIME names for romanian locale A contributed patch by Andrei Boros (SCA signed). - Fixing locale definition file - Adding tests --- sql/sql_locale.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index 3def9864c29..5ddf65cd1b7 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -1309,9 +1309,9 @@ static const char *my_locale_month_names_ro_RO[13] = static const char *my_locale_ab_month_names_ro_RO[13] = {"ian","feb","mar","apr","mai","iun","iul","aug","sep","oct","nov","dec", NullS }; static const char *my_locale_day_names_ro_RO[8] = - {"Luni","MarÅ£i","Miercuri","Joi","Vineri","SĆ®mbĂtĂ","DuminicĂ", NullS }; + {"Luni","MarÅ£i","Miercuri","Joi","Vineri","SĆ¢mbătă","Duminică", NullS }; static const char *my_locale_ab_day_names_ro_RO[8] = - {"Lu","Ma","Mi","Jo","Vi","SĆ®","Du", NullS }; + {"Lu","Ma","Mi","Jo","Vi","SĆ¢","Du", NullS }; static TYPELIB my_locale_typelib_month_names_ro_RO = { array_elements(my_locale_month_names_ro_RO)-1, "", my_locale_month_names_ro_RO, NULL }; static TYPELIB my_locale_typelib_ab_month_names_ro_RO = -- cgit v1.2.1 From 8363e2665995539541222e40a0be60c292f7495c Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 26 Oct 2009 11:55:57 +0200 Subject: Bug #47412: Valgrind warnings / user can read uninitalized memory using SP variables A function call may end without throwing an error or without setting the return value. This can happen when e.g. an error occurs while calculating the return value. Fixed by setting the value to NULL when error occurs during evaluation of an expression. 
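A minimal sketch of the failure shape described above (all object names are invented for illustration): a stored function whose RETURN expression fails at runtime, which used to leave the result field unset.

  DELIMITER //
  CREATE TABLE t_demo (a INT)//
  INSERT INTO t_demo VALUES (1),(2)//
  CREATE FUNCTION f_demo() RETURNS INT
  READS SQL DATA
  BEGIN
    -- The scalar subquery yields two rows, so evaluating the RETURN
    -- expression raises an error after execution of the function body
    -- has already started.
    RETURN (SELECT a FROM t_demo);
  END//
  DELIMITER ;
  SELECT f_demo();

With the fix, the error is reported and the function's result field is explicitly set to NULL instead of being read back uninitialized.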
--- sql/sp_head.cc | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'sql') diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 78b3df525f5..f0c858cc50a 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -334,16 +334,18 @@ bool sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) { Item *expr_item; + enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; + bool save_abort_on_warning= thd->abort_on_warning; + bool save_stmt_modified_non_trans_table= + thd->transaction.stmt.modified_non_trans_table; DBUG_ENTER("sp_eval_expr"); if (!*expr_item_ptr) - DBUG_RETURN(TRUE); + goto error; if (!(expr_item= sp_prepare_func_item(thd, expr_item_ptr))) - DBUG_RETURN(TRUE); - - bool err_status= FALSE; + goto error; /* Set THD flags to emit warnings/errors in case of overflow/type errors @@ -352,10 +354,6 @@ sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) Save original values and restore them after save. */ - enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; - bool save_abort_on_warning= thd->abort_on_warning; - bool save_stmt_modified_non_trans_table= thd->transaction.stmt.modified_non_trans_table; - thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; thd->abort_on_warning= thd->variables.sql_mode & @@ -370,13 +368,18 @@ sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) thd->abort_on_warning= save_abort_on_warning; thd->transaction.stmt.modified_non_trans_table= save_stmt_modified_non_trans_table; - if (thd->is_error()) - { - /* Return error status if something went wrong. */ - err_status= TRUE; - } + if (!thd->is_error()) + DBUG_RETURN(FALSE); - DBUG_RETURN(err_status); +error: + /* + In case of error during evaluation, leave the result field set to NULL. + Sic: we can't do it in the beginning of the function because the + result field might be needed for its own re-evaluation, e.g. case of + set x = x + 1; + */ + result_field->set_null(); + DBUG_RETURN (TRUE); } -- cgit v1.2.1 From 74b0f6be2fd8f0f46f490fd2b10f6dbfef9dad41 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 19 Oct 2009 16:55:04 +0300 Subject: Bug #47788: Crash in TABLE_LIST::hide_view_error on UPDATE + VIEW + SP + MERGE + ALTER When cleaning up the stored procedure's internal structures the flag to ignore the errors for INSERT/UPDATE IGNORE was not cleaned up. As a result error ignoring was on during name resolution. And this is an abnormal situation : the SELECT_LEX flag can be on only during query execution. Fixed by correctly cleaning up the SELECT_LEX flag when reusing the SELECT_LEX in a second execution. --- sql/sql_prepare.cc | 3 +++ 1 file changed, 3 insertions(+) (limited to 'sql') diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 3f1727ccb39..e2a24f7da1e 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2342,6 +2342,9 @@ void reinit_stmt_before_use(THD *thd, LEX *lex) /* Fix ORDER list */ for (order= (ORDER *)sl->order_list.first; order; order= order->next) order->item= &order->item_ptr; + + /* clear the no_error flag for INSERT/UPDATE IGNORE */ + sl->no_error= FALSE; } { SELECT_LEX_UNIT *unit= sl->master_unit(); -- cgit v1.2.1 From 5ef63a4f1c2f1abb81caaec639b962837821000f Mon Sep 17 00:00:00 2001 From: "Tatiana A. 
Nurnberg" Date: Mon, 19 Oct 2009 21:42:10 -0700 Subject: Bug#28141: Control C on query waiting on lock causes ERROR 1053 (server shutdown) If a thread is killed in the server, we throw "shutdown" only if one is actually in progress; otherwise, we throw "query interrupted". Control-C in the mysql command-line client is "incremental" now. First Control-C sends KILL QUERY (when connected to 5.0+ server, otherwise, see next) Next Control-C sends KILL CONNECTION Next Control-C aborts client. As the first two steps only pertain to an existing query, Control-C will abort the client right away if no query is running. client will give more detailed/consistent feedback on Control-C now. client/mysql.cc: Extends Control-C handling; enhances up feedback to user. On 5.0+ servers, we try to be nice and send KILL QUERY first if Control-C is pressed in the command-line client, but if that doesn't work, we now give the user the opportunity to send KILL CONNECTION with another Control-C (and to kill the client with another Control-C if that somehow doesn't work either). mysql-test/t/flush_read_lock_kill.test: we're getting correct "thread killed" rather than "in shutdown" error now mysql-test/t/kill.test: we're getting correct "thread killed" rather than "in shutdown" error now mysql-test/t/rpl000001.test: we're getting correct "thread killed" rather than "in shutdown" error now mysql-test/t/rpl_error_ignored_table.test: we're getting correct "thread killed" rather than "in shutdown" error now sql/records.cc: make error messages on KILL uniform for rr_*() by folding that handling into rr_handle_error() sql/sql_class.h: Only throw "shutdown" when we have one flagged as being in progress; otherwise, throw "query interrupted" as it's likely to be "KILL CONNECTION" or related. --- sql/records.cc | 21 +++++++++------------ sql/sql_class.h | 6 ++++++ 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/records.cc b/sql/records.cc index d5c3a421cd9..d564533ae7a 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -56,6 +56,7 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, bool print_error, uint idx) { bzero((char*) info,sizeof(*info)); + info->thd= thd; info->table= table; info->file= table->file; info->record= table->record[0]; @@ -240,6 +241,12 @@ void end_read_record(READ_RECORD *info) static int rr_handle_error(READ_RECORD *info, int error) { + if (info->thd->killed) + { + info->thd->send_kill_message(); + return 1; + } + if (error == HA_ERR_END_OF_FILE) error= -1; else @@ -260,12 +267,7 @@ static int rr_quick(READ_RECORD *info) int tmp; while ((tmp= info->select->quick->get_next())) { - if (info->thd->killed) - { - my_error(ER_SERVER_SHUTDOWN, MYF(0)); - return 1; - } - if (tmp != HA_ERR_RECORD_DELETED) + if (info->thd->killed || (tmp != HA_ERR_RECORD_DELETED)) { tmp= rr_handle_error(info, tmp); break; @@ -331,16 +333,11 @@ int rr_sequential(READ_RECORD *info) int tmp; while ((tmp=info->file->rnd_next(info->record))) { - if (info->thd->killed) - { - info->thd->send_kill_message(); - return 1; - } /* rnd_next can return RECORD_DELETED for MyISAM when one thread is reading and another deleting without locks. 
*/ - if (tmp != HA_ERR_RECORD_DELETED) + if (info->thd->killed || (tmp != HA_ERR_RECORD_DELETED)) { tmp= rr_handle_error(info, tmp); break; diff --git a/sql/sql_class.h b/sql/sql_class.h index 7c747e459a4..b41a5d5c6b7 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -44,6 +44,8 @@ extern char internal_table_name[2]; extern char empty_c_string[1]; extern const char **errmesg; +extern bool volatile shutdown_in_progress; + #define TC_LOG_PAGE_SIZE 8192 #define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE) @@ -1801,7 +1803,11 @@ public: { int err= killed_errno(); if (err) + { + if ((err == KILL_CONNECTION) && !shutdown_in_progress) + err = KILL_QUERY; my_message(err, ER(err), MYF(0)); + } } /* return TRUE if we will abort query if we make a warning now */ inline bool really_abort_on_warning() -- cgit v1.2.1 From 882535423de244036c8d26b055aeaf292d3abaf7 Mon Sep 17 00:00:00 2001 From: Satya B Date: Tue, 20 Oct 2009 11:47:57 +0530 Subject: Fix for Bug #41597 - After rename of user, there are additional grants when grants are reapplied. After renaming a user and trying to re-apply grants results in additional grants. This is because we use username as part of the key for GRANT_TABLE structure. When the user is renamed, we only change the username stored and the hash key still contains the old user name and this results in the extra privileges Fixed by rebuilding the hash key and updating the column_priv_hash structure when the user is renamed mysql-test/r/grant3.result: Bug #41597 - After rename of user, there are additional grants when grants are reapplied. Testcase for BUG#41597 mysql-test/t/grant3.test: Bug #41597 - After rename of user, there are additional grants when grants are reapplied. Testcase for BUG#41597 sql/sql_acl.cc: Bug #41597 - After rename of user, there are additional grants when grants are reapplied. Fixed handle_grant_struct() to update the hash key when the user is renamed. 
Added to set_user_details() method to GRANT_NAME class --- sql/sql_acl.cc | 46 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ea17a4227ef..8f1e2c70fe6 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2096,6 +2096,8 @@ public: GRANT_NAME (TABLE *form); virtual ~GRANT_NAME() {}; virtual bool ok() { return privs != 0; } + void set_user_details(const char *h, const char *d, + const char *u, const char *t); }; @@ -2113,27 +2115,36 @@ public: }; - -GRANT_NAME::GRANT_NAME(const char *h, const char *d,const char *u, - const char *t, ulong p) - :privs(p) +void GRANT_NAME::set_user_details(const char *h, const char *d, + const char *u, const char *t) { /* Host given by user */ update_hostname(&host, strdup_root(&memex, h)); - db = strdup_root(&memex,d); + if (db != d) + { + db= strdup_root(&memex, d); + if (lower_case_table_names) + my_casedn_str(files_charset_info, db); + } user = strdup_root(&memex,u); sort= get_sort(3,host.hostname,db,user); - tname= strdup_root(&memex,t); - if (lower_case_table_names) + if (tname != t) { - my_casedn_str(files_charset_info, db); - my_casedn_str(files_charset_info, tname); + tname= strdup_root(&memex, t); + if (lower_case_table_names) + my_casedn_str(files_charset_info, tname); } key_length =(uint) strlen(d)+(uint) strlen(u)+(uint) strlen(t)+3; hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); } +GRANT_NAME::GRANT_NAME(const char *h, const char *d,const char *u, + const char *t, ulong p) + :db(0), tname(0), privs(p) +{ + set_user_details(h, d, u, t); +} GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, ulong p, ulong c) @@ -5183,9 +5194,20 @@ static int handle_grant_struct(uint struct_no, bool drop, case 2: case 3: - grant_name->user= strdup_root(&mem, user_to->user.str); - update_hostname(&grant_name->host, - strdup_root(&mem, user_to->host.str)); + /* + Update the grant structure with the new user name and + host name + */ + grant_name->set_user_details(user_to->host.str, grant_name->db, + user_to->user.str, grant_name->tname); + + /* + Since username is part of the hash key, when the user name + is renamed, the hash key is changed. Update the hash to + ensure that the position matches the new hash key value + */ + hash_update(&column_priv_hash, (byte *)grant_name, + grant_name->hash_key, grant_name->key_length); break; } } -- cgit v1.2.1 From 6395cb8ecf18f7cae5464dc56a7c9d1dbfe827c6 Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Tue, 20 Oct 2009 09:39:40 +0100 Subject: BUG#34582: FLUSH LOGS does not close and reopen the binlog index file Issuing 'FLUSH LOGS' does not close and reopen indexfile. Instead a SEEK_SET is performed. This patch makes index file to be closed and reopened whenever a rotation happens (FLUSH LOGS is issued or binary log exceeds maximum configured size). mysql-test/suite/binlog/r/binlog_delete_and_flush_index.result: Result file. mysql-test/suite/binlog/t/binlog_delete_and_flush_index.test: Test case. sql/log.cc: Added LOG_CLOSE_INDEX flag when calling MYSQL_BIN_LOG::close from within MYSQL_BIN_LOG::new_file_impl (which should just be called whenever a rotation is to happen - FLUSH LOGS issued or binlog size exceeds the maximum configured). 
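As a quick illustration of the behaviour being changed (a sketch, not the bundled test case), rotation can be driven from SQL and observed through the index-backed listing:

  FLUSH LOGS;         -- forces a rotation; with this patch the index file is
                      -- closed and reopened rather than merely repositioned
  SHOW BINARY LOGS;   -- read via the index file, so it should list the newly
                      -- created binlog after the rotation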
--- sql/log.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index e1c3944392f..e0eb97782a1 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3603,7 +3603,7 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) } old_name=name; name=0; // Don't free name - close(LOG_CLOSE_TO_BE_OPENED); + close(LOG_CLOSE_TO_BE_OPENED | LOG_CLOSE_INDEX); /* Note that at this point, log_state != LOG_CLOSED (important for is_open()). @@ -3618,8 +3618,10 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) trigger temp tables deletion on slaves. */ - open(old_name, log_type, new_name_ptr, - io_cache_type, no_auto_events, max_size, 1); + /* reopen index binlog file, BUG#34582 */ + if (!open_index_file(index_file_name, 0)) + open(old_name, log_type, new_name_ptr, + io_cache_type, no_auto_events, max_size, 1); my_free(old_name,MYF(0)); end: -- cgit v1.2.1 From 19ffe2308542735bc37e070bbdf266281ea8156a Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Wed, 21 Oct 2009 11:43:45 +0300 Subject: Bug #47780: crash when comparing GIS items from subquery If the first argument to GeomFromWKB function is a geometry field then the function just returns its value. However in doing so it's not preserving first argument's null_value flag and this causes unexpected null value to be returned to the calling function. Fixed by updating the null_value of the GeomFromWKB function in such cases (and all other cases that return a NULL e.g. because of not enough memory for the return buffer). --- sql/item_geofunc.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 71bd1347f6e..d3e7096a0bd 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -76,7 +76,9 @@ String *Item_func_geometry_from_wkb::val_str(String *str) if (args[0]->field_type() == MYSQL_TYPE_GEOMETRY) { - return args[0]->val_str(str); + String *str_ret= args[0]->val_str(str); + null_value= args[0]->null_value; + return str_ret; } wkb= args[0]->val_str(&arg_val); @@ -86,7 +88,10 @@ String *Item_func_geometry_from_wkb::val_str(String *str) str->set_charset(&my_charset_bin); if (str->reserve(SRID_SIZE, 512)) - return 0; + { + null_value= TRUE; /* purecov: inspected */ + return 0; /* purecov: inspected */ + } str->length(0); str->q_append(srid); if ((null_value= -- cgit v1.2.1 From 17ed6b9abda235daf41eb62512116384c1e06e25 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Wed, 21 Oct 2009 14:04:08 +0500 Subject: Fix for bug#47019: Assertion failed: 0, file .\rt_mbr.c, line 138 when forcing a spatial index Problem: "Spatial indexes can be involved in the search for queries that use a function such as MBRContains() or MBRWithin() in the WHERE clause". Using spatial indexes for JOINs with =, <=> etc. predicates is incorrect. Fix: disable spatial indexes for such queries. mysql-test/r/select.result: Fix for bug#47019: Assertion failed: 0, file .\rt_mbr.c, line 138 when forcing a spatial index - test result. mysql-test/t/select.test: Fix for bug#47019: Assertion failed: 0, file .\rt_mbr.c, line 138 when forcing a spatial index - test case. sql/sql_select.cc: Fix for bug#47019: Assertion failed: 0, file .\rt_mbr.c, line 138 when forcing a spatial index - disable spatial indexes for queries which use non-spatial conditions (e.g. NATURAL JOINs). 
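A hedged repro sketch of the disallowed pattern (table and index names are made up): joining on plain equality over a geometry column while forcing the spatial index.

  CREATE TABLE g1 (g GEOMETRY NOT NULL, SPATIAL KEY sp_g (g)) ENGINE=MyISAM;
  CREATE TABLE g2 (g GEOMETRY NOT NULL) ENGINE=MyISAM;
  -- NATURAL JOIN produces the non-spatial condition g1.g = g2.g; before the
  -- fix the forced spatial index could still be chosen for it, hitting the
  -- rt_mbr.c assertion.
  SELECT COUNT(*) FROM g1 FORCE INDEX (sp_g) NATURAL JOIN g2;

With the fix, spatial keys are skipped in add_key_part() for such conditions, the same way fulltext keys already were.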
--- sql/sql_select.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 76d6833de5c..3bf67299d58 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3447,7 +3447,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) { if (!(form->keys_in_use_for_query.is_set(key))) continue; - if (form->key_info[key].flags & HA_FULLTEXT) + if (form->key_info[key].flags & (HA_FULLTEXT | HA_SPATIAL)) continue; // ToDo: ft-keys in non-ft queries. SerG uint key_parts= (uint) form->key_info[key].key_parts; -- cgit v1.2.1 From 081c43ea64500357d4edc78168ece789223462aa Mon Sep 17 00:00:00 2001 From: Alfranio Correia Date: Thu, 22 Oct 2009 11:18:28 +0100 Subject: Post-fix for BUG#47287. The label "end" was causing compiler warnings as it was no longer used. To fix the problem we removed it. --- sql/log.cc | 1 - 1 file changed, 1 deletion(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index e1c3944392f..c087ac41a66 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1549,7 +1549,6 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) trx_data->at_least_one_stmt_committed = my_b_tell(&trx_data->trans_log) > 0; -end: if (!all) trx_data->before_stmt_pos = MY_OFF_T_UNDEF; // part of the stmt commit DBUG_RETURN(error); -- cgit v1.2.1 From 2436755508f081912134d375aea5f39a2b64325f Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Oct 2009 11:13:42 +0800 Subject: Bug#46640: output from mysqlbinlog command in 5.1 breaks replication Added parentheses around assignment used as truth value for suppressing warnings. --- sql/log_event.cc | 2 +- sql/log_event_old.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/log_event.cc b/sql/log_event.cc index 76532836b73..2cb253c9c56 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -7541,7 +7541,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) } if (get_flags(STMT_END_F)) - if (error= rows_event_stmt_cleanup(rli, thd)) + if ((error= rows_event_stmt_cleanup(rli, thd))) rli->report(ERROR_LEVEL, error, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 3389821a718..357bc78b1cd 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1842,7 +1842,7 @@ int Old_rows_log_event::do_apply_event(Relay_log_info const *rli) are involved, commit the transaction and flush the pending event to the binlog. */ - if (error= ha_autocommit_or_rollback(thd, 0)) + if ((error= ha_autocommit_or_rollback(thd, 0))) rli->report(ERROR_LEVEL, error, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", -- cgit v1.2.1 From b7ce2a01bc7cdf7984cbd12848f7f61c2f582a89 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Fri, 23 Oct 2009 16:26:48 +0500 Subject: Fix for bug#48258: Assertion failed when using a spatial index Problem: involving a spatial index for "non-spatial" queries (that don't containt MBRXXX() functions) may lead to failed assert. Fix: don't use spatial indexes in such cases. mysql-test/r/gis-rtree.result: Fix for bug#48258: Assertion failed when using a spatial index - test result. mysql-test/t/gis-rtree.test: Fix for bug#48258: Assertion failed when using a spatial index - test case. sql/opt_range.cc: Fix for bug#48258: Assertion failed when using a spatial index - allow only spatial functions (MBRXXX) for itMBR keyparts. 
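To make the distinction concrete, a self-contained sketch with invented names (not the bundled test): only MBR-style predicates remain eligible for the itMBR key parts, while an ordinary comparison on the same column no longer considers the spatial index.

  CREATE TABLE g3 (g GEOMETRY NOT NULL, SPATIAL KEY sp_g (g)) ENGINE=MyISAM;
  -- Still allowed to use the spatial index: an MBR predicate.
  SELECT COUNT(*) FROM g3
  WHERE MBRCONTAINS(GEOMFROMTEXT('POLYGON((0 0,0 5,5 5,5 0,0 0))'), g);
  -- No longer routed through the spatial index: a plain equality, which
  -- previously could trigger the assertion.
  SELECT COUNT(*) FROM g3 WHERE g = GEOMFROMTEXT('POINT(1 1)');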
--- sql/opt_range.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 355317fe280..3236ccd2cf4 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -4377,6 +4377,27 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, !(conf_func->compare_collation()->state & MY_CS_BINSORT)) goto end; + if (key_part->image_type == Field::itMBR) + { + switch (type) { + case Item_func::SP_EQUALS_FUNC: + case Item_func::SP_DISJOINT_FUNC: + case Item_func::SP_INTERSECTS_FUNC: + case Item_func::SP_TOUCHES_FUNC: + case Item_func::SP_CROSSES_FUNC: + case Item_func::SP_WITHIN_FUNC: + case Item_func::SP_CONTAINS_FUNC: + case Item_func::SP_OVERLAPS_FUNC: + break; + default: + /* + We cannot involve spatial indexes for queries that + don't use MBREQUALS(), MBRDISJOINT(), etc. functions. + */ + goto end; + } + } + optimize_range= field->optimize_range(param->real_keynr[key_part->key], key_part->part); -- cgit v1.2.1 From 6354112983d5e2a9fda44a873211ec7c21c1cd3d Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Fri, 23 Oct 2009 15:09:14 +0200 Subject: Bug #47919 assert in open_table during ALTER temporary table This assertion would occur if UPDATE was used to update multiple tables containing an AUTO_INCREMENT column and if the inserted row had a user-supplied value for that column. The assertion could then be triggered by the next statement. The problem was only noticeable on debug builds of the server. The cause of the problem was that the code for multi update did not properly reset the TABLE->auto_increment_if_null flag after update. The flag is used to indicate that a non-null value of an auto_increment field has been provided by the user or retrieved from a current record. Open_tables() contains an assertion that tests this flag, and this was triggered in this case by ALTER TABLE. This patch fixes the problem by resetting the auto_increment_if_null field to FALSE once a row has been updated. This bug is similar to Bug#47274, but for multi update rather than INSERT DELAYED. Test case added to update.test. --- sql/sql_update.cc | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'sql') diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 152613c0009..c2fb9172931 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1696,6 +1696,11 @@ bool multi_update::send_data(List ¬_used_values) TRG_EVENT_UPDATE)) DBUG_RETURN(1); + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + table->auto_increment_field_not_null= FALSE; found++; if (!can_compare_record || compare_record(table)) { -- cgit v1.2.1 From d539c1570b6ab23394709192c0bebc4ea736d63d Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 23 Oct 2009 16:54:58 +0300 Subject: Revert the fix for bug #47627 as it's causing the regression tests in pb2 to fail. 
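For reference, the statement shape bug #47627 concerns (names are illustrative only) is a declared routine variable addressed with an @@session or @@global prefix:

  DELIMITER //
  CREATE PROCEDURE p_demo()
  BEGIN
    DECLARE v INT DEFAULT 0;
    SET @@session.v = 1;   -- this assignment crashed the server
  END//
  DELIMITER ;

The revert below restores the previous grammar, so the crash described in the original report is no longer addressed by this tree.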
--- sql/sql_yacc.yy | 61 +++++++++++++-------------------------------------------- 1 file changed, 14 insertions(+), 47 deletions(-) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a837a10325b..37c583ad572 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -11783,17 +11783,8 @@ option_type: ; option_type2: - /* empty */ - { - $$= OPT_DEFAULT; - Lex->option_type= OPT_DEFAULT; - } - | ONE_SHOT_SYM - { - Lex->one_shot_set= 1; - $$= OPT_SESSION; - Lex->option_type= OPT_SESSION; - } + /* empty */ { $$= OPT_DEFAULT; } + | ONE_SHOT_SYM { Lex->one_shot_set= 1; $$= OPT_SESSION; } ; opt_var_type: @@ -11804,26 +11795,10 @@ opt_var_type: ; opt_var_ident_type: - /* empty */ - { - $$=OPT_DEFAULT; - Lex->option_type= OPT_DEFAULT; - } - | GLOBAL_SYM '.' - { - $$=OPT_GLOBAL; - Lex->option_type= OPT_GLOBAL; - } - | LOCAL_SYM '.' - { - $$=OPT_SESSION; - Lex->option_type= OPT_SESSION; - } - | SESSION_SYM '.' - { - $$=OPT_SESSION; - Lex->option_type= OPT_SESSION; - } + /* empty */ { $$=OPT_DEFAULT; } + | GLOBAL_SYM '.' { $$=OPT_GLOBAL; } + | LOCAL_SYM '.' { $$=OPT_SESSION; } + | SESSION_SYM '.' { $$=OPT_SESSION; } ; ext_option_value: @@ -12063,22 +12038,8 @@ internal_variable_name: sp_pcontext *spc= lex->spcont; sp_variable_t *spv; - /* - We have to lookup here since local vars can shadow sysvars. - - We also have to inspect the option_type first since the variable - identifier might have been prefixed with @@session or @@global - prefixes. Without this check we would wrongly identify them - as SP local variables. - */ - if (lex->option_type == OPT_DEFAULT && spc && - (spv= spc->find_variable(&$1))) - { - /* An SP local variable */ - $$.var= NULL; - $$.base_name= $1; - } - else + /* We have to lookup here since local vars can shadow sysvars */ + if (!spc || !(spv = spc->find_variable(&$1))) { /* Not an SP local variable */ sys_var *tmp=find_sys_var(thd, $1.str, $1.length); @@ -12095,6 +12056,12 @@ internal_variable_name: lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; } } + else + { + /* An SP local variable */ + $$.var= NULL; + $$.base_name= $1; + } } | ident '.' ident { -- cgit v1.2.1 From f0a7ff84190dd161b9e5643ef4aabc9e31809046 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Tue, 27 Oct 2009 12:09:19 +0400 Subject: Bug#41049 does syntax "grant" case insensitive? Problem 1: column_priv_hash uses utf8_general_ci collation for the key comparison. The key consists of user name, db name and table name. Thus user with privileges on table t1 is able to perform the same operation on T1 (the similar situation with user name & db name, see acl_cache). So collation which is used for column_priv_hash and acl_cache should be case sensitive. The fix: replace system_charset_info with my_charset_utf8_bin for column_priv_hash and acl_cache Problem 2: The same situation with proc_priv_hash, func_priv_hash, the only difference is that Routine name is case insensitive. So the fix is to use my_charset_utf8_bin for proc_priv_hash & func_priv_hash and convert routine name into lower case before writing the element into the hash and before looking up the key. Additional fix: mysql.procs_priv Routine_name field collation is changed to utf8_general_ci. It's necessary for REVOKE command (to find a field by routine hash element values). Note: It's safe for lower-case-table-names mode too because db name & table name are converted into lower case (see GRANT_NAME::GRANT_NAME). 
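A short sketch of problem 1 (user, database and table names are invented; it assumes a case-sensitive file system with lower_case_table_names=0):

  CREATE DATABASE db1;
  CREATE TABLE db1.t1 (a INT);
  CREATE TABLE db1.T1 (a INT);
  CREATE USER 'u1'@'localhost';
  GRANT SELECT ON db1.t1 TO 'u1'@'localhost';
  -- Before the fix, 'u1'@'localhost' could also read db1.T1, because the
  -- utf8_general_ci hash key in column_priv_hash compared t1 and T1 as equal.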
mysql-test/include/have_case_insensitive_fs.inc: test case mysql-test/r/case_insensitive_fs.require: test case mysql-test/r/grant_lowercase_fs.result: test result mysql-test/r/lowercase_fs_off.result: test result mysql-test/r/ps_grant.result: test result mysql-test/r/system_mysql_db.result: changed Routine_name field collation to case insensitive mysql-test/t/grant_lowercase_fs.test: test case mysql-test/t/lowercase_fs_off.test: test case scripts/mysql_system_tables.sql: changed Routine_name field collation to case insensitive scripts/mysql_system_tables_fix.sql: changed Routine_name field collation to case insensitive sql/sql_acl.cc: Problem 1: column_priv_hash uses utf8_general_ci collation for the key comparison. The key consists of user name, db name and table name. Thus user with privileges on table t1 is able to perform the same operation on T1 (the similar situation with user name & db name, see acl_cache). So collation which is used for column_priv_hash and acl_cache should be case sensitive. The fix: replace system_charset_info with my_charset_utf8_bin for column_priv_hash and acl_cache Problem 2: The same situation with proc_priv_hash, func_priv_hash, the only difference is that Routine name is case insensitive. So the fix is to use my_charset_utf8_bin for proc_priv_hash & func_priv_hash and convert routine name into lower case before writing the element into the hash and before looking up the key. Additional fix: mysql.procs_priv Routine_name field collation is changed to utf8_general_ci. It's necessary for REVOKE command (to find a field by routine hash element values). Note: It's safe for lower-case-table-names mode too because db name & table name are converted into lower case (see GRANT_NAME::GRANT_NAME). --- sql/sql_acl.cc | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 8f1e2c70fe6..49a75b921a3 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -148,8 +148,7 @@ my_bool acl_init(bool dont_read_acl_tables) acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0, (hash_get_key) acl_entry_get_key, (hash_free_key) free, - lower_case_file_system ? 
- system_charset_info : &my_charset_bin); + &my_charset_utf8_bin); if (dont_read_acl_tables) { DBUG_RETURN(0); /* purecov: tested */ @@ -2092,12 +2091,13 @@ public: ulong sort; uint key_length; GRANT_NAME(const char *h, const char *d,const char *u, - const char *t, ulong p); - GRANT_NAME (TABLE *form); + const char *t, ulong p, bool is_routine); + GRANT_NAME (TABLE *form, bool is_routine); virtual ~GRANT_NAME() {}; virtual bool ok() { return privs != 0; } void set_user_details(const char *h, const char *d, - const char *u, const char *t); + const char *u, const char *t, + bool is_routine); }; @@ -2116,7 +2116,8 @@ public: void GRANT_NAME::set_user_details(const char *h, const char *d, - const char *u, const char *t) + const char *u, const char *t, + bool is_routine) { /* Host given by user */ update_hostname(&host, strdup_root(&memex, h)); @@ -2131,7 +2132,7 @@ void GRANT_NAME::set_user_details(const char *h, const char *d, if (tname != t) { tname= strdup_root(&memex, t); - if (lower_case_table_names) + if (lower_case_table_names || is_routine) my_casedn_str(files_charset_info, tname); } key_length =(uint) strlen(d)+(uint) strlen(u)+(uint) strlen(t)+3; @@ -2140,22 +2141,22 @@ void GRANT_NAME::set_user_details(const char *h, const char *d, } GRANT_NAME::GRANT_NAME(const char *h, const char *d,const char *u, - const char *t, ulong p) + const char *t, ulong p, bool is_routine) :db(0), tname(0), privs(p) { - set_user_details(h, d, u, t); + set_user_details(h, d, u, t, is_routine); } GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, ulong p, ulong c) - :GRANT_NAME(h,d,u,t,p), cols(c) + :GRANT_NAME(h,d,u,t,p, FALSE), cols(c) { (void) hash_init(&hash_columns,system_charset_info, 0,0,0, (hash_get_key) get_key_column,0,0); } -GRANT_NAME::GRANT_NAME(TABLE *form) +GRANT_NAME::GRANT_NAME(TABLE *form, bool is_routine) { update_hostname(&host, get_field(&memex, form->field[0])); db= get_field(&memex,form->field[1]); @@ -2173,6 +2174,9 @@ GRANT_NAME::GRANT_NAME(TABLE *form) if (lower_case_table_names) { my_casedn_str(files_charset_info, db); + } + if (lower_case_table_names || is_routine) + { my_casedn_str(files_charset_info, tname); } key_length = ((uint) strlen(db) + (uint) strlen(user) + @@ -2185,7 +2189,7 @@ GRANT_NAME::GRANT_NAME(TABLE *form) GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) - :GRANT_NAME(form) + :GRANT_NAME(form, FALSE) { byte key[MAX_KEY_LENGTH]; @@ -3170,7 +3174,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc, } grant_name= new GRANT_NAME(Str->host.str, db_name, Str->user.str, table_name, - rights); + rights, TRUE); if (!grant_name) { result= TRUE; @@ -3395,13 +3399,13 @@ static my_bool grant_load(TABLE_LIST *tables) THR_MALLOC); DBUG_ENTER("grant_load"); - (void) hash_init(&column_priv_hash,system_charset_info, + (void) hash_init(&column_priv_hash, &my_charset_utf8_bin, 0,0,0, (hash_get_key) get_grant_table, (hash_free_key) free_grant_table,0); - (void) hash_init(&proc_priv_hash,system_charset_info, + (void) hash_init(&proc_priv_hash, &my_charset_utf8_bin, 0,0,0, (hash_get_key) get_grant_table, 0,0); - (void) hash_init(&func_priv_hash,system_charset_info, + (void) hash_init(&func_priv_hash, &my_charset_utf8_bin, 0,0,0, (hash_get_key) get_grant_table, 0,0); init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0); @@ -3457,7 +3461,7 @@ static my_bool grant_load(TABLE_LIST *tables) { GRANT_NAME *mem_check; HASH *hash; - if (!(mem_check=new GRANT_NAME(p_table))) + if (!(mem_check=new GRANT_NAME(p_table, TRUE))) { /* This 
could only happen if we are out memory */ grant_option= FALSE; @@ -5199,7 +5203,8 @@ static int handle_grant_struct(uint struct_no, bool drop, host name */ grant_name->set_user_details(user_to->host.str, grant_name->db, - user_to->user.str, grant_name->tname); + user_to->user.str, grant_name->tname, + TRUE); /* Since username is part of the hash key, when the user name @@ -5806,7 +5811,7 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name, for (counter= 0, revoked= 0 ; counter < hash->records ; ) { GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter); - if (!my_strcasecmp(system_charset_info, grant_proc->db, sp_db) && + if (!my_strcasecmp(&my_charset_utf8_bin, grant_proc->db, sp_db) && !my_strcasecmp(system_charset_info, grant_proc->tname, sp_name)) { LEX_USER lex_user; -- cgit v1.2.1 From eeee91173ec1ce4900fb1a9e3d4e835d79164023 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Tue, 27 Oct 2009 12:37:57 +0400 Subject: An addition to fix for BUG#41597 - After rename of user, there are additional grants when grants are reapplied. Fixed build failure on Windows. Added missing cast. sql/sql_acl.cc: Fixed build failure on Windows. Added missing cast. --- sql/sql_acl.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 8f1e2c70fe6..720fc659b60 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -5206,8 +5206,8 @@ static int handle_grant_struct(uint struct_no, bool drop, is renamed, the hash key is changed. Update the hash to ensure that the position matches the new hash key value */ - hash_update(&column_priv_hash, (byte *)grant_name, - grant_name->hash_key, grant_name->key_length); + hash_update(&column_priv_hash, (byte*) grant_name, + (byte *) grant_name->hash_key, grant_name->key_length); break; } } -- cgit v1.2.1 From 0ba101d4ea8cc081142288a47bdd4e10d83219e9 Mon Sep 17 00:00:00 2001 From: "Tatiana A. Nurnberg" Date: Tue, 27 Oct 2009 06:16:02 -0700 Subject: Bug#46586: When using the plugin interface the type "set" for options caused a crash. "What do you mean, there's a bug? There isn't even code!" There was some token code for plug-in variables of the SET type, but clearly this never worked, or was subject to massive bit rot since. Bug-fixes ... fail-safes ... tests -- fais au mieux, mon chou! mysys/my_getopt.c: SETs set-up should set up a default value, but no min/max bounding. mysys/typelib.c: fail-safe requested by serg: don't try to skip separator when we're already at end of string. sql/sql_plugin.cc: check_func_set: Initialize error_len as find_set() will only update it on error, and we're using the value to see whether an error has occurred (!= 0), so we'd better not have a random val in there. value_ptr: There's no guarantee we're handed string lengths, so play it safe! Use prepared string lengths where possible for minimum speed gain, otherwise determine on the fly! 
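From the SQL side, the affected surface is simply a plugin-provided system variable declared with the SET type (the variable name below is invented; any plugin exposing a SET option would do):

  SET GLOBAL demo_plugin_modes = 'a,b';      -- goes through check_func_set()
  SELECT @@global.demo_plugin_modes;         -- rendered by value_ptr()

The fix makes both paths handle the SET typelib safely: error_len is initialized before use, and value_ptr() no longer assumes type_lengths is always provided.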
--- sql/sql_plugin.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index b411d5e3095..bafc601d142 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2066,7 +2066,7 @@ static int check_func_set(THD *thd, struct st_mysql_sys_var *var, const char *strvalue= "NULL", *str; TYPELIB *typelib; ulonglong result; - uint error_len; + uint error_len= 0; // init as only set on error bool not_used; int length; @@ -2665,7 +2665,9 @@ uchar* sys_var_pluginvar::value_ptr(THD *thd, enum_var_type type, { if (!(value & mask)) continue; - str.append(typelib->type_names[i], typelib->type_lengths[i]); + str.append(typelib->type_names[i], typelib->type_lengths + ? typelib->type_lengths[i] + : strlen(typelib->type_names[i])); str.append(','); } -- cgit v1.2.1 From a3829176845d5f357d5d6a3288543cec93ba2450 Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Tue, 27 Oct 2009 15:15:53 +0000 Subject: BUG#48297: Schema name is ignored when LOAD DATA is written into binlog, replication aborts In SBR or MBR, the schema name is not being written to the binlog when executing a LOAD DATA statement. This becomes a problem when the current database (lets call it db1) is different from the table's schema (lets call it db2). For instance, take the following statements: use db1; load data local infile 'infile.txt' into table db2.t Should this statement be logged without t's schema (db2), when replaying it, one can get db1.t populated instead of db2.t (if db1.t exists). On the other hand, if there is no db1.t at all, replication will stop. We fix this by always logging the table (in load file) with fully qualified name when its schema is different from the current database or when no default database was selected. 
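Restating the scenario from the description as a runnable sketch (db1, db2, t and the file name are the ones used in the text above):

  USE db1;
  LOAD DATA LOCAL INFILE 'infile.txt' INTO TABLE db2.t;

With this patch the statement written to the binary log names the table as `db2`.`t` whenever the table's schema differs from the current database (or no database is selected), so replaying the log can no longer populate db1.t by mistake or abort when db1.t does not exist.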
--- sql/sql_load.cc | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) (limited to 'sql') diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 59ba7b16d62..5500439b619 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -84,7 +84,7 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, bool ignore_check_option_errors); #ifndef EMBEDDED_LIBRARY static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, - const char* db_arg, + const char* db_arg, /* table's database */ const char* table_name_arg, enum enum_duplicates duplicates, bool ignore, @@ -501,7 +501,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (thd->transaction.stmt.modified_non_trans_table) write_execute_load_query_log_event(thd, ex, - tdb, table_list->table_name, + table_list->db, + table_list->table_name, handle_duplicates, ignore, transactional_table, errcode); @@ -548,7 +549,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, { int errcode= query_error_code(thd, killed_status == THD::NOT_KILLED); write_execute_load_query_log_event(thd, ex, - tdb, table_list->table_name, + table_list->db, table_list->table_name, handle_duplicates, ignore, transactional_table, errcode); @@ -573,7 +574,7 @@ err: /* Not a very useful function; just to avoid duplication of code */ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, - const char* db_arg, + const char* db_arg, /* table's database */ const char* table_name_arg, enum enum_duplicates duplicates, bool ignore, @@ -590,8 +591,27 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, Item *item, *val; String pfield, pfields; int n; + const char *tbl= table_name_arg; + const char *tdb= (thd->db != NULL ? thd->db : db_arg); + String string_buf; - Load_log_event lle(thd, ex, db_arg, table_name_arg, fv, duplicates, + if (!thd->db || strcmp(db_arg, thd->db)) + { + /* + If used database differs from table's database, + prefix table name with database name so that it + becomes a FQ name. + */ + string_buf.set_charset(system_charset_info); + string_buf.append(db_arg); + string_buf.append("`"); + string_buf.append("."); + string_buf.append("`"); + string_buf.append(table_name_arg); + tbl= string_buf.c_ptr_safe(); + } + + Load_log_event lle(thd, ex, tdb, tbl, fv, duplicates, ignore, transactional_table); /* -- cgit v1.2.1 From f130de4fb843c7623bb6cb742bc2acc5d82d7cd8 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 28 Oct 2009 17:49:56 +0300 Subject: A patch and a test case for Bug#46539 Various crashes on INSERT IGNORE SELECT + SELECT FOR UPDATE. If a transaction was rolled back inside InnoDB due to a deadlock or lock wait timeout, and the statement had IGNORE clause, the server could crash at the end of the statement or on shutdown. This was caused by the error handling infrastructure's attempt to ignore a non-ignorable error. When a transaction rollback request is raised, switch off current_select->no_error flag, so that the following error won't be ignored. Instead, we could add !thd->is_fatal_sub_stmt_error to my_message_sql(), but since in write_record() we switch off no_error, the same approach is used in thd_mark_transaction_to_rollback(). @todo: call thd_mark_transaction_to_rollback() from handler::print_error(), then we can easily make sure that the error reported by print_error is not ignored. mysql-test/r/innodb_lock_wait_timeout_1.result: Update results (Bug#46539). 
mysql-test/t/innodb_lock_wait_timeout_1.test: Add a test case for Bug#46539 sql/sql_class.cc: When a transaction rollback request is raised, switch of current_select->no_error flag, so that the following error won't be ignored. --- sql/sql_class.cc | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index aa556810402..080839e9b42 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3215,6 +3215,16 @@ void mark_transaction_to_rollback(THD *thd, bool all) { thd->is_fatal_sub_stmt_error= TRUE; thd->transaction_rollback_request= all; + /* + Aborted transactions can not be IGNOREd. + Switch off the IGNORE flag for the current + SELECT_LEX. This should allow my_error() + to report the error and abort the execution + flow, even in presence + of IGNORE clause. + */ + if (thd->lex->current_select) + thd->lex->current_select->no_error= FALSE; } } /*************************************************************************** -- cgit v1.2.1 From 17ed7089756f9001d7bc6eac063d91b374de0181 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 28 Oct 2009 19:39:08 +0400 Subject: BUG#43171 - Assertion failed: thd->transaction.xid_state.xid.is_null() XA START may cause assertion failure/server crash when it is called after unilateral roll back issued by the Resource Manager (both in regular transaction and after XA transaction). The problem was that rm_error variable wasn't set/reset properly. mysql-test/r/xa.result: A test case for BUG#43171. mysql-test/t/xa.test: A test case for BUG#43171. sql/handler.cc: Setting rm_error when we're out of XA transaction has no special meaning. But it blocks reset of thd->transaction.xid structure later. sql/sql_parse.cc: Reset rm_error before we enter ha_rollback(), so thd->transaction.xid strucure is reinitialized. --- sql/handler.cc | 3 ++- sql/sql_parse.cc | 9 ++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/handler.cc b/sql/handler.cc index d07ebed8ab9..216228ed509 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1311,7 +1311,8 @@ int ha_rollback_trans(THD *thd, bool all) } trans->ha_list= 0; trans->no_2pc=0; - if (is_real_trans && thd->transaction_rollback_request) + if (is_real_trans && thd->transaction_rollback_request && + thd->transaction.xid_state.xa_state != XA_NOTR) thd->transaction.xid_state.rm_error= thd->main_da.sql_errno(); if (all) thd->variables.tx_isolation=thd->session_tx_isolation; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 2e150ca1542..7f19421beaf 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -122,6 +122,14 @@ static bool xa_trans_rolled_back(XID_STATE *xid_state) */ static bool xa_trans_rollback(THD *thd) { + /* + Resource Manager error is meaningless at this point, as we perform + explicit rollback request by user. We must reset rm_error before + calling ha_rollback(), so thd->transaction.xid structure gets reset + by ha_rollback()/THD::transaction::cleanup(). 
+ */ + thd->transaction.xid_state.rm_error= 0; + bool status= test(ha_rollback(thd)); thd->options&= ~(ulong) OPTION_BEGIN; @@ -129,7 +137,6 @@ static bool xa_trans_rollback(THD *thd) thd->server_status&= ~SERVER_STATUS_IN_TRANS; xid_cache_delete(&thd->transaction.xid_state); thd->transaction.xid_state.xa_state= XA_NOTR; - thd->transaction.xid_state.rm_error= 0; return status; } -- cgit v1.2.1 From ac37324843b97fc69c307f8bab52af69c81c1245 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 29 Oct 2009 17:24:29 +0200 Subject: Bug #42116 : Mysql crash on specific query Queries with nested outer joins may lead to crashes or bad results because an internal data structure is not handled correctly. The optimizer uses bitmaps of nested JOINs to determine if certain table can be placed at a certain place in the JOIN order. It does maintain a bitmap describing in which JOINs last placed table is nested. When it puts a table it makes sure the bit of every JOIN that contains the table in question is set (because JOINs can be nested). It does that by recursively setting the bit for the next enclosing JOIN when this is the first table in the JOIN and recursively resetting the bit if it's the last table in the JOIN. When it removes a table from the join order it should do the opposite : recursively unset the bit if it's the only remaining table in this join and and recursively set the bit if it's removing the last table of a JOIN. There was an error in how the bits was set for the upper levels : when removing a table it was setting the bit for all the enclosing nested JOINs even if there were more tables left in the current JOIN (which practically means that the upper nested JOINs were not affected). Fixed by stopping the recursion at the relevant level. mysql-test/r/join.result: Bug #42116: test case mysql-test/t/join.test: Bug #42116: test case sql/sql_select.cc: Bug #41116: don't go up and set the bits if more tables in at the current JOIN level --- sql/sql_select.cc | 3 +++ 1 file changed, 3 insertions(+) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3bf67299d58..8136c6f7635 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8640,7 +8640,10 @@ static void restore_prev_nj_state(JOIN_TAB *last) join->cur_embedding_map&= ~last_emb->nested_join->nj_map; else if (last_emb->nested_join->join_list.elements-1 == last_emb->nested_join->counter) + { join->cur_embedding_map|= last_emb->nested_join->nj_map; + break; + } else break; last_emb= last_emb->embedding; -- cgit v1.2.1 From eaad6119dadcdfeea4f71fab6401a19b648b5fa6 Mon Sep 17 00:00:00 2001 From: "Tatiana A. Nurnberg" Date: Thu, 29 Oct 2009 16:01:54 -0700 Subject: Bug#48295: explain extended crash with subquery and ONLY_FULL_GROUP_BY sql_mode If an outer query is broken, a subquery might not even get set up. EXPLAIN EXTENDED did not expect this and merrily tried to de-ref all of the half-setup info. We now catch this case and print as much as we have, as it doesn't cost us anything (doesn't make regular execution slower). mysql-test/r/explain.result: Show that EXPLAIN EXTENDED with subquery and illegal out query doesn't crash. Show also that SHOW WARNINGS will render an additional Note in the hope of being, well, helpful. mysql-test/t/explain.test: If we have only half a query for EXPLAIN EXTENDED to print (i.e., incomplete subquery info as outer query is illegal), we should provide the user with as much info as we easily can if they ask for it. 
What we should not do is crash when they come asking for help, that violates etiquette in some countries. sql/item_subselect.cc: If the sub-query's actually set up, print it. Otherwise, elide. --- sql/item_subselect.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index da651cec70c..29db9eb0903 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -311,9 +311,14 @@ void Item_subselect::update_used_tables() void Item_subselect::print(String *str, enum_query_type query_type) { - str->append('('); - engine->print(str, query_type); - str->append(')'); + if (engine) + { + str->append('('); + engine->print(str, query_type); + str->append(')'); + } + else + str->append("(...)"); } -- cgit v1.2.1 From 851e250953e531c2c5189dff0d7202cb79acf5d6 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 30 Oct 2009 11:40:44 +0200 Subject: Bug #48293: crash with procedure analyse, view with > 10 columns, having clause... The fix for bug 46184 was not very complete. It was not covering views using temporary tables and multiple tables in a FROM clause. Fixed by reverting the fix for 46184 and making a more general check that is checking at the right execution stage and for all of the non-supported cases. Now PROCEDURE ANALYZE on non-top level SELECT is also forbidden. Updated the analyse.test and subselect.test accordingly. --- sql/sql_select.cc | 12 ++++++++++++ sql/sql_yacc.yy | 3 +-- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8136c6f7635..c425d9a988e 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -622,6 +622,18 @@ JOIN::prepare(Item ***rref_pointer_array, MYF(0)); /* purecov: inspected */ goto err; /* purecov: inspected */ } + if (thd->lex->derived_tables) + { + my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", + thd->lex->derived_tables & DERIVED_VIEW ? + "view" : "subquery"); + goto err; + } + if (thd->lex->sql_command != SQLCOM_SELECT) + { + my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "non-SELECT"); + goto err; + } } if (!procedure && result && result->prepare(fields_list, unit_arg)) diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index c0247fd8d56..2fc85b4bda7 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -7334,8 +7334,7 @@ procedure_clause: MYSQL_YYABORT; } - if (&lex->select_lex != lex->current_select || - lex->select_lex.get_table_list()->derived) + if (&lex->select_lex != lex->current_select) { my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"); MYSQL_YYABORT; -- cgit v1.2.1 From 9d96cd6dcb3f171cbbe93c0f7061ce132a9087a7 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 30 Oct 2009 15:15:43 +0200 Subject: Bug #48291 : crash with row() operator,select into @var, and subquery returning multiple rows Error handling was missing when handling subqueires in WHERE and when assigning a SELECT result to a @variable. This caused crash(es). Fixed by adding error handling code to both the WHERE condition evaluation and to assignment to an @variable. 
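Both failing shapes from the title can be sketched as follows (table and column names are invented):

  CREATE TABLE t_rows (a INT);
  INSERT INTO t_rows VALUES (1),(2);
  -- Subquery returning multiple rows inside a WHERE condition using row():
  SELECT 1 FROM t_rows WHERE ROW(1, 1) = (SELECT 1, a FROM t_rows);
  -- The same multi-row error raised while assigning into a user variable:
  SELECT (SELECT a FROM t_rows) INTO @v;

With the fix both statements fail cleanly with the usual "Subquery returns more than 1 row" error instead of crashing the server.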
--- sql/sql_class.cc | 6 ++++-- sql/sql_select.cc | 13 ++++++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 7d26759cb16..06f2229a050 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2068,9 +2068,11 @@ bool select_dumpvar::send_data(List &items) else { Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item); - suv->fix_fields(thd, 0); + if (suv->fix_fields(thd, 0)) + DBUG_RETURN (1); suv->save_item_result(item); - suv->update(); + if (suv->update()) + DBUG_RETURN (1); } } DBUG_RETURN(0); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index c425d9a988e..d5c2b93dfb4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -10822,6 +10822,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, bool not_used_in_distinct=join_tab->not_used_in_distinct; ha_rows found_records=join->found_records; COND *select_cond= join_tab->select_cond; + bool select_cond_result= TRUE; if (error > 0 || (*report_error)) // Fatal error return NESTED_LOOP_ERROR; @@ -10833,7 +10834,17 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, return NESTED_LOOP_KILLED; /* purecov: inspected */ } DBUG_PRINT("info", ("select cond 0x%lx", (ulong)select_cond)); - if (!select_cond || select_cond->val_int()) + + if (select_cond) + { + select_cond_result= test(select_cond->val_int()); + + /* check for errors evaluating the condition */ + if (join->thd->net.report_error) + return NESTED_LOOP_ERROR; + } + + if (!select_cond || select_cond_result) { /* There is no select condition or the attached pushed down -- cgit v1.2.1 From b67cdaa351122ca0d8badd2f8fb4ec9085c58933 Mon Sep 17 00:00:00 2001 From: Alexey Kopytov Date: Fri, 30 Oct 2009 18:54:53 +0300 Subject: Bug #48131: crash group by with rollup, distinct, filesort, with temporary tables There were two problems the test case from this bug was triggering: 1. JOIN::rollup_init() was supposed to wrap all constant Items into another object for queries with the WITH ROLLUP modifier to ensure they are never considered as constants and therefore are written into temporary tables if the optimizer chooses to employ them for DISTINCT/GROUP BY handling. However, JOIN::rollup_init() was called before make_join_statistics(), so Items corresponding to fields in const tables could not be handled as intended, which was causing all kinds of problems later in the query execution. In particular, create_tmp_table() assumed all constant items except "hidden" ones to be removed earlier by remove_const() which led to improperly initialized Field objects for the temporary table being created. This is what was causing crashes and valgrind errors in storage engines. 2. Even when the above problem had been fixed, the query from the test case produced incorrect results due to some DISTINCT/GROUP BY optimizations being performed by the optimizer that are inapplicable in the WITH ROLLUP case. Fixed by disabling inapplicable DISTINCT/GROUP BY optimizations when the WITH ROLLUP modifier is present, and splitting the const-wrapping part of JOIN::rollup_init() into a separate method which is now invoked after make_join_statistics() when the const tables are already known. mysql-test/r/olap.result: Added a test case for bug #48131. mysql-test/t/olap.test: Added a test case for bug #48131. sql/sql_select.cc: 1. Disabled inapplicable DISTINCT/GROUP BY optimizations when the WITH ROLLUP modifier is present. 2. Split the const-wrapping part of JOIN::rollup_init() into a separate method. 
sql/sql_select.h: Added rollup_process_const_fields() declaration. --- sql/sql_select.cc | 92 ++++++++++++++++++++++++++++++++++++++----------------- sql/sql_select.h | 1 + 2 files changed, 65 insertions(+), 28 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 76d6833de5c..11c7331b276 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -916,6 +916,12 @@ JOIN::optimize() DBUG_RETURN(1); } + if (select_lex->olap == ROLLUP_TYPE && rollup_process_const_fields()) + { + DBUG_PRINT("error", ("Error: rollup_process_fields() failed")); + DBUG_RETURN(1); + } + /* Remove distinct if only const tables */ select_distinct= select_distinct && (const_tables != tables); thd_proc_info(thd, "preparing"); @@ -1043,7 +1049,7 @@ JOIN::optimize() join_tab[const_tables].select->quick->get_type() != QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) { - if (group_list && + if (group_list && rollup.state == ROLLUP::STATE_NONE && list_contains_unique_index(join_tab[const_tables].table, find_field_in_order_list, (void *) group_list)) @@ -1081,7 +1087,8 @@ JOIN::optimize() if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE) select_distinct=0; } - else if (select_distinct && tables - const_tables == 1) + else if (select_distinct && tables - const_tables == 1 && + rollup.state == ROLLUP::STATE_NONE) { /* We are only using one table. In this case we change DISTINCT to a @@ -9858,6 +9865,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, for (; cur_group ; cur_group= cur_group->next, key_part_info++) { Field *field=(*cur_group->item)->get_tmp_table_field(); + DBUG_ASSERT(field->table == table); bool maybe_null=(*cur_group->item)->maybe_null; key_part_info->null_bit=0; key_part_info->field= field; @@ -14992,32 +15000,7 @@ bool JOIN::rollup_init() { item->maybe_null= 1; found_in_group= 1; - if (item->const_item()) - { - /* - For ROLLUP queries each constant item referenced in GROUP BY list - is wrapped up into an Item_func object yielding the same value - as the constant item. The objects of the wrapper class are never - considered as constant items and besides they inherit all - properties of the Item_result_field class. - This wrapping allows us to ensure writing constant items - into temporary tables whenever the result of the ROLLUP - operation has to be written into a temporary table, e.g. when - ROLLUP is used together with DISTINCT in the SELECT list. - Usually when creating temporary tables for a intermidiate - result we do not include fields for constant expressions. - */ - Item* new_item= new Item_func_rollup_const(item); - if (!new_item) - return 1; - new_item->fix_fields(thd, (Item **) 0); - thd->change_item_tree(it.ref(), new_item); - for (ORDER *tmp= group_tmp; tmp; tmp= tmp->next) - { - if (*tmp->item == item) - thd->change_item_tree(tmp->item, new_item); - } - } + break; } } if (item->type() == Item::FUNC_ITEM && !found_in_group) @@ -15036,6 +15019,59 @@ bool JOIN::rollup_init() } return 0; } + +/** + Wrap all constant Items in GROUP BY list. + + For ROLLUP queries each constant item referenced in GROUP BY list + is wrapped up into an Item_func object yielding the same value + as the constant item. The objects of the wrapper class are never + considered as constant items and besides they inherit all + properties of the Item_result_field class. + This wrapping allows us to ensure writing constant items + into temporary tables whenever the result of the ROLLUP + operation has to be written into a temporary table, e.g. 
when + ROLLUP is used together with DISTINCT in the SELECT list. + Usually when creating temporary tables for a intermidiate + result we do not include fields for constant expressions. + + @retval + 0 if ok + @retval + 1 on error +*/ + +bool JOIN::rollup_process_const_fields() +{ + ORDER *group_tmp; + Item *item; + List_iterator it(all_fields); + + for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next) + { + if (!(*group_tmp->item)->const_item()) + continue; + while ((item= it++)) + { + if (*group_tmp->item == item) + { + Item* new_item= new Item_func_rollup_const(item); + if (!new_item) + return 1; + new_item->fix_fields(thd, (Item **) 0); + thd->change_item_tree(it.ref(), new_item); + for (ORDER *tmp= group_tmp; tmp; tmp= tmp->next) + { + if (*tmp->item == item) + thd->change_item_tree(tmp->item, new_item); + } + break; + } + } + it.rewind(); + } + return 0; +} /* diff --git a/sql/sql_select.h b/sql/sql_select.h index c328737c1c6..a217d199a30 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -452,6 +452,7 @@ public: } bool rollup_init(); + bool rollup_process_const_fields(); bool rollup_make_fields(List &all_fields, List &fields, Item_sum ***func); int rollup_send_data(uint idx); -- cgit v1.2.1 From 740b7c4e36aca09b43461543c318f48c379343cc Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Mon, 2 Nov 2009 13:24:07 +0100 Subject: Bug#47925: regression of range optimizer and date comparison in 5.1.39! When a query was using a DATE or DATETIME value formatted using any other separator characters beside hyphen '-', a query with a greater-or-equal '>=' condition matching only the greatest value in an indexed column, the result was empty if index range scan was employed. The range optimizer got a new feature between 5.1.38 and 5.1.39 that changes a greater-or-equal condition to a greater-than if the value matching that in the query was not present in the table. But the value comparison function compared the dates as strings instead of dates. The bug was fixed by splitting the function get_date_from_str in two: One part that parses and does error checking. This function is now visible outside the module. The old get_date_from_str now calls the new function. mysql-test/r/range.result: Bug#47925: Test result mysql-test/t/range.test: Bug#47925: Test case sql/item.cc: Bug#47925: Fix + some edit on the comments sql/item.h: Bug#47925: Changed function signature sql/item_cmpfunc.cc: Bug#47925: Split function in two sql/item_cmpfunc.h: Bug#47925: Declaration of new function sql/opt_range.cc: Bug#47925: Added THD to function call sql/time.cc: Bug#47925: Added microsecond comparison --- sql/item.cc | 79 ++++++++++++++++++----------------------- sql/item.h | 2 +- sql/item_cmpfunc.cc | 100 +++++++++++++++++++++++++++++++++------------------- sql/item_cmpfunc.h | 3 ++ sql/opt_range.cc | 6 ++-- sql/time.cc | 16 +++++---- 6 files changed, 113 insertions(+), 93 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index f637f9ffaea..5bace670e9b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6866,72 +6866,61 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) } /** - Compare the value stored in field, with the original item. + Compare the value stored in field with the expression from the query. 
- @param field field which the item is converted and stored in - @param item original item + @param field Field which the Item is stored in after conversion + @param item Original expression from query - @return Return an integer greater than, equal to, or less than 0 if - the value stored in the field is greater than, equal to, - or less than the original item + @return Returns an integer greater than, equal to, or less than 0 if + the value stored in the field is greater than, equal to, + or less than the original Item. A 0 may also be returned if + out of memory. @note We only use this on the range optimizer/partition pruning, because in some cases we can't store the value in the field without some precision/character loss. */ -int stored_field_cmp_to_item(Field *field, Item *item) +int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) { - Item_result res_type=item_cmp_type(field->result_type(), item->result_type()); if (res_type == STRING_RESULT) { char item_buff[MAX_FIELD_WIDTH]; char field_buff[MAX_FIELD_WIDTH]; - String item_tmp(item_buff,sizeof(item_buff),&my_charset_bin),*item_result; + + String item_tmp(item_buff,sizeof(item_buff),&my_charset_bin); String field_tmp(field_buff,sizeof(field_buff),&my_charset_bin); - enum_field_types field_type; - item_result=item->val_str(&item_tmp); + String *item_result= item->val_str(&item_tmp); + /* + Some implementations of Item::val_str(String*) actually modify + the field Item::null_value, hence we can't check it earlier. + */ if (item->null_value) return 0; - field->val_str(&field_tmp); + String *field_result= field->val_str(&field_tmp); - /* - If comparing DATE with DATETIME, append the time-part to the DATE. - So that the strings are equally formatted. - A DATE converted to string is 10 (MAX_DATE_WIDTH) characters, - and a DATETIME converted to string is 19 (MAX_DATETIME_WIDTH) characters. - */ - field_type= field->type(); - uint32 item_length= item_result->length(); - if (field_type == MYSQL_TYPE_DATE && - item_length == MAX_DATETIME_WIDTH) - field_tmp.append(" 00:00:00"); - else if (field_type == MYSQL_TYPE_DATETIME) + enum_field_types field_type= field->type(); + + if (field_type == MYSQL_TYPE_DATE || field_type == MYSQL_TYPE_DATETIME) { - if (item_length == MAX_DATE_WIDTH) - item_result->append(" 00:00:00"); - else if (item_length > MAX_DATETIME_WIDTH) - { - /* - We don't store microsecond part of DATETIME in field - but item_result contains it. As we compare DATETIMEs as strings - we must trim trailing 0's in item_result's microsecond part - to ensure "YYYY-MM-DD HH:MM:SS" == "YYYY-MM-DD HH:MM:SS.0000" - */ - char *end= (char *) item_result->ptr() + item_length - 1; - /* Trim trailing 0's */ - while (*end == '0') - end--; - /* Trim '.' 
if no microseconds */ - if (*end == '.') - end--; - DBUG_ASSERT(end - item_result->ptr() + 1 >= MAX_DATETIME_WIDTH); - item_result->length(end - item_result->ptr() + 1); - } + enum_mysql_timestamp_type type= MYSQL_TIMESTAMP_ERROR; + + if (field_type == MYSQL_TYPE_DATE) + type= MYSQL_TIMESTAMP_DATE; + + if (field_type == MYSQL_TYPE_DATETIME) + type= MYSQL_TIMESTAMP_DATETIME; + + const char *field_name= field->field_name; + MYSQL_TIME field_time, item_time; + get_mysql_time_from_str(thd, field_result, type, field_name, &field_time); + get_mysql_time_from_str(thd, item_result, type, field_name, &item_time); + + return my_time_compare(&field_time, &item_time); } - return stringcmp(&field_tmp,item_result); + return stringcmp(field_result, item_result); } if (res_type == INT_RESULT) return 0; // Both are of type int diff --git a/sql/item.h b/sql/item.h index a2cff3ab3a9..405588917ee 100644 --- a/sql/item.h +++ b/sql/item.h @@ -3125,4 +3125,4 @@ void mark_select_range_as_dependent(THD *thd, extern Cached_item *new_Cached_item(THD *thd, Item *item); extern Item_result item_cmp_type(Item_result a,Item_result b); extern void resolve_const_item(THD *thd, Item **ref, Item *cmp_item); -extern int stored_field_cmp_to_item(Field *field, Item *item); +extern int stored_field_cmp_to_item(THD *thd, Field *field, Item *item); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c29031d25b5..9fc52bb8876 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -636,56 +636,51 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) return 0; } - /** - @brief Convert date provided in a string to the int representation. - - @param[in] thd thread handle - @param[in] str a string to convert - @param[in] warn_type type of the timestamp for issuing the warning - @param[in] warn_name field name for issuing the warning - @param[out] error_arg could not extract a DATE or DATETIME - - @details Convert date provided in the string str to the int - representation. If the string contains wrong date or doesn't - contain it at all then a warning is issued. The warn_type and - the warn_name arguments are used as the name and the type of the - field when issuing the warning. If any input was discarded - (trailing or non-timestampy characters), was_cut will be non-zero. - was_type will return the type str_to_datetime() could correctly - extract. - - @return - converted value. 0 on error and on zero-dates -- check 'failure' + Parse date provided in a string to a MYSQL_TIME. + + @param[in] thd Thread handle + @param[in] str A string to convert + @param[in] warn_type Type of the timestamp for issuing the warning + @param[in] warn_name Field name for issuing the warning + @param[out] l_time The MYSQL_TIME objects is initialized. + + Parses a date provided in the string str into a MYSQL_TIME object. If the + string contains an incorrect date or doesn't correspond to a date at all + then a warning is issued. The warn_type and the warn_name arguments are used + as the name and the type of the field when issuing the warning. If any input + was discarded (trailing or non-timestamp-y characters), return value will be + TRUE. + + @return Status flag + @retval FALSE Success. + @retval True Indicates failure. 
*/ -static ulonglong -get_date_from_str(THD *thd, String *str, timestamp_type warn_type, - char *warn_name, bool *error_arg) +bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type, + const char *warn_name, MYSQL_TIME *l_time) { - ulonglong value= 0; + bool value; int error; - MYSQL_TIME l_time; - enum_mysql_timestamp_type ret; + enum_mysql_timestamp_type timestamp_type; - ret= str_to_datetime(str->ptr(), str->length(), &l_time, - (TIME_FUZZY_DATE | MODE_INVALID_DATES | - (thd->variables.sql_mode & - (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE))), - &error); + timestamp_type= + str_to_datetime(str->ptr(), str->length(), l_time, + (TIME_FUZZY_DATE | MODE_INVALID_DATES | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE))), + &error); - if (ret == MYSQL_TIMESTAMP_DATETIME || ret == MYSQL_TIMESTAMP_DATE) - { + if (timestamp_type == MYSQL_TIMESTAMP_DATETIME || + timestamp_type == MYSQL_TIMESTAMP_DATE) /* Do not return yet, we may still want to throw a "trailing garbage" warning. */ - *error_arg= FALSE; - value= TIME_to_ulonglong_datetime(&l_time); - } + value= FALSE; else { - *error_arg= TRUE; + value= TRUE; error= 1; /* force warning */ } @@ -698,6 +693,37 @@ get_date_from_str(THD *thd, String *str, timestamp_type warn_type, } +/** + @brief Convert date provided in a string to the int representation. + + @param[in] thd thread handle + @param[in] str a string to convert + @param[in] warn_type type of the timestamp for issuing the warning + @param[in] warn_name field name for issuing the warning + @param[out] error_arg could not extract a DATE or DATETIME + + @details Convert date provided in the string str to the int + representation. If the string contains wrong date or doesn't + contain it at all then a warning is issued. The warn_type and + the warn_name arguments are used as the name and the type of the + field when issuing the warning. + + @return + converted value. 0 on error and on zero-dates -- check 'failure' +*/ +static ulonglong get_date_from_str(THD *thd, String *str, + timestamp_type warn_type, + const char *warn_name, bool *error_arg) +{ + MYSQL_TIME l_time; + *error_arg= get_mysql_time_from_str(thd, str, warn_type, warn_name, &l_time); + + if (*error_arg) + return 0; + return TIME_to_ulonglong_datetime(&l_time); +} + + /* Check whether compare_datetime() can be used to compare items. 
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index c2227fa04e0..437d9541e50 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1721,3 +1721,6 @@ inline Item *and_conds(Item *a, Item *b) } Item *and_expressions(Item *a, Item *b, Item **org_item); + +bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type, + const char *warn_name, MYSQL_TIME *l_time); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2f2506f02b1..5f9bae22c70 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -5968,7 +5968,7 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, switch (type) { case Item_func::LT_FUNC: - if (stored_field_cmp_to_item(field,value) == 0) + if (stored_field_cmp_to_item(param->thd, field, value) == 0) tree->max_flag=NEAR_MAX; /* fall through */ case Item_func::LE_FUNC: @@ -5983,14 +5983,14 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, case Item_func::GT_FUNC: /* Don't use open ranges for partial key_segments */ if ((!(key_part->flag & HA_PART_KEY_SEG)) && - (stored_field_cmp_to_item(field, value) <= 0)) + (stored_field_cmp_to_item(param->thd, field, value) <= 0)) tree->min_flag=NEAR_MIN; tree->max_flag= NO_MAX_RANGE; break; case Item_func::GE_FUNC: /* Don't use open ranges for partial key_segments */ if ((!(key_part->flag & HA_PART_KEY_SEG)) && - (stored_field_cmp_to_item(field,value) < 0)) + (stored_field_cmp_to_item(param->thd, field, value) < 0)) tree->min_flag= NEAR_MIN; tree->max_flag=NO_MAX_RANGE; break; diff --git a/sql/time.cc b/sql/time.cc index 962b65e454c..8b554beb94b 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -965,20 +965,22 @@ calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign, longlong *s 0 - a == b 1 - a > b - NOTES - TIME.second_part is not considered during comparison */ -int -my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b) +int my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b) { - my_ulonglong a_t= TIME_to_ulonglong_datetime(a); - my_ulonglong b_t= TIME_to_ulonglong_datetime(b); + ulonglong a_t= TIME_to_ulonglong_datetime(a); + ulonglong b_t= TIME_to_ulonglong_datetime(b); + if (a_t < b_t) + return -1; if (a_t > b_t) return 1; - else if (a_t < b_t) + + if (a->second_part < b->second_part) return -1; + if (a->second_part > b->second_part) + return 1; return 0; } -- cgit v1.2.1 From e080080711793006e26104a44838e1e5de806c78 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 2 Nov 2009 23:19:58 +0100 Subject: Bug#47571: idle named pipe connection is unkillable Bug#31621: Windows server hanging during shutdown using named pipes and idle connection Problem: when idle pipe connection is forcefully closed with KILL statement or when the server goes down, thread that is closing connection would hang infinitely in CloseHandle(). The reason for the hang is that named pipe operations are performed synchronously. In this mode all IOs on pipe are serialized, that is CloseHandle() will not abort ReadFile() in another thread, but wait for ReadFile() to complete. The fix implements asynchrnous mode for named pipes, where operation of file are not synchronized. Read/Write operation would fire an async IO and wait for either IO completion or timeout. Note, that with this patch timeouts are properly handled for named pipes. Post-review: Win32 timeout code has been fixed for named pipes and shared memory. We do not store pointer to NET in vio structure, only the read and write timeouts. include/violite.h: Add pipe_overlapped to Vio structure for async IO for named pipes. 
sql-common/client.c: Use asynchronous pipe IO. sql/mysqld.cc: Use asynchronous pipe IO. vio/vio.c: -Refactor timeouts for win32 protocols: shared memory and named pipes. Store read/write timeout in VIO structure, instead of storing pointer to NET. New function vio_win32_timeout called indirectly via vio_timeout changes these values. vio/vio_priv.h: Remove vio_ignore_timeout. Add vio_win32_timeout to be used for named pipes and shared memory. vio/viosocket.c: Use async IO for named pipes. After issuing IO, wait for either IO completion, pipe_close_event or timeout. Refactor timeouts for named pipe and shared memory. --- sql/mysqld.cc | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 896be4a7f19..bf489b6fb99 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1717,7 +1717,7 @@ static void network_init(void) saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor; saPipeSecurity.bInheritHandle = FALSE; if ((hPipe= CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, + PIPE_ACCESS_DUPLEX|FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, @@ -5203,17 +5203,26 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) pthread_handler_t handle_connections_namedpipes(void *arg) { HANDLE hConnectedPipe; - BOOL fConnected; + OVERLAPPED connectOverlapped = {0}; THD *thd; my_thread_init(); DBUG_ENTER("handle_connections_namedpipes"); - (void) my_pthread_getprio(pthread_self()); // For debugging + connectOverlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); DBUG_PRINT("general",("Waiting for named pipe connections.")); while (!abort_loop) { /* wait for named pipe connection */ - fConnected = ConnectNamedPipe(hPipe, NULL); + BOOL fConnected= ConnectNamedPipe(hPipe, &connectOverlapped); + if (!fConnected && (GetLastError() == ERROR_IO_PENDING)) + { + /* + ERROR_IO_PENDING says async IO has started but not yet finished. + GetOverlappedResult will wait for completion. 
+ */ + DWORD bytes; + fConnected= GetOverlappedResult(hPipe, &connectOverlapped,&bytes, TRUE); + } if (abort_loop) break; if (!fConnected) @@ -5222,7 +5231,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg) { CloseHandle(hPipe); if ((hPipe= CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, + PIPE_ACCESS_DUPLEX|FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, @@ -5242,7 +5251,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg) hConnectedPipe = hPipe; /* create new pipe for new connection */ if ((hPipe = CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, + PIPE_ACCESS_DUPLEX|FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, @@ -5264,7 +5273,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg) CloseHandle(hConnectedPipe); continue; } - if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) || + if (!(thd->net.vio= vio_new_win32pipe(hConnectedPipe)) || my_net_init(&thd->net, thd->net.vio)) { close_connection(thd, ER_OUT_OF_RESOURCES, 1); @@ -5275,7 +5284,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg) thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); create_new_thread(thd); } - + CloseHandle(connectOverlapped.hEvent); decrement_handler_count(); DBUG_RETURN(0); } @@ -5452,8 +5461,7 @@ pthread_handler_t handle_connections_shared_memory(void *arg) errmsg= "Could not set client to read mode"; goto errorconn; } - if (!(thd->net.vio= vio_new_win32shared_memory(&thd->net, - handle_client_file_map, + if (!(thd->net.vio= vio_new_win32shared_memory(handle_client_file_map, handle_client_map, event_client_wrote, event_client_read, -- cgit v1.2.1 From 1ca80ed19e14d8d5837fa900ad75ec1a7a7856e1 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Mon, 2 Nov 2009 09:21:39 -0200 Subject: Bug#48370: Absolutely wrong calculations with GROUP BY and decimal fields when using IF Bug#45261: Crash, stored procedure + decimal Revert fix for Bug#45261 due to unforeseen bugs. --- sql/field.cc | 85 ----------------------------------------------------- sql/field.h | 9 ------ sql/item.cc | 25 ++++++---------- sql/item.h | 3 +- sql/item_cmpfunc.cc | 6 ++-- sql/item_func.cc | 52 +++++++++++++++++++++++--------- sql/item_func.h | 1 - sql/item_sum.cc | 3 +- sql/my_decimal.h | 14 +++++---- sql/sql_select.cc | 41 +++++++++++++++++++++++++- 10 files changed, 102 insertions(+), 137 deletions(-) (limited to 'sql') diff --git a/sql/field.cc b/sql/field.cc index 7edbd37d7da..354c911e1c0 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2480,97 +2480,12 @@ Field_new_decimal::Field_new_decimal(uint32 len_arg, { precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg); set_if_smaller(precision, DECIMAL_MAX_PRECISION); - DBUG_ASSERT(precision >= dec); DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) && (dec <= DECIMAL_MAX_SCALE)); bin_size= my_decimal_get_binary_size(precision, dec); } -/** - Create a field to hold a decimal value from an item. - - @remark The MySQL DECIMAL data type has a characteristic that needs to be - taken into account when deducing the type from a Item_decimal. - - But first, let's briefly recap what is the new MySQL DECIMAL type: - - The declaration syntax for a decimal is DECIMAL(M,D), where: - - * M is the maximum number of digits (the precision). - It has a range of 1 to 65. - * D is the number of digits to the right of the decimal separator (the scale). - It has a range of 0 to 30 and must be no larger than M. 
- - D and M are used to determine the storage requirements for the integer - and fractional parts of each value. The integer part is to the left of - the decimal separator and to the right is the fractional part. Hence: - - M is the number of digits for the integer and fractional part. - D is the number of digits for the fractional part. - - Consequently, M - D is the number of digits for the integer part. For - example, a DECIMAL(20,10) column has ten digits on either side of - the decimal separator. - - The characteristic that needs to be taken into account is that the - backing type for Item_decimal is a my_decimal that has a higher - precision (DECIMAL_MAX_POSSIBLE_PRECISION, see my_decimal.h) than - DECIMAL. - - Drawing a comparison between my_decimal and DECIMAL: - - * M has a range of 1 to 81. - * D has a range of 0 to 81. - - There can be a difference in range if the decimal contains a integer - part. This is because the fractional part must always be on a group - boundary, leaving at least one group for the integer part. Since each - group is 9 (DIG_PER_DEC1) digits and there are 9 (DECIMAL_BUFF_LENGTH) - groups, the fractional part is limited to 72 digits if there is at - least one digit in the integral part. - - Although the backing type for a DECIMAL is also my_decimal, every - time a my_decimal is stored in a DECIMAL field, the precision and - scale are explicitly capped at 65 (DECIMAL_MAX_PRECISION) and 30 - (DECIMAL_MAX_SCALE) digits, following my_decimal truncation procedure - (FIX_INTG_FRAC_ERROR). -*/ - -Field_new_decimal * -Field_new_decimal::new_decimal_field(const Item *item) -{ - uint32 len; - uint intg= item->decimal_int_part(), scale= item->decimals; - - DBUG_ASSERT(item->decimal_precision() >= item->decimals); - - /* - Employ a procedure along the lines of the my_decimal truncation process: - - If the integer part is equal to or bigger than the maximum precision: - Truncate integer part to fit and the fractional becomes zero. - - Otherwise: - Truncate fractional part to fit. - */ - if (intg >= DECIMAL_MAX_PRECISION) - { - intg= DECIMAL_MAX_PRECISION; - scale= 0; - } - else - { - uint room= min(DECIMAL_MAX_PRECISION - intg, DECIMAL_MAX_SCALE); - if (scale > room) - scale= room; - } - - len= my_decimal_precision_to_length(intg + scale, scale, item->unsigned_flag); - - return new Field_new_decimal(len, item->maybe_null, item->name, scale, - item->unsigned_flag); -} - - int Field_new_decimal::reset(void) { store_value(&decimal_zero); diff --git a/sql/field.h b/sql/field.h index 4f82f18a833..784b9133790 100644 --- a/sql/field.h +++ b/sql/field.h @@ -621,10 +621,6 @@ protected: class Field_num :public Field { public: - /** - The scale of the Field's value, i.e. the number of digits to the right - of the decimal point. - */ const uint8 dec; bool zerofill,unsigned_flag; // Purify cannot handle bit fields Field_num(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, @@ -782,11 +778,6 @@ public: Field_new_decimal(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg, uint8 dec_arg, bool unsigned_arg); - /* - Create a field to hold a decimal value from an item. - Truncates the precision and/or scale if necessary. 
- */ - static Field_new_decimal *new_decimal_field(const Item *item); enum_field_types type() const { return MYSQL_TYPE_NEWDECIMAL;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; } Item_result result_type () const { return DECIMAL_RESULT; } diff --git a/sql/item.cc b/sql/item.cc index 5bace670e9b..8f487872f1b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -435,26 +435,17 @@ Item::Item(THD *thd, Item *item): } -/** - Decimal precision of the item. - - @remark The precision must not be capped as it can be used in conjunction - with Item::decimals to determine the size of the integer part when - constructing a decimal data type. - - @see Item::decimal_int_part() - @see Item::decimals -*/ - uint Item::decimal_precision() const { - uint precision= max_length; Item_result restype= result_type(); if ((restype == DECIMAL_RESULT) || (restype == INT_RESULT)) - precision= my_decimal_length_to_precision(max_length, decimals, unsigned_flag); - - return precision; + { + uint prec= + my_decimal_length_to_precision(max_length, decimals, unsigned_flag); + return min(prec, DECIMAL_MAX_PRECISION); + } + return min(max_length, DECIMAL_MAX_PRECISION); } @@ -4908,7 +4899,9 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) switch (field_type()) { case MYSQL_TYPE_DECIMAL: case MYSQL_TYPE_NEWDECIMAL: - field= Field_new_decimal::new_decimal_field(this); + field= new Field_new_decimal((uchar*) 0, max_length, null_ptr, 0, + Field::NONE, name, decimals, 0, + unsigned_flag); break; case MYSQL_TYPE_TINY: field= new Field_tiny((uchar*) 0, max_length, null_ptr, 0, Field::NONE, diff --git a/sql/item.h b/sql/item.h index 405588917ee..3a4b6e53b3a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -762,10 +762,9 @@ public: virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} - /** Returns the uncapped decimal precision of this item. */ virtual uint decimal_precision() const; inline int decimal_int_part() const - { return decimal_precision() - decimals; } + { return my_decimal_int_part(decimal_precision(), decimals); } /* Returns true if this is constant (during query execution, i.e. its value will not change until next fix_fields) and its value is known. 
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9fc52bb8876..df92c165f2d 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2217,7 +2217,7 @@ uint Item_func_ifnull::decimal_precision() const int arg1_int_part= args[1]->decimal_int_part(); int max_int_part= max(arg0_int_part, arg1_int_part); int precision= max_int_part + decimals; - return precision; + return min(precision, DECIMAL_MAX_PRECISION); } @@ -2401,7 +2401,7 @@ uint Item_func_if::decimal_precision() const int arg1_prec= args[1]->decimal_int_part(); int arg2_prec= args[2]->decimal_int_part(); int precision=max(arg1_prec,arg2_prec) + decimals; - return precision; + return min(precision, DECIMAL_MAX_PRECISION); } @@ -2809,7 +2809,7 @@ uint Item_func_case::decimal_precision() const if (else_expr_num != -1) set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part()); - return max_int_part + decimals; + return min(max_int_part + decimals, DECIMAL_MAX_PRECISION); } diff --git a/sql/item_func.cc b/sql/item_func.cc index d9e6f76dd6b..ac52f36474a 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -451,8 +451,45 @@ Field *Item_func::tmp_table_field(TABLE *table) return make_string_field(table); break; case DECIMAL_RESULT: - field= Field_new_decimal::new_decimal_field(this); + { + uint8 dec= decimals; + uint8 intg= decimal_precision() - dec; + uint32 len= max_length; + + /* + Trying to put too many digits overall in a DECIMAL(prec,dec) + will always throw a warning. We must limit dec to + DECIMAL_MAX_SCALE however to prevent an assert() later. + */ + + if (dec > 0) + { + int overflow; + + dec= min(dec, DECIMAL_MAX_SCALE); + + /* + If the value still overflows the field with the corrected dec, + we'll throw out decimals rather than integers. This is still + bad and of course throws a truncation warning. + */ + + const int required_length= + my_decimal_precision_to_length(intg + dec, dec, + unsigned_flag); + + overflow= required_length - len; + + if (overflow > 0) + dec= max(0, dec - overflow); // too long, discard fract + else + /* Corrected value fits. */ + len= required_length; + } + + field= new Field_new_decimal(len, maybe_null, name, dec, unsigned_flag); break; + } case ROW_RESULT: default: // This case should never be chosen @@ -4739,19 +4776,6 @@ void Item_func_get_user_var::fix_length_and_dec() } -uint Item_func_get_user_var::decimal_precision() const -{ - uint precision= max_length; - Item_result restype= result_type(); - - /* Default to maximum as the precision is unknown a priori. */ - if ((restype == DECIMAL_RESULT) || (restype == INT_RESULT)) - precision= DECIMAL_MAX_PRECISION; - - return precision; -} - - bool Item_func_get_user_var::const_item() const { return (!var_entry || current_thd->query_id != var_entry->update_query_id); diff --git a/sql/item_func.h b/sql/item_func.h index fdbbff89e60..025ac12fe07 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1393,7 +1393,6 @@ public: table_map used_tables() const { return const_item() ? 
0 : RAND_TABLE_BIT; } bool eq(const Item *item, bool binary_cmp) const; - uint decimal_precision() const; private: bool set_value(THD *thd, sp_rcontext *ctx, Item **it); diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 08a48c6ce2f..38251294053 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -517,7 +517,8 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, name, table->s, collation.collation); break; case DECIMAL_RESULT: - field= Field_new_decimal::new_decimal_field(this); + field= new Field_new_decimal(max_length, maybe_null, name, + decimals, unsigned_flag); break; case ROW_RESULT: default: diff --git a/sql/my_decimal.h b/sql/my_decimal.h index b1df1395dcd..21669e82c44 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -48,12 +48,10 @@ C_MODE_END digits * number of decimal digits in one our big digit - number of decimal digits in one our big digit decreased by 1 (because we always put decimal point on the border of our big digits)) - - This value is 65 due to historical reasons partly due to it being used - as the maximum allowed precision and not the actual maximum precision. */ #define DECIMAL_MAX_PRECISION (DECIMAL_MAX_POSSIBLE_PRECISION - 8*2) #define DECIMAL_MAX_SCALE 30 +#define DECIMAL_NOT_SPECIFIED 31 /** maximum length of string representation (number of maximum decimal @@ -77,6 +75,12 @@ inline uint my_decimal_size(uint precision, uint scale) } +inline int my_decimal_int_part(uint precision, uint decimals) +{ + return precision - ((decimals == DECIMAL_NOT_SPECIFIED) ? 0 : decimals); +} + + /** my_decimal class limits 'decimal_t' type to what we need in MySQL. @@ -180,7 +184,7 @@ inline uint my_decimal_length_to_precision(uint length, uint scale, } inline uint32 my_decimal_precision_to_length_no_truncation(uint precision, - uint scale, + uint8 scale, bool unsigned_flag) { /* @@ -192,7 +196,7 @@ inline uint32 my_decimal_precision_to_length_no_truncation(uint precision, (unsigned_flag || !precision ? 0 : 1)); } -inline uint32 my_decimal_precision_to_length(uint precision, uint scale, +inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale, bool unsigned_flag) { /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 27ffb491173..854dd296039 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -9433,8 +9433,47 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, new_field->set_derivation(item->collation.derivation); break; case DECIMAL_RESULT: - new_field= Field_new_decimal::new_decimal_field(item); + { + uint8 dec= item->decimals; + uint8 intg= ((Item_decimal *) item)->decimal_precision() - dec; + uint32 len= item->max_length; + + /* + Trying to put too many digits overall in a DECIMAL(prec,dec) + will always throw a warning. We must limit dec to + DECIMAL_MAX_SCALE however to prevent an assert() later. + */ + + if (dec > 0) + { + signed int overflow; + + dec= min(dec, DECIMAL_MAX_SCALE); + + /* + If the value still overflows the field with the corrected dec, + we'll throw out decimals rather than integers. This is still + bad and of course throws a truncation warning. + +1: for decimal point + */ + + const int required_length= + my_decimal_precision_to_length(intg + dec, dec, + item->unsigned_flag); + + overflow= required_length - len; + + if (overflow > 0) + dec= max(0, dec - overflow); // too long, discard fract + else + /* Corrected value fits. 
*/ + len= required_length; + } + + new_field= new Field_new_decimal(len, maybe_null, item->name, + dec, item->unsigned_flag); break; + } case ROW_RESULT: default: // This case should never be choosen -- cgit v1.2.1 From 9819885177059fd75f3250b917142eb9d6ae5b8c Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 3 Nov 2009 17:00:41 +0800 Subject: BUG#48216 Replication fails on all slaves after upgrade to 5.0.86 on master When a sessione is closed, all temporary tables of the session are automatically dropped and are binlogged. But it will be binlogged with wrong database names when the length of the temporary tables' database names are greater than the length of the current database name or the current database is not set. Query_log_event's db_len is forgot to set when Query_log_event's db is set. This patch wrote code to set db_len immediately after db has set. --- sql/sql_base.cc | 1 + 1 file changed, 1 insertion(+) (limited to 'sql') diff --git a/sql/sql_base.cc b/sql/sql_base.cc index db4ab29d6de..178c3e12e23 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -798,6 +798,7 @@ void close_temporary_tables(THD *thd) s_query.length() - 1 /* to remove trailing ',' */, 0, FALSE, THD::NOT_KILLED); qinfo.db= db.ptr(); + qinfo.db_len= db.length(); thd->variables.character_set_client= cs_save; DBUG_ASSERT(qinfo.error_code == 0); mysql_bin_log.write(&qinfo); -- cgit v1.2.1 From bec35067d343438ec3f412510a3ecefca17c5e8d Mon Sep 17 00:00:00 2001 From: Jorgen Loland Date: Tue, 3 Nov 2009 13:48:59 +0100 Subject: Bug#48177 - SELECTs with NOT IN subqueries containing NULL values return too many records WHERE clauses with "outer_value_list NOT IN subselect" were handled incorrectly if the outer value list contained multiple items where at least one of these could be NULL. The first outer record with NULL value was handled correctly, but if a second record with NULL value existed, the optimizer would choose to reuse the result it got on the last execution of the subselect. This is incorrect if the outer value list has multiple items. The fix is to make Item_in_optimizer::val_int (in item_cmpfunc.cc) reuse the result of the latest execution for NULL values only if all values in the outer_value_list are NULL. mysql-test/r/subselect3.result: Added test for BUG#48177 mysql-test/t/subselect3.test: Added test for BUG#48177 sql/item_cmpfunc.cc: Make Item_in_optimizer::val_int (in item_cmpfunc.cc) reuse the result of the latest execution for NULL values only if all values in the outer_value_list are NULL. --- sql/item_cmpfunc.cc | 96 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 42 deletions(-) (limited to 'sql') diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index df92c165f2d..c6b88cd8188 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1585,61 +1585,73 @@ longlong Item_in_optimizer::val_int() if (cache->null_value) { + /* + We're evaluating + " [NOT] IN (SELECT ...)" + where one or more of the outer values is NULL. + */ if (((Item_in_subselect*)args[1])->is_top_level_item()) { /* - We're evaluating "NULL IN (SELECT ...)". The result can be NULL or - FALSE, and we can return one instead of another. Just return NULL. + We're evaluating a top level item, e.g. + " IN (SELECT ...)", + and in this case a NULL value in the outer_value_list means + that the result shall be NULL/FALSE (makes no difference for + top level items). The cached value is NULL, so just return + NULL. 
*/ null_value= 1; } else { - if (!((Item_in_subselect*)args[1])->is_correlated && - result_for_null_param != UNKNOWN) + /* + We're evaluating an item where a NULL value in either the + outer or inner value list does not automatically mean that we + can return NULL/FALSE. An example of such a query is + " NOT IN (SELECT ...)" + The result when there is at least one NULL value is: NULL if the + SELECT evaluated over the non-NULL values produces at least + one row, FALSE otherwise + */ + Item_in_subselect *item_subs=(Item_in_subselect*)args[1]; + bool all_left_cols_null= true; + const uint ncols= cache->cols(); + + /* + Turn off the predicates that are based on column compares for + which the left part is currently NULL + */ + for (uint i= 0; i < ncols; i++) { - /* Use cached value from previous execution */ - null_value= result_for_null_param; + if (cache->element_index(i)->null_value) + item_subs->set_cond_guard_var(i, FALSE); + else + all_left_cols_null= false; } - else + + if (!((Item_in_subselect*)args[1])->is_correlated && + all_left_cols_null && result_for_null_param != UNKNOWN) { - /* - We're evaluating "NULL IN (SELECT ...)". The result is: - FALSE if SELECT produces an empty set, or - NULL otherwise. - We disable the predicates we've pushed down into subselect, run the - subselect and see if it has produced any rows. + /* + This is a non-correlated subquery, all values in the outer + value list are NULL, and we have already evaluated the + subquery for all NULL values: Return the same result we + did last time without evaluating the subquery. */ - Item_in_subselect *item_subs=(Item_in_subselect*)args[1]; - if (cache->cols() == 1) - { - item_subs->set_cond_guard_var(0, FALSE); - (void) args[1]->val_bool_result(); - result_for_null_param= null_value= !item_subs->engine->no_rows(); - item_subs->set_cond_guard_var(0, TRUE); - } - else - { - uint i; - uint ncols= cache->cols(); - /* - Turn off the predicates that are based on column compares for - which the left part is currently NULL - */ - for (i= 0; i < ncols; i++) - { - if (cache->element_index(i)->null_value) - item_subs->set_cond_guard_var(i, FALSE); - } - - (void) args[1]->val_bool_result(); - result_for_null_param= null_value= !item_subs->engine->no_rows(); - - /* Turn all predicates back on */ - for (i= 0; i < ncols; i++) - item_subs->set_cond_guard_var(i, TRUE); - } + null_value= result_for_null_param; + } + else + { + /* The subquery has to be evaluated */ + (void) args[1]->val_bool_result(); + null_value= !item_subs->engine->no_rows(); + if (all_left_cols_null) + result_for_null_param= null_value; } + + /* Turn all predicates back on */ + for (uint i= 0; i < ncols; i++) + item_subs->set_cond_guard_var(i, TRUE); } return 0; } -- cgit v1.2.1 From 06c9d62a9f72c1554b3619f10388092da4ee9c04 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Tue, 3 Nov 2009 19:58:54 +0300 Subject: A fix and a test case for Bug#41756 "Strange error messages about locks from InnoDB". In JT_EQ_REF (join_read_key()) access method, don't try to unlock rows in the handler, unless certain that a) they were locked b) they are not used. Unlocking of rows is done by the logic of the nested join loop, and is unaware of the possible caching that the access method may have. This could lead to double unlocking, when a row was unlocked first after reading into the cache, and then when taken from cache, as well as to unlocking of rows which were actually used (but taken from cache). 
Delegate part of the unlocking logic to the access method, and in JT_EQ_REF count how many times a record was actually used in the join. Unlock it only if it's usage count is 0. Implemented review comments. mysql-test/r/bug41756.result: Add result file (Bug#41756) mysql-test/t/bug41756-master.opt: Use --innodb-locks-unsafe-for-binlog, as in 5.0 just using read_committed isolation is not sufficient to reproduce the bug. mysql-test/t/bug41756.test: Add a test file (Bug#41756) sql/item_subselect.cc: Complete struct READ_RECORD initialization with a new member to unlock records. sql/records.cc: Extend READ_RECORD API with a method to unlock read records. sql/sql_select.cc: In JT_EQ_REF (join_read_key()) access method, don't try to unlock rows in the handler, unless certain that a) they were locked b) they are not used. sql/sql_select.h: Add members to TABLE_REF to count TABLE_REF buffer usage count. sql/structs.h: Update declarations. --- sql/item_subselect.cc | 1 + sql/records.cc | 2 ++ sql/sql_select.cc | 61 +++++++++++++++++++++++++++++++++++++++++++++++++-- sql/sql_select.h | 8 ++++++- sql/structs.h | 14 ++++++++---- 5 files changed, 79 insertions(+), 7 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 805669b3cfa..80fbc2c74d3 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1869,6 +1869,7 @@ int subselect_single_select_engine::exec() tab->read_record.record= tab->table->record[0]; tab->read_record.thd= join->thd; tab->read_record.ref_length= tab->table->file->ref_length; + tab->read_record.unlock_row= rr_unlock_row; *(last_changed_tab++)= tab; break; } diff --git a/sql/records.cc b/sql/records.cc index d564533ae7a..26f4fd4aa23 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -61,6 +61,7 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, info->file= table->file; info->record= table->record[0]; info->print_error= print_error; + info->unlock_row= rr_unlock_row; table->status=0; /* And it's always found */ if (!table->file->inited) @@ -135,6 +136,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, } info->select=select; info->print_error=print_error; + info->unlock_row= rr_unlock_row; info->ignore_not_found_rows= 0; table->status=0; /* And it's always found */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0014504a52f..51794c1f158 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -140,6 +140,7 @@ static int join_read_const_table(JOIN_TAB *tab, POSITION *pos); static int join_read_system(JOIN_TAB *tab); static int join_read_const(JOIN_TAB *tab); static int join_read_key(JOIN_TAB *tab); +static void join_read_key_unlock_row(st_join_table *tab); static int join_read_always_key(JOIN_TAB *tab); static int join_read_last_key(JOIN_TAB *tab); static int join_no_more_records(READ_RECORD *info); @@ -5376,7 +5377,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length); j->ref.key_err=1; + j->ref.has_record= FALSE; j->ref.null_rejecting= 0; + j->ref.use_count= 0; keyuse=org_keyuse; store_key **ref_key= j->ref.key_copy; @@ -6176,6 +6179,20 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) DBUG_RETURN(0); } + +/** + The default implementation of unlock-row method of READ_RECORD, + used in all access methods. 
+*/ + +void rr_unlock_row(st_join_table *tab) +{ + READ_RECORD *info= &tab->read_record; + info->file->unlock_row(); +} + + + static void make_join_readinfo(JOIN *join, ulonglong options) { @@ -6191,6 +6208,7 @@ make_join_readinfo(JOIN *join, ulonglong options) TABLE *table=tab->table; tab->read_record.table= table; tab->read_record.file=table->file; + tab->read_record.unlock_row= rr_unlock_row; tab->next_select=sub_select; /* normal select */ /* @@ -6234,6 +6252,7 @@ make_join_readinfo(JOIN *join, ulonglong options) delete tab->quick; tab->quick=0; tab->read_first_record= join_read_key; + tab->read_record.unlock_row= join_read_key_unlock_row; tab->read_record.read_record= join_no_more_records; if (table->used_keys.is_set(tab->ref.key) && !table->no_keyread) @@ -10936,7 +10955,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, return NESTED_LOOP_NO_MORE_ROWS; } else - join_tab->read_record.file->unlock_row(); + join_tab->read_record.unlock_row(join_tab); } else { @@ -10946,7 +10965,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, */ join->examined_rows++; join->thd->row_count++; - join_tab->read_record.file->unlock_row(); + join_tab->read_record.unlock_row(join_tab); } return NESTED_LOOP_OK; } @@ -11299,17 +11318,55 @@ join_read_key(JOIN_TAB *tab) table->status=STATUS_NOT_FOUND; return -1; } + /* + Moving away from the current record. Unlock the row + in the handler if it did not match the partial WHERE. + */ + if (tab->ref.has_record && tab->ref.use_count == 0) + { + tab->read_record.file->unlock_row(); + tab->ref.has_record= FALSE; + } + error=table->file->index_read(table->record[0], tab->ref.key_buff, tab->ref.key_length,HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND) return report_error(table, error); + + if (! error) + { + tab->ref.has_record= TRUE; + tab->ref.use_count= 1; + } + } + else if (table->status == 0) + { + DBUG_ASSERT(tab->ref.has_record); + tab->ref.use_count++; } table->null_row=0; return table->status ? -1 : 0; } +/** + Since join_read_key may buffer a record, do not unlock + it if it was not used in this invocation of join_read_key(). + Only count locks, thus remembering if the record was left unused, + and unlock already when pruning the current value of + TABLE_REF buffer. + @sa join_read_key() +*/ + +static void +join_read_key_unlock_row(st_join_table *tab) +{ + DBUG_ASSERT(tab->ref.use_count); + if (tab->ref.use_count) + tab->ref.use_count--; +} + /* ref access method implementation: "read_first" function diff --git a/sql/sql_select.h b/sql/sql_select.h index a217d199a30..346d98aae58 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -53,6 +53,8 @@ class store_key; typedef struct st_table_ref { bool key_err; + /** True if something was read into buffer in join_read_key. */ + bool has_record; uint key_parts; // num of ... uint key_length; // length of key_buff int key; // key no @@ -79,7 +81,11 @@ typedef struct st_table_ref key_part_map null_rejecting; table_map depend_map; // Table depends on these tables. byte *null_ref_key; // null byte position in the key_buf. - // used for REF_OR_NULL optimization. + /* + The number of times the record associated with this key was used + in the join. 
+ */ + ha_rows use_count; } TABLE_REF; /* diff --git a/sql/structs.h b/sql/structs.h index be55527eaa0..3a7b2f49b2a 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -117,16 +117,22 @@ typedef struct st_reginfo { /* Extra info about reg */ } REGINFO; -struct st_read_record; /* For referense later */ class SQL_SELECT; class THD; class handler; +struct st_join_table; + +void rr_unlock_row(st_join_table *tab); -typedef struct st_read_record { /* Parameter to read_record */ +struct READ_RECORD { /* Parameter to read_record */ + typedef int (*Read_func)(READ_RECORD*); + typedef void (*Unlock_row_func)(st_join_table *); struct st_table *table; /* Head-form */ handler *file; struct st_table **forms; /* head and ref forms */ - int (*read_record)(struct st_read_record *); + + Read_func read_record; + Unlock_row_func unlock_row; THD *thd; SQL_SELECT *select; uint cache_records; @@ -138,7 +144,7 @@ typedef struct st_read_record { /* Parameter to read_record */ byte *cache,*cache_pos,*cache_end,*read_positions; IO_CACHE *io_cache; bool print_error, ignore_not_found_rows; -} READ_RECORD; +}; /* -- cgit v1.2.1
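As a closing illustration of the usage-counting approach taken for the EQ_REF row cache in the last patch above, here is a minimal standalone sketch. CachedRef, RowLock and the function names are hypothetical stand-ins rather than the actual TABLE_REF/handler structures; the sketch only shows the counting discipline: the generic per-row "unlock" hook merely gives back one usage, and the real unlock happens when the cache is refreshed and the buffered row turned out never to have been used.

    #include <cstdio>

    struct CachedRef {
      bool has_record = false;   // something is buffered and still locked
      unsigned use_count = 0;    // how many times the buffered row was used
    };

    struct RowLock {
      void unlock() { std::puts("row lock released"); }
    };

    // Called right before reading a new row into the buffer.
    static void refresh_cache(CachedRef &ref, RowLock &lock)
    {
      if (ref.has_record && ref.use_count == 0)
        lock.unlock();                 // the buffered row was never used
      ref.has_record = true;           // pretend the new read succeeded
      ref.use_count = 1;               // the fresh row counts as used once
    }

    // Called when the cached row is served again without re-reading it.
    static void reuse_cached_row(CachedRef &ref) { ++ref.use_count; }

    // Generic per-row hook: never touches the lock directly, so rows that
    // were actually consumed by the join stay locked.
    static void unlock_row_hook(CachedRef &ref)
    {
      if (ref.use_count)
        --ref.use_count;
    }

    int main()
    {
      CachedRef ref;
      RowLock lock;
      refresh_cache(ref, lock);   // first read, use_count == 1
      unlock_row_hook(ref);       // row did not match, use_count back to 0
      refresh_cache(ref, lock);   // moving away: prints "row lock released"
      reuse_cached_row(ref);      // cached row used again, stays locked
      return 0;
    }

Keeping the counter next to the cached reference, rather than in the handler, lets the generic nested-loop code stay unaware of whether a given access method buffers rows — which is the design point the patch's commit message makes about delegating part of the unlocking logic to the access method.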