From 8c641484dc43571b16edeb9780a587472fc1f14b Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Tue, 11 Sep 2012 16:29:51 +0200 Subject: WL#6454: Deprecate SHOW AUTHORS and SHOW CONTRIBUTORS Added deprecation warning for SHOW AUTHORS and SHOW CONTRIBUTORS. This is the 5.5 version of the patch. --- sql/sql_yacc.yy | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'sql') diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 45024faa03f..e93ed4e8853 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -11010,11 +11010,19 @@ show_param: { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_AUTHORS; + push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT, + ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT), + "SHOW AUTHORS"); } | CONTRIBUTORS_SYM { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_CONTRIBUTORS; + push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT, + ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT), + "SHOW CONTRIBUTORS"); } | PRIVILEGES { -- cgit v1.2.1 From 88f2746a7a22734d2f2aaa2ac20cc40665e52f6c Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Tue, 18 Sep 2012 17:32:02 +0200 Subject: Bug#14542543 FIX BUG #12694872 IN 5.5 Bug#14530242 CRASH / MEMORY CORRUPTION IN FILESORT_BUFFER::GET_RECORD_BUFFER WITH MYISAM This is a backport of Bug#12694872 - VALGRIND: 18,816 BYTES IN 196 BLOCKS ARE DEFINITELY LOST Bug#13340270: assertion table->sort.record_pointers == __null Bug#14536113 CRASH IN CLOSEFRM (TABLE.CC) OR UNPACK (FIELD.H) ON SUBQUERY WITH MYISAM TABLES Also: removed and re-added test files with file-ids from trunk. --- sql/filesort.cc | 3 +++ sql/opt_range.cc | 7 ++++++- sql/sql_select.cc | 12 ++++++------ sql/uniques.cc | 7 ++++++- 4 files changed, 21 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/filesort.cc b/sql/filesort.cc index dba09c969e3..a11be501991 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -138,6 +138,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, */ memcpy(&table_sort, &table->sort, sizeof(FILESORT_INFO)); table->sort.io_cache= NULL; + DBUG_ASSERT(table_sort.record_pointers == NULL); outfile= table_sort.io_cache; my_b_clear(&tempfile); @@ -366,6 +367,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, void filesort_free_buffers(TABLE *table, bool full) { + DBUG_ENTER("filesort_free_buffers"); my_free(table->sort.record_pointers); table->sort.record_pointers= NULL; @@ -383,6 +385,7 @@ void filesort_free_buffers(TABLE *table, bool full) my_free(table->sort.addon_field); table->sort.addon_buf= NULL; table->sort.addon_field= NULL; + DBUG_VOID_RETURN; } /** Make a array of string pointers. 
*/ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 8d221af392b..ce48a8da958 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -74,6 +74,7 @@ #include "records.h" // init_read_record, end_read_record #include #include "sql_select.h" +#include "filesort.h" // filesort_free_buffers #ifndef EXTRA_DEBUG #define test_rb_tree(A,B) {} @@ -1246,7 +1247,8 @@ int QUICK_INDEX_MERGE_SELECT::init() int QUICK_INDEX_MERGE_SELECT::reset() { DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset"); - DBUG_RETURN(read_keys_and_merge()); + const int retval= read_keys_and_merge(); + DBUG_RETURN(retval); } bool @@ -8295,7 +8297,10 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() thd->variables.sortbuff_size); } else + { unique->reset(); + filesort_free_buffers(head, false); + } DBUG_ASSERT(file->ref_length == unique->get_size()); DBUG_ASSERT(thd->variables.sortbuff_size == unique->get_max_in_memory_size()); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index fd6d0e44597..3bde5aa8f6a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -7173,14 +7173,14 @@ void JOIN::cleanup(bool full) { JOIN_TAB *tab,*end; /* - Only a sorted table may be cached. This sorted table is always the - first non const table in join->all_tables + Free resources allocated by filesort() and Unique::get() */ if (tables > const_tables) // Test for not-const tables - { - free_io_cache(all_tables[const_tables]); - filesort_free_buffers(all_tables[const_tables],full); - } + for (uint ix= const_tables; ix < tables; ++ix) + { + free_io_cache(all_tables[ix]); + filesort_free_buffers(all_tables[ix], full); + } if (full) { diff --git a/sql/uniques.cc b/sql/uniques.cc index e7ce2197147..71e680682cd 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -57,7 +57,10 @@ int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique) Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, uint size_arg, ulonglong max_in_memory_size_arg) - :max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0) + :max_in_memory_size(max_in_memory_size_arg), + record_pointers(NULL), + size(size_arg), + elements(0) { my_b_clear(&file); init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0, @@ -583,6 +586,7 @@ bool Unique::get(TABLE *table) if (my_b_tell(&file) == 0) { /* Whole tree is in memory; Don't use disk if you don't need to */ + DBUG_ASSERT(table->sort.record_pointers == NULL); if ((record_pointers=table->sort.record_pointers= (uchar*) my_malloc(size * tree.elements_in_tree, MYF(0)))) { @@ -603,6 +607,7 @@ bool Unique::get(TABLE *table) bool error=1; /* Open cached file if it isn't open */ + DBUG_ASSERT(table->sort.io_cache == NULL); outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_ZEROFILL)); -- cgit v1.2.1 From 8ce6582c37be81281b7ada6797c456c661c214dd Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Mon, 10 Sep 2012 13:32:50 +0200 Subject: Bug#14495351: CRASH IN HA_PARTITION::HANDLE_UNORDERED_NEXT The partitioning engine does not implement index_next for partitions which return HA_ERR_KEY_NOT_FOUND in index_read_map. If HA_ERR_KEY_NOT_FOUND was returned by a partition during index_read_map, that partition would not be included in following calls to index_next. If no partition returned a row in index_read_map, then the subsequent call to index_next would try to use a non existing handler (index out of bound). Even after fixing the index out of bound if at least one partition returned. 
So it is really two connected bugs 1) crash due to index out of bound (-1 unsigned). 2) not including partitions that returned HA_ERR_KEY_NOT_FOUND. Fixed by recording the partitions that returned HA_ERR_KEY_NOT_FOUND, and include them too when doing handle_ordered_next the first time. --- sql/ha_partition.cc | 218 +++++++++++++++++++++++++++++++++++++++++----------- sql/ha_partition.h | 4 + 2 files changed, 176 insertions(+), 46 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 14b89bf0dc8..e9ffc7abbac 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2670,6 +2670,17 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); + /* + Initialize the bitmap we use to keep track of partitions which returned + HA_ERR_KEY_NOT_FOUND from index_read_map. + */ + if (bitmap_init(&m_key_not_found_partitions, NULL, m_tot_parts, FALSE)) + { + bitmap_free(&m_bulk_insert_started); + DBUG_RETURN(error); + } + bitmap_clear_all(&m_key_not_found_partitions); + m_key_not_found= false; /* Initialize the bitmap we use to determine what partitions are used */ if (!m_is_clone_of) { @@ -2810,6 +2821,7 @@ err_handler: (*file)->close(); err_alloc: bitmap_free(&m_bulk_insert_started); + bitmap_free(&m_key_not_found_partitions); if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); @@ -2886,6 +2898,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); destroy_record_priority_queue(); bitmap_free(&m_bulk_insert_started); + bitmap_free(&m_key_not_found_partitions); if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -4419,21 +4432,24 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key, } -/* +/** Common routine for a number of index_read variants - SYNOPSIS - ha_partition::common_index_read() - buf Buffer where the record should be returned - have_start_key TRUE <=> the left endpoint is available, i.e. - we're in index_read call or in read_range_first - call and the range has left endpoint - - FALSE <=> there is no left endpoint (we're in - read_range_first() call and the range has no left - endpoint) + @param buf Buffer where the record should be returned. + @param have_start_key TRUE <=> the left endpoint is available, i.e. + we're in index_read call or in read_range_first + call and the range has left endpoint. + FALSE <=> there is no left endpoint (we're in + read_range_first() call and the range has no left + endpoint). - DESCRIPTION + @return Operation status + @retval 0 OK + @retval HA_ERR_END_OF_FILE Whole index scanned, without finding the record. + @retval HA_ERR_KEY_NOT_FOUND Record not found, but index cursor positioned. + @retval other error code. + + @details Start scanning the range (when invoked from read_range_first()) or doing an index lookup (when invoked from index_read_XXX): - If possible, perform partition selection @@ -4443,10 +4459,6 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key, handle_unordered_scan_next_partition) YES: Fill the priority queue and get the record that is the first in the ordering - - RETURN - 0 OK - other HA_ERR_END_OF_FILE or other error code. 
*/ int ha_partition::common_index_read(uchar *buf, bool have_start_key) @@ -4456,14 +4468,16 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) bool reverse_order= FALSE; DBUG_ENTER("ha_partition::common_index_read"); - DBUG_PRINT("info", ("m_ordered %u m_ordered_scan_ong %u have_start_key %u", - m_ordered, m_ordered_scan_ongoing, have_start_key)); + DBUG_PRINT("info", ("m_ordered %u m_ordered_scan_ong %u", + m_ordered, m_ordered_scan_ongoing)); if (have_start_key) { m_start_key.length= key_len= calculate_key_len(table, active_index, m_start_key.key, m_start_key.keypart_map); + DBUG_PRINT("info", ("have_start_key map %u find_flag %u len %u", + m_start_key.keypart_map, m_start_key.flag, key_len)); DBUG_ASSERT(key_len); } if ((error= partition_scan_set_up(buf, have_start_key))) @@ -4481,24 +4495,16 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) } DBUG_PRINT("info", ("m_ordered %u m_o_scan_ong %u have_start_key %u", m_ordered, m_ordered_scan_ongoing, have_start_key)); - if (!m_ordered_scan_ongoing || - (have_start_key && m_start_key.flag == HA_READ_KEY_EXACT && - !m_pkey_is_clustered && - key_len >= m_curr_key_info[0]->key_length)) + if (!m_ordered_scan_ongoing) { /* - We use unordered index scan either when read_range is used and flag - is set to not use ordered or when an exact key is used and in this - case all records will be sorted equal and thus the sort order of the - resulting records doesn't matter. + We use unordered index scan when read_range is used and flag + is set to not use ordered. We also use an unordered index scan when the number of partitions to scan is only one. The unordered index scan will use the partition set created. - Need to set unordered scan ongoing since we can come here even when - it isn't set. */ DBUG_PRINT("info", ("doing unordered scan")); - m_ordered_scan_ongoing= FALSE; error= handle_unordered_scan_next_partition(buf); } else @@ -4616,7 +4622,7 @@ int ha_partition::common_first_last(uchar *buf) int ha_partition::index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map) { - DBUG_ENTER("ha_partition::index_read_last"); + DBUG_ENTER("ha_partition::index_read_last_map"); m_ordered= TRUE; // Safety measure end_range= 0; @@ -4709,6 +4715,8 @@ int ha_partition::index_next(uchar * buf) TODO(low priority): If we want partition to work with the HANDLER commands, we must be able to do index_last() -> index_prev() -> index_next() + and if direction changes, we must step back those partitions in + the record queue so we don't return a value from the wrong direction. */ DBUG_ASSERT(m_index_scan_type != partition_index_last); if (!m_ordered_scan_ongoing) @@ -4960,10 +4968,18 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag) int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) { - handler *file= m_file[m_part_spec.start_part]; + handler *file; int error; DBUG_ENTER("ha_partition::handle_unordered_next"); + if (m_part_spec.start_part >= m_tot_parts) + { + /* Should never happen! 
*/ + DBUG_ASSERT(0); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + file= m_file[m_part_spec.start_part]; + /* We should consider if this should be split into three functions as partition_read_range is_next_same are always local constants @@ -5024,6 +5040,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) int ha_partition::handle_unordered_scan_next_partition(uchar * buf) { uint i; + int saved_error= HA_ERR_END_OF_FILE; DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition"); for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) @@ -5074,26 +5091,33 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf) } if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND)) DBUG_RETURN(error); - DBUG_PRINT("info", ("HA_ERR_END_OF_FILE on partition %d", i)); + + /* + If HA_ERR_KEY_NOT_FOUND, we must return that error instead of + HA_ERR_END_OF_FILE, to be able to continue search. + */ + if (saved_error != HA_ERR_KEY_NOT_FOUND) + saved_error= error; + DBUG_PRINT("info", ("END_OF_FILE/KEY_NOT_FOUND on partition %d", i)); } - m_part_spec.start_part= NO_CURRENT_PART_ID; - DBUG_RETURN(HA_ERR_END_OF_FILE); + if (saved_error == HA_ERR_END_OF_FILE) + m_part_spec.start_part= NO_CURRENT_PART_ID; + DBUG_RETURN(saved_error); } -/* - Common routine to start index scan with ordered results +/** + Common routine to start index scan with ordered results. - SYNOPSIS - handle_ordered_index_scan() - out:buf Read row in MySQL Row Format + @param[out] buf Read row in MySQL Row Format - RETURN VALUE - HA_ERR_END_OF_FILE End of scan - 0 Success - other Error code + @return Operation status + @retval HA_ERR_END_OF_FILE End of scan + @retval HA_ERR_KEY_NOT_FOUNE End of scan + @retval 0 Success + @retval other Error code - DESCRIPTION + @details This part contains the logic to handle index scans that require ordered output. This includes all except those started by read_range_first with the flag ordered set to FALSE. Thus most direct index_read and all @@ -5115,8 +5139,14 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) uint j= 0; bool found= FALSE; uchar *part_rec_buf_ptr= m_ordered_rec_buffer; + int saved_error= HA_ERR_END_OF_FILE; DBUG_ENTER("ha_partition::handle_ordered_index_scan"); + if (m_key_not_found) + { + m_key_not_found= false; + bitmap_clear_all(&m_key_not_found_partitions); + } m_top_entry= NO_CURRENT_PART_ID; queue_remove_all(&m_queue); @@ -5178,6 +5208,13 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) { DBUG_RETURN(error); } + else if (error == HA_ERR_KEY_NOT_FOUND) + { + DBUG_PRINT("info", ("HA_ERR_KEY_NOT_FOUND from partition %u", i)); + bitmap_set_bit(&m_key_not_found_partitions, i); + m_key_not_found= true; + saved_error= error; + } part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } if (found) @@ -5195,7 +5232,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry)); DBUG_RETURN(0); } - DBUG_RETURN(HA_ERR_END_OF_FILE); + DBUG_RETURN(saved_error); } @@ -5223,6 +5260,59 @@ void ha_partition::return_top_record(uchar *buf) } +/** + Add index_next/prev from partitions without exact match. + + If there where any partitions that returned HA_ERR_KEY_NOT_FOUND when + ha_index_read_map was done, those partitions must be included in the + following index_next/prev call. 
+*/ + +int ha_partition::handle_ordered_index_scan_key_not_found() +{ + int error; + uint i; + uchar *part_buf= m_ordered_rec_buffer; + uchar *curr_rec_buf= NULL; + DBUG_ENTER("ha_partition::handle_ordered_index_scan_key_not_found"); + DBUG_ASSERT(m_key_not_found); + /* + Loop over all used partitions to get the correct offset + into m_ordered_rec_buffer. + */ + for (i= 0; i < m_tot_parts; i++) + { + if (!bitmap_is_set(&m_part_info->used_partitions, i)) + continue; + + if (bitmap_is_set(&m_key_not_found_partitions, i)) + { + /* + This partition is used and did return HA_ERR_KEY_NOT_FOUND + in index_read_map. + */ + curr_rec_buf= part_buf + PARTITION_BYTES_IN_POS; + error= m_file[i]->index_next(curr_rec_buf); + /* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */ + DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND); + if (!error) + queue_insert(&m_queue, part_buf); + else if (error != HA_ERR_END_OF_FILE && error != HA_ERR_KEY_NOT_FOUND) + DBUG_RETURN(error); + } + part_buf+= m_rec_length + PARTITION_BYTES_IN_POS; + } + DBUG_ASSERT(curr_rec_buf); + bitmap_clear_all(&m_key_not_found_partitions); + m_key_not_found= false; + + /* Update m_top_entry, which may have changed. */ + uchar *key_buffer= queue_top(&m_queue); + m_top_entry= uint2korr(key_buffer); + DBUG_RETURN(0); +} + + /* Common routine to handle index_next with ordered results @@ -5242,9 +5332,45 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) int error; uint part_id= m_top_entry; uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; - handler *file= m_file[part_id]; + handler *file; DBUG_ENTER("ha_partition::handle_ordered_next"); + if (m_key_not_found) + { + if (is_next_same) + { + /* Only rows which match the key. */ + m_key_not_found= false; + bitmap_clear_all(&m_key_not_found_partitions); + } + else + { + /* There are partitions not included in the index record queue. */ + uint old_elements= m_queue.elements; + if ((error= handle_ordered_index_scan_key_not_found())) + DBUG_RETURN(error); + /* + If the queue top changed, i.e. one of the partitions that gave + HA_ERR_KEY_NOT_FOUND in index_read_map found the next record, + return it. + Otherwise replace the old with a call to index_next (fall through). + */ + if (old_elements != m_queue.elements && part_id != m_top_entry) + { + return_top_record(buf); + DBUG_RETURN(0); + } + } + } + if (part_id >= m_tot_parts) + { + /* This should never happen! */ + DBUG_ASSERT(0); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + file= m_file[part_id]; + if (m_index_scan_type == partition_read_range) { error= file->read_range_next(); diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 294e9f8adf6..24f04ee596b 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -183,6 +183,9 @@ private: static int compare_number_of_records(ha_partition *me, const uint32 *a, const uint32 *b); + /** partitions that returned HA_ERR_KEY_NOT_FOUND. 
*/ + MY_BITMAP m_key_not_found_partitions; + bool m_key_not_found; public: handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) @@ -519,6 +522,7 @@ private: int handle_unordered_next(uchar * buf, bool next_same); int handle_unordered_scan_next_partition(uchar * buf); int handle_ordered_index_scan(uchar * buf, bool reverse_order); + int handle_ordered_index_scan_key_not_found(); int handle_ordered_next(uchar * buf, bool next_same); int handle_ordered_prev(uchar * buf); void return_top_record(uchar * buf); -- cgit v1.2.1 From 2943c8131a180de40d94a28bec89d3c1ba1f3687 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Wed, 3 Oct 2012 15:00:43 +0200 Subject: Bug#14495351: CRASH IN HA_PARTITION::HANDLE_UNORDERED_NEXT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up patch - Fix broken build: error: format ‘%u’ expects argument of type ‘unsigned int’, but argument 2 has type ‘key_part_map {aka long unsigned int}’ [-Werror=format] --- sql/ha_partition.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index e9ffc7abbac..2f39f201f80 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4476,7 +4476,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) m_start_key.length= key_len= calculate_key_len(table, active_index, m_start_key.key, m_start_key.keypart_map); - DBUG_PRINT("info", ("have_start_key map %u find_flag %u len %u", + DBUG_PRINT("info", ("have_start_key map %lu find_flag %u len %u", m_start_key.keypart_map, m_start_key.flag, key_len)); DBUG_ASSERT(key_len); } -- cgit v1.2.1 From 30d35590a3bce929679cdc38d36fc67f7923a39e Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Wed, 3 Oct 2012 16:05:07 +0200 Subject: Bug#13713525 CREATE_INITIAL_DB.CMAKE IS FAILING ON WINDOWS, STILL "DEVENV" RETURNS 0 This bug depends on cmake version. For cmake 2.6 (which is still in use for some pushbuild trees) the main build would succeed, even if create_initial_db failed. The problem was the chaining of commands in the CUSTOM_COMMAND to produce 'initdb.dep'. It first invokes cmake to run mysqld, then invokes 'touch' to create the file. Moving the 'touch' command makes the error propagate properly for both cmake 2.6 and 2.8 --- sql/CMakeLists.txt | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index a3df9e7948b..d79b732005b 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -291,15 +291,17 @@ IF(WIN32 AND MYSQLD_EXECUTABLE) COMMAND ${CMAKE_COMMAND} ${CONFIG_PARAM} -P ${CMAKE_CURRENT_BINARY_DIR}/create_initial_db.cmake WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data - COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/initdb.dep DEPENDS mysqld ) ADD_CUSTOM_TARGET(initial_database ALL DEPENDS initdb.dep ) - INSTALL(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data DESTINATION . - COMPONENT DataFiles PATTERN "initdb.dep" EXCLUDE PATTERN "bootstrap.sql" EXCLUDE) + INSTALL(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data + DESTINATION . 
+ COMPONENT DataFiles + PATTERN "initdb.dep" EXCLUDE + PATTERN "bootstrap.sql" EXCLUDE) ELSE() # Not windows or cross compiling, just install an empty directory INSTALL(FILES ${DUMMY_FILE} DESTINATION data/mysql COMPONENT DataFiles) -- cgit v1.2.1 From bfba296d4084eab4b580cddc04e889ad2abdaeb2 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Thu, 4 Oct 2012 16:15:13 +0200 Subject: Bug#14640599 MEMORY LEAK WHEN EXECUTING STORED ROUTINE EXCEPTION HANDLER When a SP handler is activated, memory is allocated to hold the MESSAGE_TEXT for the condition that caused the activation. The problem was that this memory was allocated on the MEM_ROOT belonging to the stored program. Since this MEM_ROOT is not freed until the stored program ends, a stored program that causes lots of handler activations can start using lots of memory. In 5.1 and earlier the problem did not exist as no MESSAGE_TEXT was allocated if a condition was raised with a handler present. However, this behavior lead to a number of other issues such as Bug#23032. This patch fixes the problem by allocating enough memory for the necessary MESSAGE_TEXTs in the SP MEM_ROOT when the SP starts and then re-using this memory each time a handler is activated. This is the 5.5 version of the patch. --- sql/sp_rcontext.cc | 11 +++-------- sql/sp_rcontext.h | 45 ++++++++++++++++++++++++++++++++++++++++++--- sql/sql_signal.cc | 12 +++++++++--- 3 files changed, 54 insertions(+), 14 deletions(-) (limited to 'sql') diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index fba6a56b37c..09f48d824b0 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -67,19 +67,15 @@ sp_rcontext::~sp_rcontext() bool sp_rcontext::init(THD *thd) { uint handler_count= m_root_parsing_ctx->max_handler_index(); - uint i; in_sub_stmt= thd->in_sub_stmt; if (init_var_table(thd) || init_var_items()) return TRUE; - if (!(m_raised_conditions= new (thd->mem_root) MYSQL_ERROR[handler_count])) + if (!(m_raised_conditions= new (thd->mem_root) Sql_condition_info[handler_count])) return TRUE; - for (i= 0; imem_root); - return !(m_handler= (sp_handler_t*)thd->alloc(handler_count * sizeof(sp_handler_t))) || @@ -446,13 +442,12 @@ sp_rcontext::exit_handler() DBUG_VOID_RETURN; } -MYSQL_ERROR* -sp_rcontext::raised_condition() const +Sql_condition_info* sp_rcontext::raised_condition() const { if (m_ihsp > 0) { uint hindex= m_in_handler[m_ihsp - 1].index; - MYSQL_ERROR *raised= & m_raised_conditions[hindex]; + Sql_condition_info *raised= & m_raised_conditions[hindex]; return raised; } diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h index 84d5a1227fe..c89ada3f3c8 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -58,6 +58,46 @@ typedef struct uint index; } sp_active_handler_t; + +class Sql_condition_info : public Sql_alloc +{ +public: + /** SQL error code. */ + uint m_sql_errno; + + /** Error level. */ + MYSQL_ERROR::enum_warning_level m_level; + + /** SQLSTATE. */ + char m_sql_state[SQLSTATE_LENGTH + 1]; + + /** Text message. */ + char m_message[MYSQL_ERRMSG_SIZE]; + + void set(uint sql_errno, const char* sqlstate, + MYSQL_ERROR::enum_warning_level level, + const char* msg) + { + m_sql_errno= sql_errno; + m_level= level; + + memcpy(m_sql_state, sqlstate, SQLSTATE_LENGTH); + m_sql_state[SQLSTATE_LENGTH]= '\0'; + + strncpy(m_message, msg, MYSQL_ERRMSG_SIZE); + } + + void clear() + { + m_sql_errno= 0; + m_level= MYSQL_ERROR::WARN_LEVEL_ERROR; + + m_sql_state[0]= '\0'; + m_message[0]= '\0'; + } +}; + + /* This class is a runtime context of a Stored Routine. 
It is used in an execution and is intended to contain all dynamic objects (i.e. objects, which @@ -146,8 +186,7 @@ class sp_rcontext : public Sql_alloc MYSQL_ERROR::enum_warning_level level, const char *msg); - MYSQL_ERROR * - raised_condition() const; + Sql_condition_info *raised_condition() const; void push_hstack(uint h); @@ -232,7 +271,7 @@ private: SQL conditions caught by each handler. This is an array indexed by handler index. */ - MYSQL_ERROR *m_raised_conditions; + Sql_condition_info *m_raised_conditions; uint m_hcount; // Stack pointer for m_handler uint *m_hstack; // Return stack for continue handlers diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc index 9910dfc924e..e0c2a96ac84 100644 --- a/sql/sql_signal.cc +++ b/sql/sql_signal.cc @@ -478,7 +478,7 @@ bool Signal_statement::execute(THD *thd) bool Resignal_statement::execute(THD *thd) { - MYSQL_ERROR *signaled; + Sql_condition_info *signaled; int result= TRUE; DBUG_ENTER("Resignal_statement::execute"); @@ -491,15 +491,21 @@ bool Resignal_statement::execute(THD *thd) DBUG_RETURN(result); } + MYSQL_ERROR signaled_err(thd->mem_root); + signaled_err.set(signaled->m_sql_errno, + signaled->m_sql_state, + signaled->m_level, + signaled->m_message); + if (m_cond == NULL) { /* RESIGNAL without signal_value */ - result= raise_condition(thd, signaled); + result= raise_condition(thd, &signaled_err); DBUG_RETURN(result); } /* RESIGNAL with signal_value */ - result= raise_condition(thd, signaled); + result= raise_condition(thd, &signaled_err); DBUG_RETURN(result); } -- cgit v1.2.1 From 378a7d1ef5e60884d21f3e4059aad29d0e4dcefa Mon Sep 17 00:00:00 2001 From: Annamalai Gurusami Date: Mon, 8 Oct 2012 19:40:30 +0530 Subject: Bug #14036214 MYSQLD CRASHES WHEN EXECUTING UPDATE IN TRX WITH CONSISTENT SNAPSHOT OPTION A transaction is started with a consistent snapshot. After the transaction is started new indexes are added to the table. Now when we issue an update statement, the optimizer chooses an index. When the index scan is being initialized via ha_innobase::change_active_index(), InnoDB reports the error code HA_ERR_TABLE_DEF_CHANGED, with message stating that "insufficient history for index". This error message is propagated up to the SQL layer. But the my_error() api is never called. The statement level diagnostics area is not updated with the correct error status (it remains in Diagnostics_area::DA_EMPTY). Hence the following check in the Protocol::end_statement() fails. 516 case Diagnostics_area::DA_EMPTY: 517 default: 518 DBUG_ASSERT(0); 519 error= send_ok(thd->server_status, 0, 0, 0, NULL); 520 break; The fix is to backport the fix of bugs 14365043, 11761652 and 11746399. 14365043 PROTOCOL::END_STATEMENT(): ASSERTION `0' FAILED 11761652 HA_RND_INIT() RESULT CODE NOT CHECKED 11746399 RETURN VALUES OF HA_INDEX_INIT() AND INDEX_INIT() IGNORED rb://1227 approved by guilhem and mattiasj. 
--- sql/event_db_repository.cc | 10 +++++-- sql/filesort.cc | 8 +++++- sql/handler.cc | 47 ++++++++++++++++++++++++++------ sql/handler.h | 2 ++ sql/item_subselect.cc | 31 +++++++++++++++------ sql/log_event.cc | 12 ++++++-- sql/log_event_old.cc | 36 ++++++++++++++++++------ sql/opt_range.cc | 32 ++++++++++++++-------- sql/opt_sum.cc | 9 ++++-- sql/records.cc | 10 +++++-- sql/sp.cc | 17 ++++++++++-- sql/sql_acl.cc | 38 +++++++++++++++++++++----- sql/sql_handler.cc | 25 +++++++++-------- sql/sql_help.cc | 10 +++++-- sql/sql_select.cc | 68 +++++++++++++++++++++++++++++++++++++--------- sql/sql_show.cc | 12 ++++++-- sql/sql_update.cc | 14 +++++++--- sql/tztime.cc | 32 ++++++++++------------ 18 files changed, 304 insertions(+), 109 deletions(-) (limited to 'sql') diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 59d168e01b5..74da4d8f587 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -406,7 +406,6 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, TABLE *event_table, const char *db) { - int ret=0; CHARSET_INFO *scs= system_charset_info; KEY *key_info; uint key_len; @@ -416,7 +415,14 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, DBUG_ENTER("Event_db_repository::index_read_for_db_for_i_s"); DBUG_PRINT("info", ("Using prefix scanning on PK")); - event_table->file->ha_index_init(0, 1); + + int ret= event_table->file->ha_index_init(0, 1); + if (ret) + { + event_table->file->print_error(ret, MYF(0)); + DBUG_RETURN(true); + } + key_info= event_table->key_info; if (key_info->key_parts == 0 || diff --git a/sql/filesort.cc b/sql/filesort.cc index a11be501991..b64f5221606 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -567,7 +567,13 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (!quick_select) { next_pos=(uchar*) 0; /* Find records in sequence */ - file->ha_rnd_init(1); + DBUG_EXECUTE_IF("bug14365043_1", + DBUG_SET("+d,ha_rnd_init_fail");); + if ((error= file->ha_rnd_init(1))) + { + file->print_error(error, MYF(0)); + DBUG_RETURN(HA_POS_ERROR); + } file->extra_opt(HA_EXTRA_CACHE, current_thd->variables.read_buff_size); } diff --git a/sql/handler.cc b/sql/handler.cc index 27309a4b1d7..f4eb89912e9 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2291,16 +2291,25 @@ int handler::read_first_row(uchar * buf, uint primary_key) if (stats.deleted < 10 || primary_key >= MAX_KEY || !(index_flags(primary_key, 0, 0) & HA_READ_ORDER)) { - (void) ha_rnd_init(1); - while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ; - (void) ha_rnd_end(); + if (!(error= ha_rnd_init(1))) + { + while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) + /* skip deleted row */; + const int end_error= ha_rnd_end(); + if (!error) + error= end_error; + } } else { /* Find the first row through the primary key */ - (void) ha_index_init(primary_key, 0); - error=index_first(buf); - (void) ha_index_end(); + if (!(error= ha_index_init(primary_key, 0))) + { + error= index_first(buf); + const int end_error= ha_index_end(); + if (!error) + error= end_error; + } } DBUG_RETURN(error); } @@ -2695,7 +2704,15 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, table->mark_columns_used_by_index_no_reset(table->s->next_number_index, table->read_set); column_bitmaps_signal(); - index_init(table->s->next_number_index, 1); + + if (ha_index_init(table->s->next_number_index, 1)) + { + /* This should never happen, assert in debug, and fail in release build */ + DBUG_ASSERT(0); + 
*first_value= ULONGLONG_MAX; + return; + } + if (table->s->next_number_keypart == 0) { // Autoincrement at key-start error=index_last(table->record[1]); @@ -2725,13 +2742,25 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, } if (error) - nr=1; + { + if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND) + { + /* No entry found, start with 1. */ + nr= 1; + } + else + { + DBUG_ASSERT(0); + nr= ULONGLONG_MAX; + } + } else nr= ((ulonglong) table->next_number_field-> val_int_offset(table->s->rec_buff_length)+1); - index_end(); + ha_index_end(); (void) extra(HA_EXTRA_NO_KEYREAD); *first_value= nr; + return; } diff --git a/sql/handler.h b/sql/handler.h index 0b970a1349d..5eb2d6b440a 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1351,6 +1351,7 @@ public: int ha_open(TABLE *table, const char *name, int mode, int test_if_locked); int ha_index_init(uint idx, bool sorted) { + DBUG_EXECUTE_IF("ha_index_init_fail", return HA_ERR_TABLE_DEF_CHANGED;); int result; DBUG_ENTER("ha_index_init"); DBUG_ASSERT(inited==NONE); @@ -1367,6 +1368,7 @@ public: } int ha_rnd_init(bool scan) { + DBUG_EXECUTE_IF("ha_rnd_init_fail", return HA_ERR_TABLE_DEF_CHANGED;); int result; DBUG_ENTER("ha_rnd_init"); DBUG_ASSERT(inited==NONE || (inited==RND && scan)); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index bc81e5b01c1..d3b06f5983a 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -2058,10 +2058,14 @@ int subselect_uniquesubquery_engine::scan_table() TABLE *table= tab->table; DBUG_ENTER("subselect_uniquesubquery_engine::scan_table"); - if (table->file->inited) - table->file->ha_index_end(); - - table->file->ha_rnd_init(1); + if ((table->file->inited && + (error= table->file->ha_index_end())) || + (error= table->file->ha_rnd_init(1))) + { + (void) report_error(table, error); + DBUG_RETURN(true); + } + table->file->extra_opt(HA_EXTRA_CACHE, current_thd->variables.read_buff_size); table->null_row= 0; @@ -2238,9 +2242,14 @@ int subselect_uniquesubquery_engine::exec() if (null_keypart) DBUG_RETURN(scan_table()); - - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key, 0); + + if (!table->file->inited && + (error= table->file->ha_index_init(tab->ref.key, 0))) + { + (void) report_error(table, error); + DBUG_RETURN(true); + } + error= table->file->index_read_map(table->record[0], tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts), @@ -2360,8 +2369,12 @@ int subselect_indexsubquery_engine::exec() if (null_keypart) DBUG_RETURN(scan_table()); - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key, 1); + if (!table->file->inited && + (error= table->file->ha_index_init(tab->ref.key, 1))) + { + (void) report_error(table, error); + DBUG_RETURN(true); + } error= table->file->index_read_map(table->record[0], tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts), diff --git a/sql/log_event.cc b/sql/log_event.cc index c4a6ebb9a71..a31e5f71c37 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -9777,19 +9777,25 @@ int Rows_log_event::find_row(const Relay_log_info *rli) case HA_ERR_END_OF_FILE: if (++restart_count < 2) - table->file->ha_rnd_init(1); + { + if ((error= table->file->ha_rnd_init(1))) + { + table->file->print_error(error, MYF(0)); + goto err; + } + } break; default: DBUG_PRINT("info", ("Failed to get next record" " (rnd_next returns %d)",error)); table->file->print_error(error, MYF(0)); - table->file->ha_rnd_end(); + (void) table->file->ha_rnd_end(); goto err; } } while (restart_count < 2 && 
record_compare(table)); - + /* Note: above record_compare will take into accout all record fields which might be incorrect in case a partial row was given in the event diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index d68c474d00a..55d8d72ec5d 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -725,7 +725,10 @@ static int find_and_fetch_row(TABLE *table, uchar *key) int error; /* We have a key: search the table using the index */ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } /* Don't print debug messages when running valgrind since they can @@ -823,7 +826,10 @@ static int find_and_fetch_row(TABLE *table, uchar *key) /* We don't have a key: search the table using rnd_next() */ if ((error= table->file->ha_rnd_init(1))) - return error; + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } /* Continue until we find the right record or have made a full loop */ do @@ -846,15 +852,21 @@ static int find_and_fetch_row(TABLE *table, uchar *key) goto restart_rnd_next; case HA_ERR_END_OF_FILE: - if (++restart_count < 2) - table->file->ha_rnd_init(1); - break; + if (++restart_count < 2) + { + if ((error= table->file->ha_rnd_init(1))) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } + } + break; default: - table->file->print_error(error, MYF(0)); + table->file->print_error(error, MYF(0)); DBUG_PRINT("info", ("Record not found")); - table->file->ha_rnd_end(); - DBUG_RETURN(error); + (void) table->file->ha_rnd_end(); + DBUG_RETURN(error); } } while (restart_count < 2 && record_compare(table)); @@ -2417,7 +2429,7 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli) continue; DBUG_PRINT("info",("no record matching the given row found")); table->file->print_error(error, MYF(0)); - table->file->ha_index_end(); + (void) table->file->ha_index_end(); DBUG_RETURN(error); } } @@ -2458,7 +2470,13 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli) case HA_ERR_END_OF_FILE: if (++restart_count < 2) - table->file->ha_rnd_init(1); + { + if ((error= table->file->ha_rnd_init(1))) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } + } break; default: diff --git a/sql/opt_range.cc b/sql/opt_range.cc index ce48a8da958..ffd66253eaa 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1183,7 +1183,7 @@ int QUICK_RANGE_SELECT::init() { DBUG_ENTER("QUICK_RANGE_SELECT::init"); - if (file->inited != handler::NONE) + if (file->inited) file->ha_index_or_rnd_end(); DBUG_RETURN(FALSE); } @@ -1191,7 +1191,7 @@ int QUICK_RANGE_SELECT::init() void QUICK_RANGE_SELECT::range_end() { - if (file->inited != handler::NONE) + if (file->inited) file->ha_index_or_rnd_end(); } @@ -1450,8 +1450,9 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler) There is no use of this->file. Use it for the first of merged range selects. 
*/ - if (quick->init_ror_merged_scan(TRUE)) - DBUG_RETURN(1); + int error= quick->init_ror_merged_scan(TRUE); + if (error) + DBUG_RETURN(error); quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); } while ((quick= quick_it++)) @@ -1522,7 +1523,7 @@ QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT() quick_selects.delete_elements(); delete cpk_quick; free_root(&alloc,MYF(0)); - if (need_to_fetch_row && head->file->inited != handler::NONE) + if (need_to_fetch_row && head->file->inited) head->file->ha_rnd_end(); DBUG_VOID_RETURN; } @@ -1626,8 +1627,8 @@ int QUICK_ROR_UNION_SELECT::reset() List_iterator_fast it(quick_selects); while ((quick= it++)) { - if (quick->reset()) - DBUG_RETURN(1); + if ((error= quick->reset())) + DBUG_RETURN(error); if ((error= quick->get_next())) { if (error == HA_ERR_END_OF_FILE) @@ -1638,10 +1639,10 @@ int QUICK_ROR_UNION_SELECT::reset() queue_insert(&queue, (uchar*)quick); } - if (head->file->ha_rnd_init(1)) + if ((error= head->file->ha_rnd_init(1))) { DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); - DBUG_RETURN(1); + DBUG_RETURN(error); } DBUG_RETURN(0); @@ -1659,7 +1660,7 @@ QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() DBUG_ENTER("QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT"); delete_queue(&queue); quick_selects.delete_elements(); - if (head->file->inited != handler::NONE) + if (head->file->inited) head->file->ha_rnd_end(); free_root(&alloc,MYF(0)); DBUG_VOID_RETURN; @@ -8316,7 +8317,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() if (!cur_quick) break; - if (cur_quick->file->inited != handler::NONE) + if (cur_quick->file->inited) cur_quick->file->ha_index_end(); if (cur_quick->init() || cur_quick->reset()) DBUG_RETURN(1); @@ -8568,8 +8569,14 @@ int QUICK_RANGE_SELECT::reset() { if (in_ror_merged_scan) head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap); + + DBUG_EXECUTE_IF("bug14365043_2", + DBUG_SET("+d,ha_index_init_fail");); if ((error= file->ha_index_init(index,1))) + { + file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } /* Do not allocate the buffers twice. */ @@ -10783,7 +10790,10 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void) head->set_keyread(TRUE); /* We need only the key attributes */ if ((result= file->ha_index_init(index,1))) + { + head->file->print_error(result, MYF(0)); DBUG_RETURN(result); + } if (quick_prefix_select && quick_prefix_select->reset()) DBUG_RETURN(1); result= file->index_last(record); diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index e187cf19917..aaf279395dc 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -380,9 +380,14 @@ int opt_sum_query(THD *thd, const_result= 0; break; } - table->file->ha_index_init((uint) ref.key, 1); + if ((error= table->file->ha_index_init((uint) ref.key, 1))) + { + table->file->print_error(error, MYF(0)); + table->set_keyread(FALSE); + DBUG_RETURN(error); + } - error= is_max ? + error= is_max ? 
get_index_max_value(table, &ref, range_fl) : get_index_min_value(table, &ref, item_field, range_fl, prefix_len); diff --git a/sql/records.cc b/sql/records.cc index 55859e05f29..7f74b84b2d7 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -67,6 +67,7 @@ static int rr_index_desc(READ_RECORD *info); void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, bool print_error, uint idx, bool reverse) { + int error; empty_record(table); bzero((char*) info,sizeof(*info)); info->thd= thd; @@ -77,8 +78,13 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table, info->unlock_row= rr_unlock_row; table->status=0; /* And it's always found */ - if (!table->file->inited) - table->file->ha_index_init(idx, 1); + if (!table->file->inited && + (error= table->file->ha_index_init(idx, 1))) + { + if (print_error) + table->file->print_error(error, MYF(0)); + } + /* read_record will be changed to rr_index in rr_index_first */ info->read_record= reverse ? rr_index_last : rr_index_first; } diff --git a/sql/sp.cc b/sql/sp.cc index b257e23c8c7..6300329398d 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1442,7 +1442,6 @@ bool lock_db_routines(THD *thd, char *db) { TABLE *table; uint key_len; - int nxtres= 0; Open_tables_backup open_tables_state_backup; MDL_request_list mdl_requests; Lock_db_routines_error_handler err_handler; @@ -1468,7 +1467,13 @@ bool lock_db_routines(THD *thd, char *db) table->field[MYSQL_PROC_FIELD_DB]->store(db, strlen(db), system_charset_info); key_len= table->key_info->key_part[0].store_length; - table->file->ha_index_init(0, 1); + int nxtres= table->file->ha_index_init(0, 1); + if (nxtres) + { + table->file->print_error(nxtres, MYF(0)); + close_system_tables(thd, &open_tables_state_backup); + DBUG_RETURN(true); + } if (! table->file->index_read_map(table->record[0], table->field[MYSQL_PROC_FIELD_DB]->ptr, @@ -1532,7 +1537,12 @@ sp_drop_db_routines(THD *thd, char *db) key_len= table->key_info->key_part[0].store_length; ret= SP_OK; - table->file->ha_index_init(0, 1); + if (table->file->ha_index_init(0, 1)) + { + ret= SP_KEY_NOT_FOUND; + goto err_idx_init; + } + if (! 
table->file->index_read_map(table->record[0], (uchar *)table->field[MYSQL_PROC_FIELD_DB]->ptr, (key_part_map)1, HA_READ_KEY_EXACT)) @@ -1560,6 +1570,7 @@ sp_drop_db_routines(THD *thd, char *db) } table->file->ha_index_end(); +err_idx_init: close_thread_tables(thd); /* Make sure to only release the MDL lock on mysql.proc, not other diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 201335f16b5..5c4a144247f 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2659,7 +2659,13 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user, get_grantor(thd, grantor); - table->file->ha_index_init(0, 1); + if ((error= table->file->ha_index_init(0, 1))) + { + table->file->print_error(error, MYF(0)); + DBUG_PRINT("info", ("ha_index_init error")); + DBUG_RETURN(-1); + } + if (table->file->index_read_map(table->record[0], user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) @@ -2901,7 +2907,12 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len); col_privs->field[4]->store("",0, &my_charset_latin1); - col_privs->file->ha_index_init(0, 1); + if (col_privs->file->ha_index_init(0, 1)) + { + cols= 0; + return; + } + if (col_privs->file->index_read_map(col_privs->record[0], (uchar*) key, (key_part_map)15, HA_READ_KEY_EXACT)) { @@ -3032,7 +3043,7 @@ static int replace_column_table(GRANT_TABLE *g_t, const char *db, const char *table_name, ulong rights, bool revoke_grant) { - int error=0,result=0; + int result=0; uchar key[MAX_KEY_LENGTH]; uint key_prefix_length; KEY_PART_INFO *key_part= table->key_info->key_part; @@ -3059,7 +3070,13 @@ static int replace_column_table(GRANT_TABLE *g_t, List_iterator iter(columns); class LEX_COLUMN *column; - table->file->ha_index_init(0, 1); + int error= table->file->ha_index_init(0, 1); + if (error) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(-1); + } + while ((column= iter++)) { ulong privileges= column->rights; @@ -4180,7 +4197,10 @@ static my_bool grant_load_procs_priv(TABLE *p_table) (void) my_hash_init(&func_priv_hash, &my_charset_utf8_bin, 0,0,0, (my_hash_get_key) get_grant_table, 0,0); - p_table->file->ha_index_init(0, 1); + + if (p_table->file->ha_index_init(0, 1)) + DBUG_RETURN(TRUE); + p_table->use_all_columns(); if (!p_table->file->index_first(p_table->record[0])) @@ -4281,7 +4301,10 @@ static my_bool grant_load(THD *thd, TABLE_LIST *tables) t_table = tables[0].table; c_table = tables[1].table; - t_table->file->ha_index_init(0, 1); + + if (t_table->file->ha_index_init(0, 1)) + goto end_index_init; + t_table->use_all_columns(); c_table->use_all_columns(); @@ -4326,9 +4349,10 @@ static my_bool grant_load(THD *thd, TABLE_LIST *tables) return_val=0; // Return ok end_unlock: - thd->variables.sql_mode= old_sql_mode; t_table->file->ha_index_end(); my_pthread_setspecific_ptr(THR_MALLOC, save_mem_root_ptr); +end_index_init: + thd->variables.sql_mode= old_sql_mode; DBUG_RETURN(return_val); } diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 4435dfab9ea..e7973850194 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -665,14 +665,14 @@ retry: case RFIRST: if (keyname) { - table->file->ha_index_or_rnd_end(); - table->file->ha_index_init(keyno, 1); - error= table->file->index_first(table->record[0]); + if (!(error= table->file->ha_index_or_rnd_end()) && + !(error= table->file->ha_index_init(keyno, 1))) + error= table->file->index_first(table->record[0]); } else { - table->file->ha_index_or_rnd_end(); - if (!(error= table->file->ha_rnd_init(1))) + if (!(error= 
table->file->ha_index_or_rnd_end()) && + !(error= table->file->ha_rnd_init(1))) error= table->file->rnd_next(table->record[0]); } mode=RNEXT; @@ -689,9 +689,9 @@ retry: /* else fall through */ case RLAST: DBUG_ASSERT(keyname != 0); - table->file->ha_index_or_rnd_end(); - table->file->ha_index_init(keyno, 1); - error= table->file->index_last(table->record[0]); + if (!(error= table->file->ha_index_or_rnd_end()) && + !(error= table->file->ha_index_init(keyno, 1))) + error= table->file->index_last(table->record[0]); mode=RPREV; break; case RNEXT_SAME: @@ -734,11 +734,12 @@ retry: if (!(key= (uchar*) thd->calloc(ALIGN_SIZE(key_len)))) goto err; - table->file->ha_index_or_rnd_end(); - table->file->ha_index_init(keyno, 1); + if ((error= table->file->ha_index_or_rnd_end())) + break; key_copy(key, table->record[0], table->key_info + keyno, key_len); - error= table->file->index_read_map(table->record[0], - key, keypart_map, ha_rkey_mode); + if (!(error= table->file->ha_index_init(keyno, 1))) + error= table->file->index_read_map(table->record[0], + key, keypart_map, ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 99203ef98ba..0dca1f6a75a 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -297,8 +297,14 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, rtopic_id= find_fields[help_relation_help_topic_id].field; rkey_id= find_fields[help_relation_help_keyword_id].field; - topics->file->ha_index_init(iindex_topic,1); - relations->file->ha_index_init(iindex_relations,1); + if (topics->file->ha_index_init(iindex_topic,1) || + relations->file->ha_index_init(iindex_relations,1)) + { + if (topics->file->inited) + topics->file->ha_index_end(); + my_message(ER_CORRUPT_HELP_DB, ER(ER_CORRUPT_HELP_DB), MYF(0)); + DBUG_RETURN(-1); + } rkey_id->store((longlong) key_id, TRUE); rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3bde5aa8f6a..5c216a44d70 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -11466,7 +11466,14 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) empty_record(table); if (table->group && join->tmp_table_param.sum_func_count && table->s->keys && !table->file->inited) - table->file->ha_index_init(0, 0); + { + rc= table->file->ha_index_init(0, 0); + if (rc) + { + table->file->print_error(rc, MYF(0)); + DBUG_RETURN(rc); + } + } } /* Set up select_end */ Next_select_func end_select= setup_end_select_func(join); @@ -12286,8 +12293,13 @@ join_read_key(JOIN_TAB *tab) if (!table->file->inited) { - table->file->ha_index_init(tab->ref.key, tab->sorted); + if ((error= table->file->ha_index_init(tab->ref.key, tab->sorted))) + { + (void) report_error(table, error); + return 1; + } } + if (cmp_buffer_with_ref(tab) || (table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW))) { @@ -12371,8 +12383,12 @@ join_read_always_key(JOIN_TAB *tab) TABLE *table= tab->table; /* Initialize the index first */ - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key, tab->sorted); + if (!table->file->inited && + (error= table->file->ha_index_init(tab->ref.key, tab->sorted))) + { + (void) report_error(table, error); + return 1; + } /* Perform "Late NULLs Filtering" (see internals manual for explanations) */ for (uint i= 0 ; i < tab->ref.key_parts ; i++) @@ -12407,8 +12423,13 @@ join_read_last_key(JOIN_TAB *tab) int error; TABLE *table= tab->table; - if (!table->file->inited) - 
table->file->ha_index_init(tab->ref.key, tab->sorted); + if (!table->file->inited && + (error= table->file->ha_index_init(tab->ref.key, tab->sorted))) + { + (void) report_error(table, error); + return 1; + } + if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) return -1; if ((error=table->file->index_read_last_map(table->record[0], @@ -12520,8 +12541,14 @@ join_read_first(JOIN_TAB *tab) tab->read_record.file=table->file; tab->read_record.index=tab->index; tab->read_record.record=table->record[0]; - if (!table->file->inited) - table->file->ha_index_init(tab->index, tab->sorted); + + if (!table->file->inited && + (error= table->file->ha_index_init(tab->index, tab->sorted))) + { + (void) report_error(table, error); + return 1; + } + if ((error=tab->table->file->index_first(tab->table->record[0]))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) @@ -12555,8 +12582,13 @@ join_read_last(JOIN_TAB *tab) tab->read_record.file=table->file; tab->read_record.index=tab->index; tab->read_record.record=table->record[0]; - if (!table->file->inited) - table->file->ha_index_init(tab->index, 1); + if (!table->file->inited && + (error= table->file->ha_index_init(tab->index, 1))) + { + (void) report_error(table, error); + return 1; + } + if ((error= tab->table->file->index_last(tab->table->record[0]))) return report_error(table, error); return 0; @@ -12579,8 +12611,13 @@ join_ft_read_first(JOIN_TAB *tab) int error; TABLE *table= tab->table; - if (!table->file->inited) - table->file->ha_index_init(tab->ref.key, 1); + if (!table->file->inited && + (error= table->file->ha_index_init(tab->ref.key, 1))) + { + (void) report_error(table, error); + return 1; + } + table->file->ft_init(); if ((error= table->file->ft_read(table->record[0]))) @@ -12973,7 +13010,12 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), error, 0)) DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error /* Change method to update rows */ - table->file->ha_index_init(0, 0); + if ((error= table->file->ha_index_init(0, 0))) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(NESTED_LOOP_ERROR); + } + join->join_tab[join->tables-1].next_select=end_unique_update; } join->send_records++; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 81e18ccd6c3..b921b2c782e 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -4925,7 +4925,13 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) { DBUG_RETURN(1); } - proc_table->file->ha_index_init(0, 1); + + if (proc_table->file->ha_index_init(0, 1)) + { + res= 1; + goto err; + } + if ((res= proc_table->file->index_first(proc_table->record[0]))) { res= (res == HA_ERR_END_OF_FILE) ? 
0 : 1; @@ -4951,7 +4957,9 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) } err: - proc_table->file->ha_index_end(); + if (proc_table->file->inited) + (void) proc_table->file->ha_index_end(); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(res); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index c4a95edcfc2..222b3d9470a 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -2038,7 +2038,8 @@ int multi_update::do_updates() org_updated= updated; tmp_table= tmp_tables[cur_table->shared]; tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache - (void) table->file->ha_rnd_init(0); + if (local_error= table->file->ha_rnd_init(0)) + goto err; table->file->extra(HA_EXTRA_NO_CACHE); check_opt_it.rewind(); @@ -2163,11 +2164,16 @@ err: } err2: - (void) table->file->ha_rnd_end(); - (void) tmp_table->file->ha_rnd_end(); + if (table->file->inited) + (void) table->file->ha_rnd_end(); + if (tmp_table->file->inited) + (void) tmp_table->file->ha_rnd_end(); check_opt_it.rewind(); while (TABLE *tbl= check_opt_it++) - tbl->file->ha_rnd_end(); + { + if (tbl->file->inited) + (void) tbl->file->ha_rnd_end(); + } if (updated != org_updated) { diff --git a/sql/tztime.cc b/sql/tztime.cc index f245b736f52..72898ccc706 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1706,14 +1706,11 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) } table= tz_tables[0].table; - /* - It is OK to ignore ha_index_init()/ha_index_end() return values since - mysql.time_zone* tables are MyISAM and these operations always succeed - for MyISAM. - */ - (void)table->file->ha_index_init(0, 1); - table->use_all_columns(); + if (table->file->ha_index_init(0, 1)) + goto end_with_close; + + table->use_all_columns(); tz_leapcnt= 0; res= table->file->index_first(table->record[0]); @@ -1899,12 +1896,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) tz_tables= tz_tables->next_local; table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); - /* - It is OK to ignore ha_index_init()/ha_index_end() return values since - mysql.time_zone* tables are MyISAM and these operations always succeed - for MyISAM. 
- */ - (void)table->file->ha_index_init(0, 1); + if (table->file->ha_index_init(0, 1)) + goto end; if (table->file->index_read_map(table->record[0], table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) @@ -1932,7 +1925,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) table= tz_tables->table; tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); - (void)table->file->ha_index_init(0, 1); + if (table->file->ha_index_init(0, 1)) + goto end; if (table->file->index_read_map(table->record[0], table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) @@ -1959,7 +1953,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) table= tz_tables->table; tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); - (void)table->file->ha_index_init(0, 1); + if (table->file->ha_index_init(0, 1)) + goto end; res= table->file->index_read_map(table->record[0], table->field[0]->ptr, (key_part_map)1, HA_READ_KEY_EXACT); @@ -2030,7 +2025,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) */ table= tz_tables->table; table->field[0]->store((longlong) tzid, TRUE); - (void)table->file->ha_index_init(0, 1); + if (table->file->ha_index_init(0, 1)) + goto end; res= table->file->index_read_map(table->record[0], table->field[0]->ptr, (key_part_map)1, HA_READ_KEY_EXACT); @@ -2165,8 +2161,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) end: - if (table) - (void)table->file->ha_index_end(); + if (table && table->file->inited) + (void) table->file->ha_index_end(); DBUG_RETURN(return_val); } -- cgit v1.2.1 From d5d53d190293af440f6a7d116df63c21fa391bd1 Mon Sep 17 00:00:00 2001 From: Annamalai Gurusami Date: Tue, 9 Oct 2012 12:25:02 +0530 Subject: Fixing a compilation issue. --- sql/sql_update.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 222b3d9470a..e7600fe248e 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -2038,7 +2038,7 @@ int multi_update::do_updates() org_updated= updated; tmp_table= tmp_tables[cur_table->shared]; tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache - if (local_error= table->file->ha_rnd_init(0)) + if ((local_error= table->file->ha_rnd_init(0))) goto err; table->file->extra(HA_EXTRA_NO_CACHE); -- cgit v1.2.1 From 5427d33e6248caa4e6a2b92c9ab9317fadcb5c2f Mon Sep 17 00:00:00 2001 From: Harin Vadodaria Date: Tue, 9 Oct 2012 18:15:40 +0530 Subject: Bug #14211140: CRASH WHEN GRANTING OR REVOKING PROXY PRIVILEGES Description: (user,host) pair from security context is used privilege checking at the time of granting or revoking proxy privileges. This creates problem when server is started with --skip-name-resolve option because host will not contain any value. Checks should be dependent on consistent values regardless the way server is started. Further, privilege check should use (priv_user,priv_host) pair rather than values obtained from inbound connection because this pair represents the correct account context obtained from mysql.user table. 
--- sql/sql_acl.cc | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 5c4a144247f..d99ca3ceb99 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -7256,14 +7256,25 @@ acl_check_proxy_grant_access(THD *thd, const char *host, const char *user, DBUG_RETURN(FALSE); } - /* one can grant proxy to himself to others */ - if (!strcmp(thd->security_ctx->user, user) && + /* + one can grant proxy for self to others. + Security context in THD contains two pairs of (user,host): + 1. (user,host) pair referring to inbound connection. + 2. (priv_user,priv_host) pair obtained from mysql.user table after doing + authnetication of incoming connection. + Privileges should be checked wrt (priv_user, priv_host) tuple, because + (user,host) pair obtained from inbound connection may have different + values than what is actually stored in mysql.user table and while granting + or revoking proxy privilege, user is expected to provide entries mentioned + in mysql.user table. + */ + if (!strcmp(thd->security_ctx->priv_user, user) && !my_strcasecmp(system_charset_info, host, - thd->security_ctx->host)) + thd->security_ctx->priv_host)) { DBUG_PRINT("info", ("strcmp (%s, %s) my_casestrcmp (%s, %s) equal", - thd->security_ctx->user, user, - host, thd->security_ctx->host)); + thd->security_ctx->priv_user, user, + host, thd->security_ctx->priv_host)); DBUG_RETURN(FALSE); } -- cgit v1.2.1 From 6ff71d0dd38e39e67580f598b400fa3fb8888da3 Mon Sep 17 00:00:00 2001 From: Annamalai Gurusami Date: Fri, 19 Oct 2012 16:43:48 +0530 Subject: Bug #14226171 EXCESSIVE ROW LOCKING WITH UPDATE IN 5.5.25 When a DML statement is issued, and if the index merge access method is chosen, then many rows from the storage engine will be locked because of the way the algorithm works. Many rows will be locked, but they will not be part of the final result set. To reduce the excessive locking, the locks of unmatched rows are released by this patch. This patch will affect only transactions with isolation level equal to or less stricter than READ COMMITTED. This is because of the behaviour of ha_innobase::unlock_row(). rb://1296 approved by jorgen and olav. --- sql/opt_range.cc | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index ffd66253eaa..ce87bdd5381 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -8412,6 +8412,13 @@ int QUICK_INDEX_MERGE_SELECT::get_next() If a Clustered PK scan is present, it is used only to check if row satisfies its condition (and never used for row retrieval). + Locking: to ensure that exclusive locks are only set on records that + are included in the final result we must release the lock + on all rows we read but do not include in the final result. This + must be done on each index that reads the record and the lock + must be released using the same handler (the same quick object) as + used when reading the record. + RETURN 0 - Ok other - Error code if any error occurred. @@ -8421,6 +8428,12 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() { List_iterator_fast quick_it(quick_selects); QUICK_RANGE_SELECT* quick; + + /* quick that reads the given rowid first. 
This is needed in order + to be able to unlock the row using the same handler object that locked + it */ + QUICK_RANGE_SELECT* quick_with_last_rowid; + int error, cmp; uint last_rowid_count=0; DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next"); @@ -8433,7 +8446,10 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() if (cpk_quick) { while (!error && !cpk_quick->row_in_ranges()) + { + quick->file->unlock_row(); /* row not in range; unlock */ error= quick->get_next(); + } } if (error) DBUG_RETURN(error); @@ -8441,6 +8457,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() quick->file->position(quick->record); memcpy(last_rowid, quick->file->ref, head->file->ref_length); last_rowid_count= 1; + quick_with_last_rowid= quick; while (last_rowid_count < quick_selects.elements) { @@ -8453,9 +8470,17 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() do { if ((error= quick->get_next())) + { + quick_with_last_rowid->file->unlock_row(); DBUG_RETURN(error); + } quick->file->position(quick->record); cmp= head->file->cmp_ref(quick->file->ref, last_rowid); + if (cmp < 0) + { + /* This row is being skipped. Release lock on it. */ + quick->file->unlock_row(); + } } while (cmp < 0); /* Ok, current select 'caught up' and returned ref >= cur_ref */ @@ -8466,13 +8491,19 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() { while (!cpk_quick->row_in_ranges()) { + quick->file->unlock_row(); /* row not in range; unlock */ if ((error= quick->get_next())) + { + quick_with_last_rowid->file->unlock_row(); DBUG_RETURN(error); + } } quick->file->position(quick->record); } memcpy(last_rowid, quick->file->ref, head->file->ref_length); + quick_with_last_rowid->file->unlock_row(); last_rowid_count= 1; + quick_with_last_rowid= quick; } else { -- cgit v1.2.1 From 154860eab5499cf44160f253694a87b8147a2965 Mon Sep 17 00:00:00 2001 From: Ashish Agarwal Date: Wed, 31 Oct 2012 12:40:48 +0530 Subject: BUG#14485479: INSTALL AUDIT PLUGIN HANGS IF WE TRY TO DISABLE AND ENABLED DURING DDL OPERATION PROBLEM: Same thread trying to acquire the same mutex second time leads to hang/server crash. While [un]installing audit_log plugin a thread acquires the LOCK_plugin mutex and after successful initialization tries to write in mysql.plugin table. It holds this mutex for a long time. If some how plugin table is corrupted then a write to plugin table will throw an error, thread try to log this error in the audit_log plugin, doing so it tries to acquire the mutex again and results is server hang/crash. SOLUTION: Releasing the LOCK_plugin mutex before writing in mysql.plugin table. We dont need to hold this mutex as thread already acquired a TL_WRITE lock on mysql.plugin table. --- sql/sql_plugin.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 4c42f3798dd..13623078265 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1820,6 +1820,7 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl { if (plugin_initialize(tmp)) { + mysql_mutex_unlock(&LOCK_plugin); my_error(ER_CANT_INITIALIZE_UDF, MYF(0), name->str, "Plugin initialization function failed."); goto deinit; @@ -1831,6 +1832,7 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl of the insert into the plugin table, so that it is not replicated in row based mode. 
*/ + mysql_mutex_unlock(&LOCK_plugin); tmp_disable_binlog(thd); table->use_all_columns(); restore_record(table, s->default_values); @@ -1843,10 +1845,9 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl table->file->print_error(error, MYF(0)); goto deinit; } - - mysql_mutex_unlock(&LOCK_plugin); DBUG_RETURN(FALSE); deinit: + mysql_mutex_lock(&LOCK_plugin); tmp->state= PLUGIN_IS_DELETED; reap_needed= true; reap_plugins(); -- cgit v1.2.1 From d538d394b86d4095e78c891e40876f4b9e031d7f Mon Sep 17 00:00:00 2001 From: Gleb Shchepa Date: Wed, 5 Dec 2012 17:24:45 +0400 Subject: Bug #15948123: SERVER WORKS INCORRECT WITH LONG TABLE ALIASES Code in MDL subsystem assumes that identifiers of objects can't be longer than NAME_LEN characters. This assumption was broken when one tried to construct MDL_key based on table alias, which can have arbitrary length. Since MDL_key's (and MDL locks) are not really used for table aliases this patch changes code to not initialize MDL_key object for table list element representing aliases. --- sql/mdl.h | 2 ++ sql/sql_parse.cc | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/mdl.h b/sql/mdl.h index d30d30ac2fa..6d29ad968c6 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -246,6 +246,8 @@ public: } void mdl_key_init(const MDL_key *rhs) { + DBUG_ASSERT(rhs->m_length <= NAME_LEN); + DBUG_ASSERT(rhs->m_db_name_length <= NAME_LEN); memcpy(m_ptr, rhs->m_ptr, rhs->m_length); m_length= rhs->m_length; m_db_name_length= rhs->m_db_name_length; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ea07bfce0cb..2e995f58c5c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6000,8 +6000,13 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->next_name_resolution_table= NULL; /* Link table in global list (all used tables) */ lex->add_to_query_tables(ptr); - ptr->mdl_request.init(MDL_key::TABLE, ptr->db, ptr->table_name, mdl_type, - MDL_TRANSACTION); + + // Pure table aliases do not need to be locked: + if (!test(table_options & TL_OPTION_ALIAS)) + { + ptr->mdl_request.init(MDL_key::TABLE, ptr->db, ptr->table_name, mdl_type, + MDL_TRANSACTION); + } DBUG_RETURN(ptr); } -- cgit v1.2.1 From e5424d196b9c392b71fc1b2c8dcf67f610f6a746 Mon Sep 17 00:00:00 2001 From: Gleb Shchepa Date: Wed, 5 Dec 2012 20:47:21 +0400 Subject: Bug #15948123: SERVER WORKS INCORRECT WITH LONG TABLE ALIASES After-push cleanup: removal of unneeded assertions. --- sql/mdl.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'sql') diff --git a/sql/mdl.h b/sql/mdl.h index 6d29ad968c6..d30d30ac2fa 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -246,8 +246,6 @@ public: } void mdl_key_init(const MDL_key *rhs) { - DBUG_ASSERT(rhs->m_length <= NAME_LEN); - DBUG_ASSERT(rhs->m_db_name_length <= NAME_LEN); memcpy(m_ptr, rhs->m_ptr, rhs->m_length); m_length= rhs->m_length; m_db_name_length= rhs->m_db_name_length; -- cgit v1.2.1 From d8876ff2fb2fac75a23678bd5b251780c66756a1 Mon Sep 17 00:00:00 2001 From: Harin Vadodaria Date: Thu, 6 Dec 2012 17:02:09 +0530 Subject: Bug#15912213: BUFFER OVERFLOW IN ACL_GET() Description: A very large database name causes buffer overflow in functions acl_get() and check_grant_db() in sql_acl.cc. It happens due to an unguarded string copy operation. This puts required sanity checks before copying db string to destination buffer. 
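The shape of that sanity check can be sketched in isolation; the buffer size and helper name below are invented for illustration and only model the length test performed before the copies, not the server's acl_get() itself:

#include <cstring>
#include <string>
#include <iostream>

static const size_t KEY_BUFFER_SIZE = 128;   // stand-in for ACL_KEY_LENGTH

// Build "ip\0user\0db\0" into a fixed-size buffer, refusing input that
// would not fit instead of overflowing it.
static bool build_acl_key(char *key, size_t key_size,
                          const char *ip, const char *user, const char *db)
{
  const char *parts[3] = { ip ? ip : "", user ? user : "", db ? db : "" };
  size_t copy_length = strlen(parts[0]) + strlen(parts[1]) + strlen(parts[2]);
  if (copy_length + 3 > key_size)     // 3 bytes for the '\0' separators
    return false;                     // caller treats this as "no access"
  char *end = key;
  for (int i = 0; i < 3; ++i) {
    size_t len = strlen(parts[i]);
    memcpy(end, parts[i], len);
    end += len;
    *end++ = '\0';
  }
  return true;
}

int main()
{
  char key[KEY_BUFFER_SIZE];
  std::string long_db(200, 'x');     // would overflow a 128-byte key
  std::cout << std::boolalpha
            << build_acl_key(key, sizeof(key), "127.0.0.1", "root", "test") << " "
            << build_acl_key(key, sizeof(key), "127.0.0.1", "root", long_db.c_str())
            << "\n";                  // prints: true false
  return 0;
}

The patch below adds the equivalent guards to both acl_get() and check_grant_db() before their strmov() copies.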
--- sql/sql_acl.cc | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index d99ca3ceb99..fdd9b107bf2 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1581,11 +1581,20 @@ ulong acl_get(const char *host, const char *ip, { ulong host_access= ~(ulong)0, db_access= 0; uint i; - size_t key_length; + size_t key_length, copy_length; char key[ACL_KEY_LENGTH],*tmp_db,*end; acl_entry *entry; DBUG_ENTER("acl_get"); + copy_length= (size_t) (strlen(ip ? ip : "") + + strlen(user ? user : "") + + strlen(db ? db : "")); + /* + Make sure that strmov() operations do not result in buffer overflow. + */ + if (copy_length >= ACL_KEY_LENGTH) + DBUG_RETURN(0); + mysql_mutex_lock(&acl_cache->lock); end=strmov((tmp_db=strmov(strmov(key, ip ? ip : "")+1,user)+1),db); if (lower_case_table_names) @@ -4942,6 +4951,16 @@ bool check_grant_db(THD *thd,const char *db) char helping [NAME_LEN+USERNAME_LENGTH+2]; uint len; bool error= TRUE; + size_t copy_length; + + copy_length= (size_t) (strlen(sctx->priv_user ? sctx->priv_user : "") + + strlen(db ? db : "")); + + /* + Make sure that strmov() operations do not result in buffer overflow. + */ + if (copy_length >= (NAME_LEN+USERNAME_LENGTH+2)) + return 1; len= (uint) (strmov(strmov(helping, sctx->priv_user) + 1, db) - helping) + 1; -- cgit v1.2.1 From 14ba37f76f87cc48cae62eb6bdf3cda294dff78d Mon Sep 17 00:00:00 2001 From: Dmitry Lenev Date: Mon, 10 Dec 2012 10:06:37 +0400 Subject: Bug #15954896 "SP, MULTI-TABLE DELETE AND LONG ALIAS". Using too long table aliases in stored routines might have caused server crashes. Code in sp_head::merge_table_list() which is responsible for collecting information about tables used in stored routine was not aware of the fact that table alias might have arbitrary length. I.e. it assumed that table alias can't be longer than NAME_LEN bytes and allocated buffer for a key identifying table accordingly. This patch fixes the issue by ensuring that we use dynamically allocated buffer for table key when table alias is too long. By default stack based buffer is used in which NAME_LEN bytes are reserved for table alias. --- sql/sp_head.cc | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) (limited to 'sql') diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 09795156102..f20c205886d 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -4022,8 +4022,6 @@ typedef struct st_sp_table Multi-set key: db_name\0table_name\0alias\0 - for normal tables db_name\0table_name\0 - for temporary tables - Note that in both cases we don't take last '\0' into account when - we count length of key. */ LEX_STRING qname; uint db_length, table_name_length; @@ -4080,19 +4078,26 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) for (; table ; table= table->next_global) if (!table->derived && !table->schema_table) { - char tname[(NAME_LEN + 1) * 3]; // db\0table\0alias\0 - uint tlen, alen; - - tlen= table->db_length; - memcpy(tname, table->db, tlen); - tname[tlen++]= '\0'; - memcpy(tname+tlen, table->table_name, table->table_name_length); - tlen+= table->table_name_length; - tname[tlen++]= '\0'; - alen= strlen(table->alias); - memcpy(tname+tlen, table->alias, alen); - tlen+= alen; - tname[tlen]= '\0'; + /* + Structure of key for the multi-set is "db\0table\0alias\0". + Since "alias" part can have arbitrary length we use String + object to construct the key. 
By default String will use + buffer allocated on stack with NAME_LEN bytes reserved for + alias, since in most cases it is going to be smaller than + NAME_LEN bytes. + */ + char tname_buff[(NAME_LEN + 1) * 3]; + String tname(tname_buff, sizeof(tname_buff), &my_charset_bin); + uint temp_table_key_length; + + tname.length(0); + tname.append(table->db, table->db_length); + tname.append('\0'); + tname.append(table->table_name, table->table_name_length); + tname.append('\0'); + temp_table_key_length= tname.length(); + tname.append(table->alias); + tname.append('\0'); /* Upgrade the lock type because this table list will be used @@ -4107,9 +4112,10 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) (and therefore should not be prelocked). Otherwise we will erroneously treat table with same name but with different alias as non-temporary. */ - if ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname, tlen)) || - ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname, - tlen - alen - 1)) && + if ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname.ptr(), + tname.length())) || + ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname.ptr(), + temp_table_key_length)) && tab->temp)) { if (tab->lock_type < table->lock_type) @@ -4128,11 +4134,11 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) lex_for_tmp_check->create_info.options & HA_LEX_CREATE_TMP_TABLE) { tab->temp= TRUE; - tab->qname.length= tlen - alen - 1; + tab->qname.length= temp_table_key_length; } else - tab->qname.length= tlen; - tab->qname.str= (char*) thd->memdup(tname, tab->qname.length + 1); + tab->qname.length= tname.length(); + tab->qname.str= (char*) thd->memdup(tname.ptr(), tab->qname.length); if (!tab->qname.str) return FALSE; tab->table_name_length= table->table_name_length; @@ -4201,7 +4207,7 @@ sp_head::add_used_tables_to_table_list(THD *thd, if (!(tab_buff= (char *)thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST)) * stab->lock_count)) || !(key_buff= (char*)thd->memdup(stab->qname.str, - stab->qname.length + 1))) + stab->qname.length))) DBUG_RETURN(FALSE); for (uint j= 0; j < stab->lock_count; j++) -- cgit v1.2.1
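The approach taken in the sp_head.cc change above can be modelled with a short standalone sketch: build the "db\0table\0alias\0" multi-set key in a growable string so that an arbitrarily long alias can never overflow a fixed-size array. The types and names below are illustrative only and do not reflect the server's String class:

#include <string>
#include <iostream>

struct TableRef {
  std::string db;
  std::string table_name;
  std::string alias;        // user-chosen, effectively unbounded in length
};

// Build "db\0table\0alias\0"; temp_key_length receives the prefix length
// used when the table turns out to be a temporary one (no alias part).
static std::string make_table_key(const TableRef &t, size_t *temp_key_length)
{
  std::string key;
  key.reserve(t.db.size() + t.table_name.size() + t.alias.size() + 3);
  key.append(t.db);
  key.push_back('\0');
  key.append(t.table_name);
  key.push_back('\0');
  *temp_key_length = key.size();
  key.append(t.alias);
  key.push_back('\0');
  return key;
}

int main()
{
  TableRef t = { "test", "t1", std::string(1024, 'a') };   // very long alias
  size_t temp_len = 0;
  std::string key = make_table_key(t, &temp_len);
  std::cout << "full key: " << key.size()
            << " bytes, temporary-table key: " << temp_len << " bytes\n";
  return 0;
}

As in the patch, the key including the alias is used for normal tables, while the shorter prefix keys temporary tables, so two uses of the same temporary table under different aliases still hash to the same entry.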