| author | Michael Widenius <monty@askmonty.org> | 2011-05-10 18:17:43 +0300 |
|---|---|---|
| committer | Michael Widenius <monty@askmonty.org> | 2011-05-10 18:17:43 +0300 |
| commit | f34be1893892745b5b1a7a099eab4ad8e9ac8641 (patch) | |
| tree | d7eed818fd8b648e5eabe0dbad53e61665d8dc37 /sql | |
| parent | e843297d128b165125d17aab8958f7ca91808923 (diff) | |
| parent | 8882d71f3f1dd03ef98d072def39b29e6a03f5b8 (diff) | |
| download | mariadb-git-f34be1893892745b5b1a7a099eab4ad8e9ac8641.tar.gz | |
Merge with MariaDB 5.2
Diffstat (limited to 'sql')
55 files changed, 1488 insertions, 717 deletions
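A large share of the `sql/ha_partition.*` changes in this merge replaces magic numbers in the `.par` file handling with named offsets (`PAR_WORD_SIZE`, `PAR_CHECKSUM_OFFSET`, `PAR_NUM_PARTS_OFFSET`, `PAR_ENGINES_OFFSET`) and splits `get_from_handler_file()` into `read_par_file()` and `setup_engine_array()`. The following is a standalone sketch, not code from the tree, of how a `.par` header can be read and its XOR checksum verified under those offsets; `read_u32_le()` and `check_par_header()` are hypothetical helpers standing in for `uint4korr()` and the first part of `read_par_file()`.

```cpp
#include <cstdint>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for MySQL's uint4korr(): read a little-endian 32-bit word.
static uint32_t read_u32_le(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

// Offsets as introduced in sql/ha_partition.h by this merge.
static const size_t PAR_WORD_SIZE        = 4;  // the file is measured in 32-bit words
static const size_t PAR_CHECKSUM_OFFSET  = 4;  // XOR checksum over all words
static const size_t PAR_NUM_PARTS_OFFSET = 8;  // total number of partitions
static const size_t PAR_ENGINES_OFFSET   = 12; // array of legacy engine types

// Sketch: validate a .par header already loaded into memory.
// Fills tot_parts and returns true on success, false on a bad length or checksum.
static bool check_par_header(const std::vector<unsigned char> &buf, uint32_t *tot_parts)
{
  if (buf.size() < PAR_ENGINES_OFFSET)
    return false;
  uint32_t len_words = read_u32_le(buf.data());
  size_t len_bytes = (size_t) len_words * PAR_WORD_SIZE;
  if (len_words < 4 || buf.size() < len_bytes)
    return false;

  /*
    create_handler_file() computes the checksum over the buffer with the
    checksum word still zero and then stores it at PAR_CHECKSUM_OFFSET, so
    XOR-ing every word of the file, checksum included, must yield zero
    (the same loop read_par_file() uses).
  */
  uint32_t chksum = 0;
  for (uint32_t i = 0; i < len_words; i++)
    chksum ^= read_u32_le(buf.data() + PAR_WORD_SIZE * i);
  if (chksum != 0)
    return false;

  *tot_parts = read_u32_le(buf.data() + PAR_NUM_PARTS_OFFSET);
  return true;
}
```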
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 13a2f8cf7c2..2a2fc6bdd3d 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -425,8 +425,8 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, key_copy(key_buf, event_table->record[0], key_info, key_len); if (!(ret= event_table->file->ha_index_read_map(event_table->record[0], key_buf, - (key_part_map)1, - HA_READ_PREFIX))) + (key_part_map) 1, + HA_READ_KEY_EXACT))) { DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); do diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 2a354fe6cfd..c551cf74095 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -748,12 +748,14 @@ Event_queue::cond_wait(THD *thd, struct timespec *abstime, const char* msg, thd->enter_cond(&COND_queue_state, &LOCK_event_queue, msg); - DBUG_PRINT("info", ("pthread_cond_%swait", abstime? "timed":"")); - if (!abstime) - pthread_cond_wait(&COND_queue_state, &LOCK_event_queue); - else - pthread_cond_timedwait(&COND_queue_state, &LOCK_event_queue, abstime); - + if (!thd->killed) + { + DBUG_PRINT("info", ("pthread_cond_%swait", abstime ? "timed" : "")); + if (!abstime) + pthread_cond_wait(&COND_queue_state, &LOCK_event_queue); + else + pthread_cond_timedwait(&COND_queue_state, &LOCK_event_queue, abstime); + } mutex_last_locked_in_func= func; mutex_last_locked_at_line= line; mutex_queue_data_locked= TRUE; diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index 4d6636eedb2..ecddcb7ca46 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -648,7 +648,14 @@ Event_scheduler::stop() /* thd could be 0x0, when shutting down */ sql_print_information("Event Scheduler: " "Waiting for the scheduler thread to reply"); - COND_STATE_WAIT(thd, NULL, "Waiting scheduler to stop"); + + /* + Wait only 2 seconds, as there is a small chance the thread missed the + above awake() call and we may have to do it again + */ + struct timespec top_time; + set_timespec(top_time, 2); + COND_STATE_WAIT(thd, &top_time, "Waiting scheduler to stop"); } while (state == STOPPING); DBUG_PRINT("info", ("Scheduler thread has cleaned up. 
Set state to INIT")); sql_print_information("Event Scheduler: Stopped"); diff --git a/sql/field.cc b/sql/field.cc index 461d6c1eda2..7d8a64d7745 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5474,6 +5474,7 @@ double Field_year::val_real(void) longlong Field_year::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; + DBUG_ASSERT(field_length == 2 || field_length == 4); int tmp= (int) ptr[0]; if (field_length != 4) tmp%=100; // Return last 2 char @@ -5486,6 +5487,7 @@ longlong Field_year::val_int(void) String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + DBUG_ASSERT(field_length < 5); val_buffer->alloc(5); val_buffer->length(field_length); char *to=(char*) val_buffer->ptr(); @@ -9490,6 +9492,7 @@ void Create_field::create_length_to_internal_length(void) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VARCHAR: diff --git a/sql/field.h b/sql/field.h index d695479f197..6706f85d368 100644 --- a/sql/field.h +++ b/sql/field.h @@ -13,6 +13,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "my_compare.h" /* for clr_rec_bits */ + /* Because of the function new_field() all field classes that have static variables must declare the size_of() member function. diff --git a/sql/filesort.cc b/sql/filesort.cc index 6e3bf27afcc..c28878c616f 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -231,7 +231,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, goto err; } if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX, - DISK_BUFFER_SIZE, MYF(MY_WME))) + DISK_BUFFER_SIZE, MYF(ME_ERROR | MY_WME))) goto err; param.keys--; /* TODO: check why we do this */ @@ -266,7 +266,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, /* Open cached file if it isn't open */ if (! 
my_b_inited(outfile) && open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER, - MYF(MY_WME))) + MYF(ME_ERROR | MY_WME))) goto err; if (reinit_io_cache(outfile,WRITE_CACHE,0L,0,0)) goto err; @@ -318,8 +318,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } } if (error) - my_message(ER_FILSORT_ABORT, ER(ER_FILSORT_ABORT), - MYF(ME_ERROR+ME_WAITTANG)); + my_message(ER_FILSORT_ABORT, ER(ER_FILSORT_ABORT), MYF(0)); else statistic_add(thd->status_var.filesort_rows, (ulong) records, &LOCK_status); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 74ea36b5bd9..6cd735ddd4b 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -164,8 +164,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_alloc_root(&m_mem_root, 512, 512); @@ -186,16 +185,46 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); init_alloc_root(&m_mem_root, 512, 512); init_handler_variables(); - DBUG_ASSERT(m_part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_alloc_root(&m_mem_root, 512, 512); + init_handler_variables(); + m_part_info= part_info_arg; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -248,7 +277,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -256,6 +284,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -368,7 +401,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1869,7 +1903,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if 
(get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2091,18 +2125,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. */ @@ -2166,21 +2198,24 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; file_buffer= (uchar *) my_alloca(tot_len_byte); if (!file_buffer) DBUG_RETURN(TRUE); bzero(file_buffer, tot_len_byte); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2218,13 +2253,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. 
Create and write and close file to be used at open, delete_table and rename_table */ @@ -2235,6 +2272,7 @@ bool ha_partition::create_handler_file(const char *name) result= my_write(file, (uchar *) file_buffer, tot_len_byte, MYF(MY_WME | MY_NABP)) != 0; + /* Write connection information (for federatedx engine) */ part_it.rewind(); for (i= 0; i < no_parts && !result; i++) { @@ -2245,7 +2283,10 @@ bool ha_partition::create_handler_file(const char *name) if (my_write(file, buffer, 4, MYF(MY_WME | MY_NABP)) || my_write(file, (uchar *) part_elem->connect_string.str, length, MYF(MY_WME | MY_NABP))) + { result= TRUE; + break; + } } VOID(my_close(file, MYF(0))); } @@ -2255,14 +2296,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - - SYNOPSIS - clear_handler_file() - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2275,16 +2311,15 @@ void ha_partition::clear_handler_file() m_connect_string= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @param mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2322,6 +2357,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2393,85 +2429,83 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. 
*/ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; - engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); - for (i= 0; i < m_tot_parts; i++) - { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); - if (!engine_array[i]) - goto err3; - } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. 
+ */ if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; + goto err2; + m_file_buffer= file_buffer; // Will be freed in clear_handler_file() + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; if (!(m_connect_string= (LEX_STRING*) alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING)))) - goto err3; + goto err2; bzero(m_connect_string, m_tot_parts * sizeof(LEX_STRING)); + /* Read connection arguments (for federated X engine) */ for (i= 0; i < m_tot_parts; i++) { LEX_STRING connect_string; uchar buffer[4]; if (my_read(file, buffer, 4, MYF(MY_NABP))) + { + /* No extra options; Probably not a federatedx engine */ break; + } connect_string.length= uint4korr(buffer); connect_string.str= (char*) alloc_root(&m_mem_root, connect_string.length+1); if (my_read(file, (uchar*) connect_string.str, connect_string.length, @@ -2482,31 +2516,100 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) } VOID(my_close(file, MYF(0))); - m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - + DBUG_RETURN(false); + +err2: +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); + engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); + + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); + for (i= 0; i < m_tot_parts; i++) + { + engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type) + *(buff + i)); + if (!engine_array[i]) + goto err; + } if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) - goto err3; + goto err; for (i= 0; i < m_tot_parts; i++) m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); my_afree(engine_array); - if (!m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: + DBUG_RETURN(false); + +err: my_afree(engine_array); -err2: -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2553,13 +2656,13 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; - int error; + char *name_buffer_ptr; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2567,8 +2670,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) - DBUG_RETURN(1); + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) + DBUG_RETURN(error); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->stored_rec_length; @@ -2578,7 +2682,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2601,50 +2705,86 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. 
+ */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + table->s->connect_string = m_connect_string[(uint)(file-m_file)]; + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + goto err_handler; + bzero(&table->s->connect_string, sizeof(LEX_STRING)); + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; - do + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + while (*(++file)) { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - table->s->connect_string = m_connect_string[(uint)(file-m_file)]; - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - bzero(&table->s->connect_string, sizeof(LEX_STRING)); - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + DBUG_ASSERT(ref_length >= (*file)->ref_length); set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. */ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2653,6 +2793,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2710,25 +2851,54 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory for ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then on the correct MEM_ROOT and also open them. 
+*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; + + if (new_handler && + new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + DBUG_RETURN((handler*) new_handler); } @@ -2759,7 +2929,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -4354,6 +4524,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, break; } } + m_last_part= part; } else { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index f5e66c5913e..b1e39cf4d22 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -70,7 +80,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name MEM_ROOT m_mem_root; plugin_ref *m_engine_array; // Array of types of the handlers @@ -134,6 +144,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. + */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -170,11 +187,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. 
@@ -187,7 +199,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -206,6 +218,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share,
+ partition_info *part_info_arg,
+ ha_partition *clone_arg,
+ MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -276,7 +292,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 520402c8e38..895e7a65125 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2099,11 +2099,10 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); - - if (!new_handler) + handler *new_handler= get_new_handler(table->s, mem_root, ht); + if (! new_handler) return NULL; /* @@ -2111,17 +2110,27 @@ handler *handler::clone(MEM_ROOT *mem_root) on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) + + if (!(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) return NULL; - if (new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, + + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. + + This is not critical as the engines already have the table open + and should be able to use the original instance of the table. + */ + if (new_handler->ha_open(table, name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) return NULL; + new_handler->cloned= 1; // Marker for debugging return new_handler; } + double handler::keyread_time(uint index, uint ranges, ha_rows rows) { /* diff --git a/sql/handler.h b/sql/handler.h index f674a8a56bb..59d6aaf4b7f 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -23,7 +23,6 @@ #pragma interface /* gcc class implementation */ #endif -#include <my_handler.h> #include <ft_global.h> #include <keycache.h> @@ -1670,7 +1669,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/hostname.cc b/sql/hostname.cc index ec090cbe02f..dfcdd3edd90 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -183,7 +184,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors) &tmp_hostent,buff,sizeof(buff),&tmp_errno))) { DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno)); - return 0; + DBUG_RETURN(0); } if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2), &tmp_errno))) diff --git a/sql/item.cc b/sql/item.cc index b0530f0e17e..c1620174533 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -980,7 +980,7 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const /** Get the value of the function as a MYSQL_TIME structure. - As a extra convenience the time structure is reset on error! + As a extra convenience the time structure is reset on error or NULL values! */ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) @@ -996,8 +996,12 @@ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) } else { - longlong value= val_int(); int was_cut; + longlong value= val_int(); + + if (null_value) + goto err; + if (number_to_datetime(value, ltime, fuzzydate, &was_cut) == LL(-1)) { char buff[22], *end; @@ -2856,6 +2860,16 @@ bool Item_param::set_longdata(const char *str, ulong length) (here), and first have to concatenate all pieces together, write query to the binary log and only then perform conversion. */ + if (str_value.length() + length > max_long_data_size) + { + my_message(ER_UNKNOWN_ERROR, + "Parameter of prepared statement which is set through " + "mysql_send_long_data() is longer than " + "'max_long_data_size' bytes", + MYF(0)); + DBUG_RETURN(true); + } + if (str_value.append(str, length, &my_charset_bin)) DBUG_RETURN(TRUE); state= LONG_DATA_VALUE; @@ -6418,7 +6432,7 @@ void Item_ref::print(String *str, enum_query_type query_type) { THD *thd= current_thd; append_identifier(thd, str, (*ref)->real_item()->name, - (*ref)->real_item()->name_length); + strlen((*ref)->real_item()->name)); } else (*ref)->print(str, query_type); @@ -7993,7 +8007,7 @@ String *Item_cache_int::val_str(String *str) null_value= TRUE; return NULL; } - str->set(value, default_charset()); + str->set_int(value, unsigned_flag, default_charset()); return str; } diff --git a/sql/item.h b/sql/item.h index 120ff358098..a486c34902f 100644 --- a/sql/item.h +++ b/sql/item.h @@ -539,6 +539,11 @@ public: */ Item *next; uint32 max_length; + /* + TODO: convert name and name_length fields into LEX_STRING to keep them in + sync (see bug #11829681/60295 etc). Then also remove some strlen(name) + calls. 
+ */ uint name_length; /* Length of name */ int8 marker; uint8 decimals; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index f4dd9b4de12..9017134416f 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -918,7 +918,7 @@ int Arg_comparator::set_cmp_func(Item_result_field *owner_arg, */ Query_arena backup; Query_arena *save_arena= thd->switch_to_arena_for_cached_items(&backup); - Item_cache_int *cache= new Item_cache_int(); + Item_cache_int *cache= new Item_cache_int(MYSQL_TYPE_DATETIME); if (save_arena) thd->set_query_arena(save_arena); @@ -4139,13 +4139,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - if (!args[i]->null_value) // Skip NULL values - { - array->set(j,args[i]); - j++; - } - else - have_null= 1; + array->set(j,args[i]); + if (!args[i]->null_value) // Skip NULL values + j++; + else + have_null= 1; } if ((array->used_count= j)) array->sort(); diff --git a/sql/item_func.cc b/sql/item_func.cc index 63b8419aaaa..dd20ad53f7c 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -524,7 +524,10 @@ bool Item_func::is_expensive_processor(uchar *arg) my_decimal *Item_func::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed); - int2my_decimal(E_DEC_FATAL_ERROR, val_int(), unsigned_flag, decimal_value); + longlong nr= val_int(); + if (null_value) + return 0; /* purecov: inspected */ + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); return decimal_value; } @@ -882,7 +885,7 @@ longlong Item_func_numhybrid::val_int() return 0; char *end= (char*) res->ptr() + res->length(); - CHARSET_INFO *cs= str_value.charset(); + CHARSET_INFO *cs= res->charset(); return (*(cs->cset->strtoll10))(cs, res->ptr(), &end, &err_not_used); } default: @@ -1845,9 +1848,10 @@ void Item_func_integer::fix_length_and_dec() void Item_func_int_val::fix_num_length_and_dec() { - max_length= args[0]->max_length - (args[0]->decimals ? - args[0]->decimals + 1 : - 0) + 2; + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - + (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2; + max_length= tmp_max_length > (ulonglong) max_field_size ? + max_field_size : (uint32) tmp_max_length; uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; @@ -2162,10 +2166,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) if (!(null_value= (args[0]->null_value || args[1]->null_value || my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec, truncate, decimal_value) > 1))) - { - decimal_value->frac= decimals; return decimal_value; - } return 0; } @@ -3902,6 +3903,7 @@ Item_func_set_user_var::fix_length_and_dec() maybe_null=args[0]->maybe_null; max_length=args[0]->max_length; decimals=args[0]->decimals; + unsigned_flag= args[0]->unsigned_flag; collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); } diff --git a/sql/item_func.h b/sql/item_func.h index 5c5ea33f247..b8f294d9e70 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011 Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 39776643ddd..148170110f5 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -39,6 +39,9 @@ C_MODE_START #include "../mysys/my_static.h" // For soundex_map C_MODE_END +/** + @todo Remove this. It is not safe to use a shared String object. + */ String my_empty_string("",default_charset_info); @@ -116,7 +119,6 @@ String *Item_func_md5::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) { uchar digest[16]; @@ -129,6 +131,7 @@ String *Item_func_md5::val_str(String *str) return 0; } array_to_hex((char *) str->ptr(), (const char*) digest, 16); + str->set_charset(&my_charset_bin); str->length((uint) 32); return str; } @@ -155,7 +158,6 @@ String *Item_func_sha::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) /* If we got value different from NULL */ { SHA1_CONTEXT context; /* Context used to generate SHA1 hash */ @@ -165,11 +167,13 @@ String *Item_func_sha::val_str(String *str) /* No need to check error as the only case would be too long message */ mysql_sha1_input(&context, (const uchar *) sptr->ptr(), sptr->length()); + /* Ensure that memory is free and we got result */ if (!( str->alloc(SHA1_HASH_SIZE*2) || (mysql_sha1_result(&context,digest)))) { array_to_hex((char *) str->ptr(), (const char*) digest, SHA1_HASH_SIZE); + str->set_charset(&my_charset_bin); str->length((uint) SHA1_HASH_SIZE*2); null_value=0; return str; @@ -461,7 +465,7 @@ String *Item_func_des_encrypt::val_str(String *str) if ((null_value= args[0]->null_value)) return 0; // ENCRYPT(NULL) == NULL if ((res_length=res->length()) == 0) - return &my_empty_string; + return make_empty_result(); if (arg_count == 1) { @@ -517,6 +521,7 @@ String *Item_func_des_encrypt::val_str(String *str) tmp_arg[res_length-1]=tail; // save extra length tmp_value.realloc(res_length+1); tmp_value.length(res_length+1); + tmp_value.set_charset(&my_charset_bin); tmp_value[0]=(char) (128 | key_number); // Real encryption bzero((char*) &ivec,sizeof(ivec)); @@ -604,6 +609,7 @@ String *Item_func_des_decrypt::val_str(String *str) if ((tail=(uint) (uchar) tmp_value[length-2]) > 8) goto wrong_key; // Wrong key tmp_value.length(length-1-tail); + tmp_value.set_charset(&my_charset_bin); return &tmp_value; error: @@ -641,7 +647,7 @@ String *Item_func_concat_ws::val_str(String *str) use_as_buff= &tmp_value; str->length(0); // QQ; Should be removed - res=str; + res=str; // If 0 arg_count // Skip until non-null argument is found. // If not, return the empty string @@ -653,7 +659,7 @@ String *Item_func_concat_ws::val_str(String *str) } if (i == arg_count) - return &my_empty_string; + return make_empty_result(); for (i++; i < arg_count ; i++) { @@ -804,7 +810,7 @@ String *Item_func_reverse::val_str(String *str) return 0; /* An empty string is a special case as the string pointer may be null */ if (!res->length()) - return &my_empty_string; + return make_empty_result(); if (tmp_value.alloced_length() < res->length() && tmp_value.realloc(res->length())) { @@ -1144,8 +1150,7 @@ String *Item_func_left::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. 
*/ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; - + return make_empty_result(); if ((res->length() <= (ulonglong) length) || (res->length() <= (char_pos= res->charpos((int) length)))) return res; @@ -1188,7 +1193,7 @@ String *Item_func_right::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. */ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; /* purecov: inspected */ + return make_empty_result(); /* purecov: inspected */ if (res->length() <= (ulonglong) length) return res; /* purecov: inspected */ @@ -1227,7 +1232,7 @@ String *Item_func_substr::val_str(String *str) /* Negative or zero length, will return empty string. */ if ((arg_count == 3) && (length <= 0) && (length == 0 || !args[2]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Set here so that rest of code sees out-of-bound value as such. */ @@ -1238,12 +1243,12 @@ String *Item_func_substr::val_str(String *str) /* Assumes that the maximum length of a String is < INT_MAX32. */ if ((!args[1]->unsigned_flag && (start < INT_MIN32 || start > INT_MAX32)) || (args[1]->unsigned_flag && ((ulonglong) start > INT_MAX32))) - return &my_empty_string; + return make_empty_result(); start= ((start < 0) ? res->numchars() + start : start - 1); start= res->charpos((int) start); if ((start < 0) || ((uint) start + 1 > res->length())) - return &my_empty_string; + return make_empty_result(); length= res->charpos((int) length, (uint32) start); tmp_length= res->length() - start; @@ -1306,7 +1311,7 @@ String *Item_func_substr_index::val_str(String *str) null_value=0; uint delimiter_length= delimiter->length(); if (!res->length() || !delimiter_length || !count) - return &my_empty_string; // Wrong parameters + return make_empty_result(); // Wrong parameters res->set_charset(collation.collation); @@ -1655,7 +1660,7 @@ String *Item_func_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, res->charset()); return str; @@ -1679,7 +1684,7 @@ String *Item_func_old_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password_323(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, res->charset()); return str; @@ -1707,8 +1712,7 @@ String *Item_func_encrypt::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; - + return make_empty_result(); if (arg_count == 1) { // generate random salt time_t timestamp=current_thd->query_start(); @@ -1968,7 +1972,7 @@ String *Item_func_soundex::val_str(String *str) for ( ; ; ) /* Skip pre-space */ { if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) - return &my_empty_string; /* EOL or invalid byte sequence */ + return make_empty_result(); /* EOL or invalid byte sequence */ if (rc == 1 && cs->ctype) { @@ -1993,7 +1997,7 @@ String *Item_func_soundex::val_str(String *str) { /* Extra safety - should not really happen */ DBUG_ASSERT(false); - return &my_empty_string; + return make_empty_result(); } to+= rc; break; @@ -2291,7 +2295,7 @@ String *Item_func_make_set::val_str(String *str) 
else { if (tmp_str.copy(*res)) // Don't use 'str' - return &my_empty_string; + return make_empty_result(); result= &tmp_str; } } @@ -2301,11 +2305,11 @@ String *Item_func_make_set::val_str(String *str) { // Copy data to tmp_str if (tmp_str.alloc(result->length()+res->length()+1) || tmp_str.copy(*result)) - return &my_empty_string; + return make_empty_result(); result= &tmp_str; } if (tmp_str.append(STRING_WITH_LEN(","), &my_charset_bin) || tmp_str.append(*res)) - return &my_empty_string; + return make_empty_result(); } } } @@ -2444,7 +2448,7 @@ String *Item_func_repeat::val_str(String *str) null_value= 0; if (count <= 0 && (count == 0 || !args[1]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Bounds check on count: If this is triggered, we will error. */ @@ -2752,7 +2756,7 @@ String *Item_func_conv::val_str(String *str) ptr= longlong2str(dec, ans, to_base, 1); if (str->copy(ans, (uint32) (ptr-ans), default_charset())) - return &my_empty_string; + return make_empty_result(); return str; } @@ -2762,22 +2766,16 @@ String *Item_func_conv_charset::val_str(String *str) DBUG_ASSERT(fixed == 1); if (use_cached_value) return null_value ? 0 : &str_value; - /* - Here we don't pass 'str' as a parameter to args[0]->val_str() - as 'str' may point to 'str_value' (e.g. see Item::save_in_field()), - which we use below to convert string. - Use argument's 'str_value' instead. - */ - String *arg= args[0]->val_str(&args[0]->str_value); + String *arg= args[0]->val_str(str); uint dummy_errors; if (!arg) { null_value=1; return 0; } - null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(), + null_value= tmp_value.copy(arg->ptr(), arg->length(), arg->charset(), conv_charset, &dummy_errors); - return null_value ? 0 : check_well_formed_result(&str_value); + return null_value ? 
0 : check_well_formed_result(&tmp_value); } void Item_func_conv_charset::fix_length_and_dec() @@ -2919,7 +2917,7 @@ String *Item_func_hex::val_str(String *str) return 0; ptr= longlong2str(dec,ans,16,1); if (str->copy(ans,(uint32) (ptr-ans),default_charset())) - return &my_empty_string; // End of memory + return make_empty_result(); // End of memory return str; } @@ -3219,14 +3217,68 @@ String *Item_func_quote::val_str(String *str) } arg_length= arg->length(); - new_length= arg_length+2; /* for beginning and ending ' signs */ - for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) - new_length+= get_esc_bit(escmask, (uchar) *from); + if (collation.collation->mbmaxlen == 1) + { + new_length= arg_length + 2; /* for beginning and ending ' signs */ + for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) + new_length+= get_esc_bit(escmask, (uchar) *from); + } + else + { + new_length= (arg_length * 2) + /* For string characters */ + (2 * collation.collation->mbmaxlen); /* For quotes */ + } if (tmp_value.alloc(new_length)) goto null; + if (collation.collation->mbmaxlen > 1) + { + CHARSET_INFO *cs= collation.collation; + int mblen; + uchar *to_end; + to= (char*) tmp_value.ptr(); + to_end= (uchar*) to + new_length; + + /* Put leading quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + + for (start= (char*) arg->ptr(), end= start + arg_length; start < end; ) + { + my_wc_t wc; + bool escape; + if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) start, (uchar*) end)) <= 0) + goto null; + start+= mblen; + switch (wc) { + case 0: escape= 1; wc= '0'; break; + case '\032': escape= 1; wc= 'Z'; break; + case '\'': escape= 1; break; + case '\\': escape= 1; break; + default: escape= 0; break; + } + if (escape) + { + if ((mblen= cs->cset->wc_mb(cs, '\\', (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + if ((mblen= cs->cset->wc_mb(cs, wc, (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + + /* Put trailing quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + new_length= to - tmp_value.ptr(); + goto ret; + } + /* We replace characters from the end to the beginning */ @@ -3258,6 +3310,8 @@ String *Item_func_quote::val_str(String *str) } } *to= '\''; + +ret: tmp_value.length(new_length); tmp_value.set_charset(collation.collation); null_value= 0; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 53219e70973..e8d0384482d 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -22,6 +22,23 @@ class Item_str_func :public Item_func { +protected: + /** + Sets the result value of the function an empty string, using the current + character set. No memory is allocated. + @retval A pointer to the str_value member. + */ + String *make_empty_result() + { + /* + Reset string length to an empty string. We don't use str_value.set() as + we don't want to free and potentially have to reallocate the buffer + for each call. 
+ */ + str_value.length(0); + str_value.set_charset(collation.collation); + return &str_value; + } public: Item_str_func() :Item_func() { decimals=NOT_FIXED_DEC; } Item_str_func(Item *a) :Item_func(a) {decimals=NOT_FIXED_DEC; } @@ -707,15 +724,17 @@ public: String *val_str(String *); void fix_length_and_dec() { - ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2; - max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); collation.set(args[0]->collation); + ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + + 2 * collation.collation->mbmaxlen; + max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); } }; class Item_func_conv_charset :public Item_str_func { bool use_cached_value; + String tmp_value; public: bool safe; CHARSET_INFO *conv_charset; // keep it public diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 7de02d726fa..51754fbd9ee 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -609,17 +609,13 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) switch (hybrid_type= item->result_type()) { case INT_RESULT: - max_length= 20; - break; case DECIMAL_RESULT: + case STRING_RESULT: max_length= item->max_length; break; case REAL_RESULT: max_length= float_length(decimals); break; - case STRING_RESULT: - max_length= item->max_length; - break; case ROW_RESULT: default: DBUG_ASSERT(0); diff --git a/sql/item_sum.h b/sql/item_sum.h index 8e8f8ac99d2..851b77ddeae 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -354,6 +354,7 @@ public: forced_const= TRUE; } virtual bool const_item() const { return forced_const; } + virtual bool const_during_execution() const { return false; } virtual void print(String *str, enum_query_type query_type); void fix_num_length_and_dec(); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 9cf56148994..c17557905bd 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -294,8 +294,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, for (; ptr != end && val != val_end; ptr++) { /* Skip pre-space between each argument */ - while (val != val_end && my_isspace(cs, *val)) - val++; + if ((val+= cs->cset->scan(cs, val, val_end, MY_SEQ_SPACES)) >= val_end) + break; if (*ptr == '%' && ptr+1 != end) { @@ -649,7 +649,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -658,7 +658,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -823,7 +823,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); @@ -3300,6 +3300,7 @@ void Item_func_str_to_date::fix_length_and_dec() { maybe_null= 1; decimals=0; + cached_format_type= DATE_TIME; cached_field_type= MYSQL_TYPE_DATETIME; max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; cached_timestamp_type= MYSQL_TIMESTAMP_NONE; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 87af384923e..2d499f6ef0e 100644 --- 
a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -108,8 +109,11 @@ public: { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { - str->set(val_int(), &my_charset_bin); - return null_value ? 0 : str; + longlong nr= val_int(); + if (null_value) + return 0; + str->set(nr, &my_charset_bin); + return str; } const char *func_name() const { return "month"; } enum Item_result result_type () const { return INT_RESULT; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 2a47be38d85..54cbf81ab20 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -9585,7 +9585,19 @@ static bool record_compare(TABLE *table) } } - if (table->s->blob_fields + table->s->varchar_fields == 0) + /** + Compare full record only if: + - there are no blob fields (otherwise we would also need + to compare blobs contents as well); + - there are no varchar fields (otherwise we would also need + to compare varchar contents as well); + - there are no null fields, otherwise NULLed fields + contents (i.e., the don't care bytes) may show arbitrary + values, depending on how each engine handles internally. + */ + if ((table->s->blob_fields + + table->s->varchar_fields + + table->s->null_fields) == 0) { result= cmp_record(table,record[1]); goto record_compare_exit; @@ -9600,13 +9612,22 @@ static bool record_compare(TABLE *table) goto record_compare_exit; } - /* Compare updated fields */ + /* Compare fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { - if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + + /** + We only compare field contents that are not null. + NULL fields (i.e., their null bits) were compared + earlier. + */ + if (!(*(ptr))->is_null()) { - result= TRUE; - goto record_compare_exit; + if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + { + result= TRUE; + goto record_compare_exit; + } } } diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index 37ce7c3f840..cbd98cb41af 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -998,7 +998,9 @@ int DsMrr_impl::setup_two_handlers() DBUG_RETURN(1); /* Create a separate handler object to do rnd_pos() calls. 
*/ - if (!(new_h2= primary_file->clone(thd->mem_root)) || + if (!(new_h2= primary_file->clone(primary_file->get_table()->s-> + normalized_path.str, + thd->mem_root)) || new_h2->ha_external_lock(thd, F_RDLCK)) { delete new_h2; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 4cbb3251742..94e44aad93b 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -980,14 +980,14 @@ struct Query_cache_query_flags (((L)->sql_command == SQLCOM_SELECT) && (L)->safe_to_cache_query) #else #define QUERY_CACHE_FLAGS_SIZE 0 -#define query_cache_store_query(A, B) -#define query_cache_destroy() -#define query_cache_result_size_limit(A) -#define query_cache_init() -#define query_cache_resize(A) -#define query_cache_set_min_res_unit(A) -#define query_cache_invalidate3(A, B, C) -#define query_cache_invalidate1(A) +#define query_cache_store_query(A, B) do { } while(0) +#define query_cache_destroy() do { } while(0) +#define query_cache_result_size_limit(A) do { } while(0) +#define query_cache_init() do { } while(0) +#define query_cache_resize(A) do { } while(0) +#define query_cache_set_min_res_unit(A) do { } while(0) +#define query_cache_invalidate3(A, B, C) do { } while(0) +#define query_cache_invalidate1(A) do { } while(0) #define query_cache_send_result_to_client(A, B, C) 0 #define query_cache_invalidate_by_MyISAM_filename_ref NULL @@ -1101,7 +1101,11 @@ void reset_mqh(LEX_USER *lu, bool get_them); bool check_mqh(THD *thd, uint check_command); void time_out_user_resource_limits(THD *thd, USER_CONN *uc); void decrease_user_connections(USER_CONN *uc); -void thd_init_client_charset(THD *thd, uint cs_number); +bool thd_init_client_charset(THD *thd, uint cs_number); +inline bool is_supported_parser_charset(CHARSET_INFO *cs) +{ + return test(cs->mbminlen == 1); +} bool setup_connection_thread_globals(THD *thd); bool login_connection(THD *thd); void end_connection(THD *thd); @@ -2074,6 +2078,7 @@ extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb; extern uint test_flags,select_errors,ha_open_options; extern uint protocol_version, mysqld_port, mysqld_extra_port, dropping_tables; extern uint delay_key_write_options; +extern ulong max_long_data_size; #endif /* MYSQL_SERVER */ #if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS extern MYSQL_PLUGIN_IMPORT uint lower_case_table_names; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2e4ce2c6be6..936be77c432 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -449,6 +449,7 @@ TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"", /* the default log output is log tables */ static bool lower_case_table_names_used= 0; +static bool max_long_data_size_used= false; static bool volatile select_thread_in_use, signal_thread_in_use; static bool volatile ready_to_exit; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; @@ -643,6 +644,12 @@ ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong max_connections, max_connect_errors; ulong extra_max_connections; +/* + Maximum length of parameter value which can be set through + mysql_send_long_data() call. 
+*/ +ulong max_long_data_size; + uint max_user_connections= 0; ulonglong denied_connections; /** @@ -1530,6 +1537,7 @@ static void wait_for_signal_thread_to_end() #endif } +#endif /*EMBEDDED_LIBRARY*/ static void clean_up_mutexes() { @@ -1557,19 +1565,21 @@ static void clean_up_mutexes() (void) pthread_mutex_destroy(&LOCK_global_table_stats); (void) pthread_mutex_destroy(&LOCK_global_index_stats); +#ifndef EMBEDDED_LIBRARY Events::destroy_mutexes(); +#endif /* !EMBEDDED_LIBRARY */ #ifdef HAVE_OPENSSL (void) pthread_mutex_destroy(&LOCK_des_key_file); #ifndef HAVE_YASSL for (int i= 0; i < CRYPTO_num_locks(); ++i) (void) rwlock_destroy(&openssl_stdlocks[i].lock); OPENSSL_free(openssl_stdlocks); -#endif -#endif +#endif /* HAVE_YASSL */ +#endif /* HAVE_OPENSSL */ #ifdef HAVE_REPLICATION (void) pthread_mutex_destroy(&LOCK_rpl_status); (void) pthread_cond_destroy(&COND_rpl_status); -#endif +#endif /* HAVE_REPLICATION */ (void) pthread_mutex_destroy(&LOCK_server_started); (void) pthread_cond_destroy(&COND_server_started); (void) pthread_mutex_destroy(&LOCK_active_mi); @@ -1589,8 +1599,6 @@ static void clean_up_mutexes() DBUG_VOID_RETURN; } -#endif /*EMBEDDED_LIBRARY*/ - /** Register order of mutex for wrong mutex deadlock detector @@ -3245,6 +3253,19 @@ sizeof(load_default_groups)/sizeof(load_default_groups[0]); #endif +#ifndef EMBEDDED_LIBRARY +static +int +check_enough_stack_size() +{ + uchar stack_top; + + return check_stack_overrun(current_thd, STACK_MIN_SIZE, + &stack_top); +} +#endif + + /** Initialize one of the global date/time format variables. @@ -3443,18 +3464,13 @@ static int init_common_variables(const char *conf_file_name, int argc, char **argv, const char **groups) { char buff[FN_REFLEN], *s; + const char *basename; umask(((~my_umask) & 0666)); my_decimal_set_zero(&decimal_zero); // set decimal_zero constant; tzset(); // Set tzname max_system_variables.pseudo_thread_id= (ulong)~0; server_start_time= flush_status_time= my_time(0); - /* TODO: remove this when my_time_t is 64 bit compatible */ - if (server_start_time >= (time_t) MY_TIME_T_MAX) - { - sql_print_error("This MySQL server doesn't support dates later then 2038"); - return 1; - } rpl_filter= new Rpl_filter; binlog_filter= new Rpl_filter; @@ -3493,21 +3509,29 @@ static int init_common_variables(const char *conf_file_name, int argc, */ mysql_bin_log.init_pthread_objects(); + /* TODO: remove this when my_time_t is 64 bit compatible */ + if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time)) + { + sql_print_error("This MySQL server doesn't support dates later then 2038"); + return 1; + } + + if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) { /* Get hostname of computer (used by 'show variables') and as default basename for the pid file if --log-basename is not given. 
*/ - const char *basename= glob_hostname; - if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) - { - strmake(glob_hostname, STRING_WITH_LEN("localhost")); - sql_print_warning("gethostname failed, using '%s' as hostname", + strmake(glob_hostname, STRING_WITH_LEN("localhost")); + sql_print_warning("gethostname failed, using '%s' as hostname", glob_hostname); - basename= "mysql"; - } - strmake(pidfile_name, basename, sizeof(pidfile_name)-5); + basename= "mysql"; + } + else + { + basename= glob_hostname; } + strmake(pidfile_name, basename, sizeof(pidfile_name)-5); strmov(fn_ext(pidfile_name),".pid"); // Add proper extension /* @@ -3636,7 +3660,11 @@ static int init_common_variables(const char *conf_file_name, int argc, #endif mysys_uses_curses=0; #ifdef USE_REGEX - my_regex_init(&my_charset_latin1); +#ifndef EMBEDDED_LIBRARY + my_regex_init(&my_charset_latin1, check_enough_stack_size); +#else + my_regex_init(&my_charset_latin1, NULL); +#endif #endif /* Process a comma-separated character set list and choose @@ -6081,6 +6109,7 @@ enum options_mysqld OPT_IGNORE_BUILTIN_INNODB, OPT_BINLOG_DIRECT_NON_TRANS_UPDATE, OPT_DEFAULT_CHARACTER_SET_OLD, + OPT_MAX_LONG_DATA_SIZE, OPT_MASTER_VERIFY_CHECKSUM, OPT_SLAVE_SQL_VERIFY_CHECKSUM }; @@ -7305,6 +7334,12 @@ each time the SQL thread starts.", &global_system_variables.max_length_for_sort_data, &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, + {"max_long_data_size", OPT_MAX_LONG_DATA_SIZE, + "The maximum size of prepared statement parameter which can be provided " + "through mysql_send_long_data() API call. To be used when limit of " + "max_allowed_packet is too small", + &max_long_data_size, &max_long_data_size, 0, GET_ULONG, + REQUIRED_ARG, 1024*1024L, 1024, UINT_MAX32, MALLOC_OVERHEAD, 1, 0}, {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT, "Maximum number of prepared statements in the server.", &max_prepared_stmt_count, &max_prepared_stmt_count, @@ -8286,6 +8321,7 @@ static void usage(void) puts("\ Copyright (C) 2000-2008 MySQL AB, by Monty and others.\n\ Copyright (C) 2008 Sun Microsystems, Inc.\n\ +Copyright (C) 2009-2011 Monty Program Ab.\n\ This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\ and you are welcome to modify and redistribute it under the GPL license\n\n\ Starts the MySQL database server.\n"); @@ -9320,6 +9356,9 @@ mysqld_get_one_option(int optid, } break; #endif /* defined(ENABLED_DEBUG_SYNC) */ + case OPT_MAX_LONG_DATA_SIZE: + max_long_data_size_used= true; + break; } return 0; } @@ -9412,6 +9451,14 @@ static int get_options(int *argc,char **argv) opt_log_slow_slave_statements) && !opt_slow_log) sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set"); + if (global_system_variables.net_buffer_length > + global_system_variables.max_allowed_packet) + { + sql_print_warning("net_buffer_length (%lu) is set to be larger " + "than max_allowed_packet (%lu). Please rectify.", + global_system_variables.net_buffer_length, + global_system_variables.max_allowed_packet); + } #if defined(HAVE_BROKEN_REALPATH) my_use_symdir=0; @@ -9494,6 +9541,14 @@ static int get_options(int *argc,char **argv) &extra_max_connections, &extra_connection_count); #endif + + /* + If max_long_data_size is not specified explicitly use + value of max_allowed_packet. 
+ */ + if (!max_long_data_size_used) + max_long_data_size= global_system_variables.max_allowed_packet; + return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index a605b3f8fe1..eae344f378f 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1665,7 +1665,7 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, select->read_tables=read_tables; select->const_tables=const_tables; select->head=head; - select->cond=conds; + select->cond= select->original_cond= conds; if (head->sort.io_cache) { @@ -1959,7 +1959,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually set the error flag. Note: there seems to be quite a few diff --git a/sql/opt_range.h b/sql/opt_range.h index d7a0c1e2f8f..a921b60fc2b 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -883,6 +883,11 @@ class SQL_SELECT :public Sql_alloc { public: QUICK_SELECT_I *quick; // If quick-select used COND *cond; // where condition + /* + Original WHERE condition (before anything was removed as part of index + condition pushdown. + */ + COND *original_cond; /* When using Index Condition Pushdown: condition that we've had before diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 310b48756d0..5cba8d12ede 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -211,6 +211,7 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) /** Substitutes constants for some COUNT(), MIN() and MAX() functions. + @param thd thread handler @param tables list of leaves of join table tree @param all_fields All fields to be returned @param conds WHERE clause @@ -228,9 +229,12 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) HA_ERR_KEY_NOT_FOUND on impossible conditions @retval HA_ERR_... if a deadlock or a lock wait timeout happens, for example + @retval + ER_... e.g. 
ER_SUBQUERY_NO_1_ROW */ -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) +int opt_sum_query(THD *thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds) { List_iterator_fast<Item> it(all_fields); int const_result= 1; @@ -241,6 +245,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) table_map where_tables= 0; Item *item; int error; + DBUG_ENTER("opt_sum_query"); if (conds) where_tables= conds->used_tables(); @@ -269,7 +274,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + DBUG_RETURN(0); } else used_tables|= tl->table->map; @@ -297,7 +302,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { tl->table->file->print_error(error, MYF(0)); tl->table->in_use->fatal_error(); - return error; + DBUG_RETURN(error); } count*= tl->table->file->stats.records; } @@ -389,10 +394,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) if (error) { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); - return(error); + DBUG_RETURN(error); } removed_tables|= table->map; } @@ -436,6 +441,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; } } + + if (thd->is_error()) + DBUG_RETURN(thd->main_da.sql_errno()); + /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables @@ -445,7 +454,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) */ if (removed_tables && used_tables != removed_tables) const_result= 0; // We didn't remove all tables - return const_result; + DBUG_RETURN(const_result); } @@ -731,6 +740,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, if (is_null || (is_null_safe_eq && args[1]->is_null())) { + /* + If we have a non-nullable index, we cannot use it, + since set_null will be ignored, and we will compare uninitialized data. + */ + if (!part->field->real_maybe_null()) + DBUG_RETURN(FALSE); part->field->set_null(); *key_ptr= (uchar) 1; } @@ -801,8 +816,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, @param[out] prefix_len Length of prefix for the search range @note - This function may set table->key_read to 1, which must be reset after - index is used! (This can only happen when function returns 1) + This function may set field->table->key_read to true, + which must be reset after index is used! 
+ (This can only happen when function returns 1) @retval 0 Index can not be used to optimize MIN(field)/MAX(field) @@ -817,7 +833,9 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not key field + return FALSE; // Not key field + + DBUG_ENTER("find_key_for_maxmin"); TABLE *table= field->table; uint idx= 0; @@ -842,7 +860,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) - return 0; + DBUG_RETURN(FALSE); /* Check whether the index component is partial */ Field *part_field= table->field[part->fieldnr-1]; @@ -891,12 +909,12 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ if (field->part_of_key.is_set(idx)) table->enable_keyread(); - return 1; + DBUG_RETURN(TRUE); } } } } - return 0; + DBUG_RETURN(FALSE); } diff --git a/sql/set_var.cc b/sql/set_var.cc index 33575de1ccf..49e0be3f0db 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -154,6 +154,8 @@ static bool sys_update_slow_log_path(THD *thd, set_var * var); static void sys_default_slow_log_path(THD *thd, enum_var_type type); static void fix_sys_log_slow_filter(THD *thd, enum_var_type); static uchar *get_myisam_mmap_size(THD *thd); +static int check_max_allowed_packet(THD *thd, set_var *var); +static int check_net_buffer_length(THD *thd, set_var *var); /* Variable definition list @@ -401,7 +403,8 @@ static sys_var_const sys_lower_case_table_names(&vars, (uchar*) &lower_case_table_names); static sys_var_thd_ulong_session_readonly sys_max_allowed_packet(&vars, "max_allowed_packet", - &SV::max_allowed_packet); + &SV::max_allowed_packet, + check_max_allowed_packet); static sys_var_ulonglong_ptr sys_max_binlog_cache_size(&vars, "max_binlog_cache_size", &max_binlog_cache_size); static sys_var_long_ptr sys_max_binlog_size(&vars, "max_binlog_size", @@ -435,6 +438,12 @@ static sys_var_thd_ulong sys_max_seeks_for_key(&vars, "max_seeks_for_key", &SV::max_seeks_for_key); static sys_var_thd_ulong sys_max_length_for_sort_data(&vars, "max_length_for_sort_data", &SV::max_length_for_sort_data); +static sys_var_const sys_max_long_data_size(&vars, + "max_long_data_size", + OPT_GLOBAL, SHOW_LONG, + (uchar*) + &max_long_data_size); + #ifndef TO_BE_DELETED /* Alias for max_join_size */ static sys_var_thd_ha_rows sys_sql_max_join_size(&vars, "sql_max_join_size", &SV::max_join_size, @@ -487,7 +496,8 @@ static sys_var_const sys_named_pipe(&vars, "named_pipe", /* purecov: end */ #endif static sys_var_thd_ulong_session_readonly sys_net_buffer_length(&vars, "net_buffer_length", - &SV::net_buffer_length); + &SV::net_buffer_length, + check_net_buffer_length); static sys_var_thd_ulong sys_net_read_timeout(&vars, "net_read_timeout", &SV::net_read_timeout, 0, fix_net_read_timeout); @@ -1919,7 +1929,7 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) } var->save_result.ulong_value= ((ulong) - find_set(enum_names, res->ptr(), + find_set(enum_names, res->c_ptr_safe(), res->length(), NULL, &error, &error_len, @@ -2334,7 +2344,7 @@ bool sys_var_character_set_client::check(THD *thd, set_var *var) if (sys_var_character_set_sv::check(thd, var)) return 1; /* Currently, UCS-2 cannot be used as a client character set */ - if (var->save_result.charset->mbminlen > 1) + if (!is_supported_parser_charset(var->save_result.charset)) { my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, 
var->save_result.charset->csname); @@ -2846,14 +2856,14 @@ int set_var_collation_client::update(THD *thd) bool sys_var_timestamp::check(THD *thd, set_var *var) { - time_t val; + longlong val; var->save_result.ulonglong_value= var->value->val_int(); - val= (time_t) var->save_result.ulonglong_value; - if (val < (time_t) MY_TIME_T_MIN || val > (time_t) MY_TIME_T_MAX) + val= (longlong) var->save_result.ulonglong_value; + if (val != 0 && // this is how you set the default value + (val < TIMESTAMP_MIN_VALUE || val > TIMESTAMP_MAX_VALUE)) { - my_message(ER_UNKNOWN_ERROR, - "This version of MySQL doesn't support dates later than 2038", - MYF(0)); + char buf[64]; + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "timestamp", llstr(val, buf)); return TRUE; } return FALSE; @@ -4432,6 +4442,36 @@ uchar *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type, } #endif + +int +check_max_allowed_packet(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val < (longlong) global_system_variables.net_buffer_length) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + + +int +check_net_buffer_length(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val > (longlong) global_system_variables.max_allowed_packet) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + /**************************************************************************** Used templates ****************************************************************************/ diff --git a/sql/slave.cc b/sql/slave.cc index b042d463b1e..4f3b0370744 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -408,17 +409,6 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) int error,force_all = (thread_mask & SLAVE_FORCE_ALL); pthread_mutex_t *sql_lock = &mi->rli.run_lock, *io_lock = &mi->run_lock; - if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) - { - DBUG_PRINT("info",("Terminating IO thread")); - mi->abort_slave=1; - if ((error=terminate_slave_thread(mi->io_thd, io_lock, - &mi->stop_cond, - &mi->slave_running, - skip_lock)) && - !force_all) - DBUG_RETURN(error); - } if (thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) { DBUG_PRINT("info",("Terminating SQL thread")); @@ -430,6 +420,17 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) !force_all) DBUG_RETURN(error); } + if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) + { + DBUG_PRINT("info",("Terminating IO thread")); + mi->abort_slave=1; + if ((error=terminate_slave_thread(mi->io_thd, io_lock, + &mi->stop_cond, + &mi->slave_running, + skip_lock)) && + !force_all) + DBUG_RETURN(error); + } DBUG_RETURN(0); } diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index 686e1a1346f..18c80b2b054 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -651,7 +651,7 @@ int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u) } -bool Select_fetch_into_spvars::send_data(List<Item> &items) +int Select_fetch_into_spvars::send_data(List<Item> &items) { List_iterator_fast<struct sp_variable> spvar_iter(*spvar_list); List_iterator_fast<Item> item_iter(items); @@ -668,7 +668,7 @@ bool Select_fetch_into_spvars::send_data(List<Item> &items) for (; spvar= spvar_iter++, item= item_iter++; ) { if (thd->spcont->set_variable(thd, spvar->offset, &item)) - return TRUE; + return 1; } - return FALSE; + return 0; } diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h index ecd11453e49..15815e496dd 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -254,7 +254,7 @@ public: void set_spvar_list(List<struct sp_variable> *vars) { spvar_list= vars; } virtual bool send_eof() { return FALSE; } - virtual bool send_data(List<Item> &items); + virtual int send_data(List<Item> &items); virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u); }; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 47d85238cff..f783375b010 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -5422,18 +5423,15 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, } -/* +/** Handle an in-memory privilege structure. - SYNOPSIS - handle_grant_struct() - struct_no The number of the structure to handle (0..3). - drop If user_from is to be dropped. - user_from The the user to be searched/dropped/renamed. - user_to The new name for the user if to be renamed, - NULL otherwise. + @param struct_no The number of the structure to handle (0..4). + @param drop If user_from is to be dropped. + @param user_from The the user to be searched/dropped/renamed. + @param user_to The new name for the user if to be renamed, NULL otherwise. - DESCRIPTION + @note Scan through all elements in an in-memory grant structure and apply the requested operation. 
Delete from grant structure if drop is true. @@ -5443,12 +5441,12 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, 0 acl_users 1 acl_dbs 2 column_priv_hash - 3 procs_priv_hash + 3 proc_priv_hash + 4 func_priv_hash - RETURN - > 0 At least one element matched. - 0 OK, but no element matched. - -1 Wrong arguments to function + @retval > 0 At least one element matched. + @retval 0 OK, but no element matched. + @retval -1 Wrong arguments to function. */ static int handle_grant_struct(uint struct_no, bool drop, @@ -5462,6 +5460,7 @@ static int handle_grant_struct(uint struct_no, bool drop, ACL_USER *acl_user= NULL; ACL_DB *acl_db= NULL; GRANT_NAME *grant_name= NULL; + HASH *grant_name_hash= NULL; DBUG_ENTER("handle_grant_struct"); DBUG_PRINT("info",("scan struct: %u search: '%s'@'%s'", struct_no, user_from->user.str, user_from->host.str)); @@ -5480,10 +5479,16 @@ static int handle_grant_struct(uint struct_no, bool drop, elements= acl_dbs.elements; break; case 2: - elements= column_priv_hash.records; + grant_name_hash= &column_priv_hash; + elements= grant_name_hash->records; break; case 3: - elements= proc_priv_hash.records; + grant_name_hash= &proc_priv_hash; + elements= grant_name_hash->records; + break; + case 4: + grant_name_hash= &func_priv_hash; + elements= grant_name_hash->records; break; default: return -1; @@ -5513,16 +5518,13 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx); - user= grant_name->user; - host= grant_name->host.hostname; - break; - case 3: - grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx); + case 4: + grant_name= (GRANT_NAME*) hash_element(grant_name_hash, idx); user= grant_name->user; host= grant_name->host.hostname; break; + default: MY_ASSERT_UNREACHABLE(); } @@ -5552,14 +5554,25 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - hash_delete(&column_priv_hash, (uchar*) grant_name); - break; - case 3: - hash_delete(&proc_priv_hash, (uchar*) grant_name); + case 4: + hash_delete(grant_name_hash, (uchar*) grant_name); break; } elements--; + /* + - If we are iterating through an array then we just have moved all + elements after the current element one position closer to its head. + This means that we have to take another look at the element at + current position as it is a new element from the array's tail. + - If we are iterating through a hash the current element was replaced + with one of elements from the tail. So we also have to take a look + at the new element in current position. + Note that in our HASH implementation hash_delete() won't move any + elements with position after current one to position before the + current (i.e. from the tail to the head), so it is safe to continue + iteration without re-starting. + */ idx--; } else if ( user_to ) @@ -5577,22 +5590,41 @@ static int handle_grant_struct(uint struct_no, bool drop, case 2: case 3: - /* - Update the grant structure with the new user name and - host name - */ - grant_name->set_user_details(user_to->host.str, grant_name->db, - user_to->user.str, grant_name->tname, - TRUE); - - /* - Since username is part of the hash key, when the user name - is renamed, the hash key is changed. 
Update the hash to - ensure that the position matches the new hash key value - */ - hash_update(&column_priv_hash, (uchar*) grant_name, - (uchar*) grant_name->hash_key, grant_name->key_length); - break; + case 4: + { + /* + Save old hash key and its length to be able properly update + element position in hash. + */ + char *old_key= grant_name->hash_key; + size_t old_key_length= grant_name->key_length; + + /* + Update the grant structure with the new user name and host name. + */ + grant_name->set_user_details(user_to->host.str, grant_name->db, + user_to->user.str, grant_name->tname, + TRUE); + + /* + Since username is part of the hash key, when the user name + is renamed, the hash key is changed. Update the hash to + ensure that the position matches the new hash key value + */ + hash_update(grant_name_hash, (uchar*) grant_name, (uchar*) old_key, + old_key_length); + /* + hash_update() operation could have moved element from the tail + of the hash to the current position. So we need to take a look + at the element in current position once again. + Thanks to the fact that hash_update() for our HASH implementation + won't move any elements from the tail of the hash to the positions + before the current one (a.k.a. head) it is safe to continue + iteration without restarting. + */ + idx--; + break; + } } } else @@ -5649,8 +5681,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, else { /* Handle user array. */ - if ((handle_grant_struct(0, drop, user_from, user_to) && ! result) || - found) + if ((handle_grant_struct(0, drop, user_from, user_to)) || found) { result= 1; /* At least one record/element found. */ /* If search is requested, we do not need to search further. */ @@ -5678,7 +5709,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, } } - /* Handle procedures table. */ + /* Handle stored routines table. */ if ((found= handle_grant_table(tables, 4, drop, user_from, user_to)) < 0) { /* Handle of table failed, don't touch in-memory array. */ @@ -5695,6 +5726,15 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, if (! drop && ! user_to) goto end; } + /* Handle funcs array. */ + if (((handle_grant_struct(4, drop, user_from, user_to) && ! result) || + found) && ! result) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } } /* Handle tables table. 
*/ @@ -7348,7 +7388,8 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length) if (ptr+1 < end) { uint cs_number= uint2korr(ptr); - thd_init_client_charset(thd, cs_number); + if (thd_init_client_charset(thd, cs_number)) + return 1; thd->update_charset(); } @@ -7451,7 +7492,8 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - thd_init_client_charset(thd, (uint) net->read_pos[8]); + if (thd_init_client_charset(thd, (uint) net->read_pos[8])) + return packet_error; thd->update_charset(); end= (char*) net->read_pos+32; } @@ -7999,7 +8041,8 @@ static int do_auth_once(THD *thd, LEX_STRING *auth_plugin_name, @retval 1 error */ -bool acl_authenticate(THD *thd, uint connect_errors, uint com_change_user_pkt_len) +bool acl_authenticate(THD *thd, uint connect_errors, + uint com_change_user_pkt_len) { int res= CR_OK; MPVIO_EXT mpvio; diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 2c6937d29b7..218f1a6bab1 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -242,7 +242,7 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) if (str == end) { info->is_float = 1; // we can't use variable decimals here - return 1; + DBUG_RETURN(1); } DBUG_RETURN(0); } @@ -753,7 +753,7 @@ int analyse::end_of_records() tmp_str.append(STRING_WITH_LEN(" NOT NULL")); output_str_length = tmp_str.length(); func_items[9]->set(tmp_str.ptr(), tmp_str.length(), tmp_str.charset()); - if (result->send_data(result_fields)) + if (result->send_data(result_fields) > 0) return -1; continue; } @@ -798,7 +798,7 @@ int analyse::end_of_records() if (!(*f)->nulls) ans.append(STRING_WITH_LEN(" NOT NULL")); func_items[9]->set(ans.ptr(), ans.length(), ans.charset()); - if (result->send_data(result_fields)) + if (result->send_data(result_fields) > 0) return -1; } return 0; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index fa495a776e1..c0b69ac7b54 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2827,10 +2827,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, ("Found table '%s.%s' with different refresh version", table_list->db, table_list->table_name)); - /* Ignore FLUSH, but not name locks! */ + /* Ignore FLUSH and pending name locks, but not acquired name locks! */ if (flags & MYSQL_LOCK_IGNORE_FLUSH && !table->open_placeholder) { - DBUG_ASSERT(table->db_stat); /* Force close at once after usage */ thd->version= table->s->version; continue; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a5c06d2aa77..58e9d55bb10 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1890,7 +1890,7 @@ void select_send::cleanup() /* Send data to client. 
Returns 0 if ok */ -bool select_send::send_data(List<Item> &items) +int select_send::send_data(List<Item> &items) { if (unit->offset_limit_cnt) { // using limit offset,count @@ -2199,7 +2199,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) (int) (uchar) (x) == line_sep_char || \ !(x)) -bool select_export::send_data(List<Item> &items) +int select_export::send_data(List<Item> &items) { DBUG_ENTER("select_export::send_data"); @@ -2456,7 +2456,7 @@ select_dump::prepare(List<Item> &list __attribute__((unused)), } -bool select_dump::send_data(List<Item> &items) +int select_dump::send_data(List<Item> &items) { List_iterator_fast<Item> li(items); char buff[MAX_FIELD_WIDTH]; @@ -2501,7 +2501,7 @@ select_subselect::select_subselect(Item_subselect *item_arg) } -bool select_singlerow_subselect::send_data(List<Item> &items) +int select_singlerow_subselect::send_data(List<Item> &items) { DBUG_ENTER("select_singlerow_subselect::send_data"); Item_singlerow_subselect *it= (Item_singlerow_subselect *)item; @@ -2532,7 +2532,7 @@ void select_max_min_finder_subselect::cleanup() } -bool select_max_min_finder_subselect::send_data(List<Item> &items) +int select_max_min_finder_subselect::send_data(List<Item> &items) { DBUG_ENTER("select_max_min_finder_subselect::send_data"); Item_maxmin_subselect *it= (Item_maxmin_subselect *)item; @@ -2636,7 +2636,7 @@ bool select_max_min_finder_subselect::cmp_str() sortcmp(val1, val2, cache->collation.collation) < 0); } -bool select_exists_subselect::send_data(List<Item> &items) +int select_exists_subselect::send_data(List<Item> &items) { DBUG_ENTER("select_exists_subselect::send_data"); Item_exists_subselect *it= (Item_exists_subselect *)item; @@ -2988,7 +2988,7 @@ Statement_map::~Statement_map() hash_free(&st_hash); } -bool select_dumpvar::send_data(List<Item> &items) +int select_dumpvar::send_data(List<Item> &items) { List_iterator_fast<my_var> var_li(var_list); List_iterator<Item> it(items); @@ -3091,15 +3091,16 @@ void select_materialize_with_stats::cleanup() @return FALSE on success */ -bool select_materialize_with_stats::send_data(List<Item> &items) +int select_materialize_with_stats::send_data(List<Item> &items) { List_iterator_fast<Item> item_it(items); Item *cur_item; Column_statistics *cur_col_stat= col_stat; uint nulls_in_row= 0; + int res; - if (select_union::send_data(items)) - return 1; + if ((res= select_union::send_data(items))) + return res; /* Skip duplicate rows. */ if (write_err == HA_ERR_FOUND_DUPP_KEY || write_err == HA_ERR_FOUND_DUPP_UNIQUE) diff --git a/sql/sql_class.h b/sql/sql_class.h index fd932ef59db..0ed27ae2825 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2197,7 +2197,7 @@ public: /*TODO: this will be obsolete when we have support for 64 bit my_time_t */ inline bool is_valid_time() { - return (start_time < (time_t) MY_TIME_T_MAX); + return (IS_TIME_T_VALID_FOR_TIMESTAMP(start_time)); } void set_time_after_lock() { utime_after_lock= my_micro_time(); } ulonglong current_utime() { return my_micro_time(); } @@ -2685,7 +2685,11 @@ public: virtual uint field_count(List<Item> &fields) const { return fields.elements; } virtual bool send_fields(List<Item> &list, uint flags)=0; - virtual bool send_data(List<Item> &items)=0; + /* + send_data returns 0 on ok, 1 on error and -1 if data was ignored, for + example for a duplicate row entry written to a temp table. 
+ */ + virtual int send_data(List<Item> &items)=0; virtual bool initialize_tables (JOIN *join=0) { return 0; } virtual void send_error(uint errcode,const char *err); virtual bool send_eof()=0; @@ -2743,7 +2747,7 @@ class select_send :public select_result { public: select_send() :is_result_set_started(FALSE) {} bool send_fields(List<Item> &list, uint flags); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool send_eof(); virtual bool check_simple_select() const { return FALSE; } void abort(); @@ -2814,7 +2818,7 @@ public: } ~select_export(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); }; @@ -2831,7 +2835,7 @@ public: nest_level= nest_level_arg; } int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); }; @@ -2852,7 +2856,7 @@ public: ~select_insert(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); virtual int prepare2(void); - bool send_data(List<Item> &items); + virtual int send_data(List<Item> &items); virtual void store_values(List<Item> &values); virtual bool can_rollback_data() { return 0; } void send_error(uint errcode,const char *err); @@ -3021,7 +3025,7 @@ public: select_union() :write_err(0),table(0) { tmp_table_param.init(); } int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool send_eof(); bool flush(); void cleanup(); @@ -3037,7 +3041,7 @@ protected: Item_subselect *item; public: select_subselect(Item_subselect *item); - bool send_data(List<Item> &items)=0; + int send_data(List<Item> &items)=0; bool send_eof() { return 0; }; }; @@ -3048,7 +3052,7 @@ public: select_singlerow_subselect(Item_subselect *item_arg) :select_subselect(item_arg) {} - bool send_data(List<Item> &items); + int send_data(List<Item> &items); }; @@ -3096,7 +3100,7 @@ public: bool is_distinct, ulonglong options, const char *alias, bool bit_fields_as_long); bool init_result_table(ulonglong select_options); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); void cleanup(); ha_rows get_null_count_of_col(uint idx) { @@ -3128,7 +3132,7 @@ public: :select_subselect(item_arg), cache(0), fmax(mx) {} void cleanup(); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool cmp_real(); bool cmp_int(); bool cmp_decimal(); @@ -3141,7 +3145,7 @@ class select_exists_subselect :public select_subselect public: select_exists_subselect(Item_subselect *item_arg) :select_subselect(item_arg){} - bool send_data(List<Item> &items); + int send_data(List<Item> &items); }; @@ -3389,7 +3393,7 @@ public: multi_delete(TABLE_LIST *dt, uint num_of_tables); ~multi_delete(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); int do_deletes(); @@ -3433,7 +3437,7 @@ public: enum_duplicates handle_duplicates, bool ignore); ~multi_update(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); int do_updates(); @@ -3477,7 +3481,7 @@ public: } ~select_dumpvar() {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); - bool send_data(List<Item> &items); + int send_data(List<Item> &items); bool send_eof(); virtual bool check_simple_select() const; void 
cleanup(); diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 1c2ae915259..def0c8dd951 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -780,8 +780,23 @@ void update_global_user_stats(THD *thd, bool create_user, time_t now) } -void thd_init_client_charset(THD *thd, uint cs_number) +/** + Set thread character set variables from the given ID + + @param thd thread handle + @param cs_number character set and collation ID + + @retval 0 OK; character_set_client, collation_connection and + character_set_results are set to the new value, + or to the default global values. + + @retval 1 error, e.g. the given ID is not supported by parser. + Corresponding SQL error is sent. +*/ + +bool thd_init_client_charset(THD *thd, uint cs_number) { + CHARSET_INFO *cs; /* Use server character set and collation if - opt_character_set_client_handshake is not set @@ -790,10 +805,10 @@ void thd_init_client_charset(THD *thd, uint cs_number) - client character set doesn't exists in server */ if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !(cs= get_charset(cs_number, MYF(0))) || !my_strcasecmp(&my_charset_latin1, global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) + cs->name)) { thd->variables.character_set_client= global_system_variables.character_set_client; @@ -804,10 +819,18 @@ void thd_init_client_charset(THD *thd, uint cs_number) } else { + if (!is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + return true; + } thd->variables.character_set_results= thd->variables.collation_connection= - thd->variables.character_set_client; + thd->variables.character_set_client= cs; } + return false; } diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index 308c49fc15c..274d418dc19 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -662,7 +662,7 @@ void Materialized_cursor::fetch(ulong num_rows) If network write failed (i.e. due to a closed socked), the error has already been set. Just return. */ - if (result->send_data(item_list)) + if (result->send_data(item_list) > 0) return; } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e2cb17090a1..afb45e730a3 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -758,7 +758,7 @@ multi_delete::~multi_delete() } -bool multi_delete::send_data(List<Item> &values) +int multi_delete::send_data(List<Item> &values) { int secure_counter= delete_while_scanning ? 
-1 : 0; TABLE_LIST *del_table; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 05804c2ee7e..bd28ab6e9b2 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -904,7 +904,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ query_cache_invalidate3(thd, table_list, 1); } - if ((changed && error <= 0) || + if (error <= 0 || thd->transaction.stmt.modified_non_trans_table || was_insert_delayed) { @@ -3200,7 +3200,7 @@ select_insert::~select_insert() } -bool select_insert::send_data(List<Item> &values) +int select_insert::send_data(List<Item> &values) { DBUG_ENTER("select_insert::send_data"); bool error=0; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index d6276d2f47c..cc1f046b5f0 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1079,9 +1079,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),buffer(0),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1108,12 +1109,9 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; - /* Set of a stack for unget if long terminators */ - uint length=max(field_term_length,line_term_length)+1; + uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); @@ -1155,11 +1153,8 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error) - { - if (need_end_io_cache) - ::end_io_cache(&cache); - } + if (need_end_io_cache) + ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index e2031c129b0..2699443ebd0 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1133,7 +1133,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, uint save_db_length= thd->db_length; char *save_db= thd->db; USER_CONN *save_user_connect= thd->user_connect; - Security_context save_security_ctx= *thd->security_ctx; + Security_context save_security_ctx= *thd->security_ctx; CHARSET_INFO *save_character_set_client= thd->variables.character_set_client; CHARSET_INFO *save_collation_connection= @@ -1141,8 +1141,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, CHARSET_INFO *save_character_set_results= thd->variables.character_set_results; + /* Ensure we don't free security_ctx->user in case we have to revert */ + thd->security_ctx->user= 0; + if (acl_authenticate(thd, 0, packet_length)) { + /* Free user if allocated by acl_authenticate */ x_free(thd->security_ctx->user); *thd->security_ctx= save_security_ctx; thd->user_connect= save_user_connect; diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 1c78f6a3613..15cae73844f 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,4 +1,5 @@ -/* Copyright 2005-2008 MySQL AB, 2008 
Sun Microsystems, Inc. +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -761,6 +762,9 @@ static bool handle_list_of_fields(List_iterator<char> it, bool result; char *field_name; bool is_list_empty= TRUE; + int fields_handled = 0; + char* field_name_array[MAX_KEY]; + DBUG_ENTER("handle_list_of_fields"); while ((field_name= it++)) @@ -776,6 +780,25 @@ static bool handle_list_of_fields(List_iterator<char> it, result= TRUE; goto end; } + + /* + Check for duplicate fields in the list. + Assuming that there are not many fields in the partition key list. + If there were, it would be better to replace the for-loop + with a more efficient algorithm. + */ + + field_name_array[fields_handled] = field_name; + for (int i = 0; i < fields_handled; ++i) + { + if (my_strcasecmp(system_charset_info, + field_name_array[i], field_name) == 0) + { + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + } + fields_handled++; } if (is_list_empty) { diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index db623133cb1..2b478508170 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2002, 2010, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -107,7 +108,7 @@ class Select_fetch_protocol_binary: public select_send public: Select_fetch_protocol_binary(THD *thd); virtual bool send_fields(List<Item> &list, uint flags); - virtual bool send_data(List<Item> &items); + virtual int send_data(List<Item> &items); virtual bool send_eof(); #ifdef EMBEDDED_LIBRARY void begin_dataset() @@ -2793,6 +2794,32 @@ void mysql_sql_stmt_close(THD *thd) } } + +class Set_longdata_error_handler : public Internal_error_handler +{ +public: + Set_longdata_error_handler(Prepared_statement *statement) + : stmt(statement) + { } + +public: + bool handle_error(uint sql_errno, + const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *) + { + stmt->state= Query_arena::ERROR; + stmt->last_errno= sql_errno; + strnmov(stmt->last_error, message, MYSQL_ERRMSG_SIZE); + + return TRUE; + } + +private: + Prepared_statement *stmt; +}; + + /** Handle long data in pieces from client. @@ -2849,16 +2876,19 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) param= stmt->param_array[param_number]; + Set_longdata_error_handler err_handler(stmt); + /* + Install handler that will catch any errors that can be generated + during execution of Item_param::set_longdata() and propagate + them to Statement::last_error. 
+ */ + thd->push_internal_handler(&err_handler); #ifndef EMBEDDED_LIBRARY - if (param->set_longdata(packet, (ulong) (packet_end - packet))) + param->set_longdata(packet, (ulong) (packet_end - packet)); #else - if (param->set_longdata(thd->extra_data, thd->extra_length)) + param->set_longdata(thd->extra_data, thd->extra_length); #endif - { - stmt->state= Query_arena::ERROR; - stmt->last_errno= ER_OUTOFMEMORY; - sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); - } + thd->pop_internal_handler(); general_log_print(thd, thd->command, NullS); @@ -2899,11 +2929,11 @@ bool Select_fetch_protocol_binary::send_eof() } -bool +int Select_fetch_protocol_binary::send_data(List<Item> &fields) { Protocol *save_protocol= thd->protocol; - bool rc; + int rc; thd->protocol= &protocol; rc= select_send::send_data(fields); @@ -3320,6 +3350,13 @@ Prepared_statement::execute_loop(String *expanded_query, bool error; int reprepare_attempt= 0; + /* Check if we got an error when sending long data */ + if (state == Query_arena::ERROR) + { + my_message(last_errno, last_error, MYF(0)); + return TRUE; + } + if (set_parameters(expanded_query, packet, packet_end)) return TRUE; @@ -3560,12 +3597,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) status_var_increment(thd->status_var.com_stmt_execute); - /* Check if we got an error when sending long data */ - if (state == Query_arena::ERROR) - { - my_message(last_errno, last_error, MYF(0)); - return TRUE; - } if (flags & (uint) IS_IN_USE) { my_error(ER_PS_NO_RECURSION, MYF(0)); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 8b6ba0e44e5..bce687ebcb9 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB & Sasha +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -21,6 +22,7 @@ #include "log_event.h" #include "rpl_filter.h" #include <my_dir.h> +#include "debug_sync.h" int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; @@ -464,11 +466,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, pthread_mutex_t *log_lock; bool binlog_can_be_corrupted= FALSE; uint8 current_checksum_alg= BINLOG_CHECKSUM_ALG_UNDEF; - + int old_max_allowed_packet= thd->variables.max_allowed_packet; #ifndef DBUG_OFF int left_events = max_binlog_dump_events; #endif - int old_max_allowed_packet= thd->variables.max_allowed_packet; DBUG_ENTER("mysql_binlog_send"); DBUG_PRINT("enter",("log_ident: '%s' pos: %ld", log_ident, (long) pos)); @@ -563,7 +564,7 @@ impossible position"; and fake Rotates. 
*/ if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg, - get_binlog_checksum_value_at_connect(current_thd))) + get_binlog_checksum_value_at_connect(thd))) { /* This error code is not perfect, as fake_rotate_event() does not @@ -680,9 +681,11 @@ impossible position"; while (!net->error && net->vio != 0 && !thd->killed) { + my_off_t prev_pos= pos; while (!(error = Log_event::read_log_event(&log, packet, log_lock, current_checksum_alg))) { + prev_pos= my_b_tell(&log); #ifndef DBUG_OFF if (max_binlog_dump_events && !left_events--) { @@ -692,8 +695,21 @@ impossible position"; goto err; } #endif - - if ((uchar)(*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + const char act[]= + "now " + "wait_for signal.continue"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(thd, + STRING_WITH_LEN(act))); + } + }); + + if ((uchar) (*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) { current_checksum_alg= get_checksum_alg(packet->ptr() + 1, packet->length() - 1); @@ -729,15 +745,23 @@ impossible position"; goto err; } + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + } + }); + DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] )); - if ((uchar)(*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) + if ((uchar) (*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) { if (send_file(thd)) { - errmsg = "failed in send_file()"; - my_errno= ER_UNKNOWN_ERROR; - goto err; + errmsg = "failed in send_file()"; + my_errno= ER_UNKNOWN_ERROR; + goto err; } } } @@ -749,8 +773,13 @@ impossible position"; of a crash ?). treat any corruption as EOF */ if (binlog_can_be_corrupted && - (error != LOG_READ_MEM && error != LOG_READ_CHECKSUM_FAILURE)) + (error != LOG_READ_MEM && error != LOG_READ_CHECKSUM_FAILURE && + error != LOG_READ_EOF)) + { + my_b_seek(&log, prev_pos); error=LOG_READ_EOF; + } + /* TODO: now that we are logging the offset, check to make sure the recorded offset and the actual match. diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5e84138bf9c..47c9d372086 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -291,57 +291,60 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, } -/* +/** Fix fields referenced from inner selects. - SYNOPSIS - fix_inner_refs() - thd Thread handle - all_fields List of all fields used in select - select Current select - ref_pointer_array Array of references to Items used in current select - group_list GROUP BY list (is NULL by default) + @param thd Thread handle + @param all_fields List of all fields used in select + @param select Current select + @param ref_pointer_array Array of references to Items used in current select + @param group_list GROUP BY list (is NULL by default) - DESCRIPTION - The function serves 3 purposes - adds fields referenced from inner - selects to the current select list, resolves which class to use - to access referenced item (Item_ref of Item_direct_ref) and fixes - references (Item_ref objects) to these fields. + @details + The function serves 3 purposes + + - adds fields referenced from inner query blocks to the current select list - If a field isn't already in the select list and the ref_pointer_array + - Decides which class to use to reference the items (Item_ref or + Item_direct_ref) + + - fixes references (Item_ref objects) to these fields. 
+ + If a field isn't already on the select list and the ref_pointer_array is provided then it is added to the all_fields list and the pointer to it is saved in the ref_pointer_array. The class to access the outer field is determined by the following rules: - 1. If the outer field isn't used under an aggregate function - then the Item_ref class should be used. - 2. If the outer field is used under an aggregate function and this - function is aggregated in the select where the outer field was - resolved or in some more inner select then the Item_direct_ref - class should be used. - It used used also if we are grouping by a subquery that refers - this outer field. + + -#. If the outer field isn't used under an aggregate function then the + Item_ref class should be used. + + -#. If the outer field is used under an aggregate function and this + function is, in turn, aggregated in the query block where the outer + field was resolved or some query nested therein, then the + Item_direct_ref class should be used. Also it should be used if we are + grouping by a subquery containing the outer field. + The resolution is done here and not at the fix_fields() stage as - it can be done only after sum functions are fixed and pulled up to - selects where they are have to be aggregated. + it can be done only after aggregate functions are fixed and pulled up to + selects where they are to be aggregated. + When the class is chosen it substitutes the original field in the Item_outer_ref object. After this we proceed with fixing references (Item_outer_ref objects) to this field from inner subqueries. - RETURN - TRUE an error occured - FALSE ok -*/ + @return Status + @retval true An error occured. + @retval false OK. + */ bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, Item **ref_pointer_array) { Item_outer_ref *ref; - bool res= FALSE; - bool direct_ref= FALSE; /* Mark the references from the inner_refs_list that are occurred in @@ -358,6 +361,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, while ((ref= ref_it++)) { + bool direct_ref= false; Item *item= ref->outer_ref; Item **item_ref= ref->ref; Item_ref *new_ref; @@ -416,7 +420,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, return TRUE; thd->used_tables|= item->used_tables(); } - return res; + return false; } /** @@ -924,7 +928,7 @@ JOIN::optimize() If all items were resolved by opt_sum_query, there is no need to open any tables. */ - if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) + if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { @@ -1930,7 +1934,7 @@ JOIN::exec() { if (do_send_rows && (procedure ? 
(procedure->send_row(procedure_fields_list) || - procedure->end_of_records()) : result->send_data(fields_list))) + procedure->end_of_records()) : result->send_data(fields_list)> 0)) error= 1; else { @@ -2042,7 +2046,11 @@ JOIN::exec() JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables; first_tab->sorted= test(first_tab->loosescan_match_tab); } - if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) + + Procedure *save_proc= curr_join->procedure; + tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0); + curr_join->procedure= save_proc; + if (tmp_error) { error= tmp_error; DBUG_VOID_RETURN; @@ -3567,6 +3575,7 @@ static uint get_semi_join_select_list_index(Field *field) @param field Field used in comparision @param eq_func True if we used =, <=> or IS NULL @param value Value used for comparison with field + @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates @@ -3666,26 +3675,7 @@ add_key_field(JOIN *join, eq_func is NEVER true when num_values > 1 */ if (!eq_func) - { - /* - Additional optimization: if we're processing - "t.key BETWEEN c1 AND c1" then proceed as if we were processing - "t.key = c1". - TODO: This is a very limited fix. A more generic fix is possible. - There are 2 options: - A) Make equality propagation code be able to handle BETWEEN - (including cases like t1.key BETWEEN t2.key AND t3.key) - B) Make range optimizer to infer additional "t.key = c" equalities - and use them in equality propagation process (see details in - OptimizerKBAndTodo) - */ - if ((cond->functype() != Item_func::BETWEEN) || - ((Item_func_between*) cond)->negated || - !value[0]->eq(value[1], field->binary())) - return; - eq_func= TRUE; - } - + return; if (field->result_type() == STRING_RESULT) { if ((*value)->result_type() != STRING_RESULT) @@ -3908,9 +3898,65 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, case Item_func::OPTIMIZE_KEY: { Item **values; - // BETWEEN, IN, NE - if (is_local_field (cond_func->key_item()) && - !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) + /* + Build list of possible keys for 'a BETWEEN low AND high'. + It is handled similar to the equivalent condition + 'a >= low AND a <= high': + */ + if (cond_func->functype() == Item_func::BETWEEN) + { + Item_field *field_item; + bool equal_func= FALSE; + uint num_values= 2; + values= cond_func->arguments(); + + bool binary_cmp= (values[0]->real_item()->type() == Item::FIELD_ITEM) + ? ((Item_field*)values[0]->real_item())->field->binary() + : TRUE; + + /* + Additional optimization: If 'low = high': + Handle as if the condition was "t.key = low". 
+ */ + if (!((Item_func_between*)cond_func)->negated && + values[1]->eq(values[2], binary_cmp)) + { + equal_func= TRUE; + num_values= 1; + } + + /* + Append keys for 'field <cmp> value[]' if the + condition is of the form:: + '<field> BETWEEN value[1] AND value[2]' + */ + if (is_local_field(values[0])) + { + field_item= (Item_field *) (values[0]->real_item()); + add_key_equal_fields(join, key_fields, *and_level, cond_func, + field_item, equal_func, &values[1], + num_values, usable_tables, sargables); + } + /* + Append keys for 'value[0] <cmp> field' if the + condition is of the form: + 'value[0] BETWEEN field1 AND field2' + */ + for (uint i= 1; i <= num_values; i++) + { + if (is_local_field(values[i])) + { + field_item= (Item_field *) (values[i]->real_item()); + add_key_equal_fields(join, key_fields, *and_level, cond_func, + field_item, equal_func, values, + 1, usable_tables, sargables); + } + } + } // if ( ... Item_func::BETWEEN) + + // IN, NE + else if (is_local_field (cond_func->key_item()) && + !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) { values= cond_func->arguments()+1; if (cond_func->functype() == Item_func::NE_FUNC && @@ -3924,21 +3970,6 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, cond_func->argument_count()-1, usable_tables, sargables); } - if (cond_func->functype() == Item_func::BETWEEN) - { - values= cond_func->arguments(); - for (uint i= 1 ; i < cond_func->argument_count() ; i++) - { - Item_field *field_item; - if (is_local_field (cond_func->arguments()[i])) - { - field_item= (Item_field *) (cond_func->arguments()[i]->real_item()); - add_key_equal_fields(join, key_fields, *and_level, cond_func, - field_item, 0, values, 1, usable_tables, - sargables); - } - } - } break; } case Item_func::OPTIMIZE_OP: @@ -3949,7 +3980,8 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, if (is_local_field (cond_func->arguments()[0])) { add_key_equal_fields(join, key_fields, *and_level, cond_func, - (Item_field*) (cond_func->arguments()[0])->real_item(), + (Item_field*) (cond_func->arguments()[0])-> + real_item(), equal_func, cond_func->arguments()+1, 1, usable_tables, sargables); @@ -3958,7 +3990,8 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, cond_func->functype() != Item_func::LIKE_FUNC) { add_key_equal_fields(join, key_fields, *and_level, cond_func, - (Item_field*) (cond_func->arguments()[1])->real_item(), + (Item_field*) (cond_func->arguments()[1])-> + real_item(), equal_func, cond_func->arguments(),1,usable_tables, sargables); @@ -3974,8 +4007,9 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, if (unlikely(!tmp)) // Should never be true return; add_key_equal_fields(join, key_fields, *and_level, cond_func, - (Item_field*) (cond_func->arguments()[0])->real_item(), - cond_func->functype() == Item_func::ISNULL_FUNC, + (Item_field*) (cond_func->arguments()[0])-> + real_item(), + cond_func->functype() == Item_func::ISNULL_FUNC, &tmp, 1, usable_tables, sargables); } break; @@ -7332,7 +7366,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) */ if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0))) DBUG_RETURN(1); - sel->cond= tmp; + sel->cond= sel->original_cond= tmp; tab->set_select_cond(tmp, __LINE__); /* Push condition to storage engine if this is enabled and the condition is not guarded */ @@ -7364,7 +7398,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) with key reading */ if ((tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF && tab->type != JT_FT && 
- (tab->type != JT_REF || + ((tab->type != JT_REF && tab->type != JT_CONST) || (uint) tab->ref.key == tab->quick->index)) || is_hj) { sel->quick=tab->quick; // Use value from get_quick_... @@ -7418,7 +7452,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) sel->cond->quick_fix_field(); if (sel->test_quick_select(thd, tab->keys, - (used_tables & ~ current_map) | OUTER_REF_TABLE_BIT, + ((used_tables & ~ current_map) | + OUTER_REF_TABLE_BIT), (join->select_options & OPTION_FOUND_ROWS ? HA_POS_ERROR : @@ -8282,7 +8317,6 @@ static bool make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) { uint i; - DBUG_ENTER("make_join_readinfo"); bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); @@ -8300,7 +8334,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) /* The approximation below for partial join cardinality is not good because - it does not take into account some pushdown predicates - - it does not differentiate between inner joins, outer joins and semi-joins. + - it does not differentiate between inner joins, outer joins and + semi-joins. Later it should be improved. */ JOIN_TAB *tab=join->join_tab+i; @@ -8435,14 +8470,16 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) if (tab->select && tab->select->quick) { if (statistics) - status_var_increment(join->thd->status_var.select_full_range_join_count); + status_var_increment(join->thd->status_var. + select_full_range_join_count); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) { - status_var_increment(join->thd->status_var.select_full_join_count); + status_var_increment(join->thd->status_var. + select_full_join_count); join->thd->query_plan_flags|= QPLAN_FULL_JOIN; } } @@ -8480,12 +8517,14 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) break; case JT_FT: break; + /* purecov: begin deadcode */ default: - DBUG_PRINT("error",("Table type %d found",tab->type)); /* purecov: deadcode */ - break; /* purecov: deadcode */ + DBUG_PRINT("error",("Table type %d found",tab->type)); + break; case JT_UNKNOWN: case JT_MAYBE_REF: - abort(); /* purecov: deadcode */ + abort(); + /* purecov: end */ } } join->join_tab[join->tables-1].next_select=0; /* Set by do_select */ @@ -9111,7 +9150,7 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, Item *item; while ((item= it++)) item->no_rows_in_result(); - send_error= result->send_data(fields); + send_error= result->send_data(fields) > 0; } if (!send_error) result->send_eof(); // Should be safe @@ -13488,7 +13527,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) for (TABLE_LIST *table= join->select_lex->leaf_tables; table; table= table->next_leaf) mark_as_null_row(table->table); - rc= join->result->send_data(*columns_list); + rc= join->result->send_data(*columns_list) > 0; } } } @@ -14925,16 +14964,21 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_ENTER("end_send"); if (!end_of_records) { - int error; if (join->having && join->having->val_int() == 0) DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - error=0; if (join->procedure) - error=join->procedure->send_row(join->procedure_fields_list); - else if (join->do_send_rows) - error=join->result->send_data(*join->fields); - if (error) - DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ + { + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + if 
(join->do_send_rows) + { + int error; + /* result < 0 if row was not accepted and should not be counted */ + if ((error= join->result->send_data(*join->fields))) + DBUG_RETURN(error < 0 ? NESTED_LOOP_OK : NESTED_LOOP_ERROR); + } if (++join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { @@ -15044,7 +15088,15 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), else { if (join->do_send_rows) - error=join->result->send_data(*join->fields) ? 1 : 0; + { + error= join->result->send_data(*join->fields); + if (error < 0) + { + /* Duplicate row, don't count */ + join->send_records--; + error= 0; + } + } join->send_records++; } if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) @@ -15453,40 +15505,43 @@ bool test_if_ref(Item *root_cond, Item_field *left_item,Item *right_item) } - -/* - Extract a condition that can be checked after reading given table - - SYNOPSIS - make_cond_for_table() - cond Condition to analyze - tables Tables for which "current field values" are available - used_table Table that we're extracting the condition for (may - also include PSEUDO_TABLE_BITS - exclude_expensive_cond Do not push expensive conditions - retain_ref_cond Retain ref conditions - - DESCRIPTION - Extract the condition that can be checked after reading the table - specified in 'used_table', given that current-field values for tables - specified in 'tables' bitmap are available. - - The function assumes that - - Constant parts of the condition has already been checked. - - Condition that could be checked for tables in 'tables' has already - been checked. - - The function takes into account that some parts of the condition are - guaranteed to be true by employed 'ref' access methods (the code that - does this is located at the end, search down for "EQ_FUNC"). - - - SEE ALSO - make_cond_for_info_schema uses similar algorithm - - RETURN - Extracted condition -*/ +/** + Extract a condition that can be checked after reading given table + @fn make_cond_for_table() + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. + - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. 
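To make the extraction rule described above concrete, here is a minimal standalone sketch (not MariaDB code: table sets are modelled as plain bitmasks and the condition as a flat AND-list of conjuncts; every name below is invented). The real make_cond_for_table() works on nested Item trees and additionally honours PSEUDO_TABLE_BITS and the guarantees provided by 'ref' access, but the core filtering idea is the same.

#include <cstdint>
#include <vector>

typedef uint64_t table_map_t;   // stand-in for MariaDB's table_map

struct Conjunct
{
  table_map_t used_tables;      // bitmap of tables this predicate reads
  const char *text;             // for illustration only
};

/*
  Return the conjuncts that become checkable once 'used_table' has been
  read, given that rows for the tables in 'tables' are already available.
  used_table == 0 means: take everything that needs only 'tables' (or no
  table at all).
*/
static std::vector<Conjunct>
cond_for_table(const std::vector<Conjunct> &cond,
               table_map_t tables, table_map_t used_table)
{
  std::vector<Conjunct> out;
  for (const Conjunct &c : cond)
  {
    if (used_table && !(c.used_tables & used_table))
      continue;                         // checkable earlier; already handled
    if ((c.used_tables & ~tables) == 0) // everything it needs is available
      out.push_back(c);
  }
  return out;
}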
+*/ static Item * make_cond_for_table(Item *cond, table_map tables, table_map used_table, @@ -16154,13 +16209,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, { int ref_key; uint ref_key_parts; - int order_direction; + int order_direction= 0; uint used_key_parts; TABLE *table=tab->table; SQL_SELECT *select=tab->select; key_map usable_keys; - QUICK_SELECT_I *save_quick= 0; - COND *orig_select_cond= 0; + QUICK_SELECT_I *save_quick= select ? select->quick : 0; + int best_key= -1; DBUG_ENTER("test_if_skip_sort_order"); LINT_INIT(ref_key_parts); @@ -16180,7 +16235,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } usable_keys.intersect(((Item_field*) item)->field->part_of_sortkey); if (usable_keys.is_clear_all()) - goto use_filesort; // No usable keys + goto use_filesort; // No usable keys } ref_key= -1; @@ -16195,7 +16250,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, else if (select && select->quick) // Range found by opt_range { int quick_type= select->quick->get_type(); - save_quick= select->quick; /* assume results are not ordered when index merge is used TODO: sergeyp: Results of all index merge selects actually are ordered @@ -16228,15 +16282,10 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ if (table->covering_keys.is_set(ref_key)) usable_keys.intersect(table->covering_keys); - if (tab->pre_idx_push_select_cond) - orig_select_cond= tab->set_cond(tab->pre_idx_push_select_cond); if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts, &usable_keys)) < MAX_KEY) { - /* Found key that can be used to retrieve data in sorted order */ - //psergey-mrr:if (tab->pre_idx_push_select_cond) - // tab->select_cond= tab->select->cond= tab->pre_idx_push_select_cond; if (tab->ref.key >= 0) { /* @@ -16263,22 +16312,36 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, The range optimizer constructed QUICK_RANGE for ref_key, and we want to use instead new_ref_key as the index. We can't just change the index of the quick select, because this may - result in an incosistent QUICK_SELECT object. Below we + result in an inconsistent QUICK_SELECT object. Below we create a new QUICK_SELECT from scratch so that all its - parameres are set correctly by the range optimizer. + parameters are set correctly by the range optimizer. */ key_map new_ref_key_map; + COND *save_cond; + bool res; new_ref_key_map.clear_all(); // Force the creation of quick select new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key. - if (select->test_quick_select(tab->join->thd, new_ref_key_map, 0, - (tab->join->select_options & - OPTION_FOUND_ROWS) ? - HA_POS_ERROR : - tab->join->unit->select_limit_cnt,0, - TRUE) <= - 0) + /* Reset quick; This will be restored in 'use_filesort' if needed */ + select->quick= 0; + save_cond= select->cond; + select->cond= select->original_cond; + res= select->test_quick_select(tab->join->thd, new_ref_key_map, 0, + (tab->join->select_options & + OPTION_FOUND_ROWS) ? + HA_POS_ERROR : + tab->join->unit->select_limit_cnt,0, + TRUE) <= 0; + if (res) + { + select->cond= save_cond; goto use_filesort; + } + /* + We don't restore select->cond as we want to use the + original condition as index condition pushdown is not + active for the new index. 
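A rough sketch of the cond/original_cond bookkeeping relied on in the hunk above, under the assumption (consistent with the make_join_select() and add_ref_to_table_cond() hunks in this patch) that SQL_SELECT now carries both the possibly pushed-down condition and the original one. The types and helper below are invented for illustration only; the stub stands in for test_quick_select().

struct Cond { };

struct SelectCtx
{
  Cond *cond;            // may carry index-condition-pushdown rewrites
  Cond *original_cond;   // condition as it was before any pushdown
};

// Stub standing in for the range optimizer; assume it returns true when a
// usable range plan exists for 'new_index'.
static bool test_quick_for_index(SelectCtx *, unsigned /*new_index*/)
{
  return true;
}

static bool replan_for_index(SelectCtx *sel, unsigned new_index)
{
  Cond *save_cond= sel->cond;
  sel->cond= sel->original_cond;     // plan against the un-pushed condition
  if (!test_quick_for_index(sel, new_index))
  {
    sel->cond= save_cond;            // failed: keep the previous setup
    return false;
  }
  /*
    Success: stay on original_cond, because the pushed-down form of the
    condition was specific to the index that is being abandoned.
  */
  return true;
}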
+ */ } ref_key= new_ref_key; } @@ -16303,7 +16366,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, int best_key_direction; ha_rows best_records; double read_time; - int best_key= -1; bool is_best_covering= FALSE; double fanout= 1; JOIN *join= tab->join; @@ -16521,84 +16583,21 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, tab->join->tables > tab->join->const_tables + 1) && ((unsigned) best_key != table->s->primary_key || !table->file->primary_key_is_clustered())) - DBUG_RETURN(0); + goto use_filesort; if (best_key >= 0) { - bool quick_created= FALSE; if (table->quick_keys.is_set(best_key) && best_key != ref_key) { key_map map; map.clear_all(); // Force the creation of quick select map.set_bit(best_key); // only best_key. - quick_created= - select->test_quick_select(join->thd, map, 0, - join->select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : - join->unit->select_limit_cnt, - TRUE, FALSE) > 0; - } - if (!no_changes) - { - /* - If ref_key used index tree reading only ('Using index' in EXPLAIN), - and best_key doesn't, then revert the decision. - */ - if (!table->covering_keys.is_set(best_key)) - table->disable_keyread(); - if (!quick_created) - { - tab->index= best_key; - tab->read_first_record= best_key_direction > 0 ? - join_read_first:join_read_last; - tab->type=JT_NEXT; // Read with index_first(), index_next() - if (select && select->quick) - { - delete select->quick; - select->quick= 0; - } - if (table->covering_keys.is_set(best_key) && ! table->key_read) - table->enable_keyread(); - if (tab->pre_idx_push_select_cond) - { - COND *tmp_cond= tab->pre_idx_push_select_cond; - if (orig_select_cond) - { - tmp_cond= and_conds(tmp_cond, orig_select_cond); - tmp_cond->quick_fix_field(); - } - tab->set_cond(tmp_cond); - /* orig_select_cond was merged, no need to restore original one. */ - orig_select_cond= 0; - } - table->file->ha_index_or_rnd_end(); - if (join->select_options & SELECT_DESCRIBE) - { - tab->ref.key= -1; - tab->ref.key_parts= 0; - if (select_limit < table_records) - tab->limit= select_limit; - } - } - else if (tab->type != JT_ALL) - { - /* - We're about to use a quick access to the table. - We need to change the access method so as the quick access - method is actually used. - */ - DBUG_ASSERT(tab->select->quick); - tab->type=JT_ALL; - tab->use_quick=1; - tab->ref.key= -1; - tab->ref.key_parts=0; // Don't use ref key. - tab->read_first_record= join_init_read_record; - if (tab->is_using_loose_index_scan()) - join->tmp_table_param.precomputed_group_by= TRUE; - /* - TODO: update the number of records in join->best_positions[tablenr] - */ - } + select->quick= 0; + select->test_quick_select(join->thd, map, 0, + join->select_options & OPTION_FOUND_ROWS ? + HA_POS_ERROR : + join->unit->select_limit_cnt, + TRUE, FALSE); } order_direction= best_key_direction; /* @@ -16611,68 +16610,177 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, saved_best_key_parts : best_key_parts; } else - goto use_filesort; + goto use_filesort; } check_reverse_order: + DBUG_ASSERT(order_direction != 0); + if (order_direction == -1) // If ORDER BY ... DESC { + int quick_type; if (select && select->quick) { /* Don't reverse the sort order, if it's already done. 
(In some cases test_if_order_by_key() can be called multiple times */ - if (!select->quick->reverse_sorted()) + if (select->quick->reverse_sorted()) + goto skipped_filesort; + + quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + { + tab->limit= 0; + goto use_filesort; // Use filesort + } + } + } + + /* + Update query plan with access pattern for doing ordered access + according to what we have decided above. + */ + if (!no_changes) // We are allowed to update QEP + { + if (best_key >= 0) + { + bool quick_created= + (select && select->quick && select->quick!=save_quick); + + /* + If ref_key used index tree reading only ('Using index' in EXPLAIN), + and best_key doesn't, then revert the decision. + */ + if (!table->covering_keys.is_set(best_key)) + table->disable_keyread(); + if (!quick_created) + { + if (select) // Throw any existing quick select + select->quick= 0; // Cleanup either reset to save_quick, + // or 'delete save_quick' + tab->index= best_key; + tab->read_first_record= order_direction > 0 ? + join_read_first:join_read_last; + tab->type=JT_NEXT; // Read with index_first(), index_next() + + if (table->covering_keys.is_set(best_key) && ! table->key_read) + table->enable_keyread(); + if (tab->pre_idx_push_select_cond) + { + COND *tmp_cond= tab->pre_idx_push_select_cond; + COND *orig_select_cond= tab->select_cond; + + if (orig_select_cond) + { + tmp_cond= and_conds(tmp_cond, orig_select_cond); + tmp_cond->quick_fix_field(); + } + tab->set_cond(tmp_cond); + } + table->file->ha_index_or_rnd_end(); + if (tab->join->select_options & SELECT_DESCRIBE) + { + tab->ref.key= -1; + tab->ref.key_parts= 0; + if (select_limit < table->file->stats.records) + tab->limit= select_limit; + } + } + else if (tab->type != JT_ALL) + { + /* + We're about to use a quick access to the table. + We need to change the access method so as the quick access + method is actually used. + */ + DBUG_ASSERT(tab->select->quick); + tab->type=JT_ALL; + tab->use_quick=1; + tab->ref.key= -1; + tab->ref.key_parts=0; // Don't use ref key. + tab->read_first_record= join_init_read_record; + if (tab->is_using_loose_index_scan()) + tab->join->tmp_table_param.precomputed_group_by= TRUE; + + /* + Restore the original condition as changes done by pushdown + condition are not relevant anymore + */ + if (tab->select) + tab->set_cond(tab->select->original_cond); + + /* + TODO: update the number of records in join->best_positions[tablenr] + */ + } + } // best_key >= 0 + + if (order_direction == -1) // If ORDER BY ... 
DESC + { + if (select && select->quick) { QUICK_SELECT_DESC *tmp; bool error= FALSE; - int quick_type= select->quick->get_type(); - if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || - quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || - quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + + /* ORDER BY range_key DESC */ + tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), + used_key_parts, &error); + if (tmp && select->quick == save_quick) + save_quick= 0; // ::QUICK_SELECT_DESC consumed it + + if (!tmp || error) { + delete tmp; tab->limit= 0; - select->quick= save_quick; - goto use_filesort; // Use filesort + goto use_filesort; // Reverse sort failed -> filesort } - - /* ORDER BY range_key DESC */ - tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), - used_key_parts, &error); - if (!tmp || error) - { - delete tmp; - select->quick= save_quick; - tab->limit= 0; - goto use_filesort; // Reverse sort not supported - } - select->quick=tmp; + select->quick= tmp; } - } - else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && - tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) - { - /* - SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC + else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && + tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) + { + /* + SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC - Use a traversal function that starts by reading the last row - with key part (A) and then traverse the index backwards. - */ - tab->read_first_record= join_read_last_key; - tab->read_record.read_record= join_read_prev_same; + Use a traversal function that starts by reading the last row + with key part (A) and then traverse the index backwards. + */ + tab->read_first_record= join_read_last_key; + tab->read_record.read_record= join_read_prev_same; + } } + else if (select && select->quick) + select->quick->need_sorted_output(); + + } // QEP has been modified + + /* + Cleanup: + We may have both a 'select->quick' and 'save_quick' (original) + at this point. Delete the one that we wan't use. 
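A simplified model of the ownership rule stated in the comment above, with invented minimal types: exactly one of the original quick select (save_quick) and the newly built one (select->quick) may survive, and the loser has to be deleted. This is what the skipped_filesort/use_filesort exits that follow actually do.

struct Quick { };
struct Owner { Quick *quick; };

// Exit path where the new (ordered) quick select is kept.
static void keep_new_quick(Owner *select, Quick *&save_quick)
{
  if (select && save_quick != select->quick)
  {
    delete save_quick;          // original is no longer referenced
    save_quick= 0;
  }
}

// Exit path where ordered access could not be used: fall back to filesort
// with the quick select the caller originally installed.
static void restore_saved_quick(Owner *select, Quick *&save_quick)
{
  if (select && select->quick != save_quick)
  {
    delete select->quick;       // drop the experiment
    select->quick= save_quick;  // reinstate the original
  }
}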
+ */ + +skipped_filesort: + // Keep current (ordered) select->quick + if (select && save_quick != select->quick) + { + delete save_quick; + save_quick= NULL; } - else if (select && select->quick) - select->quick->need_sorted_output(); - if (orig_select_cond) - tab->set_cond(orig_select_cond); DBUG_RETURN(1); + use_filesort: - if (orig_select_cond) - tab->set_cond(orig_select_cond); + // Restore original save_quick + if (select && select->quick != save_quick) + { + delete select->quick; + select->quick= save_quick; + } DBUG_RETURN(0); } @@ -18602,7 +18710,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) { if (join_tab->select->cond) error=(int) cond->add(join_tab->select->cond); - join_tab->select->cond= cond; + join_tab->select->cond= join_tab->select->original_cond= cond; join_tab->set_select_cond(cond, __LINE__); } else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0, @@ -19005,6 +19113,7 @@ int JOIN::rollup_send_data(uint idx) uint i; for (i= send_group_parts ; i-- > idx ; ) { + int res= 0; /* Get reference pointers to sum functions in place */ memcpy((char*) ref_pointer_array, (char*) rollup.ref_pointer_arrays[i], @@ -19012,9 +19121,10 @@ int JOIN::rollup_send_data(uint idx) if ((!having || having->val_int())) { if (send_records < unit->select_limit_cnt && do_send_rows && - result->send_data(rollup.fields[i])) + (res= result->send_data(rollup.fields[i])) > 0) return 1; - send_records++; + if (!res) + send_records++; } } /* Restore ref_pointer_array */ diff --git a/sql/sql_select.h b/sql/sql_select.h index f0eda9671cb..748ae5a1dca 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1037,7 +1037,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, /* functions from opt_sum.cc */ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +int opt_sum_query(THD* thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 833b695e4bf..0886671d8b5 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3792,6 +3792,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) it.rewind(); /* To get access to new elements in basis list */ while ((db_name= it++)) { + /* db_name can be changed in make_table_list() func */ + LEX_STRING orig_db_name= *db_name; + #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!(check_access(thd,SELECT_ACL, db_name->str, &thd->col_access, 0, 1, with_i_schema) || @@ -3859,17 +3862,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) } int res; - LEX_STRING tmp_lex_string, orig_db_name; + LEX_STRING tmp_lex_string; /* Set the parent lex of 'sel' because it is needed by sel.init_query() which is called inside make_table_list. */ thd->no_warnings_for_error= 1; sel.parent_lex= lex; - /* db_name can be changed in make_table_list() func */ - if (!thd->make_lex_string(&orig_db_name, db_name->str, - db_name->length, FALSE)) - goto err; if (make_table_list(thd, &sel, db_name, table_name)) goto err; TABLE_LIST *show_table_list= sel.table_list.first; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index b359b2a7168..6f40d797a5c 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -52,11 +52,33 @@ bool String::real_alloc(uint32 arg_length) } -/* -** Check that string is big enough. 
Set string[alloc_length] to 0 -** (for C functions) -*/ +/** + Allocates a new buffer on the heap for this String. + + - If the String's internal buffer is privately owned and heap allocated, + one of the following is performed. + + - If the requested length is greater than what fits in the buffer, a new + buffer is allocated, data moved and the old buffer freed. + + - If the requested length is less or equal to what fits in the buffer, a + null character is inserted at the appropriate position. + - If the String does not keep a private buffer on the heap, such a buffer + will be allocated and the string copied accoring to its length, as found + in String::length(). + + For C compatibility, the new string buffer is null terminated. + + @param alloc_length The requested string size in characters, excluding any + null terminator. + + @retval false Either the copy operation is complete or, if the size of the + new buffer is smaller than the currently allocated buffer (if one exists), + no allocation occured. + + @retval true An error occured when attempting to allocate memory. +*/ bool String::realloc(uint32 alloc_length) { if (Alloced_length <= alloc_length) @@ -189,6 +211,17 @@ bool String::copy() return FALSE; } +/** + Copies the internal buffer from str. If this String has a private heap + allocated buffer where new data does not fit, a new buffer is allocated + before copying and the old buffer freed. Character set information is also + copied. + + @param str The string whose internal buffer is to be copied. + + @retval false Success. + @retval true Memory allocation failed. +*/ bool String::copy(const String &str) { if (alloc(str.str_length)) diff --git a/sql/sql_string.h b/sql/sql_string.h index c9eaf924e4d..37b351624c5 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -120,6 +120,9 @@ public: inline const char *ptr() const { return Ptr; } inline char *c_ptr() { + DBUG_ASSERT(!alloced || !Ptr || !Alloced_length || + (Alloced_length >= (str_length + 1))); + if (!Ptr || Ptr[str_length]) /* Should be safe */ (void) realloc(str_length); return Ptr; @@ -148,6 +151,16 @@ public: Alloced_length=str.Alloced_length-offset; str_charset=str.str_charset; } + + + /** + Points the internal buffer to the supplied one. The old buffer is freed. + @param str Pointer to the new buffer. + @param arg_length Length of the new buffer in characters, excluding any + null character. + @param cs Character set to use for interpreting string data. + @note The new buffer will not be null terminated. 
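The comments added above pin down a contract that is easy to trip over: String::set() adopts a buffer without writing a terminating NUL, while c_ptr() guarantees termination and reallocates only when it does not find a NUL at Ptr[str_length] (or when nothing is allocated yet). A tiny toy model of that interplay, using invented names rather than the real class, might look like this:

#include <cstdlib>
#include <cstring>

// Toy model (not the real String class): set() adopts a caller-supplied
// buffer without NUL-terminating it; c_ptr() guarantees termination and
// copies only when it has to.
struct TinyString
{
  char   *ptr= nullptr;
  size_t  len= 0;
  bool    owned= false;     // true when ptr was heap-allocated by us

  void set(char *str, size_t arg_length)
  {
    if (owned) free(ptr);
    ptr= str;               // adopted as-is; no NUL is written
    len= arg_length;
    owned= false;
  }

  char *c_ptr()
  {
    // Mirrors the real check: peek one byte past the logical length, which
    // is why the patch above asserts Alloced_length >= str_length + 1.
    if (!ptr || ptr[len] != '\0')
    {
      char *copy= static_cast<char*>(malloc(len + 1));
      if (ptr) memcpy(copy, ptr, len);
      copy[len]= '\0';
      if (owned) free(ptr);
      ptr= copy;
      owned= true;
    }
    return ptr;
  }
};

In this model, adopting a buffer whose following byte happens to be non-zero forces c_ptr() to copy, which is roughly the under-allocation situation the new assertion in the real c_ptr() is meant to catch.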
+ */ inline void set(char *str,uint32 arg_length, CHARSET_INFO *cs) { free(); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index a94ad9f3b4b..898df3dd65e 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -49,7 +49,7 @@ int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u) } -bool select_union::send_data(List<Item> &values) +int select_union::send_data(List<Item> &values) { if (unit->offset_limit_cnt) { // using limit offset,count @@ -62,6 +62,14 @@ bool select_union::send_data(List<Item> &values) if ((write_err= table->file->ha_write_row(table->record[0]))) { + if (write_err == HA_ERR_FOUND_DUPP_KEY) + { + /* + Inform upper level that we found a duplicate key, that should not + be counted as part of limit + */ + return -1; + } /* create_internal_tmp_table_from_heap will generate error if needed */ if (table->file->is_fatal_error(write_err, HA_CHECK_DUP) && create_internal_tmp_table_from_heap(thd, table, diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 0a4d3a4ba80..6ec95989f38 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1689,7 +1689,7 @@ multi_update::~multi_update() } -bool multi_update::send_data(List<Item> &not_used_values) +int multi_update::send_data(List<Item> &not_used_values) { TABLE_LIST *cur_table; DBUG_ENTER("multi_update::send_data"); diff --git a/sql/table.cc b/sql/table.cc index 41fb9a1c8b4..96a64cdeba2 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1,4 +1,5 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
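Several hunks in this merge (select_union::send_data() above, plus end_send(), end_send_group() and JOIN::rollup_send_data() earlier in sql_select.cc) change send_data() from bool to int so that callers can tell three outcomes apart: 0 for a row that was sent and counts, a negative value for a row that was silently rejected (for example a duplicate key in the UNION temporary table) and must not count toward LIMIT, and a positive value for a genuine error. A caller-side sketch, with made-up names standing in for the real interface:

// Hypothetical caller-side handling of the new int send_data() convention.
enum loop_state { LOOP_OK, LOOP_ERROR };

struct RowSink
{
  // Convention: 0 = row sent, < 0 = rejected (do not count), > 0 = error.
  virtual int send_data() = 0;
  virtual ~RowSink() {}
};

static loop_state send_one_row(RowSink *result,
                               unsigned long long *send_records)
{
  int rc= result->send_data();
  if (rc > 0)
    return LOOP_ERROR;          // propagate the failure
  if (rc == 0)
    ++*send_records;            // only accepted rows count toward LIMIT
  return LOOP_OK;               // rc < 0: duplicate, silently skipped
}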