Diffstat (limited to 'sql')
43 files changed, 1301 insertions, 556 deletions
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index c1a64446c12..e20c464fdb7 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -425,8 +425,8 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, key_copy(key_buf, event_table->record[0], key_info, key_len); if (!(ret= event_table->file->ha_index_read_map(event_table->record[0], key_buf, - (key_part_map)1, - HA_READ_PREFIX))) + (key_part_map) 1, + HA_READ_KEY_EXACT))) { DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); do diff --git a/sql/field.cc b/sql/field.cc index 13162fcb0d6..9a125a301b3 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5468,6 +5468,7 @@ double Field_year::val_real(void) longlong Field_year::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; + DBUG_ASSERT(field_length == 2 || field_length == 4); int tmp= (int) ptr[0]; if (field_length != 4) tmp%=100; // Return last 2 char @@ -5480,6 +5481,7 @@ longlong Field_year::val_int(void) String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + DBUG_ASSERT(field_length < 5); val_buffer->alloc(5); val_buffer->length(field_length); char *to=(char*) val_buffer->ptr(); @@ -9484,6 +9486,7 @@ void Create_field::create_length_to_internal_length(void) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VARCHAR: diff --git a/sql/field.h b/sql/field.h index c205c9b5582..5302b3de821 100644 --- a/sql/field.h +++ b/sql/field.h @@ -13,6 +13,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "my_compare.h" /* for clr_rec_bits */ + /* Because of the function new_field() all field classes that have static variables must declare the size_of() member function. 
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 56a4005e3e2..9e39c8eec8b 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -164,8 +164,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_alloc_root(&m_mem_root, 512, 512); @@ -186,16 +185,46 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); init_alloc_root(&m_mem_root, 512, 512); init_handler_variables(); - DBUG_ASSERT(m_part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_alloc_root(&m_mem_root, 512, 512); + init_handler_variables(); + m_part_info= part_info_arg; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -248,7 +277,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -256,6 +284,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -368,7 +401,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1869,7 +1903,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if (get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2091,18 +2125,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE 
Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. */ @@ -2166,21 +2198,24 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; file_buffer= (uchar *) my_alloca(tot_len_byte); if (!file_buffer) DBUG_RETURN(TRUE); bzero(file_buffer, tot_len_byte); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2218,13 +2253,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. 
Create and write and close file to be used at open, delete_table and rename_table */ @@ -2235,6 +2272,7 @@ bool ha_partition::create_handler_file(const char *name) result= my_write(file, (uchar *) file_buffer, tot_len_byte, MYF(MY_WME | MY_NABP)) != 0; + /* Write connection information (for federatedx engine) */ part_it.rewind(); for (i= 0; i < no_parts && !result; i++) { @@ -2245,7 +2283,10 @@ bool ha_partition::create_handler_file(const char *name) if (my_write(file, buffer, 4, MYF(MY_WME | MY_NABP)) || my_write(file, (uchar *) part_elem->connect_string.str, length, MYF(MY_WME | MY_NABP))) + { result= TRUE; + break; + } } VOID(my_close(file, MYF(0))); } @@ -2255,14 +2296,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - - SYNOPSIS - clear_handler_file() - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2275,16 +2311,15 @@ void ha_partition::clear_handler_file() m_connect_string= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @param mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2322,6 +2357,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2393,85 +2429,83 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. 
*/ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; - engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); - for (i= 0; i < m_tot_parts; i++) - { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); - if (!engine_array[i]) - goto err3; - } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. 
+ */ if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; + goto err2; + m_file_buffer= file_buffer; // Will be freed in clear_handler_file() + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; if (!(m_connect_string= (LEX_STRING*) alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING)))) - goto err3; + goto err2; bzero(m_connect_string, m_tot_parts * sizeof(LEX_STRING)); + /* Read connection arguments (for federated X engine) */ for (i= 0; i < m_tot_parts; i++) { LEX_STRING connect_string; uchar buffer[4]; if (my_read(file, buffer, 4, MYF(MY_NABP))) + { + /* No extra options; Probably not a federatedx engine */ break; + } connect_string.length= uint4korr(buffer); connect_string.str= (char*) alloc_root(&m_mem_root, connect_string.length+1); if (my_read(file, (uchar*) connect_string.str, connect_string.length, @@ -2482,31 +2516,100 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) } VOID(my_close(file, MYF(0))); - m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - + DBUG_RETURN(false); + +err2: +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); + engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); + + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); + for (i= 0; i < m_tot_parts; i++) + { + engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type) + *(buff + i)); + if (!engine_array[i]) + goto err; + } if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) - goto err3; + goto err; for (i= 0; i < m_tot_parts; i++) m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); my_afree(engine_array); - if (!m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: + DBUG_RETURN(false); + +err: my_afree(engine_array); -err2: -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2553,13 +2656,13 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; - int error; + char *name_buffer_ptr; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2567,8 +2670,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) - DBUG_RETURN(1); + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) + DBUG_RETURN(error); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->stored_rec_length; @@ -2578,7 +2682,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2601,50 +2705,86 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. 
+ */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + table->s->connect_string = m_connect_string[(uint)(file-m_file)]; + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + goto err_handler; + bzero(&table->s->connect_string, sizeof(LEX_STRING)); + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; - do + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + while (*(++file)) { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - table->s->connect_string = m_connect_string[(uint)(file-m_file)]; - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - bzero(&table->s->connect_string, sizeof(LEX_STRING)); - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + DBUG_ASSERT(ref_length >= (*file)->ref_length); set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. */ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2653,6 +2793,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2710,25 +2851,54 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory for ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then on the correct MEM_ROOT and also open them. 
+*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; + + if (new_handler && + new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + DBUG_RETURN((handler*) new_handler); } @@ -2759,7 +2929,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -4353,6 +4523,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, break; } } + m_last_part= part; } else { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index f5e66c5913e..b1e39cf4d22 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -70,7 +80,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name MEM_ROOT m_mem_root; plugin_ref *m_engine_array; // Array of types of the handlers @@ -134,6 +144,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. + */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -170,11 +187,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. 
@@ -187,7 +199,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -206,6 +218,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share,
+ partition_info *part_info_arg,
+ ha_partition *clone_arg,
+ MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -276,7 +292,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index a89c3c7f05c..f5e7de371a4 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2038,11 +2038,10 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); - - if (!new_handler) + handler *new_handler= get_new_handler(table->s, mem_root, ht); + if (! new_handler) return NULL; /* @@ -2050,17 +2049,27 @@ handler *handler::clone(MEM_ROOT *mem_root) on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) + + if (!(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) return NULL; - if (new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, + + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. + + This is not critical as the engines already have the table open + and should be able to use the original instance of the table. + */ + if (new_handler->ha_open(table, name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) return NULL; + new_handler->cloned= 1; // Marker for debugging return new_handler; } + double handler::keyread_time(uint index, uint ranges, ha_rows rows) { /* diff --git a/sql/handler.h b/sql/handler.h index 270c7bb0be2..32112fdcd13 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -23,7 +23,6 @@ #pragma interface /* gcc class implementation */ #endif -#include <my_handler.h> #include <ft_global.h> #include <keycache.h> @@ -1306,7 +1305,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/hostname.cc b/sql/hostname.cc index ec090cbe02f..dfcdd3edd90 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -183,7 +184,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors) &tmp_hostent,buff,sizeof(buff),&tmp_errno))) { DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno)); - return 0; + DBUG_RETURN(0); } if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2), &tmp_errno))) diff --git a/sql/item.cc b/sql/item.cc index 92dbff867c5..90730793b0f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -932,7 +932,7 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const /** Get the value of the function as a MYSQL_TIME structure. - As a extra convenience the time structure is reset on error! + As a extra convenience the time structure is reset on error or NULL values! */ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) @@ -948,8 +948,12 @@ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) } else { - longlong value= val_int(); int was_cut; + longlong value= val_int(); + + if (null_value) + goto err; + if (number_to_datetime(value, ltime, fuzzydate, &was_cut) == LL(-1)) { char buff[22], *end; @@ -2775,6 +2779,16 @@ bool Item_param::set_longdata(const char *str, ulong length) (here), and first have to concatenate all pieces together, write query to the binary log and only then perform conversion. */ + if (str_value.length() + length > max_long_data_size) + { + my_message(ER_UNKNOWN_ERROR, + "Parameter of prepared statement which is set through " + "mysql_send_long_data() is longer than " + "'max_long_data_size' bytes", + MYF(0)); + DBUG_RETURN(true); + } + if (str_value.append(str, length, &my_charset_bin)) DBUG_RETURN(TRUE); state= LONG_DATA_VALUE; @@ -6142,7 +6156,7 @@ void Item_ref::print(String *str, enum_query_type query_type) { THD *thd= current_thd; append_identifier(thd, str, (*ref)->real_item()->name, - (*ref)->real_item()->name_length); + strlen((*ref)->real_item()->name)); } else (*ref)->print(str, query_type); @@ -7164,7 +7178,7 @@ String *Item_cache_int::val_str(String *str) DBUG_ASSERT(fixed == 1); if (!value_cached && !cache_value()) return NULL; - str->set(value, default_charset()); + str->set_int(value, unsigned_flag, default_charset()); return str; } diff --git a/sql/item.h b/sql/item.h index 28856842b99..0445cd4a9d5 100644 --- a/sql/item.h +++ b/sql/item.h @@ -532,6 +532,11 @@ public: */ Item *next; uint32 max_length; + /* + TODO: convert name and name_length fields into LEX_STRING to keep them in + sync (see bug #11829681/60295 etc). Then also remove some strlen(name) + calls. 
+ */ uint name_length; /* Length of name */ int8 marker; uint8 decimals; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 51a73d774a8..15db44dd1c0 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -918,7 +918,7 @@ int Arg_comparator::set_cmp_func(Item_result_field *owner_arg, */ Query_arena backup; Query_arena *save_arena= thd->switch_to_arena_for_cached_items(&backup); - Item_cache_int *cache= new Item_cache_int(); + Item_cache_int *cache= new Item_cache_int(MYSQL_TYPE_DATETIME); if (save_arena) thd->set_query_arena(save_arena); @@ -4022,13 +4022,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - if (!args[i]->null_value) // Skip NULL values - { - array->set(j,args[i]); - j++; - } - else - have_null= 1; + array->set(j,args[i]); + if (!args[i]->null_value) // Skip NULL values + j++; + else + have_null= 1; } if ((array->used_count= j)) array->sort(); diff --git a/sql/item_func.cc b/sql/item_func.cc index 6ff743ed91d..f1af632d790 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -496,7 +496,10 @@ bool Item_func::is_expensive_processor(uchar *arg) my_decimal *Item_func::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed); - int2my_decimal(E_DEC_FATAL_ERROR, val_int(), unsigned_flag, decimal_value); + longlong nr= val_int(); + if (null_value) + return 0; /* purecov: inspected */ + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); return decimal_value; } @@ -854,7 +857,7 @@ longlong Item_func_numhybrid::val_int() return 0; char *end= (char*) res->ptr() + res->length(); - CHARSET_INFO *cs= str_value.charset(); + CHARSET_INFO *cs= res->charset(); return (*(cs->cset->strtoll10))(cs, res->ptr(), &end, &err_not_used); } default: @@ -1817,9 +1820,10 @@ void Item_func_integer::fix_length_and_dec() void Item_func_int_val::fix_num_length_and_dec() { - max_length= args[0]->max_length - (args[0]->decimals ? - args[0]->decimals + 1 : - 0) + 2; + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - + (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2; + max_length= tmp_max_length > (ulonglong) max_field_size ? + max_field_size : (uint32) tmp_max_length; uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; @@ -2134,10 +2138,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) if (!(null_value= (args[0]->null_value || args[1]->null_value || my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec, truncate, decimal_value) > 1))) - { - decimal_value->frac= decimals; return decimal_value; - } return 0; } @@ -3873,6 +3874,7 @@ Item_func_set_user_var::fix_length_and_dec() maybe_null=args[0]->maybe_null; max_length=args[0]->max_length; decimals=args[0]->decimals; + unsigned_flag= args[0]->unsigned_flag; collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); } diff --git a/sql/item_func.h b/sql/item_func.h index a41324b4102..2cdc6f36713 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011 Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ad88fe31a0d..d6ac3a341a4 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -39,6 +39,9 @@ C_MODE_START #include "../mysys/my_static.h" // For soundex_map C_MODE_END +/** + @todo Remove this. It is not safe to use a shared String object. + */ String my_empty_string("",default_charset_info); @@ -116,7 +119,6 @@ String *Item_func_md5::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) { uchar digest[16]; @@ -129,6 +131,7 @@ String *Item_func_md5::val_str(String *str) return 0; } array_to_hex((char *) str->ptr(), (const char*) digest, 16); + str->set_charset(&my_charset_bin); str->length((uint) 32); return str; } @@ -155,7 +158,6 @@ String *Item_func_sha::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) /* If we got value different from NULL */ { SHA1_CONTEXT context; /* Context used to generate SHA1 hash */ @@ -165,11 +167,13 @@ String *Item_func_sha::val_str(String *str) /* No need to check error as the only case would be too long message */ mysql_sha1_input(&context, (const uchar *) sptr->ptr(), sptr->length()); + /* Ensure that memory is free and we got result */ if (!( str->alloc(SHA1_HASH_SIZE*2) || (mysql_sha1_result(&context,digest)))) { array_to_hex((char *) str->ptr(), (const char*) digest, SHA1_HASH_SIZE); + str->set_charset(&my_charset_bin); str->length((uint) SHA1_HASH_SIZE*2); null_value=0; return str; @@ -461,7 +465,7 @@ String *Item_func_des_encrypt::val_str(String *str) if ((null_value= args[0]->null_value)) return 0; // ENCRYPT(NULL) == NULL if ((res_length=res->length()) == 0) - return &my_empty_string; + return make_empty_result(); if (arg_count == 1) { @@ -517,6 +521,7 @@ String *Item_func_des_encrypt::val_str(String *str) tmp_arg[res_length-1]=tail; // save extra length tmp_value.realloc(res_length+1); tmp_value.length(res_length+1); + tmp_value.set_charset(&my_charset_bin); tmp_value[0]=(char) (128 | key_number); // Real encryption bzero((char*) &ivec,sizeof(ivec)); @@ -604,6 +609,7 @@ String *Item_func_des_decrypt::val_str(String *str) if ((tail=(uint) (uchar) tmp_value[length-2]) > 8) goto wrong_key; // Wrong key tmp_value.length(length-1-tail); + tmp_value.set_charset(&my_charset_bin); return &tmp_value; error: @@ -641,7 +647,7 @@ String *Item_func_concat_ws::val_str(String *str) use_as_buff= &tmp_value; str->length(0); // QQ; Should be removed - res=str; + res=str; // If 0 arg_count // Skip until non-null argument is found. // If not, return the empty string @@ -653,7 +659,7 @@ String *Item_func_concat_ws::val_str(String *str) } if (i == arg_count) - return &my_empty_string; + return make_empty_result(); for (i++; i < arg_count ; i++) { @@ -804,7 +810,7 @@ String *Item_func_reverse::val_str(String *str) return 0; /* An empty string is a special case as the string pointer may be null */ if (!res->length()) - return &my_empty_string; + return make_empty_result(); if (tmp_value.alloced_length() < res->length() && tmp_value.realloc(res->length())) { @@ -1144,8 +1150,7 @@ String *Item_func_left::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. 
*/ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; - + return make_empty_result(); if ((res->length() <= (ulonglong) length) || (res->length() <= (char_pos= res->charpos((int) length)))) return res; @@ -1188,7 +1193,7 @@ String *Item_func_right::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. */ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; /* purecov: inspected */ + return make_empty_result(); /* purecov: inspected */ if (res->length() <= (ulonglong) length) return res; /* purecov: inspected */ @@ -1227,7 +1232,7 @@ String *Item_func_substr::val_str(String *str) /* Negative or zero length, will return empty string. */ if ((arg_count == 3) && (length <= 0) && (length == 0 || !args[2]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Set here so that rest of code sees out-of-bound value as such. */ @@ -1238,12 +1243,12 @@ String *Item_func_substr::val_str(String *str) /* Assumes that the maximum length of a String is < INT_MAX32. */ if ((!args[1]->unsigned_flag && (start < INT_MIN32 || start > INT_MAX32)) || (args[1]->unsigned_flag && ((ulonglong) start > INT_MAX32))) - return &my_empty_string; + return make_empty_result(); start= ((start < 0) ? res->numchars() + start : start - 1); start= res->charpos((int) start); if ((start < 0) || ((uint) start + 1 > res->length())) - return &my_empty_string; + return make_empty_result(); length= res->charpos((int) length, (uint32) start); tmp_length= res->length() - start; @@ -1306,7 +1311,7 @@ String *Item_func_substr_index::val_str(String *str) null_value=0; uint delimiter_length= delimiter->length(); if (!res->length() || !delimiter_length || !count) - return &my_empty_string; // Wrong parameters + return make_empty_result(); // Wrong parameters res->set_charset(collation.collation); @@ -1655,7 +1660,7 @@ String *Item_func_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, res->charset()); return str; @@ -1679,7 +1684,7 @@ String *Item_func_old_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password_323(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, res->charset()); return str; @@ -1707,8 +1712,7 @@ String *Item_func_encrypt::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; - + return make_empty_result(); if (arg_count == 1) { // generate random salt time_t timestamp=current_thd->query_start(); @@ -1968,7 +1972,7 @@ String *Item_func_soundex::val_str(String *str) for ( ; ; ) /* Skip pre-space */ { if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) - return &my_empty_string; /* EOL or invalid byte sequence */ + return make_empty_result(); /* EOL or invalid byte sequence */ if (rc == 1 && cs->ctype) { @@ -1993,7 +1997,7 @@ String *Item_func_soundex::val_str(String *str) { /* Extra safety - should not really happen */ DBUG_ASSERT(false); - return &my_empty_string; + return make_empty_result(); } to+= rc; break; @@ -2290,7 +2294,7 @@ String *Item_func_make_set::val_str(String *str) 
else { if (tmp_str.copy(*res)) // Don't use 'str' - return &my_empty_string; + return make_empty_result(); result= &tmp_str; } } @@ -2300,11 +2304,11 @@ String *Item_func_make_set::val_str(String *str) { // Copy data to tmp_str if (tmp_str.alloc(result->length()+res->length()+1) || tmp_str.copy(*result)) - return &my_empty_string; + return make_empty_result(); result= &tmp_str; } if (tmp_str.append(STRING_WITH_LEN(","), &my_charset_bin) || tmp_str.append(*res)) - return &my_empty_string; + return make_empty_result(); } } } @@ -2443,7 +2447,7 @@ String *Item_func_repeat::val_str(String *str) null_value= 0; if (count <= 0 && (count == 0 || !args[1]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Bounds check on count: If this is triggered, we will error. */ @@ -2751,7 +2755,7 @@ String *Item_func_conv::val_str(String *str) ptr= longlong2str(dec, ans, to_base, 1); if (str->copy(ans, (uint32) (ptr-ans), default_charset())) - return &my_empty_string; + return make_empty_result(); return str; } @@ -2761,22 +2765,16 @@ String *Item_func_conv_charset::val_str(String *str) DBUG_ASSERT(fixed == 1); if (use_cached_value) return null_value ? 0 : &str_value; - /* - Here we don't pass 'str' as a parameter to args[0]->val_str() - as 'str' may point to 'str_value' (e.g. see Item::save_in_field()), - which we use below to convert string. - Use argument's 'str_value' instead. - */ - String *arg= args[0]->val_str(&args[0]->str_value); + String *arg= args[0]->val_str(str); uint dummy_errors; if (!arg) { null_value=1; return 0; } - null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(), + null_value= tmp_value.copy(arg->ptr(), arg->length(), arg->charset(), conv_charset, &dummy_errors); - return null_value ? 0 : check_well_formed_result(&str_value); + return null_value ? 
0 : check_well_formed_result(&tmp_value); } void Item_func_conv_charset::fix_length_and_dec() @@ -2918,7 +2916,7 @@ String *Item_func_hex::val_str(String *str) return 0; ptr= longlong2str(dec,ans,16,1); if (str->copy(ans,(uint32) (ptr-ans),default_charset())) - return &my_empty_string; // End of memory + return make_empty_result(); // End of memory return str; } @@ -3218,14 +3216,68 @@ String *Item_func_quote::val_str(String *str) } arg_length= arg->length(); - new_length= arg_length+2; /* for beginning and ending ' signs */ - for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) - new_length+= get_esc_bit(escmask, (uchar) *from); + if (collation.collation->mbmaxlen == 1) + { + new_length= arg_length + 2; /* for beginning and ending ' signs */ + for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) + new_length+= get_esc_bit(escmask, (uchar) *from); + } + else + { + new_length= (arg_length * 2) + /* For string characters */ + (2 * collation.collation->mbmaxlen); /* For quotes */ + } if (tmp_value.alloc(new_length)) goto null; + if (collation.collation->mbmaxlen > 1) + { + CHARSET_INFO *cs= collation.collation; + int mblen; + uchar *to_end; + to= (char*) tmp_value.ptr(); + to_end= (uchar*) to + new_length; + + /* Put leading quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + + for (start= (char*) arg->ptr(), end= start + arg_length; start < end; ) + { + my_wc_t wc; + bool escape; + if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) start, (uchar*) end)) <= 0) + goto null; + start+= mblen; + switch (wc) { + case 0: escape= 1; wc= '0'; break; + case '\032': escape= 1; wc= 'Z'; break; + case '\'': escape= 1; break; + case '\\': escape= 1; break; + default: escape= 0; break; + } + if (escape) + { + if ((mblen= cs->cset->wc_mb(cs, '\\', (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + if ((mblen= cs->cset->wc_mb(cs, wc, (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + + /* Put trailing quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + new_length= to - tmp_value.ptr(); + goto ret; + } + /* We replace characters from the end to the beginning */ @@ -3257,6 +3309,8 @@ String *Item_func_quote::val_str(String *str) } } *to= '\''; + +ret: tmp_value.length(new_length); tmp_value.set_charset(collation.collation); null_value= 0; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 53219e70973..e8d0384482d 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -22,6 +22,23 @@ class Item_str_func :public Item_func { +protected: + /** + Sets the result value of the function an empty string, using the current + character set. No memory is allocated. + @retval A pointer to the str_value member. + */ + String *make_empty_result() + { + /* + Reset string length to an empty string. We don't use str_value.set() as + we don't want to free and potentially have to reallocate the buffer + for each call. 
+ */ + str_value.length(0); + str_value.set_charset(collation.collation); + return &str_value; + } public: Item_str_func() :Item_func() { decimals=NOT_FIXED_DEC; } Item_str_func(Item *a) :Item_func(a) {decimals=NOT_FIXED_DEC; } @@ -707,15 +724,17 @@ public: String *val_str(String *); void fix_length_and_dec() { - ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2; - max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); collation.set(args[0]->collation); + ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + + 2 * collation.collation->mbmaxlen; + max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); } }; class Item_func_conv_charset :public Item_str_func { bool use_cached_value; + String tmp_value; public: bool safe; CHARSET_INFO *conv_charset; // keep it public diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 40ece652ba3..d71258d8748 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -607,17 +607,13 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) switch (hybrid_type= item->result_type()) { case INT_RESULT: - max_length= 20; - break; case DECIMAL_RESULT: + case STRING_RESULT: max_length= item->max_length; break; case REAL_RESULT: max_length= float_length(decimals); break; - case STRING_RESULT: - max_length= item->max_length; - break; case ROW_RESULT: default: DBUG_ASSERT(0); diff --git a/sql/item_sum.h b/sql/item_sum.h index 8e8f8ac99d2..851b77ddeae 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -354,6 +354,7 @@ public: forced_const= TRUE; } virtual bool const_item() const { return forced_const; } + virtual bool const_during_execution() const { return false; } virtual void print(String *str, enum_query_type query_type); void fix_num_length_and_dec(); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 9cf56148994..c17557905bd 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -294,8 +294,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, for (; ptr != end && val != val_end; ptr++) { /* Skip pre-space between each argument */ - while (val != val_end && my_isspace(cs, *val)) - val++; + if ((val+= cs->cset->scan(cs, val, val_end, MY_SEQ_SPACES)) >= val_end) + break; if (*ptr == '%' && ptr+1 != end) { @@ -649,7 +649,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -658,7 +658,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -823,7 +823,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); @@ -3300,6 +3300,7 @@ void Item_func_str_to_date::fix_length_and_dec() { maybe_null= 1; decimals=0; + cached_format_type= DATE_TIME; cached_field_type= MYSQL_TYPE_DATETIME; max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; cached_timestamp_type= MYSQL_TIMESTAMP_NONE; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 87af384923e..2d499f6ef0e 100644 --- 
a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -108,8 +109,11 @@ public: { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { - str->set(val_int(), &my_charset_bin); - return null_value ? 0 : str; + longlong nr= val_int(); + if (null_value) + return 0; + str->set(nr, &my_charset_bin); + return str; } const char *func_name() const { return "month"; } enum Item_result result_type () const { return INT_RESULT; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 95cf853d9ff..217adce4f66 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -9010,7 +9010,19 @@ static bool record_compare(TABLE *table) } } - if (table->s->blob_fields + table->s->varchar_fields == 0) + /** + Compare full record only if: + - there are no blob fields (otherwise we would also need + to compare blobs contents as well); + - there are no varchar fields (otherwise we would also need + to compare varchar contents as well); + - there are no null fields, otherwise NULLed fields + contents (i.e., the don't care bytes) may show arbitrary + values, depending on how each engine handles internally. + */ + if ((table->s->blob_fields + + table->s->varchar_fields + + table->s->null_fields) == 0) { result= cmp_record(table,record[1]); goto record_compare_exit; @@ -9025,13 +9037,22 @@ static bool record_compare(TABLE *table) goto record_compare_exit; } - /* Compare updated fields */ + /* Compare fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { - if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + + /** + We only compare field contents that are not null. + NULL fields (i.e., their null bits) were compared + earlier. 
+ */ + if (!(*(ptr))->is_null()) { - result= TRUE; - goto record_compare_exit; + if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + { + result= TRUE; + goto record_compare_exit; + } } } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 68d678eecdf..6f97c7359c2 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1043,7 +1043,11 @@ void reset_mqh(LEX_USER *lu, bool get_them); bool check_mqh(THD *thd, uint check_command); void time_out_user_resource_limits(THD *thd, USER_CONN *uc); void decrease_user_connections(USER_CONN *uc); -void thd_init_client_charset(THD *thd, uint cs_number); +bool thd_init_client_charset(THD *thd, uint cs_number); +inline bool is_supported_parser_charset(CHARSET_INFO *cs) +{ + return test(cs->mbminlen == 1); +} bool setup_connection_thread_globals(THD *thd); bool login_connection(THD *thd); void end_connection(THD *thd); @@ -2010,6 +2014,7 @@ extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb; extern uint test_flags,select_errors,ha_open_options; extern uint protocol_version, mysqld_port, mysqld_extra_port, dropping_tables; extern uint delay_key_write_options; +extern ulong max_long_data_size; #endif /* MYSQL_SERVER */ #if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS extern MYSQL_PLUGIN_IMPORT uint lower_case_table_names; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index bd0290a6420..c52de3d0f42 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -421,6 +421,7 @@ TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"", /* the default log output is log tables */ static bool lower_case_table_names_used= 0; +static bool max_long_data_size_used= false; static bool volatile select_thread_in_use, signal_thread_in_use; static bool volatile ready_to_exit; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; @@ -598,6 +599,12 @@ ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong max_connections, max_connect_errors; ulong extra_max_connections; +/* + Maximum length of parameter value which can be set through + mysql_send_long_data() call. 
+*/ +ulong max_long_data_size; + uint max_user_connections= 0; ulonglong denied_connections; /** @@ -1484,6 +1491,7 @@ static void wait_for_signal_thread_to_end() #endif } +#endif /*EMBEDDED_LIBRARY*/ static void clean_up_mutexes() { @@ -1511,19 +1519,21 @@ static void clean_up_mutexes() (void) pthread_mutex_destroy(&LOCK_global_table_stats); (void) pthread_mutex_destroy(&LOCK_global_index_stats); +#ifndef EMBEDDED_LIBRARY Events::destroy_mutexes(); +#endif /* !EMBEDDED_LIBRARY */ #ifdef HAVE_OPENSSL (void) pthread_mutex_destroy(&LOCK_des_key_file); #ifndef HAVE_YASSL for (int i= 0; i < CRYPTO_num_locks(); ++i) (void) rwlock_destroy(&openssl_stdlocks[i].lock); OPENSSL_free(openssl_stdlocks); -#endif -#endif +#endif /* HAVE_YASSL */ +#endif /* HAVE_OPENSSL */ #ifdef HAVE_REPLICATION (void) pthread_mutex_destroy(&LOCK_rpl_status); (void) pthread_cond_destroy(&COND_rpl_status); -#endif +#endif /* HAVE_REPLICATION */ (void) pthread_mutex_destroy(&LOCK_server_started); (void) pthread_cond_destroy(&COND_server_started); (void) pthread_mutex_destroy(&LOCK_active_mi); @@ -1543,8 +1553,6 @@ static void clean_up_mutexes() DBUG_VOID_RETURN; } -#endif /*EMBEDDED_LIBRARY*/ - /** Register order of mutex for wrong mutex deadlock detector @@ -3197,6 +3205,19 @@ sizeof(load_default_groups)/sizeof(load_default_groups[0]); #endif +#ifndef EMBEDDED_LIBRARY +static +int +check_enough_stack_size() +{ + uchar stack_top; + + return check_stack_overrun(current_thd, STACK_MIN_SIZE, + &stack_top); +} +#endif + + /** Initialize one of the global date/time format variables. @@ -3401,12 +3422,6 @@ static int init_common_variables(const char *conf_file_name, int argc, max_system_variables.pseudo_thread_id= (ulong)~0; server_start_time= flush_status_time= my_time(0); - /* TODO: remove this when my_time_t is 64 bit compatible */ - if (server_start_time >= (time_t) MY_TIME_T_MAX) - { - sql_print_error("This MySQL server doesn't support dates later then 2038"); - return 1; - } rpl_filter= new Rpl_filter; binlog_filter= new Rpl_filter; @@ -3445,6 +3460,14 @@ static int init_common_variables(const char *conf_file_name, int argc, */ mysql_bin_log.init_pthread_objects(); + /* TODO: remove this when my_time_t is 64 bit compatible */ + if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time)) + { + sql_print_error("This MySQL server doesn't support dates later then 2038"); + return 1; + } + + if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) { /* Get hostname of computer (used by 'show variables') and as default @@ -3588,7 +3611,11 @@ static int init_common_variables(const char *conf_file_name, int argc, #endif mysys_uses_curses=0; #ifdef USE_REGEX - my_regex_init(&my_charset_latin1); +#ifndef EMBEDDED_LIBRARY + my_regex_init(&my_charset_latin1, check_enough_stack_size); +#else + my_regex_init(&my_charset_latin1, NULL); +#endif #endif /* Process a comma-separated character set list and choose @@ -6021,7 +6048,8 @@ enum options_mysqld OPT_SLOW_QUERY_LOG_FILE, OPT_IGNORE_BUILTIN_INNODB, OPT_BINLOG_DIRECT_NON_TRANS_UPDATE, - OPT_DEFAULT_CHARACTER_SET_OLD + OPT_DEFAULT_CHARACTER_SET_OLD, + OPT_MAX_LONG_DATA_SIZE }; @@ -7208,6 +7236,12 @@ each time the SQL thread starts.", &global_system_variables.max_length_for_sort_data, &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, + {"max_long_data_size", OPT_MAX_LONG_DATA_SIZE, + "The maximum size of prepared statement parameter which can be provided " + "through mysql_send_long_data() API call. 
To be used when limit of " + "max_allowed_packet is too small", + &max_long_data_size, &max_long_data_size, 0, GET_ULONG, + REQUIRED_ARG, 1024*1024L, 1024, UINT_MAX32, MALLOC_OVERHEAD, 1, 0}, {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT, "Maximum number of prepared statements in the server.", &max_prepared_stmt_count, &max_prepared_stmt_count, @@ -8167,6 +8201,7 @@ static void usage(void) puts("\ Copyright (C) 2000-2008 MySQL AB, by Monty and others.\n\ Copyright (C) 2008 Sun Microsystems, Inc.\n\ +Copyright (C) 2009-2011 Monty Program Ab.\n\ This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\ and you are welcome to modify and redistribute it under the GPL license\n\n\ Starts the MySQL database server.\n"); @@ -9200,6 +9235,9 @@ mysqld_get_one_option(int optid, } break; #endif /* defined(ENABLED_DEBUG_SYNC) */ + case OPT_MAX_LONG_DATA_SIZE: + max_long_data_size_used= true; + break; } return 0; } @@ -9292,6 +9330,14 @@ static int get_options(int *argc,char **argv) opt_log_slow_slave_statements) && !opt_slow_log) sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set"); + if (global_system_variables.net_buffer_length > + global_system_variables.max_allowed_packet) + { + sql_print_warning("net_buffer_length (%lu) is set to be larger " + "than max_allowed_packet (%lu). Please rectify.", + global_system_variables.net_buffer_length, + global_system_variables.max_allowed_packet); + } #if defined(HAVE_BROKEN_REALPATH) my_use_symdir=0; @@ -9373,6 +9419,14 @@ static int get_options(int *argc,char **argv) &extra_max_connections, &extra_connection_count); #endif + + /* + If max_long_data_size is not specified explicitly use + value of max_allowed_packet. + */ + if (!max_long_data_size_used) + max_long_data_size= global_system_variables.max_allowed_packet; + return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2e39f367a70..ab6c1918fb7 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1334,7 +1334,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually set the error flag. Note: there seems to be quite a few diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 0c2e41225ba..0ed31675f24 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -211,6 +211,7 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) /** Substitutes constants for some COUNT(), MIN() and MAX() functions. + @param thd thread handler @param tables list of leaves of join table tree @param all_fields All fields to be returned @param conds WHERE clause @@ -228,9 +229,12 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) HA_ERR_KEY_NOT_FOUND on impossible conditions @retval HA_ERR_... if a deadlock or a lock wait timeout happens, for example + @retval + ER_... e.g. 
ER_SUBQUERY_NO_1_ROW */ -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) +int opt_sum_query(THD *thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds) { List_iterator_fast<Item> it(all_fields); int const_result= 1; @@ -241,6 +245,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) table_map where_tables= 0; Item *item; int error; + DBUG_ENTER("opt_sum_query"); if (conds) where_tables= conds->used_tables(); @@ -269,7 +274,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + DBUG_RETURN(0); } else used_tables|= tl->table->map; @@ -297,7 +302,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { tl->table->file->print_error(error, MYF(0)); tl->table->in_use->fatal_error(); - return error; + DBUG_RETURN(error); } count*= tl->table->file->stats.records; } @@ -389,10 +394,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) if (error) { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); - return(error); + DBUG_RETURN(error); } removed_tables|= table->map; } @@ -436,6 +441,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; } } + + if (thd->is_error()) + DBUG_RETURN(thd->main_da.sql_errno()); + /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables @@ -445,7 +454,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) */ if (removed_tables && used_tables != removed_tables) const_result= 0; // We didn't remove all tables - return const_result; + DBUG_RETURN(const_result); } @@ -731,6 +740,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, if (is_null || (is_null_safe_eq && args[1]->is_null())) { + /* + If we have a non-nullable index, we cannot use it, + since set_null will be ignored, and we will compare uninitialized data. + */ + if (!part->field->real_maybe_null()) + DBUG_RETURN(FALSE); part->field->set_null(); *key_ptr= (uchar) 1; } @@ -801,8 +816,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, @param[out] prefix_len Length of prefix for the search range @note - This function may set table->key_read to 1, which must be reset after - index is used! (This can only happen when function returns 1) + This function may set field->table->key_read to true, + which must be reset after index is used! 
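[Editor's note] Several of the opt_sum.cc hunks above only convert bare return statements into DBUG_RETURN() once DBUG_ENTER() has been added. The convention is that every exit of a function that called DBUG_ENTER must go through DBUG_RETURN / DBUG_VOID_RETURN so the debug trace's call stack stays balanced. A tiny sketch of the pairing, assuming the server's usual debug headers and an invented function, looks like this:

    #include <my_global.h>   /* standard first include in the server tree */
    #include <my_dbug.h>     /* DBUG_ENTER / DBUG_RETURN / DBUG_PRINT */

    static int lookup_value(int key)
    {
      DBUG_ENTER("lookup_value");          /* pushes a frame on the debug trace */
      if (key < 0)
        DBUG_RETURN(-1);                   /* not "return -1": keeps the trace balanced */
      DBUG_PRINT("info", ("key: %d", key));
      DBUG_RETURN(key * 2);                /* normal exit also pops the frame */
    }
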
+ (This can only happen when function returns 1) @retval 0 Index can not be used to optimize MIN(field)/MAX(field) @@ -817,7 +833,9 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not key field + return FALSE; // Not key field + + DBUG_ENTER("find_key_for_maxmin"); TABLE *table= field->table; uint idx= 0; @@ -842,7 +860,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) - return 0; + DBUG_RETURN(FALSE); /* Check whether the index component is partial */ Field *part_field= table->field[part->fieldnr-1]; @@ -891,12 +909,12 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ if (field->part_of_key.is_set(idx)) table->enable_keyread(); - return 1; + DBUG_RETURN(TRUE); } } } } - return 0; + DBUG_RETURN(FALSE); } diff --git a/sql/set_var.cc b/sql/set_var.cc index f5ab86a16a8..4aa30b5634d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -154,6 +154,8 @@ static bool sys_update_slow_log_path(THD *thd, set_var * var); static void sys_default_slow_log_path(THD *thd, enum_var_type type); static void fix_sys_log_slow_filter(THD *thd, enum_var_type); static uchar *get_myisam_mmap_size(THD *thd); +static int check_max_allowed_packet(THD *thd, set_var *var); +static int check_net_buffer_length(THD *thd, set_var *var); /* Variable definition list @@ -393,7 +395,8 @@ static sys_var_const sys_lower_case_table_names(&vars, (uchar*) &lower_case_table_names); static sys_var_thd_ulong_session_readonly sys_max_allowed_packet(&vars, "max_allowed_packet", - &SV::max_allowed_packet); + &SV::max_allowed_packet, + check_max_allowed_packet); static sys_var_ulonglong_ptr sys_max_binlog_cache_size(&vars, "max_binlog_cache_size", &max_binlog_cache_size); static sys_var_long_ptr sys_max_binlog_size(&vars, "max_binlog_size", @@ -427,6 +430,12 @@ static sys_var_thd_ulong sys_max_seeks_for_key(&vars, "max_seeks_for_key", &SV::max_seeks_for_key); static sys_var_thd_ulong sys_max_length_for_sort_data(&vars, "max_length_for_sort_data", &SV::max_length_for_sort_data); +static sys_var_const sys_max_long_data_size(&vars, + "max_long_data_size", + OPT_GLOBAL, SHOW_LONG, + (uchar*) + &max_long_data_size); + #ifndef TO_BE_DELETED /* Alias for max_join_size */ static sys_var_thd_ha_rows sys_sql_max_join_size(&vars, "sql_max_join_size", &SV::max_join_size, @@ -481,7 +490,8 @@ static sys_var_const sys_named_pipe(&vars, "named_pipe", /* purecov: end */ #endif static sys_var_thd_ulong_session_readonly sys_net_buffer_length(&vars, "net_buffer_length", - &SV::net_buffer_length); + &SV::net_buffer_length, + check_net_buffer_length); static sys_var_thd_ulong sys_net_read_timeout(&vars, "net_read_timeout", &SV::net_read_timeout, 0, fix_net_read_timeout); @@ -1896,7 +1906,7 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) } var->save_result.ulong_value= ((ulong) - find_set(enum_names, res->ptr(), + find_set(enum_names, res->c_ptr_safe(), res->length(), NULL, &error, &error_len, @@ -2311,7 +2321,7 @@ bool sys_var_character_set_client::check(THD *thd, set_var *var) if (sys_var_character_set_sv::check(thd, var)) return 1; /* Currently, UCS-2 cannot be used as a client character set */ - if (var->save_result.charset->mbminlen > 1) + if (!is_supported_parser_charset(var->save_result.charset)) { my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, 
var->save_result.charset->csname); @@ -2823,14 +2833,14 @@ int set_var_collation_client::update(THD *thd) bool sys_var_timestamp::check(THD *thd, set_var *var) { - time_t val; + longlong val; var->save_result.ulonglong_value= var->value->val_int(); - val= (time_t) var->save_result.ulonglong_value; - if (val < (time_t) MY_TIME_T_MIN || val > (time_t) MY_TIME_T_MAX) + val= (longlong) var->save_result.ulonglong_value; + if (val != 0 && // this is how you set the default value + (val < TIMESTAMP_MIN_VALUE || val > TIMESTAMP_MAX_VALUE)) { - my_message(ER_UNKNOWN_ERROR, - "This version of MySQL doesn't support dates later than 2038", - MYF(0)); + char buf[64]; + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "timestamp", llstr(val, buf)); return TRUE; } return FALSE; @@ -4409,6 +4419,36 @@ uchar *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type, } #endif + +int +check_max_allowed_packet(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val < (longlong) global_system_variables.net_buffer_length) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + + +int +check_net_buffer_length(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val > (longlong) global_system_variables.max_allowed_packet) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + /**************************************************************************** Used templates ****************************************************************************/ diff --git a/sql/slave.cc b/sql/slave.cc index 0d14234766b..435f3d8b95f 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -408,17 +409,6 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) int error,force_all = (thread_mask & SLAVE_FORCE_ALL); pthread_mutex_t *sql_lock = &mi->rli.run_lock, *io_lock = &mi->run_lock; - if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) - { - DBUG_PRINT("info",("Terminating IO thread")); - mi->abort_slave=1; - if ((error=terminate_slave_thread(mi->io_thd, io_lock, - &mi->stop_cond, - &mi->slave_running, - skip_lock)) && - !force_all) - DBUG_RETURN(error); - } if (thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) { DBUG_PRINT("info",("Terminating SQL thread")); @@ -430,6 +420,17 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) !force_all) DBUG_RETURN(error); } + if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) + { + DBUG_PRINT("info",("Terminating IO thread")); + mi->abort_slave=1; + if ((error=terminate_slave_thread(mi->io_thd, io_lock, + &mi->stop_cond, + &mi->slave_running, + skip_lock)) && + !force_all) + DBUG_RETURN(error); + } DBUG_RETURN(0); } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 47d85238cff..f783375b010 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -5422,18 +5423,15 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, } -/* +/** Handle an in-memory privilege structure. - SYNOPSIS - handle_grant_struct() - struct_no The number of the structure to handle (0..3). - drop If user_from is to be dropped. - user_from The the user to be searched/dropped/renamed. - user_to The new name for the user if to be renamed, - NULL otherwise. + @param struct_no The number of the structure to handle (0..4). + @param drop If user_from is to be dropped. + @param user_from The the user to be searched/dropped/renamed. + @param user_to The new name for the user if to be renamed, NULL otherwise. - DESCRIPTION + @note Scan through all elements in an in-memory grant structure and apply the requested operation. Delete from grant structure if drop is true. @@ -5443,12 +5441,12 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, 0 acl_users 1 acl_dbs 2 column_priv_hash - 3 procs_priv_hash + 3 proc_priv_hash + 4 func_priv_hash - RETURN - > 0 At least one element matched. - 0 OK, but no element matched. - -1 Wrong arguments to function + @retval > 0 At least one element matched. + @retval 0 OK, but no element matched. + @retval -1 Wrong arguments to function. */ static int handle_grant_struct(uint struct_no, bool drop, @@ -5462,6 +5460,7 @@ static int handle_grant_struct(uint struct_no, bool drop, ACL_USER *acl_user= NULL; ACL_DB *acl_db= NULL; GRANT_NAME *grant_name= NULL; + HASH *grant_name_hash= NULL; DBUG_ENTER("handle_grant_struct"); DBUG_PRINT("info",("scan struct: %u search: '%s'@'%s'", struct_no, user_from->user.str, user_from->host.str)); @@ -5480,10 +5479,16 @@ static int handle_grant_struct(uint struct_no, bool drop, elements= acl_dbs.elements; break; case 2: - elements= column_priv_hash.records; + grant_name_hash= &column_priv_hash; + elements= grant_name_hash->records; break; case 3: - elements= proc_priv_hash.records; + grant_name_hash= &proc_priv_hash; + elements= grant_name_hash->records; + break; + case 4: + grant_name_hash= &func_priv_hash; + elements= grant_name_hash->records; break; default: return -1; @@ -5513,16 +5518,13 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx); - user= grant_name->user; - host= grant_name->host.hostname; - break; - case 3: - grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx); + case 4: + grant_name= (GRANT_NAME*) hash_element(grant_name_hash, idx); user= grant_name->user; host= grant_name->host.hostname; break; + default: MY_ASSERT_UNREACHABLE(); } @@ -5552,14 +5554,25 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - hash_delete(&column_priv_hash, (uchar*) grant_name); - break; - case 3: - hash_delete(&proc_priv_hash, (uchar*) grant_name); + case 4: + hash_delete(grant_name_hash, (uchar*) grant_name); break; } elements--; + /* + - If we are iterating through an array then we just have moved all + elements after the current element one position closer to its head. + This means that we have to take another look at the element at + current position as it is a new element from the array's tail. + - If we are iterating through a hash the current element was replaced + with one of elements from the tail. 
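[Editor's note] The comment being added here explains why the loop steps back one position after a delete: when a container fills the freed slot with an element taken from its tail, the current index suddenly holds an element that has not been examined yet. The same pattern, expressed with a plain std::vector and erase-by-swapping-with-the-back (not the server's HASH, and with arbitrary data), can be seen in this self-contained sketch:

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
      std::vector<std::string> users= {"root", "drop_me", "drop_me", "monty"};

      for (size_t idx= 0; idx < users.size(); )
      {
        if (users[idx] == "drop_me")
        {
          /* Fill the hole with the last element, then shrink. */
          users[idx]= users.back();
          users.pop_back();
          /* Do not advance: the slot at 'idx' now holds an unexamined
             element taken from the tail (the idx-- in the patch achieves
             the same thing before the loop's own increment). */
        }
        else
          idx++;
      }
      for (const std::string &u : users)
        std::cout << u << '\n';
      return 0;
    }
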
So we also have to take a look + at the new element in current position. + Note that in our HASH implementation hash_delete() won't move any + elements with position after current one to position before the + current (i.e. from the tail to the head), so it is safe to continue + iteration without re-starting. + */ idx--; } else if ( user_to ) @@ -5577,22 +5590,41 @@ static int handle_grant_struct(uint struct_no, bool drop, case 2: case 3: - /* - Update the grant structure with the new user name and - host name - */ - grant_name->set_user_details(user_to->host.str, grant_name->db, - user_to->user.str, grant_name->tname, - TRUE); - - /* - Since username is part of the hash key, when the user name - is renamed, the hash key is changed. Update the hash to - ensure that the position matches the new hash key value - */ - hash_update(&column_priv_hash, (uchar*) grant_name, - (uchar*) grant_name->hash_key, grant_name->key_length); - break; + case 4: + { + /* + Save old hash key and its length to be able properly update + element position in hash. + */ + char *old_key= grant_name->hash_key; + size_t old_key_length= grant_name->key_length; + + /* + Update the grant structure with the new user name and host name. + */ + grant_name->set_user_details(user_to->host.str, grant_name->db, + user_to->user.str, grant_name->tname, + TRUE); + + /* + Since username is part of the hash key, when the user name + is renamed, the hash key is changed. Update the hash to + ensure that the position matches the new hash key value + */ + hash_update(grant_name_hash, (uchar*) grant_name, (uchar*) old_key, + old_key_length); + /* + hash_update() operation could have moved element from the tail + of the hash to the current position. So we need to take a look + at the element in current position once again. + Thanks to the fact that hash_update() for our HASH implementation + won't move any elements from the tail of the hash to the positions + before the current one (a.k.a. head) it is safe to continue + iteration without restarting. + */ + idx--; + break; + } } } else @@ -5649,8 +5681,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, else { /* Handle user array. */ - if ((handle_grant_struct(0, drop, user_from, user_to) && ! result) || - found) + if ((handle_grant_struct(0, drop, user_from, user_to)) || found) { result= 1; /* At least one record/element found. */ /* If search is requested, we do not need to search further. */ @@ -5678,7 +5709,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, } } - /* Handle procedures table. */ + /* Handle stored routines table. */ if ((found= handle_grant_table(tables, 4, drop, user_from, user_to)) < 0) { /* Handle of table failed, don't touch in-memory array. */ @@ -5695,6 +5726,15 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, if (! drop && ! user_to) goto end; } + /* Handle funcs array. */ + if (((handle_grant_struct(4, drop, user_from, user_to) && ! result) || + found) && ! result) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } } /* Handle tables table. 
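[Editor's note] The rename branch nearby has a related subtlety: because the user name is part of the hash key, a renamed element must be repositioned under its new key, and hash_update() has to be given the old key the element was stored under. With a standard C++17 container the equivalent operation is an extract-and-reinsert; the sketch below uses invented key/value types and is only an analogy for the pattern, not the grant-table code.

    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main()
    {
      /* key: "user@host", value: some privilege bitmap */
      std::unordered_map<std::string, unsigned long> priv_hash=
        {{"alice@%", 0x5}, {"bob@localhost", 0x3}};

      /* Rename alice -> carol: the key changes, so the element must be
         re-filed under the new key; extract() keeps the mapped value. */
      auto node= priv_hash.extract("alice@%");
      if (!node.empty())
      {
        node.key()= "carol@%";
        priv_hash.insert(std::move(node));
      }

      for (const auto &entry : priv_hash)
        std::cout << entry.first << " -> " << entry.second << '\n';
      return 0;
    }
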
*/ @@ -7348,7 +7388,8 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length) if (ptr+1 < end) { uint cs_number= uint2korr(ptr); - thd_init_client_charset(thd, cs_number); + if (thd_init_client_charset(thd, cs_number)) + return 1; thd->update_charset(); } @@ -7451,7 +7492,8 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - thd_init_client_charset(thd, (uint) net->read_pos[8]); + if (thd_init_client_charset(thd, (uint) net->read_pos[8])) + return packet_error; thd->update_charset(); end= (char*) net->read_pos+32; } @@ -7999,7 +8041,8 @@ static int do_auth_once(THD *thd, LEX_STRING *auth_plugin_name, @retval 1 error */ -bool acl_authenticate(THD *thd, uint connect_errors, uint com_change_user_pkt_len) +bool acl_authenticate(THD *thd, uint connect_errors, + uint com_change_user_pkt_len) { int res= CR_OK; MPVIO_EXT mpvio; diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index a57bd41c7e6..218f1a6bab1 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -242,7 +242,7 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) if (str == end) { info->is_float = 1; // we can't use variable decimals here - return 1; + DBUG_RETURN(1); } DBUG_RETURN(0); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9f75ac4c2a9..0874ee16127 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2826,10 +2826,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, ("Found table '%s.%s' with different refresh version", table_list->db, table_list->table_name)); - /* Ignore FLUSH, but not name locks! */ + /* Ignore FLUSH and pending name locks, but not acquired name locks! */ if (flags & MYSQL_LOCK_IGNORE_FLUSH && !table->open_placeholder) { - DBUG_ASSERT(table->db_stat); /* Force close at once after usage */ thd->version= table->s->version; continue; diff --git a/sql/sql_class.h b/sql/sql_class.h index 90fe2bb49c1..87909c0e5a4 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2097,7 +2097,7 @@ public: /*TODO: this will be obsolete when we have support for 64 bit my_time_t */ inline bool is_valid_time() { - return (start_time < (time_t) MY_TIME_T_MAX); + return (IS_TIME_T_VALID_FOR_TIMESTAMP(start_time)); } void set_time_after_lock() { utime_after_lock= my_micro_time(); } ulonglong current_utime() { return my_micro_time(); } diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 1c2ae915259..def0c8dd951 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -780,8 +780,23 @@ void update_global_user_stats(THD *thd, bool create_user, time_t now) } -void thd_init_client_charset(THD *thd, uint cs_number) +/** + Set thread character set variables from the given ID + + @param thd thread handle + @param cs_number character set and collation ID + + @retval 0 OK; character_set_client, collation_connection and + character_set_results are set to the new value, + or to the default global values. + + @retval 1 error, e.g. the given ID is not supported by parser. + Corresponding SQL error is sent. 
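[Editor's note] The common thread of the sql_acl.cc hunks above and the thd_init_client_charset() body that follows is that a client-supplied character set is now validated before it becomes character_set_client: the parser can only work with character sets whose minimum character length is one byte, which rules out UCS-2, UTF-16 and UTF-32. A free-standing sketch of that predicate over a few well-known mbminlen values (hard-coded here for illustration rather than read from the server's CHARSET_INFO structures):

    #include <iostream>

    struct CharsetProps
    {
      const char *name;
      unsigned    mbminlen;   /* smallest encoded character, in bytes */
    };

    /* The parser scans query text byte-wise, so it needs mbminlen == 1. */
    static bool is_supported_parser_charset(const CharsetProps &cs)
    {
      return cs.mbminlen == 1;
    }

    int main()
    {
      const CharsetProps charsets[]=
        {{"latin1", 1}, {"utf8", 1}, {"ucs2", 2}, {"utf16", 2}, {"utf32", 4}};

      for (const CharsetProps &cs : charsets)
        std::cout << cs.name << ": "
                  << (is_supported_parser_charset(cs) ? "allowed" : "rejected")
                  << " as character_set_client\n";
      return 0;
    }
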
+*/ + +bool thd_init_client_charset(THD *thd, uint cs_number) { + CHARSET_INFO *cs; /* Use server character set and collation if - opt_character_set_client_handshake is not set @@ -790,10 +805,10 @@ void thd_init_client_charset(THD *thd, uint cs_number) - client character set doesn't exists in server */ if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !(cs= get_charset(cs_number, MYF(0))) || !my_strcasecmp(&my_charset_latin1, global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) + cs->name)) { thd->variables.character_set_client= global_system_variables.character_set_client; @@ -804,10 +819,18 @@ void thd_init_client_charset(THD *thd, uint cs_number) } else { + if (!is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + return true; + } thd->variables.character_set_results= thd->variables.collation_connection= - thd->variables.character_set_client; + thd->variables.character_set_client= cs; } + return false; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 901db600d0d..1286c56f30e 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -900,7 +900,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ query_cache_invalidate3(thd, table_list, 1); } - if ((changed && error <= 0) || + if (error <= 0 || thd->transaction.stmt.modified_non_trans_table || was_insert_delayed) { diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 3483a8226d4..84a31c3a20d 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1079,9 +1079,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),buffer(0),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1108,12 +1109,9 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? 
(uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; - /* Set of a stack for unget if long terminators */ - uint length=max(field_term_length,line_term_length)+1; + uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); @@ -1155,11 +1153,8 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error) - { - if (need_end_io_cache) - ::end_io_cache(&cache); - } + if (need_end_io_cache) + ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index a215644bce3..e8b94d11cd2 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1128,7 +1128,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, uint save_db_length= thd->db_length; char *save_db= thd->db; USER_CONN *save_user_connect= thd->user_connect; - Security_context save_security_ctx= *thd->security_ctx; + Security_context save_security_ctx= *thd->security_ctx; CHARSET_INFO *save_character_set_client= thd->variables.character_set_client; CHARSET_INFO *save_collation_connection= @@ -1136,8 +1136,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, CHARSET_INFO *save_character_set_results= thd->variables.character_set_results; + /* Ensure we don't free security_ctx->user in case we have to revert */ + thd->security_ctx->user= 0; + if (acl_authenticate(thd, 0, packet_length)) { + /* Free user if allocated by acl_authenticate */ x_free(thd->security_ctx->user); *thd->security_ctx= save_security_ctx; thd->user_connect= save_user_connect; diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 1c78f6a3613..15cae73844f 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,4 +1,5 @@ -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -761,6 +762,9 @@ static bool handle_list_of_fields(List_iterator<char> it, bool result; char *field_name; bool is_list_empty= TRUE; + int fields_handled = 0; + char* field_name_array[MAX_KEY]; + DBUG_ENTER("handle_list_of_fields"); while ((field_name= it++)) @@ -776,6 +780,25 @@ static bool handle_list_of_fields(List_iterator<char> it, result= TRUE; goto end; } + + /* + Check for duplicate fields in the list. + Assuming that there are not many fields in the partition key list. + If there were, it would be better to replace the for-loop + with a more efficient algorithm. + */ + + field_name_array[fields_handled] = field_name; + for (int i = 0; i < fields_handled; ++i) + { + if (my_strcasecmp(system_charset_info, + field_name_array[i], field_name) == 0) + { + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + } + fields_handled++; } if (is_list_empty) { diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 1a3c4f4f8ae..efe5eb43121 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2002, 2010, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2734,6 +2735,32 @@ void mysql_sql_stmt_close(THD *thd) } } + +class Set_longdata_error_handler : public Internal_error_handler +{ +public: + Set_longdata_error_handler(Prepared_statement *statement) + : stmt(statement) + { } + +public: + bool handle_error(uint sql_errno, + const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *) + { + stmt->state= Query_arena::ERROR; + stmt->last_errno= sql_errno; + strnmov(stmt->last_error, message, MYSQL_ERRMSG_SIZE); + + return TRUE; + } + +private: + Prepared_statement *stmt; +}; + + /** Handle long data in pieces from client. @@ -2790,16 +2817,19 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) param= stmt->param_array[param_number]; + Set_longdata_error_handler err_handler(stmt); + /* + Install handler that will catch any errors that can be generated + during execution of Item_param::set_longdata() and propagate + them to Statement::last_error. + */ + thd->push_internal_handler(&err_handler); #ifndef EMBEDDED_LIBRARY - if (param->set_longdata(packet, (ulong) (packet_end - packet))) + param->set_longdata(packet, (ulong) (packet_end - packet)); #else - if (param->set_longdata(thd->extra_data, thd->extra_length)) + param->set_longdata(thd->extra_data, thd->extra_length); #endif - { - stmt->state= Query_arena::ERROR; - stmt->last_errno= ER_OUTOFMEMORY; - sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); - } + thd->pop_internal_handler(); general_log_print(thd, thd->command, NullS); @@ -3261,6 +3291,13 @@ Prepared_statement::execute_loop(String *expanded_query, bool error; int reprepare_attempt= 0; + /* Check if we got an error when sending long data */ + if (state == Query_arena::ERROR) + { + my_message(last_errno, last_error, MYF(0)); + return TRUE; + } + if (set_parameters(expanded_query, packet, packet_end)) return TRUE; @@ -3501,12 +3538,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) status_var_increment(thd->status_var.com_stmt_execute); - /* Check if we got an error when sending long data */ - if (state == Query_arena::ERROR) - { - my_message(last_errno, last_error, MYF(0)); - return TRUE; - } if (flags & (uint) IS_IN_USE) { my_error(ER_PS_NO_RECURSION, MYF(0)); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 95e48c531be..418c2985f85 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB & Sasha +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -21,6 +22,7 @@ #include "log_event.h" #include "rpl_filter.h" #include <my_dir.h> +#include "debug_sync.h" int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; @@ -544,8 +546,10 @@ impossible position"; while (!net->error && net->vio != 0 && !thd->killed) { + my_off_t prev_pos= pos; while (!(error = Log_event::read_log_event(&log, packet, log_lock))) { + prev_pos= my_b_tell(&log); #ifndef DBUG_OFF if (max_binlog_dump_events && !left_events--) { @@ -556,6 +560,20 @@ impossible position"; } #endif + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + const char act[]= + "now " + "wait_for signal.continue"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(current_thd, + STRING_WITH_LEN(act))); + } + }); + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) { binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] & @@ -572,6 +590,14 @@ impossible position"; goto err; } + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + } + }); + DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] )); if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) @@ -590,8 +616,13 @@ impossible position"; here we were reading binlog that was not closed properly (as a result of a crash ?). treat any corruption as EOF */ - if (binlog_can_be_corrupted && error != LOG_READ_MEM) + if (binlog_can_be_corrupted && + error != LOG_READ_MEM && error != LOG_READ_EOF) + { + my_b_seek(&log, prev_pos); error=LOG_READ_EOF; + } + /* TODO: now that we are logging the offset, check to make sure the recorded offset and the actual match. diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8c5b881f41e..287f08bad44 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -289,57 +289,60 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, } -/* +/** Fix fields referenced from inner selects. - SYNOPSIS - fix_inner_refs() - thd Thread handle - all_fields List of all fields used in select - select Current select - ref_pointer_array Array of references to Items used in current select - group_list GROUP BY list (is NULL by default) + @param thd Thread handle + @param all_fields List of all fields used in select + @param select Current select + @param ref_pointer_array Array of references to Items used in current select + @param group_list GROUP BY list (is NULL by default) - DESCRIPTION - The function serves 3 purposes - adds fields referenced from inner - selects to the current select list, resolves which class to use - to access referenced item (Item_ref of Item_direct_ref) and fixes - references (Item_ref objects) to these fields. + @details + The function serves 3 purposes - If a field isn't already in the select list and the ref_pointer_array + - adds fields referenced from inner query blocks to the current select list + + - Decides which class to use to reference the items (Item_ref or + Item_direct_ref) + + - fixes references (Item_ref objects) to these fields. + + If a field isn't already on the select list and the ref_pointer_array is provided then it is added to the all_fields list and the pointer to it is saved in the ref_pointer_array. 
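[Editor's note] Returning for a moment to the sql_repl.cc hunk above: the prev_pos / my_b_seek() pair implements a simple "remember where the last complete event ended, and rewind there if the next read looks truncated" rule, so a binlog still being written (or cut short by a crash) is treated as ending at the last full event. A generic sketch of the same idea over a length-prefixed record file follows; the file name and record format are made up, and this is not the binlog code itself.

    #include <cstdint>
    #include <fstream>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
      std::ifstream log("events.bin", std::ios::binary);   /* hypothetical file */
      std::vector<std::string> events;

      while (log)
      {
        /* Remember the position just after the last complete record. */
        std::streampos prev_pos= log.tellg();

        uint32_t len= 0;
        log.read(reinterpret_cast<char*>(&len), sizeof(len));
        std::string body(len, '\0');
        if (log)
          log.read(&body[0], len);

        if (!log)
        {
          /* Truncated header or body: rewind to the last good position
             and treat the remainder as end-of-file. */
          log.clear();
          log.seekg(prev_pos);
          break;
        }
        events.push_back(body);
      }
      std::cout << "complete events read: " << events.size() << '\n';
      return 0;
    }
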
The class to access the outer field is determined by the following rules: - 1. If the outer field isn't used under an aggregate function - then the Item_ref class should be used. - 2. If the outer field is used under an aggregate function and this - function is aggregated in the select where the outer field was - resolved or in some more inner select then the Item_direct_ref - class should be used. - It used used also if we are grouping by a subquery that refers - this outer field. + + -#. If the outer field isn't used under an aggregate function then the + Item_ref class should be used. + + -#. If the outer field is used under an aggregate function and this + function is, in turn, aggregated in the query block where the outer + field was resolved or some query nested therein, then the + Item_direct_ref class should be used. Also it should be used if we are + grouping by a subquery containing the outer field. + The resolution is done here and not at the fix_fields() stage as - it can be done only after sum functions are fixed and pulled up to - selects where they are have to be aggregated. + it can be done only after aggregate functions are fixed and pulled up to + selects where they are to be aggregated. + When the class is chosen it substitutes the original field in the Item_outer_ref object. After this we proceed with fixing references (Item_outer_ref objects) to this field from inner subqueries. - RETURN - TRUE an error occured - FALSE ok -*/ + @return Status + @retval true An error occured. + @retval false OK. + */ bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, Item **ref_pointer_array) { Item_outer_ref *ref; - bool res= FALSE; - bool direct_ref= FALSE; /* Mark the references from the inner_refs_list that are occurred in @@ -356,6 +359,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, while ((ref= ref_it++)) { + bool direct_ref= false; Item *item= ref->outer_ref; Item **item_ref= ref->ref; Item_ref *new_ref; @@ -414,7 +418,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, return TRUE; thd->used_tables|= item->used_tables(); } - return res; + return false; } /** @@ -966,7 +970,7 @@ JOIN::optimize() If all items were resolved by opt_sum_query, there is no need to open any tables. 
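[Editor's note] One small but real fix in fix_inner_refs() above is that direct_ref used to be declared outside the loop, so a value computed for one outer reference could leak into the decision for the next one; moving the declaration inside the loop makes each iteration start from false. A tiny illustration of the difference, with made-up data rather than real Item trees:

    #include <iostream>
    #include <vector>

    int main()
    {
      /* Pretend each entry records whether that reference sits under an
         aggregate function that requires the "direct ref" treatment. */
      const std::vector<bool> needs_direct= {true, false, false};

      for (size_t i= 0; i < needs_direct.size(); i++)
      {
        bool direct_ref= false;            /* loop-local: reset every iteration */
        if (needs_direct[i])
          direct_ref= true;
        std::cout << "ref " << i << ": "
                  << (direct_ref ? "direct" : "plain") << '\n';
      }
      /* Had direct_ref been declared before the loop, reference 0 would have
         flipped it to true and references 1 and 2 would wrongly stay "direct". */
      return 0;
    }
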
*/ - if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) + if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { @@ -1920,7 +1924,11 @@ JOIN::exec() if (!curr_join->sort_and_group && curr_join->const_tables != curr_join->tables) curr_join->join_tab[curr_join->const_tables].sorted= 0; - if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) + + Procedure *save_proc= curr_join->procedure; + tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0); + curr_join->procedure= save_proc; + if (tmp_error) { error= tmp_error; DBUG_VOID_RETURN; @@ -3304,6 +3312,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, @param field Field used in comparision @param eq_func True if we used =, <=> or IS NULL @param value Value used for comparison with field + @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates @@ -3396,26 +3405,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, eq_func is NEVER true when num_values > 1 */ if (!eq_func) - { - /* - Additional optimization: if we're processing - "t.key BETWEEN c1 AND c1" then proceed as if we were processing - "t.key = c1". - TODO: This is a very limited fix. A more generic fix is possible. - There are 2 options: - A) Make equality propagation code be able to handle BETWEEN - (including cases like t1.key BETWEEN t2.key AND t3.key) - B) Make range optimizer to infer additional "t.key = c" equalities - and use them in equality propagation process (see details in - OptimizerKBAndTodo) - */ - if ((cond->functype() != Item_func::BETWEEN) || - ((Item_func_between*) cond)->negated || - !value[0]->eq(value[1], field->binary())) - return; - eq_func= TRUE; - } - + return; if (field->result_type() == STRING_RESULT) { if ((*value)->result_type() != STRING_RESULT) @@ -3630,9 +3620,65 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, case Item_func::OPTIMIZE_KEY: { Item **values; - // BETWEEN, IN, NE - if (is_local_field (cond_func->key_item()) && - !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) + /* + Build list of possible keys for 'a BETWEEN low AND high'. + It is handled similar to the equivalent condition + 'a >= low AND a <= high': + */ + if (cond_func->functype() == Item_func::BETWEEN) + { + Item_field *field_item; + bool equal_func= FALSE; + uint num_values= 2; + values= cond_func->arguments(); + + bool binary_cmp= (values[0]->real_item()->type() == Item::FIELD_ITEM) + ? ((Item_field*)values[0]->real_item())->field->binary() + : TRUE; + + /* + Additional optimization: If 'low = high': + Handle as if the condition was "t.key = low". 
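[Editor's note] The new BETWEEN handling treats "a BETWEEN low AND high" as the pair of range conditions a >= low AND a <= high for key selection, and collapses it to an equality only when the predicate is not negated and both bounds compare equal. The helper below sketches just that classification in isolation; the names and plain long bounds are invented for the example, whereas the real code works on Item trees and field collations.

    #include <iostream>

    /* "field BETWEEN low AND high" may be treated as "field = low" only when
       it is not negated and the two bounds are equal; otherwise it contributes
       the range conditions field >= low AND field <= high. */
    static bool between_is_equality(long low, long high, bool negated)
    {
      return !negated && low == high;
    }

    int main()
    {
      struct { long low, high; bool negated; } cases[]=
        {{5, 5, false}, {1, 9, false}, {5, 5, true}};

      for (const auto &c : cases)
        std::cout << (c.negated ? "NOT BETWEEN " : "BETWEEN ")
                  << c.low << " AND " << c.high << " -> "
                  << (between_is_equality(c.low, c.high, c.negated)
                      ? "equality lookup" : "no equality collapse")
                  << '\n';
      return 0;
    }
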
+ */ + if (!((Item_func_between*)cond_func)->negated && + values[1]->eq(values[2], binary_cmp)) + { + equal_func= TRUE; + num_values= 1; + } + + /* + Append keys for 'field <cmp> value[]' if the + condition is of the form:: + '<field> BETWEEN value[1] AND value[2]' + */ + if (is_local_field(values[0])) + { + field_item= (Item_field *) (values[0]->real_item()); + add_key_equal_fields(key_fields, *and_level, cond_func, + field_item, equal_func, &values[1], + num_values, usable_tables, sargables); + } + /* + Append keys for 'value[0] <cmp> field' if the + condition is of the form: + 'value[0] BETWEEN field1 AND field2' + */ + for (uint i= 1; i <= num_values; i++) + { + if (is_local_field(values[i])) + { + field_item= (Item_field *) (values[i]->real_item()); + add_key_equal_fields(key_fields, *and_level, cond_func, + field_item, equal_func, values, + 1, usable_tables, sargables); + } + } + } // if ( ... Item_func::BETWEEN) + + // IN, NE + else if (is_local_field (cond_func->key_item()) && + !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) { values= cond_func->arguments()+1; if (cond_func->functype() == Item_func::NE_FUNC && @@ -3646,21 +3692,6 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, cond_func->argument_count()-1, usable_tables, sargables); } - if (cond_func->functype() == Item_func::BETWEEN) - { - values= cond_func->arguments(); - for (uint i= 1 ; i < cond_func->argument_count() ; i++) - { - Item_field *field_item; - if (is_local_field (cond_func->arguments()[i])) - { - field_item= (Item_field *) (cond_func->arguments()[i]->real_item()); - add_key_equal_fields(key_fields, *and_level, cond_func, - field_item, 0, values, 1, usable_tables, - sargables); - } - } - } break; } case Item_func::OPTIMIZE_OP: @@ -12680,22 +12711,21 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_ENTER("end_send"); if (!end_of_records) { - int error; if (join->having && join->having->val_int() == 0) DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - error=0; if (join->procedure) - error=join->procedure->send_row(join->procedure_fields_list); - else if (join->do_send_rows) { - if ((error= join->result->send_data(*join->fields)) < 0) - { - /* row was not accepted. Don't count it */ - DBUG_RETURN(NESTED_LOOP_OK); - } + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + if (join->do_send_rows) + { + int error; + /* result < 0 if row was not accepted and should not be counted */ + if ((error= join->result->send_data(*join->fields))) + DBUG_RETURN(error < 0 ? 
NESTED_LOOP_OK : NESTED_LOOP_ERROR); } - if (error) - DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (++join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { @@ -13195,6 +13225,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) return 0; // keep test } +/** + Extract a condition that can be checked after reading given table + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. + - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. +*/ static COND * make_cond_for_table(COND *cond, table_map tables, table_map used_table) @@ -13682,12 +13748,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, { int ref_key; uint ref_key_parts; - int order_direction; + int order_direction= 0; uint used_key_parts; TABLE *table=tab->table; SQL_SELECT *select=tab->select; key_map usable_keys; QUICK_SELECT_I *save_quick= 0; + int best_key= -1; DBUG_ENTER("test_if_skip_sort_order"); LINT_INIT(ref_key_parts); @@ -13791,13 +13858,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, new_ref_key_map.clear_all(); // Force the creation of quick select new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key. + /* Reset quick; This will be restored in 'use_filesort' if needed */ + select->quick= 0; if (select->test_quick_select(tab->join->thd, new_ref_key_map, 0, (tab->join->select_options & OPTION_FOUND_ROWS) ? HA_POS_ERROR : tab->join->unit->select_limit_cnt,0) <= 0) - DBUG_RETURN(0); + goto use_filesort; } ref_key= new_ref_key; } @@ -13822,7 +13891,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, int best_key_direction; ha_rows best_records; double read_time; - int best_key= -1; bool is_best_covering= FALSE; double fanout= 1; JOIN *join= tab->join; @@ -14040,72 +14108,21 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, tab->join->tables > tab->join->const_tables + 1) && ((unsigned) best_key != table->s->primary_key || !table->file->primary_key_is_clustered())) - DBUG_RETURN(0); + goto use_filesort; if (best_key >= 0) { - bool quick_created= FALSE; if (table->quick_keys.is_set(best_key) && best_key != ref_key) { key_map map; map.clear_all(); // Force the creation of quick select map.set_bit(best_key); // only best_key. 
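[Editor's note] The new make_cond_for_table() comment above describes the classic condition push-down rule: a predicate can be attached to (and checked right after) a table as soon as every table it refers to has been read, which the server tracks with table_map bitmaps. A stripped-down sketch of that test, using plain integers as the bitmaps and invented predicates (the real code walks Item trees and also honors PSEUDO_TABLE_BITS):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Predicate
    {
      std::string text;
      uint64_t    used_tables;   /* bit i set when table i is referenced */
    };

    /* A predicate is checkable once all tables it uses are available. */
    static bool checkable_now(const Predicate &p, uint64_t available_tables)
    {
      return (p.used_tables & ~available_tables) == 0;
    }

    int main()
    {
      const std::vector<Predicate> where_parts=
        {{"t1.a = 3",     1ULL << 0},
         {"t1.b = t2.b",  (1ULL << 0) | (1ULL << 1)},
         {"t3.c IS NULL", 1ULL << 2}};

      const uint64_t read_so_far= (1ULL << 0) | (1ULL << 1);   /* t1 and t2 read */

      for (const Predicate &p : where_parts)
        std::cout << p.text << ": "
                  << (checkable_now(p, read_so_far) ? "attach here" : "defer")
                  << '\n';
      return 0;
    }
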
- quick_created= - select->test_quick_select(join->thd, map, 0, - join->select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : - join->unit->select_limit_cnt, - 0) > 0; - } - if (!no_changes) - { - /* - If ref_key used index tree reading only ('Using index' in EXPLAIN), - and best_key doesn't, then revert the decision. - */ - if (!table->covering_keys.is_set(best_key)) - table->disable_keyread(); - if (!quick_created) - { - tab->index= best_key; - tab->read_first_record= best_key_direction > 0 ? - join_read_first:join_read_last; - tab->type=JT_NEXT; // Read with index_first(), index_next() - if (select && select->quick) - { - delete select->quick; - select->quick= 0; - } - if (table->covering_keys.is_set(best_key) && ! table->key_read) - table->enable_keyread(); - table->file->ha_index_or_rnd_end(); - if (join->select_options & SELECT_DESCRIBE) - { - tab->ref.key= -1; - tab->ref.key_parts= 0; - if (select_limit < table_records) - tab->limit= select_limit; - } - } - else if (tab->type != JT_ALL) - { - /* - We're about to use a quick access to the table. - We need to change the access method so as the quick access - method is actually used. - */ - DBUG_ASSERT(tab->select->quick); - tab->type=JT_ALL; - tab->use_quick=1; - tab->ref.key= -1; - tab->ref.key_parts=0; // Don't use ref key. - tab->read_first_record= join_init_read_record; - if (tab->is_using_loose_index_scan()) - join->tmp_table_param.precomputed_group_by= TRUE; - /* - TODO: update the number of records in join->best_positions[tablenr] - */ - } + select->quick= 0; + select->test_quick_select(join->thd, map, 0, + join->select_options & OPTION_FOUND_ROWS ? + HA_POS_ERROR : + join->unit->select_limit_cnt, + 0); } order_direction= best_key_direction; /* @@ -14118,61 +14135,155 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, saved_best_key_parts : best_key_parts; } else - DBUG_RETURN(0); + goto use_filesort; } check_reverse_order: + DBUG_ASSERT(order_direction != 0); + if (order_direction == -1) // If ORDER BY ... DESC { + int quick_type; if (select && select->quick) { /* Don't reverse the sort order, if it's already done. (In some cases test_if_order_by_key() can be called multiple times */ - if (!select->quick->reverse_sorted()) + if (select->quick->reverse_sorted()) + goto skipped_filesort; + + quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) { - QUICK_SELECT_DESC *tmp; - int quick_type= select->quick->get_type(); - if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || - quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + tab->limit= 0; + goto use_filesort; // Use filesort + } + } + } + + /* + Update query plan with access pattern for doing ordered access + according to what we have decided above. + */ + if (!no_changes) // We are allowed to update QEP + { + if (best_key >= 0) + { + bool quick_created= + (select && select->quick && select->quick!=save_quick); + + /* + If ref_key used index tree reading only ('Using index' in EXPLAIN), + and best_key doesn't, then revert the decision. 
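[Editor's note] The test_if_skip_sort_order() rewrite in this region replaces scattered DBUG_RETURN(0) exits with goto use_filesort / goto skipped_filesort, so the bookkeeping for the saved and the newly built quick select happens in exactly one place per outcome: keep the ordered quick and delete the saved one, or delete the candidate and restore the saved one. A reduced, generic sketch of that ownership pattern (invented types, not the optimizer's code):

    #include <cstdio>

    struct QuickSelect { int id; };

    /* Try to replace 'quick' with a better candidate. Whatever we do not
       keep must be deleted exactly once; funnelling every exit through the
       two labelled outcomes keeps that logic in one place. */
    static bool try_replace_quick(QuickSelect *&quick, bool build_ok, bool usable)
    {
      QuickSelect *save_quick= quick;            /* remember the original */

      if (!build_ok)
        goto use_filesort;

      quick= new QuickSelect{save_quick ? save_quick->id + 1 : 1};
      if (!usable)
        goto use_filesort;

      /* "skipped_filesort": keep the new quick, drop the saved one */
      if (save_quick != quick)
        delete save_quick;
      return true;

    use_filesort:                                /* restore the original quick */
      if (quick != save_quick)
      {
        delete quick;
        quick= save_quick;
      }
      return false;
    }

    int main()
    {
      QuickSelect *quick= new QuickSelect{7};
      std::printf("replaced: %d (id now %d)\n",
                  (int) try_replace_quick(quick, true, true), quick->id);
      std::printf("replaced: %d (id now %d)\n",
                  (int) try_replace_quick(quick, true, false), quick->id);
      delete quick;
      return 0;
    }
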
+ */ + if (!table->covering_keys.is_set(best_key)) + table->disable_keyread(); + if (!quick_created) + { + if (select) // Throw any existing quick select + select->quick= 0; // Cleanup either reset to save_quick, + // or 'delete save_quick' + tab->index= best_key; + tab->read_first_record= order_direction > 0 ? + join_read_first:join_read_last; + tab->type=JT_NEXT; // Read with index_first(), index_next() + + if (table->covering_keys.is_set(best_key) && ! table->key_read) + table->enable_keyread(); + table->file->ha_index_or_rnd_end(); + if (tab->join->select_options & SELECT_DESCRIBE) { - tab->limit= 0; - select->quick= save_quick; - DBUG_RETURN(0); // Use filesort + tab->ref.key= -1; + tab->ref.key_parts= 0; + if (select_limit < table->file->stats.records) + tab->limit= select_limit; } - + } + else if (tab->type != JT_ALL) + { + /* + We're about to use a quick access to the table. + We need to change the access method so as the quick access + method is actually used. + */ + DBUG_ASSERT(tab->select->quick); + tab->type=JT_ALL; + tab->use_quick=1; + tab->ref.key= -1; + tab->ref.key_parts=0; // Don't use ref key. + tab->read_first_record= join_init_read_record; + if (tab->is_using_loose_index_scan()) + tab->join->tmp_table_param.precomputed_group_by= TRUE; + /* + TODO: update the number of records in join->best_positions[tablenr] + */ + } + } // best_key >= 0 + + if (order_direction == -1) // If ORDER BY ... DESC + { + if (select && select->quick) + { + QUICK_SELECT_DESC *tmp; /* ORDER BY range_key DESC */ - tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), + tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), used_key_parts); - if (!tmp || tmp->error) - { - delete tmp; - select->quick= save_quick; + if (tmp && select->quick == save_quick) + save_quick= 0; // ::QUICK_SELECT_DESC consumed it + + if (!tmp || tmp->error) + { + delete tmp; tab->limit= 0; - DBUG_RETURN(0); // Reverse sort not supported - } - select->quick=tmp; + goto use_filesort; // Reverse sort failed -> filesort + } + select->quick= tmp; } - } - else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && - tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) - { - /* - SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC + else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && + tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) + { + /* + SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC - Use a traversal function that starts by reading the last row - with key part (A) and then traverse the index backwards. - */ - tab->read_first_record= join_read_last_key; - tab->read_record.read_record= join_read_prev_same; + Use a traversal function that starts by reading the last row + with key part (A) and then traverse the index backwards. + */ + tab->read_first_record= join_read_last_key; + tab->read_record.read_record= join_read_prev_same; + } } + else if (select && select->quick) + select->quick->sorted= 1; + + } // QEP has been modified + + /* + Cleanup: + We may have both a 'select->quick' and 'save_quick' (original) + at this point. Delete the one that we wan't use. 
+ */ + +skipped_filesort: + // Keep current (ordered) select->quick + if (select && save_quick != select->quick) + { + delete save_quick; + save_quick= NULL; } - else if (select && select->quick) - select->quick->sorted= 1; DBUG_RETURN(1); + +use_filesort: + // Restore original save_quick + if (select && select->quick != save_quick) + { + delete select->quick; + select->quick= save_quick; + } + DBUG_RETURN(0); } diff --git a/sql/sql_select.h b/sql/sql_select.h index ea06b26a229..b821181207c 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -635,7 +635,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, /* functions from opt_sum.cc */ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +int opt_sum_query(THD* thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index fa84be7bc17..9a023daf1ff 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3794,6 +3794,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) it.rewind(); /* To get access to new elements in basis list */ while ((db_name= it++)) { + /* db_name can be changed in make_table_list() func */ + LEX_STRING orig_db_name= *db_name; + #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!(check_access(thd,SELECT_ACL, db_name->str, &thd->col_access, 0, 1, with_i_schema) || @@ -3856,17 +3859,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) } int res; - LEX_STRING tmp_lex_string, orig_db_name; + LEX_STRING tmp_lex_string; /* Set the parent lex of 'sel' because it is needed by sel.init_query() which is called inside make_table_list. */ thd->no_warnings_for_error= 1; sel.parent_lex= lex; - /* db_name can be changed in make_table_list() func */ - if (!thd->make_lex_string(&orig_db_name, db_name->str, - db_name->length, FALSE)) - goto err; if (make_table_list(thd, &sel, db_name, table_name)) goto err; TABLE_LIST *show_table_list= sel.table_list.first; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index b359b2a7168..6f40d797a5c 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -52,11 +52,33 @@ bool String::real_alloc(uint32 arg_length) } -/* -** Check that string is big enough. Set string[alloc_length] to 0 -** (for C functions) -*/ +/** + Allocates a new buffer on the heap for this String. + + - If the String's internal buffer is privately owned and heap allocated, + one of the following is performed. + + - If the requested length is greater than what fits in the buffer, a new + buffer is allocated, data moved and the old buffer freed. + + - If the requested length is less or equal to what fits in the buffer, a + null character is inserted at the appropriate position. + - If the String does not keep a private buffer on the heap, such a buffer + will be allocated and the string copied accoring to its length, as found + in String::length(). + + For C compatibility, the new string buffer is null terminated. + + @param alloc_length The requested string size in characters, excluding any + null terminator. + + @retval false Either the copy operation is complete or, if the size of the + new buffer is smaller than the currently allocated buffer (if one exists), + no allocation occured. + + @retval true An error occured when attempting to allocate memory. 
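[Editor's note] The String::realloc() documentation added above describes grow-only behaviour: nothing is reallocated while the requested length still fits in the current buffer, and the buffer is always kept null-terminated for C callers. A minimal free-standing sketch of such a buffer follows; unlike the real String class it has no shared/alias or MEM_ROOT handling, and the names are invented.

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    /* A deliberately small grow-only string buffer:
       - realloc_buf() allocates only when the request exceeds capacity
       - the byte at buf[want] is always set to '\0' for C functions */
    struct GrowBuf
    {
      char  *buf;
      size_t length;      /* bytes in use, excluding the terminator */
      size_t alloced;     /* usable capacity, excluding the terminator */
    };

    static bool realloc_buf(GrowBuf *s, size_t want)
    {
      if (want <= s->alloced)
      {
        s->buf[want]= '\0';                 /* cheap path: just re-terminate */
        return false;
      }
      char *bigger= static_cast<char*>(std::realloc(s->buf, want + 1));
      if (!bigger)
        return true;                        /* allocation failed */
      s->buf= bigger;
      s->alloced= want;
      s->buf[want]= '\0';
      return false;
    }

    int main()
    {
      GrowBuf s= {static_cast<char*>(std::calloc(1, 8 + 1)), 0, 8};
      std::strcpy(s.buf, "abc");
      s.length= 3;
      realloc_buf(&s, 4);                   /* fits: no allocation happens */
      realloc_buf(&s, 100);                 /* grows: new buffer, contents kept */
      std::printf("'%s' capacity=%zu\n", s.buf, s.alloced);
      std::free(s.buf);
      return 0;
    }
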
+*/ bool String::realloc(uint32 alloc_length) { if (Alloced_length <= alloc_length) @@ -189,6 +211,17 @@ bool String::copy() return FALSE; } +/** + Copies the internal buffer from str. If this String has a private heap + allocated buffer where new data does not fit, a new buffer is allocated + before copying and the old buffer freed. Character set information is also + copied. + + @param str The string whose internal buffer is to be copied. + + @retval false Success. + @retval true Memory allocation failed. +*/ bool String::copy(const String &str) { if (alloc(str.str_length)) diff --git a/sql/sql_string.h b/sql/sql_string.h index 5da0d38f24d..84cb3e52378 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -120,6 +120,9 @@ public: inline const char *ptr() const { return Ptr; } inline char *c_ptr() { + DBUG_ASSERT(!alloced || !Ptr || !Alloced_length || + (Alloced_length >= (str_length + 1))); + if (!Ptr || Ptr[str_length]) /* Should be safe */ (void) realloc(str_length); return Ptr; @@ -148,6 +151,16 @@ public: Alloced_length=str.Alloced_length-offset; str_charset=str.str_charset; } + + + /** + Points the internal buffer to the supplied one. The old buffer is freed. + @param str Pointer to the new buffer. + @param arg_length Length of the new buffer in characters, excluding any + null character. + @param cs Character set to use for interpreting string data. + @note The new buffer will not be null terminated. + */ inline void set(char *str,uint32 arg_length, CHARSET_INFO *cs) { free(); diff --git a/sql/table.cc b/sql/table.cc index d52c6bb085d..dbb5d9ec499 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1,4 +1,5 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by |