Diffstat (limited to 'sql')
-rw-r--r-- | sql/event_db_repository.cc |   8
-rw-r--r-- | sql/field.cc               |   2
-rw-r--r-- | sql/ha_partition.cc        | 431
-rw-r--r-- | sql/ha_partition.h         |  35
-rw-r--r-- | sql/handler.cc             |  27
-rw-r--r-- | sql/handler.h              |  19
-rw-r--r-- | sql/hostname.cc            |   4
-rw-r--r-- | sql/item_cmpfunc.cc        |  12
-rw-r--r-- | sql/item_func.cc           |   3
-rw-r--r-- | sql/item_sum.cc            |   6
-rw-r--r-- | sql/item_timefunc.cc       |   6
-rw-r--r-- | sql/opt_range.cc           |   2
-rw-r--r-- | sql/opt_sum.cc             |  43
-rw-r--r-- | sql/slave.cc               |   6
-rw-r--r-- | sql/sql_class.cc           |   1
-rw-r--r-- | sql/sql_load.cc            |  11
-rw-r--r-- | sql/sql_select.cc          |  56
-rw-r--r-- | sql/sql_select.h           |   3
18 files changed, 463 insertions, 212 deletions
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 7473cf47188..a0765dc6d15 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -582,6 +582,14 @@ Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type, *table= tables.table; tables.table->use_all_columns(); + + if (table_intact.check(*table, &event_table_def)) + { + close_thread_tables(thd); + my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); } diff --git a/sql/field.cc b/sql/field.cc index 1ad5e408e07..3707c5b056f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5467,6 +5467,7 @@ double Field_year::val_real(void) longlong Field_year::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; + DBUG_ASSERT(field_length == 2 || field_length == 4); int tmp= (int) ptr[0]; if (field_length != 4) tmp%=100; // Return last 2 char @@ -5479,6 +5480,7 @@ longlong Field_year::val_int(void) String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + DBUG_ASSERT(field_length < 5); val_buffer->alloc(5); val_buffer->length(field_length); char *to=(char*) val_buffer->ptr(); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index bd8e0d397c4..d3858eae0d4 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -163,8 +163,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_handler_variables(); @@ -184,15 +183,44 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); init_handler_variables(); - DBUG_ASSERT(m_part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_handler_variables(); + m_part_info= part_info_arg; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -244,7 +272,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -252,6 +279,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; #ifdef 
DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -359,7 +391,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1848,7 +1881,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if (get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2064,18 +2097,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. */ @@ -2139,19 +2170,22 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL)))) DBUG_RETURN(TRUE); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2189,13 +2223,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. 
Create and write and close file to be used at open, delete_table and rename_table */ @@ -2213,14 +2249,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - SYNOPSIS - clear_handler_file() - - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2233,16 +2264,15 @@ void ha_partition::clear_handler_file() m_engine_array= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @param mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2280,6 +2310,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2351,100 +2382,164 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. */ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0)))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * 
tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. + */ + if (len_words != (tot_partition_words + tot_name_words + 4)) + goto err2; + VOID(my_close(file, MYF(0))); + m_file_buffer= file_buffer; // Will be freed in clear_handler_file() + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; + + DBUG_RETURN(false); + +err2: + my_free(file_buffer, MYF(0)); +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); + + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); for (i= 0; i < m_tot_parts; i++) { engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); + *(buff + i)); if (!engine_array[i]) - goto err3; + goto err; } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; - if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; - VOID(my_close(file, MYF(0))); - m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - if (!(m_engine_array= (plugin_ref*) my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) - goto err3; + goto err; for (i= 0; i < m_tot_parts; i++) m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); my_afree((gptr) engine_array); - if (!m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: + DBUG_RETURN(false); + +err: my_afree((gptr) engine_array); -err2: - my_free(file_buffer, MYF(0)); -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2491,13 +2586,13 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; - int error; + char *name_buffer_ptr; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2505,8 +2600,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) - DBUG_RETURN(1); + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) + DBUG_RETURN(error); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->reclength; @@ -2516,7 +2612,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2539,48 +2635,84 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. 
+ */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + goto err_handler; + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; - do + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + while (*(++file)) { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + /* MyISAM can have smaller ref_length for partitions with MAX_ROWS set */ set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. */ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2589,6 +2721,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2646,25 +2779,54 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory for ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then on the correct MEM_ROOT and also open them. 
+*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; + + if (new_handler && + new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + DBUG_RETURN((handler*) new_handler); } @@ -2695,7 +2857,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -3795,19 +3957,16 @@ end_dont_reset_start_part: void ha_partition::position(const uchar *record) { handler *file= m_file[m_last_part]; + uint pad_length; DBUG_ENTER("ha_partition::position"); file->position(record); int2store(ref, m_last_part); - memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, - (ref_length - PARTITION_BYTES_IN_POS)); - -#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES -#ifdef HAVE_purify - bzero(ref + PARTITION_BYTES_IN_POS + ref_length, - max_ref_length-ref_length); -#endif /* HAVE_purify */ -#endif + memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, file->ref_length); + pad_length= m_ref_length - PARTITION_BYTES_IN_POS - file->ref_length; + if (pad_length) + memset((ref + PARTITION_BYTES_IN_POS + file->ref_length), 0, pad_length); + DBUG_VOID_RETURN; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 76b91e160ca..cd90c4cc1d5 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -71,7 +81,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name plugin_ref *m_engine_array; // Array of types of the handlers handler **m_file; // Array of references to handler inst. @@ -133,6 +143,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. 
+ */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -169,11 +186,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. @@ -186,7 +198,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -205,6 +217,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share,
+ partition_info *part_info_arg,
+ ha_partition *clone_arg,
+ MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -275,7 +291,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 5968a78b587..718529fa5fc 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2037,22 +2037,29 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); + handler *new_handler= get_new_handler(table->s, mem_root, ht); /* Allocate handler->ref here because otherwise ha_open will allocate it on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) - return NULL; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) + new_handler= NULL; + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. + */ + if (new_handler && new_handler->ha_open(table, + name, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + return new_handler; } diff --git a/sql/handler.h b/sql/handler.h index 5f68bb6a8f8..03b0555ae86 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1,18 +1,19 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ /* Definitions for parameters to do with handler-routines */ @@ -56,7 +57,7 @@ a table with rnd_next() - We will see all rows (including deleted ones) - Row positions are 'table->s->db_record_offset' apart - If this flag is not set, filesort will do a postion() call for each matched + If this flag is not set, filesort will do a position() call for each matched row to be able to find the row later. */ #define HA_REC_NOT_IN_SEQ (1 << 3) @@ -1165,7 +1166,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/hostname.cc b/sql/hostname.cc index c8cf46383a9..9796755e9fb 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -183,7 +183,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors) &tmp_hostent,buff,sizeof(buff),&tmp_errno))) { DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno)); - return 0; + DBUG_RETURN(0); } if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2), &tmp_errno))) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 36ca5537eb5..23f081e1cc0 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -4000,13 +4000,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - if (!args[i]->null_value) // Skip NULL values - { - array->set(j,args[i]); - j++; - } - else - have_null= 1; + array->set(j,args[i]); + if (!args[i]->null_value) // Skip NULL values + j++; + else + have_null= 1; } if ((array->used_count= j)) array->sort(); diff --git a/sql/item_func.cc b/sql/item_func.cc index 595629b51be..6a9c47954b7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2122,10 +2122,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) if (!(null_value= (args[0]->null_value || args[1]->null_value || my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec, truncate, decimal_value) > 1))) - { - decimal_value->frac= decimals; return decimal_value; - } return 0; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 2a8aea68f7a..c62738abac0 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -612,17 +612,13 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) switch (hybrid_type= item->result_type()) { case INT_RESULT: - max_length= 20; - break; case DECIMAL_RESULT: + case STRING_RESULT: max_length= item->max_length; break; case REAL_RESULT: max_length= float_length(decimals); break; - case STRING_RESULT: - max_length= item->max_length; - break; case ROW_RESULT: default: DBUG_ASSERT(0); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 797b8cffb92..a96922b94a1 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -648,7 +648,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if 
(type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -657,7 +657,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -816,7 +816,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 9edd4f58f04..fd71166dc23 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1335,7 +1335,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually set the error flag. Note: there seems to be quite a few diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index b20a0c4fcbe..1eef3798908 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -211,6 +211,7 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) /** Substitutes constants for some COUNT(), MIN() and MAX() functions. + @param thd thread handler @param tables list of leaves of join table tree @param all_fields All fields to be returned @param conds WHERE clause @@ -228,9 +229,12 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) HA_ERR_KEY_NOT_FOUND on impossible conditions @retval HA_ERR_... if a deadlock or a lock wait timeout happens, for example + @retval + ER_... e.g. 
ER_SUBQUERY_NO_1_ROW */ -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) +int opt_sum_query(THD *thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds) { List_iterator_fast<Item> it(all_fields); int const_result= 1; @@ -242,6 +246,8 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Item *item; int error; + DBUG_ENTER("opt_sum_query"); + if (conds) where_tables= conds->used_tables(); @@ -269,7 +275,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + DBUG_RETURN(0); } else used_tables|= tl->table->map; @@ -297,7 +303,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { tl->table->file->print_error(error, MYF(0)); tl->table->in_use->fatal_error(); - return error; + DBUG_RETURN(error); } count*= tl->table->file->stats.records; } @@ -390,10 +396,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) if (error) { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); - return(error); + DBUG_RETURN(error); } removed_tables|= table->map; } @@ -437,6 +443,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; } } + + if (thd->is_error()) + DBUG_RETURN(thd->main_da.sql_errno()); + /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables @@ -446,7 +456,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) */ if (removed_tables && used_tables != removed_tables) const_result= 0; // We didn't remove all tables - return const_result; + DBUG_RETURN(const_result); } @@ -732,6 +742,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, if (is_null || (is_null_safe_eq && args[1]->is_null())) { + /* + If we have a non-nullable index, we cannot use it, + since set_null will be ignored, and we will compare uninitialized data. + */ + if (!part->field->real_maybe_null()) + DBUG_RETURN(false); part->field->set_null(); *key_ptr= (uchar) 1; } @@ -802,8 +818,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, @param[out] prefix_len Length of prefix for the search range @note - This function may set table->key_read to 1, which must be reset after - index is used! (This can only happen when function returns 1) + This function may set field->table->key_read to true, + which must be reset after index is used! 
+ (This can only happen when function returns 1) @retval 0 Index can not be used to optimize MIN(field)/MAX(field) @@ -818,7 +835,9 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not key field + return false; // Not key field + + DBUG_ENTER("find_key_for_maxmin"); TABLE *table= field->table; uint idx= 0; @@ -843,7 +862,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) - return 0; + DBUG_RETURN(false); /* Check whether the index component is partial */ Field *part_field= table->field[part->fieldnr-1]; @@ -892,12 +911,12 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ if (field->part_of_key.is_set(idx)) table->set_keyread(TRUE); - return 1; + DBUG_RETURN(true); } } } } - return 0; + DBUG_RETURN(false); } diff --git a/sql/slave.cc b/sql/slave.cc index 6d266245460..dd578064f24 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -97,7 +97,7 @@ static const char *reconnect_messages[SLAVE_RECON_ACT_MAX][SLAVE_RECON_MSG_MAX]= registration on master", "Reconnecting after a failed registration on master", "failed registering on master, reconnecting to try again, \ -log '%s' at postion %s", +log '%s' at position %s", "COM_REGISTER_SLAVE", "Slave I/O thread killed during or after reconnect" }, @@ -105,7 +105,7 @@ log '%s' at postion %s", "Waiting to reconnect after a failed binlog dump request", "Slave I/O thread killed while retrying master dump", "Reconnecting after a failed binlog dump request", - "failed dump request, reconnecting to try again, log '%s' at postion %s", + "failed dump request, reconnecting to try again, log '%s' at position %s", "COM_BINLOG_DUMP", "Slave I/O thread killed during or after reconnect" }, @@ -114,7 +114,7 @@ log '%s' at postion %s", "Slave I/O thread killed while waiting to reconnect after a failed read", "Reconnecting after a failed master event read", "Slave I/O thread: Failed reading log event, reconnecting to retry, \ -log '%s' at postion %s", +log '%s' at position %s", "", "Slave I/O thread killed during or after a reconnect done to recover from \ failed read" diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a61ce7bfd14..ae21a5335fd 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3383,6 +3383,7 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state) xs->xa_state=xa_state; xs->xid.set(xid); xs->in_thd=0; + xs->rm_error=0; res=my_hash_insert(&xid_cache, (uchar*)xs); } pthread_mutex_unlock(&LOCK_xid_cache); diff --git a/sql/sql_load.cc b/sql/sql_load.cc index c227fe69b62..b9b7bd74f6c 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1075,9 +1075,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1104,12 +1105,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? 
(uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; /* Set of a stack for unget if long terminators */ - uint length=max(field_term_length,line_term_length)+1; + uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); @@ -1151,7 +1150,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error && need_end_io_cache) + if (need_end_io_cache) ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index eb2559fc600..46e9ad242b3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -961,7 +961,7 @@ JOIN::optimize() If all items were resolved by opt_sum_query, there is no need to open any tables. */ - if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) + if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { @@ -1929,7 +1929,11 @@ JOIN::exec() if (!curr_join->sort_and_group && curr_join->const_tables != curr_join->tables) curr_join->join_tab[curr_join->const_tables].sorted= 0; - if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) + + Procedure *save_proc= curr_join->procedure; + tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0); + curr_join->procedure= save_proc; + if (tmp_error) { error= tmp_error; DBUG_VOID_RETURN; @@ -2211,7 +2215,7 @@ JOIN::exec() Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having, used_tables, - used_tables); + (table_map) 0); if (sort_table_cond) { if (!curr_table->select) @@ -12354,10 +12358,14 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), int error; if (join->having && join->having->val_int() == 0) DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - error=0; if (join->procedure) - error=join->procedure->send_row(join->procedure_fields_list); - else if (join->do_send_rows) + { + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + error=0; + if (join->do_send_rows) error=join->result->send_data(*join->fields); if (error) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ @@ -12844,6 +12852,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) return 0; // keep test } +/** + Extract a condition that can be checked after reading given table + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. 
+ - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. +*/ static COND * make_cond_for_table(COND *cond, table_map tables, table_map used_table) diff --git a/sql/sql_select.h b/sql/sql_select.h index 5350e28d8ff..dd810ae5156 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -612,7 +612,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, /* functions from opt_sum.cc */ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +int opt_sum_query(THD* thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); |
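The .par changes above replace the hard-coded offsets 4, 8 and 12 with the named constants PAR_WORD_SIZE, PAR_CHECKSUM_OFFSET, PAR_NUM_PARTS_OFFSET and PAR_ENGINES_OFFSET, and the new read_par_file() additionally verifies that the stored word count equals the four header words plus the engine and name arrays. The sketch below restates that layout and the XOR checksum rule as a small self-contained check; it is not MySQL code, and the helper names and the explicit little-endian decoding are assumptions made for illustration, standing in for int4store()/uint4korr().

#include <stdint.h>
#include <stddef.h>

#define PAR_WORD_SIZE        4   /* file length is counted in 32-bit words    */
#define PAR_CHECKSUM_OFFSET  4   /* XOR checksum over every word in the file  */
#define PAR_NUM_PARTS_OFFSET 8   /* total number of partitions                */
#define PAR_ENGINES_OFFSET   12  /* one legacy engine-type byte per partition */

static uint32_t word_at(const unsigned char *buf, size_t off)
{
  return (uint32_t) buf[off] | ((uint32_t) buf[off + 1] << 8) |
         ((uint32_t) buf[off + 2] << 16) | ((uint32_t) buf[off + 3] << 24);
}

/* Returns 0 if the buffer looks like a consistent .par file, 1 otherwise. */
static int check_par_header(const unsigned char *buf, size_t len,
                            uint32_t *num_parts)
{
  if (len < 4 * PAR_WORD_SIZE || len % PAR_WORD_SIZE)
    return 1;                                  /* shorter than the fixed header */

  uint32_t len_words = word_at(buf, 0);
  if ((size_t) len_words * PAR_WORD_SIZE != len)
    return 1;                                  /* size word disagrees with file */

  /*
    The writer stores the XOR of all words (with the checksum word still
    zero) at PAR_CHECKSUM_OFFSET, so XOR-ing every word must give 0.
  */
  uint32_t chksum = 0;
  for (uint32_t i = 0; i < len_words; i++)
    chksum ^= word_at(buf, (size_t) i * PAR_WORD_SIZE);
  if (chksum)
    return 1;

  *num_parts = word_at(buf, PAR_NUM_PARTS_OFFSET);
  uint32_t part_words = (*num_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;

  size_t name_len_off = PAR_ENGINES_OFFSET + (size_t) part_words * PAR_WORD_SIZE;
  if (name_len_off + PAR_WORD_SIZE > len)
    return 1;                                  /* engine array overruns the file */

  uint32_t name_words = (word_at(buf, name_len_off) + PAR_WORD_SIZE - 1) /
                        PAR_WORD_SIZE;

  /* 4 static words + engine array + name array, as read_par_file() verifies. */
  return len_words != 4 + part_words + name_words;
}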