Diffstat (limited to 'sql')
61 files changed, 1829 insertions, 533 deletions
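Illustrative sketch only, not the MySQL implementation: the field.cc hunks below add a helper, check_string_copy_error(), which reports the bytes around a "not well formed" or "cannot convert" position as a short hex-escaped excerpt inside the warning text. The standalone program below mirrors only that escaping rule (at most 6 source bytes, printable ASCII copied through, everything else rendered as \xNN, "..." appended when the data continues past the excerpt); render_error_fragment() and the printf() at the end are invented for the example, whereas the real code pushes a warning via push_warning_printf().

#include <cstdio>

static const char dig_vec_upper[]= "0123456789ABCDEF";

static void render_error_fragment(const char *pos, const char *end,
                                  char *out)         /* out: >= 64 bytes */
{
  const char *end_orig= end;
  if (end > pos + 6)                       /* excerpt at most 6 source bytes */
    end= pos + 6;

  char *t= out;
  for (; pos < end; pos++)
  {
    unsigned char c= (unsigned char) *pos;
    if (c >= 0x20 && c <= 0x7F)
      *t++= (char) c;                      /* printable ASCII copied as-is */
    else
    {
      *t++= '\\';                          /* other bytes become \xNN */
      *t++= 'x';
      *t++= dig_vec_upper[c >> 4];
      *t++= dig_vec_upper[c & 15];
    }
  }
  if (end_orig > end)                      /* data continued past excerpt */
  {
    *t++= '.'; *t++= '.'; *t++= '.';
  }
  *t= '\0';
}

int main()
{
  /* "ab", then a 4-byte UTF-8 sequence, then a tail that gets cut off */
  const char data[]= "ab\xF0\x9F\x98\x80zzzz";
  char buf[64];
  render_error_fragment(data, data + sizeof(data) - 1, buf);
  std::printf("Invalid string value: '%s' for column 't' at row 1\n", buf);
  return 0;
}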
diff --git a/sql/Makefile.am b/sql/Makefile.am index d9cea960915..6c685ba67c6 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -149,6 +149,8 @@ sql_yacc.cc: sql_yacc.yy sql_yacc.h: sql_yacc.yy sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) + @SED@ -e 's/__attribute__ ((__unused__))//' sql_yacc.cc > sql_yacc.cc-new + @MV@ sql_yacc.cc-new sql_yacc.cc @echo "Note: The following compile may take a long time." @echo "If it fails, re-run configure with --with-low-memory" $(CXXCOMPILE) $(LM_CFLAGS) -c $< diff --git a/sql/field.cc b/sql/field.cc index ec97bc92d24..1cfd0843179 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1399,6 +1399,7 @@ Field_str::Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, field_charset=charset; if (charset->state & MY_CS_BINSORT) flags|=BINARY_FLAG; + field_derivation= DERIVATION_IMPLICIT; } @@ -2425,6 +2426,13 @@ int Field_new_decimal::store_decimal(const my_decimal *decimal_value) } +int Field_new_decimal::store_time(TIME *ltime, timestamp_type t_type) +{ + my_decimal decimal_value; + return store_value(date2my_decimal(ltime, &decimal_value)); +} + + double Field_new_decimal::val_real(void) { double dbl; @@ -5399,7 +5407,21 @@ int Field_newdate::store_time(TIME *ltime,timestamp_type type) long tmp; int error= 0; if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + { tmp=ltime->year*16*32+ltime->month*32+ltime->day; + if ((my_bool)check_date(ltime, tmp, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[12]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_date((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATE, 1); + } + } else { tmp=0; @@ -5608,8 +5630,22 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type) structure always fit into DATETIME range. */ if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + { tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+ (ltime->hour*10000L+ltime->minute*100+ltime->second)); + if ((my_bool)check_date(ltime, tmp, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[19]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_datetime((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATETIME,1); + } + } else { tmp=0; @@ -5783,37 +5819,148 @@ void Field_datetime::sql_type(String &res) const ** A string may be varchar or binary ****************************************************************************/ +/* + Report "not well formed" or "cannot convert" error + after storing a character string info a field. 
+ + SYNOPSIS + check_string_copy_error() + field - Field + well_formed_error_pos - where not well formed data was first met + cannot_convert_error_pos - where a not-convertable character was first met + end - end of the string + + NOTES + As of version 5.0 both cases return the same error: + + "Invalid string value: 'xxx' for column 't' at row 1" + + Future versions will possibly introduce a new error message: + + "Cannot convert character string: 'xxx' for column 't' at row 1" + + RETURN + FALSE - If errors didn't happen + TRUE - If an error happened +*/ + +static bool +check_string_copy_error(Field_str *field, + const char *well_formed_error_pos, + const char *cannot_convert_error_pos, + const char *end) +{ + const char *pos, *end_orig; + char tmp[64], *t; + + if (!(pos= well_formed_error_pos) && + !(pos= cannot_convert_error_pos)) + return FALSE; + + end_orig= end; + set_if_smaller(end, pos + 6); + + for (t= tmp; pos < end; pos++) + { + if (((unsigned char) *pos) >= 0x20 && + ((unsigned char) *pos) <= 0x7F) + { + *t++= *pos; + } + else + { + *t++= '\\'; + *t++= 'x'; + *t++= _dig_vec_upper[((unsigned char) *pos) >> 4]; + *t++= _dig_vec_upper[((unsigned char) *pos) & 15]; + } + } + if (end_orig > end) + { + *t++= '.'; + *t++= '.'; + *t++= '.'; + } + *t= '\0'; + push_warning_printf(field->table->in_use, + field->table->in_use->abort_on_warning ? + MYSQL_ERROR::WARN_LEVEL_ERROR : + MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "string", tmp, field->field_name, + (ulong) field->table->in_use->row_count); + return TRUE; +} + + + +/* + Send a truncation warning or a truncation error + after storing a too long character string info a field. + + SYNOPSIS + report_data_too_long() + field - Field + + RETURN + N/A +*/ + +inline void +report_data_too_long(Field_str *field) +{ + if (field->table->in_use->abort_on_warning) + field->set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); + else + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); +} + + +/* + Test if the given string contains important data: + not spaces for character string, + or any data for binary string. + + SYNOPSIS + test_if_important_data() + cs Character set + str String to test + strend String end + + RETURN + FALSE - If string does not have important data + TRUE - If string has some important data +*/ + +static bool +test_if_important_data(CHARSET_INFO *cs, const char *str, const char *strend) +{ + if (cs != &my_charset_bin) + str+= cs->cset->scan(cs, str, strend, MY_SEQ_SPACES); + return (str < strend); +} + + /* Copy a string and fill with space */ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0, well_formed_error; - uint32 not_used; - char buff[STRING_BUFFER_USUAL_SIZE]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); uint copy_length; + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos; /* See the comment for Field_long::store(long long) */ DBUG_ASSERT(table->in_use == current_thd); - /* Convert character set if necessary */ - if (String::needs_conversion(length, cs, field_charset, ¬_used)) - { - uint conv_errors; - tmpstr.copy(from, length, cs, field_charset, &conv_errors); - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error= 2; - } - - /* Make sure we don't break a multibyte sequence or copy malformed data. 
*/ - copy_length= field_charset->cset->well_formed_len(field_charset, - from,from+length, - field_length/ - field_charset->mbmaxlen, - &well_formed_error); - memmove(ptr, from, copy_length); + copy_length= well_formed_copy_nchars(field_charset, + ptr, field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); /* Append spaces if the string was shorter than the field. */ if (copy_length < field_length) @@ -5821,32 +5968,23 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) field_length-copy_length, field_charset->pad_char); + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + /* Check if we lost any important data (anything in a binary string, or any non-space in others). */ - if ((copy_length < length) && table->in_use->count_cuted_fields) + if ((from_end_pos < from + length) && table->in_use->count_cuted_fields) { - if (binary()) - error= 2; - else + if (test_if_important_data(field_charset, from_end_pos, from + length)) { - const char *end=from+length; - from+= copy_length; - from+= field_charset->cset->scan(field_charset, from, end, - MY_SEQ_SPACES); - if (from != end) - error= 2; + report_data_too_long(this); + return 2; } } - if (error) - { - if (table->in_use->abort_on_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); - else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); - } - return error; + return 0; } @@ -6179,58 +6317,35 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table, int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) { - uint32 not_used, copy_length; - char buff[STRING_BUFFER_USUAL_SIZE]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); - int error_code= 0, well_formed_error; - enum MYSQL_ERROR::enum_warning_level level= MYSQL_ERROR::WARN_LEVEL_WARN; + uint copy_length; + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos; + + copy_length= well_formed_copy_nchars(field_charset, + ptr + length_bytes, field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); - /* Convert character set if necessary */ - if (String::needs_conversion(length, cs, field_charset, ¬_used)) - { - uint conv_errors; - tmpstr.copy(from, length, cs, field_charset, &conv_errors); - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error_code= WARN_DATA_TRUNCATED; - } - /* - Make sure we don't break a multibyte sequence - as well as don't copy a malformed data. 
- */ - copy_length= field_charset->cset->well_formed_len(field_charset, - from,from+length, - field_length/ - field_charset->mbmaxlen, - &well_formed_error); - memmove(ptr + length_bytes, from, copy_length); if (length_bytes == 1) *ptr= (uchar) copy_length; else int2store(ptr, copy_length); + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + // Check if we lost something other than just trailing spaces - if ((copy_length < length) && table->in_use->count_cuted_fields && - !error_code) + if ((from_end_pos < from + length) && table->in_use->count_cuted_fields) { - if (!binary()) - { - const char *end= from + length; - from+= copy_length; - from+= field_charset->cset->scan(field_charset, from, end, MY_SEQ_SPACES); - /* If we lost only spaces then produce a NOTE, not a WARNING */ - if (from == end) - level= MYSQL_ERROR::WARN_LEVEL_NOTE; - } - error_code= WARN_DATA_TRUNCATED; - } - if (error_code) - { - if (level == MYSQL_ERROR::WARN_LEVEL_WARN && - table->in_use->abort_on_warning) - error_code= ER_DATA_TOO_LONG; - set_warning(level, error_code, 1); + if (test_if_important_data(field_charset, from_end_pos, from + length)) + report_data_too_long(this); + else /* If we lost only spaces then produce a NOTE, not a WARNING */ + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); return 2; } return 0; @@ -6812,68 +6927,70 @@ void Field_blob::put_length(char *pos, uint32 length) int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0, well_formed_error; + uint copy_length, new_length; + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos, *tmp; + char buff[STRING_BUFFER_USUAL_SIZE]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + if (!length) { bzero(ptr,Field_blob::pack_length()); + return 0; } - else - { - bool was_conversion; - char buff[STRING_BUFFER_USUAL_SIZE]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); - uint copy_length; - uint32 not_used; - /* Convert character set if necessary */ - if ((was_conversion= String::needs_conversion(length, cs, field_charset, - ¬_used))) - { - uint conv_errors; - if (tmpstr.copy(from, length, cs, field_charset, &conv_errors)) - { - /* Fatal OOM error */ - bzero(ptr,Field_blob::pack_length()); - return -1; - } - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error= 2; - } - - copy_length= max_data_length(); - /* - copy_length is OK as last argument to well_formed_len as this is never - used to limit the length of the data. The cut of long data is done with - the 'min()' call below. 
- */ - copy_length= field_charset->cset->well_formed_len(field_charset, - from,from + - min(length, copy_length), - copy_length, - &well_formed_error); - if (copy_length < length) - error= 2; - Field_blob::store_length(copy_length); - if (was_conversion || table->copy_blobs || copy_length <= MAX_FIELD_WIDTH) - { // Must make a copy - if (from != value.ptr()) // For valgrind - { - value.copy(from,copy_length,charset()); - from=value.ptr(); - } + if (from == value.ptr()) + { + uint32 dummy_offset; + if (!String::needs_conversion(length, cs, field_charset, &dummy_offset)) + { + Field_blob::store_length(length); + bmove(ptr+packlength,(char*) &from,sizeof(char*)); + return 0; } - bmove(ptr+packlength,(char*) &from,sizeof(char*)); + if (tmpstr.copy(from, length, cs)) + goto oom_error; + from= tmpstr.ptr(); } - if (error) + + new_length= min(max_data_length(), field_charset->mbmaxlen * length); + if (value.alloc(new_length)) + goto oom_error; + + /* + "length" is OK as "nchars" argument to well_formed_copy_nchars as this + is never used to limit the length of the data. The cut of long data + is done with the new_length value. + */ + copy_length= well_formed_copy_nchars(field_charset, + (char*) value.ptr(), new_length, + cs, from, length, + length, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); + + Field_blob::store_length(copy_length); + tmp= value.ptr(); + bmove(ptr+packlength,(char*) &tmp,sizeof(char*)); + + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + + if (copy_length < length) { - if (table->in_use->abort_on_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); - else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + report_data_too_long(this); + return 2; } + return 0; + +oom_error: + /* Fatal OOM error */ + bzero(ptr,Field_blob::pack_length()); + return -1; } diff --git a/sql/field.h b/sql/field.h index b79c2bf77a8..709630218b2 100644 --- a/sql/field.h +++ b/sql/field.h @@ -302,6 +302,9 @@ public: virtual CHARSET_INFO *sort_charset(void) const { return charset(); } virtual bool has_charset(void) const { return FALSE; } virtual void set_charset(CHARSET_INFO *charset) { } + virtual enum Derivation derivation(void) const + { return DERIVATION_IMPLICIT; } + virtual void set_derivation(enum Derivation derivation) { } bool set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code, int cuted_increment); bool check_int(const char *str, int length, const char *int_end, @@ -373,6 +376,7 @@ public: class Field_str :public Field { protected: CHARSET_INFO *field_charset; + enum Derivation field_derivation; public: Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, @@ -387,6 +391,9 @@ public: uint size_of() const { return sizeof(*this); } CHARSET_INFO *charset(void) const { return field_charset; } void set_charset(CHARSET_INFO *charset) { field_charset=charset; } + enum Derivation derivation(void) const { return field_derivation; } + virtual void set_derivation(enum Derivation derivation_arg) + { field_derivation= derivation_arg; } bool binary() const { return field_charset == &my_charset_bin; } uint32 max_length() { return field_length; } friend class create_field; @@ -489,6 +496,7 @@ public: int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); + int store_time(TIME *ltime, timestamp_type t_type); int store_decimal(const my_decimal 
*); double val_real(void); longlong val_int(void); diff --git a/sql/filesort.cc b/sql/filesort.cc index b063b416191..38c941b30f4 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -109,6 +109,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, DBUG_PUSH(""); /* No DBUG here */ #endif FILESORT_INFO table_sort; + TABLE_LIST *tab= table->pos_in_table_list; + Item_subselect *subselect= tab ? tab->containing_subselect() : 0; /* Don't use table->sort in filesort as it is also used by QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end @@ -121,7 +123,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; - sort_keys= (uchar **) NULL; error= 1; bzero((char*) ¶m,sizeof(param)); param.sort_length= sortlength(thd, sortorder, s_length, &multi_byte_charset); @@ -202,13 +203,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, ulong old_memavl; ulong keys= memavl/(param.rec_length+sizeof(char*)); param.keys=(uint) min(records+1, keys); - if ((sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, - MYF(0)))) + if (table_sort.sort_keys || + (table_sort.sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, + MYF(0)))) break; old_memavl=memavl; if ((memavl=memavl/4*3) < min_sort_memory && old_memavl > min_sort_memory) memavl= min_sort_memory; } + sort_keys= table_sort.sort_keys; if (memavl < min_sort_memory) { my_error(ER_OUTOFMEMORY,MYF(ME_ERROR+ME_WAITTANG), @@ -235,8 +238,12 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } else { - if (!(buffpek=read_buffpek_from_file(&buffpek_pointers, maxbuffer))) + if (!table_sort.buffpek && table_sort.buffpek_len < maxbuffer && + !(table_sort.buffpek= + (byte *) read_buffpek_from_file(&buffpek_pointers, maxbuffer))) goto err; + buffpek= (BUFFPEK *) table_sort.buffpek; + table_sort.buffpek_len= maxbuffer; close_cached_file(&buffpek_pointers); /* Open cached file if it isn't open */ if (! my_b_inited(outfile) && @@ -269,8 +276,14 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, err: if (param.tmp_buffer) x_free(param.tmp_buffer); - x_free((gptr) sort_keys); - x_free((gptr) buffpek); + if (!subselect || !subselect->is_uncacheable()) + { + x_free((gptr) sort_keys); + table_sort.sort_keys= 0; + x_free((gptr) buffpek); + table_sort.buffpek= 0; + table_sort.buffpek_len= 0; + } close_cached_file(&tempfile); close_cached_file(&buffpek_pointers); if (my_b_inited(outfile)) @@ -296,18 +309,32 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, DBUG_POP(); /* Ok to DBUG */ #endif memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO)); - DBUG_PRINT("exit",("records: %ld",records)); + DBUG_PRINT("exit",("records: %ld", (long) records)); DBUG_RETURN(error ? 
HA_POS_ERROR : records); } /* filesort */ -void filesort_free_buffers(TABLE *table) +void filesort_free_buffers(TABLE *table, bool full) { if (table->sort.record_pointers) { my_free((gptr) table->sort.record_pointers,MYF(0)); table->sort.record_pointers=0; } + if (full) + { + if (table->sort.sort_keys ) + { + x_free((gptr) table->sort.sort_keys); + table->sort.sort_keys= 0; + } + if (table->sort.buffpek) + { + x_free((gptr) table->sort.buffpek); + table->sort.buffpek= 0; + table->sort.buffpek_len= 0; + } + } if (table->sort.addon_buf) { my_free((char *) table->sort.addon_buf, MYF(0)); @@ -947,7 +974,7 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, Put all room used by freed buffer to use in adjacent buffer. Note, that we can't simply distribute memory evenly between all buffers, because new areas must not overlap with old ones. - SYNOPSYS + SYNOPSIS reuse_freed_buff() queue IN list of non-empty buffers, without freed buffer reuse IN empty buffer diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc index 189c5facfab..113008c4885 100644 --- a/sql/ha_archive.cc +++ b/sql/ha_archive.cc @@ -682,7 +682,8 @@ int ha_archive::real_write_row(byte *buf, gzFile writer) } share->approx_file_size+= total_row_length; written= gzwrite(writer, buf, table->s->reclength); - DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength)); + DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu", (int) written, + table->s->reclength)); if (!delayed_insert || !bulk_insert) share->dirty= TRUE; @@ -822,7 +823,8 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) DBUG_ENTER("ha_archive::get_row"); read= gzread(file_to_read, buf, table->s->reclength); - DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength)); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read, + table->s->reclength)); if (read == Z_STREAM_ERROR) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 8be1e40f42d..6328803c743 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -557,8 +557,8 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) if (share->scheme) { DBUG_PRINT("info", - ("error: parse_url. Returning error code %d \ - freeing share->scheme %lx", error_num, share->scheme)); + ("error: parse_url. 
Returning error code %d freeing share->scheme 0x%lx", + error_num, (long) share->scheme)); my_free((gptr) share->scheme, MYF(0)); share->scheme= 0; } @@ -624,7 +624,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, MYF(0)); share->connect_string_length= table->s->connect_string.length; - DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); + DBUG_PRINT("info",("parse_url alloced share->scheme 0x%lx", (long) share->scheme)); /* remove addition of null terminator and store length @@ -1664,7 +1664,7 @@ void ha_federated::update_auto_increment(void) DBUG_ENTER("ha_federated::update_auto_increment"); thd->insert_id(mysql->last_used_con->insert_id); - DBUG_PRINT("info",("last_insert_id %d", auto_increment_value)); + DBUG_PRINT("info",("last_insert_id: %ld", (long) auto_increment_value)); DBUG_VOID_RETURN; } @@ -1917,8 +1917,8 @@ int ha_federated::delete_row(const byte *buf) deleted+= mysql->affected_rows; records-= mysql->affected_rows; DBUG_PRINT("info", - ("rows deleted %d rows deleted for all time %d", - int(mysql->affected_rows), deleted)); + ("rows deleted %ld rows deleted for all time %ld", + (long) mysql->affected_rows, (long) deleted)); DBUG_RETURN(0); } diff --git a/sql/ha_federated.h b/sql/ha_federated.h index 11e5a4f634f..c04ce09e75d 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -232,8 +232,7 @@ public: */ double scan_time() { - DBUG_PRINT("info", - ("records %d", records)); + DBUG_PRINT("info", ("records %ld", (long) records)); return (double)(records*1000); } /* diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index e9ccc0ccede..a1619d8e1a1 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -4215,7 +4215,7 @@ ha_innobase::rnd_pos( } if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); DBUG_RETURN(error); } @@ -4225,7 +4225,7 @@ ha_innobase::rnd_pos( error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); if (error) { - DBUG_PRINT("error", ("Got error: %ld", error)); + DBUG_PRINT("error", ("Got error: %d", error)); } change_active_index(keynr); diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 8e40105780b..19ec1b29da3 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1557,7 +1557,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } } DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", - found,recpos,minpos,length)); + (long) found, recpos, minpos, length)); if (recpos != minpos) { // Reserved space (Null bits?) 
bzero((char*) recinfo_pos,sizeof(*recinfo_pos)); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 33da88bbdd4..5e613a63303 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -108,7 +108,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) if (table->s->reclength != mean_rec_length && mean_rec_length) { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", + DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu", table->s->reclength, mean_rec_length)); goto err; } diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 957a3055e70..739fae79565 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1774,7 +1774,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) all pending update or delete operations should be sent to NDB */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); if (m_ops_pending) { if (m_transaction_on) @@ -2976,7 +2976,7 @@ int ha_ndbcluster::close_scan() Take over any pending transactions to the deleteing/updating transaction before closing the scan */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); @@ -3542,8 +3542,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; - DBUG_PRINT("enter", ("thd: %x, thd_ndb: %x, thd_ndb->lock_count: %d", - thd, thd_ndb, thd_ndb->lock_count)); + DBUG_PRINT("enter", ("thd: 0x%lx thd_ndb: 0x%lx thd_ndb->lock_count: %d", + (long) thd, (long) thd_ndb, thd_ndb->lock_count)); if (lock_type != F_UNLCK) { @@ -3847,7 +3847,8 @@ int ndbcluster_commit(THD *thd, bool all) while ((share= it++)) { pthread_mutex_lock(&share->mutex); - DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %d ", share->table_name, share->commit_count)); + DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %lu", + share->table_name, (ulong) share->commit_count)); share->commit_count= 0; share->commit_count_lock++; pthread_mutex_unlock(&share->mutex); @@ -4281,7 +4282,7 @@ int ha_ndbcluster::create(const char *name, if (packfrm(data, length, &pack_data, &pack_length)) DBUG_RETURN(2); - DBUG_PRINT("info", ("setFrm data=%x, len=%d", pack_data, pack_length)); + DBUG_PRINT("info", ("setFrm data: 0x%lx len: %d", (long) pack_data, pack_length)); tab.setFrm(pack_data, pack_length); my_free((char*)data, MYF(0)); my_free((char*)pack_data, MYF(0)); @@ -5237,7 +5238,7 @@ bool ndbcluster_init() } { char buf[128]; - my_snprintf(buf, sizeof(buf), "mysqld --server-id=%d", server_id); + my_snprintf(buf, sizeof(buf), "mysqld --server-id=%lu", server_id); g_ndb_cluster_connection->set_name(buf); } g_ndb_cluster_connection->set_optimized_node_selection @@ -5813,9 +5814,9 @@ static NDB_SHARE* get_share(const char *table_name) share->use_count++; DBUG_PRINT("share", - ("table_name: %s, length: %d, use_count: %d, commit_count: %d", + ("table_name: %s length: %d use_count: %d commit_count: %lu", share->table_name, share->table_name_length, share->use_count, - share->commit_count)); + (ulong) share->commit_count)); pthread_mutex_unlock(&ndbcluster_mutex); return share; } @@ -5862,14 +5863,14 @@ static int packfrm(const void *data, uint len, uint blob_len; frm_blob_struct* blob; DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: %x, len: %d", 
data, len)); + DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len)); error= 1; org_len= len; if (my_compress((byte*)data, &org_len, &comp_len)) goto err; - DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len)); DBUG_DUMP("compressed", (char*)data, org_len); error= 2; @@ -5889,7 +5890,7 @@ static int packfrm(const void *data, uint len, *pack_len= blob_len; error= 0; - DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); + DBUG_PRINT("exit", ("pack_data: 0x%lx, pack_len: %d", (long) *pack_data, *pack_len)); err: DBUG_RETURN(error); @@ -5903,13 +5904,13 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, byte *data; ulong complen, orglen, ver; DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: %x", pack_data)); + DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data)); complen= uint4korr((char*)&blob->head.complen); orglen= uint4korr((char*)&blob->head.orglen); ver= uint4korr((char*)&blob->head.ver); - DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", + DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu", ver,complen,orglen)); DBUG_DUMP("blob->data", (char*) blob->data, complen); @@ -5928,7 +5929,7 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, *unpack_data= data; *unpack_len= complen; - DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len)); + DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", (long) *unpack_data, *unpack_len)); DBUG_RETURN(0); } @@ -6521,7 +6522,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) my_thread_init(); DBUG_ENTER("ndb_util_thread"); - DBUG_PRINT("enter", ("ndb_cache_check_time: %d", ndb_cache_check_time)); + DBUG_PRINT("enter", ("ndb_cache_check_time: %lu", ndb_cache_check_time)); thd= new THD; /* note that contructor of THD uses DBUG_ */ THD_CHECK_SENTRY(thd); @@ -6550,7 +6551,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) &abstime); pthread_mutex_unlock(&LOCK_ndb_util_thread); - DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %d", + DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %lu", ndb_cache_check_time)); if (abort_loop) @@ -7447,7 +7448,7 @@ void ndb_serialize_cond(const Item *item, void *arg) if (context->expecting(Item::INT_ITEM)) { Item_int *int_item= (Item_int *) item; - DBUG_PRINT("info", ("value %d", int_item->value)); + DBUG_PRINT("info", ("value %ld", (long) int_item->value)); NDB_ITEM_QUALIFICATION q; q.value_type= Item::INT_ITEM; curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); @@ -7470,7 +7471,7 @@ void ndb_serialize_cond(const Item *item, void *arg) context->supported= FALSE; break; case Item::REAL_ITEM: - DBUG_PRINT("info", ("REAL_ITEM %s")); + DBUG_PRINT("info", ("REAL_ITEM")); if (context->expecting(Item::REAL_ITEM)) { Item_float *float_item= (Item_float *) item; @@ -7518,7 +7519,7 @@ void ndb_serialize_cond(const Item *item, void *arg) context->supported= FALSE; break; case Item::DECIMAL_ITEM: - DBUG_PRINT("info", ("DECIMAL_ITEM %s")); + DBUG_PRINT("info", ("DECIMAL_ITEM")); if (context->expecting(Item::DECIMAL_ITEM)) { Item_decimal *decimal_item= (Item_decimal *) item; diff --git a/sql/handler.cc b/sql/handler.cc index cff8213edec..3f0411d6198 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1533,7 +1533,7 @@ prev_insert_id(ulonglong nr, struct system_variables *variables) */ DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour " 
"auto_increment_offset: %lu", - nr, variables->auto_increment_offset)); + (ulong) nr, variables->auto_increment_offset)); return nr; } if (variables->auto_increment_increment == 1) @@ -1798,8 +1798,8 @@ void handler::print_error(int error, myf errflag) break; } case HA_ERR_NULL_IN_SPATIAL: - textno= ER_UNKNOWN_ERROR; - break; + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); + DBUG_VOID_RETURN; case HA_ERR_FOUND_DUPP_UNIQUE: textno=ER_DUP_UNIQUE; break; diff --git a/sql/item.cc b/sql/item.cc index d181edb4ecd..e9cd19c0d31 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -272,6 +272,34 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value) } +my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + longlong date; + if (get_date(<ime, TIME_FUZZY_DATE)) + { + my_decimal_set_zero(decimal_value); + return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + +my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + longlong date; + if (get_time(<ime)) + { + my_decimal_set_zero(decimal_value); + return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + double Item::val_real_from_decimal() { /* Note that fix_fields may not be called for Item_avg_field items */ @@ -295,6 +323,25 @@ longlong Item::val_int_from_decimal() return result; } +int Item::save_time_in_field(Field *field) +{ + TIME ltime; + if (get_time(<ime)) + return set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_TIME); +} + + +int Item::save_date_in_field(Field *field) +{ + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + return set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); +} + Item::Item(): rsize(0), name(0), orig_name(0), name_length(0), fixed(0), @@ -1148,6 +1195,28 @@ void Item_name_const::print(String *str) /* + need a special class to adjust printing : references to aggregate functions + must not be printed as refs because the aggregate functions that are added to + the front of select list are not printed as well. 
+*/ +class Item_aggregate_ref : public Item_ref +{ +public: + Item_aggregate_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, const char *field_name_arg) + :Item_ref(context_arg, item, table_name_arg, field_name_arg) {} + + void print (String *str) + { + if (ref) + (*ref)->print(str); + else + Item_ident::print(str); + } +}; + + +/* Move SUM items out from item tree and replace with reference SYNOPSIS @@ -1200,8 +1269,8 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Item *new_item, *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(new_item= new Item_ref(&thd->lex->current_select->context, - ref_pointer_array + el, 0, name))) + if (!(new_item= new Item_aggregate_ref(&thd->lex->current_select->context, + ref_pointer_array + el, 0, name))) return; // fatal_error is set fields.push_front(real_itm); thd->change_item_tree(ref, new_item); @@ -1622,7 +1691,7 @@ void Item_field::set_field(Field *field_par) db_name= field_par->table->s->db; alias_name_used= field_par->table->alias_name_used; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); - collation.set(field_par->charset(), DERIVATION_IMPLICIT); + collation.set(field_par->charset(), field_par->derivation()); fixed= 1; } @@ -3638,10 +3707,37 @@ bool Item_field::fix_fields(THD *thd, Item **reference) Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, ¬_used); - if (res != (Item **)not_found_item && (*res)->type() == Item::FIELD_ITEM) + if (res != (Item **)not_found_item) { - set_field((*((Item_field**)res))->field); - return 0; + if ((*res)->type() == Item::FIELD_ITEM) + { + /* + It's an Item_field referencing another Item_field in the select + list. + use the field from the Item_field in the select list and leave + the Item_field instance in place. + */ + set_field((*((Item_field**)res))->field); + return 0; + } + else + { + /* + It's not an Item_field in the select list so we must make a new + Item_ref to point to the Item in the select list and replace the + Item_field created by the parser with the new Item_ref. + */ + Item_ref *rf= new Item_ref(context, db_name,table_name,field_name); + if (!rf) + return 1; + thd->change_item_tree(reference, rf); + /* + Because Item_ref never substitutes itself with other items + in Item_ref::fix_fields(), we can safely use the original + pointer to it even after fix_fields() + */ + return rf->fix_fields(thd, reference) || rf->check_cols(1); + } } } if ((ret= fix_outer_field(thd, &from_field, reference)) < 0) @@ -4106,6 +4202,10 @@ void Item_field::make_field(Send_field *tmp_field) DBUG_ASSERT(tmp_field->table_name != 0); if (name) tmp_field->col_name=name; // Use user supplied name + if (table_name) + tmp_field->table_name= table_name; + if (db_name) + tmp_field->db_name= db_name; } diff --git a/sql/item.h b/sql/item.h index 0cfb0b01fd8..63d89113ec1 100644 --- a/sql/item.h +++ b/sql/item.h @@ -27,19 +27,7 @@ class Item_field; /* "Declared Type Collation" A combination of collation and its derivation. 
-*/ -enum Derivation -{ - DERIVATION_IGNORABLE= 5, - DERIVATION_COERCIBLE= 4, - DERIVATION_SYSCONST= 3, - DERIVATION_IMPLICIT= 2, - DERIVATION_NONE= 1, - DERIVATION_EXPLICIT= 0 -}; - -/* Flags for collation aggregation modes: MY_COLL_ALLOW_SUPERSET_CONV - allow conversion to a superset MY_COLL_ALLOW_COERCIBLE_CONV - allow conversion of a coercible value @@ -617,9 +605,14 @@ public: my_decimal *val_decimal_from_real(my_decimal *decimal_value); my_decimal *val_decimal_from_int(my_decimal *decimal_value); my_decimal *val_decimal_from_string(my_decimal *decimal_value); + my_decimal *val_decimal_from_date(my_decimal *decimal_value); + my_decimal *val_decimal_from_time(my_decimal *decimal_value); longlong val_int_from_decimal(); double val_real_from_decimal(); + int save_time_in_field(Field *field); + int save_date_in_field(Field *field); + virtual Field *get_tmp_table_field() { return 0; } /* This is also used to create fields in CREATE ... SELECT: */ virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } @@ -1959,6 +1952,16 @@ public: class Item_in_subselect; + +/* + An object of this class: + - Converts val_XXX() calls to ref->val_XXX_result() calls, like Item_ref. + - Sets owner->was_null=TRUE if it has returned a NULL value from any + val_XXX() function. This allows to inject an Item_ref_null_helper + object into subquery and then check if the subquery has produced a row + with NULL value. +*/ + class Item_ref_null_helper: public Item_ref { protected: diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9a400d60ae6..bc31a7203a4 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -786,9 +786,41 @@ longlong Item_in_optimizer::val_int() { DBUG_ASSERT(fixed == 1); cache->store(args[0]); + if (cache->null_value) { - null_value= 1; + if (((Item_in_subselect*)args[1])->is_top_level_item()) + { + /* + We're evaluating "NULL IN (SELECT ...)". The result can be NULL or + FALSE, and we can return one instead of another. Just return NULL. + */ + null_value= 1; + } + else + { + if (!((Item_in_subselect*)args[1])->is_correlated && + result_for_null_param != UNKNOWN) + { + /* Use cached value from previous execution */ + null_value= result_for_null_param; + } + else + { + /* + We're evaluating "NULL IN (SELECT ...)". The result is: + FALSE if SELECT produces an empty set, or + NULL otherwise. + We disable the predicates we've pushed down into subselect, run the + subselect and see if it has produced any rows. 
+ */ + ((Item_in_subselect*)args[1])->enable_pushed_conds= FALSE; + longlong tmp= args[1]->val_bool_result(); + result_for_null_param= null_value= + !((Item_in_subselect*)args[1])->engine->no_rows(); + ((Item_in_subselect*)args[1])->enable_pushed_conds= TRUE; + } + } return 0; } bool tmp= args[1]->val_bool_result(); @@ -2196,7 +2228,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: 0x%lx", this)); + DBUG_PRINT("enter",("this: 0x%lx", (long) this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -2971,7 +3003,7 @@ longlong Item_is_not_null_test::val_int() if (!used_tables_cache) { owner->was_null|= (!cached_value); - DBUG_PRINT("info", ("cached :%d", cached_value)); + DBUG_PRINT("info", ("cached :%ld", (long) cached_value)); DBUG_RETURN(cached_value); } if (args[0]->is_null()) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index c8439cba303..acad1e51bc9 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -100,25 +100,44 @@ public: }; class Item_cache; +#define UNKNOWN ((my_bool)-1) + + +/* + Item_in_optimizer(left_expr, Item_in_subselect(...)) + + Item_in_optimizer is used to wrap an instance of Item_in_subselect. This + class does the following: + - Evaluate the left expression and store it in Item_cache_* object (to + avoid re-evaluating it many times during subquery execution) + - Shortcut the evaluation of "NULL IN (...)" to NULL in the cases where we + don't care if the result is NULL or FALSE. + + NOTE + It is not quite clear why the above listed functionality should be + placed into a separate class called 'Item_in_optimizer'. +*/ + class Item_in_optimizer: public Item_bool_func { protected: Item_cache *cache; bool save_cache; + /* + Stores the value of "NULL IN (SELECT ...)" for uncorrelated subqueries: + UNKNOWN - "NULL in (SELECT ...)" has not yet been evaluated + FALSE - result is FALSE + TRUE - result is NULL + */ + my_bool result_for_null_param; public: Item_in_optimizer(Item *a, Item_in_subselect *b): - Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), save_cache(0) + Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), + save_cache(0), result_for_null_param(UNKNOWN) {} bool fix_fields(THD *, Item **); bool fix_left(THD *thd, Item **ref); bool is_null(); - /* - Item_in_optimizer item is special boolean function. On value request - (one of val, val_int or val_str methods) it evaluate left expression - of IN by storing it value in cache item (one of Item_cache* items), - then it test cache is it NULL. If left expression (cache) is NULL then - Item_in_optimizer return NULL, else it evaluate Item_in_subselect. - */ longlong val_int(); void cleanup(); const char *func_name() const { return "<in_optimizer>"; } @@ -256,9 +275,11 @@ public: class Item_maxmin_subselect; /* + trigcond<param>(arg) ::= param? arg : TRUE + The class Item_func_trig_cond is used for guarded predicates which are employed only for internal purposes. - A guarded predicates is an object consisting of an a regular or + A guarded predicate is an object consisting of an a regular or a guarded predicate P and a pointer to a boolean guard variable g. A guarded predicate P/g is evaluated to true if the value of the guard g is false, otherwise it is evaluated to the same value that @@ -276,6 +297,10 @@ class Item_maxmin_subselect; Objects of this class are built only for query execution after the execution plan has been already selected. That's why this class needs only val_int out of generic methods. 
+ + Current uses of Item_func_trig_cond objects: + - To wrap selection conditions when executing outer joins + - To wrap condition that is pushed down into subquery */ class Item_func_trig_cond: public Item_bool_func @@ -1019,6 +1044,11 @@ public: /* Functions used by HAVING for rewriting IN subquery */ class Item_in_subselect; + +/* + This is like IS NOT NULL but it also remembers if it ever has + encountered a NULL. +*/ class Item_is_not_null_test :public Item_func_isnull { Item_in_subselect* owner; diff --git a/sql/item_func.cc b/sql/item_func.cc index 6e81b03d846..96b3cc3da98 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -964,7 +964,14 @@ longlong Item_func_unsigned::val_int() longlong value; int error; - if (args[0]->cast_to_int_type() != STRING_RESULT) + if (args[0]->cast_to_int_type() == DECIMAL_RESULT) + { + my_decimal tmp, *dec= args[0]->val_decimal(&tmp); + if (!(null_value= args[0]->null_value)) + my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &value); + return value; + } + else if (args[0]->cast_to_int_type() != STRING_RESULT) { value= args[0]->val_int(); null_value= args[0]->null_value; @@ -2894,6 +2901,20 @@ void Item_udf_func::cleanup() } +void Item_udf_func::print(String *str) +{ + str->append(func_name()); + str->append('('); + for (uint i=0 ; i < arg_count ; i++) + { + if (i != 0) + str->append(','); + args[i]->print_item_w_name(str); + } + str->append(')'); +} + + double Item_func_udf_float::val_real() { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_func.h b/sql/item_func.h index 01a90ae1744..4379c542e63 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -953,6 +953,7 @@ public: Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } bool is_expensive() { return 1; } + void print(String *str); }; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 0ad517609c9..bffecb3c84c 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -37,7 +37,7 @@ inline Item * and_items(Item* cond, Item *item) Item_subselect::Item_subselect(): Item_result_field(), value_assigned(0), thd(0), substitution(0), engine(0), old_engine(0), used_tables_cache(0), have_to_be_excluded(0), - const_item_cache(1), engine_changed(0), changed(0) + const_item_cache(1), engine_changed(0), changed(0), is_correlated(FALSE) { with_subselect= 1; reset(); @@ -54,7 +54,7 @@ void Item_subselect::init(st_select_lex *select_lex, { DBUG_ENTER("Item_subselect::init"); - DBUG_PRINT("enter", ("select_lex: 0x%x", (ulong) select_lex)); + DBUG_PRINT("enter", ("select_lex: 0x%lx", (long) select_lex)); unit= select_lex->master_unit(); if (unit->item) @@ -192,16 +192,16 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref) return res; } -bool Item_subselect::exec() +bool Item_subselect::exec(bool full_scan) { int res; - res= engine->exec(); + res= engine->exec(full_scan); if (engine_changed) { engine_changed= 0; - return exec(); + return exec(full_scan); } return (res); } @@ -391,6 +391,15 @@ enum Item_result Item_singlerow_subselect::result_type() const return engine->type(); } +/* + Don't rely on the result type to calculate field type. + Ask the engine instead. 
+*/ +enum_field_types Item_singlerow_subselect::field_type() const +{ + return engine->field_type(); +} + void Item_singlerow_subselect::fix_length_and_dec() { if ((max_columns= engine->cols()) == 1) @@ -441,13 +450,13 @@ bool Item_singlerow_subselect::null_inside() void Item_singlerow_subselect::bring_value() { - exec(); + exec(FALSE); } double Item_singlerow_subselect::val_real() { DBUG_ASSERT(fixed == 1); - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_real(); @@ -462,7 +471,7 @@ double Item_singlerow_subselect::val_real() longlong Item_singlerow_subselect::val_int() { DBUG_ASSERT(fixed == 1); - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_int(); @@ -476,7 +485,7 @@ longlong Item_singlerow_subselect::val_int() String *Item_singlerow_subselect::val_str(String *str) { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_str(str); @@ -491,7 +500,7 @@ String *Item_singlerow_subselect::val_str(String *str) my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_decimal(decimal_value); @@ -506,7 +515,7 @@ my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) bool Item_singlerow_subselect::val_bool() { - if (!exec() && !value->null_value) + if (!exec(FALSE) && !value->null_value) { null_value= 0; return value->val_bool(); @@ -557,7 +566,8 @@ bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit) Item_in_subselect::Item_in_subselect(Item * left_exp, st_select_lex *select_lex): - Item_exists_subselect(), optimizer(0), transformed(0), upper_item(0) + Item_exists_subselect(), optimizer(0), transformed(0), + enable_pushed_conds(TRUE), upper_item(0) { DBUG_ENTER("Item_in_subselect::Item_in_subselect"); left_expr= left_exp; @@ -602,7 +612,7 @@ void Item_exists_subselect::fix_length_and_dec() double Item_exists_subselect::val_real() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -613,7 +623,7 @@ double Item_exists_subselect::val_real() longlong Item_exists_subselect::val_int() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -624,7 +634,7 @@ longlong Item_exists_subselect::val_int() String *Item_exists_subselect::val_str(String *str) { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -637,7 +647,7 @@ String *Item_exists_subselect::val_str(String *str) my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -650,7 +660,7 @@ my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) bool Item_exists_subselect::val_bool() { DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(FALSE)) { reset(); return 0; @@ -667,7 +677,8 @@ double Item_in_subselect::val_real() */ DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); - if (exec()) + null_value= 0; + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -687,7 +698,8 @@ longlong Item_in_subselect::val_int() */ DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); - if (exec()) + null_value= 0; + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -707,7 +719,8 @@ String *Item_in_subselect::val_str(String *str) */ DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); - if (exec()) + null_value= 0; + if 
(exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -726,7 +739,8 @@ String *Item_in_subselect::val_str(String *str) bool Item_in_subselect::val_bool() { DBUG_ASSERT(fixed == 1); - if (exec()) + null_value= 0; + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -744,8 +758,9 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) method should not be used */ DBUG_ASSERT(0); + null_value= 0; DBUG_ASSERT(fixed == 1); - if (exec()) + if (exec(!enable_pushed_conds)) { reset(); null_value= 1; @@ -758,7 +773,55 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) } -/* Rewrite a single-column IN/ALL/ANY subselect. */ +/* + Rewrite a single-column IN/ALL/ANY subselect + + SYNOPSIS + Item_in_subselect::single_value_transformer() + join Join object of the subquery (i.e. 'child' join). + func Subquery comparison creator + + DESCRIPTION + Rewrite a single-column subquery using rule-based approach. The subquery + + oe $cmp$ (SELECT ie FROM ... WHERE subq_where ... HAVING subq_having) + + First, try to convert the subquery to scalar-result subquery in one of + the forms: + + - oe $cmp$ (SELECT MAX(...) ) // handled by Item_singlerow_subselect + - oe $cmp$ <max>(SELECT ...) // handled by Item_maxmin_subselect + + If that fails, the subquery will be handled with class Item_in_optimizer, + Inject the predicates into subquery, i.e. convert it to: + + - If the subquery has aggregates, GROUP BY, or HAVING, convert to + + SELECT ie FROM ... HAVING subq_having AND + trigcond(oe $cmp$ ref_or_null_helper<ie>) + + the addition is wrapped into trigger only when we want to distinguish + between NULL and FALSE results. + + - Otherwise (no aggregates/GROUP BY/HAVING) convert it to one of the + following: + + = If we don't need to distinguish between NULL and FALSE subquery: + + SELECT 1 FROM ... WHERE (oe $cmp$ ie) AND subq_where + + = If we need to distinguish between those: + + SELECT 1 FROM ... + WHERE subq_where AND trigcond((oe $cmp$ ie) OR (ie IS NULL)) + HAVING trigcond(<is_not_null_test>(ie)) + + RETURN + RES_OK - OK, either subquery was transformed, or appopriate + predicates where injected into it. + RES_REDUCE - The subquery was reduced to non-subquery + RES_ERROR - Error +*/ Item_subselect::trans_res Item_in_subselect::single_value_transformer(JOIN *join, @@ -891,8 +954,12 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; /* Add the left part of a subselect to a WHERE or HAVING clause of - the right part, e.g. SELECT 1 IN (SELECT a FROM t1) => - SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) + the right part, e.g. + + SELECT 1 IN (SELECT a FROM t1) => + + SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) + HAVING is used only if the right part contains a SUM function, a GROUP BY or a HAVING clause. */ @@ -907,10 +974,15 @@ Item_in_subselect::single_value_transformer(JOIN *join, ref_pointer_array, (char *)"<ref>", this->full_name())); -#ifdef CORRECT_BUT_TOO_SLOW_TO_BE_USABLE - if (!abort_on_null && left_expr->maybe_null) - item= new Item_cond_or(new Item_func_isnull(left_expr), item); -#endif + if (!abort_on_null && ((Item*)select_lex->item_list.head())->maybe_null) + { + /* + We can encounter "NULL IN (SELECT ...)". Wrap the added condition + within a trigger. 
+ */ + item= new Item_func_trig_cond(item, &enable_pushed_conds); + } + /* AND and comparison functions can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last @@ -939,19 +1011,19 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->item_list.push_back(new Item_int("Not_used", (longlong) 1, 21)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); + item= func->create(expr, item); if (!abort_on_null && orig_item->maybe_null) { - having= new Item_is_not_null_test(this, having); + having= + new Item_func_trig_cond(new Item_is_not_null_test(this, having), + &enable_pushed_conds); /* Item_is_not_null_test can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= (join->having ? - new Item_cond_and(having, join->having) : - having); + select_lex->having= join->having= having; select_lex->having_fix_field= 1; /* we do not check join->having->fixed, because Item_and (from @@ -962,12 +1034,15 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->having_fix_field= 0; if (tmp) DBUG_RETURN(RES_ERROR); + /* + NOTE: It is important that we add this "IS NULL" here, even when + orig_item can't be NULL. This is needed so that this predicate is + only used by ref[_or_null] analyzer (and, e.g. is not used by const + propagation). + */ item= new Item_cond_or(item, new Item_func_isnull(orig_item)); -#ifdef CORRECT_BUT_TOO_SLOW_TO_BE_USABLE - if (left_expr->maybe_null) - item= new Item_cond_or(new Item_func_isnull(left_expr), item); -#endif + item= new Item_func_trig_cond(item, &enable_pushed_conds); } item->name= (char *)in_additional_cond; /* @@ -994,13 +1069,14 @@ Item_in_subselect::single_value_transformer(JOIN *join, we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= - func->create(expr, + Item *new_having= + func->create(expr, new Item_ref_null_helper(&select_lex->context, this, select_lex->ref_pointer_array, (char *)"<no matter>", (char *)"<result>")); + new_having= new Item_func_trig_cond(new_having, &enable_pushed_conds); + select_lex->having= join->having= new_having; select_lex->having_fix_field= 1; /* @@ -1205,6 +1281,8 @@ Item_in_subselect::row_value_transformer(JOIN *join) where_item= and_items(where_item, item); } + if (where_item) + where_item= new Item_func_trig_cond(where_item, &enable_pushed_conds); /* AND can't be changed during fix_fields() we can assign select_lex->where here, and pass 0 as last @@ -1218,6 +1296,8 @@ Item_in_subselect::row_value_transformer(JOIN *join) if (having_item) { bool res; + having_item= new Item_func_trig_cond(having_item, &enable_pushed_conds); + select_lex->having= join->having= and_items(join->having, having_item); select_lex->having->top_level_item(); /* @@ -1434,6 +1514,27 @@ bool subselect_union_engine::is_executed() const } +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_union_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. + + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_union_engine::no_rows() +{ + /* Check if we got any rows when reading UNION result from temp. 
table: */ + return test(!unit->fake_select_lex->join->send_records); +} + void subselect_uniquesubquery_engine::cleanup() { DBUG_ENTER("subselect_uniquesubquery_engine::cleanup"); @@ -1499,32 +1600,58 @@ int subselect_uniquesubquery_engine::prepare() return 1; } -static Item_result set_row(List<Item> &item_list, Item *item, - Item_cache **row, bool *maybe_null) + +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_single_select_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. + + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_single_select_engine::no_rows() +{ + return !item->assigned(); +} + + +/* + makes storage for the output values for the subquery and calcuates + their data and column types and their nullability. +*/ +void subselect_engine::set_row(List<Item> &item_list, Item_cache **row) { - Item_result res_type= STRING_RESULT; Item *sel_item; List_iterator_fast<Item> li(item_list); + res_type= STRING_RESULT; + res_field_type= FIELD_TYPE_VAR_STRING; for (uint i= 0; (sel_item= li++); i++) { item->max_length= sel_item->max_length; res_type= sel_item->result_type(); + res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; item->unsigned_flag= sel_item->unsigned_flag; - *maybe_null= sel_item->maybe_null; + maybe_null= sel_item->maybe_null; if (!(row[i]= Item_cache::get_cache(res_type))) - return STRING_RESULT; // we should return something + return; row[i]->setup(sel_item); } if (item_list.elements > 1) res_type= ROW_RESULT; - return res_type; } void subselect_single_select_engine::fix_length_and_dec(Item_cache **row) { DBUG_ASSERT(row || select_lex->item_list.elements==1); - res_type= set_row(select_lex->item_list, item, row, &maybe_null); + set_row(select_lex->item_list, row); item->collation.set(row[0]->collation); if (cols() != 1) maybe_null= 0; @@ -1536,13 +1663,14 @@ void subselect_union_engine::fix_length_and_dec(Item_cache **row) if (unit->first_select()->item_list.elements == 1) { - res_type= set_row(unit->types, item, row, &maybe_null); + set_row(unit->types, row); item->collation.set(row[0]->collation); } else { - bool fake= 0; - res_type= set_row(unit->types, item, row, &fake); + bool maybe_null_saved= maybe_null; + set_row(unit->types, row); + maybe_null= maybe_null_saved; } } @@ -1552,7 +1680,11 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) DBUG_ASSERT(0); } -int subselect_single_select_engine::exec() +int init_read_record_seq(JOIN_TAB *tab); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); + +int subselect_single_select_engine::exec(bool full_scan) { DBUG_ENTER("subselect_single_select_engine::exec"); char const *save_where= thd->where; @@ -1590,7 +1722,43 @@ int subselect_single_select_engine::exec() if (!executed) { item->reset_value_registration(); + if (full_scan) + { + /* + We should not apply optimizations based on the condition that was + pushed down into the subquery. Those optimizations are ref[_or_null] + acceses. Change them to be full table scans. 
+ */ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if (tab->keyuse && tab->keyuse->outer_ref) + { + tab->read_first_record= init_read_record_seq; + tab->read_record.record= tab->table->record[0]; + tab->read_record.thd= join->thd; + tab->read_record.ref_length= tab->table->file->ref_length; + } + } + } + join->exec(); + + if (full_scan) + { + /* Enable the optimizations back */ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if (tab->keyuse && tab->keyuse->outer_ref) + { + tab->read_record.record= 0; + tab->read_record.ref_length= 0; + tab->read_first_record= join_read_always_key_or_null; + tab->read_record.read_record= join_read_next_same_or_null; + } + } + } executed= 1; thd->where= save_where; thd->lex->current_select= save_select; @@ -1601,29 +1769,159 @@ int subselect_single_select_engine::exec() DBUG_RETURN(0); } -int subselect_union_engine::exec() +int subselect_union_engine::exec(bool full_scan) { char const *save_where= thd->where; + /* + Ignore the full_scan parameter: the pushed down predicates are only used + for filtering, and the caller has disabled them if necessary. + */ int res= unit->exec(); thd->where= save_where; return res; } -int subselect_uniquesubquery_engine::exec() +/* + Search for at least on row satisfying select condition + + SYNOPSIS + subselect_uniquesubquery_engine::scan_table() + + DESCRIPTION + Scan the table using sequential access until we find at least one row + satisfying select condition. + + The result of this function (info about whether a row was found) is + stored in this->empty_result_set. + + RETURN + FALSE - OK + TRUE - Error +*/ + +int subselect_uniquesubquery_engine::scan_table() { - DBUG_ENTER("subselect_uniquesubquery_engine::exec"); int error; TABLE *table= tab->table; - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) + DBUG_ENTER("subselect_uniquesubquery_engine::scan_table"); + empty_result_set= TRUE; + + if (table->file->inited) + table->file->ha_index_end(); + + table->file->ha_rnd_init(1); + table->file->extra_opt(HA_EXTRA_CACHE, + current_thd->variables.read_buff_size); + table->null_row= 0; + for (;;) + { + error=table->file->rnd_next(table->record[0]); + if (error && error != HA_ERR_END_OF_FILE) + { + error= report_error(table, error); + break; + } + /* No more rows */ + if (table->status) + break; + + if (!cond || cond->val_int()) + { + empty_result_set= FALSE; + break; + } + } + + table->file->ha_rnd_end(); + DBUG_RETURN(error != 0); +} + + +/* + Copy ref key and check for null parts in it + + SYNOPSIS + subselect_uniquesubquery_engine::copy_ref_key() + + DESCRIPTION + Copy ref key and check for null parts in it. + + RETURN + FALSE - ok, index lookup key without keys copied. + TRUE - an error occured while copying the key +*/ + +bool subselect_uniquesubquery_engine::copy_ref_key() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::copy_ref_key"); + + for (store_key **copy= tab->ref.key_copy ; *copy ; copy++) { - if ((tab->ref.key_err= (*copy)->copy()) & 1) + tab->ref.key_err= (*copy)->copy(); + + /* + When there is a NULL part in the key we don't need to make index + lookup for such key thus we don't need to copy whole key. + If we later should do a sequential scan return OK. Fail otherwise. + + See also the comment for the subselect_uniquesubquery_engine::exec() + function. 
+ */ + null_keypart= (*copy)->null_key; + bool top_level= ((Item_in_subselect *) item)->is_top_level_item(); + if (null_keypart && !top_level) + break; + if ((tab->ref.key_err) & 1 || (null_keypart && top_level)) { - table->status= STATUS_NOT_FOUND; + tab->table->status= STATUS_NOT_FOUND; DBUG_RETURN(1); } } + DBUG_RETURN(0); +} + + +/* + Execute subselect + + SYNOPSIS + subselect_uniquesubquery_engine::exec() + DESCRIPTION + Find rows corresponding to the ref key using index access. + If some part of the lookup key is NULL, then we're evaluating + NULL IN (SELECT ... ) + This is a special case, we don't need to search for NULL in the table, + instead, the result value is + - NULL if select produces empty row set + - FALSE otherwise. + + In some cases (IN subselect is a top level item, i.e. abort_on_null==TRUE) + the caller doesn't distinguish between NULL and FALSE result and we just + return FALSE. + Otherwise we make a full table scan to see if there is at least one matching row. + + NOTE + + RETURN + FALSE - ok + TRUE - an error occured while scanning +*/ + +int subselect_uniquesubquery_engine::exec(bool full_scan) +{ + DBUG_ENTER("subselect_uniquesubquery_engine::exec"); + int error; + TABLE *table= tab->table; + + /* TODO: change to use of 'full_scan' here? */ + if (copy_ref_key()) + DBUG_RETURN(1); + + if (null_keypart) + DBUG_RETURN(scan_table()); + if (!table->file->inited) table->file->ha_index_init(tab->ref.key); error= table->file->index_read(table->record[0], @@ -1652,14 +1950,68 @@ subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine() } -int subselect_indexsubquery_engine::exec() +/* + Index-lookup subselect 'engine' - run the subquery + + SYNOPSIS + subselect_uniquesubquery_engine:exec() + full_scan + + DESCRIPTION + The engine is used to resolve subqueries in form + + oe IN (SELECT key FROM tbl WHERE subq_where) + + The value of the predicate is calculated as follows: + 1. If oe IS NULL, this is a special case, do a full table scan on + table tbl and search for row that satisfies subq_where. If such + row is found, return NULL, otherwise return FALSE. + 2. Make an index lookup via key=oe, search for a row that satisfies + subq_where. If found, return TRUE. + 3. If check_null==TRUE, make another lookup via key=NULL, search for a + row that satisfies subq_where. If found, return NULL, otherwise + return FALSE. + + TODO + The step #1 can be optimized further when the index has several key + parts. Consider a subquery: + + (oe1, oe2) IN (SELECT keypart1, keypart2 FROM tbl WHERE subq_where) + + and suppose we need to evaluate it for {oe1, oe2}=={const1, NULL}. + Current code will do a full table scan and obtain correct result. There + is a better option: instead of evaluating + + SELECT keypart1, keypart2 FROM tbl WHERE subq_where (1) + + and checking if it has produced any matching rows, evaluate + + SELECT keypart2 FROM tbl WHERE subq_where AND keypart1=const1 (2) + + If this query produces a row, the result is NULL (as we're evaluating + "(const1, NULL) IN { (const1, X), ... }", which has a value of UNKNOWN, + i.e. NULL). If the query produces no rows, the result is FALSE. + + We currently evaluate (1) by doing a full table scan. (2) can be + evaluated by doing a "ref" scan on "keypart1=const1", which can be much + cheaper. We can use index statistics to quickly check whether "ref" scan + will be cheaper than full table scan. 
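
A standalone illustration (not server code) of the three-valued outcome the engine computes for "oe IN (SELECT key FROM tbl WHERE subq_where)", following the steps just described: a NULL outer value can only yield UNKNOWN or FALSE, a successful lookup yields TRUE, and a NULL-keyed match downgrades a miss to UNKNOWN.

#include <cstdio>

enum class Tri { False, True, Unknown };        // SQL three-valued logic

// outer_is_null : the outer value 'oe' is NULL
// match_found   : a lookup on key=oe found a row satisfying subq_where
// null_key_row  : a qualifying row with key=NULL (or, when oe is NULL,
//                 any qualifying row) exists
static Tri in_subquery_value(bool outer_is_null, bool match_found,
                             bool null_key_row)
{
  if (outer_is_null)                                 // step 1 above
    return null_key_row ? Tri::Unknown : Tri::False;
  if (match_found)                                   // step 2
    return Tri::True;
  return null_key_row ? Tri::Unknown : Tri::False;   // step 3
}

int main()
{
  // NULL IN (non-empty set) is UNKNOWN; NULL IN (empty set) is FALSE.
  std::printf("%d %d\n", (int) in_subquery_value(true, false, true),
                         (int) in_subquery_value(true, false, false));
  return 0;
}
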
+ + RETURN + 0 + 1 +*/ + +int subselect_indexsubquery_engine::exec(bool full_scan) { - DBUG_ENTER("subselect_indexsubselect_engine::exec"); + DBUG_ENTER("subselect_indexsubquery_engine::exec"); int error; bool null_finding= 0; TABLE *table= tab->table; ((Item_in_subselect *) item)->value= 0; + empty_result_set= TRUE; + null_keypart= 0; if (check_null) { @@ -1668,14 +2020,12 @@ int subselect_indexsubquery_engine::exec() ((Item_in_subselect *) item)->was_null= 0; } - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) - { - if ((tab->ref.key_err= (*copy)->copy()) & 1) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(1); - } - } + /* Copy the ref key and check for nulls... */ + if (copy_ref_key()) + DBUG_RETURN(1); + + if (null_keypart) + DBUG_RETURN(scan_table()); if (!table->file->inited) table->file->ha_index_init(tab->ref.key); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 45df4f3880d..f1be99353cc 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -60,6 +60,9 @@ public: /* subquery is transformed */ bool changed; + /* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */ + bool is_correlated; + enum trans_res {RES_OK, RES_REDUCE, RES_ERROR}; enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS}; @@ -92,7 +95,7 @@ public: return null_value; } bool fix_fields(THD *thd, Item **ref); - virtual bool exec(); + virtual bool exec(bool full_scan); virtual void fix_length_and_dec(); table_map used_tables() const; table_map not_null_tables() const { return 0; } @@ -114,6 +117,7 @@ public: single select and union subqueries only. */ bool is_evaluated() const; + bool is_uncacheable() const; /* Used by max/min subquery to initialize value presence registration @@ -155,6 +159,7 @@ public: my_decimal *val_decimal(my_decimal *); bool val_bool(); enum Item_result result_type() const; + enum_field_types field_type() const; void fix_length_and_dec(); uint cols(); @@ -215,7 +220,20 @@ public: friend class subselect_indexsubquery_engine; }; -/* IN subselect */ + +/* + IN subselect: this represents "left_exr IN (SELECT ...)" + + This class has: + - (as a descendant of Item_subselect) a "subquery execution engine" which + allows it to evaluate subqueries. (and this class participates in + execution by having was_null variable where part of execution result + is stored. + - Transformation methods (todo: more on this). + + This class is not used directly, it is "wrapped" into Item_in_optimizer + which provides some small bits of subquery evaluation. 
+*/ class Item_in_subselect :public Item_exists_subselect { @@ -231,12 +249,14 @@ protected: bool abort_on_null; bool transformed; public: + /* Used to trigger on/off conditions that were pushed down to subselect */ + bool enable_pushed_conds; Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery Item_in_subselect(Item * left_expr, st_select_lex *select_lex); Item_in_subselect() :Item_exists_subselect(), optimizer(0), abort_on_null(0), transformed(0), - upper_item(0) + enable_pushed_conds(TRUE), upper_item(0) {} subs_type substype() { return IN_SUBS; } @@ -256,6 +276,7 @@ public: my_decimal *val_decimal(my_decimal *); bool val_bool(); void top_level_item() { abort_on_null=1; } + inline bool is_top_level_item() { return abort_on_null; } bool test_limit(st_select_lex_unit *unit); void print(String *str); bool fix_fields(THD *thd, Item **ref); @@ -291,6 +312,7 @@ protected: THD *thd; /* pointer to current THD */ Item_subselect *item; /* item, that use this engine */ enum Item_result res_type; /* type of results */ + enum_field_types res_field_type; /* column type of the results */ bool maybe_null; /* may be null (first item in select) */ public: @@ -300,6 +322,7 @@ public: result= res; item= si; res_type= STRING_RESULT; + res_field_type= FIELD_TYPE_VAR_STRING; maybe_null= 0; } virtual ~subselect_engine() {}; // to satisfy compiler @@ -313,10 +336,32 @@ public: THD * get_thd() { return thd; } virtual int prepare()= 0; virtual void fix_length_and_dec(Item_cache** row)= 0; - virtual int exec()= 0; + /* + Execute the engine + + SYNOPSIS + exec() + full_scan TRUE - Pushed-down predicates are disabled, the engine + must disable made based on those predicates. + FALSE - Pushed-down predicates are in effect. + DESCRIPTION + Execute the engine. The result of execution is subquery value that is + either captured by previously set up select_result-based 'sink' or + stored somewhere by the exec() method itself. + + A required side effect: if full_scan==TRUE, subselect_engine->no_rows() + should return correct result. + + RETURN + 0 - OK + 1 - Either an execution error, or the engine was be "changed", and + caller should call exec() again for the new engine. 
+ */ + virtual int exec(bool full_scan)= 0; virtual uint cols()= 0; /* return number of columns in select */ virtual uint8 uncacheable()= 0; /* query is uncacheable */ enum Item_result type() { return res_type; } + enum_field_types field_type() { return res_field_type; } virtual void exclude()= 0; bool may_be_null() { return maybe_null; }; virtual table_map upper_select_const_tables()= 0; @@ -325,6 +370,11 @@ public: virtual bool change_result(Item_subselect *si, select_subselect *result)= 0; virtual bool no_tables()= 0; virtual bool is_executed() const { return FALSE; } + /* Check if subquery produced any rows during last query execution */ + virtual bool no_rows() = 0; + +protected: + void set_row(List<Item> &item_list, Item_cache **row); }; @@ -342,7 +392,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols(); uint8 uncacheable(); void exclude(); @@ -351,6 +401,7 @@ public: bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); bool is_executed() const { return executed; } + bool no_rows(); }; @@ -364,7 +415,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols(); uint8 uncacheable(); void exclude(); @@ -373,6 +424,7 @@ public: bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); bool is_executed() const; + bool no_rows(); }; @@ -382,6 +434,12 @@ class subselect_uniquesubquery_engine: public subselect_engine protected: st_join_table *tab; Item *cond; + /* + TRUE<=> last execution produced empty set. Valid only when left + expression is NULL. + */ + bool empty_result_set; + bool null_keypart; /* TRUE <=> constructed search tuple has a NULL */ public: // constructor can assign THD because it will be called after JOIN::prepare @@ -395,7 +453,7 @@ public: void cleanup(); int prepare(); void fix_length_and_dec(Item_cache** row); - int exec(); + int exec(bool full_scan); uint cols() { return 1; } uint8 uncacheable() { return UNCACHEABLE_DEPENDENT; } void exclude(); @@ -403,11 +461,15 @@ public: void print (String *str); bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); + int scan_table(); + bool copy_ref_key(); + bool no_rows() { return empty_result_set; } }; class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine { + /* FALSE for 'ref', TRUE for 'ref-or-null'. 
*/ bool check_null; public: @@ -418,7 +480,7 @@ public: :subselect_uniquesubquery_engine(thd, tab_arg, subs, where), check_null(chk_null) {} - int exec(); + int exec(bool full_scan); void print (String *str); }; @@ -428,3 +490,9 @@ inline bool Item_subselect::is_evaluated() const return engine->is_executed(); } +inline bool Item_subselect::is_uncacheable() const +{ + return engine->uncacheable(); +} + + diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 77c6e17607f..c2219aafd03 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -893,6 +893,7 @@ bool Item_sum_distinct::setup(THD *thd) tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length, thd->variables.max_heap_table_size); + is_evaluated= FALSE; DBUG_RETURN(tree == 0); } @@ -900,6 +901,7 @@ bool Item_sum_distinct::setup(THD *thd) bool Item_sum_distinct::add() { args[0]->save_in_field(table->field[0], FALSE); + is_evaluated= FALSE; if (!table->field[0]->is_null()) { DBUG_ASSERT(tree); @@ -929,6 +931,7 @@ void Item_sum_distinct::clear() DBUG_ASSERT(tree != 0); /* we always have a tree */ null_value= 1; tree->reset(); + is_evaluated= FALSE; DBUG_VOID_RETURN; } @@ -938,6 +941,7 @@ void Item_sum_distinct::cleanup() delete tree; tree= 0; table= 0; + is_evaluated= FALSE; } Item_sum_distinct::~Item_sum_distinct() @@ -949,16 +953,20 @@ Item_sum_distinct::~Item_sum_distinct() void Item_sum_distinct::calculate_val_and_count() { - count= 0; - val.traits->set_zero(&val); - /* - We don't have a tree only if 'setup()' hasn't been called; - this is the case of sql_select.cc:return_zero_rows. - */ - if (tree) + if (!is_evaluated) { - table->field[0]->set_notnull(); - tree->walk(item_sum_distinct_walk, (void*) this); + count= 0; + val.traits->set_zero(&val); + /* + We don't have a tree only if 'setup()' hasn't been called; + this is the case of sql_select.cc:return_zero_rows. + */ + if (tree) + { + table->field[0]->set_notnull(); + tree->walk(item_sum_distinct_walk, (void*) this); + } + is_evaluated= TRUE; } } @@ -1014,9 +1022,13 @@ Item_sum_avg_distinct::fix_length_and_dec() void Item_sum_avg_distinct::calculate_val_and_count() { - Item_sum_distinct::calculate_val_and_count(); - if (count) - val.traits->div(&val, count); + if (!is_evaluated) + { + Item_sum_distinct::calculate_val_and_count(); + if (count) + val.traits->div(&val, count); + is_evaluated= TRUE; + } } @@ -2478,6 +2490,7 @@ void Item_sum_count_distinct::cleanup() */ delete tree; tree= 0; + is_evaluated= FALSE; if (table) { free_tmp_table(table->in_use, table); @@ -2499,6 +2512,7 @@ void Item_sum_count_distinct::make_unique() original= 0; force_copy_fields= 1; tree= 0; + is_evaluated= FALSE; tmp_table_param= 0; always_null= FALSE; } @@ -2618,6 +2632,7 @@ bool Item_sum_count_distinct::setup(THD *thd) but this has to be handled - otherwise someone can crash the server with a DoS attack */ + is_evaluated= FALSE; if (! 
tree) return TRUE; } @@ -2634,8 +2649,11 @@ Item *Item_sum_count_distinct::copy_or_same(THD* thd) void Item_sum_count_distinct::clear() { /* tree and table can be both null only if always_null */ + is_evaluated= FALSE; if (tree) + { tree->reset(); + } else if (table) { table->file->extra(HA_EXTRA_NO_CACHE); @@ -2656,6 +2674,7 @@ bool Item_sum_count_distinct::add() if ((*field)->is_real_null(0)) return 0; // Don't count NULL + is_evaluated= FALSE; if (tree) { /* @@ -2682,12 +2701,14 @@ longlong Item_sum_count_distinct::val_int() return LL(0); if (tree) { - ulonglong count; + if (is_evaluated) + return count; if (tree->elements == 0) return (longlong) tree->elements_in_tree(); // everything fits in memory count= 0; tree->walk(count_distinct_walk, (void*) &count); + is_evaluated= TRUE; return (longlong) count; } @@ -3001,6 +3022,7 @@ int dump_leaf_key(byte* key, element_count count __attribute__((unused)), String tmp2; String *result= &item->result; Item **arg= item->args, **arg_end= item->args + item->arg_count_field; + uint old_length= result->length(); if (item->no_appended) item->no_appended= FALSE; @@ -3035,8 +3057,22 @@ int dump_leaf_key(byte* key, element_count count __attribute__((unused)), /* stop if length of result more than max_length */ if (result->length() > item->max_length) { + int well_formed_error; + CHARSET_INFO *cs= item->collation.collation; + const char *ptr= result->ptr(); + uint add_length; + /* + It's ok to use item->result.length() as the fourth argument + as this is never used to limit the length of the data. + Cut is done with the third argument. + */ + add_length= cs->cset->well_formed_len(cs, + ptr + old_length, + ptr + item->max_length, + result->length(), + &well_formed_error); + result->length(old_length + add_length); item->count_cut_values++; - result->length(item->max_length); item->warning_for_row= TRUE; return 1; } @@ -3226,8 +3262,7 @@ bool Item_func_group_concat::add() we can dump the row here in case of GROUP_CONCAT(DISTINCT...) instead of doing tree traverse later. */ - if (result.length() <= max_length && - !warning_for_row && + if (!warning_for_row && (!tree || (el->count == 1 && distinct && !arg_count_order))) dump_leaf_key(table->record[0] + table->s->null_bytes, 1, this); @@ -3296,7 +3331,8 @@ bool Item_func_group_concat::setup(THD *thd) DBUG_RETURN(TRUE); /* We'll convert all blobs to varchar fields in the temporary table */ - tmp_table_param->convert_blob_length= max_length; + tmp_table_param->convert_blob_length= max_length * + collation.collation->mbmaxlen; /* Push all not constant fields to the list and create a temp table */ always_null= 0; for (uint i= 0; i < arg_count_field; i++) diff --git a/sql/item_sum.h b/sql/item_sum.h index fe7edd76ecf..c11ef7e548a 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -352,12 +352,23 @@ public: class Item_sum_num :public Item_sum { +protected: + /* + val_xxx() functions may be called several times during the execution of a + query. Derived classes that require extensive calculation in val_xxx() + maintain cache of aggregate value. This variable governs the validity of + that cache. 
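
A minimal sketch of the caching pattern this comment describes, independent of the Item_sum classes: the expensive walk over the distinct values happens at most once per set of inputs, and any add() or clear() invalidates the cached result, mirroring how is_evaluated is reset in the changes above.

#include <set>

class DistinctSum
{
  std::set<long> values;          // stands in for the Unique tree
  long cached_sum=   0;
  bool is_evaluated= false;       // validity of cached_sum

public:
  void add(long v) { values.insert(v); is_evaluated= false; }
  void clear()     { values.clear();   is_evaluated= false; }

  long value()
  {
    if (!is_evaluated)            // recompute only when the inputs changed
    {
      cached_sum= 0;
      for (long v : values)
        cached_sum+= v;
      is_evaluated= true;
    }
    return cached_sum;
  }
};
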
+ */ + bool is_evaluated; public: - Item_sum_num() :Item_sum() {} - Item_sum_num(Item *item_par) :Item_sum(item_par) {} - Item_sum_num(Item *a, Item* b) :Item_sum(a,b) {} - Item_sum_num(List<Item> &list) :Item_sum(list) {} - Item_sum_num(THD *thd, Item_sum_num *item) :Item_sum(thd, item) {} + Item_sum_num() :Item_sum(),is_evaluated(FALSE) {} + Item_sum_num(Item *item_par) + :Item_sum(item_par), is_evaluated(FALSE) {} + Item_sum_num(Item *a, Item* b) :Item_sum(a,b),is_evaluated(FALSE) {} + Item_sum_num(List<Item> &list) + :Item_sum(list), is_evaluated(FALSE) {} + Item_sum_num(THD *thd, Item_sum_num *item) + :Item_sum(thd, item),is_evaluated(item->is_evaluated) {} bool fix_fields(THD *, Item **); longlong val_int() { @@ -540,6 +551,12 @@ class Item_sum_count_distinct :public Item_sum_int */ Unique *tree; /* + Storage for the value of count between calls to val_int() so val_int() + will not recalculate on each call. Validitiy of the value is stored in + is_evaluated. + */ + longlong count; + /* Following is 0 normal object and pointer to original one for copy (to correctly free resources) */ @@ -556,14 +573,15 @@ class Item_sum_count_distinct :public Item_sum_int public: Item_sum_count_distinct(List<Item> &list) :Item_sum_int(list), table(0), field_lengths(0), tmp_table_param(0), - force_copy_fields(0), tree(0), original(0), always_null(FALSE) + force_copy_fields(0), tree(0), count(0), + original(0), always_null(FALSE) { quick_group= 0; } Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item) :Item_sum_int(thd, item), table(item->table), field_lengths(item->field_lengths), tmp_table_param(item->tmp_table_param), - force_copy_fields(0), tree(item->tree), original(item), - tree_key_length(item->tree_key_length), + force_copy_fields(0), tree(item->tree), count(item->count), + original(item), tree_key_length(item->tree_key_length), always_null(item->always_null) {} ~Item_sum_count_distinct(); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index d79dd2a4b44..72d0ff1cb98 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1414,17 +1414,6 @@ String *Item_date::val_str(String *str) } -int Item_date::save_in_field(Field *field, bool no_conversions) -{ - TIME ltime; - if (get_date(<ime, TIME_FUZZY_DATE)) - return set_field_to_null(field); - field->set_notnull(); - field->store_time(<ime, MYSQL_TIMESTAMP_DATE); - return 0; -} - - longlong Item_date::val_int() { DBUG_ASSERT(fixed == 1); @@ -2499,7 +2488,8 @@ String *Item_char_typecast::val_str(String *str) { // Safe even if const arg char char_type[40]; my_snprintf(char_type, sizeof(char_type), "%s(%lu)", - cast_cs == &my_charset_bin ? "BINARY" : "CHAR", length); + cast_cs == &my_charset_bin ? 
"BINARY" : "CHAR", + (ulong) length); if (!res->alloced_length()) { // Don't change const str diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index e79c62e6ffb..68e8700d1b0 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -339,12 +339,20 @@ public: decimals=0; max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - int save_in_field(Field *to, bool no_conversions); Field *tmp_table_field(TABLE *t_arg) { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -361,21 +369,57 @@ public: return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } +}; + + +class Item_str_timefunc :public Item_str_func +{ +public: + Item_str_timefunc() :Item_str_func() {} + Item_str_timefunc(Item *a) :Item_str_func(a) {} + Item_str_timefunc(Item *a,Item *b) :Item_str_func(a,b) {} + Item_str_timefunc(Item *a, Item *b, Item *c) :Item_str_func(a, b ,c) {} + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + void fix_length_and_dec() + { + decimals=0; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + } + Field *tmp_table_field(TABLE *t_arg) + { + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; /* Abstract CURTIME function. 
Children should define what time zone is used */ -class Item_func_curtime :public Item_func +class Item_func_curtime :public Item_str_timefunc { longlong value; char buff[9*2+32]; uint buff_length; public: - Item_func_curtime() :Item_func() {} - Item_func_curtime(Item *a) :Item_func(a) {} - enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + Item_func_curtime() :Item_str_timefunc() {} + Item_func_curtime(Item *a) :Item_str_timefunc(a) {} double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } String *val_str(String *str); @@ -602,10 +646,10 @@ class Item_func_convert_tz :public Item_date_func }; -class Item_func_sec_to_time :public Item_str_func +class Item_func_sec_to_time :public Item_str_timefunc { public: - Item_func_sec_to_time(Item *item) :Item_str_func(item) {} + Item_func_sec_to_time(Item *item) :Item_str_timefunc(item) {} double val_real() { DBUG_ASSERT(fixed == 1); @@ -615,17 +659,12 @@ public: String *val_str(String *); void fix_length_and_dec() { + Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; decimals= DATETIME_DEC; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } const char *func_name() const { return "sec_to_time"; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } bool result_as_longlong() { return TRUE; } }; @@ -762,6 +801,15 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -780,6 +828,15 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; @@ -797,12 +854,21 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; -class Item_func_makedate :public Item_str_func +class Item_func_makedate :public Item_date_func { public: - Item_func_makedate(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_makedate(Item *a,Item *b) :Item_date_func(a,b) {} String *val_str(String *str); const char *func_name() const { return "makedate"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } @@ -815,8 +881,16 @@ public: { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } - bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -848,45 +922,46 @@ public: } void print(String *str); const char *func_name() const { return "add_time"; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + if 
(cached_field_type == MYSQL_TYPE_TIME) + return val_decimal_from_time(decimal_value); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return val_decimal_from_date(decimal_value); + return Item_str_func::val_decimal(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + if (cached_field_type == MYSQL_TYPE_TIME) + return save_time_in_field(field); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return save_date_in_field(field); + return Item_str_func::save_in_field(field, no_conversions); + } }; -class Item_func_timediff :public Item_str_func +class Item_func_timediff :public Item_str_timefunc { public: Item_func_timediff(Item *a, Item *b) - :Item_str_func(a, b) {} + :Item_str_timefunc(a, b) {} String *val_str(String *str); const char *func_name() const { return "timediff"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + Item_str_timefunc::fix_length_and_dec(); maybe_null= 1; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; -class Item_func_maketime :public Item_str_func +class Item_func_maketime :public Item_str_timefunc { public: Item_func_maketime(Item *a, Item *b, Item *c) - :Item_str_func(a, b ,c) {} + :Item_str_timefunc(a, b ,c) {} String *val_str(String *str); const char *func_name() const { return "maketime"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } - void fix_length_and_dec() - { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; - } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; class Item_func_microsecond :public Item_int_func diff --git a/sql/log_event.cc b/sql/log_event.cc index 271658d8054..0d7ef7e8f2a 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -727,7 +727,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, LOG_EVENT_MINIMAL_HEADER_LEN); LOCK_MUTEX; - DBUG_PRINT("info", ("my_b_tell=%lu", my_b_tell(file))); + DBUG_PRINT("info", ("my_b_tell: %lu", (ulong) my_b_tell(file))); if (my_b_read(file, (byte *) head, header_size)) { DBUG_PRINT("info", ("Log_event::read_log_event(IO_CACHE*,Format_desc*) \ @@ -1212,6 +1212,7 @@ bool Query_log_event::write(IO_CACHE* file) /* Store length of status variables */ status_vars_len= (uint) (start-start_of_status); + DBUG_ASSERT(status_vars_len <= MAX_SIZE_LOG_EVENT_STATUS); int2store(buf + Q_STATUS_VARS_LEN_OFFSET, status_vars_len); /* @@ -1297,7 +1298,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, } else time_zone_len= 0; - DBUG_PRINT("info",("Query_log_event has flags2=%lu sql_mode=%lu",flags2,sql_mode)); + DBUG_PRINT("info",("Query_log_event has flags2: %lu sql_mode: %lu", + (ulong) flags2, sql_mode)); } #endif /* MYSQL_CLIENT */ @@ -1345,7 +1347,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, common_header_len= description_event->common_header_len; post_header_len= description_event->post_header_len[event_type-1]; - DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d", + DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d", event_len, common_header_len, post_header_len)); /* @@ -1393,7 +1395,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, case Q_FLAGS2_CODE: flags2_inited= 1; flags2= uint4korr(pos); - DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", flags2)); + 
DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2)); pos+= 4; break; case Q_SQL_MODE_CODE: @@ -3137,8 +3139,8 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg, #ifndef DBUG_OFF char buff[22]; DBUG_ENTER("Rotate_log_event::Rotate_log_event(THD*,...)"); - DBUG_PRINT("enter",("new_log_ident %s pos %s flags %lu", new_log_ident_arg, - llstr(pos_arg, buff), flags)); + DBUG_PRINT("enter",("new_log_ident: %s pos: %s flags: %lu", new_log_ident_arg, + llstr(pos_arg, buff), (ulong) flags)); #endif if (flags & DUP_NAME) new_log_ident= my_strdup_with_length(new_log_ident_arg, @@ -3912,7 +3914,7 @@ Slave_log_event::Slave_log_event(THD* thd_arg, memcpy(master_log, rli->group_master_log_name, master_log_len + 1); master_port = mi->port; master_pos = rli->group_master_log_pos; - DBUG_PRINT("info", ("master_log: %s pos: %d", master_log, + DBUG_PRINT("info", ("master_log: %s pos: %lu", master_log, (ulong) master_pos)); } else diff --git a/sql/log_event.h b/sql/log_event.h index f1b441dedb1..247e1962776 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -200,8 +200,26 @@ struct sql_ex_info #define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1) #define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN) -/* - Event header offsets; +/* + Max number of possible extra bytes in a replication event compared to a + packet (i.e. a query) sent from client to master; + First, an auxiliary log_event status vars estimation: +*/ +#define MAX_SIZE_LOG_EVENT_STATUS (4 /* flags2 */ + \ + 8 /* sql mode */ + \ + 1 + 1 + 255 /* catalog */ + \ + 4 /* autoinc */ + \ + 6 /* charset */ + \ + MAX_TIME_ZONE_NAME_LENGTH) +#define MAX_LOG_EVENT_HEADER ( /* in order of Query_log_event::write */ \ + LOG_EVENT_HEADER_LEN + /* write_header */ \ + QUERY_HEADER_LEN + /* write_data */ \ + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN + /*write_post_header_for_derived */ \ + MAX_SIZE_LOG_EVENT_STATUS + /* status */ \ + NAME_LEN + 1) + +/* + Event header offsets; these point to places inside the fixed header. 
*/ diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 1bd16940b47..f33609e0168 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -15,6 +15,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "mysql_priv.h" +#include <time.h> + #ifndef MYSQL_CLIENT /* @@ -190,6 +192,23 @@ int str2my_decimal(uint mask, const char *from, uint length, } +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec) +{ + longlong date; + date = (ltime->year*100L + ltime->month)*100L + ltime->day; + if (ltime->time_type > MYSQL_TIMESTAMP_DATE) + date= ((date*100L + ltime->hour)*100L+ ltime->minute)*100L + ltime->second; + if (int2my_decimal(E_DEC_FATAL_ERROR, date, FALSE, dec)) + return dec; + if (ltime->second_part) + { + dec->buf[(dec->intg-1) / 9 + 1]= ltime->second_part * 1000; + dec->frac= 6; + } + return dec; +} + + #ifndef DBUG_OFF /* routines for debugging print */ diff --git a/sql/my_decimal.h b/sql/my_decimal.h index b02abacf0a3..af3edade8d6 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -295,7 +295,12 @@ int string2my_decimal(uint mask, const String *str, my_decimal *d) { return str2my_decimal(mask, str->ptr(), str->length(), str->charset(), d); } -#endif + + +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec); + + +#endif /*defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY) */ inline int double2my_decimal(uint mask, double val, my_decimal *d) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c353cee356a..edf88eb4f6d 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -96,6 +96,17 @@ extern CHARSET_INFO *system_charset_info, *files_charset_info ; extern CHARSET_INFO *national_charset_info, *table_alias_charset; +enum Derivation +{ + DERIVATION_IGNORABLE= 5, + DERIVATION_COERCIBLE= 4, + DERIVATION_SYSCONST= 3, + DERIVATION_IMPLICIT= 2, + DERIVATION_NONE= 1, + DERIVATION_EXPLICIT= 0 +}; + + typedef struct my_locale_st { const char *name; @@ -1472,7 +1483,7 @@ void end_read_record(READ_RECORD *info); ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder, uint s_length, SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows); -void filesort_free_buffers(TABLE *table); +void filesort_free_buffers(TABLE *table, bool full); void change_double_for_sort(double nr,byte *to); double my_double_round(double value, int dec, bool truncate); int get_quick_record(SQL_SELECT *select); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 26a335842e3..6cfa7e205d1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1531,7 +1531,7 @@ static void network_init(void) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) { - sql_print_error("The socket file path is too long (> %lu): %s", + sql_print_error("The socket file path is too long (> %u): %s", sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); unireg_abort(1); } @@ -3389,8 +3389,8 @@ int main(int argc, char **argv) if (stack_size && stack_size < thread_stack) { if (global_system_variables.log_warnings) - sql_print_warning("Asked for %ld thread stack, but got %ld", - thread_stack, stack_size); + sql_print_warning("Asked for %lu thread stack, but got %ld", + thread_stack, (long) stack_size); #if defined(__ia64__) || defined(__ia64) thread_stack= stack_size*2; #else @@ -3924,7 +3924,7 @@ static void create_new_thread(THD *thd) int error; thread_created++; threads.append(thd); - DBUG_PRINT("info",(("creating thread %d"), thd->thread_id)); + DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id)); thd->connect_time = time(NULL); if 
((error=pthread_create(&thd->real_id,&connection_attrib, handle_one_connection, @@ -5141,7 +5141,7 @@ master-ssl", (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \ --skip-merge.", - (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0}, + (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 1601f7e5177..e84b2266e82 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -810,7 +810,7 @@ my_real_read(NET *net, ulong *complen) { my_bool interrupted = vio_should_retry(net->vio); - DBUG_PRINT("info",("vio_read returned %d, errno: %d", + DBUG_PRINT("info",("vio_read returned %ld, errno: %d", length, vio_errno(net->vio))); #if (!defined(__WIN__) && !defined(__EMX__) && !defined(OS2)) || defined(MYSQL_SERVER) /* diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 96239315026..ef755d868d9 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -878,7 +878,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() file->extra(HA_EXTRA_NO_KEYREAD); if (free_file) { - DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file, + DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file, free_file)); file->reset(); file->external_lock(current_thd, F_UNLCK); @@ -1836,9 +1836,9 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, double scan_time; DBUG_ENTER("SQL_SELECT::test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", - keys_to_use.to_ulonglong(), (ulong) prev_tables, + (ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); - DBUG_PRINT("info", ("records=%lu", (ulong)head->file->records)); + DBUG_PRINT("info", ("records: %lu", (ulong) head->file->records)); delete quick; quick=0; needed_reg.clear_all(); @@ -2102,7 +2102,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records))); if (busy_blocks < 1.0) busy_blocks= 1.0; - DBUG_PRINT("info",("sweep: nblocks=%g, busy_blocks=%g", n_blocks, + DBUG_PRINT("info",("sweep: nblocks: %g, busy_blocks: %g", n_blocks, busy_blocks)); /* Disabled: Bail out if # of blocks to read is bigger than # of blocks in @@ -2126,7 +2126,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) result= busy_blocks; } } - DBUG_PRINT("info",("returning cost=%g", result)); + DBUG_PRINT("return",("cost: %g", result)); DBUG_RETURN(result); } @@ -2220,7 +2220,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, ha_rows roru_total_records; double roru_intersect_part= 1.0; DBUG_ENTER("get_best_disjunct_quick"); - DBUG_PRINT("info", ("Full table scan cost =%g", read_time)); + DBUG_PRINT("info", ("Full table scan cost: %g", read_time)); if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root, sizeof(TRP_RANGE*)* @@ -2264,7 +2264,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, non_cpk_scan_records += (*cur_child)->records; } - DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost)); + DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost)); if (imerge_too_expensive || (imerge_cost > read_time) || (non_cpk_scan_records+cpk_scan_records 
>= param->table->file->records) && read_time != DBL_MAX) @@ -2877,7 +2877,7 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info, DBUG_PRINT("info", ("Current out_rows= %g", info->out_rows)); DBUG_PRINT("info", ("Adding scan on %s", info->param->table->key_info[ror_scan->keynr].name)); - DBUG_PRINT("info", ("is_cpk_scan=%d",is_cpk_scan)); + DBUG_PRINT("info", ("is_cpk_scan: %d",is_cpk_scan)); selectivity_mult = ror_scan_selectivity(info, ror_scan); if (selectivity_mult == 1.0) @@ -8280,8 +8280,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, *records= num_groups; DBUG_PRINT("info", - ("table rows=%u, keys/block=%u, keys/group=%u, result rows=%u, blocks=%u", - table_records, keys_per_block, keys_per_group, *records, + ("table rows: %u keys/block: %u keys/group: %u result rows: %lu blocks: %u", + table_records, keys_per_block, keys_per_group, (ulong) *records, num_blocks)); DBUG_VOID_RETURN; } diff --git a/sql/protocol.cc b/sql/protocol.cc index 5de24ebdcb3..e7a8d493341 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -46,7 +46,7 @@ bool Protocol_prep::net_store_data(const char *from, uint length) packet->realloc(packet_length+9+length)) return 1; char *to=(char*) net_store_length((char*) packet->ptr()+packet_length, - (ulonglong) length); + length); memcpy(to,from,length); packet->length((uint) (to+length-packet->ptr())); return 0; @@ -280,8 +280,8 @@ send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) } buff[0]=0; // No fields - pos=net_store_length(buff+1,(ulonglong) affected_rows); - pos=net_store_length(pos, (ulonglong) id); + pos=net_store_length(buff+1,affected_rows); + pos=net_store_length(pos, id); if (thd->client_capabilities & CLIENT_PROTOCOL_41) { DBUG_PRINT("info", @@ -456,7 +456,7 @@ void net_send_error_packet(THD *thd, uint sql_errno, const char *err) ulonglong for bigger numbers. 
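
For context on the signature change below (the uint variant becomes a file-local net_store_length_fast, while callers that may pass row counts or lengths wider than 32 bits keep using the ulonglong net_store_length): this is my understanding of the classic length-prefixed integer encoding those helpers write, shown as a standalone sketch rather than a copy of the server function.

#include <cstdint>

// Small values take a single byte; larger ones get a one-byte marker
// followed by a little-endian payload of 2, 3 or 8 bytes.
static unsigned char *store_length(unsigned char *to, std::uint64_t n)
{
  if (n < 251)                          // fits in one byte
  {
    *to++= (unsigned char) n;
    return to;
  }
  if (n < (1ULL << 16))                 // marker 252 + 2 bytes
  {
    *to++= 252;
    *to++= (unsigned char) (n & 0xff);
    *to++= (unsigned char) (n >> 8);
    return to;
  }
  if (n < (1ULL << 24))                 // marker 253 + 3 bytes
  {
    *to++= 253;
    *to++= (unsigned char) (n & 0xff);
    *to++= (unsigned char) ((n >> 8) & 0xff);
    *to++= (unsigned char) (n >> 16);
    return to;
  }
  *to++= 254;                           // marker 254 + 8 bytes
  for (int i= 0; i < 8; i++)
    *to++= (unsigned char) ((n >> (8 * i)) & 0xff);
  return to;
}
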
*/ -char *net_store_length(char *pkg, uint length) +static char *net_store_length_fast(char *pkg, uint length) { uchar *packet=(uchar*) pkg; if (length < 251) @@ -479,7 +479,7 @@ char *net_store_length(char *pkg, uint length) char *net_store_data(char *to,const char *from, uint length) { - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,from,length); return to+length; } @@ -488,7 +488,7 @@ char *net_store_data(char *to,int32 from) { char buff[20]; uint length=(uint) (int10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,buff,length); return to+length; } @@ -497,7 +497,7 @@ char *net_store_data(char *to,longlong from) { char buff[22]; uint length=(uint) (longlong10_to_str(from,buff,10)-buff); - to=net_store_length(to,length); + to=net_store_length_fast(to,length); memcpy(to,buff,length); return to+length; } @@ -561,7 +561,7 @@ bool Protocol::send_fields(List<Item> *list, uint flags) if (flags & SEND_NUM_ROWS) { // Packet with number of elements - char *pos=net_store_length(buff, (uint) list->elements); + char *pos=net_store_length(buff, list->elements); (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); } diff --git a/sql/protocol.h b/sql/protocol.h index 85c22724b74..7e2bc1516ec 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -166,7 +166,6 @@ void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, const char *info=0); void send_eof(THD *thd); bool send_old_password_request(THD *thd); -char *net_store_length(char *packet,uint length); char *net_store_data(char *to,const char *from, uint length); char *net_store_data(char *to,int32 from); char *net_store_data(char *to,longlong from); diff --git a/sql/records.cc b/sql/records.cc index b352f9f395a..78473659b22 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -20,7 +20,7 @@ #include "mysql_priv.h" static int rr_quick(READ_RECORD *info); -static int rr_sequential(READ_RECORD *info); +int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); static int rr_unpack_from_tempfile(READ_RECORD *info); static int rr_unpack_from_buffer(READ_RECORD *info); @@ -184,6 +184,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, } /* init_read_record */ + void end_read_record(READ_RECORD *info) { /* free cache if used */ if (info->cache) @@ -193,7 +194,7 @@ void end_read_record(READ_RECORD *info) } if (info->table) { - filesort_free_buffers(info->table); + filesort_free_buffers(info->table,0); (void) info->file->extra(HA_EXTRA_NO_CACHE); if (info->read_record != rr_quick) // otherwise quick_range does it (void) info->file->ha_index_or_rnd_end(); @@ -289,7 +290,7 @@ static int rr_index(READ_RECORD *info) } -static int rr_sequential(READ_RECORD *info) +int rr_sequential(READ_RECORD *info) { int tmp; while ((tmp=info->file->rnd_next(info->record))) diff --git a/sql/slave.cc b/sql/slave.cc index e3497a4f0ac..b5c1fff4222 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -247,7 +247,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); - DBUG_PRINT("info", ("pos=%lu", pos)); + DBUG_PRINT("info", ("pos: %lu", (long) pos)); *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); @@ -2895,6 +2895,13 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO; thd->security_ctx->skip_grants(); my_net_init(&thd->net, 0); +/* + Adding MAX_LOG_EVENT_HEADER_LEN to the 
max_allowed_packet on all + slave threads, since a replication event can become this much larger + than the corresponding packet (query) sent from client to master. +*/ + thd->variables.max_allowed_packet= global_system_variables.max_allowed_packet + + MAX_LOG_EVENT_HEADER; /* note, incr over the global not session var */ thd->net.read_timeout = slave_net_timeout; thd->slave_thread = 1; set_slave_thread_options(thd); @@ -3080,7 +3087,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) return packet_error; } - DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", + DBUG_PRINT("info",( "len: %lu net->read_pos[4]: %d\n", len, mysql->net.read_pos[4])); return len - 1; } @@ -3347,9 +3354,9 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) const char *errmsg; /* We were in a transaction which has been rolled back because of a - deadlock (currently, InnoDB deadlock detected by InnoDB) or lock - wait timeout (innodb_lock_wait_timeout exceeded); let's seek back to - BEGIN log event and retry it all again. + Sonera deadlock. if lock wait timeout (innodb_lock_wait_timeout exceeded) + there is no rollback since 5.0.13 (ref: manual). + let's seek back to BEGIN log event and retry it all again. We have to not only seek but also a) init_master_info(), to seek back to hot relay log's start for later (for when we will come back to this hot log after re-processing the @@ -3371,6 +3378,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) else { exec_res= 0; + end_trans(thd, ROLLBACK); /* chance for concurrent connection to get more locks */ safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); @@ -3388,9 +3396,17 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) "the slave_transaction_retries variable.", slave_trans_retries); } - if (!((thd->options & OPTION_BEGIN) && opt_using_transactions)) - rli->trans_retries= 0; // restart from fresh - } + else if (!((thd->options & OPTION_BEGIN) && opt_using_transactions)) + { + /* + Only reset the retry counter if the event succeeded or + failed with a non-transient error. On a successful event, + the execution will proceed as usual; in the case of a + non-transient error, the slave will stop with an error. + */ + rli->trans_retries= 0; // restart from fresh + } + } return exec_res; } else @@ -3474,11 +3490,19 @@ slave_begin: thd->proc_info = "Connecting to master"; // we can get killed during safe_connect if (!safe_connect(thd, mysql, mi)) + { sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\ replication started in log '%s' at position %s", mi->user, - mi->host, mi->port, - IO_RPL_LOG_NAME, - llstr(mi->master_log_pos,llbuff)); + mi->host, mi->port, + IO_RPL_LOG_NAME, + llstr(mi->master_log_pos,llbuff)); + /* + Adding MAX_LOG_EVENT_HEADER_LEN to the max_packet_size on the I/O + thread, since a replication event can become this much larger than + the corresponding packet (query) sent from client to master. 
+ */ + mysql->net.max_packet_size= thd->net.max_packet_size+= MAX_LOG_EVENT_HEADER; + } else { sql_print_information("Slave I/O thread killed while connecting to master"); @@ -4116,7 +4140,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) /* Safe copy as 'rev' has been "sanitized" in Rotate_log_event's ctor */ memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1); mi->master_log_pos= rev->pos; - DBUG_PRINT("info", ("master_log_pos: '%s' %d", + DBUG_PRINT("info", ("master_log_pos: '%s' %lu", mi->master_log_name, (ulong) mi->master_log_pos)); #ifndef DBUG_OFF /* @@ -4233,7 +4257,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, int error = process_io_create_file(mi,(Create_file_log_event*)ev); delete ev; mi->master_log_pos += inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); my_free((char*)tmp_buf, MYF(0)); DBUG_RETURN(error); @@ -4260,7 +4284,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, } delete ev; mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(0); } @@ -4316,7 +4340,7 @@ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, delete ev; mi->master_log_pos+= inc_pos; err: - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(0); } @@ -4486,7 +4510,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) rli->ign_master_log_pos_end= mi->master_log_pos; } rli->relay_log.signal_update(); // the slave SQL thread needs to re-check - DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu event originating from the same server, ignored", + (ulong) mi->master_log_pos)); } else { @@ -4494,7 +4519,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } else diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 724cf88d373..d91da405c36 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -3726,7 +3726,7 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant, GRANT_COLUMN *grant_column; ulong want_access= grant->want_privilege & ~grant->privilege; DBUG_ENTER("check_grant_column"); - DBUG_PRINT("enter", ("table: %s want_access: %u", table_name, want_access)); + DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access)); if (!want_access) DBUG_RETURN(0); // Already checked diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 053f6fcb845..a01ffe3ce43 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1491,6 +1491,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->file->ft_handler= 0; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); + table->pos_in_table_list= table_list; table_list->updatable= 1; // It is 
not derived table nor non-updatable VIEW DBUG_ASSERT(table->key_read == 0); DBUG_RETURN(table); @@ -2775,6 +2776,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, if (thd->slave_thread) slave_open_temp_tables++; } + tmp_table->pos_in_table_list= 0; DBUG_RETURN(tmp_table); } diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index ff033b69f98..5902374dff0 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -527,7 +527,8 @@ void Query_cache_query::init_n_lock() my_rwlock_init(&lock, NULL); lock_writing(); DBUG_PRINT("qcache", ("inited & locked query for block 0x%lx", - ((byte*) this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); DBUG_VOID_RETURN; } @@ -536,7 +537,8 @@ void Query_cache_query::unlock_n_destroy() { DBUG_ENTER("Query_cache_query::unlock_n_destroy"); DBUG_PRINT("qcache", ("destroyed & unlocked query for block 0x%lx", - ((byte*)this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); /* The following call is not needed on system where one can destroy an active semaphore @@ -702,6 +704,7 @@ void query_cache_abort(NET *net) void query_cache_end_of_result(THD *thd) { + Query_cache_block *query_block; DBUG_ENTER("query_cache_end_of_result"); /* See the comment on double-check locking usage above. */ @@ -717,13 +720,9 @@ void query_cache_end_of_result(THD *thd) if (unlikely(query_cache.query_cache_size == 0 || query_cache.flush_in_progress)) - { - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_VOID_RETURN; - } + goto end; - Query_cache_block *query_block= ((Query_cache_block*) - thd->net.query_cache_query); + query_block= ((Query_cache_block*) thd->net.query_cache_query); if (query_block) { DUMP(&query_cache); @@ -742,27 +741,21 @@ void query_cache_end_of_result(THD *thd) header->query())); query_cache.wreck(__LINE__, ""); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - - DBUG_VOID_RETURN; + BLOCK_UNLOCK_WR(query_block); + goto end; } #endif header->found_rows(current_thd->limit_found_rows); header->result()->type= Query_cache_block::RESULT; header->writer(0); thd->net.query_cache_query= 0; + BLOCK_UNLOCK_WR(query_block); DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - - BLOCK_UNLOCK_WR(query_block); - } - else - { - // Cache was flushed or resized and query was deleted => do nothing - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); } +end: + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; } @@ -879,8 +872,8 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - flags.limit, - (ulong)flags.time_zone, + (ulong) flags.limit, + (ulong) flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len)); @@ -1122,8 +1115,8 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - flags.limit, - (ulong)flags.time_zone, + (ulong) flags.limit, + (ulong) flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len)); @@ -1260,7 +1253,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", #ifndef EMBEDDED_LIBRARY do { - DBUG_PRINT("qcache", ("Results (len %lu, used %lu, headers %lu)", + DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)", result_block->length, result_block->used, 
result_block->headers_len()+ ALIGN_SIZE(sizeof(Query_cache_result)))); @@ -2037,7 +2030,7 @@ Query_cache::append_result_data(Query_cache_block **current_block, { DBUG_ENTER("Query_cache::append_result_data"); DBUG_PRINT("qcache", ("append %lu bytes to 0x%lx query", - data_len, query_block)); + data_len, (long) query_block)); if (query_block->query()->add(data_len) > query_cache_limit) { @@ -3044,10 +3037,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, OPTION_TO_QUERY_CACHE))) && lex->safe_to_cache_query) { - DBUG_PRINT("qcache", ("options %lx %lx, type %u", - OPTION_TO_QUERY_CACHE, - lex->select_lex.options, - (int) thd->variables.query_cache_type)); + DBUG_PRINT("qcache", ("options: %lx %lx type: %u", + OPTION_TO_QUERY_CACHE, + (long) lex->select_lex.options, + (int) thd->variables.query_cache_type)); if (!(table_count= process_and_count_tables(tables_used, tables_type))) DBUG_RETURN(0); @@ -3063,10 +3056,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, } DBUG_PRINT("qcache", - ("not interesting query: %d or not cacheable, options %lx %lx, type %u", + ("not interesting query: %d or not cacheable, options %lx %lx type: %u", (int) lex->sql_command, OPTION_TO_QUERY_CACHE, - lex->select_lex.options, + (long) lex->select_lex.options, (int) thd->variables.query_cache_type)); DBUG_RETURN(0); } @@ -3655,7 +3648,8 @@ void Query_cache::queries_dump() DBUG_PRINT("qcache", ("F:%u C:%u L:%lu T:'%s' (%u) '%s' '%s'", flags.client_long_flag, flags.character_set_client_num, - (ulong)flags.limit, flags.time_zone->get_name(), + (ulong)flags.limit, + flags.time_zone->get_name()->ptr(), len, str, strend(str)+1)); DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block, (ulong) block->next, (ulong) block->prev, @@ -3875,9 +3869,8 @@ my_bool Query_cache::check_integrity(bool locked) break; } default: - DBUG_PRINT("error", - ("block 0x%lx have incorrect type %u", - block, block->type)); + DBUG_PRINT("error", ("block 0x%lx have incorrect type %u", + (long) block, block->type)); result = 1; } @@ -3975,8 +3968,8 @@ my_bool Query_cache::check_integrity(bool locked) } while (block != bins[i].free_blocks); if (count != bins[i].number) { - DBUG_PRINT("error", ("bin[%d].number is %d, but bin have %d blocks", - bins[i].number, count)); + DBUG_PRINT("error", ("bins[%d].number = %d, but bin have %d blocks", + i, bins[i].number, count)); result = 1; } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ba2f525a4a4..d2f1e9ed0d9 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -712,7 +712,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, (*prev_changed)->key_length)); + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } else if (cmp == 0) @@ -722,7 +722,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -734,7 +734,7 @@ void THD::add_changed_table(const char *key, long key_length) } } *prev_changed = changed_table_dup(key, key_length); - DBUG_PRINT("info", ("key_length %u %u", key_length, + DBUG_PRINT("info", ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h 
b/sql/sql_class.h index c7bdfbd7ea7..41845dc5c76 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1102,6 +1102,12 @@ public: struct st_mysql_data **data_tail; void clear_data_list(); struct st_mysql_data *alloc_new_dataset(); + /* + In embedded server it points to the statement that is processed + in the current query. We store some results directly in statement + fields then. + */ + struct st_mysql_stmt *current_stmt; #endif NET net; // client connection descriptor MEM_ROOT warn_root; // For warnings and errors @@ -2084,7 +2090,7 @@ public: inline bool unique_add(void *ptr) { DBUG_ENTER("unique_add"); - DBUG_PRINT("info", ("tree %u - %u", tree.elements_in_tree, max_elements)); + DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements)); if (tree.elements_in_tree > max_elements && flush()) DBUG_RETURN(1); DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg)); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e13e7728708..38c12562fe3 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -326,7 +326,7 @@ cleanup: { thd->row_count_func= deleted; send_ok(thd,deleted); - DBUG_PRINT("info",("%d records deleted",deleted)); + DBUG_PRINT("info",("%ld records deleted",(long) deleted)); } DBUG_RETURN(error >= 0 || thd->net.report_error); } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index f39cf39f08e..3de842c8551 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -162,6 +162,7 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc; lex->select_lex.group_list.empty(); lex->select_lex.order_list.empty(); + lex->select_lex.udf_list.empty(); lex->current_select= &lex->select_lex; lex->yacc_yyss=lex->yacc_yyvs=0; lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); @@ -1165,6 +1166,7 @@ void st_select_lex::init_select() braces= 0; when_list.empty(); expr_list.empty(); + udf_list.empty(); interval_list.empty(); use_index.empty(); ftfunc_list_alloc.empty(); @@ -1178,7 +1180,7 @@ void st_select_lex::init_select() select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ offset_limit= 0; /* denotes the default offset = 0 */ with_sum_func= 0; - + is_correlated= 0; } /* @@ -1372,6 +1374,8 @@ void st_select_lex::mark_as_dependent(SELECT_LEX *last) SELECT_LEX_UNIT *munit= s->master_unit(); munit->uncacheable|= UNCACHEABLE_DEPENDENT; } + is_correlated= TRUE; + this->master_unit()->item->is_correlated= TRUE; } bool st_select_lex_node::set_braces(bool value) { return 1; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 398e5666c7f..5f968252cc3 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -470,7 +470,7 @@ public: void set_thd(THD *thd_arg) { thd= thd_arg; } friend void lex_start(THD *thd, uchar *buf, uint length); - friend int subselect_union_engine::exec(); + friend int subselect_union_engine::exec(bool); List<Item> *get_unit_column_types(); }; @@ -562,6 +562,8 @@ public: query processing end even if we use temporary table */ bool subquery_in_having; + /* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */ + bool is_correlated; /* This variable is required to ensure proper work of subqueries and stored procedures. 
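The is_correlated flag added above is raised both on the select itself and on the Item_subselect of its master unit whenever mark_as_dependent() fires. As a rough sketch of the propagation idea only (toy types, not the server's SELECT_LEX machinery):

struct toy_select
{
  toy_select *outer;        /* enclosing select, 0 at the top level */
  bool is_correlated;
};

/*
  Walk outwards from the select that holds the reference up to (but not
  including) the select that owns the referenced field, marking every
  select on the way as correlated.
*/
static void mark_correlated(toy_select *owner, toy_select *current)
{
  for (toy_select *s= current; s && s != owner; s= s->outer)
    s->is_correlated= true;
}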
Generally, one should use the states of @@ -581,6 +583,8 @@ public: /* exclude this select from check of unique_table() */ bool exclude_from_table_unique_test; + List<udf_func> udf_list; /* stack of UDF function calls */ + void init_query(); void init_select(); st_select_lex_unit* master_unit(); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index af62e1a199f..02bb595b3dc 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -381,9 +381,9 @@ int check_user(THD *thd, enum enum_server_command command, NO_ACCESS)) // authentication is OK { DBUG_PRINT("info", - ("Capabilities: %d packet_length: %ld Host: '%s' " + ("Capabilities: %lu packet_length: %ld Host: '%s' " "Login user: '%s' Priv_user: '%s' Using password: %s " - "Access: %u db: '%s'", + "Access: %lu db: '%s'", thd->client_capabilities, thd->max_client_packet_length, thd->main_security_ctx.host_or_ip, @@ -956,7 +956,7 @@ static int check_connection(THD *thd) if (thd->client_capabilities & CLIENT_IGNORE_SPACE) thd->variables.sql_mode|= MODE_IGNORE_SPACE; #ifdef HAVE_OPENSSL - DBUG_PRINT("info", ("client capabilities: %d", thd->client_capabilities)); + DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities)); if (thd->client_capabilities & CLIENT_SSL) { /* Do the SSL layering. */ @@ -1112,7 +1112,7 @@ pthread_handler_t handle_one_connection(void *arg) of handle_one_connection, which is thd. We need to know the start of the stack so that we could check for stack overruns. */ - DBUG_PRINT("info", ("handle_one_connection called by thread %d\n", + DBUG_PRINT("info", ("handle_one_connection called by thread %lu\n", thd->thread_id)); /* now that we've called my_thread_init(), it is safe to call DBUG_* */ @@ -1764,7 +1764,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (alloc_query(thd, packet, packet_length)) break; // fatal error is set char *packet_end= thd->query + thd->query_length; - mysql_log.write(thd,command, "%.*b", thd->query_length, thd->query); + /* 'b' stands for the 'buffer' parameter, specific to 'my_snprintf' */ + const char *format= "%.*b"; + mysql_log.write(thd,command, format, thd->query_length, thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); if (!(specialflag & SPECIAL_NO_PRIOR)) @@ -2503,7 +2505,23 @@ mysql_execute_command(THD *thd) { /* we warn the slave SQL thread */ my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); - reset_one_shot_variables(thd); + if (thd->one_shot_set) + { + /* + It's ok to check thd->one_shot_set here: + + The charsets in a MySQL 5.0 slave can change by both a binlogged + SET ONE_SHOT statement and the event-internal charset setting, + and these two ways to change charsets do not seem to work + together. + + At least there seem to be problems in the rli cache for + charsets if we are using ONE_SHOT. Note that this is normally no + problem because either the >= 5.0 slave reads a 4.1 binlog (with + ONE_SHOT) *or* a 5.0 binlog (without ONE_SHOT) but never both. + */ + reset_one_shot_variables(thd); + } DBUG_RETURN(0); } } @@ -3436,8 +3454,12 @@ end_with_restore_list: if (first_table->lock_type == TL_WRITE_CONCURRENT_INSERT && thd->lock) { + /* INSERT ...
SELECT should invalidate only the very first table */ + TABLE_LIST *save_table= first_table->next_local; + first_table->next_local= 0; mysql_unlock_tables(thd, thd->lock); query_cache_invalidate3(thd, first_table, 1); + first_table->next_local= save_table; thd->lock=0; } delete result; @@ -6120,7 +6142,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->alias= alias_str; if (lower_case_table_names && table->table.length) - my_casedn_str(files_charset_info, table->table.str); + table->table.length= my_casedn_str(files_charset_info, table->table.str); ptr->table_name=table->table.str; ptr->table_name_length=table->table.length; ptr->lock_type= lock_type; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6517afa5432..1e7601c0951 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1664,7 +1664,7 @@ static bool check_prepared_statement(Prepared_statement *stmt, enum enum_sql_command sql_command= lex->sql_command; int res= 0; DBUG_ENTER("check_prepared_statement"); - DBUG_PRINT("enter",("command: %d, param_count: %ld", + DBUG_PRINT("enter",("command: %d, param_count: %u", sql_command, stmt->param_count)); lex->first_lists_tables_same(); @@ -1877,9 +1877,12 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) thd->stmt_map.erase(stmt); } else - mysql_log.write(thd, COM_STMT_PREPARE, "[%lu] %.*b", stmt->id, + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_PREPARE, format, stmt->id, stmt->query_length, stmt->query); + } /* check_prepared_statemnt sends the metadata packet in case of success */ DBUG_VOID_RETURN; } @@ -2261,8 +2264,11 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); if (error == 0) - mysql_log.write(thd, COM_STMT_EXECUTE, "[%lu] %.*b", stmt->id, + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, thd->query_length, thd->query); + } DBUG_VOID_RETURN; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index e1933d42f9e..8da8bbe25ca 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -430,6 +430,12 @@ impossible position"; goto err; } packet->set("\0", 1, &my_charset_bin); + /* + Adding MAX_LOG_EVENT_HEADER_LEN, since a binlog event can become + this larger than the corresponding packet (query) sent + from client to master. 
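The adjustment right below exists because a binlog event wraps the original statement in an event header, so an event can legitimately be larger than the max_allowed_packet the client itself was held to. A minimal sketch of the resulting acceptance rule (hypothetical helper, not server code; the real fix simply raises the session limit by MAX_LOG_EVENT_HEADER):

static bool event_length_acceptable(ulong event_len,
                                    ulong max_allowed_packet,
                                    ulong max_event_header)
{
  /*
    The statement obeyed max_allowed_packet when it was sent to the
    master; the binlog event only adds a fixed-size header on top.
  */
  return event_len <= max_allowed_packet + max_event_header;
}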
+ */ + thd->variables.max_allowed_packet+= MAX_LOG_EVENT_HEADER; /* We can set log_lock now, it does not move (it's a member of @@ -1101,7 +1107,7 @@ bool change_master(THD* thd, MASTER_INFO* mi) { mi->master_log_pos= lex_mi->pos; } - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); if (lex_mi->host) strmake(mi->host, lex_mi->host, sizeof(mi->host)-1); @@ -1218,7 +1224,7 @@ bool change_master(THD* thd, MASTER_INFO* mi) } } mi->rli.group_master_log_pos = mi->master_log_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); /* Coordinates in rli were spoilt by the 'if (need_relay_log_purge)' block, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 75cfff4cbb6..bcd7e8c4a9d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -158,8 +158,8 @@ static int join_read_prev_same(READ_RECORD *info); static int join_read_prev(READ_RECORD *info); static int join_ft_read_first(JOIN_TAB *tab); static int join_ft_read_next(READ_RECORD *info); -static int join_read_always_key_or_null(JOIN_TAB *tab); -static int join_read_next_same_or_null(READ_RECORD *info); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); @@ -512,11 +512,12 @@ err: DBUG_RETURN(-1); /* purecov: inspected */ } + /* test if it is known for optimisation IN subquery - SYNOPSYS - JOIN::test_in_subselect + SYNOPSIS + JOIN::test_in_subselect() where - pointer for variable in which conditions should be stored if subquery is known @@ -551,6 +552,35 @@ bool JOIN::test_in_subselect(Item **where) /* + Check if the passed HAVING clause is a clause added by subquery optimizer + + SYNOPSIS + is_having_subq_predicates() + having Having clause + + RETURN + TRUE The passed HAVING clause was added by the subquery optimizer + FALSE Otherwise +*/ + +bool is_having_subq_predicates(Item *having) +{ + if (having->type() == Item::FUNC_ITEM) + { + if (((Item_func *) having)->functype() == Item_func::ISNOTNULLTEST_FUNC) + return TRUE; + if (((Item_func *) having)->functype() == Item_func::TRIG_COND_FUNC) + { + having= ((Item_func*)having)->arguments()[0]; + if (((Item_func *) having)->functype() == Item_func::ISNOTNULLTEST_FUNC) + return TRUE; + } + return TRUE; + } + return FALSE; +} + +/* global select optimisation. 
return 0 - success 1 - error @@ -1018,9 +1048,7 @@ JOIN::optimize() } } else if (join_tab[0].type == JT_REF_OR_NULL && join_tab[0].ref.items[0]->name == in_left_expr_name && - having->type() == Item::FUNC_ITEM && - ((Item_func *) having)->functype() == - Item_func::ISNOTNULLTEST_FUNC) + is_having_subq_predicates(having)) { join_tab[0].type= JT_INDEX_SUBQUERY; error= 0; @@ -1273,14 +1301,14 @@ JOIN::reinit() exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table1->file->delete_all_rows(); free_io_cache(exec_tmp_table1); - filesort_free_buffers(exec_tmp_table1); + filesort_free_buffers(exec_tmp_table1,0); } if (exec_tmp_table2) { exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table2->file->delete_all_rows(); free_io_cache(exec_tmp_table2); - filesort_free_buffers(exec_tmp_table2); + filesort_free_buffers(exec_tmp_table2,0); } if (items0) set_items_ref_array(items0); @@ -1443,6 +1471,7 @@ JOIN::exec() curr_join->examined_rows= 0; if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && get_schema_tables_result(curr_join)) { DBUG_VOID_RETURN; @@ -2519,6 +2548,9 @@ typedef struct key_field_t { // Used when finding key fields when val IS NULL. */ bool null_rejecting; + + /* TRUE<=> This ref access is an outer subquery reference access */ + bool outer_ref; } KEY_FIELD; /* Values in optimize */ @@ -2817,6 +2849,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, cond->functype() == Item_func::MULT_EQUAL_FUNC) && ((*value)->type() == Item::FIELD_ITEM) && ((Item_field*)*value)->field->maybe_null()); + (*key_fields)->outer_ref= FALSE; (*key_fields)++; } @@ -2875,7 +2908,7 @@ add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, } static void -add_key_fields(KEY_FIELD **key_fields,uint *and_level, +add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, COND *cond, table_map usable_tables, SARGABLE_PARAM **sargables) { @@ -2888,28 +2921,56 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, { Item *item; while ((item=li++)) - add_key_fields(key_fields,and_level,item,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); for (; org_key_fields != *key_fields ; org_key_fields++) org_key_fields->level= *and_level; } else { (*and_level)++; - add_key_fields(key_fields,and_level,li++,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, li++, usable_tables, + sargables); Item *item; while ((item=li++)) { KEY_FIELD *start_key_fields= *key_fields; (*and_level)++; - add_key_fields(key_fields,and_level,item,usable_tables,sargables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); *key_fields=merge_key_fields(org_key_fields,start_key_fields, *key_fields,++(*and_level)); } } return; } - /* If item is of type 'field op field/constant' add it to key_fields */ + /* + Subquery optimization: check if the encountered condition is one + added by condition push down into subquery. 
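A condition that was pushed down into an IN subquery arrives here wrapped in a trigger condition; the hunk that follows peels the wrapper off, harvests key-use candidates from what is inside, and flags each of them as an outer (subquery) reference. A standalone sketch of that shape, with toy types rather than the server's Item tree:

#include <cstddef>
#include <vector>

struct toy_cond
{
  bool is_trig;               /* trigger-condition wrapper?            */
  toy_cond *arg;              /* wrapped condition when is_trig is set */
};

struct toy_key_field { bool outer_ref; };

static void collect_key_fields(const toy_cond *cond,
                               std::vector<toy_key_field> *out)
{
  if (cond->is_trig)
  {
    std::size_t first= out->size();
    collect_key_fields(cond->arg, out);    /* recurse into the wrapped condition */
    for (std::size_t i= first; i < out->size(); i++)
      (*out)[i].outer_ref= true;           /* everything found under the wrapper */
    return;
  }
  toy_key_field k= { false };              /* ordinary candidate */
  out->push_back(k);
}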
+ */ + { + if (cond->type() == Item::FUNC_ITEM && + ((Item_func*)cond)->functype() == Item_func::TRIG_COND_FUNC) + { + cond= ((Item_func*)cond)->arguments()[0]; + if (!join->group_list && !join->order && + join->unit->item && + join->unit->item->substype() == Item_subselect::IN_SUBS && + !join->unit->first_select()->next_select()) + { + KEY_FIELD *save= *key_fields; + add_key_fields(join, key_fields, and_level, cond, usable_tables, + sargables); + // Indicate that this ref access candidate is for subquery lookup: + for (; save != *key_fields; save++) + save->outer_ref= TRUE; + } + return; + } + } + + /* If item is of type 'field op field/constant' add it to key_fields */ if (cond->type() != Item::FUNC_ITEM) return; Item_func *cond_func= (Item_func*) cond; @@ -3083,6 +3144,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) keyuse.used_tables=key_field->val->used_tables(); keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; keyuse.null_rejecting= key_field->null_rejecting; + keyuse.outer_ref= key_field->outer_ref; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } } @@ -3205,7 +3267,7 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) Here we can add 'ref' access candidates for t1 and t2, but not for t3. */ -static void add_key_fields_for_nj(TABLE_LIST *nested_join_table, +static void add_key_fields_for_nj(JOIN *join, TABLE_LIST *nested_join_table, KEY_FIELD **end, uint *and_level, SARGABLE_PARAM **sargables) { @@ -3217,12 +3279,13 @@ static void add_key_fields_for_nj(TABLE_LIST *nested_join_table, while ((table= li++)) { if (table->nested_join) - add_key_fields_for_nj(table, end, and_level, sargables); + add_key_fields_for_nj(join, table, end, and_level, sargables); else if (!table->on_expr) tables |= table->table->map; } - add_key_fields(end, and_level, nested_join_table->on_expr, tables, sargables); + add_key_fields(join, end, and_level, nested_join_table->on_expr, tables, + sargables); } @@ -3297,7 +3360,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, return TRUE; if (cond) { - add_key_fields(&end,&and_level,cond,normal_tables,sargables); + add_key_fields(join_tab->join, &end, &and_level, cond, normal_tables, + sargables); for (; field != end ; field++) { add_key_part(keyuse,field); @@ -3319,8 +3383,9 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, into account as well. 
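When key-use candidates are taken from an outer join's ON clause (the code that follows), usable_tables is narrowed to the inner table's map: the ON condition may only drive lookups into the inner table, because the outer row still has to be produced when that condition fails. A toy illustration of the mask check, with made-up types:

typedef unsigned long long toy_table_map;

/*
  A candidate equality survives only if the field it would look up
  belongs to one of the tables the caller allowed; for an outer join's
  ON clause that is the inner table alone.
*/
static bool usable_for_ref(toy_table_map field_tables,
                           toy_table_map usable_tables)
{
  return (field_tables & usable_tables) != 0;
}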
*/ if (*join_tab[i].on_expr_ref) - add_key_fields(&end,&and_level,*join_tab[i].on_expr_ref, - join_tab[i].table->map,sargables); + add_key_fields(join_tab->join, &end, &and_level, + *join_tab[i].on_expr_ref, + join_tab[i].table->map, sargables); } /* Process ON conditions for the nested joins */ @@ -3330,7 +3395,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, while ((table= li++)) { if (table->nested_join) - add_key_fields_for_nj(table, &end, &and_level, sargables); + add_key_fields_for_nj(join_tab->join, table, &end, &and_level, + sargables); } } @@ -6094,7 +6160,7 @@ void JOIN::cleanup(bool full) if (tables > const_tables) // Test for not-const tables { free_io_cache(table[const_tables]); - filesort_free_buffers(table[const_tables]); + filesort_free_buffers(table[const_tables],full); } if (full) @@ -8560,6 +8626,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, item->collation.collation); else new_field= item->make_string_field(table); + new_field->set_derivation(item->collation.derivation); break; case DECIMAL_RESULT: new_field= new Field_new_decimal(item->max_length, maybe_null, item->name, @@ -8735,7 +8802,9 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, (make_copy_field ? 0 : copy_func), modify_item, convert_blob_length); case Item::TYPE_HOLDER: - return ((Item_type_holder *)item)->make_field_by_type(table); + result= ((Item_type_holder *)item)->make_field_by_type(table); + result->set_derivation(item->collation.derivation); + return result; default: // Dosen't have to be stored return 0; } @@ -9965,7 +10034,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) if (join->result->send_eof()) rc= 1; // Don't send error } - DBUG_PRINT("info",("%ld records output",join->send_records)); + DBUG_PRINT("info",("%ld records output", (long) join->send_records)); } else rc= -1; @@ -10791,6 +10860,13 @@ join_init_quick_read_record(JOIN_TAB *tab) } +int rr_sequential(READ_RECORD *info); +int init_read_record_seq(JOIN_TAB *tab) +{ + tab->read_record.read_record= rr_sequential; + return tab->read_record.file->ha_rnd_init(1); +} + static int test_if_quick_select(JOIN_TAB *tab) { @@ -10919,7 +10995,7 @@ join_ft_read_next(READ_RECORD *info) Reading of key with key reference and one part that may be NULL */ -static int +int join_read_always_key_or_null(JOIN_TAB *tab) { int res; @@ -10935,7 +11011,7 @@ join_read_always_key_or_null(JOIN_TAB *tab) } -static int +int join_read_next_same_or_null(READ_RECORD *info) { int error; @@ -12203,6 +12279,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, /* Fill schema tables with data before filesort if it's necessary */ if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && get_schema_tables_result(join)) goto err; @@ -13531,9 +13608,16 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, if (real_pos->type() == Item::FIELD_ITEM) { Item_field *item; - pos= real_pos; - if (!(item= new Item_field(thd, ((Item_field*) pos)))) + if (!(item= new Item_field(thd, ((Item_field*) real_pos)))) goto err; + if (pos->type() == Item::REF_ITEM) + { + /* preserve the names of the ref when dereferncing */ + Item_ref *ref= (Item_ref *) pos; + item->db_name= ref->db_name; + item->table_name= ref->table_name; + item->name= ref->name; + } pos= item; if (item->field->flags & BLOB_FLAG) { diff --git a/sql/sql_select.h b/sql/sql_select.h index 30b8f834ddf..629b44538d8 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -36,6 +36,8 
@@ typedef struct keyuse_t { satisfied if val has NULL 'value'. */ bool null_rejecting; + /* TRUE<=> This ref access is an outer subquery reference access */ + bool outer_ref; } KEYUSE; class store_key; @@ -455,10 +457,11 @@ class store_key :public Sql_alloc Field *to_field; // Store data here char *null_ptr; char err; - public: +public: + bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null),err(0) + :null_ptr(null), err(0), null_key(0) { if (field_arg->type() == FIELD_TYPE_BLOB) { @@ -496,6 +499,7 @@ class store_key_field: public store_key enum store_key_result copy() { copy_field.do_copy(&copy_field); + null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } const char *name() const { return field_name; } @@ -516,8 +520,8 @@ public: enum store_key_result copy() { int res= item->save_in_field(to_field, 1); + null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res); - } const char *name() const { return "func"; } }; @@ -547,6 +551,7 @@ public: err= res; } } + null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } const char *name() const { return "const"; } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index f107a57fe2b..1d524418480 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3979,7 +3979,7 @@ bool get_schema_tables_result(JOIN *join) table_list->table->file->extra(HA_EXTRA_RESET_STATE); table_list->table->file->delete_all_rows(); free_io_cache(table_list->table); - filesort_free_buffers(table_list->table); + filesort_free_buffers(table_list->table,1); table_list->table->null_row= 0; } else diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 7aaca809113..85ff1fddc45 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -854,6 +854,162 @@ outp: } + +/* + copy a string, + with optional character set conversion, + with optional left padding (for binary -> UCS2 conversion) + + SYNOPSIS + well_formed_copy_nchars() + to Store result here + to_length Maximum length of "to" string + to_cs Character set of "to" string + from Copy from here + from_length Length of from string + from_cs From character set + nchars Copy no more than nchars characters + well_formed_error_pos Return position where "from" is not well formed + or NULL otherwise. + cannot_convert_error_pos Return position where a non-convertible + character was met, or NULL otherwise. + from_end_pos Return position where scanning of "from" + string stopped.
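For orientation, a sketch of how a caller is expected to invoke the function documented here (the destination buffer, the from/from_length input and the character set choices are made-up placeholders, not taken from the patch; the argument order follows the declaration added to sql_string.h):

  CHARSET_INFO *to_cs;                  /* character set of the target column (assumed given) */
  const char *from; uint from_length;   /* source string (assumed given) */
  char to[64];
  const char *well_formed_error_pos, *cannot_convert_error_pos, *from_end_pos;

  uint32 copied= well_formed_copy_nchars(to_cs, to, sizeof(to),
                                         &my_charset_latin1, from, from_length,
                                         sizeof(to),   /* nchars: upper bound on characters */
                                         &well_formed_error_pos,
                                         &cannot_convert_error_pos,
                                         &from_end_pos);
  /*
    The three output positions let the caller tell apart malformed input,
    characters with no mapping in the target character set, and plain
    truncation (from_end_pos < from + from_length).
  */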
+ NOTES + + RETURN + length of bytes copied to 'to' +*/ + + +uint32 +well_formed_copy_nchars(CHARSET_INFO *to_cs, + char *to, uint to_length, + CHARSET_INFO *from_cs, + const char *from, uint from_length, + uint nchars, + const char **well_formed_error_pos, + const char **cannot_convert_error_pos, + const char **from_end_pos) +{ + uint res; + + if ((to_cs == &my_charset_bin) || + (from_cs == &my_charset_bin) || + (to_cs == from_cs) || + my_charset_same(from_cs, to_cs)) + { + if (to_length < to_cs->mbminlen || !nchars) + { + *from_end_pos= from; + *cannot_convert_error_pos= NULL; + *well_formed_error_pos= NULL; + return 0; + } + + if (to_cs == &my_charset_bin) + { + res= min(min(nchars, to_length), from_length); + memmove(to, from, res); + *from_end_pos= from + res; + *well_formed_error_pos= NULL; + *cannot_convert_error_pos= NULL; + } + else + { + int well_formed_error; + uint from_offset; + + if ((from_offset= (from_length % to_cs->mbminlen)) && + (from_cs == &my_charset_bin)) + { + /* + Copying from BINARY to UCS2 needs to prepend zeros sometimes: + INSERT INTO t1 (ucs2_column) VALUES (0x01); + 0x01 -> 0x0001 + */ + uint pad_length= to_cs->mbminlen - from_offset; + bzero(to, pad_length); + memmove(to + pad_length, from, from_offset); + nchars--; + from+= from_offset; + from_length-= from_offset; + to+= to_cs->mbminlen; + to_length-= to_cs->mbminlen; + } + + set_if_smaller(from_length, to_length); + res= to_cs->cset->well_formed_len(to_cs, from, from + from_length, + nchars, &well_formed_error); + memmove(to, from, res); + *from_end_pos= from + res; + *well_formed_error_pos= well_formed_error ? from + res : NULL; + *cannot_convert_error_pos= NULL; + if (from_offset) + res+= to_cs->mbminlen; + } + } + else + { + int cnvres; + my_wc_t wc; + int (*mb_wc)(struct charset_info_st *, my_wc_t *, + const uchar *, const uchar *)= from_cs->cset->mb_wc; + int (*wc_mb)(struct charset_info_st *, my_wc_t, + uchar *s, uchar *e)= to_cs->cset->wc_mb; + const uchar *from_end= (const uchar*) from + from_length; + uchar *to_end= (uchar*) to + to_length; + char *to_start= to; + *well_formed_error_pos= NULL; + *cannot_convert_error_pos= NULL; + + for ( ; nchars; nchars--) + { + const char *from_prev= from; + if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from, from_end)) > 0) + from+= cnvres; + else if (cnvres == MY_CS_ILSEQ) + { + if (!*well_formed_error_pos) + *well_formed_error_pos= from; + from++; + wc= '?'; + } + else if (cnvres > MY_CS_TOOSMALL) + { + /* + A correct multibyte sequence detected + But it doesn't have Unicode mapping. 
+ */ + if (!*cannot_convert_error_pos) + *cannot_convert_error_pos= from; + from+= (-cnvres); + wc= '?'; + } + else + break; // Not enough characters + +outp: + if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0) + to+= cnvres; + else if (cnvres == MY_CS_ILUNI && wc != '?') + { + if (!*cannot_convert_error_pos) + *cannot_convert_error_pos= from_prev; + wc= '?'; + goto outp; + } + else + break; + } + *from_end_pos= from; + res= to - to_start; + } + return (uint32) res; +} + + + + void String::print(String *str) { char *st= (char*)Ptr, *end= st+str_length; diff --git a/sql/sql_string.h b/sql/sql_string.h index 0659f684afe..09b8478adf8 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -32,6 +32,14 @@ String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, const char *from, uint32 from_length, CHARSET_INFO *from_cs, uint *errors); +uint32 well_formed_copy_nchars(CHARSET_INFO *to_cs, + char *to, uint to_length, + CHARSET_INFO *from_cs, + const char *from, uint from_length, + uint nchars, + const char **well_formed_error_pos, + const char **cannot_convert_error_pos, + const char **from_end_pos); class String { diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 1d71bf2497f..ae3b8277fde 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2267,7 +2267,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, goto send_result; } - table->table->pos_in_table_list= table; if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify) { char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE]; @@ -4061,8 +4060,6 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) } else { - t->pos_in_table_list= table; - if (t->file->table_flags() & HA_HAS_CHECKSUM && !(check_opt->flags & T_EXTEND)) protocol->store((ulonglong)t->file->checksum()); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index d431b671f18..dabda39d6b7 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -569,7 +569,7 @@ int mysql_update(THD *thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->last_insert_id : 0L,buff); - DBUG_PRINT("info",("%d records updated",updated)); + DBUG_PRINT("info",("%ld records updated", (long) updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ thd->abort_on_warning= 0; @@ -667,7 +667,7 @@ static table_map get_table_map(List<Item> *items) while ((item= (Item_field *) item_it++)) map|= item->used_tables(); - DBUG_PRINT("info",("table_map: 0x%08x", map)); + DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map)); return map; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 52b6c2c38c0..c0cdaf59712 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -563,7 +563,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, LEX_STRING *name; int i; - for (i= 0; name= names++; i++) + for (i= 0; (name= names++); i++) { buff.append(i ? 
", " : "("); append_identifier(thd, &buff, name->str, name->length); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 6f24a42c07c..676f3f0e6ab 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -761,7 +761,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <item_list> expr_list udf_expr_list udf_expr_list2 when_list - ident_list ident_list_arg + ident_list ident_list_arg opt_expr_list %type <var_type> option_type opt_var_type opt_var_ident_type @@ -4701,7 +4701,7 @@ simple_expr: { $$= new Item_func_trim($5,$3); } | TRUNCATE_SYM '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,1); } - | ident '.' ident '(' udf_expr_list ')' + | ident '.' ident '(' opt_expr_list ')' { LEX *lex= Lex; sp_name *name= new sp_name($1, $3); @@ -4718,27 +4718,27 @@ simple_expr: { #ifdef HAVE_DLOPEN udf_func *udf= 0; + LEX *lex= Lex; if (using_udf_functions && (udf= find_udf($1.str, $1.length)) && udf->type == UDFTYPE_AGGREGATE) { - LEX *lex= Lex; if (lex->current_select->inc_in_sum_expr()) { yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } } - $<udf>$= udf; + lex->current_select->udf_list.push_front(udf); #endif } udf_expr_list ')' { #ifdef HAVE_DLOPEN - udf_func *udf= $<udf>3; - SELECT_LEX *sel= Select; + udf_func *udf; + LEX *lex= Lex; - if (udf) + if (NULL != (udf= lex->current_select->udf_list.pop())) { if (udf->type == UDFTYPE_AGGREGATE) Select->in_sum_expr--; @@ -4965,12 +4965,29 @@ udf_expr_list3: udf_expr: remember_name expr remember_end select_alias { + udf_func *udf= Select->udf_list.head(); + /* + Use Item::name as a storage for the attribute value of user + defined function argument. It is safe to use Item::name + because the syntax will not allow having an explicit name here. + See WL#1017 re. udf attributes. + */ if ($4.str) { + if (!udf) + { + /* + Disallow using AS to specify explicit names for the arguments + of stored routine calls + */ + yyerror(ER(ER_SYNTAX_ERROR)); + YYABORT; + } + $2->is_autogenerated_name= FALSE; $2->set_name($4.str, $4.length, system_charset_info); } - else + else if (udf) $2->set_name($1, (uint) ($3 - $1), YYTHD->charset()); $$= $2; } @@ -5131,6 +5148,11 @@ cast_type: | DECIMAL_SYM float_options { $$=ITEM_CAST_DECIMAL; Lex->charset= NULL; } ; +opt_expr_list: + /* empty */ { $$= NULL; } + | expr_list { $$= $1;} + ; + expr_list: { Select->expr_list.push_front(new List<Item>); } expr_list2 diff --git a/sql/strfunc.cc b/sql/strfunc.cc index c822d10af46..d03d88ee051 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -150,7 +150,7 @@ uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) int pos; const char *j; DBUG_ENTER("find_type2"); - DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, typelib)); + DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, (long) typelib)); if (!typelib->count) { diff --git a/sql/table.cc b/sql/table.cc index d72379efb32..e63db72a02d 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -88,7 +88,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, MEM_ROOT **root_ptr, *old_root; TABLE_SHARE *share; DBUG_ENTER("openfrm"); - DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam)); + DBUG_PRINT("enter",("name: '%s' form: 0x%lx", name, (long) outparam)); error= 1; disk_buff= NULL; @@ -3032,6 +3032,23 @@ void st_table_list::reinit_before_use(THD *thd) embedding->nested_join->join_list.head() == embedded); } +/* + Return subselect that contains the FROM list this table is taken from + + SYNOPSIS + st_table_list::containing_subselect() + + RETURN + Subselect item for the 
subquery that contains the FROM list + this table is taken from if there is any + 0 - otherwise + +*/ + +Item_subselect *st_table_list::containing_subselect() +{ + return (select_lex ? select_lex->master_unit()->item : 0); +} /***************************************************************************** ** Instansiate templates diff --git a/sql/table.h b/sql/table.h index 5136ac2c4db..f0190353328 100644 --- a/sql/table.h +++ b/sql/table.h @@ -18,6 +18,7 @@ /* Structs that defines the TABLE */ class Item; /* Needed by ORDER */ +class Item_subselect; class GRANT_TABLE; class st_select_lex_unit; class st_select_lex; @@ -68,6 +69,9 @@ enum frm_type_enum typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filebyte */ + uchar **sort_keys; /* Buffer for sorting keys */ + byte *buffpek; /* Buffer for buffpek structures */ + uint buffpek_len; /* Max number of buffpeks in the buffer */ byte *addon_buf; /* Pointer to a buffer if sorted with fields */ uint addon_length; /* Length of the buffer */ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ @@ -678,6 +682,7 @@ typedef struct st_table_list procedure. */ void reinit_before_use(THD *thd); + Item_subselect *containing_subselect(); private: bool prep_check_option(THD *thd, uint8 check_opt_type); diff --git a/sql/tztime.cc b/sql/tztime.cc index bd8e43075c4..fe23954bbb2 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -949,13 +949,12 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, */ if (shift) { - if (local_t > (TIMESTAMP_MAX_VALUE - shift*86400L + - sp->revtis[i].rt_offset - saved_seconds)) + if (local_t > (my_time_t) (TIMESTAMP_MAX_VALUE - shift*86400L + + sp->revtis[i].rt_offset - saved_seconds)) { DBUG_RETURN(0); /* my_time_t overflow */ } - else - local_t+= shift*86400L; + local_t+= shift*86400L; } if (sp->revtis[i].rt_type) diff --git a/sql/unireg.cc b/sql/unireg.cc index 768a288ca19..8568b09e498 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -394,16 +394,16 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, pos[6]=pos[7]=0; // For the future pos+=8; key_parts+=key->key_parts; - DBUG_PRINT("loop",("flags: %d key_parts: %d at 0x%lx", - key->flags,key->key_parts, - key->key_part)); + DBUG_PRINT("loop", ("flags: %d key_parts: %d at 0x%lx", + key->flags, key->key_parts, + (long) key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; key_part != key_part_end ; key_part++) { uint offset; - DBUG_PRINT("loop",("field: %d startpos: %lu length: %ld", + DBUG_PRINT("loop",("field: %d startpos: %lu length: %d", key_part->fieldnr, key_part->offset + data_offset, key_part->length)); int2store(pos,key_part->fieldnr+1+FIELD_NAME_USED); |
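One more pattern worth noting, from the tztime.cc hunk above: the overflow test compares against the maximum minus the correction before adding, instead of adding first and checking afterwards, because the addition itself could wrap. In isolation (plain C++, made-up names) the guard looks like this:

#include <stdint.h>

/*
  Add a non-negative correction to a value bounded by max_value,
  reporting overflow instead of wrapping.  Assumes 0 <= correction <=
  max_value so that max_value - correction cannot underflow.
*/
static bool add_bounded(int64_t value, int64_t correction,
                        int64_t max_value, int64_t *result)
{
  if (value > max_value - correction)
    return false;                        /* would exceed the limit */
  *result= value + correction;
  return true;
}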