Diffstat (limited to 'sql')
139 files changed, 30841 insertions, 4528 deletions
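The most pervasive mechanical change in the diff below is that per-handler statistics move from process-global counters (ha_write_count, ha_read_rnd_next_count, filesort_rows, ...) to members of a per-connection status_var structure reached through table->in_use or current_thd. The following is a minimal sketch of that pattern, not the server's code: the THD/STATUS_VAR stand-ins and the count_write() helper are simplified assumptions, and the real statistic_increment() lives in the server headers.

// Illustrative sketch only -- simplified stand-ins for the MySQL types.
#include <pthread.h>

struct STATUS_VAR                     // subset of the per-thread counters
{
  unsigned long ha_write_count;
  unsigned long ha_read_rnd_next_count;
};

struct THD                            // per-connection descriptor (simplified)
{
  STATUS_VAR status_var;
};

static pthread_mutex_t LOCK_status= PTHREAD_MUTEX_INITIALIZER;

// Re-expressed here as a plain function; the server's own version lives in
// its headers and is not reproduced.
static void statistic_increment(unsigned long &counter, pthread_mutex_t *lock)
{
  pthread_mutex_lock(lock);
  counter++;
  pthread_mutex_unlock(lock);
}

// Old pattern:  statistic_increment(ha_write_count, &LOCK_status);
// New pattern:  the counter belongs to the THD that owns the table.
static void count_write(THD *thd)
{
  statistic_increment(thd->status_var.ha_write_count, &LOCK_status);
}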
diff --git a/sql/Makefile.am b/sql/Makefile.am index 19bdf8055f3..a3935835a2d 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -59,8 +59,12 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ log_event.h sql_repl.h slave.h \ stacktrace.h sql_sort.h sql_cache.h set_var.h \ spatial.h gstream.h client_settings.h tzfile.h \ - tztime.h examples/ha_example.h examples/ha_archive.h \ + tztime.h \ + sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ + parse_file.h sql_view.h sql_trigger.h \ + examples/ha_example.h examples/ha_archive.h \ examples/ha_tina.h + mysqld_SOURCES = sql_lex.cc sql_handler.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ @@ -89,11 +93,13 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ slave.cc sql_repl.cc sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ + sql_olap.cc sql_view.cc \ gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \ tztime.cc my_time.c \ + sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \ + sp_cache.cc parse_file.cc sql_trigger.cc \ examples/ha_example.cc examples/ha_archive.cc \ examples/ha_tina.cc - gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index 6fbfb3f9f9d..ea8b3694109 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -530,7 +530,7 @@ int ha_archive::write_row(byte * buf) z_off_t written; DBUG_ENTER("ha_archive::write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); pthread_mutex_lock(&share->mutex); @@ -671,7 +671,8 @@ int ha_archive::rnd_next(byte *buf) DBUG_RETURN(HA_ERR_END_OF_FILE); scan_rows--; - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= gztell(archive); rc= get_row(archive, buf); @@ -707,7 +708,8 @@ void ha_archive::position(const byte *record) int ha_archive::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_archive::rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= ha_get_ptr(pos, ref_length); z_off_t seek= gzseek(archive, current_position, SEEK_SET); @@ -870,71 +872,6 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, return to; } - -/****************************************************************************** - - Everything below here is default, please look at ha_example.cc for - descriptions. 
- - ******************************************************************************/ - -int ha_archive::update_row(const byte * old_data, byte * new_data) -{ - - DBUG_ENTER("ha_archive::update_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::delete_row(const byte * buf) -{ - DBUG_ENTER("ha_archive::delete_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_archive::index_read"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_archive::index_read_idx"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - - -int ha_archive::index_next(byte * buf) -{ - DBUG_ENTER("ha_archive::index_next"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_prev(byte * buf) -{ - DBUG_ENTER("ha_archive::index_prev"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_first(byte * buf) -{ - DBUG_ENTER("ha_archive::index_first"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_last(byte * buf) -{ - DBUG_ENTER("ha_archive::index_last"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - - void ha_archive::info(uint flag) { DBUG_ENTER("ha_archive::info"); @@ -945,23 +882,4 @@ void ha_archive::info(uint flag) DBUG_VOID_RETURN; } - -int ha_archive::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("ha_archive::extra"); - DBUG_RETURN(0); -} - -int ha_archive::reset(void) -{ - DBUG_ENTER("ha_archive::reset"); - DBUG_RETURN(0); -} - -ha_rows ha_archive::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - DBUG_ENTER("ha_archive::records_in_range "); - DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND -} #endif /* HAVE_ARCHIVE_DB */ diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h index b619de5f6c1..eb78b28034c 100644 --- a/sql/examples/ha_archive.h +++ b/sql/examples/ha_archive.h @@ -78,31 +78,9 @@ public: { return 0; } - /* - Have to put something here, there is no real limit as far as - archive is concerned. - */ - uint max_supported_record_length() const { return UINT_MAX; } - /* - Called in test_quick_select to determine if indexes should be used. 
- */ - virtual double scan_time() { return (double) (records) / 20.0+10; } - /* The next method will never be called */ - virtual double read_time(uint index, uint ranges, ha_rows rows) - { return (double) rows / 20.0+1; } int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); int rnd_init(bool scan=1); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); @@ -116,10 +94,7 @@ public: int write_data_header(gzFile file_to_write); void position(const byte *record); void info(uint); - int extra(enum ha_extra_function operation); - int reset(void); int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); int optimize(THD* thd, HA_CHECK_OPT* check_opt); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, diff --git a/sql/field.cc b/sql/field.cc index f70a23e889a..56e3ab71bd8 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -83,32 +83,48 @@ void Field_num::prepend_zeros(String *value) RETURN 0 ok - 1 error + 1 error. A warning is pushed if field_name != 0 */ -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs) +bool Field::check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs) { + const char *end; if (str == int_end) - return 0; // Empty string - const char *end=str+length; + { + char buff[128]; + String tmp(buff,(uint32) sizeof(buff), system_charset_info); + tmp.copy(str, length, system_charset_info); + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "integer", tmp.c_ptr(), field_name, + (ulong) table->in_use->row_count); + return 1; // Empty string + } + end= str+length; if ((str= int_end) == end) - return 1; // All digits was used + return 0; // ok; All digits was used /* Allow end .0000 */ if (*str == '.') { - for (str++ ; str != end && *str == '0'; str++) ; + for (str++ ; str != end && *str == '0'; str++) + ; } /* Allow end space */ - for (str++ ; str != end ; str++) + for ( ; str != end ; str++) { if (!my_isspace(cs,*str)) - return 0; + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + return 1; + } } - return 1; + return 0; } + #ifdef NOT_USED static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) { @@ -872,7 +888,13 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) if (tmp_char != '0') // Losing a non zero digit ? 
{ if (!is_cuted_fields_incr) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + { + /* + This is a note, not a warning, as we don't want to abort + when we cut decimals in strict mode + */ + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, ER_WARN_DATA_TRUNCATED, 1); + } return 0; } continue; @@ -1099,11 +1121,8 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1119,11 +1138,8 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } ptr[0]= (char) tmp; return error; @@ -1303,11 +1319,8 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1323,11 +1336,8 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -1578,11 +1588,8 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1598,11 +1605,8 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } int3store(ptr,tmp); @@ -1778,71 +1782,72 @@ void Field_medium::sql_type(String &res) const int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) { - long tmp; - int error= 0; + longlong tmp; + long store_tmp; + int error; + bool warning_given= 0; char *end; tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); len-= tmp; from+= tmp; - my_errno=0; - if (unsigned_flag) + end= (char*) from+len; + tmp= my_strtoll10(from, &end, &error); + + if (error != MY_ERRNO_EDOM) { - if (!len || *from == '-') + if (unsigned_flag) { - tmp=0; // Set negative to 0 - my_errno=ERANGE; - error= 1; + if 
(error < 0) + { + error= 1; + tmp= 0; + } + else if ((ulonglong) tmp > (ulonglong) UINT_MAX32) + { + tmp= UINT_MAX32; + error= 1; + } + else + error= 0; } else - tmp=(long) my_strntoul(cs,from,len,10,&end,&error); - } - else - tmp=my_strntol(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) - error= 1; -#if SIZEOF_LONG > 4 - if (unsigned_flag) - { - if ((ulong) tmp > UINT_MAX32) - { - tmp= UINT_MAX32; - error= 1; - my_errno=ERANGE; - } - } - else - { - if (tmp > INT_MAX32) - { - tmp= INT_MAX32; - error= 1; - my_errno=ERANGE; - } - else if (tmp < INT_MIN32) { - tmp= INT_MIN32; - error= 1; - my_errno=ERANGE; + if (error < 0) + { + error= 0; + if (tmp < INT_MIN32) + { + tmp= INT_MIN32; + error= 1; + } + } + else if (tmp > INT_MAX32) + { + tmp= INT_MAX32; + error= 1; + } } } -#endif if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); error= 1; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); } + else if (from+len != end && table->in_use->count_cuted_fields && + check_int(from,len,end,cs)) + error= 1; + + store_tmp= (long) tmp; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { - int4store(ptr,tmp); + int4store(ptr, store_tmp); } else #endif - longstore(ptr,tmp); + longstore(ptr, store_tmp); return error; } @@ -1857,13 +1862,11 @@ int Field_long::store(double nr) if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) (ulong) ~0L) { res=(int32) (uint32) ~0L; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -1874,18 +1877,19 @@ int Field_long::store(double nr) if (nr < (double) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else res=(int32) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -1902,26 +1906,18 @@ int Field_long::store(longlong nr) { int error= 0; int32 res; - - /* - This assert has nothing to do with this method per se, it was put here - only because it is one of the best places for catching places there its - condition is broken. 
- */ - DBUG_ASSERT(table->in_use == current_thd); + DBUG_ASSERT(table->in_use == current_thd); // General safety if (unsigned_flag) { if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr >= (LL(1) << 32)) { res=(int32) (uint32) ~0L; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -1932,18 +1928,19 @@ int Field_long::store(longlong nr) if (nr < (longlong) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (longlong) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else res=(int32) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2078,17 +2075,16 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) longlong tmp; int error= 0; char *end; + bool warning_given; tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); len-= (uint)tmp; from+= tmp; - my_errno=0; if (unsigned_flag) { if (!len || *from == '-') { tmp=0; // Set negative to 0 - my_errno= ERANGE; error= 1; } else @@ -2096,13 +2092,14 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) } else tmp=my_strntoll(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) + if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } + else if (from+len != end && table->in_use->count_cuted_fields && + check_int(from,len,end,cs)) + error= 1; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2119,19 +2116,18 @@ int Field_longlong::store(double nr) { int error= 0; longlong res; - nr=rint(nr); + + nr= rint(nr); if (unsigned_flag) { if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr >= (double) ~ (ulonglong) 0) { res= ~(longlong) 0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -2139,21 +2135,22 @@ int Field_longlong::store(double nr) } else { - if (nr <= (double) LONGLONG_MIN) + if (nr < (double) LONGLONG_MIN) { - res=(longlong) LONGLONG_MIN; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + res= LONGLONG_MIN; error= 1; } - else if (nr >= (double) LONGLONG_MAX) + else if (nr > (double) LONGLONG_MAX) { - res=(longlong) LONGLONG_MAX; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + res= LONGLONG_MAX; error= 1; } else res=(longlong) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2319,10 +2316,13 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from,len,&end,&error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? 
ER_WARN_DATA_OUT_OF_RANGE : ER_WARN_DATA_TRUNCATED), + 1); error= 1; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } Field_float::store(nr); return error; @@ -2621,10 +2621,13 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from, len, &end, &error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? ER_WARN_DATA_OUT_OF_RANGE : ER_WARN_DATA_TRUNCATED), + 1); error= 1; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } Field_double::store(nr); return error; @@ -2996,14 +2999,22 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) bool in_dst_time_gap; THD *thd= table->in_use; - have_smth_to_conv= (str_to_datetime(from, len, &l_time, 0, &error) > + have_smth_to_conv= (str_to_datetime(from, len, &l_time, + ((table->in_use->variables.sql_mode & + MODE_NO_ZERO_DATE) | + MODE_NO_ZERO_IN_DATE), + &error) > MYSQL_TIMESTAMP_ERROR); - if (error) + if (error || !have_smth_to_conv) + { + error= 1; set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATETIME, 1); + } - if (have_smth_to_conv) + /* Only convert a correct date (not a zero date) */ + if (have_smth_to_conv && l_time.month) { if (!(tmp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) { @@ -3033,6 +3044,7 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) return error; } + int Field_timestamp::store(double nr) { int error= 0; @@ -3207,7 +3219,7 @@ bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) longget(temp,ptr); if (temp == 0L) { /* Zero time is "000000" */ - if (!fuzzydate) + if (fuzzydate & TIME_NO_ZERO_DATE) return 1; bzero((char*) ltime,sizeof(*ltime)); } @@ -3228,7 +3240,7 @@ bool Field_timestamp::get_time(TIME *ltime) bool Field_timestamp::send_binary(Protocol *protocol) { TIME tm; - Field_timestamp::get_date(&tm, TIME_FUZZY_DATE); + Field_timestamp::get_date(&tm, 0); return protocol->store(&tm); } @@ -3461,7 +3473,7 @@ String *Field_time::val_str(String *val_buffer, bool Field_time::get_date(TIME *ltime, uint fuzzydate) { long tmp; - if (!fuzzydate) + if (!(fuzzydate & TIME_FUZZY_DATE)) { push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, @@ -3543,18 +3555,19 @@ void Field_time::sql_type(String &res) const int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int char *end; - long nr= my_strntol(cs, from, len, 10, &end, ¬_used); + int error; + long nr= my_strntol(cs, from, len, 10, &end, &error); - if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) + if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155 || error) { *ptr=0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } - if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; + if (nr != 0 || len != 4) { if (nr < YY_PART_YEAR) @@ -3563,7 +3576,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) nr-= 1900; } *ptr= (char) (unsigned char) nr; - return 0; + return error; } @@ -3649,7 +3662,11 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO 
*cs) uint32 tmp; int error; - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &l_time, TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES)), + &error) <= MYSQL_TIMESTAMP_ERROR) { tmp=0; error= 1; @@ -3840,7 +3857,12 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) TIME l_time; long tmp; int error; - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &l_time, + (TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error) <= MYSQL_TIMESTAMP_ERROR) { tmp=0L; error= 1; @@ -3980,7 +4002,8 @@ bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) ltime->year= (tmp >> 9); ltime->time_type= MYSQL_TIMESTAMP_DATE; ltime->hour= ltime->minute= ltime->second= ltime->second_part= ltime->neg= 0; - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return ((!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? + 1 : 0); } bool Field_newdate::get_time(TIME *ltime) @@ -4022,7 +4045,12 @@ int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs) int error; ulonglong tmp= 0; - if (str_to_datetime(from, len, &time_tmp, 1, &error) > MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &time_tmp, + (TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error) > MYSQL_TIMESTAMP_ERROR) tmp= TIME_to_ulonglong_datetime(&time_tmp); if (error) @@ -4194,7 +4222,7 @@ bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) ltime->day= (int) (part1%100); ltime->month= (int) (part1/100%100); ltime->year= (int) (part1/10000); - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return (!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? 
1 : 0; } bool Field_datetime::get_time(TIME *ltime) @@ -6166,7 +6194,7 @@ Field::set_warning(const uint level, const uint code, int cuted_increment) Produce warning or note about datetime string data saved into field SYNOPSYS - set_warning() + set_datime_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced str - string value which we tried to save @@ -6184,8 +6212,10 @@ Field::set_datetime_warning(const uint level, const uint code, const char *str, uint str_length, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) - make_truncated_value_warning(table->in_use, str, str_length, ts_type); + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) + make_truncated_value_warning(table->in_use, str, str_length, ts_type, + field_name); } @@ -6210,12 +6240,13 @@ Field::set_datetime_warning(const uint level, const uint code, longlong nr, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) { char str_nr[22]; char *str_end= longlong10_to_str(nr, str_nr, -10); make_truncated_value_warning(table->in_use, str_nr, str_end - str_nr, - ts_type); + ts_type, field_name); } } @@ -6239,12 +6270,14 @@ void Field::set_datetime_warning(const uint level, const uint code, double nr, timestamp_type ts_type) { - if (set_warning(level, code, 1)) + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, 1)) { /* DBL_DIG is enough to print '-[digits].E+###' */ char str_nr[DBL_DIG + 8]; uint str_len= my_sprintf(str_nr, (str_nr, "%g", nr)); - make_truncated_value_warning(table->in_use, str_nr, str_len, ts_type); + make_truncated_value_warning(table->in_use, str_nr, str_len, ts_type, + field_name); } } diff --git a/sql/field.h b/sql/field.h index 8887da1dc0f..50ea1450085 100644 --- a/sql/field.h +++ b/sql/field.h @@ -281,6 +281,8 @@ public: virtual void set_charset(CHARSET_INFO *charset) { } bool set_warning(const unsigned int level, const unsigned int code, int cuted_increment); + bool check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs); void set_datetime_warning(const uint level, const uint code, const char *str, uint str_len, timestamp_type ts_type, int cuted_increment); @@ -1262,8 +1264,6 @@ uint pack_length_to_packflag(uint type); uint32 calc_pack_length(enum_field_types type,uint32 length); int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs); /* The following are for the interface with the .frm file @@ -1282,6 +1282,7 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_LEFT_FULLSCREEN 8192 #define FIELDFLAG_RIGHT_FULLSCREEN 16384 #define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output +#define FIELDFLAG_NO_DEFAULT 16384 /* sql */ #define FIELDFLAG_SUM ((uint) 32768)// predit: +#fieldflag #define FIELDFLAG_MAYBE_NULL ((uint) 32768)// sql #define FIELDFLAG_PACK_SHIFT 3 @@ -1290,8 +1291,6 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_NUM_SCREEN_TYPE 0x7F01 #define FIELDFLAG_ALFA_SCREEN_TYPE 0x7800 -#define FIELD_SORT_REVERSE 16384 - #define MTYP_TYPENR(type) (type & 127) /* Remove bits from type */ #define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL) @@ -1309,3 +1308,4 @@ bool 
test_if_int(const char *str, int length, const char *int_end, #define f_is_equ(x) ((x) & (1+2+FIELDFLAG_PACK+31*256)) #define f_settype(x) (((int) x) << FIELDFLAG_PACK_SHIFT) #define f_maybe_null(x) (x & FIELDFLAG_MAYBE_NULL) +#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT) diff --git a/sql/filesort.cc b/sql/filesort.cc index bd0de022fd4..3e56acefea9 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -48,7 +48,8 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer, BUFFPEK *buffpek, uint maxbuffer,IO_CACHE *tempfile, IO_CACHE *outfile); -static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count); +static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count, + FILESORT_INFO *table_sort); static uint sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset); static SORT_ADDON_FIELD *get_addon_fields(THD *thd, Field **ptabfield, @@ -106,8 +107,16 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, #ifdef SKIP_DBUG_IN_FILESORT DBUG_PUSH(""); /* No DBUG here */ #endif - - outfile= table->sort.io_cache; + FILESORT_INFO table_sort; + /* + Don't use table->sort in filesort as it is also used by + QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end + when index_merge select has finished with it. + */ + memcpy(&table_sort, &table->sort, sizeof(FILESORT_INFO)); + table->sort.io_cache= NULL; + + outfile= table_sort.io_cache; my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; @@ -128,14 +137,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, param.sort_length, ¶m.addon_length); } - table->sort.addon_buf= 0; - table->sort.addon_length= param.addon_length; - table->sort.addon_field= param.addon_field; - table->sort.unpack= unpack_addon_fields; + + table_sort.addon_buf= 0; + table_sort.addon_length= param.addon_length; + table_sort.addon_field= param.addon_field; + table_sort.unpack= unpack_addon_fields; if (param.addon_field) { param.res_length= param.addon_length; - if (!(table->sort.addon_buf= (byte *) my_malloc(param.addon_length, + if (!(table_sort.addon_buf= (byte *) my_malloc(param.addon_length, MYF(MY_WME)))) goto err; } @@ -153,11 +163,11 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (select && select->quick) { - statistic_increment(filesort_range_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_range_count, &LOCK_status); } else { - statistic_increment(filesort_scan_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_scan_count, &LOCK_status); } #ifdef CAN_TRUST_RANGE if (select && select->quick && select->quick->records > 0L) @@ -218,7 +228,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (maxbuffer == 0) // The whole set is in memory { - if (save_index(¶m,sort_keys,(uint) records)) + if (save_index(¶m,sort_keys,(uint) records, &table_sort)) goto err; } else @@ -276,11 +286,13 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (error) my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG)); else - statistic_add(filesort_rows, (ulong) records, &LOCK_status); + statistic_add(thd->status_var.filesort_rows, + (ulong) records, &LOCK_status); *examined_rows= param.examined_rows; #ifdef SKIP_DBUG_IN_FILESORT DBUG_POP(); /* Ok to DBUG */ #endif + memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO)); DBUG_PRINT("exit",("records: %ld",records)); DBUG_RETURN(error ? 
HA_POS_ERROR : records); } /* filesort */ @@ -387,7 +399,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, byte *ref_pos,*next_pos,ref_buff[MAX_REFLENGTH]; my_off_t record; TABLE *sort_form; - volatile my_bool *killed= ¤t_thd->killed; + volatile THD::killed_state *killed= ¤t_thd->killed; handler *file; DBUG_ENTER("find_all_keys"); DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row"))); @@ -416,12 +428,24 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, current_thd->variables.read_buff_size); } + READ_RECORD read_record_info; + if (quick_select) + { + if (select->quick->reset()) + DBUG_RETURN(HA_POS_ERROR); + init_read_record(&read_record_info, current_thd, select->quick->head, + select, 1, 1); + } + for (;;) { if (quick_select) { - if ((error=select->quick->get_next())) - break; + if ((error= read_record_info.read_record(&read_record_info))) + { + error= HA_ERR_END_OF_FILE; + break; + } file->position(sort_form->record[0]); } else /* Not quick-select */ @@ -449,6 +473,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (error && error != HA_ERR_RECORD_DELETED) break; } + if (*killed) { DBUG_PRINT("info",("Sort killed by user")); @@ -472,9 +497,21 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, else file->unlock_row(); } - (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ - if (!next_pos) - file->ha_rnd_end(); + if (quick_select) + { + /* + index_merge quick select uses table->sort when retrieving rows, so free + resoures it has allocated. + */ + end_read_record(&read_record_info); + } + else + { + (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ + if (!next_pos) + file->ha_rnd_end(); + } + DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); if (error != HA_ERR_END_OF_FILE) { @@ -747,8 +784,8 @@ static void make_sortkey(register SORTPARAM *param, return; } - -static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) +static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count, + FILESORT_INFO *table_sort) { uint offset,res_length; byte *to; @@ -759,7 +796,7 @@ static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) offset= param->rec_length-res_length; if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; - if (!(to= param->sort_form->sort.record_pointers= + if (!(to= table_sort->record_pointers= (byte*) my_malloc(res_length*count, MYF(MY_WME)))) DBUG_RETURN(1); /* purecov: inspected */ for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++) @@ -839,6 +876,39 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, } /* read_to_buffer */ +/* + Put all room used by freed buffer to use in adjacent buffer. Note, that + we can't simply distribute memory evenly between all buffers, because + new areas must not overlap with old ones. 
+ SYNOPSYS + reuse_freed_buff() + queue IN list of non-empty buffers, without freed buffer + reuse IN empty buffer + key_length IN key length +*/ + +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) +{ + uchar *reuse_end= reuse->base + reuse->max_keys * key_length; + for (uint i= 0; i < queue->elements; ++i) + { + BUFFPEK *bp= (BUFFPEK *) queue_element(queue, i); + if (bp->base + bp->max_keys * key_length == reuse->base) + { + bp->max_keys+= reuse->max_keys; + return; + } + else if (bp->base == reuse_end) + { + bp->base= reuse->base; + bp->max_keys+= reuse->max_keys; + return; + } + } + DBUG_ASSERT(0); +} + + /* Merge buffers to one buffer SYNOPSIS @@ -868,18 +938,19 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; - BUFFPEK *buffpek,**refpek; + BUFFPEK *buffpek; QUEUE queue; qsort2_cmp cmp; - volatile my_bool *killed= ¤t_thd->killed; - my_bool not_killable; + volatile THD::killed_state *killed= ¤t_thd->killed; + THD::killed_state not_killable; DBUG_ENTER("merge_buffers"); - statistic_increment(filesort_merge_passes, &LOCK_status); + statistic_increment(current_thd->status_var.filesort_merge_passes, + &LOCK_status); if (param->not_killable) { killed= ¬_killable; - not_killable= 0; + not_killable= THD::NOT_KILLED; } error=0; @@ -982,29 +1053,8 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, if (!(error= (int) read_to_buffer(from_file,buffpek, rec_length))) { - uchar *base= buffpek->base; - ulong max_keys= buffpek->max_keys; - VOID(queue_remove(&queue,0)); - - /* Put room used by buffer to use in other buffer */ - for (refpek= (BUFFPEK**) &queue_top(&queue); - refpek <= (BUFFPEK**) &queue_end(&queue); - refpek++) - { - buffpek= *refpek; - if (buffpek->base+buffpek->max_keys*rec_length == base) - { - buffpek->max_keys+= max_keys; - break; - } - else if (base+max_keys*rec_length == buffpek->base) - { - buffpek->base= base; - buffpek->max_keys+= max_keys; - break; - } - } + reuse_freed_buff(&queue, buffpek, rec_length); break; /* One buffer have been removed */ } else if (error == -1) diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 2c7cce4bcd0..ff6b10fe504 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -857,7 +857,7 @@ int ha_berkeley::write_row(byte * record) int error; DBUG_ENTER("write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && record == table->record[0]) @@ -1101,10 +1101,11 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DB_TXN *sub_trans; ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; bool primary_key_changed; + DBUG_ENTER("update_row"); LINT_INIT(error); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -1294,7 +1295,7 @@ int ha_berkeley::delete_row(const byte * record) key_map keys=table->keys_in_use; ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); if ((error=pack_row(&row, record, 0))) DBUG_RETURN((error)); /* purecov: inspected */ @@ -1441,7 +1442,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; active_index=MAX_KEY; @@ -1460,9 +1461,10 @@ int ha_berkeley::index_read(byte * buf, const byte * key, int error; KEY *key_info= &table->key_info[active_index]; int do_prev= 0; + DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); bzero((char*) &row,sizeof(row)); if (find_flag == HA_READ_BEFORE_KEY) { @@ -1531,7 +1533,8 @@ int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len) KEY *key_info= &table->key_info[active_index]; DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); /* read of partial key */ @@ -1555,7 +1558,8 @@ int ha_berkeley::index_next(byte * buf) { DBT row; DBUG_ENTER("index_next"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, active_index, &row, &last_key, 1)); @@ -1566,7 +1570,8 @@ int ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen) DBT row; int error; DBUG_ENTER("index_next_same"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); if (keylen == table->key_info[active_index].key_length) error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_DUP), @@ -1586,7 +1591,8 @@ int ha_berkeley::index_prev(byte * buf) { DBT row; DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), (char*) buf, active_index, &row, &last_key, 1)); @@ -1597,7 +1603,8 @@ int ha_berkeley::index_first(byte * buf) { DBT row; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_FIRST), (char*) buf, active_index, &row, &last_key, 1)); @@ -1607,7 +1614,8 @@ int ha_berkeley::index_last(byte * buf) { DBT row; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_LAST), (char*) buf, active_index, &row, &last_key, 0)); @@ -1629,7 +1637,8 @@ int 
ha_berkeley::rnd_next(byte *buf) { DBT row; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, primary_key, &row, &last_key, 1)); @@ -1660,9 +1669,10 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) int ha_berkeley::rnd_pos(byte * buf, byte *pos) { DBT db_pos; - statistic_increment(ha_read_rnd_count,&LOCK_status); + DBUG_ENTER("ha_berkeley::rnd_pos"); - + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); active_index= MAX_KEY; DBUG_RETURN(read_row(file->get(file, transaction, get_pos(&db_pos, pos), @@ -2091,9 +2101,9 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, } -longlong ha_berkeley::get_auto_increment() +ulonglong ha_berkeley::get_auto_increment() { - longlong nr=1; // Default if error or new key + ulonglong nr=1; // Default if error or new key int error; (void) ha_berkeley::extra(HA_EXTRA_KEYREAD); @@ -2142,7 +2152,7 @@ longlong ha_berkeley::get_auto_increment() } } if (!error) - nr=(longlong) + nr=(ulonglong) table->next_number_field->val_int_offset(table->rec_buff_length)+1; ha_berkeley::index_end(); (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD); @@ -2181,7 +2191,7 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name, protocol->store(msg_type); protocol->store(msgbuf); if (protocol->write()) - thd->killed=1; + thd->killed=THD::KILL_CONNECTION; } #endif @@ -2563,4 +2573,29 @@ ha_rows ha_berkeley::estimate_rows_upper_bound() return share->rows + HA_BERKELEY_EXTRA_ROWS; } +int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2) +{ + if (hidden_primary_key) + return memcmp(ref1, ref2, BDB_HIDDEN_PRIMARY_KEY_LENGTH); + + int result; + Field *field; + KEY *key_info=table->key_info+table->primary_key; + KEY_PART_INFO *key_part=key_info->key_part; + KEY_PART_INFO *end=key_part+key_info->key_parts; + + for (; key_part != end; key_part++) + { + field= key_part->field; + result= field->pack_cmp((const char*)ref1, (const char*)ref2, + key_part->length); + if (result) + return result; + ref1 += field->packed_col_length((const char*)ref1, key_part->length); + ref2 += field->packed_col_length((const char*)ref2, key_part->length); + } + + return 0; +} + #endif /* HAVE_BERKELEY_DB */ diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index 25d3e128502..e485b12bdb4 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -153,9 +153,11 @@ class ha_berkeley: public handler int5store(to,share->auto_ident); pthread_mutex_unlock(&share->mutex); } - longlong get_auto_increment(); + ulonglong get_auto_increment(); void print_error(int error, myf errflag); uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; } + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; extern bool berkeley_shared_data; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 19b15c6fbcc..3bb8383e488 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -86,7 +86,7 @@ void ha_heap::set_keys_for_scanning(void) int ha_heap::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -96,7 +96,7 @@ int 
ha_heap::write_row(byte * buf) int ha_heap::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return heap_update(file,old_data,new_data); @@ -104,7 +104,7 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) int ha_heap::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_delete_count,&LOCK_status); return heap_delete(file,buf); } @@ -112,7 +112,7 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error = heap_rkey(file,buf,active_index, key, key_len, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -121,7 +121,7 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error= heap_rkey(file, buf, active_index, key, key_len, HA_READ_PREFIX_LAST); table->status= error ? STATUS_NOT_FOUND : 0; @@ -131,7 +131,7 @@ int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error = heap_rkey(file, buf, index, key, key_len, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -140,7 +140,7 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, int ha_heap::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count,&LOCK_status); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -149,7 +149,7 @@ int ha_heap::index_next(byte * buf) int ha_heap::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count,&LOCK_status); int error=heap_rprev(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -158,7 +158,8 @@ int ha_heap::index_prev(byte * buf) int ha_heap::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -167,7 +168,7 @@ int ha_heap::index_first(byte * buf) int ha_heap::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); int error=heap_rlast(file, buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -180,7 +181,8 @@ int ha_heap::rnd_init(bool scan) int ha_heap::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=heap_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -190,7 +192,7 @@ int ha_heap::rnd_pos(byte * buf, byte *pos) { int error; HEAP_PTR position; - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, &LOCK_status); memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR)); error=heap_rrnd(file, buf, position); table->status=error ? STATUS_NOT_FOUND: 0; @@ -481,7 +483,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info) create_info->auto_increment_value= auto_increment_value; } -longlong ha_heap::get_auto_increment() +ulonglong ha_heap::get_auto_increment() { ha_heap::info(HA_STATUS_AUTO); return auto_increment_value; diff --git a/sql/ha_heap.h b/sql/ha_heap.h index 9ca6b9b76b6..e469a676b65 100644 --- a/sql/ha_heap.h +++ b/sql/ha_heap.h @@ -62,7 +62,7 @@ class ha_heap: public handler int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte * buf, uint idx, const byte * key, @@ -91,5 +91,10 @@ class ha_heap: public handler THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - + int cmp_ref(const byte *ref1, const byte *ref2) + { + HEAP_PTR ptr1=*(HEAP_PTR*)ref1; + HEAP_PTR ptr2=*(HEAP_PTR*)ref2; + return ptr1 < ptr2? -1 : (ptr1 > ptr2? 
1 : 0); + } }; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 6e08fc680b2..370458c6e01 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -853,6 +853,8 @@ ha_innobase::init_table_handle_for_HANDLER(void) prebuilt->read_just_key = FALSE; prebuilt->used_in_HANDLER = TRUE; + + prebuilt->keep_other_fields_on_keyread = FALSE; } /************************************************************************* @@ -1724,8 +1726,6 @@ ha_innobase::open( } } - auto_inc_counter_for_this_stat = 0; - block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL in query optimization */ @@ -2205,12 +2205,15 @@ build_template( templ = prebuilt->mysql_template + n_requested_fields; field = table->field[i]; - if (templ_type == ROW_MYSQL_REC_FIELDS - && !(fetch_all_in_key - && dict_index_contains_col_or_prefix(index, i)) - && !(fetch_primary_key_cols - && dict_table_col_in_clustered_key(index->table, i)) - && thd->query_id != field->query_id) { + ibool index_contains_field= + dict_index_contains_col_or_prefix(index, i); + + if (templ_type == ROW_MYSQL_REC_FIELDS && + ((prebuilt->read_just_key && !index_contains_field) || + (!(fetch_all_in_key && index_contains_field) && + !(fetch_primary_key_cols && + dict_table_col_in_clustered_key(index->table, i)) && + thd->query_id != field->query_id))) { /* This field is not needed in the query, skip it */ @@ -2287,7 +2290,7 @@ ha_innobase::write_row( longlong dummy; ibool incremented_auto_inc_for_stat = FALSE; ibool incremented_auto_inc_counter = FALSE; - ibool skip_auto_inc_decr; + ibool auto_inc_used= FALSE; DBUG_ENTER("ha_innobase::write_row"); @@ -2309,7 +2312,8 @@ ha_innobase::write_row( ut_error; } - statistic_increment(ha_write_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count, + &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -2377,39 +2381,34 @@ ha_innobase::write_row( prebuilt->sql_stat_start = TRUE; } - /* Fetch the value the user possibly has set in the - autoincrement field */ + /* + We must use the handler code to update the auto-increment + value to be sure that increment it correctly. + */ + update_auto_increment(); + auto_inc_used= 1; - auto_inc = table->next_number_field->val_int(); + } - /* In replication and also otherwise the auto-inc column - can be set with SET INSERT_ID. Then we must look at - user_thd->next_insert_id. If it is nonzero and the user - has not supplied a value, we must use it, and use values - incremented by 1 in all subsequent inserts within the - same SQL statement! */ + if (prebuilt->mysql_template == NULL + || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + /* Build the template used in converting quickly between + the two database formats */ - if (auto_inc == 0 && user_thd->next_insert_id != 0) { - auto_inc = user_thd->next_insert_id; - auto_inc_counter_for_this_stat = auto_inc; - } + build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW); + } + + innodb_srv_conc_enter_innodb(prebuilt->trx); - if (auto_inc == 0 && auto_inc_counter_for_this_stat) { - /* The user set the auto-inc counter for - this SQL statement with SET INSERT_ID. We must - assign sequential values from the counter. 
*/ + error = row_insert_for_mysql((byte*) record, prebuilt); - auto_inc_counter_for_this_stat++; - incremented_auto_inc_for_stat = TRUE; + if (error == DB_SUCCESS && auto_inc_used) { - auto_inc = auto_inc_counter_for_this_stat; + /* Fetch the value that was set in the autoincrement field */ - /* We give MySQL a new value to place in the - auto-inc column */ - user_thd->next_insert_id = auto_inc; - } + auto_inc = table->next_number_field->val_int(); - if (auto_inc != 0) { + if (auto_inc != 0) { /* This call will calculate the max of the current value and the value supplied by the user and update the counter accordingly */ @@ -2421,107 +2420,19 @@ ha_innobase::write_row( The lock is released at each SQL statement's end. */ - innodb_srv_conc_enter_innodb(prebuilt->trx); - error = row_lock_table_autoinc_for_mysql(prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); - - if (error != DB_SUCCESS) { + error = row_lock_table_autoinc_for_mysql(prebuilt); - error = convert_error_code_to_mysql(error, - user_thd); - goto func_exit; - } - - dict_table_autoinc_update(prebuilt->table, auto_inc); - } else { - innodb_srv_conc_enter_innodb(prebuilt->trx); - - if (!prebuilt->trx->auto_inc_lock) { - - error = row_lock_table_autoinc_for_mysql( - prebuilt); - if (error != DB_SUCCESS) { - innodb_srv_conc_exit_innodb( - prebuilt->trx); + if (error != DB_SUCCESS) { - error = convert_error_code_to_mysql( - error, user_thd); - goto func_exit; - } - } - - /* The following call gets the value of the auto-inc - counter of the table and increments it by 1 */ - - auto_inc = dict_table_autoinc_get(prebuilt->table); - incremented_auto_inc_counter = TRUE; - - innodb_srv_conc_exit_innodb(prebuilt->trx); - - /* We can give the new value for MySQL to place in - the field */ - - user_thd->next_insert_id = auto_inc; - } - - /* This call of a handler.cc function places - user_thd->next_insert_id to the column value, if the column - value was not set by the user */ - - update_auto_increment(); - } - - if (prebuilt->mysql_template == NULL - || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { - /* Build the template used in converting quickly between - the two database formats */ - - build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW); - } - - innodb_srv_conc_enter_innodb(prebuilt->trx); - - error = row_insert_for_mysql((byte*) record, prebuilt); + error = convert_error_code_to_mysql(error, user_thd); + goto func_exit; + } + dict_table_autoinc_update(prebuilt->table, auto_inc); + } + } innodb_srv_conc_exit_innodb(prebuilt->trx); - if (error != DB_SUCCESS) { - /* If the insert did not succeed we restore the value of - the auto-inc counter we used; note that this behavior was - introduced only in version 4.0.4. - NOTE that a REPLACE command and LOAD DATA INFILE REPLACE - handles a duplicate key error - itself, and we must not decrement the autoinc counter - if we are performing those statements. - NOTE 2: if there was an error, for example a deadlock, - which caused InnoDB to roll back the whole transaction - already in the call of row_insert_for_mysql(), we may no - longer have the AUTO-INC lock, and cannot decrement - the counter here. 
*/ - - skip_auto_inc_decr = FALSE; - - if (error == DB_DUPLICATE_KEY - && (user_thd->lex->sql_command == SQLCOM_REPLACE - || user_thd->lex->sql_command - == SQLCOM_REPLACE_SELECT - || (user_thd->lex->sql_command == SQLCOM_LOAD - && user_thd->lex->duplicates == DUP_REPLACE))) { - - skip_auto_inc_decr= TRUE; - } - - if (!skip_auto_inc_decr && incremented_auto_inc_counter - && prebuilt->trx->auto_inc_lock) { - dict_table_autoinc_decrement(prebuilt->table); - } - - if (!skip_auto_inc_decr && incremented_auto_inc_for_stat - && prebuilt->trx->auto_inc_lock) { - auto_inc_counter_for_this_stat--; - } - } - error = convert_error_code_to_mysql(error, user_thd); /* Tell InnoDB server that there might be work for @@ -2532,6 +2443,7 @@ func_exit: DBUG_RETURN(error); } + /****************************************************************** Converts field data for storage in an InnoDB update vector. */ inline @@ -2806,6 +2718,32 @@ ha_innobase::delete_row( DBUG_RETURN(error); } +/************************************************************************** +Deletes a lock set to a row */ + +void +ha_innobase::unlock_row(void) +/*=========================*/ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + + DBUG_ENTER("ha_innobase::unlock_row"); + + ut_ad(prebuilt->trx == + (trx_t*) current_thd->transaction.all.innobase_tid); + + if (last_query_id != user_thd->query_id) { + ut_print_timestamp(stderr); + fprintf(stderr, +" InnoDB: Error: last_query_id is %lu != user_thd_query_id is %lu\n", + (ulong)last_query_id, (ulong)user_thd->query_id); + mem_analyze_corruption((byte *) prebuilt->trx); + ut_error; + } + + row_unlock_for_mysql(prebuilt); +} + /********************************************************************** Initializes a handle to use an index. 
*/ @@ -2959,7 +2897,8 @@ ha_innobase::index_read( ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3065,7 +3004,8 @@ ha_innobase::change_active_index( { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; KEY* key=0; - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); DBUG_ENTER("change_active_index"); ut_ad(user_thd == current_thd); @@ -3197,7 +3137,8 @@ ha_innobase::index_next( mysql_byte* buf) /* in/out: buffer for next row in MySQL format */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, 0)); } @@ -3214,7 +3155,8 @@ ha_innobase::index_next_same( const mysql_byte* key, /* in: key value */ uint keylen) /* in: key value length */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode)); } @@ -3248,7 +3190,8 @@ ha_innobase::index_first( int error; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); @@ -3274,7 +3217,8 @@ ha_innobase::index_last( int error; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); @@ -3339,7 +3283,8 @@ ha_innobase::rnd_next( int error; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (start_of_scan) { error = index_first(buf); @@ -3375,7 +3320,8 @@ ha_innobase::rnd_pos( DBUG_ENTER("rnd_pos"); DBUG_DUMP("key", (char*) pos, ref_length); - statistic_increment(ha_read_rnd_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); @@ -4790,9 +4736,11 @@ ha_innobase::extra( if (prebuilt->blob_heap) { row_mysql_prebuilt_free_blob_heap(prebuilt); } + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_RESET_STATE: + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_NO_KEYREAD: @@ -4811,6 +4759,9 @@ ha_innobase::extra( case HA_EXTRA_KEYREAD: prebuilt->read_just_key = 1; break; + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + prebuilt->keep_other_fields_on_keyread = 1; + break; default:/* Do nothing */ ; } @@ -4859,6 +4810,7 @@ ha_innobase::start_stmt( prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (!prebuilt->mysql_has_locked) { /* This handle is for a temporary table created inside @@ -4969,6 +4921,7 @@ ha_innobase::external_lock( prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (lock_type == F_WRLCK) { @@ -5130,7 +5083,8 
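The statistic_increment() changes in this and the following files all follow one pattern: the global ha_* counters move into a per-connection status_var structure hanging off the THD, so each session accumulates its own handler statistics under the same LOCK_status mutex. In sketch form (the aggregation step is an assumption, not shown in these hunks):

    static void count_key_read(THD *thd)
    {
      /* the per-connection pattern used throughout these hunks */
      statistic_increment(thd->status_var.ha_read_key_count, &LOCK_status);

      /* at connection end the per-THD counters would be folded into the
         server-wide totals; this helper name is a hypothetical placeholder:
         add_thread_status_to_global(&global_status_var, &thd->status_var); */
    }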
@@ innodb_show_status( field_list.push_back(new Item_empty_string("Status", flen)); - if (protocol->send_fields(&field_list, 1)) { + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) { my_free(str, MYF(0)); @@ -5397,7 +5351,7 @@ initialized yet. This function does not change the value of the auto-inc counter if it already has been initialized. Returns the value of the auto-inc counter. */ -longlong +ulonglong ha_innobase::get_auto_increment() /*=============================*/ /* out: auto-increment column value, -1 if error @@ -5410,10 +5364,10 @@ ha_innobase::get_auto_increment() if (error) { - return(-1); + return(~(ulonglong) 0); } - return(nr); + return((ulonglong) nr); } /*********************************************************************** @@ -5422,7 +5376,7 @@ This function stores the binlog offset and flushes logs. */ void innobase_store_binlog_offset_and_flush_log( /*=======================================*/ - char *binlog_name, /* in: binlog name */ + char *binlog_name, /* in: binlog name */ longlong offset) /* in: binlog offset */ { mtr_t mtr; @@ -5447,6 +5401,55 @@ innobase_store_binlog_offset_and_flush_log( log_buffer_flush_to_disk(); } + +int +ha_innobase::cmp_ref( + const mysql_byte *ref1, + const mysql_byte *ref2) +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + enum_field_types mysql_type; + Field* field; + int result; + + if (prebuilt->clust_index_was_generated) + return memcmp(ref1, ref2, DATA_ROW_ID_LEN); + + /* Do type-aware comparison of Primary Key members. PK members + are always NOT NULL, so no checks for NULL are performed */ + KEY_PART_INFO *key_part= table->key_info[table->primary_key].key_part; + KEY_PART_INFO *key_part_end= + key_part + table->key_info[table->primary_key].key_parts; + for (; key_part != key_part_end; ++key_part) { + field = key_part->field; + mysql_type = field->type(); + if (mysql_type == FIELD_TYPE_TINY_BLOB + || mysql_type == FIELD_TYPE_MEDIUM_BLOB + || mysql_type == FIELD_TYPE_BLOB + || mysql_type == FIELD_TYPE_LONG_BLOB) { + + ut_a(!ref1[1]); + ut_a(!ref2[1]); + byte len1= *ref1; + byte len2= *ref2; + ref1 += 2; + ref2 += 2; + result = + ((Field_blob*)field)->cmp((const char*)ref1, len1, + (const char*)ref2, len2); + } else { + result = + field->cmp((const char*)ref1, (const char*)ref2); + } + + if (result) + return result; + ref1 += key_part->length; + ref2 += key_part->length; + } + return 0; +} + char* ha_innobase::get_mysql_bin_log_name() { diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e76a966c6b9..7e337afed0e 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -122,6 +122,7 @@ class ha_innobase: public handler int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); + void unlock_row(); int index_init(uint index); int index_end(); @@ -166,11 +167,12 @@ class ha_innobase: public handler THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); void init_table_handle_for_HANDLER(); - longlong get_auto_increment(); + ulonglong get_auto_increment(); uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } - - static char *get_mysql_bin_log_name(); + static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; extern uint innobase_init_flags, innobase_lock_type; diff --git a/sql/ha_isam.cc b/sql/ha_isam.cc index 9de532fa7b0..d703df7d2e3 100644 --- 
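Two related additions appear here and in handler.h further down: primary_key_is_clustered() tells the upper layers that InnoDB's row "ref" is the primary key value itself (or an internal row id when no primary key was defined), and cmp_ref() therefore compares refs column by column with each field's own collation-aware cmp() instead of the byte-wise memcmp the base class can use. Callers only ever need the engine-neutral form; a small hedged sketch:

    /* Order two row references the way the owning engine defines it.
       For most engines this is the default memcmp over ref_length bytes;
       for InnoDB it is the type-aware primary-key comparison shown above. */
    static bool refs_equal(handler *h, const byte *ref1, const byte *ref2)
    {
      return h->cmp_ref(ref1, ref2) == 0;
    }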
a/sql/ha_isam.cc +++ b/sql/ha_isam.cc @@ -69,7 +69,7 @@ uint ha_isam::min_record_length(uint options) const int ha_isam::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -79,7 +79,7 @@ int ha_isam::write_row(byte * buf) int ha_isam::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_update_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return !nisam_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; @@ -87,14 +87,14 @@ int ha_isam::update_row(const byte * old_data, byte * new_data) int ha_isam::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_delete_count, &LOCK_status); return !nisam_delete(file,buf) ? 0 : my_errno ? my_errno : -1; } int ha_isam::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error=nisam_rkey(file, buf, active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : -1; @@ -103,7 +103,7 @@ int ha_isam::index_read(byte * buf, const byte * key, int ha_isam::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error=nisam_rkey(file, buf, index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : -1; @@ -111,7 +111,7 @@ int ha_isam::index_read_idx(byte * buf, uint index, const byte * key, int ha_isam::index_read_last(byte * buf, const byte * key, uint key_len) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); int error=nisam_rkey(file, buf, active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; @@ -120,7 +120,8 @@ int ha_isam::index_read_last(byte * buf, const byte * key, uint key_len) int ha_isam::index_next(byte * buf) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); int error=nisam_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; @@ -128,7 +129,8 @@ int ha_isam::index_next(byte * buf) int ha_isam::index_prev(byte * buf) { - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count, + &LOCK_status); int error=nisam_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? 
my_errno : HA_ERR_END_OF_FILE; @@ -136,7 +138,8 @@ int ha_isam::index_prev(byte * buf) int ha_isam::index_first(byte * buf) { - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); int error=nisam_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; @@ -144,7 +147,8 @@ int ha_isam::index_first(byte * buf) int ha_isam::index_last(byte * buf) { - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count, + &LOCK_status); int error=nisam_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; @@ -157,7 +161,8 @@ int ha_isam::rnd_init(bool scan) int ha_isam::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=nisam_rrnd(file, buf, NI_POS_ERROR); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : -1; @@ -165,7 +170,8 @@ int ha_isam::rnd_next(byte *buf) int ha_isam::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); int error=nisam_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : -1; @@ -330,7 +336,7 @@ int ha_isam::create(const char *name, register TABLE *form, } } } - DBUG_PRINT("loop",("found: %lx recpos: %d minpos: %d length: %d", + DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", found,recpos,minpos,length)); if (recpos != minpos) { // Reserved space (Null bits?) diff --git a/sql/ha_isammrg.cc b/sql/ha_isammrg.cc index 367607eef19..5a070087724 100644 --- a/sql/ha_isammrg.cc +++ b/sql/ha_isammrg.cc @@ -77,7 +77,7 @@ int ha_isammrg::write_row(byte * buf) int ha_isammrg::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_update_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return !mrg_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; @@ -85,7 +85,7 @@ int ha_isammrg::update_row(const byte * old_data, byte * new_data) int ha_isammrg::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_delete_count, &LOCK_status); return !mrg_delete(file,buf) ? 0 : my_errno ? my_errno : -1; } @@ -128,7 +128,8 @@ int ha_isammrg::rnd_init(bool scan) int ha_isammrg::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=mrg_rrnd(file, buf, ~(mrg_off_t) 0); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? my_errno : -1; @@ -136,7 +137,7 @@ int ha_isammrg::rnd_next(byte *buf) int ha_isammrg::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, &LOCK_status); int error=mrg_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return !error ? 0 : my_errno ? 
my_errno : -1; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index a7beae664b9..7482c6d5fa8 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -87,9 +87,10 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, extern "C" { -volatile my_bool *killed_ptr(MI_CHECK *param) +volatile int *killed_ptr(MI_CHECK *param) { - return &(((THD *)(param->thd))->killed); + /* In theory Unsafe conversion, but should be ok for now */ + return (int*) &(((THD *)(param->thd))->killed); } void mi_check_print_error(MI_CHECK *param, const char *fmt,...) @@ -248,7 +249,7 @@ int ha_myisam::close(void) int ha_myisam::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count,&LOCK_status); /* If we have a timestamp column, update it to the current time */ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -1083,7 +1084,7 @@ bool ha_myisam::is_crashed() const int ha_myisam::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); @@ -1091,7 +1092,7 @@ int ha_myisam::update_row(const byte * old_data, byte * new_data) int ha_myisam::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_delete_count,&LOCK_status); return mi_delete(file,buf); } @@ -1099,7 +1100,7 @@ int ha_myisam::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1108,7 +1109,7 @@ int ha_myisam::index_read(byte * buf, const byte * key, int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=mi_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1117,7 +1118,7 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1126,7 +1127,7 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisam::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count,&LOCK_status); int error=mi_rnext(file,buf,active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -1135,7 +1136,7 @@ int ha_myisam::index_next(byte * buf) int ha_myisam::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count,&LOCK_status); int error=mi_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1144,7 +1145,8 @@ int ha_myisam::index_prev(byte * buf) int ha_myisam::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1153,7 +1155,7 @@ int ha_myisam::index_first(byte * buf) int ha_myisam::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); int error=mi_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1164,7 +1166,7 @@ int ha_myisam::index_next_same(byte * buf, uint length __attribute__((unused))) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count,&LOCK_status); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1180,7 +1182,8 @@ int ha_myisam::rnd_init(bool scan) int ha_myisam::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=mi_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1193,7 +1196,7 @@ int ha_myisam::restart_rnd_next(byte *buf, byte *pos) int ha_myisam::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count,&LOCK_status); int error=mi_rrnd(file, buf, ha_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1448,7 +1451,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } } } - DBUG_PRINT("loop",("found: %lx recpos: %d minpos: %d length: %d", + DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", found,recpos,minpos,length)); if (recpos != minpos) { // Reserved space (Null bits?) 
@@ -1533,8 +1536,12 @@ int ha_myisam::rename_table(const char * from, const char * to) } -longlong ha_myisam::get_auto_increment() +ulonglong ha_myisam::get_auto_increment() { + ulonglong nr; + int error; + byte key[MI_MAX_KEY_LENGTH]; + if (!table->next_number_key_offset) { // Autoincrement at key-start ha_myisam::info(HA_STATUS_AUTO); @@ -1544,19 +1551,17 @@ longlong ha_myisam::get_auto_increment() /* it's safe to call the following if bulk_insert isn't on */ mi_flush_bulk_insert(file, table->next_number_index); - longlong nr; - int error; - byte key[MI_MAX_KEY_LENGTH]; (void) extra(HA_EXTRA_KEYREAD); - key_copy(key,table,table->next_number_index, - table->next_number_key_offset); + key_copy(key, table->record[0], + table->key_info + table->next_number_index, + table->next_number_key_offset); error=mi_rkey(file,table->record[1],(int) table->next_number_index, key,table->next_number_key_offset,HA_READ_PREFIX_LAST); if (error) - nr=1; + nr= 1; else - nr=(longlong) - table->next_number_field->val_int_offset(table->rec_buff_length)+1; + nr= ((ulonglong) table->next_number_field-> + val_int_offset(table->rec_buff_length)+1); extra(HA_EXTRA_NO_KEYREAD); return nr; } @@ -1601,7 +1606,8 @@ int ha_myisam::ft_read(byte * buf) if (!ft_handler) return -1; - thread_safe_increment(ha_read_next_count,&LOCK_status); // why ? + thread_safe_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); // why ? error=ft_handler->please->read_next(ft_handler,(char*) buf); diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 6fde84d6f6f..527e6a49aba 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -81,7 +81,6 @@ class ha_myisam: public handler int index_first(byte * buf); int index_last(byte * buf); int index_next_same(byte *buf, const byte *key, uint keylen); - int index_end() { ft_handler=NULL; return 0; } int ft_init() { if (!ft_handler) @@ -112,7 +111,7 @@ class ha_myisam: public handler int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int rename_table(const char * from, const char * to); int delete_table(const char *name); int check(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index bf4c2a36ffd..903dd9dec2f 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -81,7 +81,7 @@ int ha_myisammrg::close(void) int ha_myisammrg::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -91,7 +91,7 @@ int ha_myisammrg::write_row(byte * buf) int ha_myisammrg::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); @@ -99,14 +99,14 @@ int ha_myisammrg::update_row(const byte * old_data, byte * new_data) int ha_myisammrg::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_delete_count,&LOCK_status); return myrg_delete(file,buf); } int ha_myisammrg::index_read(byte * buf, const 
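The rewritten ha_myisam::get_auto_increment() above handles the multi-part key case: when the AUTO_INCREMENT column is not the first key part, it copies the current key prefix from record[0], looks up the last row with that prefix via HA_READ_PREFIX_LAST, and returns that column value plus one, or 1 when no such row exists yet. Condensed from the hunk (the KEYREAD extra() calls and the fast path for a key-start counter are trimmed; the helper name is illustrative):

    ulonglong next_auto_increment_for_prefix(MI_INFO *file, TABLE *table)
    {
      byte key[MI_MAX_KEY_LENGTH];

      /* build the key prefix that precedes the AUTO_INCREMENT column */
      key_copy(key, table->record[0],
               table->key_info + table->next_number_index,
               table->next_number_key_offset);

      /* read the last row sharing this prefix */
      if (mi_rkey(file, table->record[1], (int) table->next_number_index,
                  key, table->next_number_key_offset, HA_READ_PREFIX_LAST))
        return 1;                         /* no row yet: start at 1 */

      return ((ulonglong) table->next_number_field->
              val_int_offset(table->rec_buff_length)) + 1;
    }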
byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -115,7 +115,7 @@ int ha_myisammrg::index_read(byte * buf, const byte * key, int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=myrg_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -123,7 +123,7 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count,&LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; @@ -132,7 +132,7 @@ int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisammrg::index_next(byte * buf) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count,&LOCK_status); int error=myrg_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -140,7 +140,7 @@ int ha_myisammrg::index_next(byte * buf) int ha_myisammrg::index_prev(byte * buf) { - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count,&LOCK_status); int error=myrg_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -148,7 +148,8 @@ int ha_myisammrg::index_prev(byte * buf) int ha_myisammrg::index_first(byte * buf) { - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); int error=myrg_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -156,7 +157,7 @@ int ha_myisammrg::index_first(byte * buf) int ha_myisammrg::index_last(byte * buf) { - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); int error=myrg_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -166,7 +167,7 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count,&LOCK_status); int error=myrg_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -179,7 +180,8 @@ int ha_myisammrg::rnd_init(bool scan) int ha_myisammrg::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -187,7 +189,7 @@ int ha_myisammrg::rnd_next(byte *buf) int ha_myisammrg::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count,&LOCK_status); int error=myrg_rrnd(file, buf, ha_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -347,7 +349,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) create_info->merge_list.elements++; (*create_info->merge_list.next) = (byte*) ptr; - create_info->merge_list.next= (byte**) &ptr->next; + create_info->merge_list.next= (byte**) &ptr->next_local; } *create_info->merge_list.next=0; } @@ -375,7 +377,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form, if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)* sizeof(char*)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - for (pos=table_names ; tables ; tables=tables->next) + for (pos= table_names; tables; tables= tables->next_local) { char *table_name; TABLE **tbl= 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d9545d5cbb8..17aaaf20fa2 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1633,6 +1633,8 @@ int ha_ndbcluster::write_row(byte *record) NdbConnection *trans= m_active_trans; NdbOperation *op; int res; + THD *thd= current_thd; + DBUG_ENTER("write_row"); if(m_ignore_dup_key_not_supported) @@ -1640,7 +1642,7 @@ int ha_ndbcluster::write_row(byte *record) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(thd->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); has_auto_increment= (table->next_number_field && record == table->record[0]); @@ -1792,7 +1794,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) uint i; DBUG_ENTER("update_row"); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(thd->status_var.ha_update_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -1903,12 +1905,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) int ha_ndbcluster::delete_row(const byte *record) { + THD *thd= current_thd; NdbConnection *trans= m_active_trans; NdbResultSet* cursor= m_active_cursor; NdbOperation *op; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(thd->status_var.ha_delete_count,&LOCK_status); if (cursor) { @@ -2267,7 +2270,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, const byte *key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); DBUG_ENTER("index_read_idx"); DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); index_init(index_no); @@ -2280,7 +2283,8 @@ int ha_ndbcluster::index_next(byte *buf) DBUG_ENTER("index_next"); int error= 1; - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); DBUG_RETURN(next_result(buf)); } @@ -2288,7 +2292,8 @@ int ha_ndbcluster::index_next(byte *buf) int ha_ndbcluster::index_prev(byte *buf) { DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count, + 
&LOCK_status); DBUG_RETURN(1); } @@ -2296,7 +2301,8 @@ int ha_ndbcluster::index_prev(byte *buf) int ha_ndbcluster::index_first(byte *buf) { DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first @@ -2307,7 +2313,7 @@ int ha_ndbcluster::index_first(byte *buf) int ha_ndbcluster::index_last(byte *buf) { DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); int res; if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ NdbResultSet *cursor= m_active_cursor; @@ -2443,7 +2449,8 @@ int ha_ndbcluster::rnd_end() int ha_ndbcluster::rnd_next(byte *buf) { DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (!m_active_cursor) DBUG_RETURN(full_table_scan(buf)); @@ -2461,7 +2468,8 @@ int ha_ndbcluster::rnd_next(byte *buf) int ha_ndbcluster::rnd_pos(byte *buf, byte *pos) { DBUG_ENTER("rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); // The primary key for the record is stored in pos // Perform a pk_read using primary key "index" DBUG_RETURN(pk_read(pos, ref_length, buf)); @@ -2686,6 +2694,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_CHANGE_KEY_TO_DUP: DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP")); + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS")); break; } @@ -3605,21 +3615,19 @@ int ndbcluster_drop_database(const char *path) } -longlong ha_ndbcluster::get_auto_increment() +ulonglong ha_ndbcluster::get_auto_increment() { + int cache_size; + Uint64 auto_value; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); - int cache_size= - (m_rows_to_insert - m_rows_inserted < autoincrement_prefetch) ? - m_rows_to_insert - m_rows_inserted - : (m_rows_to_insert > autoincrement_prefetch) ? - m_rows_to_insert - : autoincrement_prefetch; - Uint64 auto_value= - (m_skip_auto_increment) ? - m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) - : m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); - DBUG_RETURN((longlong)auto_value); + cache_size= ((m_rows_to_insert - m_rows_inserted < autoincrement_prefetch) ? + m_rows_to_insert - m_rows_inserted : + max(m_rows_to_insert, autoincrement_prefetch)); + auto_value= ((m_skip_auto_increment) ? 
+ m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) : + m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size)); + DBUG_RETURN((ulonglong) auto_value); } @@ -4062,6 +4070,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, (void)mysql_rm_table_part2(thd, &table_list, /* if_exists */ TRUE, /* drop_temporary */ FALSE, + /* drop_view */ FALSE, /* dont_log_query*/ TRUE); } } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 2121228a869..d61876b1357 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -217,7 +217,7 @@ class ha_ndbcluster: public handler int key_cmp(uint keynr, const byte * old_row, const byte * new_row); void print_results(); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int ndb_err(NdbConnection*); bool uses_blob_value(bool all_fields); diff --git a/sql/handler.h b/sql/handler.h index 252861e5c37..858a7861dba 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -141,16 +141,16 @@ /* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */ #define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1 -enum db_type -{ +enum db_type +{ DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1, DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM, DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM, DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM, - DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, + DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER, DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB, - + DB_TYPE_DEFAULT // Must be last }; @@ -167,16 +167,24 @@ enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, /* struct to hold information about the table that should be created */ /* Bits in used_fields */ -#define HA_CREATE_USED_AUTO 1 -#define HA_CREATE_USED_RAID 2 -#define HA_CREATE_USED_UNION 4 -#define HA_CREATE_USED_INSERT_METHOD 8 -#define HA_CREATE_USED_MIN_ROWS 16 -#define HA_CREATE_USED_MAX_ROWS 32 -#define HA_CREATE_USED_AVG_ROW_LENGTH 64 -#define HA_CREATE_USED_PACK_KEYS 128 -#define HA_CREATE_USED_CHARSET 256 -#define HA_CREATE_USED_DEFAULT_CHARSET 512 +#define HA_CREATE_USED_AUTO (1L << 0) +#define HA_CREATE_USED_RAID (1L << 1) +#define HA_CREATE_USED_UNION (1L << 2) +#define HA_CREATE_USED_INSERT_METHOD (1L << 3) +#define HA_CREATE_USED_MIN_ROWS (1L << 4) +#define HA_CREATE_USED_MAX_ROWS (1L << 5) +#define HA_CREATE_USED_AVG_ROW_LENGTH (1L << 6) +#define HA_CREATE_USED_PACK_KEYS (1L << 7) +#define HA_CREATE_USED_CHARSET (1L << 8) +#define HA_CREATE_USED_DEFAULT_CHARSET (1L << 9) +#define HA_CREATE_USED_DATADIR (1L << 10) +#define HA_CREATE_USED_INDEXDIR (1L << 11) +#define HA_CREATE_USED_ENGINE (1L << 12) +#define HA_CREATE_USED_CHECKSUM (1L << 13) +#define HA_CREATE_USED_DELAY_KEY_WRITE (1L << 14) +#define HA_CREATE_USED_ROW_FORMAT (1L << 15) +#define HA_CREATE_USED_COMMENT (1L << 16) +#define HA_CREATE_USED_PASSWORD (1L << 17) typedef struct st_thd_trans { void *bdb_tid; @@ -207,6 +215,7 @@ typedef struct st_ha_create_information uint raid_type,raid_chunks; uint merge_insert_method; bool table_existed; /* 1 in create if table existed */ + bool frm_only; /* 1 if no ha_create_table() */ } HA_CREATE_INFO; @@ -315,29 +324,33 @@ public: int ha_index_init(uint idx) { + DBUG_ENTER("ha_index_init"); DBUG_ASSERT(inited==NONE); inited=INDEX; - return index_init(idx); + DBUG_RETURN(index_init(idx)); } int ha_index_end() { + DBUG_ENTER("ha_index_end"); DBUG_ASSERT(inited==INDEX); inited=NONE; - return index_end(); + DBUG_RETURN(index_end()); } int ha_rnd_init(bool scan) { + 
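The used_fields bits above are rewritten as explicit shifts and extended with new flags (DATADIR through PASSWORD), so code handling CREATE and ALTER TABLE can tell which clauses the user actually wrote instead of guessing from default values. Consumption is plain bit testing; an illustration only, since the parser code that sets the bits is not part of this hunk:

    /* record that an ENGINE= clause was given */
    create_info->used_fields|= HA_CREATE_USED_ENGINE;

    /* later: only touch the character set if the user asked for one */
    if (create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET)
    {
      /* an explicit DEFAULT CHARSET clause was present */
    }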
DBUG_ENTER("ha_rnd_init"); DBUG_ASSERT(inited==NONE || (inited==RND && scan)); inited=RND; - return rnd_init(scan); + DBUG_RETURN(rnd_init(scan)); } int ha_rnd_end() { + DBUG_ENTER("ha_rnd_end"); DBUG_ASSERT(inited==RND); inited=NONE; - return rnd_end(); + DBUG_RETURN(rnd_end()); } - /* this is neseccary in many places, e.g. in HANDLER command */ + /* this is necessary in many places, e.g. in HANDLER command */ int ha_index_or_rnd_end() { return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0; @@ -372,6 +385,7 @@ public: virtual int read_range_next(); int compare_key(key_range *range); virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } + void ft_end() { ft_handler=NULL; } virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key, uint keylen) { return NULL; } @@ -408,7 +422,8 @@ public: */ virtual int delete_all_rows() { return (my_errno=HA_ERR_WRONG_COMMAND); } - virtual longlong get_auto_increment(); + virtual ulonglong get_auto_increment(); + virtual void restore_auto_increment(); virtual void update_create_info(HA_CREATE_INFO *create_info) {} /* admin commands - called from mysql_admin_table */ @@ -506,10 +521,18 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + + /* + RETURN + true Primary key (if there is one) is clustered key covering all fields + false otherwise + */ + virtual bool primary_key_is_clustered() { return FALSE; } + + virtual int cmp_ref(const byte *ref1, const byte *ref2) + { + return memcmp(ref1, ref2, ref_length); + } }; /* Some extern variables used with handlers */ diff --git a/sql/item.cc b/sql/item.cc index 0e7a2b50b51..157b420805c 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -22,6 +22,11 @@ #include "mysql_priv.h" #include <m_ctype.h> #include "my_dir.h" +#include "sp_rcontext.h" +#include "sql_acl.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "sql_select.h" static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, @@ -41,11 +46,11 @@ void item_init(void) } Item::Item(): - fixed(0) + name_length(0), fixed(0), + collation(default_charset(), DERIVATION_COERCIBLE) { marker= 0; maybe_null=null_value=with_sum_func=unsigned_flag=0; - collation.set(default_charset(), DERIVATION_COERCIBLE); name= 0; decimals= 0; max_length= 0; @@ -96,13 +101,40 @@ void Item::print_item_w_name(String *str) print(str); if (name) { - str->append(" AS `", 5); - str->append(name); - str->append('`'); + THD *thd= current_thd; + str->append(" AS ", 4); + append_identifier(thd, str, name, strlen(name)); } } +void Item::cleanup() +{ + DBUG_ENTER("Item::cleanup"); + DBUG_PRINT("info", ("Item: 0x%lx", this)); + DBUG_PRINT("info", ("Type: %d", (int)type())); + fixed=0; + marker= 0; + DBUG_VOID_RETURN; +} + + +/* + cleanup() item if it is 'fixed' + + SYNOPSIS + cleanup_processor() + arg - a dummy parameter, is not used here +*/ + +bool Item::cleanup_processor(byte *arg) +{ + if (fixed) + cleanup(); + return FALSE; +} + + Item_ident::Item_ident(const char *db_name_par,const char *table_name_par, const char *field_name_par) :orig_db_name(db_name_par), orig_table_name(table_name_par), @@ -151,6 +183,45 @@ bool Item_ident::remove_dependence_processor(byte * arg) } +/* + Store the pointer to this item field into a list if not already there. 
+ + SYNOPSIS + Item_field::collect_item_field_processor() + arg pointer to a List<Item_field> + + DESCRIPTION + The method is used by Item::walk to collect all unique Item_field objects + from a tree of Items into a set of items represented as a list. + + IMPLEMENTATION + Item_cond::walk() and Item_func::walk() stop the evaluation of the + processor function for its arguments once the processor returns + true.Therefore in order to force this method being called for all item + arguments in a condition the method must return false. + + RETURN + false to force the evaluation of collect_item_field_processor + for the subsequent items. +*/ + +bool Item_field::collect_item_field_processor(byte *arg) +{ + DBUG_ENTER("Item_field::collect_item_field_processor"); + DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname")); + List<Item_field> *item_list= (List<Item_field>*) arg; + List_iterator<Item_field> item_list_it(*item_list); + Item_field *curr_item; + while ((curr_item= item_list_it++)) + { + if (curr_item->eq(this, 1)) + DBUG_RETURN(false); /* Already in the set. */ + } + item_list->push_back(this); + DBUG_RETURN(false); +} + + bool Item::check_cols(uint c) { if (c != 1) @@ -168,12 +239,15 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) { /* Empty string, used by AS or internal function like last_insert_id() */ name= (char*) str; + name_length= 0; return; } if (cs->ctype) { - // This will probably need a better implementation in the future: - // a function in CHARSET_INFO structure. + /* + This will probably need a better implementation in the future: + a function in CHARSET_INFO structure. + */ while (length && !my_isgraph(cs,*str)) { // Fix problem with yacc length--; @@ -183,12 +257,12 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) if (!my_charset_same(cs, system_charset_info)) { uint32 res_length; - name= sql_strmake_with_convert(str, length, cs, + name= sql_strmake_with_convert(str, name_length= length, cs, MAX_ALIAS_NAME, system_charset_info, &res_length); } else - name=sql_strmake(str, min(length,MAX_ALIAS_NAME)); + name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME))); } @@ -295,6 +369,46 @@ CHARSET_INFO *Item::default_charset() } +int Item::save_in_field_no_warnings(Field *field, bool no_conversions) +{ + int res; + THD *thd= field->table->in_use; + enum_check_fields tmp= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; + res= save_in_field(field, no_conversions); + thd->count_cuted_fields= tmp; + return res; +} + + +Item * +Item_splocal::this_item() +{ + THD *thd= current_thd; + + return thd->spcont->get_item(m_offset); +} + +Item * +Item_splocal::this_const_item() const +{ + THD *thd= current_thd; + + return thd->spcont->get_item(m_offset); +} + +Item::Type +Item_splocal::type() const +{ + THD *thd= current_thd; + + if (thd->spcont) + return thd->spcont->get_item(m_offset)->type(); + return NULL_ITEM; // Anything but SUBSELECT_ITEM +} + + + /* Aggregate two collations together taking into account their coercibility (aka derivation): @@ -425,7 +539,9 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags) } Item_field::Item_field(Field *f) - :Item_ident(NullS, f->table_name, f->field_name) + :Item_ident(NullS, f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { set_field(f); /* @@ -436,7 +552,9 @@ Item_field::Item_field(Field *f) } Item_field::Item_field(THD *thd, Field *f) - :Item_ident(f->table->table_cache_key, 
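As the comment block above explains, collect_item_field_processor() is meant to be handed to Item::walk() so that a whole condition tree can be scanned for the distinct columns it references; returning false from the processor keeps the walk going through all arguments. A hedged usage sketch, where cond stands for any fixed Item tree:

    List<Item_field> used_fields;

    /* walk() invokes the processor for every item in the tree; duplicates
       are filtered out inside collect_item_field_processor() itself */
    cond->walk(&Item::collect_item_field_processor, (byte*) &used_fields);

    List_iterator<Item_field> it(used_fields);
    while (Item_field *cur= it++)
    {
      /* cur is one distinct column referenced somewhere in cond */
    }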
f->table_name, f->field_name) + :Item_ident(f->table->table_cache_key, f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { /* We always need to provide Item_field with a fully qualified field @@ -473,7 +591,11 @@ Item_field::Item_field(THD *thd, Field *f) Item_field::Item_field(THD *thd, Item_field *item) :Item_ident(thd, item), field(item->field), - result_field(item->result_field) + result_field(item->result_field), + item_equal(item->item_equal), + no_const_subst(item->no_const_subst), + have_privileges(item->have_privileges), + any_privileges(item->any_privileges) { collation.set(DERIVATION_IMPLICIT); } @@ -531,6 +653,54 @@ const char *Item_ident::full_name() const return tmp; } +void Item_ident::print(String *str) +{ + THD *thd= current_thd; + char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; + const char *d_name= db_name, *t_name= table_name; + if (lower_case_table_names) + { + if (table_name && table_name[0]) + { + strmov(t_name_buff, table_name); + my_casedn_str(files_charset_info, t_name_buff); + t_name= t_name_buff; + } + if (db_name && db_name[0]) + { + strmov(d_name_buff, db_name); + my_casedn_str(files_charset_info, d_name_buff); + d_name= d_name_buff; + } + } + + if (!table_name || !field_name) + { + const char *nm= field_name ? field_name : name ? name : "tmp_field"; + append_identifier(thd, str, nm, strlen(nm)); + return; + } + if (db_name && db_name[0]) + { + append_identifier(thd, str, d_name, strlen(d_name)); + str->append('.'); + append_identifier(thd, str, t_name, strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, strlen(field_name)); + } + else + { + if (table_name[0]) + { + append_identifier(thd, str, t_name, strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, strlen(field_name)); + } + else + append_identifier(thd, str, field_name, strlen(field_name)); + } +} + /* ARGSUSED */ String *Item_field::val_str(String *str) { @@ -1234,8 +1404,27 @@ bool Item_param::convert_str_value(THD *thd) return rc; } -/* End of Item_param related */ +void Item_param::print(String *str) +{ + if (state == NO_VALUE) + { + str->append('?'); + } + else + { + char buffer[80]; + String tmp(buffer, sizeof(buffer), &my_charset_bin); + const String *res; + res= query_val_str(&tmp); + str->append(*res); + } +} + + +/**************************************************************************** + Item_copy_string +****************************************************************************/ void Item_copy_string::copy() { @@ -1344,163 +1533,435 @@ static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, } -bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) + + +/* + Search a GROUP BY clause for a field with a certain name. + + SYNOPSIS + find_field_in_group_list() + find_item the item being searched for + group_list GROUP BY clause + + DESCRIPTION + Search the GROUP BY list for a column named as find_item. When searching + preference is given to columns that are qualified with the same table (and + database) name as the one being searched for. 
+ + RETURN + - the found item on success + - NULL if find_item is not in group_list +*/ + +static Item** find_field_in_group_list(Item *find_item, ORDER *group_list) +{ + const char *db_name; + const char *table_name; + const char *field_name; + ORDER *found_group= NULL; + int found_match_degree= 0; + Item_field *cur_field; + int cur_match_degree= 0; + + if (find_item->type() == Item::FIELD_ITEM || + find_item->type() == Item::REF_ITEM) + { + db_name= ((Item_ident*) find_item)->db_name; + table_name= ((Item_ident*) find_item)->table_name; + field_name= ((Item_ident*) find_item)->field_name; + } + else + return NULL; + + DBUG_ASSERT(field_name); + + for (ORDER *cur_group= group_list ; cur_group ; cur_group= cur_group->next) + { + if ((*(cur_group->item))->type() == Item::FIELD_ITEM) + { + cur_field= (Item_field*) *cur_group->item; + cur_match_degree= 0; + + DBUG_ASSERT(cur_field->field_name); + + if (!my_strcasecmp(system_charset_info, + cur_field->field_name, field_name)) + ++cur_match_degree; + else + continue; + + if (cur_field->table_name && table_name) + { + /* If field_name is qualified by a table name. */ + if (strcmp(cur_field->table_name, table_name)) + /* Same field names, different tables. */ + return NULL; + + ++cur_match_degree; + if (cur_field->db_name && db_name) + { + /* If field_name is also qualified by a database name. */ + if (strcmp(cur_field->db_name, db_name)) + /* Same field names, different databases. */ + return NULL; + ++cur_match_degree; + } + } + + if (cur_match_degree > found_match_degree) + { + found_match_degree= cur_match_degree; + found_group= cur_group; + } + else if (found_group && (cur_match_degree == found_match_degree) && + ! (*(found_group->item))->eq(cur_field, 0)) + { + /* + If the current resolve candidate matches equally well as the current + best match, they must reference the same column, otherwise the field + is ambiguous. + */ + my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), + MYF(0), find_item->full_name(), current_thd->where); + return NULL; + } + } + } + + if (found_group) + return found_group->item; + else + return NULL; +} + + +/* + Resolve a column reference in a sub-select. + + SYNOPSIS + resolve_ref_in_select_and_group() + thd current thread + ref column reference being resolved + select the sub-select that ref is resolved against + + DESCRIPTION + Resolve a column reference (usually inside a HAVING clause) against the + SELECT and GROUP BY clauses of the query described by 'select'. The name + resolution algorithm searches both the SELECT and GROUP BY clauses, and in + case of a name conflict prefers GROUP BY column names over SELECT names. If + both clauses contain different fields with the same names, a warning is + issued that name of 'ref' is ambiguous. We extend ANSI SQL in that when no + GROUP BY column is found, then a HAVING name is resolved as a possibly + derived SELECT column. + + NOTES + The resolution procedure is: + - Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT clause of Q. + - Search for a column named col_ref_i [in table T_j] + in the GROUP BY clause of Q. + - If found different columns with the same name in GROUP BY and SELECT + - issue a warning and return the GROUP BY column, + - otherwise return the found SELECT column. 
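The resolution rules above are easier to see against a concrete query; a hedged illustration rather than a statement about the implementation:

    /*
        SELECT t.a * 2 AS a2, SUM(t.b) FROM t GROUP BY t.a HAVING a2 > 0

      'a2' names no column of t and no GROUP BY column, so it resolves to
      the derived SELECT column t.a * 2, the extension described above.
      Had the GROUP BY clause and the SELECT list both supplied an 'a2'
      referring to different columns, the name would be reported as
      ambiguous (ER_NON_UNIQ_ERROR); when both match the same column, the
      GROUP BY entry simply takes precedence.
    */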
+ + + RETURN + NULL - there was an error, and the error was already reported + not_found_item - the item was not resolved, no error was reported + resolved item - if the item was resolved +*/ + +static Item** +resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) +{ + Item **group_by_ref= NULL; + Item **select_ref= NULL; + ORDER *group_list= (ORDER*) select->group_list.first; + bool ambiguous_fields= FALSE; + uint counter; + bool not_used; + + /* + Search for a column or derived column named as 'ref' in the SELECT + clause of the current select. + */ + if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), &counter, + REPORT_EXCEPT_NOT_FOUND, ¬_used))) + return NULL; /* Some error occurred. */ + + /* If this is a non-aggregated field inside HAVING, search in GROUP BY. */ + if (select->having_fix_field && !ref->with_sum_func && group_list) + { + group_by_ref= find_field_in_group_list(ref, group_list); + + /* Check if the fields found in SELECT and GROUP BY are the same field. */ + if (group_by_ref && (select_ref != not_found_item) && + !((*group_by_ref)->eq(*select_ref, 0))) + { + ambiguous_fields= TRUE; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + ER(ER_NON_UNIQ_ERROR), ref->full_name(), + current_thd->where); + + } + } + + if (select_ref != not_found_item || group_by_ref) + { + if (select_ref != not_found_item && !ambiguous_fields) + { + DBUG_ASSERT(*select_ref); + if (! (*select_ref)->fixed) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), ref->name, + "forward reference in item list"); + return NULL; + } + return (select->ref_pointer_array + counter); + } + else if (group_by_ref) + return group_by_ref; + else + DBUG_ASSERT(FALSE); + } + else + return (Item**) not_found_item; +} + + +/* + Resolve the name of a column reference. + + SYNOPSIS + Item_field::fix_fields() + thd [in] current thread + tables [in] the tables in a FROM clause + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: FROM clause, SELECT clause, GROUP BY clause of a query + Q, or in outer queries that contain Q. + + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_column_reference([T_j].col_ref_i) + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q; + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + if - Q_k is not a group query AND + - Q_k is not inside an aggregate function + OR + - Q_(k-1) is not in a HAVING or SELECT clause of Q_k + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + } + + if such a column is not found + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + } + } + } + + Notice that compared to Item_ref::fix_fields, here we first search the FROM + clause, and then we search the SELECT and GROUP BY clauses. 
+ + RETURN + TRUE if error + FALSE on success +*/ + +bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference) { DBUG_ASSERT(fixed == 0); if (!field) // If field is not checked { - TABLE_LIST *where= 0; - bool upward_lookup= 0; - Field *tmp= (Field *)not_found_field; - if ((tmp= find_field_in_tables(thd, this, tables, &where, 0)) == + bool upward_lookup= FALSE; + Field *from_field= (Field *)not_found_field; + if ((from_field= find_field_in_tables(thd, this, tables, reference, + IGNORE_EXCEPT_NON_UNIQUE, + !any_privileges)) == not_found_field) { - /* - We can't find table field in table list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_field_in_tables. - */ - SELECT_LEX *last= 0; #ifdef EMBEDDED_LIBRARY thd->net.last_errno= 0; #endif + SELECT_LEX *last= 0; TABLE_LIST *table_list; - Item **refer= (Item **)not_found_item; - uint counter; - bool not_used; - // Prevent using outer fields in subselects, that is not supported now - SELECT_LEX *cursel= (SELECT_LEX *) thd->lex->current_select; - if (cursel->master_unit()->first_select()->linkage != DERIVED_TABLE_TYPE) + Item **ref= (Item **) not_found_item; + SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select; + /* + If there is an outer select, and it is not a derived table (which do + not support the use of outer fields for now), try to resolve this + reference in the outer select(s). + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects are + searched starting from the innermost. + */ + if (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE) { - SELECT_LEX_UNIT *prev_unit= cursel->master_unit(); - for (SELECT_LEX *sl= prev_unit->outer_select(); - sl; - sl= (prev_unit= sl->master_unit())->outer_select()) + SELECT_LEX_UNIT *prev_unit= current_sel->master_unit(); + SELECT_LEX *outer_sel= prev_unit->outer_select(); + for ( ; outer_sel ; + outer_sel= (prev_unit= outer_sel->master_unit())->outer_select()) { - upward_lookup= 1; - table_list= (last= sl)->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) - { + last= outer_sel; + Item_subselect *prev_subselect_item= prev_unit->item; + upward_lookup= TRUE; + + /* Search in the tables of the FROM clause of the outer select. */ + table_list= outer_sel->get_table_list(); + if (outer_sel->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) /* - it is primary INSERT st_select_lex => skip first table - resolving + It is a primary INSERT st_select_lex => do not resolve against the + first table. */ - table_list= table_list->next; - } + table_list= table_list->next_local; - Item_subselect *prev_subselect_item= prev_unit->item; - enum_parsing_place place= - prev_subselect_item->parsing_place; + enum_parsing_place place= prev_subselect_item->parsing_place; /* - check table fields only if subquery used somewhere out of HAVING - or outer SELECT do not use groupping (i.e. tables are - accessable) + Check table fields only if the subquery is used somewhere out of + HAVING, or the outer SELECT does not use grouping (i.e. tables are + accessible). 
*/ if ((place != IN_HAVING || - (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && - (tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) - { - if (!tmp) - return -1; - prev_subselect_item->used_tables_cache|= tmp->table->map; - prev_subselect_item->const_item_cache= 0; - break; - } - if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && - (refer= find_item_in_list(this, sl->item_list, &counter, - REPORT_EXCEPT_NOT_FOUND, - ¬_used)) != - (Item **) not_found_item) + (outer_sel->with_sum_func == 0 && + outer_sel->group_list.elements == 0)) && + (from_field= find_field_in_tables(thd, this, table_list, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE)) != + not_found_field) { - if (*refer && (*refer)->fixed) // Avoid crash in case of error - { - prev_subselect_item->used_tables_cache|= (*refer)->used_tables(); - prev_subselect_item->const_item_cache&= (*refer)->const_item(); - } + if (from_field) + { + if (from_field != view_ref_found) + { + prev_subselect_item->used_tables_cache|= from_field->table->map; + prev_subselect_item->const_item_cache= 0; + } + else + { + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + } + } break; } + /* Search in the SELECT and GROUP lists of the outer select. */ + if (outer_sel->resolve_mode == SELECT_LEX::SELECT_MODE) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, outer_sel))) + return TRUE; /* Some error occured (e.g. ambigous names). */ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + } + // Reference is not found => depend from outer (or just error) prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; prev_subselect_item->const_item_cache= 0; - if (sl->master_unit()->first_select()->linkage == + if (outer_sel->master_unit()->first_select()->linkage == DERIVED_TABLE_TYPE) break; // do not look over derived table } } - if (!tmp) - return -1; - else if (!refer) - return 1; - else if (tmp == not_found_field && refer == (Item **)not_found_item) + + DBUG_ASSERT(ref); + if (!from_field) + return TRUE; + if (ref == not_found_item && from_field == not_found_field) { if (upward_lookup) { - // We can't say exactly what absend table or field + // We can't say exactly what absent table or field my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), full_name(), thd->where); } else { // Call to report error - find_field_in_tables(thd, this, tables, &where, 1); + find_field_in_tables(thd, this, tables, reference, REPORT_ALL_ERRORS, + TRUE); } - return -1; + return TRUE; } - else if (refer != (Item **)not_found_item) + else if (ref != not_found_item) { - if (!(*refer)->fixed) - { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; - } + /* Should have been checked in resolve_ref_in_select_and_group(). 
*/
+        DBUG_ASSERT(*ref && (*ref)->fixed);
         Item_ref *rf= new Item_ref(last->ref_pointer_array + counter,
                                    (char *)table_name, (char *)field_name);
         if (!rf)
-          return 1;
-        thd->change_item_tree(ref, rf);
+          return TRUE;
+        thd->change_item_tree(reference, rf);
         /*
           rf is Item_ref => never substitute other items (in this case)
           during fix_fields() => we can use rf after fix_fields()
         */
-        if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1))
-          return 1;
+        if (rf->fix_fields(thd, tables, reference) || rf->check_cols(1))
+          return TRUE;
 
-        mark_as_dependent(thd, last, cursel, rf);
-        return 0;
+        mark_as_dependent(thd, last, current_sel, rf);
+        return FALSE;
       }
       else
       {
-        mark_as_dependent(thd, last, cursel, this);
+        mark_as_dependent(thd, last, current_sel, this);
         if (last->having_fix_field)
         {
           Item_ref *rf;
-          rf= new Item_ref((where->db[0] ? where->db : 0),
-                           (char*) where->alias, (char*) field_name);
+          rf= new Item_ref((cached_table->db[0] ? cached_table->db : 0),
+                           (char*) cached_table->alias, (char*) field_name);
           if (!rf)
-            return 1;
-          thd->change_item_tree(ref, rf);
+            return TRUE;
+          thd->change_item_tree(reference, rf);
           /*
             rf is Item_ref => never substitute other items (in this case)
             during fix_fields() => we can use rf after fix_fields()
           */
-          return rf->fix_fields(thd, tables, ref) || rf->check_cols(1);
+          return rf->fix_fields(thd, tables, reference) || rf->check_cols(1);
         }
       }
     }
-    else if (!tmp)
-      return -1;
+    else if (!from_field)
+      return TRUE;
+
+    /*
+      If this is not an expression from a merged VIEW, we will set this field.
+
+      We can leave an expression substituted from a view for the next PS/SP
+      re-execution (i.e. we do not register this substitution for reverting
+      on cleanup() (register_item_tree_changing())), because this subtree
+      will be fix_field'ed during setup_tables()->setup_ancestor() (i.e.
+      before all other expressions of the query), and references to tables
+      which are not present in the query will not cause problems.
 
-    set_field(tmp);
+    Also we suppose that the view can't be changed during PS/SP life. 
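For example (an illustrative query only; the table and column names are hypothetical), an outer reference of this kind arises in a correlated subquery such as:

    SELECT *
    FROM t1
    WHERE t1.a > (SELECT AVG(t2.b) FROM t2 WHERE t2.c = t1.a);

Within the subquery, t1.a cannot be resolved against the inner FROM clause, so the lookup continues in the outer select; the subquery item is then marked as dependent (OUTER_REF_TABLE_BIT) and must be re-evaluated for every outer row instead of being treated as a constant.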
+ */ + if (from_field != view_ref_found) + set_field(from_field); } else if (thd->set_query_id && field->query_id != thd->query_id) { @@ -1509,11 +1970,49 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) field->query_id=thd->query_id; table->used_fields++; table->used_keys.intersect(field->part_of_key); - fixed= 1; } - return 0; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (any_privileges) + { + char *db, *tab; + if (cached_table->view) + { + db= cached_table->view_db.str; + tab= cached_table->view_name.str; + } + else + { + db= cached_table->db; + tab= cached_table->real_name; + } + if (!(have_privileges= (get_column_grant(thd, &field->table->grant, + db, tab, field_name) & + VIEW_ANY_ACL))) + { + my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, + ER(ER_COLUMNACCESS_DENIED_ERROR), + MYF(0), + "ANY", + thd->priv_user, + thd->host_or_ip, + field_name, + tab); + return TRUE; + } + } +#endif + fixed= 1; + return FALSE; +} + + +Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs) +{ + no_const_subst= 1; + return Item::safe_charset_converter(tocs); } + void Item_field::cleanup() { DBUG_ENTER("Item_field::cleanup"); @@ -1527,6 +2026,139 @@ void Item_field::cleanup() DBUG_VOID_RETURN; } +/* + Find a field among specified multiple equalities + + SYNOPSIS + find_item_equal() + cond_equal reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function first searches the field among multiple equalities + of the current level (in the cond_equal->current_level list). + If it fails, it continues searching in upper levels accessed + through a pointer cond_equal->upper_levels. + The search terminates as soon as a multiple equality containing + the field is found. + + RETURN VALUES + First Item_equal containing the field, if success + 0, otherwise +*/ +Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal) +{ + Item_equal *item= 0; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + return item; + } + /* + The field is not found in any of the multiple equalities + of the current level. Look for it in upper levels + */ + cond_equal= cond_equal->upper_levels; + } + return 0; +} + + +/* + Set a pointer to the multiple equality the field reference belongs to + (if any) + + SYNOPSIS + equal_fields_propagator() + arg - reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function looks for a multiple equality containing the field item + among those referenced by arg. + In the case such equality exists the function does the following. + If the found multiple equality contains a constant, then the field + reference is substituted for this constant, otherwise it sets a pointer + to the multiple equality in the field item. + + NOTES + This function is supposed to be called as a callback parameter in calls + of the transform method. + + RETURN VALUES + pointer to the replacing constant item, if the field item was substituted + pointer to the field item, otherwise. 
+*/ + +Item *Item_field::equal_fields_propagator(byte *arg) +{ + if (no_const_subst) + return this; + item_equal= find_item_equal((COND_EQUAL *) arg); + Item *item= 0; + if (item_equal) + item= item_equal->get_const(); + if (!item) + item= this; + return item; +} + + +/* + Mark the item to not be part of substitution if it's not a binary item + See comments in Arg_comparator::set_compare_func() for details +*/ + +Item *Item_field::set_no_const_sub(byte *arg) +{ + if (field->charset() != &my_charset_bin) + no_const_subst=1; + return this; +} + + +/* + Set a pointer to the multiple equality the field reference belongs to + (if any) + + SYNOPSIS + replace_equal_field_processor() + arg - a dummy parameter, is not used here + + DESCRIPTION + The function replaces a pointer to a field in the Item_field object + by a pointer to another field. + The replacement field is taken from the very beginning of + the item_equal list which the Item_field object refers to (belongs to) + If the Item_field object does not refer any Item_equal object, + nothing is done. + + NOTES + This function is supposed to be called as a callback parameter in calls + of the walk method. + + RETURN VALUES + 0 +*/ + +bool Item_field::replace_equal_field_processor(byte *arg) +{ + if (item_equal) + { + Item_field *subst= item_equal->get_first(); + if (!field->eq(subst->field)) + { + field= subst->field; + return 0; + } + } + return 0; +} + + void Item::init_make_field(Send_field *tmp_field, enum enum_field_types field_type) { @@ -1806,6 +2438,34 @@ Item_num *Item_uint::neg() return new Item_real(name, - ((double) value), 0, max_length); } + +/* + This function is only called during parsing. We will signal an error if + value is not a true double value (overflow) +*/ + +Item_real::Item_real(const char *str_arg, uint length) +{ + int error; + char *end; + value= my_strntod(&my_charset_bin, (char*) str_arg, length, &end, &error); + if (error) + { + /* + Note that we depend on that str_arg is null terminated, which is true + when we are in the parser + */ + DBUG_ASSERT(str_arg[length] == 0); + my_printf_error(ER_ILLEGAL_VALUE_FOR_TYPE, ER(ER_ILLEGAL_VALUE_FOR_TYPE), + MYF(0), "double", (char*) str_arg); + } + presentation= name=(char*) str_arg; + decimals=(uint8) nr_of_decimals(str_arg); + max_length=length; + fixed= 1; +} + + int Item_real::save_in_field(Field *field, bool no_conversions) { double nr=val(); @@ -1815,6 +2475,21 @@ int Item_real::save_in_field(Field *field, bool no_conversions) return field->store(nr); } + +void Item_real::print(String *str) +{ + if (presentation) + { + str->append(presentation); + return; + } + char buffer[20]; + String num(buffer, sizeof(buffer), &my_charset_bin); + num.set(value, decimals, &my_charset_bin); + str->append(num); +} + + /**************************************************************************** ** varbinary item ** In string context this is a binary string @@ -1898,7 +2573,7 @@ bool Item::send(Protocol *protocol, String *buffer) { bool result; enum_field_types type; - LINT_INIT(result); + LINT_INIT(result); // Will be set if null_value == 0 switch ((type=field_type())) { default: @@ -2005,177 +2680,240 @@ bool Item_field::send(Protocol *protocol, String *buffer) /* - This is used for HAVING clause - Find field in select list having the same name + Resolve the name of a reference to a column reference. 
+ + SYNOPSIS + Item_ref::fix_fields() + thd [in] current thread + tables [in] the tables in a FROM clause + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: GROUP BY clause, SELECT clause, outer queries. It is + used typically for columns in the HAVING clause which are not under + aggregate functions. + + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_extended([T_j].col_ref_i) + { + Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT and GROUP clauses of Q. + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + + if such a column is not found AND + - Q_k is not a group query AND + - Q_k is not inside an aggregate function + OR + - Q_(k-1) is not in a HAVING or SELECT clause of Q_k + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + } + } + } + } + + This procedure treats GROUP BY and SELECT clauses as one namespace for + column references in HAVING. Notice that compared to + Item_field::fix_fields, here we first search the SELECT and GROUP BY + clauses, and then we search the FROM clause. + + RETURN + TRUE if error + FALSE on success */ -bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) +bool Item_ref::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference) { DBUG_ASSERT(fixed == 0); - uint counter; - bool not_used; + SELECT_LEX *current_sel= thd->lex->current_select; + if (!ref) { - TABLE_LIST *where= 0, *table_list; - bool upward_lookup= 0; - SELECT_LEX_UNIT *prev_unit= thd->lex->current_select->master_unit(); - SELECT_LEX *sl= prev_unit->outer_select(); - /* - Finding only in current select will be performed for selects that have - not outer one and for derived tables (which not support using outer - fields for now) - */ - if ((ref= find_item_in_list(this, - *(thd->lex->current_select->get_item_list()), - &counter, - ((sl && - thd->lex->current_select->master_unit()-> - first_select()->linkage != - DERIVED_TABLE_TYPE) ? - REPORT_EXCEPT_NOT_FOUND : - REPORT_ALL_ERRORS ), ¬_used)) == - (Item **)not_found_item) + SELECT_LEX_UNIT *prev_unit= current_sel->master_unit(); + SELECT_LEX *outer_sel= prev_unit->outer_select(); + ORDER *group_list= (ORDER*) current_sel->group_list.first; + bool ambiguous_fields= FALSE; + Item **group_by_ref= NULL; + + if (!(ref= resolve_ref_in_select_and_group(thd, this, current_sel))) + return TRUE; /* Some error occured (e.g. ambigous names). */ + + if (ref == not_found_item) /* This reference was not resolved. */ { - Field *tmp= (Field*) not_found_field; - SELECT_LEX *last= 0; - upward_lookup= 1; /* - We can't find table field in select list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_item_in_list. 
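As a concrete illustration of this resolution order (table and column names are hypothetical), a HAVING clause may refer to a derived column from the SELECT list:

    SELECT a, COUNT(*) AS cnt
    FROM t1
    GROUP BY a
    HAVING cnt > 10;

Here cnt is not a column of t1; it is resolved against the SELECT and GROUP BY clauses first, and only if that fails are the FROM clauses of the enclosing queries searched, as the procedure above describes.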
+ If there is an outer select, and it is not a derived table (which do + not support the use of outer fields for now), try to resolve this + reference in the outer select(s). + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects are + searched starting from the innermost. */ - for ( ; sl ; sl= (prev_unit= sl->master_unit())->outer_select()) + if (outer_sel && (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE)) { - last= sl; - Item_subselect *prev_subselect_item= prev_unit->item; - if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && - (ref= find_item_in_list(this, sl->item_list, - &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used)) != - (Item **)not_found_item) - { - if (*ref && (*ref)->fixed) // Avoid crash in case of error - { - prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); - prev_subselect_item->const_item_cache&= (*ref)->const_item(); - } - break; - } - table_list= sl->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) - { - // it is primary INSERT st_select_lex => skip first table resolving - table_list= table_list->next; - } - enum_parsing_place place= - prev_subselect_item->parsing_place; - /* - check table fields only if subquery used somewhere out of HAVING - or SELECT list or outer SELECT do not use groupping (i.e. tables - are accessable) - */ - if ((place != IN_HAVING || - (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && - (tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) + TABLE_LIST *table_list; + Field *from_field= (Field*) not_found_field; + SELECT_LEX *last= 0; + + for ( ; outer_sel ; + outer_sel= (prev_unit= outer_sel->master_unit())->outer_select()) { - prev_subselect_item->used_tables_cache|= tmp->table->map; + last= outer_sel; + Item_subselect *prev_subselect_item= prev_unit->item; + + /* Search in the SELECT and GROUP lists of the outer select. */ + if (outer_sel->resolve_mode == SELECT_LEX::SELECT_MODE) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, outer_sel))) + return TRUE; /* Some error occured (e.g. ambigous names). */ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + } + + /* Search in the tables of the FROM clause of the outer select. */ + table_list= outer_sel->get_table_list(); + if (outer_sel->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) + /* + It is a primary INSERT st_select_lex => do not resolve against the + first table. + */ + table_list= table_list->next_local; + + enum_parsing_place place= prev_subselect_item->parsing_place; + /* + Check table fields only if the subquery is used somewhere out of + HAVING or the outer SELECT does not use grouping (i.e. tables are + accessible). + TODO: + Here we could first find the field anyway, and then test this + condition, so that we can give a better error message - + ER_WRONG_FIELD_WITH_GROUP, instead of the less informative + ER_BAD_FIELD_ERROR which we produce now. 
+ */ + if ((place != IN_HAVING || + (!outer_sel->with_sum_func && + outer_sel->group_list.elements == 0))) + { + if ((from_field= find_field_in_tables(thd, this, table_list, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE)) != + not_found_field) + { + if (from_field != view_ref_found) + { + prev_subselect_item->used_tables_cache|= from_field->table->map; + prev_subselect_item->const_item_cache= 0; + } + else + { + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + } + break; + } + } + + /* Reference is not found => depend on outer (or just error). */ + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; prev_subselect_item->const_item_cache= 0; - break; - } - // Reference is not found => depend from outer (or just error) - prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; - prev_subselect_item->const_item_cache= 0; - if (sl->master_unit()->first_select()->linkage == - DERIVED_TABLE_TYPE) - break; // do not look over derived table - } + if (outer_sel->master_unit()->first_select()->linkage == + DERIVED_TABLE_TYPE) + break; /* Do not consider derived tables. */ + } - if (!ref) - return 1; - if (!tmp) - return -1; - if (ref == (Item **)not_found_item && tmp == not_found_field) - { - if (upward_lookup) - { - // We can't say exactly what absend (table or field) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - full_name(), thd->where); - } - else - { - // Call to report error - find_item_in_list(this, - *(thd->lex->current_select->get_item_list()), - &counter, REPORT_ALL_ERRORS, ¬_used); - } - ref= 0; // Safety - return 1; - } - if (tmp != not_found_field) - { - Item_field* fld; - /* - Set ref to 0 as we are replacing this item with the found item - and this will ensure we get an error if this item would be - used elsewhere - */ - ref= 0; // Safety - if (!(fld= new Item_field(tmp))) - return 1; - thd->change_item_tree(reference, fld); - mark_as_dependent(thd, last, thd->lex->current_select, fld); - return 0; - } - if (!(*ref)->fixed) - { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; + DBUG_ASSERT(ref); + if (!from_field) + return TRUE; + if (ref == not_found_item && from_field == not_found_field) + { + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), + this->full_name(), current_thd->where); + ref= 0; // Safety + return TRUE; + } + if (from_field != not_found_field) + { + /* + Set ref to 0 as we are replacing this item with the found item and + this will ensure we get an error if this item would be used + elsewhere + */ + ref= 0; // Safety + if (from_field != view_ref_found) + { + Item_field* fld; + if (!(fld= new Item_field(tmp))) + return TRUE; + thd->change_item_tree(reference, fld); + mark_as_dependent(thd, last, thd->lex->current_select, fld); + return FALSE; + } + /* + We can leave expression substituted from view for next PS/SP + re-execution (i.e. do not register this substitution for reverting + on cleanup() (register_item_tree_changing())), because this subtree + will be fix_field'ed during setup_tables()->setup_ancestor() + (i.e. before all other expressions of query, and references on + tables which do not present in query will not make problems. + + Also we suppose that view can't be changed during PS/SP life. + */ + } + else + { + /* Should be checked in resolve_ref_in_select_and_group(). 
*/ + DBUG_ASSERT(*ref && (*ref)->fixed); + mark_as_dependent(thd, last, current_sel, this); + } } - mark_as_dependent(thd, last, thd->lex->current_select, - this); - ref= last->ref_pointer_array + counter; - } - else if (!ref) - return 1; - else - { - if (!(*ref)->fixed) + else { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; + /* The current reference cannot be resolved in this query. */ + my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), + this->full_name(), current_thd->where); + return TRUE; } - ref= thd->lex->current_select->ref_pointer_array + counter; } } /* - The following conditional is changed as to correctly identify - incorrect references in group functions or forward references - with sub-select's / derived tables, while it prevents this - check when Item_ref is created in an expression involving - summing function, which is to be placed in the user variable. + Check if this is an incorrect reference in a group function or forward + reference. Do not issue an error if this is an unnamed reference inside an + aggregate function. */ if (((*ref)->with_sum_func && name && (depended_from || - !(thd->lex->current_select->linkage != GLOBAL_OPTIONS_TYPE && - thd->lex->current_select->having_fix_field))) || + !(current_sel->linkage != GLOBAL_OPTIONS_TYPE && + current_sel->having_fix_field))) || !(*ref)->fixed) { my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, ((*ref)->with_sum_func? - "reference on group function": + "reference to group function": "forward reference in item list")); - return 1; + return TRUE; } max_length= (*ref)->max_length; maybe_null= (*ref)->maybe_null; @@ -2185,8 +2923,17 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) fixed= 1; if (ref && (*ref)->check_cols(1)) - return 1; - return 0; + return TRUE; + return FALSE; +} + + +void Item_ref::cleanup() +{ + DBUG_ENTER("Item_ref::cleanup"); + Item_ident::cleanup(); + result_field= 0; + DBUG_VOID_RETURN; } @@ -2199,6 +2946,51 @@ void Item_ref::print(String *str) } +bool Item_ref::send(Protocol *prot, String *tmp) +{ + if (result_field) + return prot->store(result_field); + return (*ref)->send(prot, tmp); +} + + +double Item_ref::val_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0.0; + return result_field->val_real(); + } + return val(); +} + + +longlong Item_ref::val_int_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + return result_field->val_int(); + } + return val_int(); +} + + +String *Item_ref::str_result(String* str) +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + str->set_charset(str_value.charset()); + return result_field->val_str(str, &str_value); + } + return val_str(str); +} + + void Item_ref_null_helper::print(String *str) { str->append("<ref_null_helper>(", 18); @@ -2229,7 +3021,10 @@ bool Item_default_value::fix_fields(THD *thd, struct st_table_list *table_list, Item **items) { + Item_field *field_arg; + Field *def_field; DBUG_ASSERT(fixed == 0); + if (!arg) { fixed= 1; @@ -2247,9 +3042,14 @@ bool Item_default_value::fix_fields(THD *thd, } arg= ref->ref[0]; } - Item_field *field_arg= (Item_field *)arg; - Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); - if (!def_field) + field_arg= (Item_field *)arg; + if (field_arg->field->flags & NO_DEFAULT_VALUE_FLAG) + { + my_printf_error(ER_NO_DEFAULT_FOR_FIELD, ER(ER_NO_DEFAULT_FOR_FIELD), + MYF(0), field_arg->field->field_name); + 
return 1; + } + if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of()))) return 1; memcpy(def_field, field_arg->field, field_arg->field->size_of()); def_field->move_field(def_field->table->default_values - @@ -2322,6 +3122,97 @@ void Item_insert_value::print(String *str) str->append(')'); } + +/* + Bind item representing field of row being changed in trigger + to appropriate Field object. + + SYNOPSIS + setup_field() + thd - current thread context + table - table of trigger (and where we looking for fields) + event - type of trigger event + + NOTE + This function does almost the same as fix_fields() for Item_field + but is invoked during trigger definition parsing and takes TABLE + object as its argument. + + RETURN VALUES + 0 ok + 1 field was not found. +*/ +bool Item_trigger_field::setup_field(THD *thd, TABLE *table, + enum trg_event_type event) +{ + bool result= 1; + uint field_idx= (uint)-1; + bool save_set_query_id= thd->set_query_id; + + /* TODO: Think more about consequences of this step. */ + thd->set_query_id= 0; + + if (find_field_in_real_table(thd, table, field_name, + strlen(field_name), 0, 0, + &field_idx)) + { + field= (row_version == OLD_ROW && event == TRG_EVENT_UPDATE) ? + table->triggers->old_field[field_idx] : + table->field[field_idx]; + result= 0; + } + + thd->set_query_id= save_set_query_id; + + return result; +} + + +bool Item_trigger_field::eq(const Item *item, bool binary_cmp) const +{ + return item->type() == TRIGGER_FIELD_ITEM && + row_version == ((Item_trigger_field *)item)->row_version && + !my_strcasecmp(system_charset_info, field_name, + ((Item_trigger_field *)item)->field_name); +} + + +bool Item_trigger_field::fix_fields(THD *thd, + TABLE_LIST *table_list, + Item **items) +{ + /* + Since trigger is object tightly associated with TABLE object most + of its set up can be performed during trigger loading i.e. trigger + parsing! So we have little to do in fix_fields. :) + FIXME may be we still should bother about permissions here. + */ + DBUG_ASSERT(fixed == 0); + // QQ: May be this should be moved to setup_field? + set_field(field); + fixed= 1; + return 0; +} + + +void Item_trigger_field::print(String *str) +{ + str->append((row_version == NEW_ROW) ? "NEW" : "OLD", 3); + str->append('.'); + str->append(field_name); +} + + +void Item_trigger_field::cleanup() +{ + /* + Since special nature of Item_trigger_field we should not do most of + things from Item_field::cleanup() or Item_ident::cleanup() here. 
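A minimal trigger definition sketching how such items arise (object names are illustrative only):

    CREATE TRIGGER t1_bu BEFORE UPDATE ON t1
    FOR EACH ROW
      SET NEW.b = NEW.b + OLD.b;

When the trigger body is parsed, NEW.b and OLD.b become Item_trigger_field objects; setup_field() later binds them to the Field objects of t1, using the saved old-row fields for OLD in an UPDATE trigger.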
+ */ + Item::cleanup(); +} + + /* If item is a const function, calculate it and return a const item The original item is freed if not returned @@ -2785,5 +3676,6 @@ void Item_result_field::cleanup() template class List<Item>; template class List_iterator<Item>; template class List_iterator_fast<Item>; +template class List_iterator_fast<Item_field>; template class List<List_item>; #endif diff --git a/sql/item.h b/sql/item.h index 547577a7ee0..7b8fed1ecaa 100644 --- a/sql/item.h +++ b/sql/item.h @@ -22,6 +22,7 @@ class Protocol; struct st_table_list; void item_init(void); /* Init item functions */ +class Item_field; /* @@ -110,6 +111,7 @@ public: }; typedef bool (Item::*Item_processor)(byte *arg); +typedef Item* (Item::*Item_transformer) (byte *arg); class Item { Item(const Item &); /* Prevent use of these */ @@ -127,7 +129,7 @@ public: PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM, SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER, - PARAM_ITEM}; + PARAM_ITEM, TRIGGER_FIELD_ITEM}; enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; @@ -139,6 +141,7 @@ public: my_string name; /* Name from select */ Item *next; uint32 max_length; + uint name_length; /* Length of name */ uint8 marker,decimals; my_bool maybe_null; /* If item may be null */ my_bool null_value; /* if item is null */ @@ -158,17 +161,13 @@ public: optimisation changes in prepared statements */ Item(THD *thd, Item *item); - virtual ~Item() { name=0; } /*lint -e1509 */ + virtual ~Item() + { + name=0; + } /*lint -e1509 */ void set_name(const char *str,uint length, CHARSET_INFO *cs); void init_make_field(Send_field *tmp_field,enum enum_field_types type); - virtual void cleanup() - { - DBUG_ENTER("Item::cleanup"); - DBUG_PRINT("info", ("Type: %d", (int)type())); - fixed=0; - marker= 0; - DBUG_VOID_RETURN; - } + virtual void cleanup(); virtual void make_field(Send_field *field); virtual bool fix_fields(THD *, struct st_table_list *, Item **); /* @@ -177,6 +176,7 @@ public: */ inline void quick_fix_field() { fixed= 1; } /* Function returns 1 on overflow and -1 on fatal errors */ + int save_in_field_no_warnings(Field *field, bool no_conversions); virtual int save_in_field(Field *field, bool no_conversions); virtual void save_org_in_field(Field *field) { (void) save_in_field(field, 1); } @@ -297,9 +297,22 @@ public: return (this->*processor)(arg); } + virtual Item* transform(Item_transformer transformer, byte *arg) + { + return (this->*transformer)(arg); + } + virtual bool remove_dependence_processor(byte * arg) { return 0; } virtual bool remove_fixed(byte * arg) { fixed= 0; return 0; } + virtual bool cleanup_processor(byte *arg); + virtual bool collect_item_field_processor(byte * arg) { return 0; } + virtual Item *equal_fields_propagator(byte * arg) { return this; } + virtual Item *set_no_const_sub(byte *arg) { return this; } + virtual bool replace_equal_field_processor(byte * arg) { return 0; } + virtual Item *this_item() { return this; } /* For SPs mostly. */ + virtual Item *this_const_item() const { return const_cast<Item*>(this); } /* For SPs mostly. 
*/ + // Row emulation virtual uint cols() { return 1; } virtual Item* el(uint i) { return this; } @@ -311,6 +324,7 @@ public: virtual void bring_value() {} Field *tmp_table_field_from_field_type(TABLE *table); + virtual Item_field *filed_for_view_update() { return 0; } virtual Item *neg_transformer(THD *thd) { return NULL; } virtual Item *safe_charset_converter(CHARSET_INFO *tocs); @@ -322,6 +336,104 @@ public: }; +// A local SP variable (incl. parameters), used in runtime +class Item_splocal : public Item +{ +private: + + uint m_offset; + LEX_STRING m_name; + +public: + + Item_splocal(LEX_STRING name, uint offset) + : m_offset(offset), m_name(name) + { + Item::maybe_null= TRUE; + } + + Item *this_item(); + Item *this_const_item() const; + + inline uint get_offset() + { + return m_offset; + } + + // Abstract methods inherited from Item. Just defer the call to + // the item in the frame + enum Type type() const; + + inline double val() + { + Item *it= this_item(); + double ret= it->val(); + Item::null_value= it->null_value; + return ret; + } + + inline longlong val_int() + { + Item *it= this_item(); + longlong ret= it->val_int(); + Item::null_value= it->null_value; + return ret; + } + + inline String *val_str(String *sp) + { + Item *it= this_item(); + String *ret= it->val_str(sp); + Item::null_value= it->null_value; + return ret; + } + + inline bool is_null() + { + Item *it= this_item(); + bool ret= it->is_null(); + Item::null_value= it->null_value; + return ret; + } + + inline void make_field(Send_field *field) + { + Item *it= this_item(); + + it->set_name(m_name.str, m_name.length, system_charset_info); + it->make_field(field); + } + + inline Item_result result_type() const + { + return this_const_item()->result_type(); + } + + inline bool const_item() const + { + return TRUE; + } + + inline int save_in_field(Field *field, bool no_conversions) + { + return this_item()->save_in_field(field, no_conversions); + } + + void print(String *str) + { + str->reserve(m_name.length+8); + str->append(m_name.str, m_name.length); + str->append('@'); + str->qs_append(m_offset); + } + + inline bool send(Protocol *protocol, String *str) + { + return this_item()->send(protocol, str); + } +}; + + class Item_num: public Item { public: @@ -366,19 +478,37 @@ public: const char *full_name() const; void cleanup(); bool remove_dependence_processor(byte * arg); + void print(String *str); + + friend bool insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges, bool allocate_view_names); }; +class Item_equal; +class COND_EQUAL; class Item_field :public Item_ident { +protected: void set_field(Field *field); public: Field *field,*result_field; + Item_equal *item_equal; + bool no_const_subst; + /* + if any_privileges set to TRUE then here real effective privileges will + be stored + */ + uint have_privileges; + /* field need any privileges (for VIEW creation) */ + bool any_privileges; Item_field(const char *db_par,const char *table_name_par, const char *field_name_par) :Item_ident(db_par,table_name_par,field_name_par), - field(0), result_field(0) + field(0), result_field(0), item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { collation.set(DERIVATION_IMPLICIT); } /* Constructor needed to process subselect with temporary tables (see Item) @@ -426,8 +556,15 @@ public: bool get_time(TIME *ltime); bool is_null() { return field->is_null(); } Item *get_tmp_table_item(THD *thd); + bool collect_item_field_processor(byte * 
arg); void cleanup(); + Item_equal *find_item_equal(COND_EQUAL *cond_equal); + Item *equal_fields_propagator(byte *arg); + Item *set_no_const_sub(byte *arg); + bool replace_equal_field_processor(byte *arg); inline uint32 max_disp_length() { return field->max_length(); } + Item_field *filed_for_view_update() { return this; } + Item *safe_charset_converter(CHARSET_INFO *tocs); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; @@ -570,7 +707,7 @@ public: */ virtual table_map used_tables() const { return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT; } - void print(String *str) { str->append('?'); } + void print(String *str); /* parameter never equal to other parameter of other item */ bool eq(const Item *item, bool binary_cmp) const { return 0; } }; @@ -604,6 +741,17 @@ public: }; +class Item_static_int_func :public Item_int +{ + const char *func_name; +public: + Item_static_int_func(const char *str_arg, longlong i, uint length) + :Item_int(NullS, i, length), func_name(str_arg) + {} + void print(String *str) { str->append(func_name); } +}; + + class Item_uint :public Item_int { public: @@ -622,25 +770,20 @@ public: class Item_real :public Item_num { + char *presentation; public: double value; // Item_real() :value(0) {} - Item_real(const char *str_arg, uint length) :value(my_atof(str_arg)) - { - name=(char*) str_arg; - decimals=(uint8) nr_of_decimals(str_arg); - max_length=length; - fixed= 1; - } + Item_real(const char *str_arg, uint length); Item_real(const char *str,double val_arg,uint decimal_par,uint length) :value(val_arg) { - name=(char*) str; + presentation= name=(char*) str; decimals=(uint8) decimal_par; max_length=length; fixed= 1; } - Item_real(double value_par) :value(value_par) { fixed= 1; } + Item_real(double value_par) :presentation(0), value(value_par) { fixed= 1; } int save_in_field(Field *field, bool no_conversions); enum Type type() const { return REAL_ITEM; } enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } @@ -656,6 +799,19 @@ public: void cleanup() {} Item *new_item() { return new Item_real(name,value,decimals,max_length); } Item_num *neg() { value= -value; return this; } + void print(String *str); +}; + + +class Item_static_real_func :public Item_real +{ + const char *func_name; +public: + Item_static_real_func(const char *str, double val_arg, uint decimal_par, + uint length) + :Item_real(NullS, val_arg, decimal_par, length), func_name(str) + {} + void print(String *str) { str->append(func_name); } }; @@ -739,6 +895,20 @@ public: void cleanup() {} }; + +class Item_static_string_func :public Item_string +{ + const char *func_name; +public: + Item_static_string_func(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, + Derivation dv= DERIVATION_COERCIBLE) + :Item_string(NullS, str, length, cs, dv), func_name(name_par) + {} + void print(String *str) { str->append(func_name); } +}; + + /* for show tables */ class Item_datetime :public Item_string @@ -822,11 +992,11 @@ public: Item **ref; Item_ref(const char *db_par, const char *table_name_par, const char *field_name_par) - :Item_ident(db_par, table_name_par, field_name_par), ref(0) {} + :Item_ident(db_par, table_name_par, field_name_par), result_field(0), ref(0) {} Item_ref(Item **item, const char *table_name_par, const char *field_name_par) - :Item_ident(NullS, table_name_par, field_name_par), ref(item) {} + :Item_ident(NullS, table_name_par, field_name_par), result_field(0), ref(item) {} /* Constructor need to process subselect with 
temporary tables (see Item) */ - Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), ref(item->ref) {} + Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), result_field(item->result_field), ref(item->ref) {} enum Type type() const { return REF_ITEM; } bool eq(const Item *item, bool binary_cmp) const { return ref && (*ref)->eq(item, binary_cmp); } @@ -857,7 +1027,10 @@ public: { return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); } - bool send(Protocol *prot, String *tmp){ return (*ref)->send(prot, tmp); } + double val_result(); + longlong val_int_result(); + String *str_result(String* tmp); + bool send(Protocol *prot, String *tmp); void make_field(Send_field *field) { (*ref)->make_field(field); } bool fix_fields(THD *, struct st_table_list *, Item **); int save_in_field(Field *field, bool no_conversions) @@ -870,13 +1043,17 @@ public: return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } void set_result_field(Field *field) { result_field= field; } + Field *get_tmp_table_field() { return result_field; } bool is_result_field() { return 1; } void save_in_result_field(bool no_conversions) { (*ref)->save_in_field(result_field, no_conversions); } Item *real_item() { return *ref; } + bool walk(Item_processor processor, byte *arg) + { return (*ref)->walk(processor, arg); } void print(String *str); + void cleanup(); }; class Item_in_subselect; @@ -1060,6 +1237,19 @@ public: return arg->walk(processor, args) || (this->*processor)(args); } + + /* + This method like the walk method traverses the item tree, but + at the same time it can replace some nodes in the tree + */ + Item *transform(Item_transformer transformer, byte *args) + { + Item *new_item= arg->transform(transformer, args); + if (!new_item) + return 0; + arg= new_item; + return (this->*transformer)(args); + } }; class Item_insert_value : public Item_field @@ -1084,6 +1274,57 @@ public: } }; + +/* + We need this two enums here instead of sql_lex.h because + at least one of them is used by Item_trigger_field interface. + + Time when trigger is invoked (i.e. before or after row actually + inserted/updated/deleted). +*/ +enum trg_action_time_type +{ + TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1 +}; + +/* + Event on which trigger is invoked. +*/ +enum trg_event_type +{ + TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2 +}; + +/* + Represents NEW/OLD version of field of row which is + changed/read in trigger. + + Note: For this item actual binding to Field object happens not during + fix_fields() (like for Item_field) but during parsing of trigger + definition, when table is opened, with special setup_field() call. +*/ +class Item_trigger_field : public Item_field +{ +public: + /* Is this item represents row from NEW or OLD row ? 
*/ + enum row_version_type {OLD_ROW, NEW_ROW}; + row_version_type row_version; + + Item_trigger_field(row_version_type row_ver_par, + const char *field_name_par): + Item_field((const char *)NULL, (const char *)NULL, field_name_par), + row_version(row_ver_par) + {} + bool setup_field(THD *thd, TABLE *table, enum trg_event_type event); + enum Type type() const { return TRIGGER_FIELD_ITEM; } + bool eq(const Item *item, bool binary_cmp) const; + bool fix_fields(THD *, struct st_table_list *, Item **); + void print(String *str); + table_map used_tables() const { return (table_map)0L; } + void cleanup(); +}; + + class Item_cache: public Item { protected: diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c36f2d191c7..905250ed96f 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -173,12 +173,26 @@ void Item_bool_func2::fix_length_and_dec() if (!args[0] || !args[1]) return; + /* + We allow to convert to Unicode character sets in some cases. + The conditions when conversion is possible are: + - arguments A and B have different charsets + - A wins according to coercibility rules + - character set of A is superset for character set of B + + If all of the above is true, then it's possible to convert + B into the character set of A, and then compare according + to the collation of A. + */ + + DTCollation coll; if (args[0]->result_type() == STRING_RESULT && args[1]->result_type() == STRING_RESULT && agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV)) return; - + + // Make a special case of compare with fields to get nicer DATE comparisons if (functype() == LIKE_FUNC) // Disable conversion in case of LIKE function. @@ -200,7 +214,7 @@ void Item_bool_func2::fix_length_and_dec() } } } - if (args[1]->type() == FIELD_ITEM) + if (args[1]->type() == FIELD_ITEM /* && !args[1]->const_item() */) { Field *field=((Item_field*) args[1])->field; if (field->can_be_compared_as_longlong()) @@ -265,6 +279,17 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) func= &Arg_comparator::compare_binary_string; else if (func == &Arg_comparator::compare_e_string) func= &Arg_comparator::compare_e_binary_string; + + /* + As this is binary comparsion, mark all fields that they can't be + transformed. Otherwise we would get into trouble with comparisons + like: + WHERE col= 'j' AND col LIKE BINARY 'j' + which would be transformed to: + WHERE col= 'j' + */ + (*a)->transform(&Item::set_no_const_sub, (byte*) 0); + (*b)->transform(&Item::set_no_const_sub, (byte*) 0); } } else if (type == INT_RESULT) @@ -542,7 +567,7 @@ bool Item_in_optimizer::fix_left(THD *thd, If it is preparation PS only then we do not know values of parameters => cant't get there values and do not need that values. */ - if (! 
thd->current_arena->is_stmt_prepare()) + if (!thd->only_prepare()) cache->store(args[0]); if (cache->cols() == 1) { @@ -1006,7 +1031,7 @@ Item_func_if::fix_length_and_dec() if (cached_result_type == STRING_RESULT) { if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV)) - return; + return; } else { @@ -1265,8 +1290,10 @@ void Item_func_case::fix_length_and_dec() if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; - // Aggregate all THEN and ELSE expression types - // and collations when string result + /* + Aggregate all THEN and ELSE expression types + and collations when string result + */ for (nagg= 0 ; nagg < ncases/2 ; nagg++) agg[nagg]= args[nagg*2+1]; @@ -1586,7 +1613,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: %lx", this)); + DBUG_PRINT("enter",("this: 0x%lx", this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -1921,7 +1948,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) char buff[sizeof(char*)]; // Max local vars in function #endif not_null_tables_cache= used_tables_cache= 0; - const_item_cache= 0; + const_item_cache= 1; /* and_table_cache is the value that Item_cond_or() returns for not_null_tables() @@ -1952,7 +1979,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) tmp_table_map= item->not_null_tables(); not_null_tables_cache|= tmp_table_map; and_tables_cache&= tmp_table_map; - const_item_cache&= item->const_item(); + const_item_cache&= item->const_item(); with_sum_func= with_sum_func || item->with_sum_func; if (item->maybe_null) maybe_null=1; @@ -1973,13 +2000,51 @@ bool Item_cond::walk(Item_processor processor, byte *arg) return Item_func::walk(processor, arg); } + +/* + Transform an Item_cond object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method with the + same transformer to each member item of the codition list. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transform method is applied to the root node + of the Item_cond object. 
+ + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_cond::transform(Item_transformer transformer, byte *arg) +{ + List_iterator<Item> li(list); + Item *item; + while ((item= li++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + if (new_item != item) + li.replace(new_item); + } + return Item_func::transform(transformer, arg); +} + + void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields) { List_iterator<Item> li(list); Item *item; used_tables_cache=0; - const_item_cache=0; + const_item_cache=1; while ((item=li++)) { if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) @@ -2018,7 +2083,7 @@ void Item_cond::update_used_tables() { item->update_used_tables(); used_tables_cache|= item->used_tables(); - const_item_cache&= item->const_item(); + const_item_cache&= item->const_item(); } } @@ -2822,3 +2887,290 @@ Item *Item_bool_rowready_func2::negated_item() DBUG_ASSERT(0); return 0; } + +Item_equal::Item_equal(Item_field *f1, Item_field *f2) + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f1); + fields.push_back(f2); +} + +Item_equal::Item_equal(Item *c, Item_field *f) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f); + const_item= c; +} + +Item_equal::Item_equal(Item_equal *item_equal) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + List_iterator_fast<Item_field> li(item_equal->fields); + Item_field *item; + while ((item= li++)) + { + fields.push_back(item); + } + const_item= item_equal->const_item; + cond_false= item_equal->cond_false; +} + +void Item_equal::add(Item *c) +{ + if (cond_false) + return; + if (!const_item) + { + const_item= c; + return; + } + Item_func_eq *func= new Item_func_eq(c, const_item); + func->set_cmp_func(); + func->quick_fix_field(); + cond_false = !(func->val_int()); +} + +void Item_equal::add(Item_field *f) +{ + fields.push_back(f); +} + +uint Item_equal::members() +{ + uint count= 0; + List_iterator_fast<Item_field> li(fields); + Item_field *item; + while ((item= li++)) + count++; + return count; +} + + +/* + Check whether a field is referred in the multiple equality + + SYNOPSIS + contains() + field field whose occurence is to be checked + + DESCRIPTION + The function checks whether field is occured in the Item_equal object + + RETURN VALUES + 1 if nultiple equality contains a reference to field + 0 otherwise +*/ + +bool Item_equal::contains(Field *field) +{ + List_iterator_fast<Item_field> it(fields); + Item_field *item; + while ((item= it++)) + { + if (field->eq(item->field)) + return 1; + } + return 0; +} + + +/* + Join members of another Item_equal object + + SYNOPSIS + merge() + item multiple equality whose members are to be joined + + DESCRIPTION + The function actually merges two multiple equalitis. + After this operation the Item_equal object additionally contains + the field items of another item of the type Item_equal. + If the optional constant items are not equal the cond_false flag is + set to 1. + + RETURN VALUES + none +*/ + +void Item_equal::merge(Item_equal *item) +{ + fields.concat(&item->fields); + Item *c= item->const_item; + if (c) + { + /* + The flag cond_false will be set to 1 after this, if + the multiple equality already contains a constant and its + value is not equal to the value of c. 
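A small illustrative case of such a constant conflict (hypothetical tables):

    SELECT * FROM t1, t2
    WHERE t1.a = t2.a AND t1.a = 1 AND t2.a = 2;

The three predicates are folded into a single multiple equality; because the constants 1 and 2 differ, cond_false is set and the whole condition can be recognized as always false without reading any rows.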
+ */ + add(const_item); + } + cond_false|= item->cond_false; +} + + +/* + Order field items in multiple equality according to a sorting criteria + + SYNOPSIS + sort() + cmp function to compare field item + arg context extra parameter for the cmp function + + DESCRIPTION + The function perform ordering of the field items in the Item_equal + object according to the criteria determined by the cmp callback parameter. + If cmp(item_field1,item_field2,arg)<0 than item_field1 must be + placed after item_fiel2. + + IMPLEMENTATION + The function sorts field items by the exchange sort algorithm. + The list of field items is looked through and whenever two neighboring + members follow in a wrong order they are swapped. This is performed + again and again until we get all members in a right order. + + RETURN VALUES + None +*/ + +void Item_equal::sort(Item_field_cmpfunc cmp, void *arg) +{ + bool swap; + List_iterator<Item_field> it(fields); + do + { + Item_field *item1= it++; + Item_field **ref1= it.ref(); + Item_field *item2; + + swap= FALSE; + while ((item2= it++)) + { + Item_field **ref2= it.ref(); + if (cmp(item1, item2, arg) < 0) + { + Item_field *item= *ref1; + *ref1= *ref2; + *ref2= item; + swap= TRUE; + } + else + { + item1= item2; + ref1= ref2; + } + } + it.rewind(); + } while (swap); +} + +bool Item_equal::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + const_item_cache= 0; + while ((item=li++)) + { + table_map tmp_table_map; + used_tables_cache|= item->used_tables(); + tmp_table_map= item->not_null_tables(); + not_null_tables_cache|= tmp_table_map; + if (item->maybe_null) + maybe_null=1; + } + fix_length_and_dec(); + fixed= 1; + return 0; +} + +void Item_equal::update_used_tables() +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + if ((const_item_cache= cond_false)) + return; + while ((item=li++)) + { + item->update_used_tables(); + used_tables_cache|= item->used_tables(); + const_item_cache&= item->const_item(); + } +} + +longlong Item_equal::val_int() +{ + if (cond_false) + return 0; + List_iterator_fast<Item_field> it(fields); + Item *item= const_item ? const_item : it++; + if ((null_value= item->null_value)) + return 0; + eval_item->store_value(item); + while ((item= it++)) + { + if ((null_value= item->null_value) || eval_item->cmp(item)) + return 0; + } + return 1; +} + +void Item_equal::fix_length_and_dec() +{ + Item *item= const_item ? 
const_item : get_first(); + eval_item= cmp_item::get_comparator(item); + if (item->result_type() == STRING_RESULT) + eval_item->cmp_charset= cmp_collation.collation; +} + +bool Item_equal::walk(Item_processor processor, byte *arg) +{ + List_iterator_fast<Item_field> it(fields); + Item *item; + while ((item= it++)) + if (item->walk(processor, arg)) + return 1; + return Item_func::walk(processor, arg); +} + +Item *Item_equal::transform(Item_transformer transformer, byte *arg) +{ + List_iterator<Item_field> it(fields); + Item *item; + while ((item= it++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + if (new_item != item) + it.replace((Item_field *) new_item); + } + return Item_func::transform(transformer, arg); +} + +void Item_equal::print(String *str) +{ + str->append(func_name()); + str->append('('); + List_iterator_fast<Item_field> it(fields); + Item *item; + if (const_item) + const_item->print(str); + else + { + item= it++; + item->print(str); + } + while ((item= it++)) + { + str->append(','); + str->append(' '); + item->print(str); + } + str->append(')'); +} + diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 69528099aa1..af2c385b296 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -27,6 +27,8 @@ class Arg_comparator; typedef int (Arg_comparator::*arg_cmp_func)(); +typedef int (*Item_field_cmpfunc)(Item_field *f1, Item_field *f2, void *arg); + class Arg_comparator: public Sql_alloc { Item **a, **b; @@ -229,6 +231,40 @@ public: Item *neg_transformer(THD *thd); }; + +/* + The class Item_func_trig_cond is used for guarded predicates + which are employed only for internal purposes. + A guarded predicates is an object consisting of an a regular or + a guarded predicate P and a pointer to a boolean guard variable g. + A guarded predicate P/g is evaluated to true if the value of the + guard g is false, otherwise it is evaluated to the same value that + the predicate P: val(P/g)= g ? val(P):true. + Guarded predicates allow us to include predicates into a conjunction + conditionally. Currently they are utilized for pushed down predicates + in queries with outer join operations. + + In the future, probably, it makes sense to extend this class to + the objects consisting of three elements: a predicate P, a pointer + to a variable g and a firing value s with following evaluation + rule: val(P/g,s)= g==s? val(P) : true. It will allow us to build only + one item for the objects of the form P/g1/g2... + + Objects of this class are built only for query execution after + the execution plan has been already selected. That's why this + class needs only val_int out of generic methods. +*/ + +class Item_func_trig_cond: public Item_bool_func +{ + bool *trig_var; +public: + Item_func_trig_cond(Item *a, bool *f) : Item_bool_func(a) { trig_var= f; } + longlong val_int() { return *trig_var ? 
args[0]->val_int() : 1; } + enum Functype functype() const { return TRIG_COND_FUNC; }; + const char *func_name() const { return "trigcond"; }; +}; + class Item_func_not_all :public Item_func_not { bool abort_on_null; @@ -815,7 +851,7 @@ public: } const char *func_name() const { return "isnotnull"; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } - table_map not_null_tables() const { return 0; } + table_map not_null_tables() const { return used_tables(); } Item *neg_transformer(THD *thd); void print(String *str); CHARSET_INFO *compare_collation() { return args[0]->collation.collation; } @@ -914,6 +950,7 @@ public: Item_cond(List<Item> &nlist) :Item_bool_func(), list(nlist), abort_on_null(0) {} bool add(Item *item) { return list.push_back(item); } + void add_at_head(List<Item> *nlist) { list.prepand(nlist); } bool fix_fields(THD *, struct st_table_list *, Item **ref); enum Type type() const { return COND_ITEM; } @@ -926,13 +963,151 @@ public: void top_level_item() { abort_on_null=1; } void copy_andor_arguments(THD *thd, Item_cond *item); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); void neg_arguments(THD *thd); }; +/* + The class Item_equal is used to represent conjuctions of equality + predicates of the form field1 = field2, and field=const in where + conditions and on expressions. + + All equality predicates of the form field1=field2 contained in a + conjuction are substituted for a sequence of items of this class. + An item of this class Item_equal(f1,f2,...fk) respresents a + multiple equality f1=f2=...=fk. + + If a conjuction contains predicates f1=f2 and f2=f3, a new item of + this class is created Item_equal(f1,f2,f3) representing the multiple + equality f1=f2=f3 that substitutes the above equality predicates in + the conjuction. + A conjuction of the predicates f2=f1 and f3=f1 and f3=f2 will be + substituted for the item representing the same multiple equality + f1=f2=f3. + An item Item_equal(f1,f2) can appear instead of a conjuction of + f2=f1 and f1=f2, or instead of just the predicate f1=f2. + + An item of the class Item_equal inherites equalities from outer + conjunctive levels. + + Suppose we have a where condition of the following form: + WHERE f1=f2 AND f3=f4 AND f3=f5 AND ... AND (...OR (f1=f3 AND ...)). + In this case: + f1=f2 will be substituted for Item_equal(f1,f2); + f3=f4 and f3=f5 will be substituted for Item_equal(f3,f4,f5); + f1=f3 will be substituted for Item_equal(f1,f2,f3,f4,f5); + + An object of the class Item_equal can contain an optional constant + item c. Thenit represents a multiple equality of the form + c=f1=...=fk. + + Objects of the class Item_equal are used for the following: + + 1. An object Item_equal(t1.f1,...,tk.fk) allows us to consider any + pair of tables ti and tj as joined by an equi-condition. + Thus it provide us with additional access paths from table to table. + + 2. An object Item_equal(t1.f1,...,tk.fk) is applied to deduce new + SARGable predicates: + f1=...=fk AND P(fi) => f1=...=fk AND P(fi) AND P(fj). + It also can give us additional index scans and can allow us to + improve selectivity estimates. + + 3. An object Item_equal(t1.f1,...,tk.fk) is used to optimize the + selected execution plan for the query: if table ti is accessed + before the table tj then in any predicate P in the where condition + the occurence of tj.fj is substituted for ti.fi. This can allow + an evaluation of the predicate at an earlier step. 
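To make this concrete, consider a hypothetical join:

    SELECT * FROM t1, t2, t3
    WHERE t1.a = t2.a AND t2.a = t3.a AND t1.a = 5;

The predicates collapse into the single multiple equality 5=t1.a=t2.a=t3.a, so each of the three tables can be accessed through the constant 5 (for instance via an index on its column), and a predicate on any one of the equal fields can be rewritten to use whichever table is read first in the chosen plan.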
+ + When feature 1 is supported they say that join transitive closure + is employed. + When feature 2 is supported they say that search argument transitive + closure is employed. + Both features are usually supported by preprocessing original query and + adding additional predicates. + We do not just add predicates, we rather dynamically replace some + predicates that can not be used to access tables in the investigated + plan for those, obtained by substitution of some fields for equal fields, + that can be used. +*/ + +class Item_equal: public Item_bool_func +{ + List<Item_field> fields; /* list of equal field items */ + Item *const_item; /* optional constant item equal to fields items */ + cmp_item *eval_item; + bool cond_false; + DTCollation cmp_collation; +public: + inline Item_equal() + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) + { const_item_cache=0 ;} + Item_equal(Item_field *f1, Item_field *f2); + Item_equal(Item *c, Item_field *f); + Item_equal(Item_equal *item_equal); + inline Item* get_const() { return const_item; } + void add(Item *c); + void add(Item_field *f); + uint members(); + bool contains(Field *field); + Item_field* get_first() { return fields.head(); } + void merge(Item_equal *item); + enum Functype functype() const { return MULT_EQUAL_FUNC; } + longlong val_int(); + const char *func_name() const { return "multiple equal"; } + optimize_type select_optimize() const { return OPTIMIZE_EQUAL; } + void sort(Item_field_cmpfunc cmp, void *arg); + friend class Item_equal_iterator; + void fix_length_and_dec(); + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + void update_used_tables(); + bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); + void print(String *str); + CHARSET_INFO *compare_collation() + { return fields.head()->collation.collation; } +}; + +class COND_EQUAL: public Sql_alloc +{ +public: + uint max_members; /* max number of members the current level + list and all lower level lists */ + COND_EQUAL *upper_levels; /* multiple equalities of upper and levels */ + List<Item_equal> current_level; /* list of multiple equalities of + the current and level */ + COND_EQUAL() + { + max_members= 0; + upper_levels= 0; + } +}; + + +class Item_equal_iterator :List_iterator_fast<Item_field> +{ +public: + inline Item_equal_iterator(Item_equal &item_equal) + :List_iterator_fast<Item_field> (item_equal.fields) + {} + inline Item_field* operator++(int) + { + Item_field *item= (*(List_iterator_fast<Item_field> *) this)++; + return item; + } + inline void rewind(void) + { + List_iterator_fast<Item_field>::rewind(); + } +}; + class Item_cond_and :public Item_cond { public: + COND_EQUAL cond_equal; /* contains list of Item_equal objects for + the current and level and reference + to multiple equalities of upper and levels */ Item_cond_and() :Item_cond() {} Item_cond_and(Item *i1,Item *i2) :Item_cond(i1,i2) {} Item_cond_and(THD *thd, Item_cond_and *item) :Item_cond(thd, item) {} diff --git a/sql/item_create.cc b/sql/item_create.cc index 800489a6a72..cec6de3eede 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -18,10 +18,6 @@ #include "mysql_priv.h" -#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - Item *create_func_abs(Item* a) { return new Item_func_abs(a); @@ -77,12 +73,13 @@ Item *create_func_connection_id(void) { THD *thd=current_thd; thd->lex->safe_to_cache_query= 0; - return new Item_int(NullS,(longlong) - ((thd->slave_thread) ? 
- thd->variables.pseudo_thread_id : - thd->thread_id), - 10); -} + return new Item_static_int_func("connection_id()", + (longlong) + ((thd->slave_thread) ? + thd->variables.pseudo_thread_id : + thd->thread_id), + 10); +} Item *create_func_conv(Item* a, Item *b, Item *c) { @@ -292,7 +289,7 @@ Item *create_func_period_diff(Item* a, Item *b) Item *create_func_pi(void) { - return new Item_real("pi()",M_PI,6,8); + return new Item_static_real_func("pi()", M_PI, 6, 8); } Item *create_func_pow(Item* a, Item *b) @@ -308,13 +305,9 @@ Item *create_func_current_user() length= (uint) (strxmov(buff, thd->priv_user, "@", thd->priv_host, NullS) - buff); - return new Item_string(NullS, thd->memdup(buff, length), length, - system_charset_info); -} - -Item *create_func_quarter(Item* a) -{ - return new Item_func_quarter(a); + return new Item_static_string_func("current_user()", + thd->memdup(buff, length), length, + system_charset_info); } Item *create_func_radians(Item *a) @@ -441,7 +434,7 @@ Item *create_func_uuid(void) Item *create_func_version(void) { - return new Item_string(NullS,server_version, + return new Item_static_string_func("version()", server_version, (uint) strlen(server_version), system_charset_info, DERIVATION_IMPLICIT); } diff --git a/sql/item_create.h b/sql/item_create.h index 7577627ef04..d48aed5284a 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -72,7 +72,6 @@ Item *create_func_period_diff(Item* a, Item *b); Item *create_func_pi(void); Item *create_func_pow(Item* a, Item *b); Item *create_func_current_user(void); -Item *create_func_quarter(Item* a); Item *create_func_radians(Item *a); Item *create_func_release_lock(Item* a); Item *create_func_repeat(Item* a, Item *b); diff --git a/sql/item_func.cc b/sql/item_func.cc index bf85e5b378a..185d4f115ad 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -28,6 +28,9 @@ #include <time.h> #include <ft_global.h> +#include "sp_head.h" +#include "sp_rcontext.h" +#include "sp.h" bool check_reserved_words(LEX_STRING *name) { @@ -189,6 +192,8 @@ bool Item_func::agg_arg_charsets(DTCollation &coll, res= TRUE; break; // we cannot return here, we need to restore "arena". } + if ((*arg)->type() == FIELD_ITEM) + ((Item_field *)(*arg))->no_const_subst= 1; conv->fix_fields(thd, 0, &conv); *arg= conv; } @@ -337,6 +342,46 @@ bool Item_func::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + +/* + Transform an Item_func object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + argument parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method with the + same transformer to each argument the function. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transform method is applied to the root node + of the Item_func object. 
+ + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_func::transform(Item_transformer transformer, byte *argument) +{ + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + Item *new_item= (*arg)->transform(transformer, argument); + if (!new_item) + return 0; + if (*arg != new_item) + current_thd->change_item_tree(arg, new_item); + } + } + return (this->*transformer)(argument); +} + + void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields) { @@ -511,6 +556,16 @@ void Item_func::fix_num_length_and_dec() max_length=float_length(decimals); } +void Item_func::signal_divide_by_null() +{ + THD *thd= current_thd; + if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DIVISION_BY_ZERO, + ER(ER_DIVISION_BY_ZERO)); + null_value= 1; +} + + Item *Item_func::get_tmp_table_item(THD *thd) { if (!with_sum_func && !const_item()) @@ -676,11 +731,17 @@ double Item_func_div::val() DBUG_ASSERT(fixed == 1); double value=args[0]->val(); double val2=args[1]->val(); - if ((null_value= val2 == 0.0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0.0; + if (val2 == 0.0) + { + signal_divide_by_null(); return 0.0; + } return value/val2; } + longlong Item_func_div::val_int() { DBUG_ASSERT(fixed == 1); @@ -688,13 +749,19 @@ longlong Item_func_div::val_int() { longlong value=args[0]->val_int(); longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0; + if (val2 == 0) + { + signal_divide_by_null(); return 0; + } return value/val2; } return (longlong) Item_func_div::val(); } + void Item_func_div::fix_length_and_dec() { decimals=max(args[0]->decimals,args[1]->decimals)+2; @@ -712,8 +779,13 @@ longlong Item_func_int_div::val_int() DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + if (args[0]->null_value || args[1]->null_value) + return 0; + if (val2 == 0) + { + signal_divide_by_null(); return 0; + } return (unsigned_flag ? 
(ulonglong) value / (ulonglong) val2 : value / val2); @@ -731,11 +803,16 @@ void Item_func_int_div::fix_length_and_dec() double Item_func_mod::val() { DBUG_ASSERT(fixed == 1); - double x= args[0]->val(); - double y= args[1]->val(); - if ((null_value= (y == 0.0) || args[0]->null_value || args[1]->null_value)) + double value= args[0]->val(); + double val2= args[1]->val(); + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0.0; /* purecov: inspected */ - return fmod(x, y); + if (val2 == 0.0) + { + signal_divide_by_null(); + return 0.0; + } + return fmod(value,val2); } longlong Item_func_mod::val_int() @@ -743,8 +820,13 @@ longlong Item_func_mod::val_int() DBUG_ASSERT(fixed == 1); longlong value= args[0]->val_int(); longlong val2= args[1]->val_int(); - if ((null_value=val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0; /* purecov: inspected */ + if (val2 == 0) + { + signal_divide_by_null(); + return 0; + } return value % val2; } @@ -865,7 +947,7 @@ double Item_func_log2::val() double value=args[0]->val(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; - return log(value) / log(2.0); + return log(value) / M_LN2; } double Item_func_log10::val() @@ -1685,11 +1767,16 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, const_item_cache&=item->const_item(); f_args.arg_type[i]=item->result_type(); } + //TODO: why all folowing memory is not allocated with 1 call of sql_alloc? if (!(buffers=new String[arg_count]) || !(f_args.args= (char**) sql_alloc(arg_count * sizeof(char *))) || - !(f_args.lengths=(ulong*) sql_alloc(arg_count * sizeof(long))) || - !(f_args.maybe_null=(char*) sql_alloc(arg_count * sizeof(char))) || - !(num_buffer= (char*) sql_alloc(ALIGN_SIZE(sizeof(double))*arg_count))) + !(f_args.lengths= (ulong*) sql_alloc(arg_count * sizeof(long))) || + !(f_args.maybe_null= (char*) sql_alloc(arg_count * sizeof(char))) || + !(num_buffer= (char*) sql_alloc(arg_count * + ALIGN_SIZE(sizeof(double)))) || + !(f_args.attributes= (char**) sql_alloc(arg_count * sizeof(char *))) || + !(f_args.attribute_lengths= (ulong*) sql_alloc(arg_count * + sizeof(long)))) { free_udf(u_d); DBUG_RETURN(1); @@ -1708,8 +1795,10 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, for (uint i=0; i < arg_count; i++) { f_args.args[i]=0; - f_args.lengths[i]=arguments[i]->max_length; - f_args.maybe_null[i]=(char) arguments[i]->maybe_null; + f_args.lengths[i]= arguments[i]->max_length; + f_args.maybe_null[i]= (char) arguments[i]->maybe_null; + f_args.attributes[i]= arguments[i]->name; + f_args.attribute_lengths[i]= arguments[i]->name_length; switch(arguments[i]->type()) { case Item::STRING_ITEM: // Constant string ! 
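The arithmetic hunks above all follow the same new pattern: NULL operands are handled first, and only a genuine zero divisor calls signal_divide_by_null(), which pushes a condition only when MODE_ERROR_FOR_DIVISION_BY_ZERO is set in sql_mode and makes the result NULL in either case. A minimal standalone sketch of that control flow — not part of the patch, using stand-in names (error_for_division_by_zero, sql_div) instead of the server's THD/sql_mode machinery:

```cpp
// Standalone sketch of the divide-by-zero flow above (stand-in names, not
// server code): NULL operands short-circuit first; a zero divisor then
// signals the condition only when the sql_mode flag is set, and the result
// becomes SQL NULL in both cases.
#include <cstdio>
#include <optional>

static bool error_for_division_by_zero= true;   // stand-in for the sql_mode bit

static void signal_divide_by_null(bool *null_value)
{
  if (error_for_division_by_zero)
    std::fprintf(stderr, "warning: division by 0\n"); // push_warning() stand-in
  *null_value= true;
}

static std::optional<double> sql_div(std::optional<double> a,
                                     std::optional<double> b)
{
  bool null_value= !a || !b;
  if (null_value)
    return std::nullopt;                // NULL operand: NULL result, no warning
  if (*b == 0.0)
  {
    signal_divide_by_null(&null_value); // zero divisor: maybe warn, then NULL
    return std::nullopt;
  }
  return *a / *b;
}

int main()
{
  std::optional<double> r= sql_div(1.0, 0.0);
  if (r)
    std::printf("%f\n", *r);
  else
    std::printf("NULL\n");
  return 0;
}
```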
@@ -2685,6 +2774,16 @@ void Item_func_set_user_var::print(String *str) } +void Item_func_set_user_var::print_as_stmt(String *str) +{ + str->append("set @", 5); + str->append(name.str, name.length); + str->append(":=", 2); + args[0]->print(str); + str->append(')'); +} + + String * Item_func_get_user_var::val_str(String *str) { @@ -3383,6 +3482,158 @@ longlong Item_func_is_used_lock::val_int() } +longlong Item_func_row_count::val_int() +{ + DBUG_ASSERT(fixed == 1); + THD *thd= current_thd; + + return thd->row_count_func; +} + + +Item_func_sp::Item_func_sp(sp_name *name) + :Item_func(), m_name(name), m_sp(NULL) +{ + m_name->init_qname(current_thd); +} + +Item_func_sp::Item_func_sp(sp_name *name, List<Item> &list) + :Item_func(list), m_name(name), m_sp(NULL) +{ + m_name->init_qname(current_thd); +} + +const char * +Item_func_sp::func_name() const +{ + THD *thd= current_thd; + /* Calculate length to avoud reallocation of string for sure */ + uint len= ((m_name->m_db.length + + m_name->m_name.length)*2 + //characters*quoting + 2 + // ` and ` + 1 + // . + 1 + // end of string + ALIGN_SIZE(1)); // to avoid String reallocation + String qname((char *)alloc_root(thd->mem_root, len), len, + system_charset_info); + + qname.length(0); + append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length); + qname.append('.'); + append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length); + return qname.ptr(); +} + + +int +Item_func_sp::execute(Item **itp) +{ + DBUG_ENTER("Item_func_sp::execute"); + THD *thd= current_thd; + int res; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + st_sp_security_context save_ctx; +#endif + + if (! m_sp) + m_sp= sp_find_function(thd, m_name); + if (! m_sp) + { + my_printf_error(ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), MYF(0), + "FUNCTION", m_name->m_qname); + DBUG_RETURN(-1); + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_change_security_context(thd, m_sp, &save_ctx); +#endif + + /* + We don't need to surpress senfing of ok packet here (by setting + thd->net.no_send_ok to true), because we are not allowing statements + in functions now. + */ + res= m_sp->execute_function(thd, args, arg_count, itp); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_restore_security_context(thd, m_sp, &save_ctx); +#endif + + DBUG_RETURN(res); +} + +enum enum_field_types +Item_func_sp::field_type() const +{ + DBUG_ENTER("Item_func_sp::field_type"); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (m_sp) + { + DBUG_PRINT("info", ("m_returns = %d", m_sp->m_returns)); + DBUG_RETURN(m_sp->m_returns); + } + my_printf_error(ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), MYF(0), + "FUNCTION", m_name->m_qname); + DBUG_RETURN(MYSQL_TYPE_STRING); +} + +Item_result +Item_func_sp::result_type() const +{ + DBUG_ENTER("Item_func_sp::result_type"); + DBUG_PRINT("info", ("m_sp = %p", m_sp)); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (m_sp) + { + DBUG_RETURN(m_sp->result()); + } + my_printf_error(ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), MYF(0), + "FUNCTION", m_name->m_qname); + DBUG_RETURN(STRING_RESULT); +} + +void +Item_func_sp::fix_length_and_dec() +{ + DBUG_ENTER("Item_func_sp::fix_length_and_dec"); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (! 
m_sp) + { + my_printf_error(ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), MYF(0), + "FUNCTION", m_name->m_qname); + } + else + { + switch (m_sp->result()) { + case STRING_RESULT: + maybe_null= 1; + max_length= MAX_BLOB_WIDTH; + break; + case REAL_RESULT: + decimals= NOT_FIXED_DEC; + max_length= float_length(decimals); + break; + case INT_RESULT: + decimals= 0; + max_length= 21; + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; + } + } + DBUG_VOID_RETURN; +} + + longlong Item_func_found_rows::val_int() { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_func.h b/sql/item_func.h index ce2b34499d6..602b77ae956 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -40,15 +40,19 @@ public: enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, LIKE_FUNC,NOTLIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC, - COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, BETWEEN, IN_FUNC, + COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, + BETWEEN, IN_FUNC, MULT_EQUAL_FUNC, INTERVAL_FUNC, ISNOTNULLTEST_FUNC, SP_EQUALS_FUNC, SP_DISJOINT_FUNC,SP_INTERSECTS_FUNC, SP_TOUCHES_FUNC,SP_CROSSES_FUNC,SP_WITHIN_FUNC, SP_CONTAINS_FUNC,SP_OVERLAPS_FUNC, SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING, SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, - NOT_FUNC, NOT_ALL_FUNC, NOW_FUNC}; - enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL }; + NOT_FUNC, NOT_ALL_FUNC, + NOW_FUNC, TRIG_COND_FUNC, + GUSERVAR_FUNC}; + enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL, + OPTIMIZE_EQUAL }; enum Type type() const { return FUNC_ITEM; } virtual enum Functype functype() const { return UNKNOWN_FUNC; } Item_func(void): @@ -135,6 +139,7 @@ public: return (null_value=args[0]->get_time(ltime)); } bool is_null() { (void) val_int(); return null_value; } + void signal_divide_by_null(); friend class udf_handler; Field *tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg); @@ -148,6 +153,7 @@ public: bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, uint flags= 0); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); }; @@ -950,6 +956,7 @@ public: bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); void fix_length_and_dec(); void print(String *str); + void print_as_stmt(String *str); const char *func_name() const { return "set_user_var"; } }; @@ -962,6 +969,8 @@ class Item_func_get_user_var :public Item_func public: Item_func_get_user_var(LEX_STRING a): Item_func(), name(a) {} + enum Functype functype() const { return GUSERVAR_FUNC; } + LEX_STRING get_name() { return name; } double val(); longlong val_int(); String *val_str(String* str); @@ -1081,6 +1090,88 @@ enum Cast_target }; +class Item_func_row_count :public Item_int_func +{ +public: + Item_func_row_count() :Item_int_func() {} + longlong val_int(); + const char *func_name() const { return "row_count"; } + void fix_length_and_dec() { decimals= 0; maybe_null=0; } +}; + + +/* + * + * Stored FUNCTIONs + * + */ + +class sp_head; +class sp_name; + +class Item_func_sp :public Item_func +{ +private: + sp_name *m_name; + mutable sp_head *m_sp; + + int execute(Item **itp); + +public: + + Item_func_sp(sp_name *name); + + Item_func_sp(sp_name *name, List<Item> &list); + + virtual ~Item_func_sp() + {} + + const char *func_name() const; + + enum enum_field_types field_type() const; + + Item_result result_type() const; + + longlong val_int() + { + return 
(longlong)Item_func_sp::val(); + } + + double val() + { + Item *it; + double d; + + if (execute(&it)) + { + null_value= 1; + return 0.0; + } + d= it->val(); + null_value= it->null_value; + return d; + } + + String *val_str(String *str) + { + Item *it; + String *s; + + if (execute(&it)) + { + null_value= 1; + return NULL; + } + s= it->val_str(str); + null_value= it->null_value; + return s; + } + + void fix_length_and_dec(); + +}; + + class Item_func_found_rows :public Item_int_func { public: diff --git a/sql/item_row.cc b/sql/item_row.cc index 289efe45300..8a020861fef 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -147,6 +147,18 @@ bool Item_row::walk(Item_processor processor, byte *arg) return (this->*processor)(arg); } +Item *Item_row::transform(Item_transformer transformer, byte *arg) +{ + for (uint i= 0; i < arg_count; i++) + { + Item *new_item= items[i]->transform(transformer, arg); + if (!new_item) + return 0; + items[i]= new_item; + } + return (this->*transformer)(arg); +} + void Item_row::bring_value() { for (uint i= 0; i < arg_count; i++) diff --git a/sql/item_row.h b/sql/item_row.h index 39bc4513e1e..ef5856dcdd3 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -65,6 +65,7 @@ public: void print(String *str); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); uint cols() { return arg_count; } Item* el(uint i) { return items[i]; } diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 81fff899ec7..889b00eb6a0 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -2255,6 +2255,18 @@ bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const return 1; } + +void Item_func_set_collation::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(" collate ", 9); + DBUG_ASSERT(args[1]->basic_const_item() && + args[1]->type() == Item::STRING_ITEM); + args[1]->str_value.print(str); + str->append(')'); +} + String *Item_func_charset::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -2829,6 +2841,8 @@ String *Item_func_uuid::val_str(String *str) { DBUG_ASSERT(fixed == 1); char *s; + THD *thd= current_thd; + pthread_mutex_lock(&LOCK_uuid_generator); if (! uuid_time) /* first UUID() call. initializing data */ { @@ -2843,7 +2857,7 @@ String *Item_func_uuid::val_str(String *str) with a clock_seq value (initialized random below), we use a separate randominit() here */ - randominit(&uuid_rand, tmp + (ulong)current_thd, tmp + query_id); + randominit(&uuid_rand, tmp + (ulong) thd, tmp + query_id); for (i=0; i < (int)sizeof(mac); i++) mac[i]=(uchar)(my_rnd(&uuid_rand)*255); } @@ -2853,7 +2867,8 @@ String *Item_func_uuid::val_str(String *str) *--s=_dig_vec_lower[mac[i] & 15]; *--s=_dig_vec_lower[mac[i] >> 4]; } - randominit(&uuid_rand, tmp + (ulong)start_time, tmp + bytes_sent); + randominit(&uuid_rand, tmp + (ulong)start_time, + tmp + thd->status_var.bytes_sent); set_clock_seq_str(); } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index afe03c31345..afc4b9da0a1 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -200,7 +200,7 @@ public: Item_func_substr_index(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} String *val_str(String *); void fix_length_and_dec(); - const char *func_name() const { return "substr_index"; } + const char *func_name() const { return "substring_index"; } }; @@ -243,7 +243,7 @@ public: Returns strcat('*', octet2hex(sha1(sha1(password)))). '*' stands for new password format, sha1(sha1(password) is so-called hash_stage2 value. 
Length of returned string is always 41 byte. To find out how entire - authentification procedure works, see comments in password.c. + authentication procedure works, see comments in password.c. */ class Item_func_password :public Item_str_func @@ -410,6 +410,14 @@ public: return item->walk(processor, arg) || Item_str_func::walk(processor, arg); } + Item *transform(Item_transformer transformer, byte *arg) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + item= new_item; + return Item_str_func::transform(transformer, arg); + } void print(String *str); }; @@ -617,7 +625,12 @@ public: void fix_length_and_dec(); bool eq(const Item *item, bool binary_cmp) const; const char *func_name() const { return "collate"; } - void print(String *str) { print_op(str); } + void print(String *str); + Item_field *filed_for_view_update() + { + /* this function is transparent for view updating */ + return args[0]->filed_for_view_update(); + } }; class Item_func_charset :public Item_str_func diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 62cd016b0df..f426f14a25f 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -132,12 +132,12 @@ Item_subselect::select_transformer(JOIN *join) bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) { + char const *save_where= thd_param->where; + int res; + DBUG_ASSERT(fixed == 0); engine->set_thd((thd= thd_param)); - char const *save_where= thd->where; - int res; - if (check_stack_overrun(thd, (gptr)&res)) return 1; @@ -268,7 +268,8 @@ Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex) DBUG_VOID_RETURN; } -Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, +Item_maxmin_subselect::Item_maxmin_subselect(THD *thd_param, + Item_subselect *parent, st_select_lex *select_lex, bool max_arg) :Item_singlerow_subselect() @@ -287,6 +288,12 @@ Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, used_tables_cache= parent->get_used_tables_cache(); const_item_cache= parent->get_const_item_cache(); + /* + this subquery alwais creates during preparation, so we can assign + thd here + */ + thd= thd_param; + DBUG_VOID_RETURN; } @@ -310,10 +317,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join) return RES_OK; SELECT_LEX *select_lex= join->select_lex; - - /* Juggle with current arena only if we're in prepared statement prepare */ - Item_arena *arena= join->thd->current_arena; - + Item_arena *arena= thd->current_arena; + if (!select_lex->master_unit()->first_select()->next_select() && !select_lex->table_list.elements && select_lex->item_list.elements == 1 && @@ -337,11 +342,11 @@ Item_singlerow_subselect::select_transformer(JOIN *join) { have_to_be_excluded= 1; - if (join->thd->lex->describe) + if (thd->lex->describe) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); - push_warning(join->thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } substitution= select_lex->item_list.head(); @@ -364,7 +369,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join) if (!(substitution= new Item_func_if(cond, substitution, new Item_null()))) goto err; - } + } return RES_REDUCE; } return RES_OK; @@ -373,6 +378,7 @@ err: return RES_ERROR; } + void Item_singlerow_subselect::store(uint i, Item *item) { row[i]->store(item); @@ -647,6 +653,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, Comp_creator *func) { const char 
*save_where= thd->where; + Item_subselect::trans_res result= RES_ERROR; DBUG_ENTER("Item_in_subselect::single_value_transformer"); if (changed) @@ -734,7 +741,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, // remove LIMIT placed by ALL/ANY subquery select_lex->master_unit()->global_parameters->select_limit= HA_POS_ERROR; - subs= new Item_maxmin_subselect(this, select_lex, func->l_op()); + subs= new Item_maxmin_subselect(thd, this, select_lex, func->l_op()); } // left expression belong to outer select SELECT_LEX *current= thd->lex->current_select, *up; @@ -778,9 +785,6 @@ Item_in_subselect::single_value_transformer(JOIN *join, } select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; - Item *item; - - item= (Item*) select_lex->item_list.head(); /* Add the left part of a subselect to a WHERE or HAVING clause of the right part, e.g. SELECT 1 IN (SELECT a FROM t1) => @@ -791,11 +795,13 @@ Item_in_subselect::single_value_transformer(JOIN *join, if (join->having || select_lex->with_sum_func || select_lex->group_list.elements) { - item= func->create(expr, - new Item_ref_null_helper(this, - select_lex->ref_pointer_array, - (char *)"<ref>", - this->full_name())); + bool tmp; + Item *item= func->create(expr, + new Item_ref_null_helper(this, + select_lex-> + ref_pointer_array, + (char *)"<ref>", + this->full_name())); /* AND and comparison functions can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last @@ -803,21 +809,22 @@ Item_in_subselect::single_value_transformer(JOIN *join, */ select_lex->having= join->having= and_items(join->having, item); select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; - goto err; - } + tmp= join->having->fix_fields(thd, join->tables_list, 0); select_lex->having_fix_field= 0; + if (tmp) + goto err; } else { + Item *item= (Item*) select_lex->item_list.head(); + select_lex->item_list.empty(); select_lex->item_list.push_back(new Item_int("Not_used", (longlong) 1, 21)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); if (select_lex->table_list.elements) { + bool tmp; Item *having= item, *orig_item= item; item= func->create(expr, item); if (!abort_on_null && orig_item->maybe_null) @@ -833,12 +840,10 @@ Item_in_subselect::single_value_transformer(JOIN *join, new Item_cond_and(having, join->having) : having); select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; + tmp= join->having->fix_fields(thd, join->tables_list, 0); + select_lex->having_fix_field= 0; + if (tmp) goto err; - } - select_lex->having_fix_field= 0; item= new Item_cond_or(item, new Item_func_isnull(orig_item)); } @@ -854,6 +859,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, } else { + bool tmp; if (select_lex->master_unit()->first_select()->next_select()) { /* @@ -863,24 +869,21 @@ Item_in_subselect::single_value_transformer(JOIN *join, */ select_lex->having= join->having= - func->create(expr, + func->create(expr, new Item_null_helper(this, item, (char *)"<no matter>", (char *)"<result>")); select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, - 0)) - { - select_lex->having_fix_field= 0; + tmp= join->having->fix_fields(thd, join->tables_list, 0); + select_lex->having_fix_field= 0; + if (tmp) goto err; - } - select_lex->having_fix_field= 0; } else { // it is single select without tables => possible optimization item= func->create(left_expr, item); - // 
fix_field of item will be done in time of substituting + // fix_field of item will be done in time of substituting substitution= item; have_to_be_excluded= 1; if (thd->lex->describe) @@ -890,23 +893,20 @@ Item_in_subselect::single_value_transformer(JOIN *join, push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } - if (arena) - thd->restore_backup_item_arena(arena, &backup); - DBUG_RETURN(RES_REDUCE); + result= RES_REDUCE; + goto err; } } } ok: - if (arena) - thd->restore_backup_item_arena(arena, &backup); thd->where= save_where; - DBUG_RETURN(RES_OK); + result= RES_OK; err: if (arena) thd->restore_backup_item_arena(arena, &backup); - DBUG_RETURN(RES_ERROR); + DBUG_RETURN(result); } @@ -914,17 +914,17 @@ Item_subselect::trans_res Item_in_subselect::row_value_transformer(JOIN *join) { const char *save_where= thd->where; + Item *item= 0; + SELECT_LEX *select_lex= join->select_lex; DBUG_ENTER("Item_in_subselect::row_value_transformer"); if (changed) { DBUG_RETURN(RES_OK); } - Item_arena *arena, backup; - Item *item= 0; - SELECT_LEX *select_lex= join->select_lex; - thd->where= "row IN/ALL/ANY subquery"; + + Item_arena *arena, backup; arena= thd->change_arena_if_needed(&backup); if (select_lex->item_list.elements != left_expr->cols()) @@ -1002,9 +1002,9 @@ Item_in_subselect::row_value_transformer(JOIN *join) if (join->conds->fix_fields(thd, join->tables_list, 0)) goto err; } + thd->where= save_where; if (arena) thd->restore_backup_item_arena(arena, &backup); - thd->where= save_where; DBUG_RETURN(RES_OK); err: @@ -1071,13 +1071,7 @@ subselect_single_select_engine(st_select_lex *select, { select_lex= select; SELECT_LEX_UNIT *unit= select_lex->master_unit(); - unit->offset_limit_cnt= unit->global_parameters->offset_limit; - unit->select_limit_cnt= unit->global_parameters->select_limit+ - unit->global_parameters ->offset_limit; - if (unit->select_limit_cnt < unit->global_parameters->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; + unit->set_limit(unit->global_parameters, select_lex); unit->item= item; this->select_lex= select_lex; } @@ -1216,17 +1210,17 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) int subselect_single_select_engine::exec() { DBUG_ENTER("subselect_single_select_engine::exec"); - char const *save_where= join->thd->where; - SELECT_LEX *save_select= join->thd->lex->current_select; - join->thd->lex->current_select= select_lex; + char const *save_where= thd->where; + SELECT_LEX *save_select= thd->lex->current_select; + thd->lex->current_select= select_lex; if (!optimized) { optimized=1; if (join->optimize()) { - join->thd->where= save_where; + thd->where= save_where; executed= 1; - join->thd->lex->current_select= save_select; + thd->lex->current_select= save_select; DBUG_RETURN(join->error ? 
join->error : 1); } if (item->engine_changed) @@ -1238,8 +1232,8 @@ int subselect_single_select_engine::exec() { if (join->reinit()) { - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(1); } item->reset(); @@ -1249,20 +1243,20 @@ int subselect_single_select_engine::exec() { join->exec(); executed= 1; - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(join->error||thd->is_fatal_error); } - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(0); } int subselect_union_engine::exec() { - char const *save_where= unit->thd->where; + char const *save_where= thd->where; int res= unit->exec(); - unit->thd->where= save_where; + thd->where= save_where; return res; } @@ -1428,7 +1422,7 @@ void subselect_uniquesubquery_engine::exclude() table_map subselect_engine::calc_const_tables(TABLE_LIST *table) { table_map map= 0; - for (; table; table= table->next) + for(; table; table= table->next_local) { TABLE *tbl= table->table; if (tbl && tbl->const_table) diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 764c41f33b4..6efb9052115 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -26,7 +26,7 @@ class JOIN; class select_subselect; class subselect_engine; class Item_bool_func2; -class Statement; +class Item_arena; /* base class for subselects */ @@ -109,6 +109,7 @@ public: engine_changed= 1; return eng == 0; } + enum_parsing_place place() { return parsing_place; } friend class select_subselect; friend class Item_in_optimizer; @@ -154,7 +155,7 @@ class Item_maxmin_subselect :public Item_singlerow_subselect { bool max; public: - Item_maxmin_subselect(Item_subselect *parent, + Item_maxmin_subselect(THD *thd, Item_subselect *parent, st_select_lex *select_lex, bool max); void print(String *str); }; @@ -209,7 +210,6 @@ public: Item_in_subselect(Item * left_expr, st_select_lex *select_lex); Item_in_subselect() :Item_exists_subselect(), abort_on_null(0), transformed(0), upper_not(0) - {} subs_type substype() { return IN_SUBS; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 3b3a6083725..72c85e2dd40 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -138,6 +138,7 @@ bool Item_sum::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + String * Item_sum_num::val_str(String *str) { @@ -280,6 +281,122 @@ double Item_sum_sum::val() } +/* Item_sum_sum_distinct */ + +Item_sum_sum_distinct::Item_sum_sum_distinct(Item *item) + :Item_sum_num(item), sum(0.0), tree(0) +{ + /* + quick_group is an optimizer hint, which means that GROUP BY can be + handled with help of index on grouped columns. + By setting quick_group to zero we force creation of temporary table + to perform GROUP BY. 
+ */ + quick_group= 0; +} + + +Item_sum_sum_distinct::Item_sum_sum_distinct(THD *thd, + Item_sum_sum_distinct *original) + :Item_sum_num(thd, original), sum(0.0), tree(0) +{ + quick_group= 0; +} + + +Item_sum_sum_distinct::~Item_sum_sum_distinct() +{ + delete tree; +} + + +Item * +Item_sum_sum_distinct::copy_or_same(THD *thd) +{ + return new (thd->mem_root) Item_sum_sum_distinct(thd, this); +} + +C_MODE_START + +static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2) +{ + return memcmp(key1, key2, *(uint *) arg); +} + +C_MODE_END + +bool Item_sum_sum_distinct::setup(THD *thd) +{ + SELECT_LEX *select_lex= thd->lex->current_select; + /* what does it mean??? */ + if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) + return 1; + + DBUG_ASSERT(tree == 0); /* setup can not be called twice */ + + /* + Uniques handles all unique elements in a tree until they can't fit in. + Then thee tree is dumped to the temporary file. + See class Unique for details. + */ + null_value= maybe_null= 1; + /* + TODO: if underlying item result fits in 4 bytes we can take advantage + of it and have tree of long/ulong. It gives 10% performance boost + */ + static uint key_length= sizeof(double); + tree= new Unique(simple_raw_key_cmp, &key_length, key_length, + thd->variables.max_heap_table_size); + return tree == 0; +} + +void Item_sum_sum_distinct::clear() +{ + DBUG_ASSERT(tree); /* we always have a tree */ + null_value= 1; + tree->reset(); +} + +bool Item_sum_sum_distinct::add() +{ + /* args[0]->val() may reset args[0]->null_value */ + double val= args[0]->val(); + if (!args[0]->null_value) + { + DBUG_ASSERT(tree); + null_value= 0; + if (val) + return tree->unique_add(&val); + } + return 0; +} + +C_MODE_START + +static int sum_sum_distinct(void *element, element_count num_of_dups, + void *item_sum_sum_distinct) +{ + ((Item_sum_sum_distinct *) + (item_sum_sum_distinct))->add(* (double *) element); + return 0; +} + +C_MODE_END + +double Item_sum_sum_distinct::val() +{ + /* + We don't have a tree only if 'setup()' hasn't been called; + this is the case of sql_select.cc:return_zero_rows. 
+ */ + sum= 0.0; + if (tree) + tree->walk(sum_sum_distinct, (void *) this); + return sum; +} + +/* end of Item_sum_sum_distinct */ + Item *Item_sum_count::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_count(thd, this); @@ -1071,11 +1188,6 @@ String *Item_variance_field::val_str(String *str) #include "sql_select.h" -int simple_raw_key_cmp(void* arg, byte* key1, byte* key2) -{ - return memcmp(key1, key2, *(uint*) arg); -} - int simple_str_key_cmp(void* arg, byte* key1, byte* key2) { Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; @@ -1915,7 +2027,7 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) result_field= 0; null_value= 1; max_length= group_concat_max_len; - thd->allow_sum_func= 1; + thd->allow_sum_func= 1; if (!(tmp_table_param= new TMP_TABLE_PARAM)) return 1; /* We'll convert all blobs to varchar fields in the temporary table */ @@ -1931,7 +2043,6 @@ bool Item_func_group_concat::setup(THD *thd) List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; uint const_fields; - byte *record; qsort_cmp2 compare_key; DBUG_ENTER("Item_func_group_concat::setup"); @@ -1940,7 +2051,7 @@ bool Item_func_group_concat::setup(THD *thd) /* push all not constant fields to list and create temp table - */ + */ const_fields= 0; always_null= 0; for (uint i= 0; i < arg_count_field; i++) @@ -1958,15 +2069,15 @@ bool Item_func_group_concat::setup(THD *thd) } if (always_null) DBUG_RETURN(0); - + List<Item> all_fields(list); - if (arg_count_order) + if (arg_count_order) { bool hidden_group_fields; setup_group(thd, args, tables_list, list, all_fields, *order, &hidden_group_fields); } - + count_field_types(tmp_table_param,all_fields,0); if (table) { @@ -1996,7 +2107,6 @@ bool Item_func_group_concat::setup(THD *thd) table->no_rows= 1; key_length= table->reclength; - record= table->record[0]; /* Offset to first result field in table */ field_list_offset= table->fields - (list.elements - const_fields); diff --git a/sql/item_sum.h b/sql/item_sum.h index 5aa0d37190b..cede5a1e02e 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -23,13 +23,15 @@ #include <my_tree.h> +class Item_arena; + class Item_sum :public Item_result_field { public: enum Sumfunctype - { COUNT_FUNC,COUNT_DISTINCT_FUNC,SUM_FUNC,AVG_FUNC,MIN_FUNC, - MAX_FUNC, UNIQUE_USERS_FUNC,STD_FUNC,VARIANCE_FUNC,SUM_BIT_FUNC, - UDF_SUM_FUNC, GROUP_CONCAT_FUNC + { COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC, + MIN_FUNC, MAX_FUNC, UNIQUE_USERS_FUNC, STD_FUNC, VARIANCE_FUNC, + SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC }; Item **args, *tmp_args[2]; @@ -146,6 +148,39 @@ class Item_sum_sum :public Item_sum_num }; +/* + Item_sum_sum_distinct - SELECT SUM(DISTINCT expr) FROM ... + support. See also: MySQL manual, chapter 'Adding New Functions To MySQL' + and comments in item_sum.cc. 
+*/ + +class Unique; + +class Item_sum_sum_distinct :public Item_sum_num +{ + double sum; + Unique *tree; +private: + Item_sum_sum_distinct(THD *thd, Item_sum_sum_distinct *item); +public: + Item_sum_sum_distinct(Item *item_par); + ~Item_sum_sum_distinct(); + + bool setup(THD *thd); + void clear(); + bool add(); + double val(); + + inline void add(double val) { sum+= val; } + enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; } + void reset_field() {} // not used + void update_field() {} // not used + const char *func_name() const { return "sum_distinct"; } + Item *copy_or_same(THD* thd); + virtual void no_rows_in_result() {} +}; + + class Item_sum_count :public Item_sum_int { longlong count; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index d8142352e70..394507af4f0 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -460,7 +460,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (!my_isspace(&my_charset_latin1,*val)) { make_truncated_value_warning(current_thd, val_begin, length, - cached_timestamp_type); + cached_timestamp_type, NullS); break; } } while (++val != val_end); @@ -791,7 +791,7 @@ longlong Item_func_to_days::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day); } @@ -800,7 +800,7 @@ longlong Item_func_dayofyear::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime,TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day) - calc_daynr(ltime.year,1,1) + 1; @@ -810,7 +810,7 @@ longlong Item_func_dayofmonth::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.day; } @@ -818,7 +818,7 @@ longlong Item_func_month::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.month; } @@ -847,7 +847,7 @@ longlong Item_func_quarter::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ((ltime.month+2)/3); } @@ -919,7 +919,7 @@ longlong Item_func_week::val_int() DBUG_ASSERT(fixed == 1); uint year; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_week(<ime, week_mode((uint) args[1]->val_int()), @@ -932,7 +932,7 @@ longlong Item_func_yearweek::val_int() DBUG_ASSERT(fixed == 1); uint year,week; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; week= calc_week(<ime, (week_mode((uint) args[1]->val_int()) | WEEK_YEAR), @@ -973,7 +973,7 @@ longlong Item_func_year::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.year; } @@ -1072,9 +1072,15 @@ static bool get_interval_value(Item *args,interval_type int_type, case INTERVAL_YEAR: interval->year= (ulong) value; break; + case INTERVAL_QUARTER: + interval->month= (ulong)(value*3); + break; case INTERVAL_MONTH: interval->month= (ulong) value; break; + case INTERVAL_WEEK: + interval->day= (ulong)(value*7); + break; case INTERVAL_DAY: interval->day= (ulong) value; break; @@ -1545,7 +1551,7 @@ String *Item_func_date_format::val_str(String *str) if (!is_time_format) { - if 
(get_arg0_date(&l_time,1)) + if (get_arg0_date(&l_time, TIME_FUZZY_DATE)) return 0; } else @@ -1707,7 +1713,7 @@ longlong Item_func_convert_tz::val_int() bool Item_func_convert_tz::get_date(TIME *ltime, - uint fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { my_time_t my_time_tmp; bool not_used; @@ -1719,7 +1725,7 @@ bool Item_func_convert_tz::get_date(TIME *ltime, if (!args[2]->const_item()) to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); - if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, 0)) + if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, TIME_NO_ZERO_DATE)) { null_value= 1; return 1; @@ -1783,7 +1789,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) INTERVAL interval; ltime->neg= 0; - if (args[0]->get_date(ltime,0) || + if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) || get_interval_value(args[1],int_type,&value,&interval)) goto null_date; sign= (interval.neg ? -1 : 1); @@ -1843,6 +1849,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) break; } case INTERVAL_DAY: + case INTERVAL_WEEK: period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ @@ -1859,6 +1866,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) ltime->day=28; // Was leap-year break; case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: case INTERVAL_MONTH: period= (ltime->year*12 + sign * (long) interval.year*12 + ltime->month-1 + sign * (long) interval.month); @@ -1890,7 +1898,7 @@ String *Item_date_add_interval::val_str(String *str) TIME ltime; enum date_time_format_types format; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return 0; if (ltime.time_type == MYSQL_TIMESTAMP_DATE) @@ -1913,7 +1921,7 @@ longlong Item_date_add_interval::val_int() DBUG_ASSERT(fixed == 1); TIME ltime; longlong date; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return (longlong) 0; date = (ltime.year*100L + ltime.month)*100L + ltime.day; return ltime.time_type == MYSQL_TIMESTAMP_DATE ? 
date : @@ -1922,12 +1930,13 @@ longlong Item_date_add_interval::val_int() static const char *interval_names[]= { - "year", "month", "day", "hour", "minute", - "second", "microsecond", "year_month", - "day_hour", "day_minute", "day_second", - "hour_minute", "hour_second", "minute_second", - "day_microsecond", "hour_microsecond", - "minute_microsecond", "second_microsecond" + "year", "quarter", "month", "day", "hour", + "minute", "week", "second", "microsecond", + "year_month", "day_hour", "day_minute", + "day_second", "hour_minute", "hour_second", + "minute_second", "day_microsecond", + "hour_microsecond", "minute_microsecond", + "second_microsecond" }; void Item_date_add_interval::print(String *str) @@ -1958,7 +1967,9 @@ void Item_extract::fix_length_and_dec() switch (int_type) { case INTERVAL_YEAR: max_length=4; date_value=1; break; case INTERVAL_YEAR_MONTH: max_length=6; date_value=1; break; + case INTERVAL_QUARTER: max_length=2; date_value=1; break; case INTERVAL_MONTH: max_length=2; date_value=1; break; + case INTERVAL_WEEK: max_length=2; date_value=1; break; case INTERVAL_DAY: max_length=2; date_value=1; break; case INTERVAL_DAY_HOUR: max_length=9; date_value=0; break; case INTERVAL_DAY_MINUTE: max_length=11; date_value=0; break; @@ -1982,10 +1993,12 @@ longlong Item_extract::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; + uint year; + ulong week_format; long neg; if (date_value) { - if (get_arg0_date(<ime,1)) + if (get_arg0_date(<ime, TIME_FUZZY_DATE)) return 0; neg=1; } @@ -2003,7 +2016,13 @@ longlong Item_extract::val_int() switch (int_type) { case INTERVAL_YEAR: return ltime.year; case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month; + case INTERVAL_QUARTER: return ltime.month/3 + 1; case INTERVAL_MONTH: return ltime.month; + case INTERVAL_WEEK: + { + week_format= current_thd->variables.default_week_format; + return calc_week(<ime, week_mode(week_format), &year); + } case INTERVAL_DAY: return ltime.day; case INTERVAL_DAY_HOUR: return (long) (ltime.day*100L+ltime.hour)*neg; case INTERVAL_DAY_MINUTE: return (long) (ltime.day*10000L+ @@ -2103,7 +2122,7 @@ void Item_char_typecast::print(String *str) if (cast_cs) { str->append(" charset ", 9); - str->append(cast_cs->name); + str->append(cast_cs->csname); } str->append(')'); } @@ -2171,7 +2190,7 @@ String *Item_datetime_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !make_datetime(ltime.second_part ? 
DATE_TIME_MICROSECOND : DATE_TIME, <ime, str)) return str; @@ -2206,7 +2225,7 @@ String *Item_time_typecast::val_str(String *str) bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) { - bool res= get_arg0_date(ltime,1); + bool res= get_arg0_date(ltime, TIME_FUZZY_DATE); ltime->time_type= MYSQL_TIMESTAMP_DATE; return res; } @@ -2217,7 +2236,7 @@ String *Item_date_typecast::val_str(String *str) DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && !str->alloc(11)) + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !str->alloc(11)) { make_date((DATE_TIME_FORMAT *) 0, <ime, str); return str; @@ -2311,7 +2330,7 @@ String *Item_func_add_time::val_str(String *str) l_time3.neg= 0; if (is_date) // TIMESTAMP function { - if (get_arg0_date(&l_time1,1) || + if (get_arg0_date(&l_time1, TIME_FUZZY_DATE) || args[1]->get_time(&l_time2) || l_time1.time_type == MYSQL_TIMESTAMP_TIME || l_time2.time_type != MYSQL_TIMESTAMP_TIME) @@ -2414,6 +2433,79 @@ void Item_func_add_time::print(String *str) /* + SYNOPSIS + calc_time_diff() + l_time1 TIME/DATE/DATETIME value + l_time2 TIME/DATE/DATETIME value + l_sign Can be 1 (operation of addition) + or -1 (substraction) + seconds_out Returns count of seconds bitween + l_time1 and l_time2 + microseconds_out Returns count of microseconds bitween + l_time1 and l_time2. + + DESCRIPTION + Calculates difference in seconds(seconds_out) + and microseconds(microseconds_out) + bitween two TIME/DATE/DATETIME values. + + RETURN VALUES + Rertuns sign of difference. + 1 means negative result + 0 means positive result + +*/ + +bool calc_time_diff(TIME *l_time1,TIME *l_time2, int l_sign, + longlong *seconds_out, long *microseconds_out) +{ + long days; + bool neg; + longlong seconds= *seconds_out; + long microseconds= *microseconds_out; + + /* + We suppose that if first argument is MYSQL_TIMESTAMP_TIME + the second argument should be TIMESTAMP_TIME also. + We should check it before calc_time_diff call. + */ + if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value + days= l_time1->day - l_sign*l_time2->day; + else // DateTime value + days= (calc_daynr((uint) l_time1->year, + (uint) l_time1->month, + (uint) l_time1->day) - + l_sign*calc_daynr((uint) l_time2->year, + (uint) l_time2->month, + (uint) l_time2->day)); + + microseconds= l_time1->second_part - l_sign*l_time2->second_part; + seconds= ((longlong) days*86400L + l_time1->hour*3600L + + l_time1->minute*60L + l_time1->second + microseconds/1000000L - + (longlong)l_sign*(l_time2->hour*3600L+l_time2->minute*60L+l_time2->second)); + + neg= 0; + if (seconds < 0) + { + seconds= -seconds; + neg= 1; + } + else if (seconds == 0 && microseconds < 0) + { + microseconds= -microseconds; + neg= 1; + } + if (microseconds < 0) + { + microseconds+= 1000000L; + seconds--; + } + *seconds_out= seconds; + *microseconds_out= microseconds; + return neg; +} + +/* TIMEDIFF(t,s) is a time function that calculates the time value between a start and end time. 
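calc_time_diff() above returns its result as a non-negative (seconds, microseconds) magnitude plus a separate sign flag, borrowing from the seconds part when the microsecond remainder goes negative. A minimal standalone sketch of just that final normalisation step — not part of the patch; the TimeDiff struct and normalize_diff name are invented for illustration:

```cpp
// Standalone illustration of the sign/borrow normalisation performed at the
// end of calc_time_diff(): fold a raw (seconds, microseconds) difference into
// a non-negative magnitude plus a negative-result flag.
#include <cstdio>

struct TimeDiff
{
  long long seconds;
  long microseconds;
  bool neg;
};

static TimeDiff normalize_diff(long long seconds, long microseconds)
{
  TimeDiff d= { seconds, microseconds, false };
  if (d.seconds < 0)
  {
    d.seconds= -d.seconds;
    d.neg= true;
  }
  else if (d.seconds == 0 && d.microseconds < 0)
  {
    d.microseconds= -d.microseconds;
    d.neg= true;
  }
  if (d.microseconds < 0)
  {
    d.microseconds+= 1000000L;  // borrow one second
    d.seconds--;
  }
  return d;
}

int main()
{
  TimeDiff d= normalize_diff(0, -250000);   // a difference of -0.25 seconds
  std::printf("%s%lld.%06ld\n", d.neg ? "-" : "", d.seconds, d.microseconds);
  return 0;
}
```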
@@ -2426,7 +2518,6 @@ String *Item_func_timediff::val_str(String *str) DBUG_ASSERT(fixed == 1); longlong seconds; long microseconds; - long days; int l_sign= 1; TIME l_time1 ,l_time2, l_time3; @@ -2439,40 +2530,16 @@ String *Item_func_timediff::val_str(String *str) if (l_time1.neg != l_time2.neg) l_sign= -l_sign; - if (l_time1.time_type == MYSQL_TIMESTAMP_TIME) // Time value - days= l_time1.day - l_sign*l_time2.day; - else // DateTime value - days= (calc_daynr((uint) l_time1.year, - (uint) l_time1.month, - (uint) l_time1.day) - - l_sign*calc_daynr((uint) l_time2.year, - (uint) l_time2.month, - (uint) l_time2.day)); - - microseconds= l_time1.second_part - l_sign*l_time2.second_part; - seconds= ((longlong) days*86400L + l_time1.hour*3600L + - l_time1.minute*60L + l_time1.second + microseconds/1000000L - - (longlong)l_sign*(l_time2.hour*3600L+l_time2.minute*60L+ - l_time2.second)); + l_time3.neg= calc_time_diff(&l_time1,&l_time2, l_sign, + &seconds, µseconds); - l_time3.neg= 0; - if (seconds < 0) - { - seconds= -seconds; - l_time3.neg= 1; - } - else if (seconds == 0 && microseconds < 0) - { - microseconds= -microseconds; - l_time3.neg= 1; - } - if (microseconds < 0) - { - microseconds+= 1000000L; - seconds--; - } + /* + For MYSQL_TIMESTAMP_TIME only: + If both argumets are negative values and diff between them + is negative we need to swap sign as result should be positive. + */ if ((l_time2.neg == l_time1.neg) && l_time1.neg) - l_time3.neg= l_time3.neg ? 0 : 1; + l_time3.neg= 1-l_time3.neg; // Swap sign of result calc_time_from_sec(&l_time3, (long) seconds, microseconds); @@ -2541,6 +2608,165 @@ longlong Item_func_microsecond::val_int() } +longlong Item_func_timestamp_diff::val_int() +{ + TIME ltime1, ltime2; + longlong seconds; + long microseconds; + long months= 0; + int neg= 1; + + null_value= 0; + if (args[0]->get_date(<ime1, TIME_NO_ZERO_DATE) || + args[1]->get_date(<ime2, TIME_NO_ZERO_DATE)) + goto null_date; + + if (calc_time_diff(<ime2,<ime1, 1, + &seconds, µseconds)) + neg= -1; + + if (int_type == INTERVAL_YEAR || + int_type == INTERVAL_QUARTER || + int_type == INTERVAL_MONTH) + { + uint year; + uint year_beg, year_end, month_beg, month_end; + uint diff_days= (uint) (seconds/86400L); + uint diff_years= 0; + if (neg == -1) + { + year_beg= ltime2.year; + year_end= ltime1.year; + month_beg= ltime2.month; + month_end= ltime1.month; + } + else + { + year_beg= ltime1.year; + year_end= ltime2.year; + month_beg= ltime1.month; + month_end= ltime2.month; + } + /* calc years*/ + for (year= year_beg;year < year_end; year++) + { + uint days=calc_days_in_year(year); + if (days > diff_days) + break; + diff_days-= days; + diff_years++; + } + + /* calc months; Current year is in the 'year' variable */ + month_beg--; /* Change months to be 0-11 for easier calculation */ + month_end--; + + months= 12*diff_years; + while (month_beg != month_end) + { + uint m_days= (uint) days_in_month[month_beg]; + if (month_beg == 1) + { + /* This is only calculated once so there is no reason to cache it*/ + uint leap= (uint) ((year & 3) == 0 && (year%100 || + (year%400 == 0 && year))); + m_days+= leap; + } + if (m_days > diff_days) + break; + diff_days-= m_days; + months++; + if (month_beg++ == 11) /* if we wrap to next year */ + { + month_beg= 0; + year++; + } + } + if (neg == -1) + months= -months; + } + + switch (int_type) { + case INTERVAL_YEAR: + return months/12; + case INTERVAL_QUARTER: + return months/3; + case INTERVAL_MONTH: + return months; + case INTERVAL_WEEK: + return seconds/86400L/7L*neg; + case 
INTERVAL_DAY: + return seconds/86400L*neg; + case INTERVAL_HOUR: + return seconds/3600L*neg; + case INTERVAL_MINUTE: + return seconds/60L*neg; + case INTERVAL_SECOND: + return seconds*neg; + case INTERVAL_MICROSECOND: + { + longlong max_sec= LONGLONG_MAX/1000000; + if (max_sec > seconds || + max_sec == seconds && LONGLONG_MAX%1000000 >= microseconds) + return (longlong) (seconds*1000000L+microseconds)*neg; + goto null_date; + } + default: + break; + } + +null_date: + null_value=1; + return 0; +} + + +void Item_func_timestamp_diff::print(String *str) +{ + str->append(func_name()); + str->append('('); + + switch (int_type) { + case INTERVAL_YEAR: + str->append("YEAR"); + break; + case INTERVAL_QUARTER: + str->append("QUARTER"); + break; + case INTERVAL_MONTH: + str->append("MONTH"); + break; + case INTERVAL_WEEK: + str->append("WEEK"); + break; + case INTERVAL_DAY: + str->append("DAY"); + break; + case INTERVAL_HOUR: + str->append("HOUR"); + break; + case INTERVAL_MINUTE: + str->append("MINUTE"); + break; + case INTERVAL_SECOND: + str->append("SECOND"); + break; + case INTERVAL_MICROSECOND: + str->append("SECOND_FRAC"); + break; + default: + break; + } + + for (uint i=0 ; i < 2 ; i++) + { + str->append(','); + args[i]->print(str); + } + str->append(')'); +} + + String *Item_func_get_format::val_str(String *str) { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index df0b05d6d42..694f2dc583c 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -593,11 +593,11 @@ public: enum interval_type { - INTERVAL_YEAR, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, - INTERVAL_SECOND, INTERVAL_MICROSECOND ,INTERVAL_YEAR_MONTH, - INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, - INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, - INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, + INTERVAL_YEAR, INTERVAL_QUARTER, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, + INTERVAL_MINUTE, INTERVAL_WEEK, INTERVAL_SECOND, INTERVAL_MICROSECOND , + INTERVAL_YEAR_MONTH, INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, + INTERVAL_DAY_SECOND, INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, + INTERVAL_MINUTE_SECOND, INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, INTERVAL_MINUTE_MICROSECOND, INTERVAL_SECOND_MICROSECOND }; @@ -838,6 +838,23 @@ public: }; +class Item_func_timestamp_diff :public Item_int_func +{ + const interval_type int_type; +public: + Item_func_timestamp_diff(Item *a,Item *b,interval_type type_arg) + :Item_int_func(a,b), int_type(type_arg) {} + const char *func_name() const { return "timestamp_diff"; } + longlong val_int(); + void fix_length_and_dec() + { + decimals=0; + maybe_null=1; + } + void print(String *str); +}; + + enum date_time_format { USA_FORMAT, JIS_FORMAT, ISO_FORMAT, EUR_FORMAT, INTERNAL_FORMAT diff --git a/sql/key.cc b/sql/key.cc index b1f4c9533a9..15d84b73beb 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -66,94 +66,121 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) } - /* Copy a key from record to some buffer */ - /* if length == 0 then copy whole key */ +/* + Copy part of a record that forms a key or key prefix to a buffer. + + SYNOPSIS + key_copy() + to_key buffer that will be used as a key + from_record full record to be copied from + key_info descriptor of the index + key_length specifies length of all keyparts that will be copied + + DESCRIPTION + The function takes a complete table record (as e.g. 
retrieved by + handler::index_read()), and a description of an index on the same table, + and extracts the first key_length bytes of the record which are part of a + key into to_key. If length == 0 then copy all bytes from the record that + form a key. + + RETURN + None +*/ -void key_copy(byte *key,TABLE *table,uint idx,uint key_length) +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) - key_length=key_info->key_length; - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + key_length= key_info->key_length; + for (key_part= key_info->key_part; (int) key_length > 0; key_part++) { if (key_part->null_bit) { - *key++= test(table->record[0][key_part->null_offset] & + *to_key++= test(from_record[key_part->null_offset] & key_part->null_bit); key_length--; } if (key_part->key_part_flag & HA_BLOB_PART) { char *pos; - ulong blob_length=((Field_blob*) key_part->field)->get_length(); - key_length-=2; + ulong blob_length= ((Field_blob*) key_part->field)->get_length(); + key_length-= 2; ((Field_blob*) key_part->field)->get_ptr(&pos); - length=min(key_length,key_part->length); - set_if_smaller(blob_length,length); - int2store(key,(uint) blob_length); - key+=2; // Skip length info - memcpy(key,pos,blob_length); + length=min(key_length, key_part->length); + set_if_smaller(blob_length, length); + int2store(to_key, (uint) blob_length); + to_key+= 2; // Skip length info + memcpy(to_key, pos, blob_length); } else { - length=min(key_length,key_part->length); - memcpy(key,table->record[0]+key_part->offset,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_key, from_record + key_part->offset, (size_t) length); } - key+=length; - key_length-=length; + to_key+= length; + key_length-= length; } -} /* key_copy */ +} - /* restore a key from some buffer to record */ +/* + Restore a key from some buffer to record. + + SYNOPSIS + key_restore() + to_record record buffer where the key will be restored to + from_key buffer that contains a key + key_info descriptor of the index + key_length specifies length of all keyparts that will be restored + + DESCRIPTION + This function converts a key into record format. It can be used in cases + when we want to return a key as a result row. 
+ + RETURN + None +*/ -void key_restore(TABLE *table,byte *key,uint idx,uint key_length) +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) { - if (idx == (uint) -1) - return; - key_length=key_info->key_length; + key_length= key_info->key_length; } - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + for (key_part= key_info->key_part ; (int) key_length > 0 ; key_part++) { if (key_part->null_bit) { - if (*key++) - table->record[0][key_part->null_offset]|= key_part->null_bit; + if (*from_key++) + to_record[key_part->null_offset]|= key_part->null_bit; else - table->record[0][key_part->null_offset]&= ~key_part->null_bit; + to_record[key_part->null_offset]&= ~key_part->null_bit; key_length--; } if (key_part->key_part_flag & HA_BLOB_PART) { - uint blob_length=uint2korr(key); - key+=2; - key_length-=2; + uint blob_length= uint2korr(from_key); + from_key+= 2; + key_length-= 2; ((Field_blob*) key_part->field)->set_ptr((ulong) blob_length, - (char*) key); - length=key_part->length; + (char*) from_key); + length= key_part->length; } else { - length=min(key_length,key_part->length); - memcpy(table->record[0]+key_part->offset,key,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_record + key_part->offset, from_key, (size_t) length); } - key+=length; - key_length-=length; + from_key+= length; + key_length-= length; } -} /* key_restore */ +} /* diff --git a/sql/lock.cc b/sql/lock.cc index 646babea6a1..6d5ca5bf6b4 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -152,7 +152,7 @@ retry: thd->proc_info=0; if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); if (sql_lock) { mysql_unlock_tables(thd,sql_lock); @@ -578,7 +578,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) static bool locked_named_table(THD *thd, TABLE_LIST *table_list) { - for (; table_list ; table_list=table_list->next) + for (; table_list ; table_list=table_list->next_local) { if (table_list->table && table_is_used(table_list->table,0)) return 1; @@ -632,7 +632,7 @@ bool lock_table_names(THD *thd, TABLE_LIST *table_list) bool got_all_locks=1; TABLE_LIST *lock_table; - for (lock_table=table_list ; lock_table ; lock_table=lock_table->next) + for (lock_table= table_list; lock_table; lock_table= lock_table->next_local) { int got_lock; if ((got_lock=lock_table_name(thd,lock_table)) < 0) @@ -675,7 +675,9 @@ end: void unlock_table_names(THD *thd, TABLE_LIST *table_list, TABLE_LIST *last_table) { - for (TABLE_LIST *table=table_list ; table != last_table ; table=table->next) + for (TABLE_LIST *table= table_list; + table != last_table; + table= table->next_local) unlock_table_name(thd,table); pthread_cond_broadcast(&COND_refresh); } diff --git a/sql/log_event.h b/sql/log_event.h index 8070c334d8b..a1c02b0e3c0 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1,15 +1,15 @@ /* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -35,12 +35,42 @@ #define LOG_EVENT_OFFSET 4 -#define BINLOG_VERSION 3 +/* + 3 is MySQL 4.x; 4 is MySQL 5.0.0. + Compared to version 3, version 4 has: + - a different Start_log_event, which includes info about the binary log + (sizes of headers); this info is included for better compatibility if the + master's MySQL version is different from the slave's. + - all events have a unique ID (the triplet (server_id, timestamp at server + start, other) to be sure an event is not executed more than once in a + multimaster setup, example: + M1 + / \ + v v + M2 M3 + \ / + v v + S + if a query is run on M1, it will arrive twice on S, so we need that S + remembers the last unique ID it has processed, to compare and know if the + event should be skipped or not. Example of ID: we already have the server id + (4 bytes), plus: + timestamp_when_the_master_started (4 bytes), a counter (a sequence number + which increments every time we write an event to the binlog) (3 bytes). + Q: how do we handle when the counter is overflowed and restarts from 0 ? + + - Query and Load (Create or Execute) events may have a more precise timestamp + (with microseconds), number of matched/affected/warnings rows + and fields of session variables: SQL_MODE, + FOREIGN_KEY_CHECKS, UNIQUE_CHECKS, SQL_AUTO_IS_NULL, the collations and + charsets, the PASSWORD() version (old/new/...). +*/ +#define BINLOG_VERSION 4 /* We could have used SERVER_VERSION_LENGTH, but this introduces an obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH - this would have broken the replication protocol + this would break the replication protocol */ #define ST_SERVER_VER_LEN 50 @@ -49,6 +79,12 @@ TERMINATED etc). */ +/* + These are flags and structs to handle all the LOAD DATA INFILE options (LINES + TERMINATED etc). + DUMPFILE_FLAG is probably useless (DUMPFILE is a clause of SELECT, not of LOAD + DATA). +*/ #define DUMPFILE_FLAG 0x1 #define OPT_ENCLOSED_FLAG 0x2 #define REPLACE_FLAG 0x4 @@ -92,18 +128,18 @@ struct sql_ex_info char* escaped; int cached_new_format; uint8 field_term_len,enclosed_len,line_term_len,line_start_len, escaped_len; - char opt_flags; + char opt_flags; char empty_flags; - + // store in new format even if old is possible - void force_new_format() { cached_new_format = 1;} + void force_new_format() { cached_new_format = 1;} int data_size() { return (new_format() ? field_term_len + enclosed_len + line_term_len + line_start_len + escaped_len + 6 : 7); } - int write_data(IO_CACHE* file); + bool write_data(IO_CACHE* file); char* init(char* buf,char* buf_end,bool use_new_format); bool new_format() { @@ -136,19 +172,31 @@ struct sql_ex_info #define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */ #define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */ +/* + Fixed header length, where 4.x and 5.0 agree. That is, 5.0 may have a longer + header (it will for sure when we have the unique event's ID), but at least + the first 19 bytes are the same in 4.x and 5.0. So when we have the unique + event's ID, LOG_EVENT_HEADER_LEN will be something like 26, but + LOG_EVENT_MINIMAL_HEADER_LEN will remain 19. 
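As a reading aid (a sketch, not text from the patch), those 19 shared bytes break down as follows; the last two offsets correspond to the LOG_POS_OFFSET and FLAGS_OFFSET defines further down:

  /*
    common (minimal) event header, 19 bytes:
      +0   4 bytes  timestamp of the event
      +4   1 byte   event type code (Log_event_type)
      +5   4 bytes  server_id of the originating server
      +9   4 bytes  total length of the event, header included
      +13  4 bytes  log_pos (offset in the binlog, see LOG_POS_OFFSET)
      +17  2 bytes  flags                        4+1+4+4+4+2 = 19
  */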
+*/ +#define LOG_EVENT_MINIMAL_HEADER_LEN 19 /* event-specific post-header sizes */ -#define QUERY_HEADER_LEN (4 + 4 + 1 + 2) +// where 3.23, 4.x and 5.0 agree +#define QUERY_HEADER_MINIMAL_LEN (4 + 4 + 1 + 2) +// where 5.0 differs: 2 for len of N-bytes vars. +#define QUERY_HEADER_LEN (QUERY_HEADER_MINIMAL_LEN + 2) #define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4) -#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) -#define ROTATE_HEADER_LEN 8 +#define START_V3_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) +#define ROTATE_HEADER_LEN 8 // this is FROZEN (the Rotate post-header is frozen) #define CREATE_FILE_HEADER_LEN 4 #define APPEND_BLOCK_HEADER_LEN 4 #define EXEC_LOAD_HEADER_LEN 4 #define DELETE_FILE_HEADER_LEN 4 +#define FORMAT_DESCRIPTION_HEADER_LEN (START_V3_HEADER_LEN+1+LOG_EVENT_TYPES) -/* - Event header offsets; +/* + Event header offsets; these point to places inside the fixed header. */ @@ -158,11 +206,12 @@ struct sql_ex_info #define LOG_POS_OFFSET 13 #define FLAGS_OFFSET 17 -/* start event post-header */ +/* start event post-header (for v3 and v4) */ #define ST_BINLOG_VER_OFFSET 0 #define ST_SERVER_VER_OFFSET 2 #define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN) +#define ST_COMMON_HEADER_LEN_OFFSET (ST_CREATED_OFFSET + 4) /* slave event post-header (this event is never written) */ @@ -176,7 +225,13 @@ struct sql_ex_info #define Q_EXEC_TIME_OFFSET 4 #define Q_DB_LEN_OFFSET 8 #define Q_ERR_CODE_OFFSET 9 +#define Q_STATUS_VARS_LEN_OFFSET 11 #define Q_DATA_OFFSET QUERY_HEADER_LEN +/* these are codes, not offsets; not more than 256 values (1 byte). */ +#define Q_FLAGS2_CODE 0 +#define Q_SQL_MODE_CODE 1 +#define Q_CATALOG_CODE 2 +#define Q_AUTO_INCREMENT 3 /* Intvar event post-header */ @@ -228,16 +283,6 @@ struct sql_ex_info /* DF = "Delete File" */ #define DF_FILE_ID_OFFSET 0 -#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN) -#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN) -#define CREATE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+\ - +LOAD_HEADER_LEN+CREATE_FILE_HEADER_LEN) -#define DELETE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+DELETE_FILE_HEADER_LEN) -#define EXEC_LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+EXEC_LOAD_HEADER_LEN) -#define APPEND_BLOCK_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+APPEND_BLOCK_HEADER_LEN) - /* 4 bytes which all binlogs should begin with */ #define BINLOG_MAGIC "\xfe\x62\x69\x6e" @@ -249,30 +294,69 @@ struct sql_ex_info So they are now removed and their place may later be reused for other flags. Then one must remember that Rotate events in 4.x have LOG_EVENT_FORCED_ROTATE_F set, so one should not rely on the value of the - replacing flag when reading a Rotate event. + replacing flag when reading a Rotate event. I keep the defines here just to remember what they were. */ #ifdef TO_BE_REMOVED #define LOG_EVENT_TIME_F 0x1 -#define LOG_EVENT_FORCED_ROTATE_F 0x2 +#define LOG_EVENT_FORCED_ROTATE_F 0x2 #endif -/* +/* If the query depends on the thread (for example: TEMPORARY TABLE). Currently this is used by mysqlbinlog to know it must print SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it for every query but this would be slow). */ -#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 +#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 + +/* + OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must be written + to the binlog. 
OPTIONS_WRITTEN_TO_BINLOG could be written into the + Format_description_log_event, so that if later we don't want to replicate a + variable we did replicate, or the contrary, it's doable. But it should not be + too hard to decide once for all of what we replicate and what we don't, among + the fixed 32 bits of thd->options. + I (Guilhem) have read through every option's usage, and it looks like + OPTION_AUTO_IS_NULL and OPTION_NO_FOREIGN_KEYS are the only ones which alter + how the query modifies the table. It's good to replicate + OPTION_RELAXED_UNIQUE_CHECKS too because otherwise, the slave may insert data + slower than the master, in InnoDB. + OPTION_BIG_SELECTS is not needed (the slave thread runs with + max_join_size=HA_POS_ERROR) and OPTION_BIG_TABLES is not needed either, as + the manual says (because a too big in-memory temp table is automatically + written to disk). +*/ +#define OPTIONS_WRITTEN_TO_BIN_LOG (OPTION_AUTO_IS_NULL | \ +OPTION_NO_FOREIGN_KEY_CHECKS | OPTION_RELAXED_UNIQUE_CHECKS) enum Log_event_type { - UNKNOWN_EVENT= 0, START_EVENT= 1, QUERY_EVENT= 2, STOP_EVENT= 3, - ROTATE_EVENT= 4, INTVAR_EVENT= 5, LOAD_EVENT=6, SLAVE_EVENT= 7, - CREATE_FILE_EVENT= 8, APPEND_BLOCK_EVENT= 9, EXEC_LOAD_EVENT= 10, - DELETE_FILE_EVENT= 11, NEW_LOAD_EVENT= 12, RAND_EVENT= 13, - USER_VAR_EVENT= 14 + /* + Every time you update this enum (when you add a type), you have to + update the code of Format_description_log_event::Format_description_log_event(). + Make sure you always insert new types ***BEFORE*** ENUM_END_EVENT. + */ + UNKNOWN_EVENT= 0, START_EVENT_V3, QUERY_EVENT, STOP_EVENT, ROTATE_EVENT, + INTVAR_EVENT, LOAD_EVENT, SLAVE_EVENT, CREATE_FILE_EVENT, + APPEND_BLOCK_EVENT, EXEC_LOAD_EVENT, DELETE_FILE_EVENT, + /* + NEW_LOAD_EVENT is like LOAD_EVENT except that it has a longer sql_ex, + allowing multibyte TERMINATED BY etc; both types share the same class + (Load_log_event) + */ + NEW_LOAD_EVENT, + RAND_EVENT, USER_VAR_EVENT, + FORMAT_DESCRIPTION_EVENT, + ENUM_END_EVENT /* end marker */ }; +/* + The number of types we handle in Format_description_log_event (UNKNOWN_EVENT + is not to be handled, it does not exist in binlogs, it does not have a + format). +*/ +#define LOG_EVENT_TYPES (ENUM_END_EVENT-1) + enum Int_event_type { INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2 @@ -285,8 +369,35 @@ class MYSQL_LOG; class THD; #endif +class Format_description_log_event; + struct st_relay_log_info; +#ifdef MYSQL_CLIENT +/* + A structure for mysqlbinlog to remember the last db, flags2, sql_mode etc; it + is passed to events' print() methods, so that they print only the necessary + USE and SET commands. +*/ +typedef struct st_last_event_info +{ + // TODO: have the last catalog here ?? 
+ char db[FN_REFLEN+1]; // TODO: make this a LEX_STRING when thd->db is + bool flags2_inited; + uint32 flags2; + bool sql_mode_inited; + ulong sql_mode; /* must be same as THD.variables.sql_mode */ + ulong auto_increment_increment, auto_increment_offset; + st_last_event_info() + :flags2_inited(0), flags2(0), sql_mode_inited(0), sql_mode(0), + auto_increment_increment(1),auto_increment_offset(1) + { + db[0]= 0; /* initially, the db is unknown */ + } +} LAST_EVENT_INFO; +#endif + + /***************************************************************************** Log_event class @@ -297,37 +408,41 @@ struct st_relay_log_info; class Log_event { public: - /* - The offset in the log where this event originally appeared (it is preserved - in relay logs, making SHOW SLAVE STATUS able to print coordinates of the - event in the master's binlog). Note: when a transaction is written by the - master to its binlog (wrapped in BEGIN/COMMIT) the log_pos of all the - queries it contains is the one of the BEGIN (this way, when one does SHOW - SLAVE STATUS it sees the offset of the BEGIN, which is logical as rollback - may occur), except the COMMIT query which has its real offset. + /* + The offset in the log where this event originally appeared (it is + preserved in relay logs, making SHOW SLAVE STATUS able to print + coordinates of the event in the master's binlog). Note: when a + transaction is written by the master to its binlog (wrapped in + BEGIN/COMMIT) the log_pos of all the queries it contains is the + one of the BEGIN (this way, when one does SHOW SLAVE STATUS it + sees the offset of the BEGIN, which is logical as rollback may + occur), except the COMMIT query which has its real offset. */ my_off_t log_pos; - /* + /* A temp buffer for read_log_event; it is later analysed according to the event's type, and its content is distributed in the event-specific fields. */ - char *temp_buf; + char *temp_buf; /* - Timestamp on the master(for debugging and replication of NOW()/TIMESTAMP). - It is important for queries and LOAD DATA INFILE. This is set at the event's - creation time, except for Query and Load (et al.) events where this is set - at the query's execution time, which guarantees good replication (otherwise, - we could have a query and its event with different timestamps). + Timestamp on the master(for debugging and replication of + NOW()/TIMESTAMP). It is important for queries and LOAD DATA + INFILE. This is set at the event's creation time, except for Query + and Load (et al.) events where this is set at the query's + execution time, which guarantees good replication (otherwise, we + could have a query and its event with different timestamps). */ time_t when; /* The number of seconds the query took to run on the master. */ ulong exec_time; - /* - The master's server id (is preserved in the relay log; used to prevent from - infinite loops in circular replication). + /* Number of bytes written by write() function */ + ulong data_written; + + /* + The master's server id (is preserved in the relay log; used to prevent from + infinite loops in circular replication). */ uint32 server_id; - uint cached_event_len; /* Some 16 flags. 
Only one is really used now; look above for @@ -337,28 +452,32 @@ public: uint16 flags; bool cache_stmt; + #ifndef MYSQL_CLIENT THD* thd; - Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); Log_event(); + Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); /* - read_log_event() functions read an event from a binlog or relay log; used by - SHOW BINLOG EVENTS, the binlog_dump thread on the master (reads master's - binlog), the slave IO thread (reads the event sent by binlog_dump), the - slave SQL thread (reads the event from the relay log). + read_log_event() functions read an event from a binlog or relay + log; used by SHOW BINLOG EVENTS, the binlog_dump thread on the + master (reads master's binlog), the slave IO thread (reads the + event sent by binlog_dump), the slave SQL thread (reads the event + from the relay log). If mutex is 0, the read will proceed without + mutex. We need the description_event to be able to parse the + event (to know the post-header's size); in fact in read_log_event + we detect the event's type, then call the specific event's + constructor and pass description_event as an argument. */ - // if mutex is 0, the read will proceed without mutex static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - bool old_format); + const Format_description_log_event *description_event); static int read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock); - /* set_log_pos() is used to fill log_pos with tell(log). */ - void set_log_pos(MYSQL_LOG* log); /* - init_show_field_list() prepares the column names and types for the output of - SHOW BINLOG EVENTS; it is used only by SHOW BINLOG EVENTS. + init_show_field_list() prepares the column names and types for the + output of SHOW BINLOG EVENTS; it is used only by SHOW BINLOG + EVENTS. */ static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION @@ -379,13 +498,15 @@ public: return thd ? 
thd->db : 0; } #else - // avoid having to link mysqlbinlog against libpthread - static Log_event* read_log_event(IO_CACHE* file, bool old_format); + Log_event() : temp_buf(0) {} + /* avoid having to link mysqlbinlog against libpthread */ + static Log_event* read_log_event(IO_CACHE* file, + const Format_description_log_event *description_event); /* print*() functions are used by mysqlbinlog */ - virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0; + virtual void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0) = 0; void print_timestamp(FILE* file, time_t *ts = 0); void print_header(FILE* file); -#endif +#endif static void *operator new(size_t size) { @@ -395,19 +516,23 @@ public: { my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR)); } - - int write(IO_CACHE* file); - int write_header(IO_CACHE* file); - virtual int write_data(IO_CACHE* file) - { return write_data_header(file) || write_data_body(file); } - virtual int write_data_header(IO_CACHE* file __attribute__((unused))) + + bool write_header(IO_CACHE* file, ulong data_length); + virtual bool write(IO_CACHE* file) + { + return (write_header(file, get_data_size()) || + write_data_header(file) || + write_data_body(file)); + } + virtual bool is_artificial_event() { return 0; } + virtual bool write_data_header(IO_CACHE* file) { return 0; } - virtual int write_data_body(IO_CACHE* file __attribute__((unused))) + virtual bool write_data_body(IO_CACHE* file __attribute__((unused))) { return 0; } virtual Log_event_type get_type_code() = 0; - virtual bool is_valid() = 0; + virtual bool is_valid() const = 0; inline bool get_cache_stmt() { return cache_stmt; } - Log_event(const char* buf, bool old_format); + Log_event(const char* buf, const Format_description_log_event* description_event); virtual ~Log_event() { free_temp_buf();} void register_temp_buf(char* buf) { temp_buf = buf; } void free_temp_buf() @@ -419,18 +544,30 @@ public: } } virtual int get_data_size() { return 0;} - virtual int get_data_body_offset() { return 0; } - int get_event_len() - { - return (cached_event_len ? cached_event_len : - (cached_event_len = LOG_EVENT_HEADER_LEN + get_data_size())); - } - static Log_event* read_log_event(const char* buf, int event_len, - const char **error, bool old_format); + /* + Get event length for simple events. For complicated events the length + is calculated during write() + */ + static Log_event* read_log_event(const char* buf, uint event_len, + const char **error, + const Format_description_log_event + *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); }; +/* + One class for each type of event. + Two constructors for each class: + - one to create the event for logging (when the server acts as a master), + called after an update to the database is done, + which accepts parameters like the query, the database, the options for LOAD + DATA INFILE... + - one to create the event from a packet (when the server acts as a slave), + called before reproducing the update, which accepts parameters (like a + buffer). Used to read from the master, from the relay log, and in + mysqlbinlog. This constructor must be format-tolerant. 
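A rough sketch of how such a format-tolerant, client-side reader (mysqlbinlog-style) fits together; it assumes an already opened IO_CACHE* named file, a LAST_EVENT_INFO named last_event_info, and starts from a 3.23/4.x format guess until the log describes itself (all of these names are assumptions, not this patch's code):

  Format_description_log_event *description_event=
    new Format_description_log_event(3);  /* assume v3 until told otherwise */
  Log_event *ev;
  while ((ev= Log_event::read_log_event(file, description_event)))
  {
    if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
    {
      /* the binlog announces its own format; parse later events with it */
      delete description_event;
      description_event= (Format_description_log_event*) ev;
    }
    else
    {
      ev->print(stdout, 0, &last_event_info);
      delete ev;
    }
  }
  delete description_event;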
+*/ /***************************************************************************** @@ -445,6 +582,7 @@ protected: char* data_buf; public: const char* query; + const char* catalog; const char* db; /* If we already know the length of the query string @@ -455,13 +593,60 @@ public: uint32 db_len; uint16 error_code; ulong thread_id; - /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). + /* + For events created by Query_log_event::exec_event (and + Load_log_event::exec_event()) we need the *original* thread id, to be able + to log the event with the original (=master's) thread id (fix for + BUG#1686). */ ulong slave_proxy_id; + + /* + Binlog format 3 and 4 start to differ (as far as class members are + concerned) from here. + */ + + int catalog_len; // <= 255 char; -1 means uninited + + /* + We want to be able to store a variable number of N-bit status vars: + (generally N=32; but N=64 for SQL_MODE) a user may want to log the number + of affected rows (for debugging) while another does not want to lose 4 + bytes in this. + The storage on disk is the following: + status_vars_len is part of the post-header, + status_vars are in the variable-length part, after the post-header, before + the db & query. + status_vars on disk is a sequence of pairs (code, value) where 'code' means + 'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so + its first byte is its length. For now the order of status vars is: + flags2 - sql_mode - catalog. + We should add the same thing to Load_log_event, but in fact + LOAD DATA INFILE is going to be logged with a new type of event (logging of + the plain text query), so Load_log_event would be frozen, so no need. The + new way of logging LOAD DATA INFILE would use a derived class of + Query_log_event, so automatically benefit from the work already done for + status variables in Query_log_event. + */ + uint16 status_vars_len; + + /* + 'flags2' is a second set of flags (on top of those in Log_event), for + session variables. These are thd->options which is & against a mask + (OPTIONS_WRITTEN_TO_BINLOG). + flags2_inited helps make a difference between flags2==0 (3.23 or 4.x + master, we don't know flags2, so use the slave server's global options) and + flags2==0 (5.0 master, we know this has a meaning of flags all down which + must influence the query). 
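To make that on-disk layout concrete, here is a minimal decoding sketch (not the patch's actual reader; 'pos' and 'len' are assumed locals, uint4korr/uint8korr are the usual little-endian read macros, and the handling of an unknown code is simplified). On the writing side flags2 is simply thd->options masked with OPTIONS_WRITTEN_TO_BIN_LOG:

  /* 'pos' points just past the post-header, 'len' is status_vars_len */
  const char *end= pos + len;
  while (pos < end)
  {
    switch ((uchar) *pos++) {
    case Q_FLAGS2_CODE:                       /* 4-byte flags2 */
      flags2= uint4korr(pos);          pos+= 4;  flags2_inited= 1;   break;
    case Q_SQL_MODE_CODE:                     /* 8 bytes: sql_mode may grow to 64 bits */
      sql_mode= (ulong) uint8korr(pos); pos+= 8;  sql_mode_inited= 1; break;
    case Q_CATALOG_CODE:                      /* length-prefixed string */
      catalog_len= (uchar) *pos++;
      catalog= pos;                    pos+= catalog_len;            break;
    default:                                  /* unknown status var: stop here */
      pos= end;
    }
  }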
+ */ + bool flags2_inited; + bool sql_mode_inited; + + uint32 flags2; + /* In connections sql_mode is 32 bits now but will be 64 bits soon */ + ulong sql_mode; + ulong auto_increment_increment, auto_increment_offset; + #ifndef MYSQL_CLIENT Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, @@ -472,10 +657,11 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Query_log_event(const char* buf, int event_len, bool old_format); + Query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event); ~Query_log_event() { if (data_buf) @@ -484,17 +670,8 @@ public: } } Log_event_type get_type_code() { return QUERY_EVENT; } - int write(IO_CACHE* file); - int write_data(IO_CACHE* file); // returns 0 on success, -1 on error - bool is_valid() { return query != 0; } - int get_data_size() - { - return (q_len + db_len + 2 - + 4 // thread_id - + 4 // exec_time - + 2 // error_code - ); - } + bool write(IO_CACHE* file); + bool is_valid() const { return query != 0; } }; #ifdef HAVE_REPLICATION @@ -504,6 +681,7 @@ public: Slave Log Event class Note that this class is currently not used at all; no code writes a Slave_log_event (though some code in repl_failsafe.cc reads Slave_log_event). + So it's not a problem if this code is not maintained. ****************************************************************************/ class Slave_log_event: public Log_event @@ -519,20 +697,20 @@ public: int master_log_len; uint16 master_port; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Slave_log_event(const char* buf, int event_len); + Slave_log_event(const char* buf, uint event_len); ~Slave_log_event(); int get_data_size(); - bool is_valid() { return master_host != 0; } + bool is_valid() const { return master_host != 0; } Log_event_type get_type_code() { return SLAVE_EVENT; } - int write_data(IO_CACHE* file ); + bool write(IO_CACHE* file); }; #endif /* HAVE_REPLICATION */ @@ -546,12 +724,18 @@ public: class Load_log_event: public Log_event { protected: - int copy_log_event(const char *buf, ulong event_len, bool old_format); + int copy_log_event(const char *buf, ulong event_len, + int body_offset, const Format_description_log_event* description_event); public: ulong thread_id; ulong slave_proxy_id; uint32 table_name_len; + /* + No need to have a catalog, as these events can only come from 4.x. + TODO: this may become false if Dmitri pushes his new LOAD DATA INFILE in + 5.0 only (not in 4.x). 
+ */ uint32 db_len; uint32 fname_len; uint32 num_fields; @@ -582,7 +766,7 @@ public: #ifndef MYSQL_CLIENT String field_lens_buf; String fields_buf; - + Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, enum enum_duplicates handle_dup, @@ -595,60 +779,74 @@ public: { return exec_event(thd->slave_net,rli,0); } - int exec_event(NET* net, struct st_relay_log_info* rli, + int exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool commented); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info = 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool commented); #endif - Load_log_event(const char* buf, int event_len, bool old_format); + /* + Note that for all the events related to LOAD DATA (Load_log_event, + Create_file/Append/Exec/Delete, we pass description_event; however as + logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used + for the common_header_len (post_header_len will not be changed). + */ + Load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Load_log_event() {} Log_event_type get_type_code() { return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); - bool is_valid() { return table_name != 0; } + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); + bool is_valid() const { return table_name != 0; } int get_data_size() { - return (table_name_len + 2 + db_len + 2 + fname_len - + 4 // thread_id - + 4 // exec_time - + 4 // skip_lines - + 4 // field block len + return (table_name_len + db_len + 2 + fname_len + + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } - int get_data_body_offset() { return LOAD_EVENT_OVERHEAD; } }; extern char server_version[SERVER_VERSION_LENGTH]; /***************************************************************************** - Start Log Event class + Start Log Event_v3 class + + Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and + 4.x). + Format_description_log_event derives from Start_log_event_v3; it is the + Start_log_event of binlog format 4 (MySQL 5.0), that is, the event that + describes the other events' header/postheader lengths. This event is sent by + MySQL 5.0 whenever it starts sending a new binlog if the requested position + is >4 (otherwise if ==4 the event will be sent naturally). ****************************************************************************/ -class Start_log_event: public Log_event + +class Start_log_event_v3: public Log_event { public: - /* - If this event is at the start of the first binary log since server startup - 'created' should be the timestamp when the event (and the binary log) was - created. - In the other case (i.e. this event is at the start of a binary log created - by FLUSH LOGS or automatic rotation), 'created' should be 0. - This "trick" is used by MySQL >=4.0.14 slaves to know if they must drop the - stale temporary tables or not. - Note that when 'created'!=0, it is always equal to the event's timestamp; - indeed Start_log_event is written only in log.cc where the first - constructor below is called, in which 'created' is set to 'when'. 
- So in fact 'created' is a useless variable. When it is 0 - we can read the actual value from timestamp ('when') and when it is - non-zero we can read the same value from timestamp ('when'). Conclusion: + /* + If this event is at the start of the first binary log since server + startup 'created' should be the timestamp when the event (and the + binary log) was created. In the other case (i.e. this event is at + the start of a binary log created by FLUSH LOGS or automatic + rotation), 'created' should be 0. This "trick" is used by MySQL + >=4.0.14 slaves to know if they must drop the stale temporary + tables or not. + + Note that when 'created'!=0, it is always equal to the event's + timestamp; indeed Start_log_event is written only in log.cc where + the first constructor below is called, in which 'created' is set + to 'when'. So in fact 'created' is a useless variable. When it is + 0 we can read the actual value from timestamp ('when') and when it + is non-zero we can read the same value from timestamp + ('when'). Conclusion: - we use timestamp to print when the binlog was created. - we use 'created' only to know if this is a first binlog or not. In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in @@ -658,29 +856,85 @@ public: time_t created; uint16 binlog_version; char server_version[ST_SERVER_VER_LEN]; + /* + artifical_event is 1 in the case where this is a generated event that + should not case any cleanup actions. We handle this in the log by + setting log_event == 0 (for now). + */ + bool artificial_event; #ifndef MYSQL_CLIENT - Start_log_event() :Log_event(), binlog_version(BINLOG_VERSION) - { - created = (time_t) when; - memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); - } + Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Start_log_event(const char* buf, bool old_format); - ~Start_log_event() {} - Log_event_type get_type_code() { return START_EVENT;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + Start_log_event_v3() {} + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif + + Start_log_event_v3(const char* buf, + const Format_description_log_event* description_event); + ~Start_log_event_v3() {} + Log_event_type get_type_code() { return START_EVENT_V3;} + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } int get_data_size() { - return START_HEADER_LEN; + return START_V3_HEADER_LEN; //no variable-sized part + } + virtual bool is_artificial_event() { return artificial_event; } +}; + + +/* + For binlog version 4. + This event is saved by threads which read it, as they need it for future + use (to decode the ordinary events). +*/ + +class Format_description_log_event: public Start_log_event_v3 +{ +public: + /* + The size of the fixed header which _all_ events have + (for binlogs written by this version, this is equal to + LOG_EVENT_HEADER_LEN), except FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT + (those have a header of size LOG_EVENT_MINIMAL_HEADER_LEN). 
+ */ + uint8 common_header_len; + uint8 number_of_event_types; + /* The list of post-headers' lengthes */ + uint8 *post_header_len; + + Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); + +#ifndef MYSQL_CLIENT +#ifdef HAVE_REPLICATION + int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ +#endif + + Format_description_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); + ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } + Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;} + bool write(IO_CACHE* file); + bool is_valid() const + { + return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN : + LOG_EVENT_MINIMAL_HEADER_LEN)) && + (post_header_len != NULL)); + } + int get_data_size() + { + /* + The vector of post-header lengths is considered as part of the + post-header, because in a given version it never changes (contrary to the + query in a Query_log_event). + */ + return FORMAT_DESCRIPTION_HEADER_LEN; } }; @@ -692,13 +946,14 @@ public: Logs special variables such as auto_increment values ****************************************************************************/ + class Intvar_log_event: public Log_event { public: ulonglong val; uchar type; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg) :Log_event(thd_arg,0,0),val(val_arg),type(type_arg) {} @@ -707,25 +962,30 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Intvar_log_event(const char* buf, bool old_format); + Intvar_log_event(const char* buf, const Format_description_log_event* description_event); ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} const char* get_var_type_name(); int get_data_size() { return 9; /* sizeof(type) + sizeof(val) */;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** Rand Log Event class - Logs random seed used by the next RAND(), and by PASSWORD() in 4.1. + Logs random seed used by the next RAND(), and by PASSWORD() in 4.1.0. + 4.1.1 does not need it (it's repeatable again) so this event needn't be + written in 4.1.1 for PASSWORD() (but the fact that it is written is just a + waste, it does not cause bugs). 
****************************************************************************/ + class Rand_log_event: public Log_event { public: @@ -741,17 +1001,18 @@ class Rand_log_event: public Log_event int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Rand_log_event(const char* buf, bool old_format); + Rand_log_event(const char* buf, const Format_description_log_event* description_event); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** User var Log Event class @@ -759,7 +1020,11 @@ class Rand_log_event: public Log_event Every time a query uses the value of a user variable, a User_var_log_event is written before the Query_log_event, to set the user variable. + Every time a query uses the value of a user variable, a User_var_log_event is + written before the Query_log_event, to set the user variable. + ****************************************************************************/ + class User_var_log_event: public Log_event { public: @@ -780,22 +1045,17 @@ public: void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - User_var_log_event(const char* buf, bool old_format); + User_var_log_event(const char* buf, const Format_description_log_event* description_event); ~User_var_log_event() {} Log_event_type get_type_code() { return USER_VAR_EVENT;} - int get_data_size() - { - return (is_null ? UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL : - UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + - UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE + val_len); - } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** Stop Log Event class @@ -811,15 +1071,15 @@ public: {} int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Stop_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + Stop_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; #endif /* HAVE_REPLICATION */ @@ -832,6 +1092,7 @@ public: This will be depricated when we move to using sequence ids. 
****************************************************************************/ + class Rotate_log_event: public Log_event { public: @@ -839,7 +1100,7 @@ public: ulonglong pos; uint ident_len; bool alloced; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg, uint ident_len_arg = 0, ulonglong pos_arg = LOG_EVENT_OFFSET) @@ -852,10 +1113,11 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Rotate_log_event(const char* buf, int event_len, bool old_format); + Rotate_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Rotate_log_event() { if (alloced) @@ -863,17 +1125,17 @@ public: } Log_event_type get_type_code() { return ROTATE_EVENT;} int get_data_size() { return ident_len + ROTATE_HEADER_LEN;} - bool is_valid() { return new_log_ident != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return new_log_ident != 0; } + bool write(IO_CACHE* file); }; + /* the classes below are for the new LOAD DATA INFILE logging */ /***************************************************************************** - Create File Log Event class - ****************************************************************************/ + class Create_file_log_event: public Load_log_event { protected: @@ -882,7 +1144,7 @@ protected: our Load part - used on the slave when writing event out to SQL_LOAD-*.info file */ - bool fake_base; + bool fake_base; public: char* block; const char *event_buf; @@ -902,11 +1164,12 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Create_file_log_event(const char* buf, int event_len, bool old_format); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool enable_local); +#endif + + Create_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Create_file_log_event() { my_free((char*) event_buf, MYF(MY_ALLOW_ZERO_PTR)); @@ -922,19 +1185,14 @@ public: Load_log_event::get_data_size() + 4 + 1 + block_len); } - int get_data_body_offset() - { - return (fake_base ? LOAD_EVENT_OVERHEAD: - LOAD_EVENT_OVERHEAD + CREATE_FILE_HEADER_LEN); - } - bool is_valid() { return inited_from_old || block != 0; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); + bool is_valid() const { return inited_from_old || block != 0; } + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); /* Cut out Create_file extentions and write it as Load event - used on the slave */ - int write_base(IO_CACHE* file); + bool write_base(IO_CACHE* file); }; @@ -943,6 +1201,7 @@ public: Append Block Log Event class ****************************************************************************/ + class Append_block_log_event: public Log_event { public: @@ -950,14 +1209,15 @@ public: uint block_len; uint file_id; /* - 'db' is filled when the event is created in mysql_load() (the event needs to - have a 'db' member to be well filtered by binlog-*-db rules). 
'db' is not - written to the binlog (it's not used by Append_block_log_event::write()), so - it can't be read in the Append_block_log_event(const char* buf, int - event_len) constructor. - In other words, 'db' is used only for filtering by binlog-*-db rules. - Create_file_log_event is different: its 'db' (which is inherited from - Load_log_event) is written to the binlog and can be re-read. + 'db' is filled when the event is created in mysql_load() (the + event needs to have a 'db' member to be well filtered by + binlog-*-db rules). 'db' is not written to the binlog (it's not + used by Append_block_log_event::write()), so it can't be read in + the Append_block_log_event(const char* buf, int event_len) + constructor. In other words, 'db' is used only for filtering by + binlog-*-db rules. Create_file_log_event is different: it's 'db' + (which is inherited from Load_log_event) is written to the binlog + and can be re-read. */ const char* db; @@ -969,29 +1229,32 @@ public: void pack_info(Protocol* protocol); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - - Append_block_log_event(const char* buf, int event_len); + + Append_block_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Append_block_log_event() {} Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;} int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;} - bool is_valid() { return block != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return block != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + /***************************************************************************** Delete File Log Event class ****************************************************************************/ + class Delete_file_log_event: public Log_event { public: uint file_id; const char* db; /* see comment in Append_block_log_event */ - + #ifndef MYSQL_CLIENT Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION @@ -999,29 +1262,32 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Delete_file_log_event(const char* buf, int event_len); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool enable_local); +#endif + + Delete_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Delete_file_log_event() {} Log_event_type get_type_code() { return DELETE_FILE_EVENT;} int get_data_size() { return DELETE_FILE_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + /***************************************************************************** Execute Load Log Event class ****************************************************************************/ + class Execute_load_log_event: public Log_event { public: uint file_id; - const char* db; /* see comment in Append_block_log_event */ + const char* db; /* see comment in Append_block_log_event */ #ifndef MYSQL_CLIENT 
Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); @@ -1030,30 +1296,37 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Execute_load_log_event(const char* buf, int event_len); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif + + Execute_load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Execute_load_log_event() {} Log_event_type get_type_code() { return EXEC_LOAD_EVENT;} int get_data_size() { return EXEC_LOAD_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + #ifdef MYSQL_CLIENT class Unknown_log_event: public Log_event { public: - Unknown_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + /* + Even if this is an unknown event, we still pass description_event to + Log_event's ctor, this way we can extract maximum information from the + event's header (the unique ID for example). + */ + Unknown_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Unknown_log_event() {} - void print(FILE* file, bool short_form= 0, char* last_db= 0); + void print(FILE* file, bool short_form= 0, LAST_EVENT_INFO* last_event_info= 0); Log_event_type get_type_code() { return UNKNOWN_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; -#endif +#endif #endif /* _log_event_h */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 3f55a88b262..20ede9a623e 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -14,6 +14,15 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + Mostly this file is used in the server. But a little part of it is used in + mysqlbinlog too (definition of SELECT_DISTINCT and others). + The consequence is that 90% of the file is wrapped in #ifndef MYSQL_CLIENT, + except the part which must be in the server and in the client. +*/ + +#ifndef MYSQL_CLIENT + #include <my_global.h> #include <mysql_version.h> #include <mysql_embed.h> @@ -55,7 +64,7 @@ char *sql_strmake_with_convert(const char *str, uint32 arg_length, CHARSET_INFO *from_cs, uint32 max_res_length, CHARSET_INFO *to_cs, uint32 *result_length); -void kill_one_thread(THD *thd, ulong id); +void kill_one_thread(THD *thd, ulong id, bool only_kill_query); bool net_request_file(NET* net, const char* fname); char* query_table_status(THD *thd,const char *db,const char *table_name); @@ -112,6 +121,26 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define TIME_FOR_COMPARE 5 // 5 compares == one read /* + Number of comparisons of table rowids equivalent to reading one row from a + table. +*/ +#define TIME_FOR_COMPARE_ROWID (TIME_FOR_COMPARE*2) + +/* + For sequential disk seeks the cost formula is: + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip + + The cost of average seek + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*BLOCKS_IN_AVG_SEEK =1.0. +*/ +#define DISK_SEEK_BASE_COST ((double)0.5) + +#define BLOCKS_IN_AVG_SEEK 128 + +#define DISK_SEEK_PROP_COST ((double)0.5/BLOCKS_IN_AVG_SEEK) + + +/* Number of rows in a reference table when refereed through a not unique key. 
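Going back to the DISK_SEEK_* constants defined a few lines above, plugging them into the stated formula gives a quick sanity check of the calibration (this arithmetic is only an illustration, not part of the patch):

  cost(#blocks_to_skip) = DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip
                        = 0.5 + (0.5/128) * #blocks_to_skip
  cost(0)   = 0.5
  cost(128) = 1.0   (one average seek, by construction)
  cost(256) = 1.5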
This value is only used when we don't know anything about the key distribution. @@ -173,8 +202,15 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define TEST_SIGINT 1024 /* Allow sigint on threads */ #define TEST_SYNCHRONIZATION 2048 /* get server to do sleep in some places */ +#endif + +/* + This is included in the server and in the client. + Options for select set by the yacc parser (stored in lex->options). + None of the 32 defines below should have its value changed, or this will + break replication. +*/ -/* options for select set by the yacc parser (stored in lex->options) */ #define SELECT_DISTINCT (1L << 0) #define SELECT_STRAIGHT_JOIN (1L << 1) #define SELECT_DESCRIBE (1L << 2) @@ -212,6 +248,9 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define OPTION_RELAXED_UNIQUE_CHECKS (1L << 27) #define SELECT_NO_UNLOCK (1L << 28) +/* The rest of the file is included in the server only */ +#ifndef MYSQL_CLIENT + /* Bits for different SQL modes modes (including ANSI mode) */ #define MODE_REAL_AS_FLOAT 1 #define MODE_PIPES_AS_CONCAT 2 @@ -227,12 +266,21 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define MODE_DB2 2048 #define MODE_MAXDB 4096 #define MODE_NO_KEY_OPTIONS 8192 -#define MODE_NO_TABLE_OPTIONS 16384 -#define MODE_NO_FIELD_OPTIONS 32768 -#define MODE_MYSQL323 65536 -#define MODE_MYSQL40 (MODE_MYSQL323*2) -#define MODE_ANSI (MODE_MYSQL40*2) -#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_TABLE_OPTIONS 16384 +#define MODE_NO_FIELD_OPTIONS 32768 +#define MODE_MYSQL323 65536 +#define MODE_MYSQL40 (MODE_MYSQL323*2) +#define MODE_ANSI (MODE_MYSQL40*2) +#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_BACKSLASH_ESCAPES (MODE_NO_AUTO_VALUE_ON_ZERO*2) +#define MODE_STRICT_TRANS_TABLES (MODE_NO_BACKSLASH_ESCAPES*2) +#define MODE_STRICT_ALL_TABLES (MODE_STRICT_TRANS_TABLES*2) +#define MODE_NO_ZERO_IN_DATE (MODE_STRICT_ALL_TABLES*2) +#define MODE_NO_ZERO_DATE (MODE_NO_ZERO_IN_DATE*2) +#define MODE_INVALID_DATES (MODE_NO_ZERO_DATE*2) +#define MODE_ERROR_FOR_DIVISION_BY_ZERO (MODE_INVALID_DATES*2) +#define MODE_TRADITIONAL (MODE_ERROR_FOR_DIVISION_BY_ZERO*2) +#define MODE_NO_AUTO_CREATE_USER (MODE_TRADITIONAL*2) #define RAID_BLOCK_SIZE 1024 @@ -344,6 +392,7 @@ inline THD *_current_thd(void) #include "sql_list.h" #include "sql_map.h" #include "handler.h" +#include "parse_file.h" #include "table.h" #include "field.h" /* Field definitions */ #include "protocol.h" @@ -357,12 +406,16 @@ void free_items(Item *item); void cleanup_items(Item *item); class THD; void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); -int check_one_table_access(THD *thd, ulong privilege, +bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); +bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table); bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list); int multi_update_precheck(THD *thd, TABLE_LIST *tables); int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count); +int mysql_multi_update_prepare(THD *thd); +int mysql_multi_delete_prepare(THD *thd); +int mysql_insert_select_prepare(THD *thd); int insert_select_precheck(THD *thd, TABLE_LIST *tables); int update_precheck(THD *thd, TABLE_LIST *tables); int delete_precheck(THD *thd, TABLE_LIST *tables); @@ -425,12 +478,13 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags); int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, my_bool 
drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool log_query); + bool drop_temporary, bool drop_view, bool log_query); int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool log_query); int quick_rm_table(enum db_type base,const char *db, const char *table_name); +void close_cached_table(THD *thd, TABLE *table); bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list); bool mysql_change_db(THD *thd,const char *name); void mysql_parse(THD *thd,char *inBuf,uint length); @@ -438,8 +492,8 @@ bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); bool is_update_query(enum enum_sql_command command); bool alloc_query(THD *thd, char *packet, ulong packet_length); void mysql_init_select(LEX *lex); -void mysql_init_query(THD *thd, uchar *buf, uint length); void mysql_reset_thd_for_next_command(THD *thd); +void mysql_init_query(THD *thd, uchar *buf, uint length); bool mysql_new_select(LEX *lex, bool move_down); void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); @@ -450,7 +504,7 @@ extern "C" pthread_handler_decl(handle_one_connection,arg); extern "C" pthread_handler_decl(handle_bootstrap,arg); void end_thread(THD *thd,bool put_in_cache); void flush_thread_cache(); -void mysql_execute_command(THD *thd); +int mysql_execute_command(THD *thd); bool do_command(THD *thd); bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length); @@ -511,7 +565,11 @@ int mysql_explain_select(THD *thd, SELECT_LEX *sl, char const *type, select_result *result); int mysql_union(THD *thd, LEX *lex, select_result *result, SELECT_LEX_UNIT *unit); -int mysql_handle_derived(LEX *lex); +int mysql_handle_derived(LEX *lex, int (*processor)(THD *thd, + st_lex *lex, + st_table_list *table)); +int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *t); +int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *t); Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, Item ***copy_func, Field **from_field, bool group, bool modify_item, uint convert_blob_length); @@ -526,7 +584,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, bool tmp_table, uint select_field_count); TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, + TABLE_LIST *create_table, List<create_field> *extra_fields, List<Key> *keys, List<Item> *items, @@ -552,7 +610,6 @@ int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys); int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info); int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, Item **conds, uint order_num, ORDER *order); int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields, List<Item> &values,COND *conds, @@ -563,20 +620,21 @@ int mysql_multi_update(THD *thd, TABLE_LIST *table_list, COND *conds, ulong options, enum enum_duplicates handle_duplicates, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, TABLE *table, +int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic); int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, List<List_item> &values, List<Item> &update_fields, 
List<Item> &update_values, enum_duplicates flag); +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry); int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, ha_rows rows, ulong options); int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok); +int mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create); TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update); -TABLE *open_table(THD *thd,const char *db,const char *table,const char *alias, +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem, bool *refresh); TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table); TABLE *find_locked_table(THD *thd, const char *db,const char *table_name); @@ -592,11 +650,26 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name); void execute_init_command(THD *thd, sys_var_str *init_command_var, rw_lock_t *var_mutex); extern const Field *not_found_field; +extern const Field *view_ref_found; + +enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, + IGNORE_ERRORS, REPORT_EXCEPT_NON_UNIQUE, + IGNORE_EXCEPT_NON_UNIQUE}; Field *find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error); -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grant,bool allow_rowid, - uint *cached_field_index_ptr); + Item **ref, + find_item_error_report_type report_error, + bool check_privileges); +Field * +find_field_in_table(THD *thd, TABLE_LIST *table_list, + const char *name, const char *item_name, + uint length, Item **ref, + bool check_grants_table, bool check_grants_view, + bool allow_rowid, + uint *cached_field_index_ptr); +Field * +find_field_in_real_table(THD *thd, TABLE *table, const char *name, + uint length, bool check_grants, bool allow_rowid, + uint *cached_field_index_ptr); #ifdef HAVE_OPENSSL #include <openssl/des.h> struct st_des_keyblock @@ -621,7 +694,8 @@ int mysql_do(THD *thd, List<Item> &values); /* sql_show.cc */ int mysqld_show_dbs(THD *thd,const char *wild); int mysqld_show_open_tables(THD *thd,const char *wild); -int mysqld_show_tables(THD *thd,const char *db,const char *wild); +int mysqld_show_tables(THD *thd, const char *db, const char *wild, + bool verbose); int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild); int mysqld_show_fields(THD *thd,TABLE_LIST *table, const char *wild, bool verbose); @@ -640,7 +714,8 @@ int mysqld_show_status(THD *thd); int mysqld_show_variables(THD *thd,const char *wild); int mysqld_show(THD *thd, const char *wild, show_var_st *variables, enum enum_var_type value_type, - pthread_mutex_t *mutex); + pthread_mutex_t *mutex, + struct system_status_var *status_var); int mysql_find_files(THD *thd,List<char> *files, const char *db, const char *path, const char *wild, bool dir); int mysqld_show_charsets(THD *thd,const char *wild); @@ -649,17 +724,18 @@ int mysqld_show_storage_engines(THD *thd); int mysqld_show_privileges(THD *thd); int mysqld_show_column_types(THD *thd); int mysqld_help (THD *thd, const char *text); +void calc_sum_of_all_status(STATUS_VAR *to); /* sql_prepare.cc */ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, LEX_STRING *name=NULL); void mysql_stmt_execute(THD *thd, char *packet, uint packet_length); void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name); +void mysql_stmt_fetch(THD *thd, char *packet, uint 
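mysql_handle_derived() above now takes a processor callback, so the same walker can run either the new mysql_derived_prepare() or mysql_derived_filling() step over every derived table. A minimal sketch of that callback-driven shape, with simplified stand-in types instead of the real LEX/TABLE_LIST structures:

    #include <cstdio>

    struct DemoDerived { const char *alias; DemoDerived *next; };

    /* walk the chain and apply the caller-supplied step to each element */
    static int process_all(DemoDerived *first, int (*processor)(DemoDerived *dt))
    {
      for (DemoDerived *dt= first; dt; dt= dt->next)
        if (int res= processor(dt))
          return res;                       /* stop on the first error */
      return 0;
    }

    static int demo_prepare(DemoDerived *dt)
    {
      std::printf("preparing derived table %s\n", dt->alias);
      return 0;
    }

    int main()
    {
      DemoDerived d2= { "dt2", nullptr };
      DemoDerived d1= { "dt1", &d2 };
      return process_all(&d1, demo_prepare);   /* e.g. a "prepare" phase */
    }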
packet_length); void mysql_stmt_free(THD *thd, char *packet); void mysql_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); -int check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter); +void reset_stmt_for_execute(THD *thd, LEX *lex); /* sql_error.cc */ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint code, @@ -698,9 +774,8 @@ bool add_proc_to_list(THD *thd, Item *item); TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find); SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error); -enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, - IGNORE_ERRORS}; + table_map read_tables, COND *conds, int *error, + bool allow_null_cond= false); extern const Item **not_found_item; Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, find_item_error_report_type report_error, @@ -709,8 +784,9 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); bool insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, const char *table_name, - List_iterator<Item> *it); -bool setup_tables(TABLE_LIST *tables); + List_iterator<Item> *it, bool any_privileges, + bool allocate_view_names); +bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds); int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num); int setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables, @@ -723,7 +799,6 @@ void wait_for_refresh(THD *thd); int open_tables(THD *thd, TABLE_LIST *tables, uint *counter); int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables); int open_and_lock_tables(THD *thd,TABLE_LIST *tables); -void relink_tables_for_derived(THD *thd); int lock_tables(THD *thd, TABLE_LIST *tables, uint counter); TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list); @@ -732,11 +807,11 @@ void free_io_cache(TABLE *entry); void intern_close_table(TABLE *entry); bool close_thread_table(THD *thd, TABLE **table_ptr); void close_temporary_tables(THD *thd); -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name); -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name); +TABLE_LIST *find_table_in_list(TABLE_LIST *table, + uint offset_to_list, + const char *db_name, + const char *table_name); +TABLE_LIST *unique_table(TABLE_LIST *table, TABLE_LIST *table_list); TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name); bool close_temporary_table(THD *thd, const char *db, const char *table_name); void close_temporary(TABLE *table, bool delete_table=1); @@ -752,14 +827,32 @@ int fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors); int fill_record(Field **field,List<Item> &values, bool ignore_errors); OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild); +inline TABLE_LIST *find_table_in_global_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, offsetof(TABLE_LIST, next_global), + db_name, table_name); +} + +inline TABLE_LIST *find_table_in_local_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, offsetof(TABLE_LIST, next_local), + db_name, table_name); +} + + /* sql_calc.cc */ bool 
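The find_table_in_global_list()/find_table_in_local_list() wrappers above pass offsetof(TABLE_LIST, next_global) or offsetof(TABLE_LIST, next_local) into one generic search routine, so a single loop can follow either chain. A self-contained sketch of that offset-selected traversal, using a simplified stand-in struct instead of the real TABLE_LIST:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct DemoTable
    {
      const char *name;
      DemoTable *next_global;
      DemoTable *next_local;
    };

    /* follow whichever "next" pointer lives at the given member offset */
    static DemoTable *find_in_chain(DemoTable *head, size_t offset_to_next,
                                    const char *name)
    {
      for (DemoTable *t= head; t;
           t= *(DemoTable **) ((char *) t + offset_to_next))
        if (!std::strcmp(t->name, name))
          return t;
      return nullptr;
    }

    int main()
    {
      DemoTable b= { "t2", nullptr, nullptr };
      DemoTable a= { "t1", &b, nullptr };        /* global chain: a -> b */
      DemoTable *hit= find_in_chain(&a, offsetof(DemoTable, next_global), "t2");
      std::printf("found: %s\n", hit ? hit->name : "none");
      return 0;
    }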
eval_const_cond(COND *cond); /* sql_load.cc */ -int mysql_load(THD *thd,sql_exchange *ex, TABLE_LIST *table_list, +int mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, - bool local_file,thr_lock_type lock_type); -int write_record(TABLE *table,COPY_INFO *info); + bool local_file, thr_lock_type lock_type, + bool ignore_check_option_errors); +int write_record(THD *thd, TABLE *table, COPY_INFO *info); /* sql_manager.cc */ /* bits set in manager_status */ @@ -774,12 +867,15 @@ extern "C" pthread_handler_decl(handle_manager, arg); void print_where(COND *cond,const char *info); void print_cached_tables(void); void TEST_filesort(SORT_FIELD *sortorder,uint s_length); +void print_plan(JOIN* join, double read_time, double record_count, + uint idx, const char *info); #endif void mysql_print_status(THD *thd); /* key.cc */ int find_ref_key(TABLE *form,Field *field, uint *offset); -void key_copy(byte *key,TABLE *form,uint index,uint key_length); -void key_restore(TABLE *form,byte *key,uint index,uint key_length); +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length); bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields); @@ -851,39 +947,32 @@ extern char language[LIBLEN],reg_ext[FN_EXTLEN]; extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN]; extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; extern char log_error_file[FN_REFLEN]; +extern double last_query_cost; extern double log_10[32]; extern ulonglong log_10_int[20]; extern ulonglong keybuff_size; -extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables; -extern ulong created_tmp_tables, created_tmp_disk_tables, bytes_sent; +extern ulong refresh_version,flush_version, thread_id,query_id; extern ulong binlog_cache_use, binlog_cache_disk_use; extern ulong aborted_threads,aborted_connects; extern ulong delayed_insert_timeout; extern ulong delayed_insert_limit, delayed_queue_size; extern ulong delayed_insert_threads, delayed_insert_writes; extern ulong delayed_rows_in_use,delayed_insert_errors; -extern ulong filesort_rows, filesort_range_count, filesort_scan_count; -extern ulong filesort_merge_passes; -extern ulong select_range_check_count, select_range_count, select_scan_count; -extern ulong select_full_range_join_count,select_full_join_count; extern ulong slave_open_temp_tables; extern ulong query_cache_size, query_cache_min_res_unit; extern ulong thd_startup_options, slow_launch_threads, slow_launch_time; extern ulong server_id, concurrency; -extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count; -extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count; -extern ulong ha_read_first_count, ha_read_last_count; -extern ulong ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; -extern ulong ha_commit_count, ha_rollback_count,table_cache_size; +extern ulong ha_read_count, ha_discover_count; +extern ulong table_cache_size; extern ulong max_connections,max_connect_errors, connect_timeout; extern ulong slave_net_timeout; extern ulong max_user_connections; -extern ulong long_query_count, what_to_log,flush_time; +extern ulong what_to_log,flush_time; extern ulong query_buff_size, thread_stack,thread_stack_min; extern ulong binlog_cache_size, 
max_binlog_cache_size, open_files_limit; extern ulong max_binlog_size, max_relay_log_size; extern ulong rpl_recovery_rank, thread_cache_size; -extern ulong com_stat[(uint) SQLCOM_END], com_other, back_log; +extern ulong back_log; extern ulong specialflag, current_pid; extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter; extern my_bool relay_log_purge, opt_innodb_safe_binlog; @@ -896,6 +985,7 @@ extern bool using_update_log, opt_large_files, server_id_supplied; extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; extern bool opt_disable_networking, opt_skip_show_db; extern bool volatile abort_loop, shutdown_in_progress, grant_option; +extern bool mysql_proc_table_exists; extern uint volatile thread_count, thread_running, global_read_lock; extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types; extern my_bool opt_safe_show_db, opt_local_infile; @@ -908,7 +998,7 @@ extern char *shared_memory_base_name, *mysqld_unix_port; extern bool opt_enable_shared_memory; extern char *default_tz_name; -extern MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; +extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log; extern FILE *bootstrap_file; extern pthread_key(MEM_ROOT**,THR_MALLOC); extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, @@ -932,6 +1022,7 @@ extern SHOW_COMP_OPTION have_berkeley_db; extern SHOW_COMP_OPTION have_ndbcluster; extern struct system_variables global_system_variables; extern struct system_variables max_system_variables; +extern struct system_status_var global_status_var; extern struct rand_struct sql_rand; extern KEY_CACHE *sql_key_cache; @@ -973,7 +1064,8 @@ void mysql_lock_abort_for_thread(THD *thd, TABLE *table); MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b); bool lock_global_read_lock(THD *thd); void unlock_global_read_lock(THD *thd); -bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commit); +bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, + bool is_not_commit); void start_waiting_global_read_lock(THD *thd); void make_global_read_lock_block_commit(THD *thd); @@ -1000,8 +1092,8 @@ int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info, uint key_count,KEY *key_info); int format_number(uint inputflag,uint max_length,my_string pos,uint length, my_string *errpos); -int openfrm(const char *name,const char *alias,uint filestat,uint prgflag, - uint ha_open_flags, TABLE *outparam); +int openfrm(THD *thd, const char *name,const char *alias,uint filestat, + uint prgflag, uint ha_open_flags, TABLE *outparam); int readfrm(const char *name, const void** data, uint* length); int writefrm(const char* name, const void* data, uint len); int closefrm(TABLE *table); @@ -1011,7 +1103,6 @@ void free_blobs(TABLE *table); int set_zone(int nr,int min_zone,int max_zone); ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); -uint calc_days_in_year(uint year); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist); @@ -1024,7 +1115,8 @@ void localtime_to_TIME(TIME *to, struct tm *from); void calc_time_from_sec(TIME *to, long seconds, long microseconds); void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type); + uint str_length, timestamp_type time_type, + const char *field_name); extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type, const 
char *format_str, uint format_length); @@ -1115,6 +1207,8 @@ extern int yyparse(void *thd); SQL_CRYPT *get_crypt_for_frm(void); #endif +#include "sql_view.h" + /* Some inline functions for more speed */ inline bool add_item_to_list(THD *thd, Item *item) @@ -1213,3 +1307,5 @@ bool check_stack_overrun(THD *thd,char *dummy); inline void kill_delayed_threads(void) {} #define check_stack_overrun(A, B) 0 #endif + +#endif /* MYSQL_CLIENT */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 5622ac50a7b..675f57c4ea5 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -23,6 +23,7 @@ #include "repl_failsafe.h" #include "stacktrace.h" #include "mysqld_suffix.h" +#include "mysys_err.h" #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" #endif @@ -40,6 +41,8 @@ #include <thr_alarm.h> #include <ft_global.h> #include <errmsg.h> +#include "sp_rcontext.h" +#include "sp_cache.h" #define mysqld_charset &my_charset_latin1 @@ -89,7 +92,7 @@ extern "C" { // Because of SCO 3.2V4.2 #if defined(OS2) # include <sys/un.h> -#elif !defined( __WIN__) +#elif !defined(__WIN__) # ifndef __NETWARE__ #include <sys/resource.h> # endif /* __NETWARE__ */ @@ -216,7 +219,10 @@ const char *sql_mode_names[] = "NO_DIR_IN_CREATE", "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI", - "NO_AUTO_VALUE_ON_ZERO", NullS + "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES", "STRICT_ALL_TABLES", + "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES", "ERROR_FOR_DIVISION_BY_ZERO", + "TRADITIONAL", "NO_AUTO_CREATE_USER", + NullS }; TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"", sql_mode_names, NULL }; @@ -286,20 +292,13 @@ ulong open_files_limit, max_binlog_size, max_relay_log_size; ulong slave_net_timeout; ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; ulong query_cache_size=0; -ulong com_stat[(uint) SQLCOM_END], com_other; -ulong bytes_sent, bytes_received, net_big_packet_count; ulong refresh_version, flush_version; /* Increments on each reload */ -ulong query_id, long_query_count; +ulong query_id; ulong aborted_threads, killed_threads, aborted_connects; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; ulong delayed_insert_errors,flush_time, thread_created; -ulong filesort_rows, filesort_range_count, filesort_scan_count; -ulong filesort_merge_passes; -ulong select_range_check_count, select_range_count, select_scan_count; -ulong select_full_range_join_count,select_full_join_count; -ulong specialflag=0,opened_tables=0,created_tmp_tables=0, - created_tmp_disk_tables=0; +ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong max_connections,max_used_connections, max_connect_errors, max_user_connections = 0; @@ -309,6 +308,7 @@ ulong expire_logs_days = 0; ulong rpl_recovery_rank=0; ulong my_bind_addr; /* the address we bind to */ volatile ulong cached_thread_count= 0; +double last_query_cost= -1; /* -1 denotes that no query was compiled yet */ double log_10[32]; /* 10 potences */ time_t start_time; @@ -358,6 +358,7 @@ I_List<NAMED_LIST> key_caches; struct system_variables global_system_variables; struct system_variables max_system_variables; +struct system_status_var global_status_var; MY_TMPDIR mysql_tmpdir_list; MY_BITMAP temp_pool; @@ -515,6 +516,8 @@ static void close_connections(void) #ifdef EXTRA_DEBUG int count=0; #endif + THD *thd= current_thd; + 
DBUG_ENTER("close_connections"); /* Clear thread cache */ @@ -525,14 +528,14 @@ static void close_connections(void) (void) pthread_mutex_lock(&LOCK_manager); if (manager_thread_in_use) { - DBUG_PRINT("quit",("killing manager thread: %lx",manager_thread)); + DBUG_PRINT("quit",("killing manager thread: 0x%lx",manager_thread)); (void) pthread_cond_signal(&COND_manager); } (void) pthread_mutex_unlock(&LOCK_manager); /* kill connection thread */ #if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__) - DBUG_PRINT("quit",("waiting for select thread: %lx",select_thread)); + DBUG_PRINT("quit",("waiting for select thread: 0x%lx",select_thread)); (void) pthread_mutex_lock(&LOCK_thread_count); while (select_thread_in_use) @@ -566,7 +569,7 @@ static void close_connections(void) /* Abort listening to new connections */ DBUG_PRINT("quit",("Closing sockets")); - if ( !opt_disable_networking ) + if (!opt_disable_networking ) { if (ip_sock != INVALID_SOCKET) { @@ -579,7 +582,7 @@ static void close_connections(void) if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe) { HANDLE temp; - DBUG_PRINT( "quit", ("Closing named pipes") ); + DBUG_PRINT("quit", ("Closing named pipes") ); /* Create connection to the handle named pipe handler to break the loop */ if ((temp = CreateFile(pipe_name, @@ -621,7 +624,7 @@ static void close_connections(void) { DBUG_PRINT("quit",("Informing thread %ld that it's time to die", tmp->thread_id)); - tmp->killed=1; + tmp->killed= THD::KILL_CONNECTION; if (tmp->mysys_var) { tmp->mysys_var->abort=1; @@ -745,7 +748,7 @@ void kill_mysql(void) } #endif #elif defined(OS2) - pthread_cond_signal( &eventShutdown); // post semaphore + pthread_cond_signal(&eventShutdown); // post semaphore #elif defined(HAVE_PTHREAD_KILL) if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL)) { @@ -804,6 +807,7 @@ static void __cdecl kill_server(int sig_ptr) unireg_abort(1); /* purecov: inspected */ else unireg_end(); + #ifdef __NETWARE__ pthread_join(select_thread, NULL); // wait for main thread #endif /* __NETWARE__ */ @@ -895,7 +899,6 @@ void clean_up(bool print_message) mysql_log.cleanup(); mysql_slow_log.cleanup(); - mysql_update_log.cleanup(); mysql_bin_log.cleanup(); #ifdef HAVE_REPLICATION @@ -1164,7 +1167,7 @@ static void server_init(void) DBUG_ENTER("server_init"); #ifdef __WIN__ - if ( !opt_disable_networking ) + if (!opt_disable_networking) { WSADATA WsaData; if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData)) @@ -1248,7 +1251,7 @@ static void server_init(void) sql_perror("Can't start server : Set security descriptor"); unireg_abort(1); } - saPipeSecurity.nLength = sizeof( SECURITY_ATTRIBUTES ); + saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES); saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor; saPipeSecurity.bInheritHandle = FALSE; if ((hPipe= CreateNamedPipe(pipe_name, @@ -1268,9 +1271,9 @@ static void server_init(void) FORMAT_MESSAGE_FROM_SYSTEM, NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &lpMsgBuf, 0, NULL ); - MessageBox( NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe", - MB_OK|MB_ICONINFORMATION ); - LocalFree( lpMsgBuf ); + MessageBox(NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe", + MB_OK|MB_ICONINFORMATION); + LocalFree(lpMsgBuf); unireg_abort(1); } } @@ -1475,7 +1478,7 @@ extern "C" sig_handler abort_thread(int sig __attribute__((unused))) THD *thd=current_thd; DBUG_ENTER("abort_thread"); if (thd) - thd->killed=1; + thd->killed= THD::KILL_CONNECTION; DBUG_VOID_RETURN; } #endif @@ -1491,7 +1494,7 @@ static void 
init_signals(void) { int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ; for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++) - signal( signals[i], kill_server) ; + signal(signals[i], kill_server) ; #if defined(__WIN__) signal(SIGBREAK,SIG_IGN); //ignore SIGBREAK for NT #else @@ -2126,9 +2129,14 @@ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) { THD *thd; DBUG_ENTER("my_message_sql"); - DBUG_PRINT("error", ("Message: '%s'", str)); + DBUG_PRINT("error", ("error: %u message: '%s'", error, str)); if ((thd= current_thd)) { + if (thd->spcont && + thd->spcont->find_handler(error, MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_RETURN(0); + } /* thd->lex->current_select == 0 if lex structure is not inited (not query command (COM_QUERY)) @@ -2207,10 +2215,10 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) my_thread_init(); // wait semaphore - pthread_cond_wait( &eventShutdown, NULL); + pthread_cond_wait(&eventShutdown, NULL); // close semaphore and kill server - pthread_cond_destroy( &eventShutdown); + pthread_cond_destroy(&eventShutdown); /* Exit main loop on main thread, so kill will be done from @@ -2256,7 +2264,7 @@ bool open_log(MYSQL_LOG *log, const char *hostname, } return log->open(opt_name, type, 0, index_file_name, (read_append) ? SEQ_READ_APPEND : WRITE_CACHE, - no_auto_events, max_size); + no_auto_events, max_size, 0); } @@ -2346,7 +2354,6 @@ static int init_common_variables(const char *conf_file_name, int argc, before MY_INIT(). So we do it here. */ mysql_log.init_pthread_objects(); - mysql_update_log.init_pthread_objects(); mysql_slow_log.init_pthread_objects(); mysql_bin_log.init_pthread_objects(); @@ -2484,6 +2491,7 @@ static int init_thread_environment() (void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST); (void) pthread_cond_init(&COND_rpl_status, NULL); #endif + sp_cache_init(); /* Parameter for threads created for connections */ (void) pthread_attr_init(&connection_attrib); (void) pthread_attr_setdetachstate(&connection_attrib, @@ -2511,7 +2519,7 @@ static void init_ssl() ssl_acceptor_fd= new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); - DBUG_PRINT("info",("ssl_acceptor_fd: %lx", (long) ssl_acceptor_fd)); + DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd)); if (!ssl_acceptor_fd) opt_use_ssl = 0; } @@ -2542,9 +2550,55 @@ static int init_server_components() LOG_NORMAL, 0, 0, 0); if (opt_update_log) { - open_log(&mysql_update_log, glob_hostname, opt_update_logname, "", - NullS, LOG_NEW, 0, 0, 0); - using_update_log=1; + /* + Update log is removed since 5.0. But we still accept the option. + The idea is if the user already uses the binlog and the update log, + we completely ignore any option/variable related to the update log, like + if the update log did not exist. But if the user uses only the update log, + then we translate everything into binlog for him (with warnings). + Implementation of the above : + - If mysqld is started with --log-update and --log-bin, + ignore --log-update (print a warning), push a warning when SQL_LOG_UPDATE + is used, and turn off --sql-bin-update-same. + This will completely ignore SQL_LOG_UPDATE + - If mysqld is started with --log-update only, + change it to --log-bin (with the filename passed to log-update, + plus '-bin') (print a warning), push a warning when SQL_LOG_UPDATE is + used, and turn on --sql-bin-update-same. + This will translate SQL_LOG_UPDATE to SQL_LOG_BIN. 
+ + Note that we tell the user that --sql-bin-update-same is deprecated and + does nothing, and we don't take into account if he used this option or + not; but internally we give this variable a value to have the behaviour we + want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not). + As sql-bin-update-same, log-update and log-bin cannot be changed by the + user after starting the server (they are not variables), the user will not + later interfere with the settings we do here. + */ + if (opt_bin_log) + { + opt_sql_bin_update= 0; + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log."); + } + else + { + opt_sql_bin_update= 1; + opt_bin_log= 1; + if (opt_update_logname) + { + // as opt_bin_log==0, no need to free opt_bin_logname + if (!(opt_bin_logname= my_strdup(opt_update_logname, MYF(MY_WME)))) + exit(EXIT_OUT_OF_MEMORY); + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. Now starting MySQL \ +with --log-bin='%s' instead.",opt_bin_logname); + } + else + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. Now starting MySQL \ +with --log-bin instead."); + } } if (opt_slow_log) open_log(&mysql_slow_log, glob_hostname, opt_slow_logname, "-slow.log", @@ -2813,7 +2867,7 @@ int main(int argc, char **argv) if (_cust_check_startup()) { / * _cust_check_startup will report startup failure error * / - exit( 1 ); + exit(1); } #endif @@ -2990,7 +3044,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); handle_connections_methods(); #else #ifdef __WIN__ - if ( !have_tcpip || opt_disable_networking) + if (!have_tcpip || opt_disable_networking) { sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces"); unireg_abort(1); @@ -3106,7 +3160,8 @@ default_service_handling(char **argv, const char *servicename, const char *displayname, const char *file_path, - const char *extra_opt) + const char *extra_opt, + const char *account_name) { char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end; end= path_and_service + sizeof(path_and_service)-3; @@ -3125,12 +3180,12 @@ default_service_handling(char **argv, if (Service.got_service_option(argv, "install")) { - Service.Install(1, servicename, displayname, path_and_service); + Service.Install(1, servicename, displayname, path_and_service, account_name); return 0; } if (Service.got_service_option(argv, "install-manual")) { - Service.Install(0, servicename, displayname, path_and_service); + Service.Install(0, servicename, displayname, path_and_service, account_name); return 0; } if (Service.got_service_option(argv, "remove")) @@ -3165,7 +3220,7 @@ int main(int argc, char **argv) if (argc == 2) { if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, - file_path, "")) + file_path, "", NULL)) return 0; if (Service.IsService(argv[1])) /* Start an optional service */ { @@ -3184,7 +3239,7 @@ int main(int argc, char **argv) } else if (argc == 3) /* install or remove any optional service */ { - if (!default_service_handling(argv, argv[2], argv[2], file_path, "")) + if (!default_service_handling(argv, argv[2], argv[2], file_path, "", NULL)) return 0; if (Service.IsService(argv[2])) { @@ -3202,15 +3257,39 @@ int main(int argc, char **argv) return 0; } } - else if (argc == 4) + else if (argc >= 4) { - /* - Install an optional service with optional config file - 
mysqld --install-manual mysqldopt --defaults-file=c:\miguel\my.ini - */ - if (!default_service_handling(argv, argv[2], argv[2], file_path, - argv[3])) - return 0; + const char *defaults_file = "--defaults-file"; + const char *service = "--local-service"; + char extra_opt[FN_REFLEN] = ""; + char *account_name = NULL; + char *option; + int index; + for (index = 3; index < argc; index++) + { + option= argv[index]; + /* + Install an optional service with optional config file + mysqld --install-manual mysqldopt --defaults-file=c:\miguel\my.ini + */ + if (strncmp(option, defaults_file, strlen(defaults_file)) == 0) + { + strmov(extra_opt, option); + } + else + /* + Install an optional service as local service + mysqld --install-manual mysqldopt --local-service + */ + if (strncmp(option, service, strlen(service)) == 0) + { + account_name=(char*)malloc(27); + strmov(account_name, "NT AUTHORITY\\LocalService\0"); + } + } + + if (!default_service_handling(argv, argv[2], argv[2], file_path, extra_opt, account_name)) + return 0; } else if (argc == 1 && Service.IsService(MYSQL_SERVICENAME)) { @@ -3351,7 +3430,7 @@ static void create_new_thread(THD *thd) ("Can't create thread to handle request (error %d)", error)); thread_count--; - thd->killed=1; // Safety + thd->killed= THD::KILL_CONNECTION; // Safety (void) pthread_mutex_unlock(&LOCK_thread_count); statistic_increment(aborted_connects,&LOCK_status); net_printf(thd,ER_CANT_CREATE_THREAD,error); @@ -3625,25 +3704,27 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) while (!abort_loop) { /* wait for named pipe connection */ - fConnected = ConnectNamedPipe( hPipe, NULL ); + fConnected = ConnectNamedPipe(hPipe, NULL); if (abort_loop) break; if (!fConnected) fConnected = GetLastError() == ERROR_PIPE_CONNECTED; if (!fConnected) { - CloseHandle( hPipe ); - if ((hPipe = CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, - PIPE_TYPE_BYTE | - PIPE_READMODE_BYTE | - PIPE_WAIT, - PIPE_UNLIMITED_INSTANCES, - (int) global_system_variables.net_buffer_length, - (int) global_system_variables.net_buffer_length, - NMPWAIT_USE_DEFAULT_WAIT, - &saPipeSecurity )) == - INVALID_HANDLE_VALUE ) + CloseHandle(hPipe); + if ((hPipe= CreateNamedPipe(pipe_name, + PIPE_ACCESS_DUPLEX, + PIPE_TYPE_BYTE | + PIPE_READMODE_BYTE | + PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, + (int) global_system_variables. + net_buffer_length, + (int) global_system_variables. 
+ net_buffer_length, + NMPWAIT_USE_DEFAULT_WAIT, + &saPipeSecurity)) == + INVALID_HANDLE_VALUE) { sql_perror("Can't create new named pipe!"); break; // Abort @@ -3670,8 +3751,8 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) if (!(thd = new THD)) { - DisconnectNamedPipe( hConnectedPipe ); - CloseHandle( hConnectedPipe ); + DisconnectNamedPipe(hConnectedPipe); + CloseHandle(hConnectedPipe); continue; } if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) || @@ -4036,7 +4117,11 @@ enum options_mysqld OPT_TIME_FORMAT, OPT_DATETIME_FORMAT, OPT_LOG_QUERIES_NOT_USING_INDEXES, - OPT_DEFAULT_TIME_ZONE + OPT_DEFAULT_TIME_ZONE, + OPT_OPTIMIZER_SEARCH_DEPTH, + OPT_OPTIMIZER_PRUNE_LEVEL, + OPT_UPDATABLE_VIEWS_WITH_LIMIT, + OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET }; @@ -4055,6 +4140,16 @@ struct my_option my_long_options[] = #endif /* HAVE_REPLICATION */ {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-increment-increment", OPT_AUTO_INCREMENT, + "Auto-increment columns are incremented by this", + (gptr*) &global_system_variables.auto_increment_increment, + (gptr*) &max_system_variables.auto_increment_increment, 0, GET_ULONG, + OPT_ARG, 1, 1, 65535, 0, 1, 0 }, + {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET, + "Offset added to Auto-increment columns. Used when auto-increment-increment != 1", + (gptr*) &global_system_variables.auto_increment_offset, + (gptr*) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG, + 1, 1, 65535, 0, 1, 0 }, {"basedir", 'b', "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, @@ -4305,12 +4400,13 @@ Disable with --skip-isam.", (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-update", OPT_UPDATE_LOG, - "Log updates to file.# where # is a unique number if not given.", + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option justs turns on --log-bin instead.", (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-warnings", 'W', "Log some non-critical warnings to the error log file. 
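The auto-increment-increment and auto-increment-offset options above (each constrained to 1..65535) control which values an auto-increment column hands out. As a rough model, stated here as an assumption for illustration and not taken from the patch, the generated values form the sequence offset, offset+increment, offset+2*increment, ..., and the next value issued is the smallest member of that sequence above the current maximum:

    #include <cstdio>

    /* hypothetical helper, not a server function */
    static unsigned long next_auto_value(unsigned long current_max,
                                         unsigned long increment,
                                         unsigned long offset)
    {
      if (current_max < offset)
        return offset;
      return offset + increment * ((current_max - offset) / increment + 1);
    }

    int main()
    {
      /* increment=10, offset=3 generates 3, 13, 23, ...; after 17 comes 23 */
      std::printf("%lu\n", next_auto_value(17, 10, 3));
      return 0;
    }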
Use this option twice or --log-warnings=2 if you also want 'Aborted connections' warnings.", + {"log-warnings", 'W', "Log some not critical warnings to the log file.", (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, "INSERT/DELETE/UPDATE has lower priority than selects.", @@ -4573,9 +4669,9 @@ replicating a LOAD DATA INFILE command.", 0}, #endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, - "If set, setting SQL_LOG_BIN to a value will automatically set SQL_LOG_UPDATE to the same value and vice versa.", - (gptr*) &opt_sql_bin_update, (gptr*) &opt_sql_bin_update, 0, GET_BOOL, - NO_ARG, 0, 0, 0, 0, 0, 0}, + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option does nothing anymore.", + 0, 0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sql-mode", OPT_SQL_MODE, "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", (gptr*) &sql_mode_str, (gptr*) &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, @@ -4597,7 +4693,7 @@ replicating a LOAD DATA INFILE command.", 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files. Several paths may be specified, separated by a " -#if defined( __WIN__) || defined(OS2) || defined(__NETWARE__) +#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__) "semicolon (;)" #else "colon (:)" @@ -4984,6 +5080,16 @@ The minimum value for this variable is 4096.", "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0}, + {"optimizer_prune_level", OPT_OPTIMIZER_PRUNE_LEVEL, + "Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows.", + (gptr*) &global_system_variables.optimizer_prune_level, + (gptr*) &max_system_variables.optimizer_prune_level, + 0, GET_ULONG, OPT_ARG, 1, 0, 1, 0, 1, 0}, + {"optimizer_search_depth", OPT_OPTIMIZER_SEARCH_DEPTH, + "Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Smaller values than the number of tables in a relation result in faster optimization, but may produce very bad query plans. 
If set to 0, the system will automatically pick a reasonable value; if set to MAX_TABLES+2, the optimizer will switch to the original find_best (used for testing/comparison).", + (gptr*) &global_system_variables.optimizer_search_depth, + (gptr*) &max_system_variables.optimizer_search_depth, + 0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0}, {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE, "The size of the buffer that is allocated when preloading indexes", (gptr*) &global_system_variables.preload_buff_size, @@ -5134,6 +5240,11 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, + "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlaying table is used in queries with a LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", + (gptr*) &global_system_variables.updatable_views_with_limit, + (gptr*) &max_system_variables.updatable_views_with_limit, + 0, GET_ULONG, REQUIRED_ARG, 1, 0, 1, 0, 1, 0}, {"wait_timeout", OPT_WAIT_TIMEOUT, "The number of seconds the server waits for activity on a connection before closing it.", (gptr*) &global_system_variables.net_wait_timeout, @@ -5149,118 +5260,137 @@ struct show_var_st status_vars[]= { {"Aborted_connects", (char*) &aborted_connects, SHOW_LONG}, {"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG}, {"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG}, - {"Bytes_received", (char*) &bytes_received, SHOW_LONG}, - {"Bytes_sent", (char*) &bytes_sent, SHOW_LONG}, - {"Com_admin_commands", (char*) &com_other, SHOW_LONG}, - {"Com_alter_db", (char*) (com_stat+(uint) SQLCOM_ALTER_DB),SHOW_LONG}, - {"Com_alter_table", (char*) (com_stat+(uint) SQLCOM_ALTER_TABLE),SHOW_LONG}, - {"Com_analyze", (char*) (com_stat+(uint) SQLCOM_ANALYZE),SHOW_LONG}, - {"Com_backup_table", (char*) (com_stat+(uint) SQLCOM_BACKUP_TABLE),SHOW_LONG}, - {"Com_begin", (char*) (com_stat+(uint) SQLCOM_BEGIN),SHOW_LONG}, - {"Com_change_db", (char*) (com_stat+(uint) SQLCOM_CHANGE_DB),SHOW_LONG}, - {"Com_change_master", (char*) (com_stat+(uint) SQLCOM_CHANGE_MASTER),SHOW_LONG}, - {"Com_check", (char*) (com_stat+(uint) SQLCOM_CHECK),SHOW_LONG}, - {"Com_checksum", (char*) (com_stat+(uint) SQLCOM_CHECKSUM),SHOW_LONG}, - {"Com_commit", (char*) (com_stat+(uint) SQLCOM_COMMIT),SHOW_LONG}, - {"Com_create_db", (char*) (com_stat+(uint) SQLCOM_CREATE_DB),SHOW_LONG}, - {"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG}, - {"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG}, - {"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG}, - {"Com_dealloc_sql", (char*) (com_stat+(uint) - SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, - {"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG}, - {"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG}, - {"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG}, - {"Com_drop_db", (char*) (com_stat+(uint) SQLCOM_DROP_DB),SHOW_LONG}, - {"Com_drop_function", (char*) (com_stat+(uint) SQLCOM_DROP_FUNCTION),SHOW_LONG}, - {"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG}, - {"Com_drop_table", 
(char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG}, - {"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG}, - {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), - SHOW_LONG}, - {"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG}, - {"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG}, - {"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG}, - {"Com_ha_open", (char*) (com_stat+(uint) SQLCOM_HA_OPEN),SHOW_LONG}, - {"Com_ha_read", (char*) (com_stat+(uint) SQLCOM_HA_READ),SHOW_LONG}, - {"Com_help", (char*) (com_stat+(uint) SQLCOM_HELP),SHOW_LONG}, - {"Com_insert", (char*) (com_stat+(uint) SQLCOM_INSERT),SHOW_LONG}, - {"Com_insert_select", (char*) (com_stat+(uint) SQLCOM_INSERT_SELECT),SHOW_LONG}, - {"Com_kill", (char*) (com_stat+(uint) SQLCOM_KILL),SHOW_LONG}, - {"Com_load", (char*) (com_stat+(uint) SQLCOM_LOAD),SHOW_LONG}, - {"Com_load_master_data", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_DATA),SHOW_LONG}, - {"Com_load_master_table", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_TABLE),SHOW_LONG}, - {"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG}, - {"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG}, - {"Com_preload_keys", (char*) (com_stat+(uint) SQLCOM_PRELOAD_KEYS),SHOW_LONG}, - {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), - SHOW_LONG}, - {"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG}, - {"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG}, - {"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG}, - {"Com_repair", (char*) (com_stat+(uint) SQLCOM_REPAIR),SHOW_LONG}, - {"Com_replace", (char*) (com_stat+(uint) SQLCOM_REPLACE),SHOW_LONG}, - {"Com_replace_select", (char*) (com_stat+(uint) SQLCOM_REPLACE_SELECT),SHOW_LONG}, - {"Com_reset", (char*) (com_stat+(uint) SQLCOM_RESET),SHOW_LONG}, - {"Com_restore_table", (char*) (com_stat+(uint) SQLCOM_RESTORE_TABLE),SHOW_LONG}, - {"Com_revoke", (char*) (com_stat+(uint) SQLCOM_REVOKE),SHOW_LONG}, - {"Com_revoke_all", (char*) (com_stat+(uint) SQLCOM_REVOKE_ALL),SHOW_LONG}, - {"Com_rollback", (char*) (com_stat+(uint) SQLCOM_ROLLBACK),SHOW_LONG}, - {"Com_savepoint", (char*) (com_stat+(uint) SQLCOM_SAVEPOINT),SHOW_LONG}, - {"Com_select", (char*) (com_stat+(uint) SQLCOM_SELECT),SHOW_LONG}, - {"Com_set_option", (char*) (com_stat+(uint) SQLCOM_SET_OPTION),SHOW_LONG}, - {"Com_show_binlog_events", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOG_EVENTS),SHOW_LONG}, - {"Com_show_binlogs", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOGS),SHOW_LONG}, - {"Com_show_charsets", (char*) (com_stat+(uint) SQLCOM_SHOW_CHARSETS),SHOW_LONG}, - {"Com_show_collations", (char*) (com_stat+(uint) SQLCOM_SHOW_COLLATIONS),SHOW_LONG}, - {"Com_show_column_types", (char*) (com_stat+(uint) SQLCOM_SHOW_COLUMN_TYPES),SHOW_LONG}, - {"Com_show_create_db", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE_DB),SHOW_LONG}, - {"Com_show_create_table", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE),SHOW_LONG}, - {"Com_show_databases", (char*) (com_stat+(uint) SQLCOM_SHOW_DATABASES),SHOW_LONG}, - {"Com_show_errors", (char*) (com_stat+(uint) SQLCOM_SHOW_ERRORS),SHOW_LONG}, - {"Com_show_fields", (char*) (com_stat+(uint) SQLCOM_SHOW_FIELDS),SHOW_LONG}, - {"Com_show_grants", (char*) (com_stat+(uint) SQLCOM_SHOW_GRANTS),SHOW_LONG}, - {"Com_show_innodb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_INNODB_STATUS),SHOW_LONG}, - {"Com_show_keys", (char*) (com_stat+(uint) SQLCOM_SHOW_KEYS),SHOW_LONG}, - 
{"Com_show_logs", (char*) (com_stat+(uint) SQLCOM_SHOW_LOGS),SHOW_LONG}, - {"Com_show_master_status", (char*) (com_stat+(uint) SQLCOM_SHOW_MASTER_STAT),SHOW_LONG}, - {"Com_show_new_master", (char*) (com_stat+(uint) SQLCOM_SHOW_NEW_MASTER),SHOW_LONG}, - {"Com_show_open_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_OPEN_TABLES),SHOW_LONG}, - {"Com_show_privileges", (char*) (com_stat+(uint) SQLCOM_SHOW_PRIVILEGES),SHOW_LONG}, - {"Com_show_processlist", (char*) (com_stat+(uint) SQLCOM_SHOW_PROCESSLIST),SHOW_LONG}, - {"Com_show_slave_hosts", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_HOSTS),SHOW_LONG}, - {"Com_show_slave_status", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_STAT),SHOW_LONG}, - {"Com_show_status", (char*) (com_stat+(uint) SQLCOM_SHOW_STATUS),SHOW_LONG}, - {"Com_show_storage_engines", (char*) (com_stat+(uint) SQLCOM_SHOW_STORAGE_ENGINES),SHOW_LONG}, - {"Com_show_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_TABLES),SHOW_LONG}, - {"Com_show_variables", (char*) (com_stat+(uint) SQLCOM_SHOW_VARIABLES),SHOW_LONG}, - {"Com_show_warnings", (char*) (com_stat+(uint) SQLCOM_SHOW_WARNS),SHOW_LONG}, - {"Com_slave_start", (char*) (com_stat+(uint) SQLCOM_SLAVE_START),SHOW_LONG}, - {"Com_slave_stop", (char*) (com_stat+(uint) SQLCOM_SLAVE_STOP),SHOW_LONG}, - {"Com_truncate", (char*) (com_stat+(uint) SQLCOM_TRUNCATE),SHOW_LONG}, - {"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG}, - {"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG}, - {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG}, + {"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), + SHOW_LONG_STATUS}, + {"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), + SHOW_LONG_STATUS}, + {"Com_admin_commands", (char*) offsetof(STATUS_VAR, com_other), + SHOW_LONG_STATUS}, + {"Com_alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS}, + {"Com_alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS}, + {"Com_backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS}, + {"Com_begin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS}, + {"Com_change_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS}, + {"Com_change_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS}, + {"Com_check", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS}, + {"Com_checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS}, + {"Com_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS}, + {"Com_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS}, + {"Com_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS}, + {"Com_dealloc_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS}, + {"Com_delete", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS}, + {"Com_delete_multi", (char*) 
offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS}, + {"Com_do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS}, + {"Com_drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS}, + {"Com_drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS}, + {"Com_drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS}, + {"Com_drop_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS}, + {"Com_execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS}, + {"Com_flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS}, + {"Com_grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS}, + {"Com_ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS}, + {"Com_ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS}, + {"Com_ha_read", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS}, + {"Com_help", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS}, + {"Com_insert", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS}, + {"Com_insert_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS}, + {"Com_kill", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS}, + {"Com_load", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS}, + {"Com_load_master_data", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS}, + {"Com_load_master_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_lock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_optimize", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS}, + {"Com_preload_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS}, + {"Com_prepare_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS}, + {"Com_purge", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS}, + {"Com_purge_before_date", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS}, + {"Com_rename_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS}, + {"Com_repair", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS}, + {"Com_replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS}, + {"Com_replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS}, + {"Com_reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS}, + {"Com_restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS}, + {"Com_revoke", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS}, + {"Com_revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS}, + {"Com_rollback", (char*) offsetof(STATUS_VAR, 
com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS}, + {"Com_savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS}, + {"Com_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS}, + {"Com_set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS}, + {"Com_show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS}, + {"Com_show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS}, + {"Com_show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS}, + {"Com_show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS}, + {"Com_show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS}, + {"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS}, + {"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS}, + {"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS}, + {"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS}, + {"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS}, + {"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS}, + {"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS}, + {"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS}, + {"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS}, + {"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS}, + {"Com_show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS}, + {"Com_show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS}, + {"Com_show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS}, + {"Com_show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS}, + {"Com_show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS}, + {"Com_show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS}, + {"Com_slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS}, + {"Com_slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS}, + 
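The rewritten status_vars[] entries above no longer point at global counters; each stores offsetof(STATUS_VAR, field) in its value slot and is marked SHOW_LONG_STATUS, so the same table can be resolved against any per-thread or aggregated status block. A minimal sketch of that resolution step, with simplified stand-in types rather than the real STATUS_VAR/show_var_st:

    #include <cstddef>
    #include <cstdio>

    struct DemoStatusVar
    {
      unsigned long bytes_sent;
      unsigned long com_select;
    };

    struct DemoShowVar
    {
      const char *name;
      char *value;            /* holds a member offset, not a real pointer */
    };

    int main()
    {
      DemoStatusVar thread_status= { 1024, 7 };
      DemoShowVar entry= { "Com_select",
                           (char *) offsetof(DemoStatusVar, com_select) };
      /* resolve the entry by adding its stored offset to the chosen block */
      unsigned long value=
        *(unsigned long *) ((char *) &thread_status + (size_t) entry.value);
      std::printf("%s = %lu\n", entry.name, value);
      return 0;
    }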
{"Com_truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS}, + {"Com_unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_update", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS}, + {"Com_update_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS}, {"Connections", (char*) &thread_id, SHOW_LONG_CONST}, - {"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG}, + {"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, + created_tmp_disk_tables), + SHOW_LONG_STATUS}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, - {"Created_tmp_tables", (char*) &created_tmp_tables, SHOW_LONG}, + {"Created_tmp_tables", (char*) offsetof(STATUS_VAR, + created_tmp_tables), + SHOW_LONG_STATUS}, {"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG}, {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_CONST}, {"Delayed_writes", (char*) &delayed_insert_writes, SHOW_LONG}, {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST}, - {"Handler_commit", (char*) &ha_commit_count, SHOW_LONG}, - {"Handler_delete", (char*) &ha_delete_count, SHOW_LONG}, + {"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), + SHOW_LONG_STATUS}, + {"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), + SHOW_LONG_STATUS}, {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, - {"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG}, - {"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG}, - {"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG}, - {"Handler_read_prev", (char*) &ha_read_prev_count, SHOW_LONG}, - {"Handler_read_rnd", (char*) &ha_read_rnd_count, SHOW_LONG}, - {"Handler_read_rnd_next", (char*) &ha_read_rnd_next_count, SHOW_LONG}, - {"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG}, - {"Handler_update", (char*) &ha_update_count, SHOW_LONG}, - {"Handler_write", (char*) &ha_write_count, SHOW_LONG}, + {"Handler_read_first", (char*) offsetof(STATUS_VAR, + ha_read_first_count), + SHOW_LONG_STATUS}, + {"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), + SHOW_LONG_STATUS}, + {"Handler_read_next", (char*) offsetof(STATUS_VAR, + ha_read_next_count), + SHOW_LONG_STATUS}, + {"Handler_read_prev", (char*) offsetof(STATUS_VAR, + ha_read_prev_count), + SHOW_LONG_STATUS}, + {"Handler_read_rnd", (char*) offsetof(STATUS_VAR, ha_read_rnd_count), + SHOW_LONG_STATUS}, + {"Handler_read_rnd_next", (char*) offsetof(STATUS_VAR, + ha_read_rnd_next_count), + SHOW_LONG_STATUS}, + {"Handler_rollback", (char*) offsetof(STATUS_VAR, ha_rollback_count), + SHOW_LONG_STATUS}, + {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), + SHOW_LONG_STATUS}, + {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), + SHOW_LONG_STATUS}, {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG}, {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, @@ -5275,12 +5405,14 @@ struct show_var_st status_vars[]= { SHOW_KEY_CACHE_LONG}, {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONG}, + {"Last_query_cost", (char*) &last_query_cost, SHOW_DOUBLE}, {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST}, {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST}, {"Open_streams", 
(char*) &my_stream_opened, SHOW_LONG_CONST}, {"Open_tables", (char*) 0, SHOW_OPENTABLES}, - {"Opened_tables", (char*) &opened_tables, SHOW_LONG}, + {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), + SHOW_LONG_STATUS}, #ifdef HAVE_QUERY_CACHE {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST}, @@ -5296,19 +5428,36 @@ struct show_var_st status_vars[]= { #endif /*HAVE_QUERY_CACHE*/ {"Questions", (char*) 0, SHOW_QUESTION}, {"Rpl_status", (char*) 0, SHOW_RPL_STATUS}, - {"Select_full_join", (char*) &select_full_join_count, SHOW_LONG}, - {"Select_full_range_join", (char*) &select_full_range_join_count, SHOW_LONG}, - {"Select_range", (char*) &select_range_count, SHOW_LONG}, - {"Select_range_check", (char*) &select_range_check_count, SHOW_LONG}, - {"Select_scan", (char*) &select_scan_count, SHOW_LONG}, + {"Select_full_join", (char*) offsetof(STATUS_VAR, + select_full_join_count), + SHOW_LONG_STATUS}, + {"Select_full_range_join", (char*) offsetof(STATUS_VAR, + select_full_range_join_count), + SHOW_LONG_STATUS}, + {"Select_range", (char*) offsetof(STATUS_VAR, + select_range_count), + SHOW_LONG_STATUS}, + {"Select_range_check", (char*) offsetof(STATUS_VAR, + select_range_check_count), + SHOW_LONG_STATUS}, + {"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), + SHOW_LONG_STATUS}, {"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG}, {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING}, {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, - {"Slow_queries", (char*) &long_query_count, SHOW_LONG}, - {"Sort_merge_passes", (char*) &filesort_merge_passes, SHOW_LONG}, - {"Sort_range", (char*) &filesort_range_count, SHOW_LONG}, - {"Sort_rows", (char*) &filesort_rows, SHOW_LONG}, - {"Sort_scan", (char*) &filesort_scan_count, SHOW_LONG}, + {"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), + SHOW_LONG_STATUS}, + {"Sort_merge_passes", (char*) offsetof(STATUS_VAR, + filesort_merge_passes), + SHOW_LONG_STATUS}, + {"Sort_range", (char*) offsetof(STATUS_VAR, + filesort_range_count), + SHOW_LONG_STATUS}, + {"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows), + SHOW_LONG_STATUS}, + {"Sort_scan", (char*) offsetof(STATUS_VAR, + filesort_scan_count), + SHOW_LONG_STATUS}, #ifdef HAVE_OPENSSL {"Ssl_accept_renegotiates", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE}, {"Ssl_accepts", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT}, @@ -5439,27 +5588,22 @@ static void mysql_init_variables(void) test_flags= select_errors= dropping_tables= ha_open_options=0; thread_count= thread_running= kill_cached_threads= wake_thread=0; slave_open_temp_tables= 0; - com_other= 0; cached_thread_count= 0; - bytes_sent= bytes_received= 0; opt_endinfo= using_udf_functions= 0; opt_using_transactions= using_update_log= 0; abort_loop= select_thread_in_use= signal_thread_in_use= 0; ready_to_exit= shutdown_in_progress= grant_option= 0; - long_query_count= aborted_threads= aborted_connects= 0; + aborted_threads= aborted_connects= 0; delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0; delayed_insert_errors= thread_created= 0; - filesort_rows= filesort_range_count= filesort_scan_count= 0; - filesort_merge_passes= select_range_check_count= select_range_count= 0; - select_scan_count= select_full_range_join_count= select_full_join_count= 0; - specialflag= opened_tables= created_tmp_tables= created_tmp_disk_tables= 0; + specialflag= 0; binlog_cache_use= binlog_cache_disk_use= 0; max_used_connections= slow_launch_threads = 0; mysqld_user= 
mysqld_chroot= opt_init_file= opt_bin_logname = 0; errmesg= 0; mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS; bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); - bzero((gptr) &com_stat, sizeof(com_stat)); + bzero((char *) &global_status_var, sizeof(global_status_var)); /* Character sets */ system_charset_info= &my_charset_utf8_general_ci; @@ -6039,7 +6183,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *end; uint length= strlen(argument); long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err); - if (test_if_int(argument,(uint) length, end, &my_charset_latin1)) + if (end == argument+length) berkeley_lock_scan_time= value; else { diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 5985cf63ed6..bcb1f8634c0 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -93,11 +93,10 @@ extern uint test_flags; extern ulong bytes_sent, bytes_received, net_big_packet_count; extern pthread_mutex_t LOCK_bytes_sent , LOCK_bytes_received; extern void query_cache_insert(NET *net, const char *packet, ulong length); +#define update_statistics(A) A #else -#undef statistic_add -#undef statistic_increment -#define statistic_add(A,B,C) -#define statistic_increment(A,B) +#define update_statistics(A) +#define thd_increment_bytes_sent() #endif #define TEST_BLOCKING 8 @@ -119,6 +118,7 @@ my_bool my_net_init(NET *net, Vio* vio) net->buff_end=net->buff+net->max_packet; net->vio = vio; net->no_send_ok = 0; + net->no_send_eof = 0; net->error=0; net->return_errno=0; net->return_status=0; net->pkt_nr=net->compress_pkt_nr=0; net->write_pos=net->read_pos = net->buff; @@ -552,7 +552,7 @@ net_real_write(NET *net,const char *packet,ulong len) break; } pos+=length; - statistic_add(bytes_sent,length,&LOCK_bytes_sent); + update_statistics(thd_increment_bytes_sent(length)); } #ifndef __WIN__ end: @@ -623,7 +623,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, DBUG_PRINT("enter",("bytes_to_skip: %u", (uint) remain)); /* The following is good for debugging */ - statistic_increment(net_big_packet_count,&LOCK_bytes_received); + update_statistics(thd_increment_net_big_packet_count(1)); if (!thr_alarm_in_use(alarmed)) { @@ -639,7 +639,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, uint length= min(remain, net->max_packet); if (net_safe_read(net, (char*) net->buff, length, alarmed)) DBUG_RETURN(1); - statistic_add(bytes_received, length, &LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); remain -= (uint32) length; } if (old != MAX_PACKET_LENGTH) @@ -764,7 +764,7 @@ my_real_read(NET *net, ulong *complen) } remain -= (uint32) length; pos+= (ulong) length; - statistic_add(bytes_received,(ulong) length,&LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); } if (i == 0) { /* First parts is packet length */ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index d25901e56b1..f036cbc799b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -23,6 +23,19 @@ */ +/* + Classes in this file are used in the following way: + 1. For a selection condition a tree of SEL_IMERGE/SEL_TREE/SEL_ARG objects + is created. #of rows in table and index statistics are ignored at this + step. + 2. Created SEL_TREE and index stats data are used to construct a + TABLE_READ_PLAN-derived object (TRP_*). Several 'candidate' table read + plans may be created. + 3. 
The least expensive table read plan is used to create a tree of + QUICK_SELECT_I-derived objects which are later used for row retrieval. + QUICK_RANGEs are also created in this step. +*/ + #ifdef __GNUC__ #pragma implementation // gcc: Class implementation #endif @@ -167,8 +180,7 @@ public: min_value=arg->max_value; min_flag=arg->max_flag & NEAR_MAX ? 0 : NEAR_MIN; } - void store(uint length,char **min_key,uint min_key_flag, - char **max_key, uint max_key_flag) + void store_min(uint length,char **min_key,uint min_key_flag) { if ((min_flag & GEOM_FLAG) || (!(min_flag & NO_MIN_RANGE) && @@ -183,6 +195,11 @@ public: memcpy(*min_key,min_value,length); (*min_key)+= length; } + } + void store(uint length,char **min_key,uint min_key_flag, + char **max_key, uint max_key_flag) + { + store_min(length, min_key, min_key_flag); if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) { @@ -267,31 +284,78 @@ public: SEL_ARG *clone_tree(); }; +class SEL_IMERGE; + class SEL_TREE :public Sql_alloc { public: enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type; SEL_TREE(enum Type type_arg) :type(type_arg) {} - SEL_TREE() :type(KEY) { bzero((char*) keys,sizeof(keys));} + SEL_TREE() :type(KEY) + { + keys_map.clear_all(); + bzero((char*) keys,sizeof(keys)); + } SEL_ARG *keys[MAX_KEY]; + key_map keys_map; /* bitmask of non-NULL elements in keys */ + + /* + Possible ways to read rows using index_merge. The list is non-empty only + if type==KEY. Currently can be non empty only if keys_map.is_clear_all(). + */ + List<SEL_IMERGE> merges; + + /* The members below are filled/used only after get_mm_tree is done */ + key_map ror_scans_map; /* bitmask of ROR scan-able elements in keys */ + uint n_ror_scans; /* number of set bits in ror_scans_map */ + + struct st_ror_scan_info **ror_scans; /* list of ROR key scans */ + struct st_ror_scan_info **ror_scans_end; /* last ROR scan */ + /* Note that #records for each key scan is stored in table->quick_rows */ }; typedef struct st_qsel_param { THD *thd; TABLE *table; - KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY]; + KEY_PART *key_parts,*key_parts_end; + KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */ MEM_ROOT *mem_root; table_map prev_tables,read_tables,current_table; - uint baseflag, keys, max_key_part, range_count; + uint baseflag, max_key_part, range_count; + + uint keys; /* number of keys used in the query */ + + /* used_key_no -> table_key_no translation table */ uint real_keynr[MAX_KEY]; + char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH], max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; bool quick; // Don't calulate possible keys COND *cond; + + uint fields_bitmap_size; + MY_BITMAP needed_fields; /* bitmask of fields needed by the query */ + + key_map *needed_reg; /* ptr to SQL_SELECT::needed_reg */ + + uint *imerge_cost_buff; /* buffer for index_merge cost estimates */ + uint imerge_cost_buff_size; /* size of the buffer */ + + /* TRUE if last checked tree->key can be used for ROR-scan */ + bool is_ror_scan; } PARAM; +class TABLE_READ_PLAN; + class TRP_RANGE; + class TRP_ROR_INTERSECT; + class TRP_ROR_UNION; + class TRP_ROR_INDEX_MERGE; + class TRP_GROUP_MIN_MAX; + +struct st_ror_scan_info; + static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field, Item_func::Functype type,Item *value, Item_result cmp_type); @@ -299,32 +363,279 @@ static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field, KEY_PART *key_part, Item_func::Functype type,Item *value); static SEL_TREE *get_mm_tree(PARAM *param,COND 
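/*
  [editor's note] A condensed sketch of the three phases described in the
  header comment above, as they appear later in this file in
  SQL_SELECT::test_quick_select() (control flow simplified, error handling
  and the other plan candidates omitted):

    SEL_TREE *tree= get_mm_tree(&param, cond);            // 1: condition -> SEL_TREE / SEL_IMERGE
    TABLE_READ_PLAN *best_trp=
      get_key_scans_params(&param, tree, FALSE, best_read_time);
                                                          // 2: one of several candidate plans
                                                          //    (range, ROR, index_merge, group-min-max)
                                                          //    competing on read_cost
    QUICK_SELECT_I *quick= best_trp->make_quick(&param, TRUE);  // 3: plan -> QUICK_* object
    quick->init();                                        // ready for row retrieval
*/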
*cond); + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts); static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag); -static QUICK_SELECT *get_quick_select(PARAM *param,uint index, - SEL_ARG *key_tree); +QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index, + SEL_ARG *key_tree, + MEM_ROOT *alloc = NULL); +static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, + bool index_read_must_be_used, + double read_time); +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering); +static +TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, + SEL_TREE *tree, + double read_time); +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time); +static +TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree); +static int get_index_merge_params(PARAM *param, key_map& needed_reg, + SEL_IMERGE *imerge, double *read_time, + ha_rows* imerge_rows); +inline double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr); + #ifndef DBUG_OFF -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg); +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg); +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end); +static void print_rowid(byte* val, int len); +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); #endif + static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); -static bool get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key,uint max_key_flag); static bool eq_tree(SEL_ARG* a,SEL_ARG *b); static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE); -static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); +static bool null_part_in_key(KEY_PART *key_part, const char *key, + uint length); +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param); + + +/* + SEL_IMERGE is a list of possible ways to do index merge, i.e. it is + a condition in the following form: + (t_1||t_2||...||t_N) && (next) + + where all t_i are SEL_TREEs, next is another SEL_IMERGE and no pair + (t_i,t_j) contains SEL_ARGS for the same index. + + SEL_TREE contained in SEL_IMERGE always has merges=NULL. + + This class relies on memory manager to do the cleanup. 
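  [editor's note] Illustration (the condition is an example, not from this
  patch): for a WHERE clause of the form

      key1_col < 10 OR key2_col = 5

  the optimizer builds one SEL_IMERGE whose trees[] array holds two
  SEL_TREEs, one carrying a SEL_ARG interval on key1 and one on key2; any
  remaining conjuncts of the WHERE clause belong to the "(next)" part shown
  above.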
+*/ + +class SEL_IMERGE : public Sql_alloc +{ + enum { PREALLOCED_TREES= 10}; +public: + SEL_TREE *trees_prealloced[PREALLOCED_TREES]; + SEL_TREE **trees; /* trees used to do index_merge */ + SEL_TREE **trees_next; /* last of these trees */ + SEL_TREE **trees_end; /* end of allocated space */ + + SEL_ARG ***best_keys; /* best keys to read in SEL_TREEs */ + + SEL_IMERGE() : + trees(&trees_prealloced[0]), + trees_next(trees), + trees_end(trees + PREALLOCED_TREES) + {} + int or_sel_tree(PARAM *param, SEL_TREE *tree); + int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree); + int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge); +}; + + +/* + Add SEL_TREE to this index_merge without any checks, + + NOTES + This function implements the following: + (x_1||...||x_N) || t = (x_1||...||x_N||t), where x_i, t are SEL_TREEs + + RETURN + 0 - OK + -1 - Out of memory. +*/ + +int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree) +{ + if (trees_next == trees_end) + { + const int realloc_ratio= 2; /* Double size for next round */ + uint old_elements= (trees_end - trees); + uint old_size= sizeof(SEL_TREE**) * old_elements; + uint new_size= old_size * realloc_ratio; + SEL_TREE **new_trees; + if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size))) + return -1; + memcpy(new_trees, trees, old_size); + trees= new_trees; + trees_next= trees + old_elements; + trees_end= trees + old_elements * realloc_ratio; + } + *(trees_next++)= tree; + return 0; +} + + +/* + Perform OR operation on this SEL_IMERGE and supplied SEL_TREE new_tree, + combining new_tree with one of the trees in this SEL_IMERGE if they both + have SEL_ARGs for the same key. + + SYNOPSIS + or_sel_tree_with_checks() + param PARAM from SQL_SELECT::test_quick_select + new_tree SEL_TREE with type KEY or KEY_SMALLER. + + NOTES + This does the following: + (t_1||...||t_k)||new_tree = + either + = (t_1||...||t_k||new_tree) + or + = (t_1||....||(t_j|| new_tree)||...||t_k), + + where t_i, y are SEL_TREEs. + new_tree is combined with the first t_j it has a SEL_ARG on common + key with. As a consequence of this, choice of keys to do index_merge + read may depend on the order of conditions in WHERE part of the query. + + RETURN + 0 OK + 1 One of the trees was combined with new_tree to SEL_TREE::ALWAYS, + and (*this) should be discarded. + -1 An error occurred. +*/ + +int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree) +{ + for (SEL_TREE** tree = trees; + tree != trees_next; + tree++) + { + if (sel_trees_can_be_ored(*tree, new_tree, param)) + { + *tree = tree_or(param, *tree, new_tree); + if (!*tree) + return 1; + if (((*tree)->type == SEL_TREE::MAYBE) || + ((*tree)->type == SEL_TREE::ALWAYS)) + return 1; + /* SEL_TREE::IMPOSSIBLE is impossible here */ + return 0; + } + } + + /* New tree cannot be combined with any of existing trees. */ + return or_sel_tree(param, new_tree); +} + + +/* + Perform OR operation on this index_merge and supplied index_merge list. + + RETURN + 0 - OK + 1 - One of conditions in result is always TRUE and this SEL_IMERGE + should be discarded. + -1 - An error occurred +*/ + +int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge) +{ + for (SEL_TREE** tree= imerge->trees; + tree != imerge->trees_next; + tree++) + { + if (or_sel_tree_with_checks(param, *tree)) + return 1; + } + return 0; +} + + +/* + Perform AND operation on two index_merge lists and store result in *im1. 
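  [editor's note] Since conjunction simply concatenates conjunct lists, i.e.
  (a_1 && ... && a_N) && (b_1 && ... && b_K) =
  (a_1 && ... && a_N && b_1 && ... && b_K),
  the body below only appends im2's elements to im1 via List::concat().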
+*/ + +inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2) +{ + im1->concat(im2); +} + + +/* + Perform OR operation on 2 index_merge lists, storing result in first list. + + NOTES + The following conversion is implemented: + (a_1 &&...&& a_N)||(b_1 &&...&& b_K) = AND_i,j(a_i || b_j) => + => (a_1||b_1). + + i.e. all conjuncts except the first one are currently dropped. + This is done to avoid producing N*K ways to do index_merge. + + If (a_1||b_1) produce a condition that is always TRUE, NULL is returned + and index_merge is discarded (while it is actually possible to try + harder). + + As a consequence of this, choice of keys to do index_merge read may depend + on the order of conditions in WHERE part of the query. + + RETURN + 0 OK, result is stored in *im1 + other Error, both passed lists are unusable +*/ + +int imerge_list_or_list(PARAM *param, + List<SEL_IMERGE> *im1, + List<SEL_IMERGE> *im2) +{ + SEL_IMERGE *imerge= im1->head(); + im1->empty(); + im1->push_back(imerge); + + return imerge->or_sel_imerge_with_checks(param, im2->head()); +} + + +/* + Perform OR operation on index_merge list and key tree. + + RETURN + 0 OK, result is stored in *im1. + other Error +*/ + +int imerge_list_or_tree(PARAM *param, + List<SEL_IMERGE> *im1, + SEL_TREE *tree) +{ + SEL_IMERGE *imerge; + List_iterator<SEL_IMERGE> it(*im1); + while((imerge= it++)) + { + if (imerge->or_sel_tree_with_checks(param, tree)) + it.remove(); + } + return im1->is_empty(); +} /*************************************************************************** -** Basic functions for SQL_SELECT and QUICK_SELECT +** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ /* make a select from mysql info @@ -334,13 +645,15 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); */ SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error) + table_map read_tables, COND *conds, int *error, + bool allow_null_cond) { SQL_SELECT *select; DBUG_ENTER("make_select"); *error=0; - if (!conds) + + if (!conds && !allow_null_cond) DBUG_RETURN(0); if (!(select= new SQL_SELECT)) { @@ -380,7 +693,7 @@ void SQL_SELECT::cleanup() free_cond=0; delete cond; cond= 0; - } + } close_cached_file(&file); } @@ -392,11 +705,22 @@ SQL_SELECT::~SQL_SELECT() #undef index // Fix for Unixware 7 -QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) - :dont_free(0),sorted(0),error(0),index(key_nr),max_used_key_length(0), - used_key_parts(0), head(table), it(ranges),range(0) +QUICK_SELECT_I::QUICK_SELECT_I() + :max_used_key_length(0), + used_key_parts(0) +{} + +QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr, + bool no_alloc, MEM_ROOT *parent_alloc) + :dont_free(0),error(0),free_file(0),cur_range(NULL),range(0) { - if (!no_alloc) + sorted= 0; + index= key_nr; + head= table; + key_part_info= head->key_info[index].key_part; + my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16); + + if (!no_alloc && !parent_alloc) { // Allocates everything through the internal memroot init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); @@ -404,21 +728,431 @@ QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) } else bzero((char*) &alloc,sizeof(alloc)); - file=head->file; - record=head->record[0]; - init(); + file= head->file; + record= head->record[0]; } -QUICK_SELECT::~QUICK_SELECT() + +int 
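/*
  [editor's note] Worked instance of the conversion performed by
  imerge_list_or_list() above (the condition is illustrative only): for
  (c1 AND c2) OR (c3 AND c4) the full expansion would be
  (c1 OR c3) AND (c1 OR c4) AND (c2 OR c3) AND (c2 OR c4); only the first
  conjunct (c1 OR c3) is kept as the index_merge candidate, which is why the
  chosen keys can depend on the order of conditions in the WHERE clause.
*/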
QUICK_RANGE_SELECT::init() +{ + DBUG_ENTER("QUICK_RANGE_SELECT::init"); + if (file->inited == handler::NONE) + DBUG_RETURN(error= file->ha_index_init(index)); + error= 0; + DBUG_RETURN(0); +} + + +void QUICK_RANGE_SELECT::range_end() +{ + if (file->inited != handler::NONE) + file->ha_index_end(); +} + + +QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() { + DBUG_ENTER("QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT"); if (!dont_free) { - if (file->inited) - file->ha_index_end(); + /* file is NULL for CPK scan on covering ROR-intersection */ + if (file) + { + range_end(); + file->extra(HA_EXTRA_NO_KEYREAD); + if (free_file) + { + DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file, + free_file)); + file->reset(); + file->close(); + } + } + delete_dynamic(&ranges); /* ranges are allocated in alloc */ free_root(&alloc,MYF(0)); } + DBUG_VOID_RETURN; +} + + +QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param, + TABLE *table) + :cur_quick_it(quick_selects),pk_quick_select(NULL),unique(NULL), + thd(thd_param) +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT"); + index= MAX_KEY; + head= table; + bzero(&read_record, sizeof(read_record)); + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + DBUG_VOID_RETURN; +} + +int QUICK_INDEX_MERGE_SELECT::init() +{ + cur_quick_it.rewind(); + cur_quick_select= cur_quick_it++; + return 0; +} + +int QUICK_INDEX_MERGE_SELECT::reset() +{ + int result; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset"); + result= cur_quick_select->reset() || prepare_unique(); + DBUG_RETURN(result); +} + +bool +QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range) +{ + /* + Save quick_select that does scan on clustered primary key as it will be + processed separately. + */ + if (head->file->primary_key_is_clustered() && + quick_sel_range->index == head->primary_key) + pk_quick_select= quick_sel_range; + else + return quick_selects.push_back(quick_sel_range); + return 0; +} + +QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT() +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT"); + delete unique; + quick_selects.delete_elements(); + delete pk_quick_select; + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param, + TABLE *table, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + : cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows) +{ + index= MAX_KEY; + head= table; + record= head->record[0]; + if (!parent_alloc) + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + else + bzero(&alloc, sizeof(MEM_ROOT)); + last_rowid= (byte*)alloc_root(parent_alloc? parent_alloc : &alloc, + head->file->ref_length); +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::init() +{ + /* Check if last_rowid was successfully allocated in ctor */ + return !last_rowid; +} + + +/* + Initialize this quick select to be a ROR-merged scan. + + SYNOPSIS + QUICK_RANGE_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create a separate + handler object + + NOTES + This function creates and prepares for subsequent use a separate handler + object if it can't reuse head->file. The reason for this is that during + ROR-merge several key scans are performed simultaneously, and a single + handler is only capable of preserving context of a single key scan. 
+ + In ROR-merge the quick select doing merge does full records retrieval, + merged quick selects read only keys. + + RETURN + 0 ROR child scan initialized, ok to use. + 1 error +*/ + +int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + handler *save_file= file; + DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan"); + + if (reuse_handler) + { + DBUG_PRINT("info", ("Reusing handler %p", file)); + if (file->extra(HA_EXTRA_KEYREAD) || + file->extra(HA_EXTRA_RETRIEVE_ALL_COLS) | + init() || reset()) + { + DBUG_RETURN(1); + } + DBUG_RETURN(0); + } + + /* Create a separate handler object for this quick select */ + if (free_file) + { + /* already have own 'handler' object. */ + DBUG_RETURN(0); + } + + if (!(file= get_new_handler(head, head->db_type))) + goto failure; + DBUG_PRINT("info", ("Allocated new handler %p", file)); + if (file->ha_open(head->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + { + /* Caller will free the memory */ + goto failure; + } + + if (file->extra(HA_EXTRA_KEYREAD) || + file->extra(HA_EXTRA_RETRIEVE_ALL_COLS) || + init() || reset()) + { + file->close(); + goto failure; + } + free_file= TRUE; + last_rowid= file->ref; + DBUG_RETURN(0); + +failure: + file= save_file; + DBUG_RETURN(1); +} + + +/* + Initialize this quick select to be a part of a ROR-merged scan. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create separate + handler object. + RETURN + 0 OK + other error code +*/ +int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan"); + + /* Initialize all merged "children" quick selects */ + DBUG_ASSERT(!(need_to_fetch_row && !reuse_handler)); + if (!need_to_fetch_row && reuse_handler) + { + quick= quick_it++; + /* + There is no use of this->file. Use it for the first of merged range + selects. + */ + if (quick->init_ror_merged_scan(TRUE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + } + while((quick= quick_it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + /* All merged scans share the same record buffer in intersection. */ + quick->record= head->record[0]; + } + + if (need_to_fetch_row && head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::reset() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset"); + DBUG_RETURN(init_ror_merged_scan(TRUE)); +} + + +/* + Add a merged quick select to this ROR-intersection quick select. + + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::push_quick_back() + quick Quick select to be added. The quick select must return + rows in rowid order. + NOTES + This call can only be made before init() is called. + + RETURN + FALSE OK + TRUE Out of memory. 
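  [editor's note] Rough outline of how the merged scans are presumably
  combined at retrieval time (the real get_next() is defined further down in
  this file and also handles the optional CPK scan): every merged scan
  returns rowids in ascending order, so get_next() repeatedly advances
  whichever scan is behind (using handler::cmp_ref() against last_rowid)
  until all scans report the same rowid; only then, if need_to_fetch_row is
  set, is the full row fetched through head->file on that rowid.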
+*/ + +bool +QUICK_ROR_INTERSECT_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick) +{ + return quick_selects.push_back(quick); +} + +QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT"); + quick_selects.delete_elements(); + delete cpk_quick; + free_root(&alloc,MYF(0)); + if (need_to_fetch_row && head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param, + TABLE *table) + :thd(thd_param) +{ + index= MAX_KEY; + head= table; + rowid_length= table->file->ref_length; + record= head->record[0]; + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + thd_param->mem_root= &alloc; +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_UNION_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::init() +{ + if (init_queue(&queue, quick_selects.elements, 0, + FALSE , QUICK_ROR_UNION_SELECT::queue_cmp, + (void*) this)) + { + bzero(&queue, sizeof(QUEUE)); + return 1; + } + + if (!(cur_rowid= (byte*)alloc_root(&alloc, 2*head->file->ref_length))) + return 1; + prev_rowid= cur_rowid + head->file->ref_length; + return 0; +} + + +/* + Comparison function to be used QUICK_ROR_UNION_SELECT::queue priority + queue. + + SYNPOSIS + QUICK_ROR_UNION_SELECT::queue_cmp() + arg Pointer to QUICK_ROR_UNION_SELECT + val1 First merged select + val2 Second merged select +*/ +int QUICK_ROR_UNION_SELECT::queue_cmp(void *arg, byte *val1, byte *val2) +{ + QUICK_ROR_UNION_SELECT *self= (QUICK_ROR_UNION_SELECT*)arg; + return self->head->file->cmp_ref(((QUICK_SELECT_I*)val1)->last_rowid, + ((QUICK_SELECT_I*)val2)->last_rowid); +} + + +/* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::reset() +{ + QUICK_SELECT_I* quick; + int error; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::reset"); + have_prev_rowid= FALSE; + /* + Initialize scans for merged quick selects and put all merged quick + selects into the queue. + */ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + if ((error= quick->get_next())) + { + if (error == HA_ERR_END_OF_FILE) + continue; + DBUG_RETURN(error); + } + quick->save_last_pos(); + queue_insert(&queue, (byte*)quick); + } + + if (head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + + DBUG_RETURN(0); +} + + +bool +QUICK_ROR_UNION_SELECT::push_quick_back(QUICK_SELECT_I *quick_sel_range) +{ + return quick_selects.push_back(quick_sel_range); } +QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() +{ + DBUG_ENTER("QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT"); + delete_queue(&queue); + quick_selects.delete_elements(); + if (head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + QUICK_RANGE::QUICK_RANGE() :min_key(0),max_key(0),min_length(0),max_length(0), flag(NO_MIN_RANGE | NO_MAX_RANGE) @@ -582,28 +1316,269 @@ SEL_ARG *SEL_ARG::clone_tree() return root; } + /* - Test if a key can be used in different ranges + Table rows retrieval plan. Range optimizer creates QUICK_SELECT_I-derived + objects from table read plans. +*/ +class TABLE_READ_PLAN +{ +public: + /* + Plan read cost, with or without cost of full row retrieval, depending + on plan creation parameters. 
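/*
  [editor's note] The union counterpart merges the sorted rowid streams using
  the priority queue set up in QUICK_ROR_UNION_SELECT::reset() above. A
  sketch of the retrieval loop, inferred from reset() and queue_cmp() rather
  than quoted from this patch: take the scan at the top of the queue, copy
  its last_rowid into cur_rowid, skip it when have_prev_rowid is set and
  cur_rowid equals prev_rowid (a duplicate produced by another scan),
  advance that scan with get_next() and re-insert or drop it, and otherwise
  fetch the full row from head->file at cur_rowid.
*/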
+ */ + double read_cost; + ha_rows records; /* estimate of #rows to be examined */ + + /* + If TRUE, the scan returns rows in rowid order. This is used only for + scans that can be both ROR and non-ROR. + */ + bool is_ror; + + /* + Create quick select for this plan. + SYNOPSIS + make_quick() + param Parameter from test_quick_select + retrieve_full_rows If TRUE, created quick select will do full record + retrieval. + parent_alloc Memory pool to use, if any. + + NOTES + retrieve_full_rows is ignored by some implementations. + + RETURN + created quick select + NULL on any error. + */ + virtual QUICK_SELECT_I *make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc=NULL) = 0; + + /* Table read plans are allocated on MEM_ROOT and are never deleted */ + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr,size_t size) {} +}; + +class TRP_ROR_INTERSECT; +class TRP_ROR_UNION; +class TRP_INDEX_MERGE; + + +/* + Plan for a QUICK_RANGE_SELECT scan. + TRP_RANGE::make_quick ignores retrieve_full_rows parameter because + QUICK_RANGE_SELECT doesn't distinguish between 'index only' scans and full + record retrieval scans. +*/ + +class TRP_RANGE : public TABLE_READ_PLAN +{ +public: + SEL_ARG *key; /* set of intervals to be used in "range" method retrieval */ + uint key_idx; /* key number in PARAM::key */ + + TRP_RANGE(SEL_ARG *key_arg, uint idx_arg) + : key(key_arg), key_idx(idx_arg) + {} + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + { + DBUG_ENTER("TRP_RANGE::make_quick"); + QUICK_RANGE_SELECT *quick; + if ((quick= get_quick_select(param, key_idx, key, parent_alloc))) + { + quick->records= records; + quick->read_time= read_cost; + } + DBUG_RETURN(quick); + } +}; + + +/* Plan for QUICK_ROR_INTERSECT_SELECT scan. */ + +class TRP_ROR_INTERSECT : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + + /* Array of pointers to ROR range scans used in this intersection */ + struct st_ror_scan_info **first_scan; + struct st_ror_scan_info **last_scan; /* End of the above array */ + struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */ + bool is_covering; /* TRUE if no row retrieval phase is necessary */ + double index_scan_costs; /* SUM(cost(index_scan)) */ +}; + + +/* + Plan for QUICK_ROR_UNION_SELECT scan. + QUICK_ROR_UNION_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_ROR_UNION : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */ + TABLE_READ_PLAN **last_ror; /* end of the above array */ +}; + + +/* + Plan for QUICK_INDEX_MERGE_SELECT scan. + QUICK_ROR_INTERSECT_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_INDEX_MERGE : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */ + TRP_RANGE **range_scans_end; /* end of the array */ +}; + + +/* + Plan for a QUICK_GROUP_MIN_MAX_SELECT scan. 
+*/ + +class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN +{ +private: + bool have_min, have_max; + KEY_PART_INFO *min_max_arg_part; + uint group_prefix_len; + uint used_key_parts; + uint group_key_parts; + KEY *index_info; + uint index; + uint key_infix_len; + byte key_infix[MAX_KEY_LENGTH]; + SEL_TREE *range_tree; /* Represents all range predicates in the query. */ + SEL_ARG *index_tree; /* The SEL_ARG sub-tree corresponding to index_info. */ + uint param_idx; /* Index of used key in param->key. */ + /* Number of records selected by the ranges in index_tree. */ +public: + ha_rows quick_prefix_records; +public: + TRP_GROUP_MIN_MAX(bool have_min_arg, bool have_max_arg, + KEY_PART_INFO *min_max_arg_part_arg, + uint group_prefix_len_arg, uint used_key_parts_arg, + uint group_key_parts_arg, KEY *index_info_arg, + uint index_arg, uint key_infix_len_arg, + byte *key_infix_arg, + SEL_TREE *tree_arg, SEL_ARG *index_tree_arg, + uint param_idx_arg, ha_rows quick_prefix_records_arg) + : have_min(have_min_arg), have_max(have_max_arg), + min_max_arg_part(min_max_arg_part_arg), + group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg), + group_key_parts(group_key_parts_arg), index_info(index_info_arg), + index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg), + index_tree(index_tree_arg), param_idx(param_idx_arg), + quick_prefix_records(quick_prefix_records_arg) + { + if (key_infix_len) + memcpy(this->key_infix, key_infix_arg, key_infix_len); + } + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); +}; + +/* + Fill param->needed_fields with bitmap of fields used in the query. SYNOPSIS - SQL_SELECT::test_quick_select(thd,keys_to_use, prev_tables, - limit, force_quick_range) + fill_used_fields_bitmap() + param Parameter from test_quick_select function. + + NOTES + Clustered PK members are not put into the bitmap as they are implicitly + present in all keys (and it is impossible to avoid reading them). + RETURN + 0 Ok + 1 Out of memory. +*/ + +static int fill_used_fields_bitmap(PARAM *param) +{ + TABLE *table= param->table; + param->fields_bitmap_size= (table->fields/8 + 1); + uchar *tmp; + uint pk; + if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) || + bitmap_init(¶m->needed_fields, tmp, param->fields_bitmap_size*8, + FALSE)) + return 1; + + bitmap_clear_all(¶m->needed_fields); + for (uint i= 0; i < table->fields; i++) + { + if (param->thd->query_id == table->field[i]->query_id) + bitmap_set_bit(¶m->needed_fields, i+1); + } - Updates the following in the select parameter: - needed_reg - Bits for keys with may be used if all prev regs are read - quick - Parameter to use when reading records. 
- In the table struct the following information is updated: - quick_keys - Which keys can be used - quick_rows - How many rows the key matches + pk= param->table->primary_key; + if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY) + { + /* The table uses clustered PK and it is not internally generated */ + KEY_PART_INFO *key_part= param->table->key_info[pk].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[pk].key_parts; + for(;key_part != key_part_end; ++key_part) + { + bitmap_clear_bit(¶m->needed_fields, key_part->fieldnr); + } + } + return 0; +} - RETURN VALUES - -1 if impossible select - 0 if can't use quick_select - 1 if found usable range - TODO - check if the function really needs to modify keys_to_use, and change the - code to pass it by reference if not +/* + Test if a key can be used in different ranges + + SYNOPSIS + SQL_SELECT::test_quick_select() + thd Current thread + keys_to_use Keys to use for range retrieval + prev_tables Tables assumed to be already read when the scan is + performed (but not read at the moment of this call) + limit Query limit + force_quick_range Prefer to use range (instead of full table scan) even + if it is more expensive. + + NOTES + Updates the following in the select parameter: + needed_reg - Bits for keys with may be used if all prev regs are read + quick - Parameter to use when reading records. + + In the table struct the following information is updated: + quick_keys - Which keys can be used + quick_rows - How many rows the key matches + + TODO + Check if this function really needs to modify keys_to_use, and change the + code to pass it by reference if it doesn't. + + In addition to force_quick_range other means can be (an usually are) used + to make this function prefer range over full table scan. Figure out if + force_quick_range is really needed. + + RETURN + -1 if impossible select (i.e. certainly no rows will be selected) + 0 if can't use quick_select + 1 if found usable ranges and quick select has been successfully created. */ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, @@ -612,28 +1587,29 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, { uint idx; double scan_time; - DBUG_ENTER("test_quick_select"); + DBUG_ENTER("SQL_SELECT::test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); delete quick; quick=0; - needed_reg.clear_all(); quick_keys.clear_all(); - if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range || + needed_reg.clear_all(); + quick_keys.clear_all(); + if ((specialflag & SPECIAL_SAFE_MODE) && ! 
force_quick_range || !limit) DBUG_RETURN(0); /* purecov: inspected */ if (keys_to_use.is_clear_all()) DBUG_RETURN(0); - records=head->file->records; + records= head->file->records; if (!records) records++; /* purecov: inspected */ - scan_time=(double) records / TIME_FOR_COMPARE+1; - read_time=(double) head->file->scan_time()+ scan_time + 1.1; + scan_time= (double) records / TIME_FOR_COMPARE + 1; + read_time= (double) head->file->scan_time() + scan_time + 1.1; if (head->force_index) scan_time= read_time= DBL_MAX; if (limit < records) - read_time=(double) records+scan_time+1; // Force to use index + read_time= (double) records + scan_time + 1; // Force to use index else if (read_time <= 2.0 && !force_quick_range) DBUG_RETURN(0); /* No need for quick select */ @@ -643,7 +1619,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, if (!keys_to_use.is_clear_all()) { MEM_ROOT *old_root,alloc; - SEL_TREE *tree; + SEL_TREE *tree= NULL; KEY_PART *key_parts; KEY *key_info; PARAM param; @@ -657,11 +1633,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.table=head; param.keys=0; param.mem_root= &alloc; + param.needed_reg= &needed_reg; + param.imerge_cost_buff_size= 0; + thd->no_errors=1; // Don't warn about NULL init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc, sizeof(KEY_PART)* - head->key_parts))) + head->key_parts)) + || fill_used_fields_bitmap(¶m)) { thd->no_errors=0; free_root(&alloc,MYF(0)); // Return memory & allocator @@ -671,6 +1651,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, old_root= thd->mem_root; thd->mem_root= &alloc; + /* + Make an array with description of all key parts of all table keys. + This is used in get_mm_parts function. + */ key_info= head->key_info; for (idx=0 ; idx < head->keys ; idx++, key_info++) { @@ -698,81 +1682,139 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, } param.key_parts_end=key_parts; - if ((tree=get_mm_tree(¶m,cond))) + /* Calculate cost of full index read for the shortest covering index */ + if (!head->used_keys.is_clear_all()) + { + int key_for_use= find_shortest_key(head, &head->used_keys); + double key_read_time= (get_index_only_read_time(¶m, records, + key_for_use) + + (double) records / TIME_FOR_COMPARE); + DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, " + "read time %g", key_for_use, key_read_time)); + if (key_read_time < read_time) + read_time= key_read_time; + } + + TABLE_READ_PLAN *best_trp= NULL; + TRP_GROUP_MIN_MAX *group_trp; + double best_read_time= read_time; + + if (cond) + tree= get_mm_tree(¶m,cond); + + if (tree && tree->type == SEL_TREE::IMPOSSIBLE) + { + records=0L; /* Return -1 from this function. */ + read_time= (double) HA_POS_ERROR; + goto free_mem; + } + else if (tree && tree->type != SEL_TREE::KEY && + tree->type != SEL_TREE::KEY_SMALLER) + goto free_mem; + + + /* + Try to construct a QUICK_GROUP_MIN_MAX_SELECT. + Notice that it can be constructed no matter if there is a range tree. + */ + group_trp= get_best_group_min_max(¶m, tree); + if (group_trp && group_trp->read_cost < best_read_time) + { + best_trp= group_trp; + best_read_time= best_trp->read_cost; + } + + if (tree) { - if (tree->type == SEL_TREE::IMPOSSIBLE) + /* + It is possible to use a range-based quick select (but it might be + slower than 'all' table scan). 
+ */ + if (tree->merges.is_empty()) { - records=0L; // Return -1 from this function - read_time= (double) HA_POS_ERROR; + TRP_RANGE *range_trp; + TRP_ROR_INTERSECT *rori_trp; + bool can_build_covering= FALSE; + + /* Get best 'range' plan and prepare data for making other plans */ + if ((range_trp= get_key_scans_params(¶m, tree, FALSE, + best_read_time))) + { + best_trp= range_trp; + best_read_time= best_trp->read_cost; + } + + /* + Simultaneous key scans and row deletes on several handler + objects are not allowed so don't use ROR-intersection for + table deletes. + */ + if ((thd->lex->sql_command != SQLCOM_DELETE) )//&& +// (thd->lex->sql_command != SQLCOM_UPDATE)) + { + /* + Get best non-covering ROR-intersection plan and prepare data for + building covering ROR-intersection. + */ + if ((rori_trp= get_best_ror_intersect(¶m, tree, best_read_time, + &can_build_covering))) + { + best_trp= rori_trp; + best_read_time= best_trp->read_cost; + /* + Try constructing covering ROR-intersect only if it looks possible + and worth doing. + */ + if (!rori_trp->is_covering && can_build_covering && + (rori_trp= get_best_covering_ror_intersect(¶m, tree, + best_read_time))) + best_trp= rori_trp; + } + } } - else if (tree->type == SEL_TREE::KEY || - tree->type == SEL_TREE::KEY_SMALLER) + else { - SEL_ARG **key,**end,**best_key=0; + /* Try creating index_merge/ROR-union scan. */ + SEL_IMERGE *imerge; + TABLE_READ_PLAN *best_conj_trp= NULL, *new_conj_trp; + LINT_INIT(new_conj_trp); /* no empty index_merge lists possible */ + + DBUG_PRINT("info",("No range reads possible," + " trying to construct index_merge")); + List_iterator_fast<SEL_IMERGE> it(tree->merges); + while ((imerge= it++)) + { + new_conj_trp= get_best_disjunct_quick(¶m, imerge, best_read_time); + if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost < + best_conj_trp->read_cost)) + best_conj_trp= new_conj_trp; + } + if (best_conj_trp) + best_trp= best_conj_trp; + } + } + thd->mem_root= old_root; - for (idx=0,key=tree->keys, end=key+param.keys ; - key != end ; - key++,idx++) - { - ha_rows found_records; - double found_read_time; - if (*key) - { - uint keynr= param.real_keynr[idx]; - if ((*key)->type == SEL_ARG::MAYBE_KEY || - (*key)->maybe_flag) - needed_reg.set_bit(keynr); - - found_records=check_quick_select(¶m, idx, *key); - if (found_records != HA_POS_ERROR && found_records > 2 && - head->used_keys.is_set(keynr) && - (head->file->index_flags(keynr, param.max_key_part, 1) & - HA_KEYREAD_ONLY)) - { - /* - We can resolve this by only reading through this key. - Assume that we will read trough the whole key range - and that all key blocks are half full (normally things are - much better). - */ - uint keys_per_block= (head->file->block_size/2/ - (head->key_info[keynr].key_length+ - head->file->ref_length) + 1); - found_read_time=((double) (found_records+keys_per_block-1)/ - (double) keys_per_block); - } - else - found_read_time= (head->file->read_time(keynr, - param.range_count, - found_records)+ - (double) found_records / TIME_FOR_COMPARE); - DBUG_PRINT("info",("read_time: %g found_read_time: %g", - read_time, found_read_time)); - if (read_time > found_read_time && found_records != HA_POS_ERROR) - { - read_time=found_read_time; - records=found_records; - best_key=key; - } - } - } - if (best_key && records) - { - if ((quick=get_quick_select(¶m,(uint) (best_key-tree->keys), - *best_key))) - { - quick->records=records; - quick->read_time=read_time; - } - } + /* If we got a read plan, create a quick select from it. 
*/ + if (best_trp) + { + records= best_trp->records; + if (!(quick= best_trp->make_quick(¶m, TRUE)) || quick->init()) + { + delete quick; + quick= NULL; } } + + free_mem: free_root(&alloc,MYF(0)); // Return memory & allocator thd->mem_root= old_root; thd->no_errors=0; } - DBUG_EXECUTE("info",print_quick(quick,&needed_reg);); + + DBUG_EXECUTE("info", print_quick(quick, &needed_reg);); + /* Assume that if the user is using 'limit' we will only need to scan limit rows if we are using a key @@ -780,11 +1822,1510 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, DBUG_RETURN(records ? test(quick) : -1); } + +/* + Get cost of 'sweep' full records retrieval. + SYNOPSIS + get_sweep_read_cost() + param Parameter from test_quick_select + records # of records to be retrieved + RETURN + cost of sweep +*/ + +double get_sweep_read_cost(const PARAM *param, ha_rows records) +{ + double result; + if (param->table->file->primary_key_is_clustered()) + { + result= param->table->file->read_time(param->table->primary_key, + records, records); + } + else + { + double n_blocks= + ceil((double)param->table->file->data_file_length / IO_SIZE); + double busy_blocks= + n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records))); + if (busy_blocks < 1.0) + busy_blocks= 1.0; + DBUG_PRINT("info",("sweep: nblocks=%g, busy_blocks=%g", n_blocks, + busy_blocks)); + /* + Disabled: Bail out if # of blocks to read is bigger than # of blocks in + table data file. + if (max_cost != DBL_MAX && (busy_blocks+index_reads_cost) >= n_blocks) + return 1; + */ + JOIN *join= param->thd->lex->select_lex.join; + if (!join || join->tables == 1) + { + /* No join, assume reading is done in one 'sweep' */ + result= busy_blocks*(DISK_SEEK_BASE_COST + + DISK_SEEK_PROP_COST*n_blocks/busy_blocks); + } + else + { + /* + Possibly this is a join with source table being non-last table, so + assume that disk seeks are random here. + */ + result= busy_blocks; + } + } + DBUG_PRINT("info",("returning cost=%g", result)); + return result; +} + + +/* + Get best plan for a SEL_IMERGE disjunctive expression. + SYNOPSIS + get_best_disjunct_quick() + param Parameter from check_quick_select function + imerge Expression to use + read_time Don't create scans with cost > read_time + + NOTES + index_merge cost is calculated as follows: + index_merge_cost = + cost(index_reads) + (see #1) + cost(rowid_to_row_scan) + (see #2) + cost(unique_use) (see #3) + + 1. cost(index_reads) =SUM_i(cost(index_read_i)) + For non-CPK scans, + cost(index_read_i) = {cost of ordinary 'index only' scan} + For CPK scan, + cost(index_read_i) = {cost of non-'index only' scan} + + 2. cost(rowid_to_row_scan) + If table PK is clustered then + cost(rowid_to_row_scan) = + {cost of ordinary clustered PK scan with n_ranges=n_rows} + + Otherwise, we use the following model to calculate costs: + We need to retrieve n_rows rows from file that occupies n_blocks blocks. + We assume that offsets of rows we need are independent variates with + uniform distribution in [0..max_file_offset] range. + + We'll denote block as "busy" if it contains row(s) we need to retrieve + and "empty" if doesn't contain rows we need. + + Probability that a block is empty is (1 - 1/n_blocks)^n_rows (this + applies to any block in file). Let x_i be a variate taking value 1 if + block #i is empty and 0 otherwise. + + Then E(x_i) = (1 - 1/n_blocks)^n_rows; + + E(n_empty_blocks) = E(sum(x_i)) = sum(E(x_i)) = + = n_blocks * ((1 - 1/n_blocks)^n_rows) = + ~= n_blocks * exp(-n_rows/n_blocks). 
+ + E(n_busy_blocks) = n_blocks*(1 - (1 - 1/n_blocks)^n_rows) = + ~= n_blocks * (1 - exp(-n_rows/n_blocks)). + + Average size of "hole" between neighbor non-empty blocks is + E(hole_size) = n_blocks/E(n_busy_blocks). + + The total cost of reading all needed blocks in one "sweep" is: + + E(n_busy_blocks)* + (DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*n_blocks/E(n_busy_blocks)). + + 3. Cost of Unique use is calculated in Unique::get_use_cost function. + + ROR-union cost is calculated in the same way index_merge, but instead of + Unique a priority queue is used. + + RETURN + Created read plan + NULL - Out of memory or no read scan could be built. +*/ + +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time) +{ + SEL_TREE **ptree; + TRP_INDEX_MERGE *imerge_trp= NULL; + uint n_child_scans= imerge->trees_next - imerge->trees; + TRP_RANGE **range_scans; + TRP_RANGE **cur_child; + TRP_RANGE **cpk_scan= NULL; + bool imerge_too_expensive= FALSE; + double imerge_cost= 0.0; + ha_rows cpk_scan_records= 0; + ha_rows non_cpk_scan_records= 0; + bool pk_is_clustered= param->table->file->primary_key_is_clustered(); + bool all_scans_ror_able= TRUE; + bool all_scans_rors= TRUE; + uint unique_calc_buff_size; + TABLE_READ_PLAN **roru_read_plans; + TABLE_READ_PLAN **cur_roru_plan; + double roru_index_costs; + double blocks_in_index_read; + ha_rows roru_total_records; + double roru_intersect_part= 1.0; + DBUG_ENTER("get_best_disjunct_quick"); + DBUG_PRINT("info", ("Full table scan cost =%g", read_time)); + + if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root, + sizeof(TRP_RANGE*)* + n_child_scans))) + DBUG_RETURN(NULL); + /* + Collect best 'range' scan for each of disjuncts, and, while doing so, + analyze possibility of ROR scans. Also calculate some values needed by + other parts of the code. + */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++) + { + DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map, + "tree in SEL_IMERGE");); + if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, read_time))) + { + /* + One of index scans in this index_merge is more expensive than entire + table read for another available option. The entire index_merge (and + any possible ROR-union) will be more expensive then, too. We continue + here only to update SQL_SELECT members. 
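  [editor's note] A worked instance of the busy-blocks model from the NOTES
  above (numbers chosen purely for illustration): with n_blocks = 1000 and
  n_rows = 100,

      E(n_busy_blocks) = 1000 * (1 - (1 - 1/1000)^100)
                      ~= 1000 * (1 - exp(-0.1)) ~= 95,

  so the sweep touches roughly 95 blocks and, per get_sweep_read_cost(),
  costs about 95 * (DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * 1000/95).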
+ */ + imerge_too_expensive= TRUE; + } + if (imerge_too_expensive) + continue; + + imerge_cost += (*cur_child)->read_cost; + all_scans_ror_able &= ((*ptree)->n_ror_scans > 0); + all_scans_rors &= (*cur_child)->is_ror; + if (pk_is_clustered && + param->real_keynr[(*cur_child)->key_idx] == param->table->primary_key) + { + cpk_scan= cur_child; + cpk_scan_records= (*cur_child)->records; + } + else + non_cpk_scan_records += (*cur_child)->records; + } + + DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost)); + if (imerge_too_expensive || (imerge_cost > read_time) || + (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) && + read_time != DBL_MAX) + { + /* + Bail out if it is obvious that both index_merge and ROR-union will be + more expensive + */ + DBUG_PRINT("info", ("Sum of index_merge scans is more expensive than " + "full table scan, bailing out")); + DBUG_RETURN(NULL); + } + if (all_scans_rors) + { + roru_read_plans= (TABLE_READ_PLAN**)range_scans; + goto skip_to_ror_scan; + } + blocks_in_index_read= imerge_cost; + if (cpk_scan) + { + /* + Add one ROWID comparison for each row retrieved on non-CPK scan. (it + is done in QUICK_RANGE_SELECT::row_in_ranges) + */ + imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID; + } + + /* Calculate cost(rowid_to_row_scan) */ + imerge_cost += get_sweep_read_cost(param, non_cpk_scan_records); + DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g", + imerge_cost)); + if (imerge_cost > read_time) + goto build_ror_index_merge; + + /* Add Unique operations cost */ + unique_calc_buff_size= + Unique::get_cost_calc_buff_size(non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + if (param->imerge_cost_buff_size < unique_calc_buff_size) + { + if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root, + unique_calc_buff_size))) + DBUG_RETURN(NULL); + param->imerge_cost_buff_size= unique_calc_buff_size; + } + + imerge_cost += + Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)", + imerge_cost, read_time)); + if (imerge_cost < read_time) + { + if ((imerge_trp= new (param->mem_root)TRP_INDEX_MERGE)) + { + imerge_trp->read_cost= imerge_cost; + imerge_trp->records= non_cpk_scan_records + cpk_scan_records; + imerge_trp->records= min(imerge_trp->records, + param->table->file->records); + imerge_trp->range_scans= range_scans; + imerge_trp->range_scans_end= range_scans + n_child_scans; + read_time= imerge_cost; + } + } + +build_ror_index_merge: + if (!all_scans_ror_able || param->thd->lex->sql_command == SQLCOM_DELETE) + DBUG_RETURN(imerge_trp); + + /* Ok, it is possible to build a ROR-union, try it. */ + bool dummy; + if (!(roru_read_plans= + (TABLE_READ_PLAN**)alloc_root(param->mem_root, + sizeof(TABLE_READ_PLAN*)* + n_child_scans))) + DBUG_RETURN(imerge_trp); +skip_to_ror_scan: + roru_index_costs= 0.0; + roru_total_records= 0; + cur_roru_plan= roru_read_plans; + + /* Find 'best' ROR scan for each of trees in disjunction */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++, cur_roru_plan++) + { + /* + Assume the best ROR scan is the one that has cheapest full-row-retrieval + scan cost. + Also accumulate index_only scan costs as we'll need them to calculate + overall index_intersection cost. 
+ */ + double cost; + if ((*cur_child)->is_ror) + { + /* Ok, we have index_only cost, now get full rows scan cost */ + cost= param->table->file-> + read_time(param->real_keynr[(*cur_child)->key_idx], 1, + (*cur_child)->records) + + rows2double((*cur_child)->records) / TIME_FOR_COMPARE; + } + else + cost= read_time; + + TABLE_READ_PLAN *prev_plan= *cur_child; + if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost, + &dummy))) + { + if (prev_plan->is_ror) + *cur_roru_plan= prev_plan; + else + DBUG_RETURN(imerge_trp); + roru_index_costs += (*cur_roru_plan)->read_cost; + } + else + roru_index_costs += + ((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs; + roru_total_records += (*cur_roru_plan)->records; + roru_intersect_part *= (*cur_roru_plan)->records / + param->table->file->records; + } + + /* + rows to retrieve= + SUM(rows_in_scan_i) - table_rows * PROD(rows_in_scan_i / table_rows). + This is valid because index_merge construction guarantees that conditions + in disjunction do not share key parts. + */ + roru_total_records -= (ha_rows)(roru_intersect_part* + param->table->file->records); + /* ok, got a ROR read plan for each of the disjuncts + Calculate cost: + cost(index_union_scan(scan_1, ... scan_n)) = + SUM_i(cost_of_index_only_scan(scan_i)) + + queue_use_cost(rowid_len, n) + + cost_of_row_retrieval + See get_merge_buffers_cost function for queue_use_cost formula derivation. + */ + + double roru_total_cost; + roru_total_cost= roru_index_costs + + rows2double(roru_total_records)*log((double)n_child_scans) / + (TIME_FOR_COMPARE_ROWID * M_LN2) + + get_sweep_read_cost(param, roru_total_records); + + DBUG_PRINT("info", ("ROR-union: cost %g, %d members", roru_total_cost, + n_child_scans)); + TRP_ROR_UNION* roru; + if (roru_total_cost < read_time) + { + if ((roru= new (param->mem_root) TRP_ROR_UNION)) + { + roru->first_ror= roru_read_plans; + roru->last_ror= roru_read_plans + n_child_scans; + roru->read_cost= roru_total_cost; + roru->records= roru_total_records; + DBUG_RETURN(roru); + } + } + DBUG_RETURN(imerge_trp); +} + + +/* + Calculate cost of 'index only' scan for given index and number of records. + + SYNOPSIS + get_index_only_read_time() + param parameters structure + records #of records to read + keynr key to read + + NOTES + It is assumed that we will read trough the whole key range and that all + key blocks are half full (normally things are much better). It is also + assumed that each time we read the next key from the index, the handler + performs a random seek, thus the cost is proportional to the number of + blocks read. + + TODO: + Move this to handler->read_time() by adding a flag 'index-only-read' to + this call. The reason for doing this is that the current function doesn't + handle the case when the row is stored in the b-tree (like in innodb + clustered index) +*/ + +inline double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr) +{ + double read_time; + uint keys_per_block= (param->table->file->block_size/2/ + (param->table->key_info[keynr].key_length+ + param->table->file->ref_length) + 1); + read_time=((double) (records+keys_per_block-1)/ + (double) keys_per_block); + return read_time; +} + + +typedef struct st_ror_scan_info +{ + uint idx; /* # of used key in param->keys */ + uint keynr; /* # of used key in table */ + ha_rows records; /* estimate of # records this scan will return */ + + /* Set of intervals over key fields that will be used for row retrieval. 
*/ + SEL_ARG *sel_arg; + + /* Fields used in the query and covered by this ROR scan. */ + MY_BITMAP covered_fields; + uint used_fields_covered; /* # of set bits in covered_fields */ + int key_rec_length; /* length of key record (including rowid) */ + + /* + Cost of reading all index records with values in sel_arg intervals set + (assuming there is no need to access full table records) + */ + double index_read_cost; + uint first_uncovered_field; /* first unused bit in covered_fields */ + uint key_components; /* # of parts in the key */ +} ROR_SCAN_INFO; + + +/* + Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using + sel_arg set of intervals. + + SYNOPSIS + make_ror_scan() + param Parameter from test_quick_select function + idx Index of key in param->keys + sel_arg Set of intervals for a given key + RETURN + NULL - out of memory + ROR scan structure containing a scan for {idx, sel_arg} +*/ + +static +ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg) +{ + ROR_SCAN_INFO *ror_scan; + uchar *bitmap_buf; + uint keynr; + DBUG_ENTER("make_ror_scan"); + if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO)))) + DBUG_RETURN(NULL); + + ror_scan->idx= idx; + ror_scan->keynr= keynr= param->real_keynr[idx]; + ror_scan->key_rec_length= param->table->key_info[keynr].key_length + + param->table->file->ref_length; + ror_scan->sel_arg= sel_arg; + ror_scan->records= param->table->quick_rows[keynr]; + + if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root, + param->fields_bitmap_size))) + DBUG_RETURN(NULL); + + if (bitmap_init(&ror_scan->covered_fields, bitmap_buf, + param->fields_bitmap_size*8, FALSE)) + DBUG_RETURN(NULL); + bitmap_clear_all(&ror_scan->covered_fields); + + KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[keynr].key_parts; + uint n_used_covered= 0; + for (;key_part != key_part_end; ++key_part) + { + if (bitmap_is_set(¶m->needed_fields, key_part->fieldnr)) + { + n_used_covered++; + bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr); + } + } + ror_scan->index_read_cost= + get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr], + ror_scan->keynr); + DBUG_RETURN(ror_scan); +} + + +/* + Compare two ROR_SCAN_INFO** by E(#records_matched) * key_record_length. + SYNOPSIS + cmp_ror_scan_info() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ +static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + double val1= rows2double((*a)->records) * (*a)->key_rec_length; + double val2= rows2double((*b)->records) * (*b)->key_rec_length; + return (val1 < val2)? -1: (val1 == val2)? 
0 : 1; +} + +/* + Compare two ROR_SCAN_INFO** by + (#covered fields in F desc, + #components asc, + number of first not covered component asc) + + SYNOPSIS + cmp_ror_scan_info_covering() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ +static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + if ((*a)->used_fields_covered > (*b)->used_fields_covered) + return -1; + if ((*a)->used_fields_covered < (*b)->used_fields_covered) + return 1; + if ((*a)->key_components < (*b)->key_components) + return -1; + if ((*a)->key_components > (*b)->key_components) + return 1; + if ((*a)->first_uncovered_field < (*b)->first_uncovered_field) + return -1; + if ((*a)->first_uncovered_field > (*b)->first_uncovered_field) + return 1; + return 0; +} + +/* Auxiliary structure for incremental ROR-intersection creation */ +typedef struct +{ + const PARAM *param; + MY_BITMAP covered_fields; /* union of fields covered by all scans */ + + /* TRUE if covered_fields is a superset of needed_fields */ + bool is_covering; + + double index_scan_costs; /* SUM(cost of 'index-only' scans) */ + double total_cost; + /* + Fraction of table records that satisfies conditions of all scans. + This is the number of full records that will be retrieved if a + non-index_only index intersection will be employed. + */ + double records_fract; + ha_rows index_records; /* sum(#records to look in indexes) */ +} ROR_INTERSECT_INFO; + + +/* + Re-initialize an allocated intersect info to contain zero scans. + SYNOPSIS + info Intersection info structure to re-initialize. +*/ + +static void ror_intersect_reinit(ROR_INTERSECT_INFO *info) +{ + info->is_covering= FALSE; + info->index_scan_costs= 0.0f; + info->records_fract= 1.0f; + bitmap_clear_all(&info->covered_fields); +} + +/* + Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans. + + SYNOPSIS + ror_intersect_init() + param Parameter from test_quick_select + is_index_only If TRUE, set ROR_INTERSECT_INFO to be covering + + RETURN + allocated structure + NULL on error +*/ + +static +ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param, bool is_index_only) +{ + ROR_INTERSECT_INFO *info; + uchar* buf; + if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root, + sizeof(ROR_INTERSECT_INFO)))) + return NULL; + info->param= param; + if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size))) + return NULL; + if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8, + FALSE)) + return NULL; + ror_intersect_reinit(info); + return info; +} + + +/* + Check if adding a ROR scan to a ROR-intersection reduces its cost of + ROR-intersection and if yes, update parameters of ROR-intersection, + including its cost. + + SYNOPSIS + ror_intersect_add() + param Parameter from test_quick_select + info ROR-intersection structure to add the scan to. + ror_scan ROR scan info to add. + is_cpk_scan If TRUE, add the scan as CPK scan (this can be inferred + from other parameters and is passed separately only to + avoid duplicating the inference code) + + NOTES + Adding a ROR scan to ROR-intersect "makes sense" iff the cost of ROR- + intersection decreases. 
The cost of ROR-intersection is caclulated as + follows: + + cost= SUM_i(key_scan_cost_i) + cost_of_full_rows_retrieval + + if (union of indexes used covers all needed fields) + cost_of_full_rows_retrieval= 0; + else + { + cost_of_full_rows_retrieval= + cost_of_sweep_read(E(rows_to_retrieve), rows_in_table); + } + + E(rows_to_retrieve) is caclulated as follows: + Suppose we have a condition on several keys + cond=k_11=c_11 AND k_12=c_12 AND ... // parts of first key + k_21=c_21 AND k_22=c_22 AND ... // parts of second key + ... + k_n1=c_n1 AND k_n3=c_n3 AND ... (1) + + where k_ij may be the same as any k_pq (i.e. keys may have common parts). + + A full row is retrieved iff entire cond holds. + + The recursive procedure for finding P(cond) is as follows: + + First step: + Pick 1st part of 1st key and break conjunction (1) into two parts: + cond= (k_11=c_11 AND R) + + Here R may still contain condition(s) equivalent to k_11=c_11. + Nevertheless, the following holds: + + P(k_11=c_11 AND R) = P(k_11=c_11) * P(R|k_11=c_11). + + Mark k_11 as fixed field (and satisfied condition) F, save P(F), + save R to be cond and proceed to recursion step. + + Recursion step: + We have a set of fixed fields/satisfied conditions) F, probability P(F), + and remaining conjunction R + Pick next key part on current key and its condition "k_ij=c_ij". + We will add "k_ij=c_ij" into F and update P(F). + Lets denote k_ij as t, R = t AND R1, where R1 may still contain t. Then + + P((t AND R1)|F) = P(t|F) * P(R1|t|F) = P(t|F) * P(R1|(t AND F)) (2) + + (where '|' mean conditional probability, not "or") + + Consider the first multiplier in (2). One of the following holds: + a) F contains condition on field used in t (i.e. t AND F = F). + Then P(t|F) = 1 + + b) F doesn't contain condition on field used in t. Then F and t are + considered independent. + + P(t|F) = P(t|(fields_before_t_in_key AND other_fields)) = + = P(t|fields_before_t_in_key). + + P(t|fields_before_t_in_key) = #records(fields_before_t_in_key) / + #records(fields_before_t_in_key, t) + + The second multiplier is calculated by applying this step recursively. + + IMPLEMENTATION + This function calculates the result of application of the "recursion step" + described above for all fixed key members of a single key, accumulating set + of covered fields, selectivity, etc. + + The calculation is conducted as follows: + Lets denote #records(keypart1, ... keypartK) as n_k. We need to calculate + + n_{k1} n_{k_2} + --------- * --------- * .... (3) + n_{k1-1} n_{k2_1} + + where k1,k2,... are key parts which fields were not yet marked as fixed + ( this is result of application of option b) of the recursion step for + parts of a single key). + Since it is reasonable to expect that most of the fields are not marked + as fixed, we calcualate (3) as + + n_{i1} n_{i_2} + (3) = n_{max_key_part} / ( --------- * --------- * .... ) + n_{i1-1} n_{i2_1} + + where i1,i2, .. are key parts that were already marked as fixed. + + In order to minimize number of expensive records_in_range calls we group + and reduce adjacent fractions. + + RETURN + TRUE ROR scan added to ROR-intersection, cost updated. + FALSE It doesn't make sense to add this ROR scan to this ROR-intersection. 
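+
+  EXAMPLE
+    (hypothetical statistics, for illustration only)
+    The key being added is (f1,f2); f1 is already covered by previously
+    added scans, f2 is not.  records_in_range() estimates n(f1)=1000 and
+    the whole scan matches n(f1,f2)=100 rows.  The selectivity multiplier
+    is then n(f1,f2)/n(f1) = 100/1000 = 0.1, so records_fract (and with it
+    E(rows_to_retrieve)) shrinks by a factor of 10.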
+*/ + +bool ror_intersect_add(const PARAM *param, ROR_INTERSECT_INFO *info, + ROR_SCAN_INFO* ror_scan, bool is_cpk_scan=FALSE) +{ + int i; + SEL_ARG *sel_arg; + KEY_PART_INFO *key_part= + info->param->table->key_info[ror_scan->keynr].key_part; + double selectivity_mult= 1.0; + byte key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */ + + DBUG_ENTER("ror_intersect_add"); + DBUG_PRINT("info", ("Current selectivity= %g", info->records_fract)); + DBUG_PRINT("info", ("Adding scan on %s", + info->param->table->key_info[ror_scan->keynr].name)); + SEL_ARG *tuple_arg= NULL; + char *key_ptr= (char*) key_val; + bool cur_covered, prev_covered= + bitmap_is_set(&info->covered_fields, key_part->fieldnr); + + ha_rows prev_records= param->table->file->records; + key_range min_range; + key_range max_range; + min_range.key= (byte*) key_val; + min_range.flag= HA_READ_KEY_EXACT; + max_range.key= (byte*) key_val; + max_range.flag= HA_READ_AFTER_KEY; + + for(i= 0, sel_arg= ror_scan->sel_arg; sel_arg; + i++, sel_arg= sel_arg->next_key_part) + { + cur_covered= bitmap_is_set(&info->covered_fields, (key_part + i)->fieldnr); + if (cur_covered != prev_covered) + { + /* create (part1val, ..., part{n-1}val) tuple. */ + { + if (!tuple_arg) + { + tuple_arg= ror_scan->sel_arg; + tuple_arg->store_min(key_part->length, &key_ptr, 0); + } + while (tuple_arg->next_key_part != sel_arg) + { + tuple_arg= tuple_arg->next_key_part; + tuple_arg->store_min(key_part->length, &key_ptr, 0); + } + } + ha_rows records; + min_range.length= max_range.length= ((char*) key_ptr - (char*) key_val); + records= param->table->file-> + records_in_range(ror_scan->keynr, + &min_range, + &max_range); + if (cur_covered) + { + /* uncovered -> covered */ + double tmp= rows2double(records)/rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + prev_records= HA_POS_ERROR; + } + else + { + /* covered -> uncovered */ + prev_records= records; + } + } + prev_covered= cur_covered; + } + if (!prev_covered) + { + double tmp= rows2double(param->table->quick_rows[ror_scan->keynr]) / + rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + } + + if (selectivity_mult == 1.0) + { + /* Don't add this scan if it doesn't improve selectivity. 
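+       (This is typically the case when every field of this key is already
+       covered by scans added earlier, so intersecting with it cannot make
+       the row estimate any smaller.)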
*/ + DBUG_PRINT("info", ("The scan doesn't improve selectivity.")); + DBUG_RETURN(FALSE); + } + + info->records_fract *= selectivity_mult; + ha_rows cur_scan_records= info->param->table->quick_rows[ror_scan->keynr]; + if (is_cpk_scan) + { + info->index_scan_costs += rows2double(cur_scan_records)* + TIME_FOR_COMPARE_ROWID; + } + else + { + info->index_records += cur_scan_records; + info->index_scan_costs += ror_scan->index_read_cost; + bitmap_union(&info->covered_fields, &ror_scan->covered_fields); + } + + if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields, + &info->covered_fields)) + { + DBUG_PRINT("info", ("ROR-intersect is covering now")); + info->is_covering= TRUE; + } + + info->total_cost= info->index_scan_costs; + if (!info->is_covering) + { + ha_rows table_recs= info->param->table->file->records; + info->total_cost += + get_sweep_read_cost(info->param, + (ha_rows)(info->records_fract*table_recs)); + } + DBUG_PRINT("info", ("New selectivity= %g", info->records_fract)); + DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost, + info->is_covering?"" : "non-")); + DBUG_RETURN(TRUE); +} + + +/* + Get best ROR-intersection plan using non-covering ROR-intersection search + algorithm. The returned plan may be covering. + + SYNOPSIS + get_best_ror_intersect() + param Parameter from test_quick_select function. + tree Transformed restriction condition to be used to look + for ROR scans. + read_time Do not return read plans with cost > read_time. + are_all_covering [out] set to TRUE if union of all scans covers all + fields needed by the query (and it is possible to build + a covering ROR-intersection) + + NOTES + get_key_scans_params must be called before for the same SEL_TREE before + this function can be called. + + The approximate best non-covering plan search algorithm is as follows: + + find_min_ror_intersection_scan() + { + R= select all ROR scans; + order R by (E(#records_matched) * key_record_length). + + S= first(R); -- set of scans that will be used for ROR-intersection + R= R-first(S); + min_cost= cost(S); + min_scan= make_scan(S); + while (R is not empty) + { + if (!selectivity(S + first(R) < selectivity(S))) + continue; + + S= S + first(R); + R= R - first(R); + if (cost(S) < min_cost) + { + min_cost= cost(S); + min_scan= make_scan(S); + } + } + return min_scan; + } + + See ror_intersect_add function for ROR intersection costs. + + Special handling for Clustered PK scans + Clustered PK contains all table fields, so using it as a regular scan in + index intersection doesn't make sense: a range scan on CPK will be less + expensive in this case. + Clustered PK scan has special handling in ROR-intersection: it is not used + to retrieve rows, instead its condition is used to filter row references + we get from scans on other keys. + + RETURN + ROR-intersection table read plan + NULL if out of memory or no suitable plan found. +*/ + +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering) +{ + uint idx; + double min_cost= read_time; + DBUG_ENTER("get_best_ror_intersect"); + + if (tree->n_ror_scans < 2) + DBUG_RETURN(NULL); + + /* + Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of them. + Also find and save clustered PK scan if there is one. 
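+    The CPK scan is deliberately kept out of the scan array that is sorted
+    and searched below; it is only reconsidered at the very end, as a rowid
+    filter on top of the best non-CPK intersection (see the cpk_scan
+    handling after the main loop).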
+ */ + ROR_SCAN_INFO **cur_ror_scan; + ROR_SCAN_INFO *cpk_scan= NULL; + bool cpk_scan_used= FALSE; + if (!(tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + param->keys))) + return NULL; + uint cpk_no= (param->table->file->primary_key_is_clustered())? + param->table->primary_key : MAX_KEY; + + for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++) + { + ROR_SCAN_INFO *scan; + if (!tree->ror_scans_map.is_set(idx)) + continue; + if (!(scan= make_ror_scan(param, idx, tree->keys[idx]))) + return NULL; + if (param->real_keynr[idx] == cpk_no) + { + cpk_scan= scan; + tree->n_ror_scans--; + } + else + *(cur_ror_scan++)= scan; + } + + tree->ror_scans_end= cur_ror_scan; + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "original", + tree->ror_scans, + tree->ror_scans_end);); + /* + Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized + ROR_SCAN_INFOs. + Now, get a minimal key scan using an approximate algorithm. + */ + qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*), + (qsort_cmp)cmp_ror_scan_info); + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "ordered", + tree->ror_scans, + tree->ror_scans_end);); + + ROR_SCAN_INFO **intersect_scans; /* ROR scans used in index intersection */ + ROR_SCAN_INFO **intersect_scans_end; + if (!(intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + tree->n_ror_scans))) + return NULL; + intersect_scans_end= intersect_scans; + + /* Create and incrementally update ROR intersection. */ + ROR_INTERSECT_INFO *intersect; + if (!(intersect= ror_intersect_init(param, FALSE))) + return NULL; + + /* [intersect_scans, intersect_scans_best) will hold the best combination */ + ROR_SCAN_INFO **intersect_scans_best; + ha_rows best_rows; + bool is_best_covering; + double best_index_scan_costs; + LINT_INIT(best_rows); /* protected by intersect_scans_best */ + LINT_INIT(is_best_covering); + LINT_INIT(best_index_scan_costs); + + cur_ror_scan= tree->ror_scans; + /* Start with one scan */ + intersect_scans_best= intersect_scans; + while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering) + { + /* S= S + first(R); */ + if (ror_intersect_add(param, intersect, *cur_ror_scan)) + *(intersect_scans_end++)= *cur_ror_scan; + /* R= R - first(R); */ + cur_ror_scan++; + + if (intersect->total_cost < min_cost) + { + /* Local minimum found, save it */ + min_cost= intersect->total_cost; + best_rows= (ha_rows)(intersect->records_fract* + rows2double(param->table->file->records)); + is_best_covering= intersect->is_covering; + intersect_scans_best= intersect_scans_end; + best_index_scan_costs= intersect->index_scan_costs; + } + } + + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, + "best ROR-intersection", + intersect_scans, + intersect_scans_best);); + + *are_all_covering= intersect->is_covering; + uint best_num= intersect_scans_best - intersect_scans; + /* + Ok, found the best ROR-intersection of non-CPK key scans. + Check if we should add a CPK scan. + + If the obtained ROR-intersection is covering, it doesn't make sense + to add CPK scan - Clustered PK contains all fields and if we're doing + CPK scan doing other CPK scans will only add more overhead. 
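+    (In other words, the CPK scan is only useful as a cheap rowid filter
+    that lets us skip fetching some full rows; if no full rows need to be
+    fetched at all, it can only add rowid-comparison cost.)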
+ */ + if (cpk_scan && !intersect->is_covering) + { + /* + Handle the special case: ROR-intersect(PRIMARY, key1) is + the best, but cost(range(key1)) > cost(best_non_ror_range_scan) + */ + if (best_num == 0) + { + cur_ror_scan= tree->ror_scans; + intersect_scans_end= intersect_scans; + ror_intersect_reinit(intersect); + if (!ror_intersect_add(param, intersect, *cur_ror_scan)) + DBUG_RETURN(NULL); /* shouldn't happen actually */ + *(intersect_scans_end++)= *cur_ror_scan; + best_num++; + } + + if (ror_intersect_add(param, intersect, cpk_scan)) + { + cpk_scan_used= TRUE; + min_cost= intersect->total_cost; + best_rows= (ha_rows)(intersect->records_fract* + rows2double(param->table->file->records)); + is_best_covering= intersect->is_covering; + best_index_scan_costs= intersect->index_scan_costs; + } + } + + /* Ok, return ROR-intersect plan if we have found one */ + TRP_ROR_INTERSECT *trp= NULL; + if (best_num > 1 || cpk_scan_used) + { + if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT)) + DBUG_RETURN(trp); + if (!(trp->first_scan= + (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)*best_num))) + DBUG_RETURN(NULL); + memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*)); + trp->last_scan= trp->first_scan + best_num; + trp->is_covering= is_best_covering; + trp->read_cost= min_cost; + trp->records= best_rows? best_rows : 1; + trp->index_scan_costs= best_index_scan_costs; + trp->cpk_scan= cpk_scan; + } + DBUG_PRINT("info", + ("Returning non-covering ROR-intersect plan: cost %g, records %lu", + trp->read_cost, (ulong) trp->records)); + DBUG_RETURN(trp); +} + + +/* + Get best covering ROR-intersection. + SYNOPSIS + get_best_covering_ror_intersect() + param Parameter from test_quick_select function. + tree SEL_TREE with sets of intervals for different keys. + read_time Don't return table read plans with cost > read_time. + + RETURN + Best covering ROR-intersection plan + NULL if no plan found. + + NOTES + get_best_ror_intersect must be called for a tree before calling this + function for it. + This function invalidates tree->ror_scans member values. + + The following approximate algorithm is used: + I=set of all covering indexes + F=set of all fields to cover + S={} + + do { + Order I by (#covered fields in F desc, + #components asc, + number of first not covered component asc); + F=F-covered by first(I); + S=S+first(I); + I=I-first(I); + } while F is not empty. +*/ + +static +TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, + SEL_TREE *tree, + double read_time) +{ + ROR_SCAN_INFO **ror_scan_mark; + ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end; + DBUG_ENTER("get_best_covering_ror_intersect"); + uint nbits= param->fields_bitmap_size*8; + + for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan) + (*scan)->key_components= + param->table->key_info[(*scan)->keynr].key_parts; + + /* + Run covering-ROR-search algorithm. + Assume set I is [ror_scan .. 
ror_scans_end) + */ + + /*I=set of all covering indexes */ + ror_scan_mark= tree->ror_scans; + + uchar buf[MAX_KEY/8+1]; + MY_BITMAP covered_fields; + if (bitmap_init(&covered_fields, buf, nbits, FALSE)) + DBUG_RETURN(0); + bitmap_clear_all(&covered_fields); + + double total_cost= 0.0f; + ha_rows records=0; + bool all_covered; + + DBUG_PRINT("info", ("Building covering ROR-intersection")); + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "building covering ROR-I", + ror_scan_mark, ror_scans_end);); + do { + /* + Update changed sorting info: + #covered fields, + number of first not covered component + Calculate and save these values for each of remaining scans. + */ + for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan) + { + bitmap_subtract(&(*scan)->covered_fields, &covered_fields); + (*scan)->used_fields_covered= + bitmap_bits_set(&(*scan)->covered_fields); + (*scan)->first_uncovered_field= + bitmap_get_first(&(*scan)->covered_fields); + } + + qsort(ror_scan_mark, ror_scans_end-ror_scan_mark, sizeof(ROR_SCAN_INFO*), + (qsort_cmp)cmp_ror_scan_info_covering); + + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "remaining scans", + ror_scan_mark, ror_scans_end);); + + /* I=I-first(I) */ + total_cost += (*ror_scan_mark)->index_read_cost; + records += (*ror_scan_mark)->records; + DBUG_PRINT("info", ("Adding scan on %s", + param->table->key_info[(*ror_scan_mark)->keynr].name)); + if (total_cost > read_time) + DBUG_RETURN(NULL); + /* F=F-covered by first(I) */ + bitmap_union(&covered_fields, &(*ror_scan_mark)->covered_fields); + all_covered= bitmap_is_subset(¶m->needed_fields, &covered_fields); + } while (!all_covered && (++ror_scan_mark < ror_scans_end)); + + if (!all_covered) + DBUG_RETURN(NULL); /* should not happen actually */ + + /* + Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with + cost total_cost. + */ + DBUG_PRINT("info", ("Covering ROR-intersect scans cost: %g", total_cost)); + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "creating covering ROR-intersect", + tree->ror_scans, ror_scan_mark);); + + /* Add priority queue use cost. */ + total_cost += rows2double(records)* + log((double)(ror_scan_mark - tree->ror_scans)) / + (TIME_FOR_COMPARE_ROWID * M_LN2); + DBUG_PRINT("info", ("Covering ROR-intersect full cost: %g", total_cost)); + + if (total_cost > read_time) + DBUG_RETURN(NULL); + + TRP_ROR_INTERSECT *trp; + if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT)) + DBUG_RETURN(trp); + uint best_num= (ror_scan_mark - tree->ror_scans); + if (!(trp->first_scan= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + best_num))) + DBUG_RETURN(NULL); + memcpy(trp->first_scan, ror_scan_mark, best_num*sizeof(ROR_SCAN_INFO*)); + trp->last_scan= trp->first_scan + best_num; + trp->is_covering= TRUE; + trp->read_cost= total_cost; + trp->records= records; + + DBUG_PRINT("info", + ("Returning covering ROR-intersect plan: cost %g, records %lu", + trp->read_cost, (ulong) trp->records)); + DBUG_RETURN(trp); +} + + +/* + Get best "range" table read plan for given SEL_TREE. + Also update PARAM members and store ROR scans info in the SEL_TREE. + SYNOPSIS + get_key_scans_params + param parameters from test_quick_select + tree make range select for this SEL_TREE + index_read_must_be_used if TRUE, assume 'index only' option will be set + (except for clustered PK indexes) + read_time don't create read plans with cost > read_time. 
+ RETURN + Best range read plan + NULL if no plan found or error occurred +*/ + +static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, + bool index_read_must_be_used, + double read_time) +{ + int idx; + SEL_ARG **key,**end, **key_to_read= NULL; + ha_rows best_records; + TRP_RANGE* read_plan= NULL; + bool pk_is_clustered= param->table->file->primary_key_is_clustered(); + DBUG_ENTER("get_key_scans_params"); + LINT_INIT(best_records); /* protected by key_to_read */ + /* + Note that there may be trees that have type SEL_TREE::KEY but contain no + key reads at all, e.g. tree for expression "key1 is not null" where key1 + is defined as "not null". + */ + DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->keys_map, + "tree scans");); + tree->ror_scans_map.clear_all(); + tree->n_ror_scans= 0; + for (idx= 0,key=tree->keys, end=key+param->keys; + key != end ; + key++,idx++) + { + ha_rows found_records; + double found_read_time; + if (*key) + { + uint keynr= param->real_keynr[idx]; + if ((*key)->type == SEL_ARG::MAYBE_KEY || + (*key)->maybe_flag) + param->needed_reg->set_bit(keynr); + + bool read_index_only= index_read_must_be_used ? TRUE : + (bool) param->table->used_keys.is_set(keynr); + + found_records= check_quick_select(param, idx, *key); + if (param->is_ror_scan) + { + tree->n_ror_scans++; + tree->ror_scans_map.set_bit(idx); + } + double cpu_cost= (double) found_records / TIME_FOR_COMPARE; + if (found_records != HA_POS_ERROR && found_records > 2 && + read_index_only && + (param->table->file->index_flags(keynr, param->max_key_part,1) & + HA_KEYREAD_ONLY) && + !(pk_is_clustered && keynr == param->table->primary_key)) + /* We can resolve this by only reading through this key. */ + found_read_time= get_index_only_read_time(param,found_records,keynr) + + cpu_cost; + else + /* + cost(read_through_index) = cost(disk_io) + cost(row_in_range_checks) + The row_in_range check is in QUICK_RANGE_SELECT::cmp_next function. 
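+          For illustration (hypothetical numbers, assuming TIME_FOR_COMPARE
+          is 5): with found_records=1000 and roughly 100 keys per block,
+          the index-only branch above costs about 1000/100 + 1000/5 = 210
+          units, whereas this branch uses the handler's read_time()
+          estimate for those 1000 rows plus the same 200 units of
+          row_in_range checks.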
+ */ + found_read_time= param->table->file->read_time(keynr, + param->range_count, + found_records) + + cpu_cost; + + DBUG_PRINT("info",("read_time: %g found_read_time: %g", + read_time, found_read_time)); + + if (read_time > found_read_time && found_records != HA_POS_ERROR + /*|| read_time == DBL_MAX*/ ) + { + read_time= found_read_time; + best_records= found_records; + key_to_read= key; + } + + } + } + + DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->ror_scans_map, + "ROR scans");); + if (key_to_read) + { + idx= key_to_read - tree->keys; + if ((read_plan= new (param->mem_root) TRP_RANGE(*key_to_read, idx))) + { + read_plan->records= best_records; + read_plan->is_ror= tree->ror_scans_map.is_set(idx); + read_plan->read_cost= read_time; + DBUG_PRINT("info", + ("Returning range plan for key %s, cost %g, records %lu", + param->table->key_info[param->real_keynr[idx]].name, + read_plan->read_cost, (ulong) read_plan->records)); + } + } + else + DBUG_PRINT("info", ("No 'range' table read plan found")); + + DBUG_RETURN(read_plan); +} + + +QUICK_SELECT_I *TRP_INDEX_MERGE::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_INDEX_MERGE_SELECT *quick_imerge; + QUICK_RANGE_SELECT *quick; + /* index_merge always retrieves full rows, ignore retrieve_full_rows */ + if (!(quick_imerge= new QUICK_INDEX_MERGE_SELECT(param->thd, param->table))) + return NULL; + + quick_imerge->records= records; + quick_imerge->read_time= read_cost; + for(TRP_RANGE **range_scan= range_scans; range_scan != range_scans_end; + range_scan++) + { + if (!(quick= (QUICK_RANGE_SELECT*) + ((*range_scan)->make_quick(param, FALSE, &quick_imerge->alloc)))|| + quick_imerge->push_quick_back(quick)) + { + delete quick; + delete quick_imerge; + return NULL; + } + } + return quick_imerge; +} + +QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_ROR_INTERSECT_SELECT *quick_intrsect; + QUICK_RANGE_SELECT *quick; + DBUG_ENTER("TRP_ROR_INTERSECT::make_quick"); + MEM_ROOT *alloc; + + if ((quick_intrsect= + new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table, + retrieve_full_rows? (!is_covering):FALSE, + parent_alloc))) + { + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "creating ROR-intersect", + first_scan, last_scan);); + alloc= parent_alloc? parent_alloc: &quick_intrsect->alloc; + for(; first_scan != last_scan;++first_scan) + { + if (!(quick= get_quick_select(param, (*first_scan)->idx, + (*first_scan)->sel_arg, alloc)) || + quick_intrsect->push_quick_back(quick)) + { + delete quick_intrsect; + DBUG_RETURN(NULL); + } + } + if (cpk_scan) + { + if (!(quick= get_quick_select(param, cpk_scan->idx, + cpk_scan->sel_arg, alloc))) + { + delete quick_intrsect; + DBUG_RETURN(NULL); + } + quick->file= NULL; + quick_intrsect->cpk_quick= quick; + } + quick_intrsect->records= records; + quick_intrsect->read_time= read_cost; + } + DBUG_RETURN(quick_intrsect); +} + + +QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_ROR_UNION_SELECT *quick_roru; + TABLE_READ_PLAN **scan; + QUICK_SELECT_I *quick; + DBUG_ENTER("TRP_ROR_UNION::make_quick"); + /* + It is impossible to construct a ROR-union that will not retrieve full + rows, ignore retrieve_full_rows parameter. 
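+    Each child quick select below is created with &quick_roru->alloc as its
+    parent mem_root, so the per-child data lives in the union's memory root.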
+ */ + if ((quick_roru= new QUICK_ROR_UNION_SELECT(param->thd, param->table))) + { + for(scan= first_ror; scan != last_ror; scan++) + { + if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) || + quick_roru->push_quick_back(quick)) + DBUG_RETURN(NULL); + } + quick_roru->records= records; + quick_roru->read_time= read_cost; + } + DBUG_RETURN(quick_roru); +} + + +/* + Build a SEL_TREE for a simple predicate + + SYNOPSIS + get_func_mm_tree() + param PARAM from SQL_SELECT::test_quick_select + cond_func item for the predicate + field field in the predicate + value constant in the predicate + cmp_type compare type for the field + + RETURN + Pointer to thre built tree +*/ + +static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func, + Field *field, Item *value, + Item_result cmp_type) +{ + SEL_TREE *tree= 0; + DBUG_ENTER("get_func_mm_tree"); + + switch (cond_func->functype()) { + case Item_func::NE_FUNC: + tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC, + value, cmp_type); + if (tree) + { + tree= tree_or(param, tree, get_mm_parts(param, cond_func, field, + Item_func::GT_FUNC, + value, cmp_type)); + } + break; + case Item_func::BETWEEN: + tree= get_mm_parts(param, cond_func, field, Item_func::GE_FUNC, + cond_func->arguments()[1],cmp_type); + if (tree) + { + tree= tree_and(param, tree, get_mm_parts(param, cond_func, field, + Item_func::LE_FUNC, + cond_func->arguments()[2], + cmp_type)); + } + break; + case Item_func::IN_FUNC: + { + Item_func_in *func=(Item_func_in*) cond_func; + tree= get_mm_parts(param, cond_func, field, Item_func::EQ_FUNC, + func->arguments()[1], cmp_type); + if (tree) + { + Item **arg, **end; + for (arg= func->arguments()+2, end= arg+func->argument_count()-2; + arg < end ; arg++) + { + tree= tree_or(param, tree, get_mm_parts(param, cond_func, field, + Item_func::EQ_FUNC, + *arg, + cmp_type)); + } + } + break; + } + default: + { + /* + Here the function for the following predicates are processed: + <, <=, =, >=, >, LIKE, IS NULL, IS NOT NULL. + If the predicate is of the form (value op field) it is handled + as the equivalent predicate (field rev_op value), e.g. + 2 <= a is handled as a >= 2. + */ + Item_func::Functype func_type= + (value != cond_func->arguments()[0]) ? 
cond_func->functype() : + ((Item_bool_func2*) cond_func)->rev_functype(); + tree= get_mm_parts(param, cond_func, field, func_type, value, cmp_type); + } + } + + DBUG_RETURN(tree); + +} + /* make a select tree of all keys in condition */ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) { SEL_TREE *tree=0; + SEL_TREE *ftree= 0; + Item_field *field_item= 0; + Item *value; DBUG_ENTER("get_mm_tree"); if (cond->type() == Item::COND_ITEM) @@ -832,9 +3373,12 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) DBUG_RETURN(new SEL_TREE(SEL_TREE::IMPOSSIBLE)); } - table_map ref_tables=cond->used_tables(); + table_map ref_tables= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); if (cond->type() != Item::FUNC_ITEM) { // Should be a field + ref_tables= cond->used_tables(); if ((ref_tables & param->current_table) || (ref_tables & ~(param->prev_tables | param->read_tables))) DBUG_RETURN(0); @@ -847,97 +3391,111 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) param->cond= cond; - if (cond_func->functype() == Item_func::BETWEEN) + switch (cond_func->functype()) { + case Item_func::BETWEEN: + if (cond_func->arguments()[0]->type() != Item::FIELD_ITEM) + DBUG_RETURN(0); + field_item= (Item_field*) (cond_func->arguments()[0]); + value= NULL; + break; + case Item_func::IN_FUNC: + { + Item_func_in *func=(Item_func_in*) cond_func; + if (func->key_item()->type() != Item::FIELD_ITEM) + DBUG_RETURN(0); + field_item= (Item_field*) (func->key_item()); + value= NULL; + break; + } + case Item_func::MULT_EQUAL_FUNC: { + Item_equal *item_equal= (Item_equal *) cond; + if (!(value= item_equal->get_const())) + DBUG_RETURN(0); + Item_equal_iterator it(*item_equal); + ref_tables= value->used_tables(); + while ((field_item= it++)) + { + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + { + tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC, + value,cmp_type); + ftree= !ftree ? tree : tree_and(param, ftree, tree); + } + } + + DBUG_RETURN(ftree); + } + default: if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) { - Field *field=((Item_field*) (cond_func->arguments()[0]))->field; - Item_result cmp_type=field->cmp_type(); - DBUG_RETURN(tree_and(param, - get_mm_parts(param, cond_func, field, - Item_func::GE_FUNC, - cond_func->arguments()[1], cmp_type), - get_mm_parts(param, cond_func, field, - Item_func::LE_FUNC, - cond_func->arguments()[2], cmp_type))); + field_item= (Item_field*) (cond_func->arguments()[0]); + value= cond_func->arg_count > 1 ? 
cond_func->arguments()[1] : 0; } - DBUG_RETURN(0); + else if (cond_func->have_rev_func() && + cond_func->arguments()[1]->type() == Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[1]); + value= cond_func->arguments()[0]; + } + else + DBUG_RETURN(0); } - if (cond_func->functype() == Item_func::IN_FUNC) - { // COND OR - Item_func_in *func=(Item_func_in*) cond_func; - if (func->key_item()->type() == Item::FIELD_ITEM) - { - Field *field=((Item_field*) (func->key_item()))->field; - Item_result cmp_type=field->cmp_type(); - tree= get_mm_parts(param,cond_func,field,Item_func::EQ_FUNC, - func->arguments()[1],cmp_type); - if (!tree) - DBUG_RETURN(tree); // Not key field - for (uint i=2 ; i < func->argument_count(); i++) - { - SEL_TREE *new_tree=get_mm_parts(param,cond_func,field, - Item_func::EQ_FUNC, - func->arguments()[i],cmp_type); - tree=tree_or(param,tree,new_tree); - } - DBUG_RETURN(tree); - } - DBUG_RETURN(0); // Can't optimize this IN - } - - if (ref_tables & ~(param->prev_tables | param->read_tables | - param->current_table)) - DBUG_RETURN(0); // Can't be calculated yet - if (!(ref_tables & param->current_table)) - DBUG_RETURN(new SEL_TREE(SEL_TREE::MAYBE)); // This may be false or true - - /* check field op const */ - /* btw, ft_func's arguments()[0] isn't FIELD_ITEM. SerG*/ - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) - { - tree= get_mm_parts(param, cond_func, - ((Item_field*) (cond_func->arguments()[0]))->field, - cond_func->functype(), - cond_func->arg_count > 1 ? cond_func->arguments()[1] : - 0, - ((Item_field*) (cond_func->arguments()[0]))->field-> - cmp_type()); - } - /* check const op field */ - if (!tree && - cond_func->have_rev_func() && - cond_func->arguments()[1]->type() == Item::FIELD_ITEM) - { - DBUG_RETURN(get_mm_parts(param, cond_func, - ((Item_field*) - (cond_func->arguments()[1]))->field, - ((Item_bool_func2*) cond_func)->rev_functype(), - cond_func->arguments()[0], - ((Item_field*) - (cond_func->arguments()[1]))->field->cmp_type() - )); + + /* + If the where condition contains a predicate (ti.field op const), + then not only SELL_TREE for this predicate is built, but + the trees for the results of substitution of ti.field for + each tj.field belonging to the same multiple equality as ti.field + are built as well. + E.g. for WHERE t1.a=t2.a AND t2.a > 10 + a SEL_TREE for t2.a > 10 will be built for quick select from t2 + and + a SEL_TREE for t1.a > 10 will be built for quick select from t1. + */ + + for (uint i= 0; i < cond_func->arg_count; i++) + { + Item *arg= cond_func->arguments()[i]; + if (arg != field_item) + ref_tables|= arg->used_tables(); } - DBUG_RETURN(tree); + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + Field *f= item->field; + if (field->eq(f)) + continue; + if (!((ref_tables | f->table->map) & param_comp)) + { + tree= get_func_mm_tree(param, cond_func, f, value, cmp_type); + ftree= !ftree ? 
tree : tree_and(param, ftree, tree); + } + } + } + DBUG_RETURN(ftree); } static SEL_TREE * get_mm_parts(PARAM *param, COND *cond_func, Field *field, - Item_func::Functype type, + Item_func::Functype type, Item *value, Item_result cmp_type) { - bool ne_func= FALSE; DBUG_ENTER("get_mm_parts"); if (field->table != param->table) DBUG_RETURN(0); - if (type == Item_func::NE_FUNC) - { - ne_func= TRUE; - type= Item_func::LT_FUNC; - } - KEY_PART *key_part = param->key_parts; KEY_PART *end = param->key_parts_end; SEL_TREE *tree=0; @@ -966,22 +3524,15 @@ get_mm_parts(PARAM *param, COND *cond_func, Field *field, else { // This key may be used later - if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) + if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) DBUG_RETURN(0); // OOM } sel_arg->part=(uchar) key_part->part; tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg); + tree->keys_map.set_bit(key_part->key); } } - if (ne_func) - { - SEL_TREE *tree2= get_mm_parts(param, cond_func, - field, Item_func::GT_FUNC, - value, cmp_type); - if (tree2) - tree= tree_or(param,tree,tree2); - } DBUG_RETURN(tree); } @@ -992,6 +3543,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, { uint maybe_null=(uint) field->real_maybe_null(), copies; uint field_length=field->pack_length()+maybe_null; + bool optimize_range; SEL_ARG *tree; char *str, *str2; DBUG_ENTER("get_mm_leaf"); @@ -1031,6 +3583,9 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, !(conf_func->compare_collation()->state & MY_CS_BINSORT)) DBUG_RETURN(0); + optimize_range= field->optimize_range(param->real_keynr[key_part->key], + key_part->part); + if (type == Item_func::LIKE_FUNC) { bool like_error; @@ -1038,8 +3593,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, String tmp(buff1,sizeof(buff1),value->collation.collation),*res; uint length,offset,min_length,max_length; - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part)) + if (!optimize_range) DBUG_RETURN(0); // Can't optimize this if (!(res= value->val_str(&tmp))) DBUG_RETURN(&null_element); @@ -1104,8 +3658,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, DBUG_RETURN(new SEL_ARG(field,min_str,max_str)); } - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part) && + if (!optimize_range && type != Item_func::EQ_FUNC && type != Item_func::EQUAL_FUNC) DBUG_RETURN(0); // Can't optimize this @@ -1118,11 +3671,11 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, value->result_type() != STRING_RESULT && field->cmp_type() != value->result_type()) DBUG_RETURN(0); - - if (value->save_in_field(field, 1) < 0) + + if (value->save_in_field_no_warnings(field, 1) < 0) { /* This happens when we try to insert a NULL field in a not null column */ - DBUG_RETURN(&null_element); // cmp with NULL is never true + DBUG_RETURN(&null_element); // cmp with NULL is never TRUE } /* Get local copy of key */ copies= 1; @@ -1227,8 +3780,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, ** If tree is 0 it means that the condition can't be tested. It refers ** to a non existent table or to a field in current table with isn't a key. 
** The different tree flags: -** IMPOSSIBLE: Condition is never true -** ALWAYS: Condition is always true +** IMPOSSIBLE: Condition is never TRUE +** ALWAYS: Condition is always TRUE ** MAYBE: Condition may exists when tables are read ** MAYBE_KEY: Condition refers to a key that may be used in join loop ** KEY_RANGE: Condition uses a key @@ -1298,6 +3851,8 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) DBUG_RETURN(tree1); } + key_map result_keys; + result_keys.clear_all(); /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; @@ -1314,17 +3869,60 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; + DBUG_RETURN(tree1); + } + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG + if (*key1) (*key1)->test_use_count(*key1); #endif - break; - } } } + tree1->keys_map= result_keys; + /* dispose index_merge if there is a "range" option */ + if (!result_keys.is_clear_all()) + { + tree1->merges.empty(); + DBUG_RETURN(tree1); + } + + /* ok, both trees are index_merge trees */ + imerge_list_and_list(&tree1->merges, &tree2->merges); DBUG_RETURN(tree1); } +/* + Check if two SEL_TREES can be combined into one (i.e. a single key range + read can be constructed for "cond_of_tree1 OR cond_of_tree2" ) without + using index_merge. +*/ + +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param) +{ + key_map common_keys= tree1->keys_map; + DBUG_ENTER("sel_trees_can_be_ored"); + common_keys.intersect(tree2->keys_map); + + if (common_keys.is_clear_all()) + DBUG_RETURN(FALSE); + + /* trees have a common key, check if they refer to same key part */ + SEL_ARG **key1,**key2; + for (uint key_no=0; key_no < param->keys; key_no++) + { + if (common_keys.is_set(key_no)) + { + key1= tree1->keys + key_no; + key2= tree2->keys + key_no; + if ((*key1)->part == (*key2)->part) + { + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} static SEL_TREE * tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) @@ -1341,19 +3939,62 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (tree2->type == SEL_TREE::MAYBE) DBUG_RETURN(tree2); - /* Join the trees key per key */ - SEL_ARG **key1,**key2,**end; - SEL_TREE *result=0; - for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; - key1 != end ; key1++,key2++) + SEL_TREE *result= 0; + key_map result_keys; + result_keys.clear_all(); + if (sel_trees_can_be_ored(tree1, tree2, param)) { - *key1=key_or(*key1,*key2); - if (*key1) + /* Join the trees key per key */ + SEL_ARG **key1,**key2,**end; + for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; + key1 != end ; key1++,key2++) { - result=tree1; // Added to tree1 + *key1=key_or(*key1,*key2); + if (*key1) + { + result=tree1; // Added to tree1 + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + (*key1)->test_use_count(*key1); #endif + } + } + if (result) + result->keys_map= result_keys; + } + else + { + /* ok, two trees have KEY type but cannot be used without index merge */ + if (tree1->merges.is_empty() && tree2->merges.is_empty()) + { + SEL_IMERGE *merge; + /* both trees are "range" trees, produce new index merge structure */ + if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) || + (result->merges.push_back(merge)) || + (merge->or_sel_tree(param, tree1)) || + (merge->or_sel_tree(param, tree2))) + result= NULL; + else + result->type= tree1->type; + } + else 
if (!tree1->merges.is_empty() && !tree2->merges.is_empty()) + { + if (imerge_list_or_list(param, &tree1->merges, &tree2->merges)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; + } + else + { + /* one tree is index merge tree and another is range tree */ + if (tree1->merges.is_empty()) + swap_variables(SEL_TREE*, tree1, tree2); + + /* add tree2 to tree1->merges, checking if it collapses to ALWAYS */ + if (imerge_list_or_tree(param, &tree1->merges, tree2)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; } } DBUG_RETURN(result); @@ -1402,7 +4043,6 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) } - static SEL_ARG * key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { @@ -1472,6 +4112,13 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) return 0; // Can't optimize this } + if ((key1->min_flag | key2->min_flag) & GEOM_FLAG) + { + key1->free_tree(); + key2->free_tree(); + return 0; // Can't optimize this + } + key1->use_count--; key2->use_count--; SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0; @@ -1895,7 +4542,7 @@ SEL_ARG::find_range(SEL_ARG *key) SYNOPSIS tree_delete() key Key that is to be deleted from tree (this) - + NOTE This also frees all sub trees that is used by the element @@ -2142,7 +4789,7 @@ SEL_ARG *rb_delete_fixup(SEL_ARG *root,SEL_ARG *key,SEL_ARG *par) } - /* Test that the proporties for a red-black tree holds */ + /* Test that the properties for a red-black tree hold */ #ifdef EXTRA_DEBUG int test_rb_tree(SEL_ARG *element,SEL_ARG *parent) @@ -2215,7 +4862,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_information("Use_count: Wrong count for key at %lx, %lu should be %lu", + sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu should be %lu", pos,pos->next_key_part->use_count,count); return; } @@ -2223,45 +4870,123 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } } if (e_count != elements) - sql_print_warning("Wrong use count: %u (should be %u) for tree at %lx", + sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx", e_count, elements, (gptr) this); } #endif +/* + Calculate estimate of number records that will be retrieved by a range + scan on given index using given SEL_ARG intervals tree. + SYNOPSIS + check_quick_select + param Parameter from test_quick_select + idx Number of index to use in PARAM::key SEL_TREE::key + tree Transformed selection condition, tree->key[idx] holds intervals + tree to be used for scanning. + NOTES + param->is_ror_scan is set to reflect if the key scan is a ROR (see + is_key_scan_ror function for more info) + param->table->quick_*, param->range_count (and maybe others) are + updated with data of given key scan, see check_quick_keys for details. -/***************************************************************************** -** Check how many records we will find by using the found tree -*****************************************************************************/ + RETURN + Estimate # of records to be retrieved. + HA_POS_ERROR if estimate calculation failed due to table handler problems. 
+ +*/ static ha_rows check_quick_select(PARAM *param,uint idx,SEL_ARG *tree) { ha_rows records; + bool cpk_scan; + uint key; DBUG_ENTER("check_quick_select"); + param->is_ror_scan= FALSE; + if (!tree) DBUG_RETURN(HA_POS_ERROR); // Can't use it param->max_key_part=0; param->range_count=0; + key= param->real_keynr[idx]; + if (tree->type == SEL_ARG::IMPOSSIBLE) DBUG_RETURN(0L); // Impossible select. return if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0) DBUG_RETURN(HA_POS_ERROR); // Don't use tree + + enum ha_key_alg key_alg= param->table->key_info[key].algorithm; + if ((key_alg != HA_KEY_ALG_BTREE) && (key_alg!= HA_KEY_ALG_UNDEF)) + { + /* Records are not ordered by rowid for other types of indexes. */ + cpk_scan= FALSE; + } + else + { + /* + Clustered PK scan is a special case, check_quick_keys doesn't recognize + CPK scans as ROR scans (while actually any CPK scan is a ROR scan). + */ + cpk_scan= (param->table->primary_key == param->real_keynr[idx]) && + param->table->file->primary_key_is_clustered(); + param->is_ror_scan= !cpk_scan; + } + records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0); if (records != HA_POS_ERROR) { - uint key=param->real_keynr[idx]; param->table->quick_keys.set_bit(key); param->table->quick_rows[key]=records; param->table->quick_key_parts[key]=param->max_key_part+1; + + if (cpk_scan) + param->is_ror_scan= TRUE; } DBUG_PRINT("exit", ("Records: %lu", (ulong) records)); DBUG_RETURN(records); } +/* + Recursively calculate estimate of # rows that will be retrieved by + key scan on key idx. + SYNOPSIS + check_quick_keys() + param Parameter from test_quick select function. + idx Number of key to use in PARAM::keys in list of used keys + (param->real_keynr[idx] holds the key number in table) + key_tree SEL_ARG tree being examined. + min_key Buffer with partial min key value tuple + min_key_flag + max_key Buffer with partial max key value tuple + max_key_flag + + NOTES + The function does the recursive descent on the tree via SEL_ARG::left, + SEL_ARG::right, and SEL_ARG::next_key_part edges. The #rows estimates + are calculated using records_in_range calls at the leaf nodes and then + summed. + + param->min_key and param->max_key are used to hold prefixes of key value + tuples. + + The side effects are: + + param->max_key_part is updated to hold the maximum number of key parts used + in scan minus 1. + + param->range_count is incremented if the function finds a range that + wasn't counted by the caller. + + param->is_ror_scan is cleared if the function detects that the key scan is + not a Rowid-Ordered Retrieval scan ( see comments for is_key_scan_ror + function for description of which key scans are ROR scans) +*/ + static ha_rows check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, @@ -2272,6 +4997,13 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, param->max_key_part=max(param->max_key_part,key_tree->part); if (key_tree->left != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. 
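+      (For a secondary index the rows of each single interval are
+      rowid-ordered, but the concatenation of two intervals is not; only
+      for a clustered PK, where the rowid is the key itself, does the whole
+      scan stay rowid-ordered.)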
+ */ + param->is_ror_scan= FALSE; records=check_quick_keys(param,idx,key_tree->left,min_key,min_key_flag, max_key,max_key_flag); if (records == HA_POS_ERROR) // Impossible @@ -2286,6 +5018,19 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, uint min_key_length= (uint) (tmp_min_key- param->min_key); uint max_key_length= (uint) (tmp_max_key- param->max_key); + if (param->is_ror_scan) + { + /* + If the index doesn't cover entire key, mark the scan as non-ROR scan. + Actually we're cutting off some ROR scans here. + */ + uint16 fieldnr= param->table->key_info[param->real_keynr[idx]]. + key_part[key_tree->part].fieldnr - 1; + if (param->table->field[fieldnr]->key_length() != + param->key[idx][key_tree->part].length) + param->is_ror_scan= FALSE; + } + if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) @@ -2299,6 +5044,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp_max_key, max_key_flag | key_tree->max_flag); goto end; // Ugly, but efficient } + else + { + /* The interval for current key part is not c1 <= keyXpartY <= c1 */ + param->is_ror_scan= FALSE; + } + tmp_min_flag=key_tree->min_flag; tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) @@ -2327,6 +5078,24 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp=1; // Max one record else { + if (param->is_ror_scan) + { + /* + If we get here, the condition on the key was converted to form + "(keyXpart1 = c1) AND ... AND (keyXpart{key_tree->part - 1} = cN) AND + somecond(keyXpart{key_tree->part})" + Check if + somecond is "keyXpart{key_tree->part} = const" and + uncovered "tail" of KeyX parts is either empty or is identical to + first members of clustered primary key. + */ + if (!(min_key_length == max_key_length && + !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !key_tree->min_flag && !key_tree->max_flag && + is_key_scan_ror(param, keynr, key_tree->part + 1))) + param->is_ror_scan= FALSE; + } + if (tmp_min_flag & GEOM_FLAG) { key_range min_range; @@ -2363,6 +5132,13 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, records+=tmp; if (key_tree->right != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. + */ + param->is_ror_scan= FALSE; tmp=check_quick_keys(param,idx,key_tree->right,min_key,min_key_flag, max_key,max_key_flag); if (tmp == HA_POS_ERROR) @@ -2373,22 +5149,109 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, } -/**************************************************************************** -** change a tree to a structure to be used by quick_select -** This uses it's own malloc tree -****************************************************************************/ +/* + Check if key scan on given index with equality conditions on first n key + parts is a ROR scan. + + SYNOPSIS + is_key_scan_ror() + param Parameter from test_quick_select + keynr Number of key in the table. The key must not be a clustered + primary key. + nparts Number of first key parts for which equality conditions + are present. + + NOTES + ROR (Rowid Ordered Retrieval) key scan is a key scan that produces + ordered sequence of rowids (ha_xxx::cmp_ref is the comparison function) + + An index scan is a ROR scan if it is done using a condition in form + + "key1_1=c_1 AND ... 
AND key1_n=c_n" (1) -static QUICK_SELECT * -get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) + where the index is defined on (key1_1, ..., key1_N [,a_1, ..., a_n]) + + and the table has a clustered Primary Key + + PRIMARY KEY(a_1, ..., a_n, b1, ..., b_k) with first key parts being + identical to uncovered parts ot the key being scanned (2) + + Scans on HASH indexes are not ROR scans, + any range scan on clustered primary key is ROR scan (3) + + Check (1) is made in check_quick_keys() + Check (3) is made check_quick_select() + Check (2) is made by this function. + + RETURN + TRUE If the scan is ROR-scan + FALSE otherwise +*/ + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) { - QUICK_SELECT *quick; + KEY *table_key= param->table->key_info + keynr; + KEY_PART_INFO *key_part= table_key->key_part + nparts; + KEY_PART_INFO *key_part_end= table_key->key_part + + table_key->key_parts; + + if (key_part == key_part_end) + return TRUE; + uint pk_number= param->table->primary_key; + if (!param->table->file->primary_key_is_clustered() || pk_number == MAX_KEY) + return FALSE; + + KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; + KEY_PART_INFO *pk_part_end= pk_part + + param->table->key_info[pk_number].key_parts; + for(;(key_part!=key_part_end) && (pk_part != pk_part_end); + ++key_part, ++pk_part) + { + if ((key_part->field != pk_part->field) || + (key_part->length != pk_part->length)) + return FALSE; + } + return (key_part == key_part_end); +} + + +/* + Create a QUICK_RANGE_SELECT from given key and SEL_ARG tree for that key. + + SYNOPSIS + get_quick_select() + param + idx Index of used key in param->key. + key_tree SEL_ARG tree for the used key + parent_alloc If not NULL, use it to allocate memory for + quick select data. Otherwise use quick->alloc. + NOTES + The caller must call QUICK_SELECT::init for returned quick select + + CAUTION! This function may change thd->mem_root to a MEM_ROOT which will be + deallocated when the returned quick select is deleted. + + RETURN + NULL on error + otherwise created quick select +*/ + +QUICK_RANGE_SELECT * +get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree, + MEM_ROOT *parent_alloc) +{ + QUICK_RANGE_SELECT *quick; DBUG_ENTER("get_quick_select"); if (param->table->key_info[param->real_keynr[idx]].flags & HA_SPATIAL) - quick=new QUICK_SELECT_GEOM(param->thd, param->table, param->real_keynr[idx], - 0); + quick=new QUICK_RANGE_SELECT_GEOM(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc), + parent_alloc); else - quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]); + quick=new QUICK_RANGE_SELECT(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc)); if (quick) { @@ -2402,9 +5265,10 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) else { quick->key_parts=(KEY_PART*) - memdup_root(&quick->alloc,(char*) param->key[idx], - sizeof(KEY_PART)* - param->table->key_info[param->real_keynr[idx]].key_parts); + memdup_root(parent_alloc? 
parent_alloc : &quick->alloc, + (char*) param->key[idx], + sizeof(KEY_PART)* + param->table->key_info[param->real_keynr[idx]].key_parts); } } DBUG_RETURN(quick); @@ -2414,9 +5278,8 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) /* ** Fix this to get all possible sub_ranges */ - -static bool -get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool +get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { @@ -2512,7 +5375,8 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, set_if_bigger(quick->max_used_key_length,range->min_length); set_if_bigger(quick->max_used_key_length,range->max_length); set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); - quick->ranges.push_back(range); + if (insert_dynamic(&quick->ranges, (gptr)&range)) + return 1; end: if (key_tree->right != &null_element) @@ -2526,12 +5390,12 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, Return 1 if there is only one range and this uses the whole primary key */ -bool QUICK_SELECT::unique_key_range() +bool QUICK_RANGE_SELECT::unique_key_range() { if (ranges.elements == 1) { - QUICK_RANGE *tmp; - if (((tmp=ranges.head())->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) + QUICK_RANGE *tmp= *((QUICK_RANGE**)ranges.buffer); + if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) { KEY *key=head->key_info+index; return ((key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && @@ -2542,11 +5406,11 @@ bool QUICK_SELECT::unique_key_range() } -/* Returns true if any part of the key is NULL */ +/* Returns TRUE if any part of the key is NULL */ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) { - for (const char *end=key+length ; + for (const char *end=key+length ; key < end; key+= key_part++->store_length) { @@ -2557,15 +5421,60 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) } +bool QUICK_SELECT_I::check_if_keys_used(List<Item> *fields) +{ + return check_if_key_used(head, index, *fields); +} + +bool QUICK_INDEX_MERGE_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (check_if_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_INTERSECT_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (check_if_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_UNION_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->check_if_keys_used(fields)) + return 1; + } + return 0; +} + + /**************************************************************************** Create a QUICK RANGE based on a key + This allocates things in a new memory root, as this may be called many times + during a query. 
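The ROR test implemented by is_key_scan_ror() earlier in this hunk boils down to a structural comparison: the key parts of the scanned index that are not bound by equalities must either be absent or coincide, field by field, with a prefix of the clustered primary key. A minimal standalone sketch of that comparison, with plain field-name strings standing in for the server's KEY_PART_INFO (none of the names below are server code):

    #include <string>
    #include <vector>

    /* Illustrative stand-in for a key part; not the server's KEY_PART_INFO. */
    struct KeyPart { std::string field; unsigned length; };

    /*
      True if a scan on 'index' with equalities on its first 'nparts' key parts
      returns rows in rowid order: the uncovered suffix of 'index' must be empty
      or coincide with a prefix of the clustered primary key.
    */
    bool key_scan_is_ror(const std::vector<KeyPart> &index,
                         const std::vector<KeyPart> &primary_key,
                         size_t nparts)
    {
      if (nparts >= index.size())
        return true;                            // every key part is bound
      size_t pk= 0;
      for (size_t i= nparts; i < index.size(); i++, pk++)
      {
        if (pk == primary_key.size())
          return false;                         // suffix longer than the PK
        if (index[i].field != primary_key[pk].field ||
            index[i].length != primary_key[pk].length)
          return false;                         // suffix diverges from PK prefix
      }
      return true;
    }

For example, with INDEX(a, b) over a table whose clustered primary key is (b, c), an equality on a leaves the suffix (b), which matches the PK prefix, so the scan is ROR; with no equality on a the check fails at the first suffix part.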
****************************************************************************/ -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + TABLE_REF *ref) { MEM_ROOT *old_root= thd->mem_root; /* The following call may change thd->mem_root */ - QUICK_SELECT *quick= new QUICK_SELECT(thd, table, ref->key); + QUICK_RANGE_SELECT *quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0); KEY *key_info = &table->key_info[ref->key]; KEY_PART *key_part; QUICK_RANGE *range; @@ -2573,15 +5482,15 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) if (!quick) return 0; /* no ranges found */ - if (cp_buffer_from_ref(ref)) + if (quick->init()) { - if (thd->is_fatal_error) - goto err; // out of memory - goto ok; // empty range + delete quick; + goto err; } - if (!(range= new QUICK_RANGE())) - goto err; // out of memory + if (cp_buffer_from_ref(ref) && thd->is_fatal_error || + !(range= new QUICK_RANGE())) + goto err; // out of memory range->min_key=range->max_key=(char*) ref->key_buff; range->min_length=range->max_length=ref->key_length; @@ -2601,10 +5510,10 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; } - if (quick->ranges.push_back(range)) + if (insert_dynamic(&quick->ranges,(gptr)&range)) goto err; - /* + /* Add a NULL range if REF_OR_NULL optimization is used. For example: if we have "WHERE A=2 OR A IS NULL" we created the (A=2) range above @@ -2620,7 +5529,7 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) EQ_RANGE))) goto err; *ref->null_ref_key= 0; // Clear null byte - if (quick->ranges.push_back(null_range)) + if (insert_dynamic(&quick->ranges,(gptr)&null_range)) goto err; } @@ -2634,11 +5543,288 @@ err: return 0; } + +/* + Fetch all row ids into unique. + + If table has a clustered primary key that covers all rows (TRUE for bdb + and innodb currently) and one of the index_merge scans is a scan on PK, + then + primary key scan rowids are not put into Unique and also + rows that will be retrieved by PK scan are not put into Unique + + RETURN + 0 OK + other error +*/ + +int QUICK_INDEX_MERGE_SELECT::prepare_unique() +{ + int result; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique"); + + /* We're going to just read rowids. */ + head->file->extra(HA_EXTRA_KEYREAD); + + /* + Make innodb retrieve all PK member fields, so + * ha_innobase::position (which uses them) call works. + * We can filter out rows that will be retrieved by clustered PK. + (This also creates a deficiency - it is possible that we will retrieve + parts of key that are not used by current query at all.) 
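The sort-union strategy described above for QUICK_INDEX_MERGE_SELECT - gather rowids from the non-PK range scans, drop the ones a clustered-PK range scan would return anyway, de-duplicate, then visit the survivors in rowid order - can be pictured with a small standalone sketch; std::sort/std::unique stand in for the server's Unique class and plain integers for handler rowids (all names here are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <vector>

    typedef std::uint64_t Rowid;                      // stand-in for handler::ref

    std::vector<Rowid>
    sort_union(const std::vector<std::vector<Rowid> > &secondary_scans,
               const std::function<bool(Rowid)> &covered_by_pk_ranges)
    {
      std::vector<Rowid> merged;
      for (const std::vector<Rowid> &scan : secondary_scans)
        for (Rowid rid : scan)
          if (!covered_by_pk_ranges(rid))             // like row_in_ranges() filtering
            merged.push_back(rid);

      std::sort(merged.begin(), merged.end());        // the Unique class sorts ...
      merged.erase(std::unique(merged.begin(), merged.end()),
                   merged.end());                     // ... and drops duplicates
      /* Fetch these rows in rowid order, then run the clustered-PK scan itself. */
      return merged;
    }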
+ */ + head->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + + cur_quick_select->init(); + + unique= new Unique(refpos_order_cmp, (void *)head->file, + head->file->ref_length, + thd->variables.sortbuff_size); + if (!unique) + DBUG_RETURN(1); + for (;;) + { + while ((result= cur_quick_select->get_next()) == HA_ERR_END_OF_FILE) + { + cur_quick_select->range_end(); + cur_quick_select= cur_quick_it++; + if (!cur_quick_select) + break; + + if (cur_quick_select->init()) + DBUG_RETURN(1); + + /* QUICK_RANGE_SELECT::reset never fails */ + cur_quick_select->reset(); + } + + if (result) + { + if (result != HA_ERR_END_OF_FILE) + DBUG_RETURN(result); + break; + } + + if (thd->killed) + DBUG_RETURN(1); + + /* skip row if it will be retrieved by clustered PK scan */ + if (pk_quick_select && pk_quick_select->row_in_ranges()) + continue; + + cur_quick_select->file->position(cur_quick_select->record); + result= unique->unique_add((char*)cur_quick_select->file->ref); + if (result) + DBUG_RETURN(1); + + } + + /* ok, all row ids are in Unique */ + result= unique->get(head); + doing_pk_scan= FALSE; + /* start table scan */ + init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1); + /* index_merge currently doesn't support "using index" at all */ + head->file->extra(HA_EXTRA_NO_KEYREAD); + + DBUG_RETURN(result); +} + + +/* + Get next row for index_merge. + NOTES + The rows are read from + 1. rowids stored in Unique. + 2. QUICK_RANGE_SELECT with clustered primary key (if any). + The sets of rows retrieved in 1) and 2) are guaranteed to be disjoint. +*/ + +int QUICK_INDEX_MERGE_SELECT::get_next() +{ + int result; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::get_next"); + + if (doing_pk_scan) + DBUG_RETURN(pk_quick_select->get_next()); + + result= read_record.read_record(&read_record); + + if (result == -1) + { + result= HA_ERR_END_OF_FILE; + end_read_record(&read_record); + /* All rows from Unique have been retrieved, do a clustered PK scan */ + if (pk_quick_select) + { + doing_pk_scan= TRUE; + if ((result= pk_quick_select->init())) + DBUG_RETURN(result); + DBUG_RETURN(pk_quick_select->get_next()); + } + } + + DBUG_RETURN(result); +} + + +/* + Retrieve next record. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::get_next() + + NOTES + Invariant on enter/exit: all intersected selects have retrieved all index + records with rowid <= some_rowid_val and no intersected select has + retrieved any index records with rowid > some_rowid_val. + We start fresh and loop until we have retrieved the same rowid in each of + the key scans or we got an error. + + If a Clustered PK scan is present, it is used only to check if row + satisfies its condition (and never used for row retrieval). + + RETURN + 0 - Ok + other - Error code if any error occurred. 
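The enter/exit invariant above drives a "catch-up" intersection: keep a candidate rowid, let each rowid-ordered scan advance to it, and report the row only once every scan has confirmed the same rowid. A standalone sketch with in-memory streams of plain integer rowids (one zero-initialized cursor per stream; the clustered-PK filtering done by cpk_quick is omitted):

    #include <cstdint>
    #include <optional>
    #include <vector>

    typedef std::uint64_t Rowid;                      // stand-in for handler::ref

    std::optional<Rowid>
    ror_intersect_next(const std::vector<std::vector<Rowid> > &streams,
                       std::vector<size_t> &pos)      // one cursor per stream
    {
      const size_t n= streams.size();
      if (n == 0 || pos[0] == streams[0].size())
        return std::nullopt;
      Rowid candidate= streams[0][pos[0]++];          // first scan proposes a rowid
      size_t confirmed= 1, i= 1 % n;

      while (confirmed < n)
      {
        const std::vector<Rowid> &s= streams[i];
        while (pos[i] < s.size() && s[pos[i]] < candidate)
          pos[i]++;                                   // let this scan catch up
        if (pos[i] == s.size())
          return std::nullopt;                        // one scan ran dry: done
        if (s[pos[i]] == candidate)
          confirmed++;                                // this scan agrees
        else
        {
          candidate= s[pos[i]];                       // overshoot: new candidate
          confirmed= 1;
        }
        i= (i + 1) % n;                               // round-robin over the scans
      }
      return candidate;                               // present in every scan
    }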
+*/ + +int QUICK_ROR_INTERSECT_SELECT::get_next() +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + int error, cmp; + uint last_rowid_count=0; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next"); + + /* Get a rowid for first quick and save it as a 'candidate' */ + quick= quick_it++; + if (cpk_quick) + { + do { + error= quick->get_next(); + }while (!error && !cpk_quick->row_in_ranges()); + } + else + error= quick->get_next(); + + if (error) + DBUG_RETURN(error); + + quick->file->position(quick->record); + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + + while (last_rowid_count < quick_selects.elements) + { + if (!(quick= quick_it++)) + { + quick_it.rewind(); + quick= quick_it++; + } + + do { + if ((error= quick->get_next())) + DBUG_RETURN(error); + quick->file->position(quick->record); + cmp= head->file->cmp_ref(quick->file->ref, last_rowid); + } while (cmp < 0); + + /* Ok, current select 'caught up' and returned ref >= cur_ref */ + if (cmp > 0) + { + /* Found a row with ref > cur_ref. Make it a new 'candidate' */ + if (cpk_quick) + { + while (!cpk_quick->row_in_ranges()) + { + if ((error= quick->get_next())) + DBUG_RETURN(error); + } + } + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + } + else + { + /* current 'candidate' row confirmed by this select */ + last_rowid_count++; + } + } + + /* We get here iff we got the same row ref in all scans. */ + if (need_to_fetch_row) + error= head->file->rnd_pos(head->record[0], last_rowid); + DBUG_RETURN(error); +} + + +/* + Retrieve next record. + SYNOPSIS + QUICK_ROR_UNION_SELECT::get_next() + + NOTES + Enter/exit invariant: + For each quick select in the queue a {key,rowid} tuple has been + retrieved but the corresponding row hasn't been passed to output. + + RETURN + 0 - Ok + other - Error code if any error occurred. +*/ + +int QUICK_ROR_UNION_SELECT::get_next() +{ + int error, dup_row; + QUICK_SELECT_I *quick; + byte *tmp; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::get_next"); + + do + { + if (!queue.elements) + DBUG_RETURN(HA_ERR_END_OF_FILE); + /* Ok, we have a queue with >= 1 scans */ + + quick= (QUICK_SELECT_I*)queue_top(&queue); + memcpy(cur_rowid, quick->last_rowid, rowid_length); + + /* put into queue rowid from the same stream as top element */ + if ((error= quick->get_next())) + { + if (error != HA_ERR_END_OF_FILE) + DBUG_RETURN(error); + queue_remove(&queue, 0); + } + else + { + quick->save_last_pos(); + queue_replaced(&queue); + } + + if (!have_prev_rowid) + { + /* No rows have been returned yet */ + dup_row= FALSE; + have_prev_rowid= TRUE; + } + else + dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid); + }while (dup_row); + + tmp= cur_rowid; + cur_rowid= prev_rowid; + prev_rowid= tmp; + + error= head->file->rnd_pos(quick->record, prev_rowid); + DBUG_RETURN(error); +} + /* get next possible record using quick-struct */ -int QUICK_SELECT::get_next() +int QUICK_RANGE_SELECT::get_next() { - DBUG_ENTER("get_next"); + DBUG_ENTER("QUICK_RANGE_SELECT::get_next"); for (;;) { @@ -2652,7 +5838,14 @@ int QUICK_SELECT::get_next() DBUG_RETURN(result); } - if (!(range= it++)) + if (!cur_range) + range= *(cur_range= (QUICK_RANGE**) ranges.buffer); + else + range= + (cur_range == ((QUICK_RANGE**) ranges.buffer + ranges.elements - 1)) ? 
+ (QUICK_RANGE*) 0 : *(++cur_range); + + if (!range) DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used start_key.key= (const byte*) range->min_key; @@ -2683,11 +5876,95 @@ int QUICK_SELECT::get_next() } +/* + Get the next record with a different prefix. + + SYNOPSIS + QUICK_RANGE_SELECT::get_next_prefix() + prefix_length length of cur_prefix + cur_prefix prefix of a key to be searached for + + DESCRIPTION + Each subsequent call to the method retrieves the first record that has a + prefix with length prefix_length different from cur_prefix, such that the + record with the new prefix is within the ranges described by + this->ranges. The record found is stored into the buffer pointed by + this->record. + The method is useful for GROUP-BY queries with range conditions to + discover the prefix of the next group that satisfies the range conditions. + + TODO + This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both + methods should be unified into a more general one to reduce code + duplication. + + RETURN + 0 on success + HA_ERR_END_OF_FILE if returned all keys + other if some error occurred +*/ + +int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) +{ + DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); + + for (;;) + { + int result; + key_range start_key, end_key; + if (range) + { + /* Read the next record in the same range with prefix after cur_prefix. */ + DBUG_ASSERT(cur_prefix); + result= file->index_read(record, cur_prefix, prefix_length, + HA_READ_AFTER_KEY); + if (result || (file->compare_key(file->end_range) <= 0)) + DBUG_RETURN(result); + } + + if (!cur_range) + range= *(cur_range= (QUICK_RANGE**) ranges.buffer); /* First range. */ + else + range= + (cur_range == ((QUICK_RANGE**) ranges.buffer + ranges.elements - 1)) ? + (QUICK_RANGE*) 0 : *(++cur_range); /* Next range. */ + + if (!range) + DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + + start_key.key= (const byte*) range->min_key; + start_key.length= min(range->min_length, prefix_length); + start_key.flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + end_key.key= (const byte*) range->max_key; + end_key.length= min(range->max_length, prefix_length); + /* + We use READ_AFTER_KEY here because if we are reading on a key + prefix we want to find all keys with this prefix + */ + end_key.flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : + HA_READ_AFTER_KEY); + + result= file->read_range_first(range->min_length ? &start_key : 0, + range->max_length ? &end_key : 0, + test(range->flag & EQ_RANGE), + sorted); + if (range->flag == (UNIQUE_RANGE | EQ_RANGE)) + range=0; // Stop searching + + if (result != HA_ERR_END_OF_FILE) + DBUG_RETURN(result); + range=0; // No matching rows; go to next range + } +} + + /* Get next for geometrical indexes */ -int QUICK_SELECT_GEOM::get_next() +int QUICK_RANGE_SELECT_GEOM::get_next() { - DBUG_ENTER(" QUICK_SELECT_GEOM::get_next"); + DBUG_ENTER("QUICK_RANGE_SELECT_GEOM::get_next"); for (;;) { @@ -2701,7 +5978,14 @@ int QUICK_SELECT_GEOM::get_next() DBUG_RETURN(result); } - if (!(range= it++)) + if (!cur_range) + range= *(cur_range= (QUICK_RANGE**) ranges.buffer); + else + range= + (cur_range == ((QUICK_RANGE**) ranges.buffer + ranges.elements - 1)) ? 
+ (QUICK_RANGE*) 0 : *(++cur_range); + + if (!range) DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used result= file->index_read(record, @@ -2716,6 +6000,46 @@ int QUICK_SELECT_GEOM::get_next() /* + Check if current row will be retrieved by this QUICK_RANGE_SELECT + + NOTES + It is assumed that currently a scan is being done on another index + which reads all necessary parts of the index that is scanned by this + quick select. + The implementation does a binary search on sorted array of disjoint + ranges, without taking size of range into account. + + This function is used to filter out clustered PK scan rows in + index_merge quick select. + + RETURN + TRUE if current row will be retrieved by this quick select + FALSE if not +*/ + +bool QUICK_RANGE_SELECT::row_in_ranges() +{ + QUICK_RANGE *range; + uint min= 0; + uint max= ranges.elements - 1; + uint mid= (max + min)/2; + + while (min != max) + { + if (cmp_next(*(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid))) + { + /* current row value > mid->max */ + min= mid + 1; + } + else + max= mid; + mid= (min + max) / 2; + } + range= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid); + return (!cmp_next(range) && !cmp_prev(range)); +} + +/* This is a hack: we inherit from QUICK_SELECT so that we can use the get_next() interface, but we have to hold a pointer to the original QUICK_SELECT because its data are used all over the place. What @@ -2725,16 +6049,17 @@ int QUICK_SELECT_GEOM::get_next() for now, this seems to work right at least. */ -QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts) - : QUICK_SELECT(*q), rev_it(rev_ranges) +QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, + uint used_key_parts) + : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges) { QUICK_RANGE *r; - it.rewind(); - for (r = it++; r; r = it++) - { - rev_ranges.push_front(r); - } + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **last_range= pr + ranges.elements; + for (; pr!=last_range; pr++) + rev_ranges.push_front(*pr); + /* Remove EQ_RANGE flag for keys that are not using the full key */ for (r = rev_it++; r; r = rev_it++) { @@ -2827,10 +6152,51 @@ int QUICK_SELECT_DESC::get_next() /* + Compare if found key is over max-value + Returns 0 if key <= range->max_key +*/ + +int QUICK_RANGE_SELECT::cmp_next(QUICK_RANGE *range_arg) +{ + if (range_arg->flag & NO_MAX_RANGE) + return 0; /* key can't be to large */ + + KEY_PART *key_part=key_parts; + uint store_length; + + for (char *key=range_arg->max_key, *end=key+range_arg->max_length; + key < end; + key+= store_length, key_part++) + { + int cmp; + store_length= key_part->store_length; + if (key_part->null_bit) + { + if (*key) + { + if (!key_part->field->is_null()) + return 1; + continue; + } + else if (key_part->field->is_null()) + return 0; + key++; // Skip null byte + store_length--; + } + if ((cmp=key_part->field->key_cmp((byte*) key, key_part->length)) < 0) + return 0; + if (cmp > 0) + return 1; + } + return (range_arg->flag & NEAR_MAX) ? 1 : 0; // Exact match +} + + +/* Returns 0 if found key is inside range (found key >= range->min_key). 
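row_in_ranges()/cmp_next() above amount to a binary search over an array of disjoint ranges sorted in ascending order. A standalone sketch restricted to integer keys (the real code compares packed key images via Field::key_cmp, not integers):

    #include <cstdint>
    #include <vector>

    struct Range { std::int64_t min_key, max_key; };  // inclusive; illustrative only

    /*
      Binary search over disjoint, ascending ranges: range sizes never matter,
      only their ordering, mirroring QUICK_RANGE_SELECT::row_in_ranges().
    */
    bool row_in_ranges(const std::vector<Range> &ranges, std::int64_t key)
    {
      if (ranges.empty())
        return false;
      size_t lo= 0, hi= ranges.size() - 1;
      while (lo != hi)
      {
        size_t mid= (lo + hi) / 2;
        if (key > ranges[mid].max_key)                // like cmp_next(): above mid's max
          lo= mid + 1;
        else
          hi= mid;
      }
      return key >= ranges[lo].min_key && key <= ranges[lo].max_key;
    }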
*/ -int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) +int QUICK_RANGE_SELECT::cmp_prev(QUICK_RANGE *range_arg) { int cmp; if (range_arg->flag & NO_MIN_RANGE) @@ -2845,7 +6211,7 @@ int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) /* - * True if this range will require using HA_READ_AFTER_KEY + * TRUE if this range will require using HA_READ_AFTER_KEY See comment in get_next() about this */ @@ -2857,7 +6223,7 @@ bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg) } -/* True if we are reading over a key that may have a NULL value */ +/* TRUE if we are reading over a key that may have a NULL value */ #ifdef NOT_USED bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, @@ -2905,6 +6271,2142 @@ bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, #endif +void QUICK_RANGE_SELECT::add_info_string(String *str) +{ + KEY *key_info= head->key_info + index; + str->append(key_info->name); +} + +void QUICK_INDEX_MERGE_SELECT::add_info_string(String *str) +{ + QUICK_RANGE_SELECT *quick; + bool first= TRUE; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append("sort_union("); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + if (pk_quick_select) + { + str->append(','); + pk_quick_select->add_info_string(str); + } + str->append(')'); +} + +void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append("intersect("); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (!first) + str->append(','); + else + first= FALSE; + str->append(key_info->name); + } + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + str->append(','); + str->append(key_info->name); + } + str->append(')'); +} + +void QUICK_ROR_UNION_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + str->append("union("); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + str->append(')'); +} + + +void QUICK_RANGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + KEY *key_info= head->key_info + index; + key_names->append(key_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + +void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + + KEY *key_info= head->key_info + quick->index; + key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + if (pk_quick_select) + { + KEY *key_info= head->key_info + pk_quick_select->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(pk_quick_select->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_INTERSECT_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + 
bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(cpk_quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + used_lengths->append(','); + key_names->append(','); + } + quick->add_keys_and_lengths(key_names, used_lengths); + } +} + + +/******************************************************************************* +* Implementation of QUICK_GROUP_MIN_MAX_SELECT +*******************************************************************************/ + +static inline uint get_field_keypart(KEY *index, Field *field); +static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, + PARAM *param, uint *param_idx); +static bool +get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, + KEY_PART_INFO *first_non_group_part, + KEY_PART_INFO *min_max_arg_part, + KEY_PART_INFO *last_part, THD *thd, + byte *key_infix, uint *key_infix_len, + KEY_PART_INFO **first_non_infix_part); +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type); + +static void +cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, + uint group_key_parts, SEL_TREE *range_tree, + SEL_ARG *index_tree, ha_rows quick_prefix_records, + bool have_min, bool have_max, + double *read_cost, ha_rows *records); + +/* + Test if this access method is applicable to a GROUP query with MIN/MAX + functions, and if so, construct a new TRP object. + + SYNOPSIS + get_best_group_min_max() + param Parameter from test_quick_select + sel_tree Range tree generated by get_mm_tree + + DESCRIPTION + Test whether a query can be computed via a QUICK_GROUP_MIN_MAX_SELECT. + Queries computable via a QUICK_GROUP_MIN_MAX_SELECT must satisfy the + following conditions: + A) Table T has at least one compound index I of the form: + I = <A_1, ...,A_k, [B_1,..., B_m], C, [D_1,...,D_n]> + B) Query conditions: + B0. Q is over a single table T. + B1. The attributes referenced by Q are a subset of the attributes of I. + B2. All attributes QA in Q can be divided into 3 overlapping groups: + - SA = {S_1, ..., S_l, [C]} - from the SELECT clause, where C is + referenced by any number of MIN and/or MAX functions if present. + - WA = {W_1, ..., W_p} - from the WHERE clause + - GA = <G_1, ..., G_k> - from the GROUP BY clause (if any) + = SA - if Q is a DISTINCT query (based on the + equivalence of DISTINCT and GROUP queries. + - NGA = QA - (GA union C) = {NG_1, ..., NG_m} - the ones not in + GROUP BY and not referenced by MIN/MAX functions. + with the following properties specified below. + + SA1. There is at most one attribute in SA referenced by any number of + MIN and/or MAX functions which, which if present, is denoted as C. 
+ SA2. The position of the C attribute in the index is after the last A_k. + SA3. The attribute C can be referenced in the WHERE clause only in + predicates of the forms: + - (C {< | <= | > | >= | =} const) + - (const {< | <= | > | >= | =} C) + - (C between const_i and const_j) + - C IS NULL + - C IS NOT NULL + - C != const + SA4. If Q has a GROUP BY clause, there are no other aggregate functions + except MIN and MAX. For queries with DISTINCT, aggregate functions + are allowed. + SA5. The select list in DISTINCT queries should not contain expressions. + GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if + G_i = A_j => i = j. + GA2. If Q has a DISTINCT clause, then there is a permutation of SA that + forms a prefix of I. This permutation is used as the GROUP clause + when the DISTINCT query is converted to a GROUP query. + GA3. The attributes in GA may participate in arbitrary predicates, divided + into two groups: + - RNG(G_1,...,G_q ; where q <= k) is a range condition over the + attributes of a prefix of GA + - PA(G_i1,...G_iq) is an arbitrary predicate over an arbitrary subset + of GA. Since P is applied to only GROUP attributes it filters some + groups, and thus can be applied after the grouping. + GA4. There are no expressions among G_i, just direct column references. + NGA1.If in the index I there is a gap between the last GROUP attribute G_k, + and the MIN/MAX attribute C, then NGA must consist of exactly the index + attributes that constitute the gap. As a result there is a permutation + of NGA that coincides with the gap in the index <B_1, ..., B_m>. + NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of + equality conditions for all NG_i of the form (NG_i = const) or + (const = NG_i), such that each NG_i is referenced in exactly one + conjunct. Informally, the predicates provide constants to fill the + gap in the index. + WA1. There are no other attributes in the WHERE clause except the ones + referenced in predicates RNG, PA, PC, EQ defined above. Therefore + WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above + tests. By transitivity then it also follows that each WA_i participates + in the index I (if this was already tested for GA, NGA and C). + + C) Overall query form: + SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)]) + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k + [HAVING PH(A_1, ..., B_1,..., C)] + where EXPR(...) is an arbitrary expression over some or all SELECT fields, + or: + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + NOTES + If the current query satisfies the conditions above, and if + (mem_root! = NULL), then the function constructs and returns a new TRP + object, that is later used to construct a new QUICK_GROUP_MIN_MAX_SELECT. + If (mem_root == NULL), then the function only tests whether the current + query satisfies the conditions above, and, if so, sets + is_applicable = TRUE. + + Queries with DISTINCT for which index access can be used are transformed + into equivalent group-by queries of the form: + + SELECT A_1,...,A_k FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + The group-by list is a permutation of the select attributes, according + to their order in the index. 
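Conditions (GA1) and (GA2) above are prefix tests against the chosen index: for GROUP BY the group list must literally be an index prefix, while for DISTINCT it only has to be some permutation of one. A standalone sketch with column names as plain strings (illustrative only, not the Item_field-based checks in the function below):

    #include <algorithm>
    #include <string>
    #include <vector>

    /* (GA1): the GROUP BY columns, in order, must form a prefix of the index. */
    bool group_list_is_index_prefix(const std::vector<std::string> &group_by,
                                    const std::vector<std::string> &index_parts)
    {
      return group_by.size() <= index_parts.size() &&
             std::equal(group_by.begin(), group_by.end(), index_parts.begin());
    }

    /*
      (GA2): for DISTINCT, some permutation of the selected columns must form an
      index prefix; ordering that permutation by key part position yields the
      GROUP BY list used to rewrite the DISTINCT query.
    */
    bool distinct_list_is_index_prefix(std::vector<std::string> select_list,
                                       const std::vector<std::string> &index_parts)
    {
      if (select_list.size() > index_parts.size())
        return false;
      std::vector<std::string> prefix(index_parts.begin(),
                                      index_parts.begin() + select_list.size());
      std::sort(select_list.begin(), select_list.end());
      std::sort(prefix.begin(), prefix.end());
      return select_list == prefix;                   // same column set
    }

For INDEX(a, b, c), "GROUP BY a, b" passes (GA1), and "SELECT DISTINCT b, a" passes (GA2) with the rewritten group list (a, b).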
+ + TODO + - What happens if the query groups by the MIN/MAX field, and there is no + other field as in: "select min(a) from t1 group by a" ? + - We assume that the general correctness of the GROUP-BY query was checked + before this point. Is this correct, or do we have to check it completely? + + RETURN + If mem_root != NULL + - valid TRP_GROUP_MIN_MAX object if this QUICK class can be used for + the query + - NULL o/w. + If mem_root == NULL + - NULL +*/ + +static TRP_GROUP_MIN_MAX * +get_best_group_min_max(PARAM *param, SEL_TREE *tree) +{ + THD *thd= param->thd; + JOIN *join= thd->lex->select_lex.join; + TABLE *table= param->table; + bool have_min= FALSE; /* TRUE if there is a MIN function. */ + bool have_max= FALSE; /* TRUE if there is a MAX function. */ + Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/ + KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */ + uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */ + KEY *index_info= NULL; /* The index chosen for data access. */ + uint index= 0; /* The id of the chosen index. */ + uint group_key_parts= 0; /* Number of index key parts in the group prefix. */ + uint used_key_parts= 0; /* Number of index key parts used for access. */ + byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/ + uint key_infix_len= 0; /* Length of key_infix. */ + TRP_GROUP_MIN_MAX *read_plan= NULL; /* The eventually constructed TRP. */ + uint key_part_nr; + ORDER *tmp_group; + Item *item; + Item_field *item_field; + + DBUG_ENTER("get_best_group_min_max"); + + /* Perform few 'cheap' tests whether this access method is applicable. */ + if (!join || (thd->lex->sql_command != SQLCOM_SELECT)) + DBUG_RETURN(NULL); /* This is not a select statement. */ + if ((join->tables != 1) || /* The query must reference one table. */ + ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */ + (!join->select_distinct))) + DBUG_RETURN(NULL); + if(table->keys == 0) /* There are no indexes to use. */ + DBUG_RETURN(NULL); + + /* Analyze the query in more detail. */ + List_iterator<Item> select_items_it(join->fields_list); + + /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/ + if(join->make_sum_func_list(join->all_fields, join->fields_list, 1)) + DBUG_RETURN(NULL); + if (join->sum_funcs[0]) + { + Item_sum *min_max_item; + Item_sum **func_ptr= join->sum_funcs; + while ((min_max_item= *(func_ptr++))) + { + if (min_max_item->sum_func() == Item_sum::MIN_FUNC) + have_min= TRUE; + else if (min_max_item->sum_func() == Item_sum::MAX_FUNC) + have_max= TRUE; + else + DBUG_RETURN(NULL); + + Item *expr= min_max_item->args[0]; /* The argument of MIN/MAX. */ + if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ + { + if (! min_max_arg_item) + min_max_arg_item= (Item_field*) expr; + else if (! min_max_arg_item->eq(expr, 1)) + DBUG_RETURN(NULL); + } + else + DBUG_RETURN(NULL); + } + } + + /* Check (SA5). */ + if (join->select_distinct) + { + while ((item= select_items_it++)) + { + if (item->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + } + + /* Check (GA4) - that there are no expressions among the group attributes. */ + for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next) + { + if ((*tmp_group->item)->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + + /* + Check that table has at least one compound index such that the conditions + (GA1,GA2) are all TRUE. If there is more than one such index, select the + first one. 
Here we set the variables: group_prefix_len and index_info. + */ + KEY *cur_index_info= table->key_info; + KEY *cur_index_info_end= cur_index_info + table->keys; + KEY_PART_INFO *cur_part= NULL; + KEY_PART_INFO *end_part; /* Last part for loops. */ + /* Last index part. */ + KEY_PART_INFO *last_part= NULL; + KEY_PART_INFO *first_non_group_part= NULL; + KEY_PART_INFO *first_non_infix_part= NULL; + uint key_infix_parts= 0; + uint cur_group_key_parts= 0; + uint cur_group_prefix_len= 0; + /* Cost-related variables for the best index so far. */ + double best_read_cost= DBL_MAX; + ha_rows best_records= 0; + SEL_ARG *best_index_tree= NULL; + ha_rows best_quick_prefix_records= 0; + uint best_param_idx= 0; + double cur_read_cost= DBL_MAX; + ha_rows cur_records; + SEL_ARG *cur_index_tree= NULL; + ha_rows cur_quick_prefix_records= 0; + uint cur_param_idx; + + for (uint cur_index= 0 ; cur_index_info != cur_index_info_end ; + cur_index_info++, cur_index++) + { + /* Check (B1) - if current index is covering. */ + if (!table->used_keys.is_set(cur_index)) + goto next_index; + + /* + Check (GA1) for GROUP BY queries. + */ + if (join->group_list) + { + cur_part= cur_index_info->key_part; + end_part= cur_part + cur_index_info->key_parts; + /* Iterate in parallel over the GROUP list and the index parts. */ + for (tmp_group= join->group_list; tmp_group && (cur_part != end_part); + tmp_group= tmp_group->next, cur_part++) + { + /* + TODO: + tmp_group::item is an array of Item, is it OK to consider only the + first Item? If so, then why? What is the array for? + */ + /* Above we already checked that all group items are fields. */ + DBUG_ASSERT((*tmp_group->item)->type() == Item::FIELD_ITEM); + Item_field *group_field= (Item_field *) (*tmp_group->item); + if (group_field->field->eq(cur_part->field)) + { + cur_group_prefix_len+= cur_part->store_length; + ++cur_group_key_parts; + } + else + goto next_index; + } + } + /* + Check (GA2) if this is a DISTINCT query. + If GA2, then Store a new ORDER object in group_fields_array at the + position of the key part of item_field->field. Thus we get the ORDER + objects for each field ordered as the corresponding key parts. + Later group_fields_array of ORDER objects is used to convert the query + to a GROUP query. + */ + else if (join->select_distinct) + { + select_items_it.rewind(); + while ((item= select_items_it++)) + { + item_field= (Item_field*) item; /* (SA5) already checked above. */ + /* Find the order of the key part in the index. */ + key_part_nr= get_field_keypart(cur_index_info, item_field->field); + if (key_part_nr < 1 || key_part_nr > join->fields_list.elements) + goto next_index; + cur_part= cur_index_info->key_part + key_part_nr - 1; + cur_group_prefix_len+= cur_part->store_length; + } + cur_group_key_parts= join->fields_list.elements; + } + else + DBUG_ASSERT(FALSE); + + /* Check (SA2). */ + if (min_max_arg_item) + { + key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field); + if (key_part_nr <= cur_group_key_parts) + goto next_index; + min_max_arg_part= cur_index_info->key_part + key_part_nr - 1; + } + + /* + Check (NGA1, NGA2) and extract a sequence of constants to be used as part + of all search keys. + */ + + /* + If there is MIN/MAX, each keypart between the last group part and the + MIN/MAX part must participate in one equality with constants, and all + keyparts after the MIN/MAX part must not be referenced in the query. 
+ + If there is no MIN/MAX, the keyparts after the last group part can be + referenced only in equalities with constants, and the referenced keyparts + must form a sequence without any gaps that starts immediately after the + last group keypart. + */ + last_part= cur_index_info->key_part + cur_index_info->key_parts; + first_non_group_part= (cur_group_key_parts < cur_index_info->key_parts) ? + cur_index_info->key_part + cur_group_key_parts : + NULL; + first_non_infix_part= min_max_arg_part ? + (min_max_arg_part < last_part) ? + min_max_arg_part + 1 : + NULL : + NULL; + if (first_non_group_part && + (!min_max_arg_part || (min_max_arg_part - first_non_group_part > 0))) + { + if (tree) + { + uint dummy; + SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, + &dummy); + if (!get_constant_key_infix(cur_index_info, index_range_tree, + first_non_group_part, min_max_arg_part, + last_part, thd, key_infix, &key_infix_len, + &first_non_infix_part)) + goto next_index; + } + else if (min_max_arg_part && + (min_max_arg_part - first_non_group_part > 0)) + /* + There is a gap but no range tree, thus no predicates at all for the + non-group keyparts. + */ + goto next_index; + } + + /* + Test (WA1) partially - that no other keypart after the last infix part is + referenced in the query. + */ + if (first_non_infix_part) + { + for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++) + { + if (cur_part->field->query_id == thd->query_id) + goto next_index; + } + } + + /* If we got to this point, cur_index_info passes the test. */ + key_infix_parts= key_infix_len ? + (first_non_infix_part - first_non_group_part) : 0; + used_key_parts= cur_group_key_parts + key_infix_parts; + + /* Compute the cost of using this index. */ + if (tree) + { + /* Find the SEL_ARG sub-tree that corresponds to the chosen index. */ + cur_index_tree= get_index_range_tree(cur_index, tree, param, + &cur_param_idx); + /* Check if this range tree can be used for prefix retrieval. */ + cur_quick_prefix_records= check_quick_select(param, cur_param_idx, + cur_index_tree); + } + cost_group_min_max(table, cur_index_info, used_key_parts, + cur_group_key_parts, tree, cur_index_tree, + cur_quick_prefix_records, have_min, have_max, + &cur_read_cost, &cur_records); + + if (cur_read_cost < best_read_cost) + { + index_info= cur_index_info; + index= cur_index; + best_read_cost= cur_read_cost; + best_records= cur_records; + best_index_tree= cur_index_tree; + best_quick_prefix_records= cur_quick_prefix_records; + best_param_idx= cur_param_idx; + group_key_parts= cur_group_key_parts; + group_prefix_len= cur_group_prefix_len; + } + + next_index: + cur_group_key_parts= 0; + cur_group_prefix_len= 0; + } + if (!index_info) /* No usable index found. */ + DBUG_RETURN(NULL); + + /* Check (SA3) for the where clause. */ + if (join->conds && min_max_arg_item && + !check_group_min_max_predicates(join->conds, min_max_arg_item, + (index_info->flags & HA_SPATIAL) ? + Field::itMBR : Field::itRAW)) + DBUG_RETURN(NULL); + + /* The query passes all tests, so construct a new TRP object. */ + read_plan= new (param->mem_root) + TRP_GROUP_MIN_MAX(have_min, have_max, min_max_arg_part, + group_prefix_len, used_key_parts, + group_key_parts, index_info, index, + key_infix_len, + (key_infix_len > 0) ? 
key_infix : NULL, + tree, best_index_tree, best_param_idx, + best_quick_prefix_records); + if (read_plan) + { + if (tree && read_plan->quick_prefix_records == 0) + DBUG_RETURN(NULL); + + read_plan->read_cost= best_read_cost; + read_plan->records= best_records; + + DBUG_PRINT("info", + ("Returning group min/max plan: cost: %g, records: %lu", + read_plan->read_cost, (ulong) read_plan->records)); + } + + DBUG_RETURN(read_plan); +} + + +/* + Check that the MIN/MAX attribute participates only in range predicates + with constants. + + SYNOPSIS + check_group_min_max_predicates() + cond tree (or subtree) describing all or part of the WHERE + clause being analyzed + min_max_arg_item the field referenced by the MIN/MAX function(s) + min_max_arg_part the keypart of the MIN/MAX argument if any + + DESCRIPTION + The function walks recursively over the cond tree representing a WHERE + clause, and checks condition (SA3) - if a field is referenced by a MIN/MAX + aggregate function, it is referenced only by one of the following + predicates: {=, !=, <, <=, >, >=, between, is null, is not null}. + + RETURN + TRUE if cond passes the test + FALSE o/w +*/ + +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type) +{ + DBUG_ENTER("check_group_min_max_predicates"); + DBUG_ASSERT(cond && min_max_arg_item); + + Item::Type cond_type= cond->type(); + if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ + { + DBUG_PRINT("info", ("Analyzing: %s", ((Item_func*) cond)->func_name())); + List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); + Item *and_or_arg; + while ((and_or_arg= li++)) + { + if(!check_group_min_max_predicates(and_or_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + DBUG_RETURN(TRUE); + } + + /* + TODO: + This is a very crude fix to handle sub-selects in the WHERE clause + (Item_subselect objects). With the test below we rule out from the + optimization all queries with subselects in the WHERE clause. What has to + be done, is that here we should analyze whether the subselect references + the MIN/MAX argument field, and disallow the optimization only if this is + so. + */ + if (cond_type == Item::SUBSELECT_ITEM) + DBUG_RETURN(FALSE); + + /* We presume that at this point there are no other Items than functions. */ + DBUG_ASSERT(cond_type == Item::FUNC_ITEM); + + /* Test if cond references only group-by or non-group fields. */ + Item_func *pred= (Item_func*) cond; + Item **arguments= pred->arguments(); + Item *cur_arg; + DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); + for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) + { + cur_arg= arguments[arg_idx]; + DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); + if (cur_arg->type() == Item::FIELD_ITEM) + { + if (min_max_arg_item->eq(cur_arg, 1)) + { + /* + If pred references the MIN/MAX argument, check whether pred is a range + condition that compares the MIN/MAX argument with a constant. + */ + Item_func::Functype pred_type= pred->functype(); + if (pred_type != Item_func::EQUAL_FUNC && + pred_type != Item_func::LT_FUNC && + pred_type != Item_func::LE_FUNC && + pred_type != Item_func::GT_FUNC && + pred_type != Item_func::GE_FUNC && + pred_type != Item_func::BETWEEN && + pred_type != Item_func::ISNULL_FUNC && + pred_type != Item_func::ISNOTNULL_FUNC && + pred_type != Item_func::EQ_FUNC && + pred_type != Item_func::NE_FUNC) + DBUG_RETURN(FALSE); + + /* Check that pred compares min_max_arg_item with a constant. 
*/ + Item *args[3]; + bzero(args, 3 * sizeof(Item*)); + bool inv; + /* Test if this is a comparison of a field and a constant. */ + if (!simple_pred(pred, args, &inv)) + DBUG_RETURN(FALSE); + + /* Check for compatible string comparisons - similar to get_mm_leaf. */ + if (args[0] && args[1] && !args[2] && // this is a binary function + min_max_arg_item->result_type() == STRING_RESULT && + /* + Don't use an index when comparing strings of different collations. + */ + ((args[1]->result_type() == STRING_RESULT && + image_type == Field::itRAW && + ((Field_str*) min_max_arg_item->field)->charset() != + pred->compare_collation()) + || + /* + We can't always use indexes when comparing a string index to a + number. + */ + (args[1]->result_type() != STRING_RESULT && + min_max_arg_item->field->cmp_type() != args[1]->result_type()))) + DBUG_RETURN(FALSE); + } + } + else if (cur_arg->type() == Item::FUNC_ITEM) + { + if(!check_group_min_max_predicates(cur_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + else if (cur_arg->const_item()) + { + DBUG_RETURN(TRUE); + } + else + DBUG_RETURN(FALSE); + } + + DBUG_RETURN(TRUE); +} + + +/* + Extract a sequence of constants from a conjunction of equality predicates. + + SYNOPSIS + get_constant_key_infix() + index_info [in] Descriptor of the chosen index. + index_range_tree [in] Range tree for the chosen index + first_non_group_part [in] First index part after group attribute parts + min_max_arg_part [in] The keypart of the MIN/MAX argument if any + last_part [in] Last keypart of the index + thd [in] Current thread + key_infix [out] Infix of constants to be used for index lookup + key_infix_len [out] Lenghth of the infix + first_non_infix_part [out] The first keypart after the infix (if any) + + DESCRIPTION + Test conditions (NGA1, NGA2) from get_best_group_min_max(). Namely, + for each keypart field NGF_i not in GROUP-BY, check that there is a constant + equality predicate among conds with the form (NGF_i = const_ci) or + (const_ci = NGF_i). + Thus all the NGF_i attributes must fill the 'gap' between the last group-by + attribute and the MIN/MAX attribute in the index (if present). If these + conditions hold, copy each constant from its corresponding predicate into + key_infix, in the order its NG_i attribute appears in the index, and update + key_infix_len with the total length of the key parts in key_infix. + + RETURN + TRUE if the index passes the test + FALSE o/w +*/ + +static bool +get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, + KEY_PART_INFO *first_non_group_part, + KEY_PART_INFO *min_max_arg_part, + KEY_PART_INFO *last_part, THD *thd, + byte *key_infix, uint *key_infix_len, + KEY_PART_INFO **first_non_infix_part) +{ + SEL_ARG *cur_range; + KEY_PART_INFO *cur_part; + /* End part for the first loop below. */ + KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part; + + *key_infix_len= 0; + byte *key_ptr= key_infix; + for (cur_part= first_non_group_part; cur_part != end_part; cur_part++) + { + /* + Find the range tree for the current keypart. We assume that + index_range_tree points to the leftmost keypart in the index. + */ + for (cur_range= index_range_tree; cur_range; + cur_range= cur_range->next_key_part) + { + if (cur_range->field->eq(cur_part->field)) + break; + } + if (!cur_range) + { + if (min_max_arg_part) + return FALSE; /* The current keypart has no range predicates at all. 
*/ + else + { + *first_non_infix_part= cur_part; + return TRUE; + } + } + + /* Check that the current range tree is a single point interval. */ + if (cur_range->prev || cur_range->next) + return FALSE; /* This is not the only range predicate for the field. */ + if ((cur_range->min_flag & NO_MIN_RANGE) || + (cur_range->max_flag & NO_MAX_RANGE) || + (cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX)) + return FALSE; + + uint field_length= cur_part->store_length; + if ((cur_range->maybe_null && + cur_range->min_value[0] && cur_range->max_value[0]) + || + (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0)) + { /* cur_range specifies 'IS NULL' or an equality condition. */ + memcpy(key_ptr, cur_range->min_value, field_length); + key_ptr+= field_length; + *key_infix_len+= field_length; + } + else + return FALSE; + } + + if (!min_max_arg_part && (cur_part == last_part)) + *first_non_infix_part= last_part; + + return TRUE; +} + + +/* + Find the key part referenced by a field. + + SYNOPSIS + get_field_keypart() + index descriptor of an index + field field that possibly references some key part in index + + NOTES + The return value can be used to get a KEY_PART_INFO pointer by + part= index->key_part + get_field_keypart(...) - 1; + + RETURN + Positive number which is the consecutive number of the key part, or + 0 if field does not reference any index field. +*/ + +static inline uint +get_field_keypart(KEY *index, Field *field) +{ + KEY_PART_INFO *part, *end; + + for (part= index->key_part, end= part + index->key_parts; part < end; part++) + { + if (field->eq(part->field)) + return part - index->key_part + 1; + } + return 0; +} + + +/* + Find the SEL_ARG sub-tree that corresponds to the chosen index. + + SYNOPSIS + get_index_range_tree() + index [in] The ID of the index being looked for + range_tree[in] Tree of ranges being searched + param [in] PARAM from SQL_SELECT::test_quick_select + param_idx [out] Index in the array PARAM::key that corresponds to 'index' + + DESCRIPTION + + A SEL_TREE contains range trees for all usable indexes. This procedure + finds the SEL_ARG sub-tree for 'index'. The members of a SEL_TREE are + ordered in the same way as the members of PARAM::key, thus we first find + the corresponding index in the array PARAM::key. This index is returned + through the variable param_idx, to be used later as argument of + check_quick_select(). + + RETURN + Pointer to the SEL_ARG subtree that corresponds to index. +*/ + +SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, + uint *param_idx) +{ + uint idx= 0; /* Index nr in param->key_parts */ + while (idx < param->keys) + { + if (index == param->real_keynr[idx]) + break; + idx++; + } + *param_idx= idx; + return(range_tree->keys[idx]); +} + + +/* + Compute the cost of a quick_group_min_max_select for a particular index. 
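In the simplest case handled by this cost function (no key infix, both MIN and MAX requested, no range tree), the estimate spelled out in the DESCRIPTION below reduces to a few lines of arithmetic. A standalone sketch with invented figures; TIME_FOR_COMPARE is assumed to be 5 here:

    #include <cstdio>

    int main()
    {
      /* All figures below are invented for illustration. */
      double table_records=  1000000.0;
      double keys_per_block= 100.0;     // from block_size, key length, rowid length
      double keys_per_group= 400.0;     // rec_per_key statistic for the group prefix

      double num_blocks= table_records / keys_per_block + 1;   // ~10001
      double num_groups= table_records / keys_per_group + 1;   // ~2501

      /*
        Groups larger than a block: roughly one block read per group, plus one,
        when both MIN and MAX are needed; otherwise every block must be read.
      */
      double io_cost= (keys_per_group > keys_per_block) ? num_groups + 1
                                                        : num_blocks;
      double cpu_cost= num_groups / 5.0;   // TIME_FOR_COMPARE assumed to be 5

      std::printf("groups=%.0f  io=%.0f  cpu=%.1f  total=%.1f\n",
                  num_groups, io_cost, cpu_cost, io_cost + cpu_cost);
      return 0;
    }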
+ + SYNOPSIS + cost_group_min_max() + table [in] The table being accessed + index_info [in] The index used to access the table + used_key_parts [in] Number of key parts used to access the index + group_key_parts [in] Number of index key parts in the group prefix + range_tree [in] Tree of ranges for all indexes + index_tree [in] The range tree for the current index + quick_prefix_records [in] Number of records retrieved by the internally + used quick range select if any + have_min [in] True if there is a MIN function + have_max [in] True if there is a MAX function + read_cost [out] The cost to retrieve rows via this quick select + records [out] The number of rows retrieved + + DESCRIPTION + This method computes the access cost of a TRP_GROUP_MIN_MAX instance and + the number of rows returned. It updates this->read_cost and this->records. + + NOTES + The cost computation distinguishes several cases: + 1) No equality predicates over non-group attributes (thus no key_infix). + If groups are bigger than blocks on the average, then we assume that it + is very unlikely that block ends are aligned with group ends, thus even + if we look for both MIN and MAX keys, all pairs of neighbor MIN/MAX + keys, except for the first MIN and the last MAX keys, will be in the + same block. If groups are smaller than blocks, then we are going to + read all blocks. + 2) There are equality predicates over non-group attributes. + In this case the group prefix is extended by additional constants, and + as a result the min/max values are inside sub-groups of the original + groups. The number of blocks that will be read depends on whether the + ends of these sub-groups will be contained in the same or in different + blocks. We compute the probability for the two ends of a subgroup to be + in two different blocks as the ratio of: + - the number of positions of the left-end of a subgroup inside a group, + such that the right end of the subgroup is past the end of the buffer + containing the left-end, and + - the total number of possible positions for the left-end of the + subgroup, which is the number of keys in the containing group. + We assume it is very unlikely that two ends of subsequent subgroups are + in the same block. + 3) The are range predicates over the group attributes. + Then some groups may be filtered by the range predicates. We use the + selectivity of the range predicates to decide how many groups will be + filtered. + + TODO + - Take into account the optional range predicates over the MIN/MAX + argument. + - Check if we have a PK index and we use all cols - then each key is a + group, and it will be better to use an index scan. + + RETURN + None +*/ + +void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, + uint group_key_parts, SEL_TREE *range_tree, + SEL_ARG *index_tree, ha_rows quick_prefix_records, + bool have_min, bool have_max, + double *read_cost, ha_rows *records) +{ + uint table_records; + uint num_groups; + uint num_blocks; + uint keys_per_block; + uint keys_per_group; + uint keys_per_subgroup; /* Average number of keys in sub-groups */ + /* formed by a key infix. */ + double p_overlap; /* Probability that a sub-group overlaps two blocks. */ + double quick_prefix_selectivity; + double io_cost; + double cpu_cost= 0; /* TODO: CPU cost of index_read calls? 
*/ + DBUG_ENTER("TRP_GROUP_MIN_MAX::cost"); + + table_records= table->file->records; + keys_per_block= (table->file->block_size / 2 / + (index_info->key_length + table->file->ref_length) + + 1); + num_blocks= (table_records / keys_per_block) + 1; + + /* Compute the number of keys in a group. */ + keys_per_group= index_info->rec_per_key[group_key_parts - 1]; + if (keys_per_group == 0) /* If there is no statistics try to guess */ + /* each group contains 10% of all records */ + keys_per_group= (table_records / 10) + 1; + num_groups= (table_records / keys_per_group) + 1; + + /* Apply the selectivity of the quick select for group prefixes. */ + if (range_tree && (quick_prefix_records != HA_POS_ERROR)) + { + quick_prefix_selectivity= (double) quick_prefix_records / + (double) table_records; + num_groups= (uint) rint(num_groups * quick_prefix_selectivity); + } + + if (used_key_parts > group_key_parts) + { /* + Compute the probability that two ends of a subgroup are inside + different blocks. + */ + keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1]; + if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */ + p_overlap= 1.0; /* a block, it will overlap at least two blocks. */ + else + { + double blocks_per_group= (double) num_blocks / (double) num_groups; + p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group; + p_overlap= min(p_overlap, 1.0); + } + io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks); + } + else + io_cost= (keys_per_group > keys_per_block) ? + (have_min && have_max) ? (double) (num_groups + 1) : + (double) num_groups : + (double) num_blocks; + + /* + TODO: If there is no WHERE clause and no other expressions, there should be + no CPU cost. We leave it here to make this cost comparable to that of index + scan as computed in SQL_SELECT::test_quick_select(). + */ + cpu_cost= (double) num_groups / TIME_FOR_COMPARE; + + *read_cost= io_cost + cpu_cost; + *records= num_groups; + + DBUG_PRINT("info", + ("records=%u, keys/block=%u, keys/group=%u, records=%u, blocks=%u", + table_records, keys_per_block, keys_per_group, records, + num_blocks)); + DBUG_VOID_RETURN; +} + + +/* + Construct a new quick select object for queries with group by with min/max. + + SYNOPSIS + TRP_GROUP_MIN_MAX::make_quick() + param Parameter from test_quick_select + retrieve_full_rows ignored + parent_alloc Memory pool to use, if any. + + NOTES + Make_quick ignores the retrieve_full_rows parameter because + QUICK_GROUP_MIN_MAX_SELECT always performs 'index only' scans. + The other parameter are ignored as well because all necessary + data to create the QUICK object is computed at this TRP creation + time. + + RETURN + New QUICK_GROUP_MIN_MAX_SELECT object if successfully created, + NULL o/w. +*/ + +QUICK_SELECT_I * +TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_GROUP_MIN_MAX_SELECT *quick; + DBUG_ENTER("TRP_GROUP_MIN_MAX::make_quick"); + + quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table, + param->thd->lex->select_lex.join, + have_min, have_max, min_max_arg_part, + group_prefix_len, used_key_parts, + index_info, index, read_cost, records, + key_infix_len, key_infix, + parent_alloc); + if (!quick) + DBUG_RETURN(NULL); + + if (quick->init()) + { + delete quick; + DBUG_RETURN(NULL); + } + + if (range_tree) + { + DBUG_ASSERT(quick_prefix_records > 0); + if (quick_prefix_records == HA_POS_ERROR) + quick->quick_prefix_select= NULL; /* Can't construct a quick select. 
*/ + else + /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */ + quick->quick_prefix_select= get_quick_select(param, param_idx, index_tree, + &quick->alloc); + + /* + Extract the SEL_ARG subtree that contains only ranges for the MIN/MAX + attribute, and create an array of QUICK_RANGES to be used by the + new quick select. + */ + if (min_max_arg_part) + { + SEL_ARG *min_max_range= index_tree; + while (min_max_range) /* Find the tree for the MIN/MAX key part. */ + { + if (min_max_range->field->eq(min_max_arg_part->field)) + break; + min_max_range= min_max_range->next_key_part; + } + /* Scroll to the leftmost interval for the MIN/MAX argument. */ + while (min_max_range && min_max_range->prev) + min_max_range= min_max_range->prev; + /* Create an array of QUICK_RANGEs for the MIN/MAX argument. */ + while (min_max_range) + { + if (quick->add_range(min_max_range)) + { + delete quick; + quick= NULL; + DBUG_RETURN(NULL); + } + min_max_range= min_max_range->next; + } + } + } + else + quick->quick_prefix_select= NULL; + + quick->update_key_stat(); + + DBUG_RETURN(quick); +} + + +/* + Construct new quick select for group queries with min/max. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::QUICK_GROUP_MIN_MAX_SELECT() + table The table being accessed + join Descriptor of the current query + have_min TRUE if the query selects a MIN function + have_max TRUE if the query selects a MAX function + min_max_arg_part The only argument field of all MIN/MAX functions + group_prefix_len Length of all key parts in the group prefix + prefix_key_parts All key parts in the group prefix + index_info The index chosen for data access + use_index The id of index_info + read_cost Cost of this access method + records Number of records returned + key_infix_len Length of the key infix appended to the group prefix + key_infix Infix of constants from equality predicates + parent_alloc Memory pool for this and quick_prefix_select data + + RETURN + None +*/ + +QUICK_GROUP_MIN_MAX_SELECT:: +QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, + bool have_max_arg, + KEY_PART_INFO *min_max_arg_part_arg, + uint group_prefix_len_arg, + uint used_key_parts_arg, KEY *index_info_arg, + uint use_index, double read_cost_arg, + ha_rows records_arg, uint key_infix_len_arg, + byte *key_infix_arg, MEM_ROOT *parent_alloc) + :join(join_arg), index_info(index_info_arg), + group_prefix_len(group_prefix_len_arg), have_min(have_min_arg), + have_max(have_max_arg), seen_first_key(FALSE), + min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg), + key_infix_len(key_infix_len_arg) +{ + head= table; + file= head->file; + index= use_index; + record= head->record[0]; + tmp_record= head->record[1]; + read_time= read_cost_arg; + records= records_arg; + used_key_parts= used_key_parts_arg; + real_prefix_len= group_prefix_len + key_infix_len; + group_prefix= NULL; + min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0; + + /* + We can't have parent_alloc set as the init function can't handle this case + yet. + */ + DBUG_ASSERT(!parent_alloc); + if (!parent_alloc) + { + init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0); + join->thd->mem_root= &alloc; + } + else + bzero(&alloc, sizeof(MEM_ROOT)); // ensure that it's not used +} + + +/* + Do post-constructor initialization. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::init() + + DESCRIPTION + The method performs initialization that cannot be done in the constructor + such as memory allocations that may fail. 
It allocates memory for the + group prefix and inifix buffers, and for the lists of MIN/MAX item to be + updated during execution. + + RETURN + 0 OK + other Error code +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::init() +{ + if (group_prefix) /* Already initialized. */ + return 0; + + if (!(last_prefix= (byte*) alloc_root(&alloc, group_prefix_len))) + return 1; + /* + We may use group_prefix to store keys with all select fields, so allocate + enough space for it. + */ + if (!(group_prefix= (byte*) alloc_root(&alloc, + real_prefix_len + min_max_arg_len))) + return 1; + + if (key_infix_len > 0) + { + /* + The memory location pointed to by key_infix will be deleted soon, so + allocate a new buffer and copy the key_infix into it. + */ + byte *tmp_key_infix= (byte*) alloc_root(&alloc, key_infix_len); + if (!tmp_key_infix) + return 1; + memcpy(tmp_key_infix, this->key_infix, key_infix_len); + this->key_infix= tmp_key_infix; + } + + if (min_max_arg_part) + { + if(my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16)) + return 1; + + if (have_min) + { + if(!(min_functions= new List<Item_sum>)) + return 1; + } + else + min_functions= NULL; + if (have_max) + { + if(!(max_functions= new List<Item_sum>)) + return 1; + } + else + max_functions= NULL; + + Item_sum *min_max_item; + Item_sum **func_ptr= join->sum_funcs; + while ((min_max_item= *(func_ptr++))) + { + if (have_min && (min_max_item->sum_func() == Item_sum::MIN_FUNC)) + min_functions->push_back(min_max_item); + else if (have_max && (min_max_item->sum_func() == Item_sum::MAX_FUNC)) + max_functions->push_back(min_max_item); + } + + if (have_min) + { + if (!(min_functions_it= new List_iterator<Item_sum>(*min_functions))) + return 1; + } + else + min_functions_it= NULL; + + if (have_max) + { + if (!(max_functions_it= new List_iterator<Item_sum>(*max_functions))) + return 1; + } + else + max_functions_it= NULL; + } + else + min_max_ranges.elements= 0; + + return 0; +} + + +QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT() +{ + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT"); + if (file->inited != handler::NONE) + file->ha_index_end(); + if (min_max_arg_part) + delete_dynamic(&min_max_ranges); + free_root(&alloc,MYF(0)); + delete quick_prefix_select; + DBUG_VOID_RETURN; +} + + +/* + Eventually create and add a new quick range object. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::add_range() + sel_range Range object from which a + + NOTES + Construct a new QUICK_RANGE object from a SEL_ARG object, and + add it to the array min_max_ranges. If sel_arg is an infinite + range, e.g. (x < 5 or x > 4), then skip it and do not construct + a quick range. + + RETURN + FALSE on success + TRUE otherwise +*/ + +bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range) +{ + QUICK_RANGE *range; + uint range_flag= sel_range->min_flag | sel_range->max_flag; + + /* Skip (-inf,+inf) ranges, e.g. (x < 5 or x > 4). 
*/ + if((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE)) + return FALSE; + + if (!(sel_range->min_flag & NO_MIN_RANGE) && + !(sel_range->max_flag & NO_MAX_RANGE)) + { + if (sel_range->maybe_null && + sel_range->min_value[0] && sel_range->max_value[0]) + range_flag|= NULL_RANGE; /* IS NULL condition */ + else if (memcmp(sel_range->min_value, sel_range->max_value, + min_max_arg_len) == 0) + range_flag|= EQ_RANGE; /* equality condition */ + } + range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len, + sel_range->max_value, min_max_arg_len, + range_flag); + if (!range) + return TRUE; + if (insert_dynamic(&min_max_ranges, (gptr)&range)) + return TRUE; + return FALSE; +} + + +/* + Determine the total number and length of the keys that will be used for + index lookup. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_key_stat() + + DESCRIPTION + The total length of the keys used for index lookup depends on whether + there are any predicates referencing the min/max argument, and/or if + the min/max argument field can be NULL. + This function does an optimistic analysis whether the search key might + be extended by a constant for the min/max keypart. It is 'optimistic' + because during actual execution it may happen that a particular range + is skipped, and then a shorter key will be used. However this is data + dependent and can't be easily estimated here. + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_key_stat() +{ + max_used_key_length= real_prefix_len; + if (min_max_ranges.elements > 0) + { + QUICK_RANGE *cur_range= 0; + if (have_min) + { /* Check if the right-most range has a lower boundary. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, + min_max_ranges.elements - 1); + if (!(cur_range->flag & NO_MIN_RANGE)) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + return; + } + } + if (have_max) + { /* Check if the left-most range has an upper boundary. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, 0); + if (!(cur_range->flag & NO_MAX_RANGE)) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + return; + } + } + } + else if (have_min && min_max_arg_part && min_max_arg_part->field->is_null()) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + } +} + + +/* + Initialize a quick group min/max select for key retrieval. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::reset() + + DESCRIPTION + Initialize the index chosen for access and find and store the prefix + of the last group. The method is expensive since it performs disk access. + + RETURN + 0 OK + other Error code +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::reset(void) +{ + int result; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset"); + + file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */ + result= file->ha_index_init(index); + result= file->index_last(record); + if (quick_prefix_select) + quick_prefix_select->reset(); + if (result) + DBUG_RETURN(result); + /* Save the prefix of the last group. */ + key_copy(last_prefix, record, index_info, group_prefix_len); + + DBUG_RETURN(0); +} + + + +/* + Get the next key containing the MIN and/or MAX key for the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::get_next() + + DESCRIPTION + The method finds the next subsequent group of records that satisfies the + query conditions and finds the keys that contain the MIN/MAX values for + the key part referenced by the MIN/MAX function(s). 
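The reason a handful of positioned reads per group is enough is the index ordering: within one group prefix, the MIN and MAX of the argument column sit at the two ends of the group. The stand-alone sketch below demonstrates this with a sorted std::vector standing in for the index; the data and names are invented, and the lower_bound/upper_bound probes are only an analogy for the handler's index_read calls.

  #include <algorithm>
  #include <climits>
  #include <cstdio>
  #include <utility>
  #include <vector>

  /* Toy stand-in for a B-tree index on (a, c): a sorted vector of pairs. */
  int main()
  {
    std::pair<int,int> rows[]= { std::make_pair(1,5), std::make_pair(1,9),
                                 std::make_pair(1,2), std::make_pair(2,7),
                                 std::make_pair(2,3), std::make_pair(3,8) };
    std::vector<std::pair<int,int> > idx(rows, rows + 6);
    std::sort(idx.begin(), idx.end());

    for (int a= 1; a <= 3; a++)              /* iterate the group prefixes */
    {
      /* Analogue of reading the first key of the group (gives MIN(c)). */
      std::vector<std::pair<int,int> >::iterator lo=
        std::lower_bound(idx.begin(), idx.end(), std::make_pair(a, INT_MIN));
      /* Analogue of reading the last key of the group (gives MAX(c)). */
      std::vector<std::pair<int,int> >::iterator hi=
        std::upper_bound(idx.begin(), idx.end(), std::make_pair(a, INT_MAX));
      printf("a=%d  MIN(c)=%d  MAX(c)=%d\n", a, lo->second, (hi - 1)->second);
    }
    return 0;
  }
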
Once a group and its + MIN/MAX values are found, store these values in the Item_sum objects for + the MIN/MAX functions. The rest of the values in the result row are stored + in the Item_field::result_field of each select field. If the query does + not contain MIN and/or MAX functions, then the function only finds the + group prefix, which is a query answer itself. + + NOTES + If both MIN and MAX are computed, then we use the fact that if there is + no MIN key, there can't be a MAX key as well, so we can skip looking + for a MAX key in this case. + + RETURN + 0 on success + HA_ERR_END_OF_FILE if returned all keys + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::get_next() +{ + int min_res= 0; + int max_res= 0; + int result; + int is_last_prefix; + + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::get_next"); + + /* + Loop until a group is found that satisfies all query conditions or the last + group is reached. + */ + do + { + result= next_prefix(); + /* + Check if this is the last group prefix. Notice that at this point + this->record contains the current prefix in record format. + */ + is_last_prefix= key_cmp(index_info->key_part, last_prefix, + group_prefix_len); + DBUG_ASSERT(is_last_prefix <= 0); + if (result == HA_ERR_KEY_NOT_FOUND) + continue; + else if (result) + break; + + if (have_min) + { + min_res= next_min(); + if (min_res == 0) + update_min_result(); + } + /* If there is no MIN in the group, there is no MAX either. */ + if ((have_max && !have_min) || + (have_max && have_min && (min_res == 0))) + { + max_res= next_max(); + if (max_res == 0) + update_max_result(); + /* If a MIN was found, a MAX must have been found as well. */ + DBUG_ASSERT((have_max && !have_min) || + (have_max && have_min && (max_res == 0))); + } + result= have_min ? min_res : have_max ? max_res : result; + } + while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0); + + if (result == 0) + /* + Partially mimic the behavior of end_select_send. Copy the + field data from Item_field::field into Item_field::result_field + of each non-aggregated field (the group fields, and optionally + other fields in non-ANSI SQL mode). + */ + copy_fields(&join->tmp_table_param); + else if (result == HA_ERR_KEY_NOT_FOUND) + result= HA_ERR_END_OF_FILE; + + DBUG_RETURN(result); +} + + +/* + Retrieve the minimal key in the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_min() + + DESCRIPTION + Load the prefix of the next group into group_prefix and find the minimal + key within this group such that the key satisfies the query conditions and + NULL semantics. The found key is loaded into this->record. + + IMPLEMENTATION + Depending on the values of min_max_ranges.elements, key_infix_len, and + whether there is a NULL in the MIN field, this function may directly + return without any data access. In this case we use the key loaded into + this->record by the call to this->next_prefix() just before this call. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions. + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_min() +{ + int result= 0; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_min"); + + /* Find the MIN key using the eventually extended group prefix. */ + if (min_max_ranges.elements > 0) + { + if ((result= next_min_in_range())) + DBUG_RETURN(result); + } + else + { + /* Apply the constant equality conditions to the non-group select fields. 
*/ + if (key_infix_len > 0) + { + if ((result= file->index_read(record, group_prefix, real_prefix_len, + HA_READ_KEY_EXACT))) + DBUG_RETURN(result); + } + + /* + If the min/max argument field is NULL, skip subsequent rows in the same + group with NULL in it. Notice that: + - if the first row in a group doesn't have a NULL in the field, no row + in the same group has (because NULL < any other value), + - min_max_arg_part->field->ptr points to some place in 'record'. + */ + if (min_max_arg_part && min_max_arg_part->field->is_null()) + { + /* Find the first subsequent record without NULL in the MIN/MAX field. */ + key_copy(tmp_record, record, index_info, 0); + result= file->index_read(record, tmp_record, + real_prefix_len + min_max_arg_len, + HA_READ_AFTER_KEY); + /* + Check if the new record belongs to the current group by comparing its + prefix with the group's prefix. If it is from the next group, then the + whole group has NULLs in the MIN/MAX field, so use the first record in + the group as a result. + TODO: + It is possible to reuse this new record as the result candidate for the + next call to next_min(), and to save one lookup in the next call. For + this add a new member 'this->next_group_prefix'. + */ + if (!result) + { + if(key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + key_restore(record, tmp_record, index_info, 0); + } else if (result == HA_ERR_KEY_NOT_FOUND) + result= 0; /* There is a result in any case. */ + } + } + + /* + If the MIN attribute is non-nullable, this->record already contains the + MIN key in the group, so just return. + */ + DBUG_RETURN(result); +} + + +/* + Retrieve the maximal key in the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_max() + + DESCRIPTION + If there was no previous next_min call to determine the next group prefix, + then load the next prefix into group_prefix, then lookup the maximal key of + the group, and store it into this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions. + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_max() +{ + int result; + + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_max"); + + /* Get the last key in the (possibly extended) group. */ + if (min_max_ranges.elements > 0) + result= next_max_in_range(); + else + result= file->index_read(record, group_prefix, real_prefix_len, + HA_READ_PREFIX_LAST); + DBUG_RETURN(result); +} + + +/* + Determine the prefix of the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_prefix() + + DESCRIPTION + Determine the prefix of the next group that satisfies the query conditions. + If there is a range condition referencing the group attributes, use a + QUICK_RANGE_SELECT object to retrieve the *first* key that satisfies the + condition. If there is a key infix of constants, append this infix + immediately after the group attributes. The possibly extended prefix is + stored in this->group_prefix. The first key of the found group is stored in + this->record, on which relies this->next_min(). + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the formed prefix + HA_ERR_END_OF_FILE if there are no more keys + other if some error occurred +*/ +int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() +{ + int result; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_prefix"); + + if (quick_prefix_select) + { + byte *cur_prefix= seen_first_key ? 
group_prefix : NULL; + if ((result= quick_prefix_select->get_next_prefix(group_prefix_len, + cur_prefix))) + DBUG_RETURN(result); + seen_first_key= TRUE; + } + else + { + if (!seen_first_key) + { + result= file->index_first(record); + if (result) + DBUG_RETURN(result); + seen_first_key= TRUE; + } + else + { + /* Load the first key in this group into record. */ + result= file->index_read(record, group_prefix, group_prefix_len, + HA_READ_AFTER_KEY); + if (result) + DBUG_RETURN(result); + } + } + + /* Save the prefix of this group for subsequent calls. */ + key_copy(group_prefix, record, index_info, group_prefix_len); + /* Append key_infix to group_prefix. */ + if (key_infix_len > 0) + memcpy(group_prefix + group_prefix_len, + key_infix, key_infix_len); + + DBUG_RETURN(0); +} + + +/* + Find the minimal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the minimal key that is + in the left-most possible range. If there is no such key, then the current + group does not have a MIN key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + bool found_null= FALSE; + int result= HA_ERR_KEY_NOT_FOUND; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= 0; range_idx < min_max_ranges.elements; range_idx++) + { /* Search from the left-most range to the right. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx); + + /* + If the current value for the min/max argument is bigger than the right + boundary of cur_range, there is no need to check this range. + */ + if (range_idx != 0 && !(cur_range->flag & NO_MAX_RANGE) && + (key_cmp(min_max_arg_part, cur_range->max_key, min_max_arg_len) == 1)) + continue; + + if (cur_range->flag & NO_MIN_RANGE) + { + find_flag= HA_READ_KEY_EXACT; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the lower boundary for this range. */ + memcpy(group_prefix + real_prefix_len, cur_range->min_key, + cur_range->min_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ? + HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + if ((result == HA_ERR_KEY_NOT_FOUND) && + (cur_range->flag & (EQ_RANGE | NULL_RANGE))) + continue; /* Check the next range. */ + else if (result) + /* + In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE, + HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this + range, it can't succeed for any other subsequent range. + */ + break; + + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + break; /* No need to perform the checks below for equal keys. */ + + if (cur_range->flag & NULL_RANGE) + { /* Remember this key, and continue looking for a non-NULL key that */ + /* satisfies some other condition. */ + memcpy(tmp_record, record, head->rec_buff_length); + found_null= TRUE; + continue; + } + + /* Check if record belongs to the current group. 
*/ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + + /* If there is an upper limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MAX_RANGE) ) + { + /* Compose the MAX key for the range. */ + byte *max_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(max_key, group_prefix, real_prefix_len); + memcpy(max_key + real_prefix_len, cur_range->max_key, + cur_range->max_length); + /* Compare the found key with max_key. */ + int cmp_res= key_cmp(index_info->key_part, max_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) || + (cmp_res <= 0))) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + } + /* If we got to this point, the current key qualifies as MIN. */ + DBUG_ASSERT(result == 0); + break; + } + /* + If there was a key with NULL in the MIN/MAX field, and there was no other + key without NULL from the same group that satisfies some other condition, + then use the key with the NULL. + */ + if (found_null && result) + { + memcpy(record, tmp_record, head->rec_buff_length); + result= 0; + } + return result; +} + + +/* + Find the maximal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the maximal key that is + in the right-most possible range. If there is no such key, then the current + group does not have a MAX key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + int result; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= min_max_ranges.elements; range_idx > 0; range_idx--) + { /* Search from the right-most range to the left. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx - 1); + + /* + If the current value for the min/max argument is smaller than the left + boundary of cur_range, there is no need to check this range. + */ + if (range_idx != min_max_ranges.elements && + !(cur_range->flag & NO_MIN_RANGE) && + (key_cmp(min_max_arg_part, cur_range->min_key, min_max_arg_len) == -1)) + continue; + + if (cur_range->flag & NO_MAX_RANGE) + { + find_flag= HA_READ_PREFIX_LAST; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the upper boundary for this range. */ + memcpy(group_prefix + real_prefix_len, cur_range->max_key, + cur_range->max_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ? + HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + + if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE)) + continue; /* Check the next range. */ + else if (result) + /* + In no key was found with this upper bound, there certainly are no keys + in the ranges to the left. + */ + return result; + + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + return result; /* No need to perform the checks below for equal keys. */ + + /* Check if record belongs to the current group. 
*/ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + + /* If there is a lower limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MIN_RANGE) ) + { + /* Compose the MIN key for the range. */ + byte *min_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(min_key, group_prefix, real_prefix_len); + memcpy(min_key + real_prefix_len, cur_range->min_key, + cur_range->min_length); + /* Compare the found key with min_key. */ + int cmp_res= key_cmp(index_info->key_part, min_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MIN) && (cmp_res == 1) || + (cmp_res >= 0))) + continue; + } + /* If we got to this point, the current key qualifies as MAX. */ + return result; + } + return HA_ERR_KEY_NOT_FOUND; +} + + +/* + Update all MIN function results with the newly found value. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_min_result() + + DESCRIPTION + The method iterates through all MIN functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_min(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_min() was called and before next_max() is called, because both MIN and + MAX take their result value from the same buffer this->head->record[0] + (i.e. this->record). + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_min_result() +{ + Item_sum *min_func; + + min_functions_it->rewind(); + while ((min_func= (*min_functions_it)++)) + min_func->reset(); +} + + +/* + Update all MAX function results with the newly found value. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_max_result() + + DESCRIPTION + The method iterates through all MAX functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_max(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_max() was called, because both MIN and MAX take their result value + from the same buffer this->head->record[0] (i.e. this->record). + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_max_result() +{ + Item_sum *max_func; + + max_functions_it->rewind(); + while ((max_func= (*max_functions_it)++)) + max_func->reset(); +} + + +/* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths() + key_names [out] Names of used indexes + used_lengths [out] Corresponding lengths of the index names + + DESCRIPTION + This method is used by select_describe to extract the names of the + indexes used by a quick select. 
+ +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + key_names->append(index_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + + +#ifndef DBUG_OFF + +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg) +{ + SEL_ARG **key,**end; + int idx; + char buff[1024]; + DBUG_ENTER("print_sel_tree"); + if (! _db_on_) + DBUG_VOID_RETURN; + + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for (idx= 0,key=tree->keys, end=key+param->keys ; + key != end ; + key++,idx++) + { + if (tree_map->is_set(idx)) + { + uint keynr= param->real_keynr[idx]; + if (tmp.length()) + tmp.append(','); + tmp.append(param->table->key_info[keynr].name); + } + } + if (!tmp.length()) + tmp.append("(empty)"); + + DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr())); + + DBUG_VOID_RETURN; +} + + +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end) +{ + DBUG_ENTER("print_ror_scans"); + if (! _db_on_) + DBUG_VOID_RETURN; + + char buff[1024]; + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for(;start != end; start++) + { + if (tmp.length()) + tmp.append(','); + tmp.append(table->key_info[(*start)->keynr].name); + } + if (!tmp.length()) + tmp.append("(empty)"); + DBUG_PRINT("info", ("ROR key scans (%s): %s", msg, tmp.ptr())); + DBUG_VOID_RETURN; +} + + /***************************************************************************** ** Print a quick range for debugging ** TODO: @@ -2912,8 +8414,6 @@ bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, ** of locking the DEBUG stream ! *****************************************************************************/ -#ifndef DBUG_OFF - static void print_key(KEY_PART *key_part,const char *key,uint used_length) { @@ -2946,48 +8446,166 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) } -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg) +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg) { - QUICK_RANGE *range; char buf[MAX_KEY/8+1]; - DBUG_ENTER("print_param"); + DBUG_ENTER("print_quick"); if (! 
_db_on_ || !quick) DBUG_VOID_RETURN; + DBUG_LOCK_FILE; + + quick->dbug_dump(0, TRUE); + fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); + + DBUG_UNLOCK_FILE; + DBUG_VOID_RETURN; +} - List_iterator<QUICK_RANGE> li(quick->ranges); + +static void print_rowid(byte* val, int len) +{ + byte *pb; DBUG_LOCK_FILE; - fprintf(DBUG_FILE,"Used quick_range on key: %d (other_keys: 0x%s):\n", - quick->index, needed_reg->print(buf)); - while ((range=li++)) + fputc('\"', DBUG_FILE); + for (pb= val; pb!= val + len; ++pb) + fprintf(DBUG_FILE, "%c", *pb); + fprintf(DBUG_FILE, "\", hex: "); + + for (pb= val; pb!= val + len; ++pb) + fprintf(DBUG_FILE, "%x ", *pb); + fputc('\n', DBUG_FILE); + DBUG_UNLOCK_FILE; +} + +void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose) +{ + fprintf(DBUG_FILE, "%*squick range select, key %s, length: %d\n", + indent, "", head->key_info[index].name, max_used_key_length); + + if (verbose) { - if (!(range->flag & NO_MIN_RANGE)) + QUICK_RANGE *range; + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **last_range= pr + ranges.elements; + for (; pr!=last_range; ++pr) { - print_key(quick->key_parts,range->min_key,range->min_length); - if (range->flag & NEAR_MIN) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - } - fputs("X",DBUG_FILE); + fprintf(DBUG_FILE, "%*s", indent + 2, ""); + range= *pr; + if (!(range->flag & NO_MIN_RANGE)) + { + print_key(key_parts,range->min_key,range->min_length); + if (range->flag & NEAR_MIN) + fputs(" < ",DBUG_FILE); + else + fputs(" <= ",DBUG_FILE); + } + fputs("X",DBUG_FILE); - if (!(range->flag & NO_MAX_RANGE)) - { - if (range->flag & NEAR_MAX) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - print_key(quick->key_parts,range->max_key,range->max_length); + if (!(range->flag & NO_MAX_RANGE)) + { + if (range->flag & NEAR_MAX) + fputs(" < ",DBUG_FILE); + else + fputs(" <= ",DBUG_FILE); + print_key(key_parts,range->max_key,range->max_length); + } + fputs("\n",DBUG_FILE); } - fputs("\n",DBUG_FILE); } - DBUG_UNLOCK_FILE; - DBUG_VOID_RETURN; } -#endif +void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick index_merge select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (pk_quick_select) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + pk_quick_select->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_INTERSECT_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick ROR-intersect select, %scovering\n", + indent, "", need_to_fetch_row? 
"":"non-"); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (cpk_quick) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + cpk_quick->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_UNION_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + QUICK_SELECT_I *quick; + fprintf(DBUG_FILE, "%*squick ROR-union select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + + +/* + Print quick select information to DBUG_FILE. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::dbug_dump() + indent Indentation offset + verbose If TRUE show more detailed output. + + DESCRIPTION + Print the contents of this quick select to DBUG_FILE. The method also + calls dbug_dump() for the used quick select if any. + + IMPLEMENTATION + Caller is responsible for locking DBUG_FILE before this call and unlocking + it afterwards. + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose) +{ + fprintf(DBUG_FILE, + "%*squick_group_min_max_select: index %s (%d), length: %d\n", + indent, "", index_info->name, index, max_used_key_length); + if (key_infix_len > 0) + { + fprintf(DBUG_FILE, "%*susing key_infix with length %d:\n", + indent, "", key_infix_len); + } + if (quick_prefix_select) + { + fprintf(DBUG_FILE, "%*susing quick_range_select:\n", indent, ""); + quick_prefix_select->dbug_dump(indent + 2, verbose); + } + if (min_max_ranges.elements > 0) + { + fprintf(DBUG_FILE, "%*susing %d quick_ranges for MIN/MAX:\n", + indent, "", min_max_ranges.elements); + } +} + + +#endif /* NOT_USED */ /***************************************************************************** -** Instansiate templates +** Instantiate templates *****************************************************************************/ #ifdef __GNUC__ diff --git a/sql/opt_range.h b/sql/opt_range.h index 9b2e9e45bac..19234f61ea2 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -66,61 +66,567 @@ class QUICK_RANGE :public Sql_alloc { }; -class QUICK_SELECT { +/* + Quick select interface. + This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. +*/ + +class QUICK_SELECT_I +{ +public: + bool sorted; + ha_rows records; /* estimate of # of records to be retrieved */ + double read_time; /* time to perform this retrieval */ + TABLE *head; + /* + Index this quick select uses, or MAX_KEY for quick selects + that use several indexes + */ + uint index; + + /* + Total length of first used_key_parts parts of the key. + Applicable if index!= MAX_KEY. + */ + uint max_used_key_length; + + /* + Max. number of (first) key parts this quick select uses for retrieval. + eg. for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. + Applicable if index!= MAX_KEY. + */ + uint used_key_parts; + + QUICK_SELECT_I(); + virtual ~QUICK_SELECT_I(){}; + + /* + Do post-constructor initialization. + SYNOPSIS + init() + + init() performs initializations that should have been in constructor if + it was possible to return errors from constructors. The join optimizer may + create and then delete quick selects without retrieving any rows so init() + must not contain any IO or CPU intensive code. + + If init() call fails the only valid action is to delete this quick select, + reset() and get_next() must not be called. 
+ + RETURN + 0 OK + other Error code + */ + virtual int init() = 0; + + /* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + + reset() should be called when it is certain that row retrieval will be + necessary. This call may do heavyweight initialization like buffering first + N records etc. If reset() call fails get_next() must not be called. + + RETURN + 0 OK + other Error code + */ + virtual int reset(void) = 0; + + /* Range end should be called when we have looped over the whole index */ + virtual void range_end() {} + virtual int get_next() = 0; /* get next record to retrieve */ + virtual bool reverse_sorted() = 0; + virtual bool unique_key_range() { return false; } + + enum { + QS_TYPE_RANGE = 0, + QS_TYPE_INDEX_MERGE = 1, + QS_TYPE_RANGE_DESC = 2, + QS_TYPE_FULLTEXT = 3, + QS_TYPE_ROR_INTERSECT = 4, + QS_TYPE_ROR_UNION = 5, + QS_TYPE_GROUP_MIN_MAX = 6 + }; + + /* Get type of this quick select - one of the QS_TYPE_* values */ + virtual int get_type() = 0; + + /* + Initialize this quick select as a merged scan inside a ROR-union or a ROR- + intersection scan. The caller must not additionally call init() if this + function is called. + SYNOPSIS + init_ror_merged_scan() + reuse_handler If true, the quick select may use table->handler, otherwise + it must create and use a separate handler object. + RETURN + 0 Ok + other Error + */ + virtual int init_ror_merged_scan(bool reuse_handler) + { DBUG_ASSERT(0); return 1; } + + /* + Save ROWID of last retrieved row in file->ref. This used in ROR-merging. + */ + virtual void save_last_pos(){}; + + /* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + This is used by select_describe. + */ + virtual void add_keys_and_lengths(String *key_names, + String *used_lengths)=0; + + /* + Append text representation of quick select structure (what and how is + merged) to str. The result is added to "Extra" field in EXPLAIN output. + This function is implemented only by quick selects that merge other quick + selects output and/or can produce output suitable for merging. + */ + virtual void add_info_string(String *str) {}; + /* + Return 1 if any index used by this quick select + a) uses field that is listed in passed field list or + b) is automatically updated (like a timestamp) + */ + virtual bool check_if_keys_used(List<Item> *fields); + + /* + rowid of last row retrieved by this quick select. This is used only when + doing ROR-index_merge selects + */ + byte *last_rowid; + + /* + Table record buffer used by this quick select. + */ + byte *record; +#ifndef DBUG_OFF + /* + Print quick select information to DBUG_FILE. Caller is responsible + for locking DBUG_FILE before this call and unlocking it afterwards. + */ + virtual void dbug_dump(int indent, bool verbose)= 0; +#endif +}; + + +struct st_qsel_param; +class SEL_ARG; + +/* + Quick select that does a range scan on a single key. The records are + returned in key order. +*/ +class QUICK_RANGE_SELECT : public QUICK_SELECT_I +{ +protected: + bool next,dont_free; public: - bool next,dont_free,sorted; int error; - uint index, max_used_key_length, used_key_parts; - TABLE *head; +protected: handler *file; - byte *record; - List<QUICK_RANGE> ranges; - List_iterator<QUICK_RANGE> it; - QUICK_RANGE *range; - MEM_ROOT alloc; + /* + If true, this quick select has its "own" handler object which should be + closed no later then this quick select is deleted. 
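The init()/reset()/get_next() contract documented above for QUICK_SELECT_I translates into a simple caller-side pattern. The toy program below mirrors that pattern with local stand-ins (the class, the END_OF_FILE constant and the row source are all invented); it is a sketch of the calling convention, not of any real server loop.

  #include <cstdio>

  namespace demo {

  const int END_OF_FILE= -1;

  struct quick_select                 /* stand-in for the abstract interface */
  {
    int row;
    quick_select() : row(0) {}
    int init()  { return 0; }         /* cheap, may only fail with an error  */
    int reset() { row= 0; return 0; } /* heavyweight setup before retrieval  */
    int get_next()                    /* 0: row available, END_OF_FILE: done */
    { return row < 3 ? (printf("row %d\n", row++), 0) : END_OF_FILE; }
  };

  int read_all_rows(quick_select *quick)
  {
    int error;
    if ((error= quick->init()))
    {
      delete quick;                   /* per the contract: the only valid    */
      return error;                   /* action after a failed init()        */
    }
    if ((error= quick->reset()))
    {
      delete quick;
      return error;
    }
    while (!(error= quick->get_next()))
      ;                               /* each iteration produced one row     */
    delete quick;
    return error == END_OF_FILE ? 0 : error;
  }

  } /* namespace demo */

  int main() { return demo::read_all_rows(new demo::quick_select()); }
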
+ */ + bool free_file; +protected: + friend class TRP_ROR_INTERSECT; + friend + QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref); + friend bool get_quick_keys(struct st_qsel_param *param, + QUICK_RANGE_SELECT *quick,KEY_PART *key, + SEL_ARG *key_tree, + char *min_key, uint min_key_flag, + char *max_key, uint max_key_flag); + friend QUICK_RANGE_SELECT *get_quick_select(struct st_qsel_param*,uint idx, + SEL_ARG *key_tree, + MEM_ROOT *alloc); + friend class QUICK_SELECT_DESC; + friend class QUICK_INDEX_MERGE_SELECT; + friend class QUICK_ROR_INTERSECT_SELECT; + + DYNAMIC_ARRAY ranges; /* ordered array of range ptrs */ + QUICK_RANGE **cur_range; /* current element in ranges */ + + QUICK_RANGE *range; KEY_PART *key_parts; KEY_PART_INFO *key_part_info; - ha_rows records; - double read_time; + int cmp_next(QUICK_RANGE *range); + int cmp_prev(QUICK_RANGE *range); + bool row_in_ranges(); +public: + MEM_ROOT alloc; - QUICK_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0); - virtual ~QUICK_SELECT(); - void reset(void) { next=0; it.rewind(); } - int init() + QUICK_RANGE_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0, + MEM_ROOT *parent_alloc=NULL); + ~QUICK_RANGE_SELECT(); + + int reset(void) { - key_part_info= head->key_info[index].key_part; - return error=file->ha_index_init(index); + next=0; + range= NULL; + cur_range= NULL; + return 0; } - virtual int get_next(); - virtual bool reverse_sorted() { return 0; } + int init(); + int get_next(); + void range_end(); + int get_next_prefix(uint prefix_length, byte *cur_prefix); + bool reverse_sorted() { return 0; } bool unique_key_range(); + int init_ror_merged_scan(bool reuse_handler); + void save_last_pos() + { file->position(record); } + int get_type() { return QS_TYPE_RANGE; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif }; -class QUICK_SELECT_GEOM: public QUICK_SELECT +class QUICK_RANGE_SELECT_GEOM: public QUICK_RANGE_SELECT { public: - QUICK_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, bool no_alloc) - :QUICK_SELECT(thd, table, index_arg, no_alloc) + QUICK_RANGE_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, + bool no_alloc, MEM_ROOT *parent_alloc) + :QUICK_RANGE_SELECT(thd, table, index_arg, no_alloc, parent_alloc) {}; virtual int get_next(); }; -class QUICK_SELECT_DESC: public QUICK_SELECT +/* + QUICK_INDEX_MERGE_SELECT - index_merge access method quick select. + + QUICK_INDEX_MERGE_SELECT uses + * QUICK_RANGE_SELECTs to get rows + * Unique class to remove duplicate rows + + INDEX MERGE OPTIMIZER + Current implementation doesn't detect all cases where index_merge could + be used, in particular: + * index_merge will never be used if range scan is possible (even if + range scan is more expensive) + + * index_merge+'using index' is not supported (this the consequence of + the above restriction) + + * If WHERE part contains complex nested AND and OR conditions, some ways + to retrieve rows using index_merge will not be considered. The choice + of read plan may depend on the order of conjuncts/disjuncts in WHERE + part of the query, see comments near imerge_list_or_list and + SEL_IMERGE::or_sel_tree_with_checks functions for details. + + * There is no "index_merge_ref" method (but index_merge on non-first + table in join is possible with 'range checked for each record'). 
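Before the details below, the core of the index_merge row retrieval algorithm is easy to see in isolation: rowids coming from the merged range scans are de-duplicated first, and every surviving row is fetched exactly once afterwards. In the stand-alone sketch that follows, std::set merely plays the role of the Unique class and the rowid arrays are invented sample data.

  #include <cstdio>
  #include <set>

  int main()
  {
    long scan_on_key1[]= { 11, 42, 57 };   /* rowids matching "key1 < c1" */
    long scan_on_key2[]= { 42, 90 };       /* rowids matching "key2 = c2" */

    std::set<long> unique_rowids;          /* phase 1: collect rowids     */
    for (int i= 0; i < 3; i++) unique_rowids.insert(scan_on_key1[i]);
    for (int i= 0; i < 2; i++) unique_rowids.insert(scan_on_key2[i]);

    for (std::set<long>::iterator it= unique_rowids.begin();
         it != unique_rowids.end(); ++it)  /* phase 2: fetch each row once */
      printf("fetch row with rowid %ld\n", *it);
    return 0;
  }

Note how rowid 42, which matches both conditions, is fetched only once.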
+ + See comments around SEL_IMERGE class and test_quick_select for more + details. + + ROW RETRIEVAL ALGORITHM + + index_merge uses Unique class for duplicates removal. index_merge takes + advantage of Clustered Primary Key (CPK) if the table has one. + The index_merge algorithm consists of two phases: + + Phase 1 (implemented in QUICK_INDEX_MERGE_SELECT::prepare_unique): + prepare() + { + activate 'index only'; + while(retrieve next row for non-CPK scan) + { + if (there is a CPK scan and row will be retrieved by it) + skip this row; + else + put its rowid into Unique; + } + deactivate 'index only'; + } + + Phase 2 (implemented as sequence of QUICK_INDEX_MERGE_SELECT::get_next + calls): + + fetch() + { + retrieve all rows from row pointers stored in Unique; + free Unique; + retrieve all rows for CPK scan; + } +*/ + +class QUICK_INDEX_MERGE_SELECT : public QUICK_SELECT_I +{ +public: + QUICK_INDEX_MERGE_SELECT(THD *thd, TABLE *table); + ~QUICK_INDEX_MERGE_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_INDEX_MERGE; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool check_if_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + + bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); + + /* range quick selects this index_merge read consists of */ + List<QUICK_RANGE_SELECT> quick_selects; + + /* quick select which is currently used for rows retrieval */ + List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it; + QUICK_RANGE_SELECT* cur_quick_select; + + /* quick select that uses clustered primary key (NULL if none) */ + QUICK_RANGE_SELECT* pk_quick_select; + + /* true if this select is currently doing a clustered PK scan */ + bool doing_pk_scan; + + Unique *unique; + MEM_ROOT alloc; + + THD *thd; + int prepare_unique(); + + /* used to get rows collected in Unique */ + READ_RECORD read_record; +}; + + +/* + Rowid-Ordered Retrieval (ROR) index intersection quick select. + This quick select produces intersection of row sequences returned + by several QUICK_RANGE_SELECTs it "merges". + + All merged QUICK_RANGE_SELECTs must return rowids in rowid order. + QUICK_ROR_INTERSECT_SELECT will return rows in rowid order, too. + + All merged quick selects retrieve {rowid, covered_fields} tuples (not full + table records). + QUICK_ROR_INTERSECT_SELECT retrieves full records if it is not being used + by QUICK_ROR_INTERSECT_SELECT and all merged quick selects together don't + cover needed all fields. + + If one of the merged quick selects is a Clustered PK range scan, it is + used only to filter rowid sequence produced by other merged quick selects. 
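The rowid-order requirement is what makes the intersection cheap: ordered streams can be merged in a single forward pass, and the result is again ordered, so it can in turn feed a ROR-union. A stand-alone sketch of that merge, with invented rowid sequences and no server types, looks as follows.

  #include <cstdio>
  #include <vector>

  int main()
  {
    long a[]= { 3, 8, 15, 21, 40 };        /* rowids from two range scans,  */
    long b[]= { 8, 9, 21, 33, 40 };        /* each already sorted by rowid  */
    std::vector<long> scan_a(a, a + 5), scan_b(b, b + 5);

    size_t i= 0, j= 0;
    while (i < scan_a.size() && j < scan_b.size())
    {
      if (scan_a[i] < scan_b[j])      ++i; /* scan a is behind: advance it  */
      else if (scan_b[j] < scan_a[i]) ++j; /* scan b is behind: advance it  */
      else                                 /* same rowid in both scans      */
      {
        printf("row %ld is in the intersection\n", scan_a[i]);
        ++i; ++j;
      }
    }
    return 0;
  }
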
+*/ + +class QUICK_ROR_INTERSECT_SELECT : public QUICK_SELECT_I +{ +public: + QUICK_ROR_INTERSECT_SELECT(THD *thd, TABLE *table, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + ~QUICK_ROR_INTERSECT_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_ROR_INTERSECT; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool check_if_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + int init_ror_merged_scan(bool reuse_handler); + bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); + + /* + Range quick selects this intersection consists of, not including + cpk_quick. + */ + List<QUICK_RANGE_SELECT> quick_selects; + + /* + Merged quick select that uses Clustered PK, if there is one. This quick + select is not used for row retrieval, it is used for row retrieval. + */ + QUICK_RANGE_SELECT *cpk_quick; + + MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */ + THD *thd; /* current thread */ + bool need_to_fetch_row; /* if true, do retrieve full table records. */ +}; + + +/* + Rowid-Ordered Retrieval index union select. + This quick select produces union of row sequences returned by several + quick select it "merges". + + All merged quick selects must return rowids in rowid order. + QUICK_ROR_UNION_SELECT will return rows in rowid order, too. + + All merged quick selects are set not to retrieve full table records. + ROR-union quick select always retrieves full records. + +*/ + +class QUICK_ROR_UNION_SELECT : public QUICK_SELECT_I { public: - QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts); + QUICK_ROR_UNION_SELECT(THD *thd, TABLE *table); + ~QUICK_ROR_UNION_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_ROR_UNION; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool check_if_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + + bool push_quick_back(QUICK_SELECT_I *quick_sel_range); + + List<QUICK_SELECT_I> quick_selects; /* Merged quick selects */ + + QUEUE queue; /* Priority queue for merge operation */ + MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */ + + THD *thd; /* current thread */ + byte *cur_rowid; /* buffer used in get_next() */ + byte *prev_rowid; /* rowid of last row returned by get_next() */ + bool have_prev_rowid; /* true if prev_rowid has valid data */ + uint rowid_length; /* table rowid length */ +private: + static int queue_cmp(void *arg, byte *val1, byte *val2); +}; + + +/* + Index scan for GROUP-BY queries with MIN/MAX aggregate functions. + + This class provides a specialized index access method for GROUP-BY queries + of the forms: + + SELECT A_1,...,A_k, [B_1,...,B_m], [MIN(C)], [MAX(C)] + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + or + + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + where all selected fields are parts of the same index. 
+ The class of queries that can be processed by this quick select is fully + specified in the description of get_best_trp_group_min_max() in opt_range.cc. + + The get_next() method directly produces result tuples, thus obviating the + need to call end_send_group() because all grouping is already done inside + get_next(). + + Since one of the requirements is that all select fields are part of the same + index, this class produces only index keys, and not complete records. +*/ + +class QUICK_GROUP_MIN_MAX_SELECT : public QUICK_SELECT_I +{ +private: + handler *file; /* The handler used to get data. */ + JOIN *join; /* Descriptor of the current query */ + KEY *index_info; /* The index chosen for data access */ + byte *record; /* Buffer where the next record is returned. */ + byte *tmp_record; /* Temporary storage for next_min(), next_max(). */ + byte *group_prefix; /* Key prefix consisting of the GROUP fields. */ + uint group_prefix_len; /* Length of the group prefix. */ + byte *last_prefix; /* Prefix of the last group for detecting EOF. */ + bool have_min; /* Specify whether we are computing */ + bool have_max; /* a MIN, a MAX, or both. */ + bool seen_first_key; /* Denotes whether the first key was retrieved.*/ + KEY_PART_INFO *min_max_arg_part; /* The keypart of the only argument field */ + /* of all MIN/MAX functions. */ + uint min_max_arg_len; /* The length of the MIN/MAX argument field */ + byte *key_infix; /* Infix of constants from equality predicates. */ + uint key_infix_len; + DYNAMIC_ARRAY min_max_ranges; /* Array of range ptrs for the MIN/MAX field. */ + uint real_prefix_len; /* Length of key prefix extended with key_infix. */ + List<Item_sum> *min_functions; + List<Item_sum> *max_functions; + List_iterator<Item_sum> *min_functions_it; + List_iterator<Item_sum> *max_functions_it; +public: + /* + The following two members are public to allow easy access from + TRP_GROUP_MIN_MAX::make_quick() + */ + MEM_ROOT alloc; /* Memory pool for this and quick_prefix_select data. */ + QUICK_RANGE_SELECT *quick_prefix_select;/* For retrieval of group prefixes. 
*/ +private: + int next_prefix(); + int next_min_in_range(); + int next_max_in_range(); + int next_min(); + int next_max(); + void update_min_result(); + void update_max_result(); +public: + QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min, + bool have_max, KEY_PART_INFO *min_max_arg_part, + uint group_prefix_len, uint used_key_parts, + KEY *index_info, uint use_index, double read_cost, + ha_rows records, uint key_infix_len, + byte *key_infix, MEM_ROOT *parent_alloc); + ~QUICK_GROUP_MIN_MAX_SELECT(); + bool add_range(SEL_ARG *sel_range); + void update_key_stat(); + bool alloc_buffers(); + int init(); + int reset(); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_GROUP_MIN_MAX; } + void add_keys_and_lengths(String *key_names, String *used_lengths); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif +}; + + +class QUICK_SELECT_DESC: public QUICK_RANGE_SELECT +{ +public: + QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, uint used_key_parts); int get_next(); bool reverse_sorted() { return 1; } + int get_type() { return QS_TYPE_RANGE_DESC; } private: - int cmp_prev(QUICK_RANGE *range); bool range_reads_after_key(QUICK_RANGE *range); #ifdef NOT_USED bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts); #endif - void reset(void) { next=0; rev_it.rewind(); } + int reset(void) { next=0; rev_it.rewind(); return 0; } List<QUICK_RANGE> rev_ranges; List_iterator<QUICK_RANGE> rev_it; }; @@ -128,7 +634,7 @@ private: class SQL_SELECT :public Sql_alloc { public: - QUICK_SELECT *quick; // If quick-select used + QUICK_SELECT_I *quick; // If quick-select used COND *cond; // where condition TABLE *head; IO_CACHE file; // Positions to used records @@ -150,17 +656,16 @@ class SQL_SELECT :public Sql_alloc { }; -class FT_SELECT: public QUICK_SELECT { +class FT_SELECT: public QUICK_RANGE_SELECT { public: - FT_SELECT(THD *thd, TABLE *table, uint key): - QUICK_SELECT (thd, table, key, 1) { init(); } - - int init() { return error= file->ft_init(); } - int get_next() { return error= file->ft_read(record); } + FT_SELECT(THD *thd, TABLE *table, uint key) : + QUICK_RANGE_SELECT (thd, table, key, 1) { init(); } + ~FT_SELECT() { file->ft_end(); } + int init() { return error=file->ft_init(); } + int get_next() { return error=file->ft_read(record); } + int get_type() { return QS_TYPE_FULLTEXT; } }; - -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, - struct st_table_ref *ref); - +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref); #endif diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 613b2a4aec1..30033bc39eb 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -89,7 +89,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) where_tables= conds->used_tables(); /* Don't replace expression on a table that is part of an outer join */ - for (TABLE_LIST *tl=tables; tl ; tl= tl->next) + for (TABLE_LIST *tl= tables; tl; tl= tl->next_local) { if (tl->on_expr) { @@ -128,7 +128,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { longlong count= 1; TABLE_LIST *table; - for (table=tables ; table ; table=table->next) + for (table= tables; table; table= table->next_local) { if (outer_tables || (table->table->file->table_flags() & HA_NOT_EXACT_COUNT)) @@ -332,11 +332,23 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) 1 Otherwise */ -static bool simple_pred(Item_func 
*func_item, Item **args, bool *inv_order) +bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) { Item *item; *inv_order= 0; switch (func_item->argument_count()) { + case 0: + /* MULT_EQUAL_FUNC */ + { + Item_equal *item_equal= (Item_equal *) func_item; + Item_equal_iterator it(*item_equal); + args[0]= it++; + if (it++) + return 0; + if (!(args[1]= item_equal->get_const())) + return 0; + } + break; case 1: /* field IS NULL */ item= func_item->arguments()[0]; @@ -477,6 +489,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, case Item_func::BETWEEN: between= 1; break; + case Item_func::MULT_EQUAL_FUNC: + eq_type= 1; + break; default: return 0; // Can't optimize function } diff --git a/sql/parse_file.cc b/sql/parse_file.cc new file mode 100644 index 00000000000..c3043ed2c73 --- /dev/null +++ b/sql/parse_file.cc @@ -0,0 +1,773 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +// Text .frm files management routines + +#include "mysql_priv.h" +#include <errno.h> +#include <m_ctype.h> +#include <my_sys.h> +#include <my_dir.h> + + +/* + write string with escaping + + SYNOPSIS + write_escaped_string() + file - IO_CACHE for record + val_s - string for writing + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_escaped_string(IO_CACHE *file, LEX_STRING *val_s) +{ + char *eos= val_s->str + val_s->length; + char *ptr= val_s->str; + + for (; ptr < eos; ptr++) + { + /* + Should be in sync with read_escaped_string() and + parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': // escape character + if (my_b_append(file, (const byte *)"\\\\", 2)) + return TRUE; + break; + case '\n': // parameter value delimiter + if (my_b_append(file, (const byte *)"\\n", 2)) + return TRUE; + break; + case '\0': // problem for some string processing utilities + if (my_b_append(file, (const byte *)"\\0", 2)) + return TRUE; + break; + case 26: // problem for windows utilities (Ctrl-Z) + if (my_b_append(file, (const byte *)"\\z", 2)) + return TRUE; + break; + case '\'': // list of string delimiter + if (my_b_append(file, (const byte *)"\\\'", 2)) + return TRUE; + break; + default: + if (my_b_append(file, (const byte *)ptr, 1)) + return TRUE; + } + } + return FALSE; +} + + +/* + write parameter value to IO_CACHE + + SYNOPSIS + write_parameter() + file pointer to IO_CACHE structure for writing + base pointer to data structure + parameter pointer to parameter descriptor + old_version for returning back old version number value + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_parameter(IO_CACHE *file, gptr base, File_option *parameter, + ulonglong *old_version) +{ + char num_buf[20]; // buffer for numeric operations + // string for numeric operations + String num(num_buf, sizeof(num_buf), &my_charset_bin); + DBUG_ENTER("write_parameter"); + + switch (parameter->type) { + 
case FILE_OPTIONS_STRING: + { + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + if (my_b_append(file, (const byte *)val_s->str, val_s->length)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ESTRING: + { + if (write_escaped_string(file, (LEX_STRING *)(base + parameter->offset))) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ULONGLONG: + { + num.set(*((ulonglong *)(base + parameter->offset)), &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_REV: + { + ulonglong *val_i= (ulonglong *)(base + parameter->offset); + *old_version= (*val_i)++; + num.set(*val_i, &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_TIMESTAMP: + { + /* string have to be allocated already */ + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + time_t tm= time(NULL); + + get_date(val_s->str, GETDATE_DATE_TIME|GETDATE_GMT|GETDATE_FIXEDLENGTH, + tm); + val_s->length= PARSE_FILE_TIMESTAMPLENGTH; + if (my_b_append(file, (const byte *)val_s->str, + PARSE_FILE_TIMESTAMPLENGTH)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_STRLIST: + { + List_iterator_fast<LEX_STRING> it(*((List<LEX_STRING>*) + (base + parameter->offset))); + bool first= 1; + LEX_STRING *str; + while ((str= it++)) + { + // We need ' ' after string to detect list continuation + if ((!first && my_b_append(file, (const byte *)" ", 1)) || + my_b_append(file, (const byte *)"\'", 1) || + write_escaped_string(file, str) || + my_b_append(file, (const byte *)"\'", 1)) + { + DBUG_RETURN(TRUE); + } + first= 0; + } + break; + } + default: + DBUG_ASSERT(0); // never should happened + } + DBUG_RETURN(FALSE); +} + + +/* + write new .frm + + SYNOPSIS + sql_create_definition_file() + dir directory where put .frm + file .frm file name + type .frm type string (VIEW, TABLE) + base base address for parameter reading (structure like + TABLE) + parameters parameters description + max_versions number of versions to save + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, + uint max_versions) +{ + File handler; + IO_CACHE file; + char path[FN_REFLEN+1]; // +1 to put temporary file name for sure + ulonglong old_version= ULONGLONG_MAX; + int path_end; + File_option *param; + DBUG_ENTER("sql_create_definition_file"); + DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx", + dir->str, file_name->str, (ulong) base)); + + fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME); + path_end= strlen(path); + + // temporary file name + path[path_end]='~'; + path[path_end+1]= '\0'; + if ((handler= my_create(path, CREATE_MODE, O_RDWR | O_TRUNC, + MYF(MY_WME))) <= 0) + { + DBUG_RETURN(TRUE); + } + + if (init_io_cache(&file, handler, 0, SEQ_READ_APPEND, 0L, 0, MYF(MY_WME))) + goto err_w_file; + + // write header (file signature) + if (my_b_append(&file, (const byte *)"TYPE=", 5) || + my_b_append(&file, (const byte *)type->str, type->length) || + my_b_append(&file, (const byte *)"\n", 1)) + goto err_w_file; + + // write parameters to temporary file + for (param= parameters; param->name.str; param++) + { + if (my_b_append(&file, (const byte *)param->name.str, + param->name.length) || + my_b_append(&file, (const byte *)"=", 1) || + write_parameter(&file, base, param, &old_version) || + my_b_append(&file, (const byte *)"\n", 1)) + 
goto err_w_cache; + } + + if (end_io_cache(&file)) + goto err_w_file; + + if (my_close(handler, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // archive copies management + path[path_end]='\0'; + if (!access(path, F_OK)) + { + if (old_version != ULONGLONG_MAX && max_versions != 0) + { + // save backup + char path_arc[FN_REFLEN]; + // backup old version + char path_to[FN_REFLEN]; + + // check archive directory existence + fn_format(path_arc, "arc", dir->str, "", MY_UNPACK_FILENAME); + if (access(path_arc, F_OK)) + { + if (my_mkdir(path_arc, 0777, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, (ulong) old_version); + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // remove very old version + if (old_version > max_versions) + { + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, + (ulong)(old_version - max_versions)); + if (!access(path_arc, F_OK) && my_delete(path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + } + else + { + if (my_delete(path, MYF(MY_WME))) // no backups + { + DBUG_RETURN(TRUE); + } + } + } + + { + // rename temporary file + char path_to[FN_REFLEN]; + memcpy(path_to, path, path_end+1); + path[path_end]='~'; + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + DBUG_RETURN(FALSE); +err_w_cache: + end_io_cache(&file); +err_w_file: + my_close(handler, MYF(MY_WME)); + DBUG_RETURN(TRUE); +} + + +/* + Prepare .frm file for parsing (read it into memory) + + SYNOPSIS + sql_parse_prepare() + file_name - path & filename to .frm file + mem_root - MEM_ROOT for buffer allocation + bad_format_errors - send errors on bad content + + RETURN + 0 - error + parser object + + NOTE + returned pointer + 1 will be type of .frm +*/ + +File_parser * +sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root, + bool bad_format_errors) +{ + MY_STAT stat_info; + uint len; + char *end, *sign; + File_parser *parser; + File file; + DBUG_ENTER("sql_parse_prepare"); + + if (!my_stat(file_name->str, &stat_info, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + if (stat_info.st_size > INT_MAX-1) + { + my_error(ER_FPARSER_TOO_BIG_FILE, MYF(0), file_name->str); + DBUG_RETURN(0); + } + + if (!(parser= new(mem_root) File_parser)) + { + DBUG_RETURN(0); + } + + if (!(parser->buff= alloc_root(mem_root, stat_info.st_size+1))) + { + DBUG_RETURN(0); + } + + if ((file= my_open(file_name->str, O_RDONLY | O_SHARE, MYF(MY_WME))) < 0) + { + DBUG_RETURN(0); + } + + if ((len= my_read(file, (byte *)parser->buff, + stat_info.st_size, MYF(MY_WME))) == + MY_FILE_ERROR) + { + my_close(file, MYF(MY_WME)); + DBUG_RETURN(0); + } + + if (my_close(file, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + end= parser->end= parser->buff + len; + *end= '\0'; // sentinel to simplify parsing + + // 7 = 5 (TYPE=) + 1 (at least one letter of type name) + 1 ('\n') + if (len < 7 || + parser->buff[0] != 'T' || + parser->buff[1] != 'Y' || + parser->buff[2] != 'P' || + parser->buff[3] != 'E' || + parser->buff[4] != '=') + goto frm_error; + + // skip signature + parser->file_type.str= sign= parser->buff + 5; + while (*sign >= 'A' && *sign <= 'Z' && sign < end) + sign++; + if (*sign != '\n') + goto frm_error; + parser->file_type.length= sign - parser->file_type.str; + // EOS for file signature just for safety + *sign= '\0'; + + parser->start= sign + 1; + parser->content_ok= 1; + + DBUG_RETURN(parser); + +frm_error: + if (bad_format_errors) + { + my_error(ER_FPARSER_BAD_HEADER, MYF(0), file_name->str); +
DBUG_RETURN(0); + } + else + DBUG_RETURN(parser); // upper level has to check parser->ok() +} + + +/* + parse LEX_STRING + + SYNOPSIS + parse_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed) + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + // get string length + char *eol= strchr(ptr, '\n'); + + if (eol >= end) + return 0; + + str->length= eol - ptr; + + if (!(str->str= alloc_root(mem_root, str->length+1))) + return 0; + + memcpy(str->str, ptr, str->length); + str->str[str->length]= '\0'; // just for safety + return eol+1; +} + + +/* + read escaped string from ptr to eol in already allocated str + + SYNOPSIS + read_escaped_string() + ptr - pointer on string beginning + eol - pointer on character after end of string + str - target string + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +read_escaped_string(char *ptr, char *eol, LEX_STRING *str) +{ + char *write_pos= str->str; + + for(; ptr < eol; ptr++, write_pos++) + { + char c= *ptr; + if (c == '\\') + { + ptr++; + if (ptr >= eol) + return TRUE; + /* + Should be in sync with write_escaped_string() and + parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': + *write_pos= '\\'; + break; + case 'n': + *write_pos= '\n'; + break; + case '0': + *write_pos= '\0'; + break; + case 'z': + *write_pos= 26; + break; + case '\'': + *write_pos= '\''; + break; + default: + return TRUE; + } + } + else + *write_pos= c; + } + str->str[str->length= write_pos-str->str]= '\0'; // just for safety + return FALSE; +} + + +/* + parse \n delimited escaped string + + SYNOPSIS + parse_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed) + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_escaped_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol= strchr(ptr, '\n'); + + if (eol == 0 || eol >= end || + !(str->str= alloc_root(mem_root, (eol - ptr) + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + parse '' delimited escaped string + + SYNOPSIS + parse_quoted_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed) + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_quoted_escaped_string(char *ptr, char *end, + MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol; + uint result_len= 0; + bool escaped= 0; + + // starting ' + if (*(ptr++) != '\'') + return 0; + + // find ending ' + for (eol= ptr; (*eol != '\'' || escaped) && eol < end; eol++) + { + if (!(escaped= (*eol == '\\' && !escaped))) + result_len++; + } + + // process string + if (eol >= end || + !(str->str= alloc_root(mem_root, result_len + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + parse parameters + + SYNOPSIS + File_parser::parse() + base base address for parameter writing (structure like + TABLE) + mem_root MEM_ROOT for
parameters allocation + parameters parameters description + required number of required parameters in above list + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +File_parser::parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required) +{ + uint first_param= 0, found= 0; + register char *ptr= start; + char *eol; + LEX_STRING *str; + List<LEX_STRING> *list; + DBUG_ENTER("File_parser::parse"); + + while (ptr < end && found < required) + { + char *line= ptr; + if (*ptr == '#') + { + // it is a comment + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_COMMENT, MYF(0), line); + DBUG_RETURN(TRUE); + } + ptr++; + } + else + { + File_option *parameter= parameters+first_param, + *parameters_end= parameters+required; + int len= 0; + for(; parameter < parameters_end; parameter++) + { + len= parameter->name.length; + // check length + if (len < (end-ptr) && ptr[len] != '=') + continue; + // check keyword + if (memcmp(parameter->name.str, ptr, len) == 0) + break; + } + + if (parameter < parameters_end) + { + found++; + /* + if we found first parameter, start search from next parameter + next time. + (this small optimisation should work, because they should be + written in same order) + */ + if (parameter == parameters+first_param) + first_param++; + + // get value + ptr+= (len+1); + switch (parameter->type) { + case FILE_OPTIONS_STRING: + { + if (!(ptr= parse_string(ptr, end, mem_root, + (LEX_STRING *)(base + + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ESTRING: + { + if (!(ptr= parse_escaped_string(ptr, end, mem_root, + (LEX_STRING *) + (base + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ULONGLONG: + case FILE_OPTIONS_REV: + if (!(eol= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + { + int not_used; + *((ulonglong*)(base + parameter->offset))= + my_strtoll10(ptr, 0, &not_used); + } + ptr= eol+1; + break; + case FILE_OPTIONS_TIMESTAMP: + { + /* string has to be allocated already */ + LEX_STRING *val= (LEX_STRING *)(base + parameter->offset); + /* yyyy-mm-dd HH:MM:SS = 19(PARSE_FILE_TIMESTAMPLENGTH) characters */ + if (ptr[PARSE_FILE_TIMESTAMPLENGTH] != '\n') + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + memcpy(val->str, ptr, PARSE_FILE_TIMESTAMPLENGTH); + val->str[val->length= PARSE_FILE_TIMESTAMPLENGTH]= '\0'; + ptr+= (PARSE_FILE_TIMESTAMPLENGTH+1); + break; + } + case FILE_OPTIONS_STRLIST: + { + list= (List<LEX_STRING>*)(base + parameter->offset); + + list->empty(); + // list parsing + while (ptr < end) + { + if (!(str= (LEX_STRING*)alloc_root(mem_root, + sizeof(LEX_STRING))) || + list->push_back(str, mem_root)) + goto list_err; + if(!(ptr= parse_quoted_escaped_string(ptr, end, mem_root, str))) + goto list_err_w_message; + switch (*ptr) { + case '\n': + goto end_of_list; + case ' ': + // we can't go past the buffer bounds, because we have \0 at the end + ptr++; + break; + default: + goto list_err_w_message; + } + } + end_of_list: + if (*(ptr++) != '\n') + goto list_err; + break; + + list_err_w_message: + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + list_err: + DBUG_RETURN(TRUE); + } + default: + DBUG_ASSERT(0); // should never happen + } + } +
else + { + // skip unknown parameter + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER, MYF(0), + line); + DBUG_RETURN(TRUE); + } + ptr++; + } + } + } + DBUG_RETURN(FALSE); +} diff --git a/sql/parse_file.h b/sql/parse_file.h new file mode 100644 index 00000000000..82a89dffd18 --- /dev/null +++ b/sql/parse_file.h @@ -0,0 +1,69 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _PARSE_FILE_H_ +#define _PARSE_FILE_H_ +
+#define PARSE_FILE_TIMESTAMPLENGTH 19 + +enum file_opt_type { + FILE_OPTIONS_STRING, /* String (LEX_STRING) */ + FILE_OPTIONS_ESTRING, /* Escaped string (LEX_STRING) */ + FILE_OPTIONS_ULONGLONG, /* ulonglong parameter (ulonglong) */ + FILE_OPTIONS_REV, /* Revision version number (ulonglong) */ + FILE_OPTIONS_TIMESTAMP, /* timestamp (LEX_STRING has to be + allocated with length 20 (19+1)) */ + FILE_OPTIONS_STRLIST /* list of escaped strings + (List<LEX_STRING>) */ +}; + +struct File_option +{ + LEX_STRING name; /* Name of the option */ + int offset; /* offset of the value from the base address */ + file_opt_type type; /* Option type */ +}; + +class File_parser; +File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, bool bad_format_errors); + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, uint versions); + +class File_parser: public Sql_alloc +{ + char *buff, *start, *end; + LEX_STRING file_type; + my_bool content_ok; +public: + File_parser() :buff(0), start(0), end(0), content_ok(0) + { file_type.str= 0; file_type.length= 0; } + + my_bool ok() { return content_ok; } + LEX_STRING *type() { return &file_type; } + my_bool parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required); + + friend File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, + bool bad_format_errors); +}; + +#endif /* _PARSE_FILE_H_ */ diff --git a/sql/password.c b/sql/password.c index b9f3a07e596..e2f7e14d39f 100644 --- a/sql/password.c +++ b/sql/password.c @@ -65,7 +65,7 @@ #include <sha1.h> #include "mysql.h" -/************ MySQL 3.23-4.0 authentification routines: untouched ***********/ +/************ MySQL 3.23-4.0 authentication routines: untouched ***********/ /* New (MySQL 3.21+) random generation structure initialization @@ -280,7 +280,7 @@ void make_password_from_salt_323(char *to, const ulong *salt) /* - **************** MySQL 4.1.1 authentification routines ************* + **************** MySQL 4.1.1 authentication routines ************* */ /* diff --git a/sql/protocol.cc b/sql/protocol.cc index eaa0fd55b25..2b0ae60f944 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -24,6 +24,7 @@ #endif #include "mysql_priv.h" +#include "sp_rcontext.h" #include <stdarg.h> static const unsigned
int PACKET_BUFFER_EXTRA_ALLOC= 1024; @@ -64,6 +65,11 @@ void send_error(THD *thd, uint sql_errno, const char *err) err ? err : net->last_error[0] ? net->last_error : "NULL")); + if (thd->spcont && thd->spcont->find_handler(sql_errno, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_VOID_RETURN; + } #ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ query_cache_abort(net); #endif @@ -147,6 +153,11 @@ void send_error(THD *thd, uint sql_errno, const char *err) void send_warning(THD *thd, uint sql_errno, const char *err) { DBUG_ENTER("send_warning"); + if (thd->spcont && + thd->spcont->find_handler(sql_errno, MYSQL_ERROR::WARN_LEVEL_WARN)) + { + DBUG_VOID_RETURN; + } push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, sql_errno, err ? err : ER(sql_errno)); send_ok(thd); @@ -177,6 +188,11 @@ net_printf(THD *thd, uint errcode, ...) DBUG_ENTER("net_printf"); DBUG_PRINT("enter",("message: %u",errcode)); + if (thd->spcont && thd->spcont->find_handler(errcode, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_VOID_RETURN; + } thd->query_error= 1; // needed to catch query errors during replication #ifndef EMBEDDED_LIBRARY query_cache_abort(net); // Safety @@ -350,7 +366,7 @@ send_eof(THD *thd, bool no_flush) { NET *net= &thd->net; DBUG_ENTER("send_eof"); - if (net->vio != 0) + if (net->vio != 0 && !net->no_send_eof) { if (thd->client_capabilities & CLIENT_PROTOCOL_41) { @@ -489,6 +505,7 @@ bool Protocol::flush() flag Bit mask with the following functions: 1 send number of rows 2 send default values + 4 don't write eof packet DESCRIPTION Sum fields has table name empty and field_name. @@ -499,7 +516,7 @@ bool Protocol::flush() */ #ifndef EMBEDDED_LIBRARY -bool Protocol::send_fields(List<Item> *list, uint flag) +bool Protocol::send_fields(List<Item> *list, uint flags) { List_iterator_fast<Item> it(*list); Item *item; @@ -510,7 +527,7 @@ bool Protocol::send_fields(List<Item> *list, uint flag) CHARSET_INFO *thd_charset= thd->variables.character_set_results; DBUG_ENTER("send_fields"); - if (flag & 1) + if (flags & SEND_NUM_ROWS) { // Packet with number of elements char *pos=net_store_length(buff, (uint) list->elements); (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); @@ -596,7 +613,7 @@ bool Protocol::send_fields(List<Item> *list, uint flag) } } local_packet->length((uint) (pos - local_packet->ptr())); - if (flag & 2) + if (flags & SEND_DEFAULTS) item->send(&prot, &tmp); // Send default value if (prot.write()) break; /* purecov: inspected */ @@ -605,7 +622,8 @@ bool Protocol::send_fields(List<Item> *list, uint flag) #endif } - my_net_write(&thd->net, eof_buff, 1); + if (flags & SEND_EOF) + my_net_write(&thd->net, eof_buff, 1); DBUG_RETURN(prepare_for_send(list)); err: @@ -964,12 +982,6 @@ void Protocol_prep::prepare_for_resend() bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= thd->variables.character_set_results; -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && - field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -977,12 +989,6 @@ bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) bool Protocol_prep::store(const char *from,uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && - 
field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -1000,10 +1006,6 @@ bool Protocol_prep::store_null() bool Protocol_prep::store_tiny(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TINY); -#endif char buff[1]; field_pos++; buff[0]= (uchar) from; @@ -1013,11 +1015,6 @@ bool Protocol_prep::store_tiny(longlong from) bool Protocol_prep::store_short(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_SHORT || - field_types[field_pos] == MYSQL_TYPE_YEAR); -#endif field_pos++; char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1029,11 +1026,6 @@ bool Protocol_prep::store_short(longlong from) bool Protocol_prep::store_long(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_INT24 || - field_types[field_pos] == MYSQL_TYPE_LONG); -#endif field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1045,10 +1037,6 @@ bool Protocol_prep::store_long(longlong from) bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_LONGLONG); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1060,10 +1048,6 @@ bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) bool Protocol_prep::store(float from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_FLOAT); -#endif field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1075,10 +1059,6 @@ bool Protocol_prep::store(float from, uint32 decimals, String *buffer) bool Protocol_prep::store(double from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DOUBLE); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1102,12 +1082,6 @@ bool Protocol_prep::store(Field *field) bool Protocol_prep::store(TIME *tm) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DATETIME || - field_types[field_pos] == MYSQL_TYPE_DATE || - field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); -#endif char buff[12],*pos; uint length; field_pos++; @@ -1142,10 +1116,6 @@ bool Protocol_prep::store_date(TIME *tm) bool Protocol_prep::store_time(TIME *tm) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TIME); -#endif char buff[13], *pos; uint length; field_pos++; @@ -1172,12 +1142,3 @@ bool Protocol_prep::store_time(TIME *tm) buff[0]=(char) length; // Length is stored first return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); } - -#ifdef EMBEDDED_LIBRARY -/* Should be removed when we define the Protocol_cursor's future */ -bool Protocol_cursor::write() -{ - return Protocol_simple::write(); -} -#endif - diff --git a/sql/protocol.h b/sql/protocol.h index a3b6da55da3..d342af3ee9f 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -50,17 +50,16 @@ public: Protocol(THD *thd_arg) { init(thd_arg); } virtual ~Protocol() {} void init(THD* thd_arg); - bool send_fields(List<Item> *list, uint flag); + + enum { SEND_NUM_ROWS= 1, SEND_DEFAULTS= 2, SEND_EOF= 4 }; + virtual bool send_fields(List<Item> *list, uint flags); + bool 
send_records_num(List<Item> *list, ulonglong records); bool store(I_List<i_string> *str_list); bool store(const char *from, CHARSET_INFO *cs); String *storage_packet() { return packet; } inline void free() { packet->free(); } -#ifndef EMBEDDED_LIBRARY - bool write(); -#else virtual bool write(); -#endif inline bool store(uint32 from) { return store_long((longlong) from); } inline bool store(longlong from) @@ -162,13 +161,15 @@ public: Protocol_cursor(THD *thd_arg, MEM_ROOT *ini_alloc) :Protocol_simple(thd_arg), alloc(ini_alloc) {} bool prepare_for_send(List<Item> *item_list) { + row_count= 0; fields= NULL; data= NULL; prev_record= &data; return Protocol_simple::prepare_for_send(item_list); } - bool send_fields(List<Item> *list, uint flag); + bool send_fields(List<Item> *list, uint flags); bool write(); + uint get_field_count() { return field_count; } }; void send_warning(THD *thd, uint sql_errno, const char *err=0); diff --git a/sql/protocol_cursor.cc b/sql/protocol_cursor.cc index 5f35552c562..104457b3bcc 100644 --- a/sql/protocol_cursor.cc +++ b/sql/protocol_cursor.cc @@ -26,22 +26,21 @@ #include "mysql_priv.h" #include <mysql.h> -bool Protocol_cursor::send_fields(List<Item> *list, uint flag) +bool Protocol_cursor::send_fields(List<Item> *list, uint flags) { List_iterator_fast<Item> it(*list); Item *item; MYSQL_FIELD *client_field; - - DBUG_ENTER("send_fields"); + DBUG_ENTER("Protocol_cursor::send_fields"); + if (prepare_for_send(list)) - return FALSE; + return FALSE; fields= (MYSQL_FIELD *)alloc_root(alloc, sizeof(MYSQL_FIELD) * field_count); if (!fields) goto err; - client_field= fields; - while ((item= it++)) + for (client_field= fields; (item= it++) ; client_field++) { Send_field server_field; item->make_field(&server_field); @@ -51,6 +50,7 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) client_field->name= strdup_root(alloc, server_field.col_name); client_field->org_table= strdup_root(alloc, server_field.org_table_name); client_field->org_name= strdup_root(alloc, server_field.org_col_name); + client_field->catalog= strdup_root(alloc, ""); client_field->length= server_field.length; client_field->type= server_field.type; client_field->flags= server_field.flags; @@ -60,12 +60,13 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) client_field->name_length= strlen(client_field->name); client_field->org_name_length= strlen(client_field->org_name); client_field->org_table_length= strlen(client_field->org_table); + client_field->catalog_length= 0; client_field->charsetnr= server_field.charsetnr; if (INTERNAL_NUM_FIELD(client_field)) client_field->flags|= NUM_FLAG; - if (flag & 2) + if (flags & (uint) Protocol::SEND_DEFAULTS) { char buff[80]; String tmp(buff, sizeof(buff), default_charset_info), *res; @@ -78,16 +79,18 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) else client_field->def=0; client_field->max_length= 0; - ++client_field; } DBUG_RETURN(FALSE); - err: + +err: send_error(thd, ER_OUT_OF_RESOURCES); /* purecov: inspected */ DBUG_RETURN(TRUE); /* purecov: inspected */ } + /* Get the length of next field. 
Change parameter to point at fieldstart */ + bool Protocol_cursor::write() { byte *cp= (byte *)packet->ptr(); @@ -100,17 +103,18 @@ bool Protocol_cursor::write() byte *to; new_record= (MYSQL_ROWS *)alloc_root(alloc, - sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); + sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); if (!new_record) goto err; data_tmp= (byte **)(new_record + 1); new_record->data= (char **)data_tmp; - to= (byte *)(fields + field_count + 1); + to= (byte *)data_tmp + (field_count + 1)*sizeof(char *); for (; cur_field < fields_end; ++cur_field, ++data_tmp) { - if ((len=net_field_length((uchar **)&cp))) + if ((len= net_field_length((uchar **)&cp)) == 0 || + len == NULL_LENGTH) { *data_tmp= 0; } @@ -118,9 +122,10 @@ bool Protocol_cursor::write() { if ((long)len > (end_pos - cp)) { -// TODO error signal send_error(thd, CR_MALFORMED_PACKET); + // TODO error signal send_error(thd, CR_MALFORMED_PACKET); return TRUE; } + *data_tmp= to; memcpy(to,(char*) cp,len); to[len]=0; to+=len+1; @@ -129,6 +134,7 @@ bool Protocol_cursor::write() cur_field->max_length=len; } } + *data_tmp= 0; *prev_record= new_record; prev_record= &new_record->next; @@ -136,8 +142,6 @@ bool Protocol_cursor::write() row_count++; return FALSE; err: -// TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); + // TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); return TRUE; } - - diff --git a/sql/records.cc b/sql/records.cc index e5a0d102b10..5963c36afd9 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -189,7 +189,7 @@ static int rr_sequential(READ_RECORD *info) { if (info->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + info->thd->send_kill_message(); return 1; } if (tmp != HA_ERR_RECORD_DELETED) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 720e82f414e..253b2c96545 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -459,7 +459,8 @@ int show_new_master(THD* thd) field_list.push_back(new Item_empty_string("Log_name", 20)); field_list.push_back(new Item_return_int("Log_pos", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); protocol->prepare_for_resend(); protocol->store(lex_mi->log_file_name, &my_charset_bin); @@ -650,7 +651,8 @@ int show_slave_hosts(THD* thd) field_list.push_back(new Item_return_int("Master_id", 10, MYSQL_TYPE_LONG)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); pthread_mutex_lock(&LOCK_slave_list); diff --git a/sql/set_var.cc b/sql/set_var.cc index a97506ad07c..ccac3082d5b 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -83,7 +83,7 @@ TYPELIB delay_key_write_typelib= delay_key_write_type_names, NULL }; -static int sys_check_charset(THD *thd, set_var *var); +static int sys_check_charset(THD *thd, set_var *var); static bool sys_update_charset(THD *thd, set_var *var); static void sys_set_default_charset(THD *thd, enum_var_type type); static int sys_check_ftb_syntax(THD *thd, set_var *var); @@ -98,6 +98,7 @@ static bool set_option_autocommit(THD *thd, set_var *var); static int check_log_update(THD *thd, set_var *var); static bool set_log_update(THD *thd, set_var *var); static int check_pseudo_thread_id(THD *thd, set_var *var); +static bool set_log_bin(THD *thd, set_var *var); static void fix_low_priority_updates(THD *thd, enum_var_type type); static void 
fix_tx_isolation(THD *thd, enum_var_type type); static void fix_net_read_timeout(THD *thd, enum_var_type type); @@ -127,6 +128,11 @@ static byte *get_warning_count(THD *thd); alphabetic order */ +sys_var_thd_ulong sys_auto_increment_increment("auto_increment_increment", + &SV::auto_increment_increment); +sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset", + &SV::auto_increment_offset); + sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size", &binlog_cache_size); sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size", @@ -269,6 +275,10 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); +sys_var_thd_ulong sys_optimizer_prune_level("optimizer_prune_level", + &SV::optimizer_prune_level); +sys_var_thd_ulong sys_optimizer_search_depth("optimizer_search_depth", + &SV::optimizer_search_depth); sys_var_thd_ulong sys_preload_buff_size("preload_buffer_size", &SV::preload_buff_size); sys_var_thd_ulong sys_read_buff_size("read_buffer_size", @@ -328,6 +338,11 @@ sys_var_thd_ulong sys_sort_buffer("sort_buffer_size", &SV::sortbuff_size); sys_var_thd_sql_mode sys_sql_mode("sql_mode", &SV::sql_mode); +sys_var_thd_enum +sys_updatable_views_with_limit("updatable_views_with_limit", + &SV::updatable_views_with_limit, + &updatable_views_with_limit_typelib); + sys_var_thd_table_type sys_table_type("table_type", &SV::table_type); sys_var_thd_storage_engine sys_storage_engine("storage_engine", @@ -398,8 +413,8 @@ static sys_var_thd_bit sys_log_update("sql_log_update", OPTION_UPDATE_LOG); static sys_var_thd_bit sys_log_binlog("sql_log_bin", check_log_update, - set_log_update, - OPTION_BIN_LOG); + set_log_bin, + OPTION_BIN_LOG); static sys_var_thd_bit sys_sql_warnings("sql_warnings", 0, set_option_bit, OPTION_WARNINGS); @@ -474,6 +489,8 @@ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); sys_var *sys_variables[]= { &sys_auto_is_null, + &sys_auto_increment_increment, + &sys_auto_increment_offset, &sys_autocommit, &sys_big_tables, &sys_big_selects, @@ -552,6 +569,8 @@ sys_var *sys_variables[]= &sys_net_write_timeout, &sys_new_mode, &sys_old_passwords, + &sys_optimizer_prune_level, + &sys_optimizer_search_depth, &sys_preload_buff_size, &sys_pseudo_thread_id, &sys_query_alloc_block_size, @@ -612,8 +631,9 @@ sys_var *sys_variables[]= &sys_innodb_table_locks, &sys_innodb_max_purge_lag, &sys_innodb_autoextend_increment, -#endif +#endif &sys_unique_checks, + &sys_updatable_views_with_limit, &sys_warning_count }; @@ -623,6 +643,8 @@ sys_var *sys_variables[]= */ struct show_var_st init_vars[]= { + {"auto_increment_increment", (char*) &sys_auto_increment_increment, SHOW_SYS}, + {"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS}, {"back_log", (char*) &back_log, SHOW_LONG}, {"basedir", mysql_home, SHOW_CHAR}, #ifdef HAVE_BERKELEY_DB @@ -780,6 +802,10 @@ struct show_var_st init_vars[]= { {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, + {sys_optimizer_prune_level.name, (char*) &sys_optimizer_prune_level, + SHOW_SYS}, + {sys_optimizer_search_depth.name,(char*) &sys_optimizer_search_depth, + SHOW_SYS}, {"pid_file", (char*) pidfile_name, SHOW_CHAR}, {"port", (char*) &mysqld_port, SHOW_INT}, {sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS}, @@ -846,6 
+872,8 @@ struct show_var_st init_vars[]= { SHOW_SYS}, {sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS}, {sys_tx_isolation.name, (char*) &sys_tx_isolation, SHOW_SYS}, + {sys_updatable_views_with_limit.name, + (char*) &sys_updatable_views_with_limit,SHOW_SYS}, {"version", server_version, SHOW_CHAR}, #ifdef HAVE_BERKELEY_DB {"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR}, @@ -1932,7 +1960,7 @@ void sys_var_character_set_server::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool sys_var_character_set_server::check(THD *thd, set_var *var) { if ((var->type == OPT_GLOBAL) && @@ -2039,7 +2067,7 @@ void sys_var_collation_database::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool sys_var_collation_server::check(THD *thd, set_var *var) { if ((var->type == OPT_GLOBAL) && @@ -2391,7 +2419,7 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) String str(buff, sizeof(buff), &my_charset_latin1); String *res= var->value->val_str(&str); -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) if ((var->type == OPT_GLOBAL) && (mysql_bin_log.is_open() || active_mi->slave_running || active_mi->rli.slave_running)) @@ -2403,7 +2431,7 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) #endif if (!(var->save_result.time_zone= - my_tz_find(res, thd->lex->time_zone_tables_used))) + my_tz_find(res, thd->lex->time_zone_tables_used))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL"); return 1; @@ -2517,6 +2545,30 @@ static int check_log_update(THD *thd, set_var *var) static bool set_log_update(THD *thd, set_var *var) { + /* + The update log is not supported anymore since 5.0. 
+ See sql/mysqld.cc, comments in function init_server_components() for an + explanation of the different warnings we send below + */ + + if (opt_sql_bin_update) + { + ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | + OPTION_UPDATE_LOG); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_TRANSLATED, + ER(ER_UPDATE_LOG_DEPRECATED_TRANSLATED)); + } + else + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_IGNORED, + ER(ER_UPDATE_LOG_DEPRECATED_IGNORED)); + set_option_bit(thd, var); + return 0; +} + +static bool set_log_bin(THD *thd, set_var *var) +{ if (opt_sql_bin_update) ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | OPTION_UPDATE_LOG); @@ -3043,7 +3095,7 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); if (sql_mode & MODE_MSSQL) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | @@ -3063,11 +3115,15 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); if (sql_mode & MODE_MYSQL40) sql_mode|= MODE_NO_FIELD_OPTIONS; if (sql_mode & MODE_MYSQL323) sql_mode|= MODE_NO_FIELD_OPTIONS; + if (sql_mode & MODE_TRADITIONAL) + sql_mode|= (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES | + MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER); return sql_mode; } diff --git a/sql/set_var.h b/sql/set_var.h index 4a4e631d88c..c34ebbca4b2 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -49,20 +49,14 @@ public: const char *name; sys_after_update_func after_update; -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot; -#endif sys_var(const char *name_arg) :name(name_arg), after_update(0) -#if MYSQL_VERSION_ID < 50000 , no_support_one_shot(1) -#endif {} sys_var(const char *name_arg,sys_after_update_func func) :name(name_arg), after_update(func) -#if MYSQL_VERSION_ID < 50000 , no_support_one_shot(1) -#endif {} virtual ~sys_var() {} virtual bool check(THD *thd, set_var *var); @@ -507,9 +501,7 @@ class sys_var_collation :public sys_var_thd public: sys_var_collation(const char *name_arg) :sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -529,13 +521,11 @@ public: sys_var_thd(name_arg) { nullable= 0; -#if MYSQL_VERSION_ID < 50000 /* In fact almost all variables derived from sys_var_character_set support ONE_SHOT; character_set_results doesn't. But that's good enough.
*/ no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -574,7 +564,7 @@ class sys_var_character_set_server :public sys_var_character_set public: sys_var_character_set_server(const char *name_arg) : sys_var_character_set(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool check(THD *thd, set_var *var); #endif void set_default(THD *thd, enum_var_type type); @@ -612,7 +602,7 @@ class sys_var_collation_server :public sys_var_collation { public: sys_var_collation_server(const char *name_arg) :sys_var_collation(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool check(THD *thd, set_var *var); #endif bool update(THD *thd, set_var *var); @@ -722,9 +712,7 @@ public: sys_var_thd_time_zone(const char *name_arg): sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -752,9 +740,7 @@ public: virtual int update(THD *thd)=0; /* To set the value */ /* light check for PS */ virtual int light_check(THD *thd) { return check(thd); } -#if MYSQL_VERSION_ID < 50000 virtual bool no_support_one_shot() { return 1; } -#endif }; @@ -797,9 +783,7 @@ public: int check(THD *thd); int update(THD *thd); int light_check(THD *thd); -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot() { return var->no_support_one_shot; } -#endif }; diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt index 022a624c921..e3c08eeca89 100644 --- a/sql/share/czech/errmsg.txt +++ b/sql/share/czech/errmsg.txt @@ -244,8 +244,8 @@ character-set=latin2 "Deadlock found when trying to get lock; try restarting transaction", "The used table type doesn't support FULLTEXT indexes", "Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", +"Cannot add or update a child row: a foreign key constraint fails", +"Cannot delete or update a parent row: a foreign key constraint fails", "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", @@ -291,8 +291,8 @@ character-set=latin2 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -306,21 +306,21 @@ character-set=latin2 "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", "Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, 
you will get problems if you get an unexpected slave's mysqld restart", "SQL thread is not to be started so UNTIL options are ignored", "Incorrect index name '%-.100s'", "Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", +"Query cache failed to set size %lu; new query cache size is %lu", "Column '%-.64s' cannot be part of FULLTEXT index", "Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", +"MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work", "Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", +"'%s' is deprecated; use '%s' instead", "The target table %-.100s of the %s is not updatable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", +"The '%s' feature is disabled; you need MySQL built with '%s' to have it working", "The MySQL server is running with the %s option so it cannot execute this statement", "Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" +"Truncated incorrect %-.32s value: '%-.128s'" "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" "Invalid ON UPDATE clause for '%-.64s' column", "This command is not supported in the prepared statement protocol yet", @@ -331,3 +331,71 @@ character-set=latin2 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is 
not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt index 18ebe5712f8..78610219ade 100644 --- a/sql/share/danish/errmsg.txt +++ b/sql/share/danish/errmsg.txt @@ -282,8 +282,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -322,3 +322,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler 
declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt index 54377b5949a..051a2a13ab8 100644 --- a/sql/share/dutch/errmsg.txt +++ b/sql/share/dutch/errmsg.txt @@ -291,8 +291,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -331,3 +331,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s 
ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt index 8ede3f61a0b..bb2a9cb59ba 100644 --- a/sql/share/english/errmsg.txt +++ b/sql/share/english/errmsg.txt @@ -279,8 +279,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -319,3 +319,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have 
INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt index 5aab524e0d9..14764d764aa 100644 --- a/sql/share/estonian/errmsg.txt +++ b/sql/share/estonian/errmsg.txt @@ -284,8 +284,8 @@ character-set=latin7 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation 
'%s'", @@ -324,3 +324,71 @@ character-set=latin7 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt index 355e784b156..7c3e833c903 100644 --- a/sql/share/french/errmsg.txt +++ b/sql/share/french/errmsg.txt @@ -279,8 +279,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain 
data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -319,3 +319,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for 
column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt index 03e838dd805..7cda8bef089 100644 --- a/sql/share/german/errmsg.txt +++ b/sql/share/german/errmsg.txt @@ -332,3 +332,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s, expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable 
view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt index 06f31a79a73..7b9bf7e967e 100644 --- a/sql/share/greek/errmsg.txt +++ b/sql/share/greek/errmsg.txt @@ -279,8 +279,8 @@ character-set=greek "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -319,3 +319,71 @@ character-set=greek "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or 
column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt index af10c33ee2d..d4e5c5c744f 100644 --- a/sql/share/hungarian/errmsg.txt +++ b/sql/share/hungarian/errmsg.txt @@ -284,8 +284,8 @@ character-set=latin2 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -324,3 +324,71 @@ character-set=latin2 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" 
+"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt index cd66f15db5f..c697edb4cd7 100644 --- a/sql/share/italian/errmsg.txt +++ b/sql/share/italian/errmsg.txt @@ -279,8 +279,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -319,3 +319,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration 
after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt index eaab58d8403..e1a32f1894b 100644 --- a/sql/share/japanese/errmsg.txt +++ b/sql/share/japanese/errmsg.txt @@ -283,8 +283,8 @@ character-set=ujis "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -323,3 +323,71 @@ character-set=ujis "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found 
in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt index 1cff50432e9..adb08b67474 100644 --- a/sql/share/korean/errmsg.txt +++ b/sql/share/korean/errmsg.txt @@ -279,8 +279,8 @@ character-set=euckr "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -319,3 +319,71 @@ character-set=euckr "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a 
stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt index 27f7a18f029..057accd3c5b 100644 --- a/sql/share/norwegian-ny/errmsg.txt +++ b/sql/share/norwegian-ny/errmsg.txt @@ -281,8 +281,8 @@ character-set=latin1 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of 
collations (%s,%s) and (%s,%s) for operation '%s'", @@ -321,3 +321,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt index 772f30e5d94..5506d95592a 100644 --- a/sql/share/norwegian/errmsg.txt +++ b/sql/share/norwegian/errmsg.txt @@ -281,8 +281,8 @@ character-set=latin1 "%d 
line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -321,3 +321,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", 
+"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt index 634a4d93f42..38cefd45338 100644 --- a/sql/share/polish/errmsg.txt +++ b/sql/share/polish/errmsg.txt @@ -284,8 +284,8 @@ character-set=latin2 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -324,3 +324,71 @@ character-set=latin2 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here 
for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt index c79c346008e..7689db06451 100644 --- a/sql/share/portuguese/errmsg.txt +++ b/sql/share/portuguese/errmsg.txt @@ -321,3 +321,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of 
underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt index 7cb0427dc3f..ad486367ec9 100644 --- a/sql/share/romanian/errmsg.txt +++ b/sql/share/romanian/errmsg.txt @@ -284,8 +284,8 @@ character-set=latin2 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -324,3 +324,71 @@ character-set=latin2 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying 
table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt index df354d5797f..d057ab53256 100644 --- a/sql/share/russian/errmsg.txt +++ b/sql/share/russian/errmsg.txt @@ -284,8 +284,8 @@ character-set=koi8r "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -324,3 +324,71 @@ character-set=koi8r "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed 
in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Слишком большой конфигурационный файл '%-.64s'" +"Неверный заголовок типа файла '%-.64s'" +"Неожиданный конец файла в коментарии '%-.64s'" +"Ошибка при распознавании параметра '%-.64s' (строка: '%-.64s')" +"Неожиданный конец файла при пропуске неизвестного параметра '%-.64s'" +"EXPLAIN/SHOW не может быть выполненно; недостаточно прав на такблицы запроса" +"Файл '%-.64s' содержит неизвестный тип '%-.64s' в заголовке" +"'%-.64s.%-.64s' - не %s" +"Столбец '%-.64s' не обновляемый" +"View SELECT содержит подзапрос в конструкции FROM" +"View SELECT содержит конструкцию '%s'" +"View SELECT содержит переменную или параметр" +"View SELECT содержит ссылку на временную таблицу '%-.64s'" +"View SELECT и список полей view имеют разное количество столбцов" +"Алгоритм слияния view не может быть использован сейчас (алгоритм будет неопеределенным)" +"Обновляемый view не содержит ключа использованной в нем таблиц(ы)" +"View '%-.64s.%-.64s' ссылается на несуществующие таблицы или столбцы" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION для необновляемого VIEW '%-.64s.%-.64s'" +"проверка CHECK OPTION для VIEW '%-.64s.%-.64s' провалилась" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/serbian/errmsg.txt index 45b56c8269c..6e38b1c3d11 100644 --- a/sql/share/serbian/errmsg.txt +++ b/sql/share/serbian/errmsg.txt @@ -272,8 +272,8 @@ character-set=cp1250 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -312,3 +312,71 @@ character-set=cp1250 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was
interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt index 12c3eb2b6af..a9667b8ba62 100644 --- a/sql/share/slovak/errmsg.txt +++ b/sql/share/slovak/errmsg.txt @@ -287,8 +287,8 @@ character-set=latin2 "%d line(s) were cut by GROUP_CONCAT()", "Row %ld doesn't contain data for all columns", "Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld", +"Out of range value adjusted for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -327,3 +327,71 @@ character-set=latin2 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching 
label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt index bd2439c44a6..17600f336c0 100644 --- a/sql/share/spanish/errmsg.txt +++ b/sql/share/spanish/errmsg.txt @@ -140,7 +140,7 @@ character-set=latin1 "Muchos campos", "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob", "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario", -"Dependencia cruzada encontrada en OUTER JOIN; examine su condición ON", +"Dependencia cruzada encontrada en OUTER JOIN. 
Examine su condición ON", "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL", "No puedo cargar función '%-.64s'", "No puedo inicializar función '%-.64s'; %-.80s", @@ -182,7 +182,7 @@ character-set=latin1 "Obtenido timeout leyendo paquetes de comunicación", "Obtenido un error de escribiendo paquetes de comunicación", "Obtenido timeout escribiendo paquetes de comunicación", -"La string resultante es mayor que 'max_allowed_packet'", +"La string resultante es mayor que max_allowed_packet", "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT", "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT", "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES", @@ -263,7 +263,7 @@ character-set=latin1 "Referencia de llave y referencia de tabla no coinciden", "Operando debe tener %d columna(s)", "Subconsulta retorna mas que 1 línea", -"Desconocido preparado comando handler (%.*s) dado para %s", +"Desconocido preparado comando handler (%ld) dado para %s", "Base de datos Help está corrupto o no existe", "Cíclica referencia en subconsultas", "Convirtiendo columna '%s' de %s para %s", @@ -272,15 +272,15 @@ character-set=latin1 "Select %u fué reducido durante optimización", "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s", "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL", -"Todas las partes de una SPATIAL index deben ser NOT NULL", +"Todas las partes de una SPATIAL KEY deben ser NOT NULL", "COLLATION '%s' no es válido para CHARACTER SET '%s'", "Slave ya está funcionando", "Slave ya fué parado", "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: No suficiente memoria", -"ZLIB: No suficiente espacio en el búfer de salida (probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: Dato de entrada fué corrompido", -"%d línea(s) fueron cortadas por GROUP_CONCAT()", +"Z_MEM_ERROR: No suficiente memoria para zlib", +"Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)", +"Z_DATA_ERROR: Dato de entrada fué corrompido para zlib", +"%d línea(s) fue(fueron) cortadas por group_concat()", "Línea %ld no contiene datos para todas las columnas", "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada", "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld", @@ -323,3 +323,71 @@ character-set=latin1 "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments 
for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt index da75e4fcede..8b061ac0eec 100644 --- a/sql/share/swedish/errmsg.txt +++ b/sql/share/swedish/errmsg.txt @@ -178,7 +178,7 @@ character-set=latin1 "Fick 'timeout' vid läsning från klienten", "Fick ett fel vid skrivning till klienten", "Fick 'timeout' vid skrivning till klienten", -"Resultatsträngen är längre än 'max_allowed_packet'", +"Resultatsträngen är längre än max_allowed_packet", "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner", "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner", "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES", @@ -259,7 +259,7 @@ character-set=latin1 "Nyckelreferensen och tabellreferensen stämmer inte överens", "Operand should contain %d column(s)", "Subquery returnerade mer än 1 rad", -"Okänd PREPARED STATEMENT id (%.*s) var given till %s", +"Okänd PREPARED STATEMENT id (%ld) var given till %s", "Hjälpdatabasen finns inte eller är skadad", "Cyklisk referens i subqueries", "Konvertar kolumn '%s' från %s till %s", @@ -268,19 +268,19 @@ character-set=latin1 "Select %u reducerades vid optimiering", "Tabell '%-.64s' från en SELECT kan inte användas 
i %-.32s", "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet.", -"Alla delar av en SPATIAL index måste vara NOT NULL", +"Alla delar av en SPATIAL KEY måste vara NOT NULL", "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'", "Slaven har redan startat", "Slaven har redan stoppat", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d rad(er) kapades av GROUP_CONCAT()", +"Too big size of uncompressed data. The maximum size is %d. (probably, length of uncompressed data was corrupted)", +"Z_MEM_ERROR: Not enough memory available for zlib", +"Z_BUF_ERROR: Not enough room in the output buffer for zlib (probably, length of uncompressed data was corrupted)", +"Z_DATA_ERROR: Input data was corrupted for zlib", +"%d rad(er) kapades av group_concat()", "Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Row %ld was truncated; It contained more data than there were input columns", +"Data truncated, NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated, out of range for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Använder handler %s för tabell '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -288,13 +288,13 @@ character-set=latin1 "Can't revoke all privileges, grant for one or more of the requested users", "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", "Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Variable '%-.64s' is not a variable component (Can't be used as XXXX.variable_name)", "Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started", "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"Wrong parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL, otherwise you are not safe in case of unexpected slave's mysqld restart", "SQL thread is not to be started so UNTIL options are ignored", "Felaktigt index namn '%-.100s'", "Felaktigt katalog namn '%-.100s'", @@ -309,13 +309,81 @@ character-set=latin1 "MySQL är startad med --skip-grant-tables. 
Pga av detta kan du inte använda detta kommando", "Column '%-.100s' has duplicated value '%-.64s' in %s" "Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", +"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' field", "This command is not supported in the prepared statement protocol yet", -"Fick felkod %d '%-.100s' från %s", -"Fick tilfällig felkod %d '%-.100s' från %s", +"Got error %d '%-.100s' from %s", +"Got temporary error %d '%-.100s' from %s", "Unknown or incorrect time zone: '%-.64s'", "Invalid TIMESTAMP value in column '%s' at row %ld", "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"Configuration file '%-.64s' is too big" +"Malformed file type header in file '%-.64s'" +"Unexpected end of file while parsing comment '%-.64s'" +"Error while parsing parameter '%-.64s' (line: '%-.64s')" +"Unexpected end of file while skipping unknown parameter '%-.64s'" +"EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" +"File '%-.64s' has unknown type '%-.64s' in its header" +"'%-.64s.%-.64s' is not %s" +"Column '%-.64s' is not updatable" +"View's SELECT contains a subquery in the FROM clause" +"View's SELECT contains a '%s' clause" +"View's SELECT contains a variable or parameter" +"View's SELECT contains a temporary table '%-.64s'" +"View's SELECT and view's field list have different column counts" +"View merge algorithm can't be used here for now (assumed undefined algorithm)" +"View being updated does not have complete key of underlying table in it" +"View '%-.64s.%-.64s' references invalid table(s) or column(s)" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does 
not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION on non-updatable view '%-.64s.%-.64s'" +"CHECK OPTION failed '%-.64s.%-.64s'" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt index a19cf946cb1..317b2d596fa 100644 --- a/sql/share/ukrainian/errmsg.txt +++ b/sql/share/ukrainian/errmsg.txt @@ -175,7 +175,7 @@ character-set=koi8u "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s", "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ", "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö 'max_allowed_packet'", +"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()", "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ", @@ -184,7 +184,7 @@ character-set=koi8u "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö 'max_allowed_packet'", +"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ", "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", @@ -235,7 +235,7 @@ character-set=koi8u "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s", "ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×", "Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", +"Deadlock found when trying to get lock; Try restarting transaction", "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×", "Cannot add foreign key constraint", "Cannot add a child row: a foreign key constraint fails", @@ -243,50 +243,50 @@ character-set=koi8u "Error connecting to master: %-.128s", "Error running query on master: %-.128s", "Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", +"Wrong usage of %s and %s", "The used SELECT statements have a different number of columns", "Can't execute the query because you have a conflicting read lock", "Mixing of transactional and non-transactional tables is disabled", "Option '%s' used twice in statement", "User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", +"Access denied. 
You need the %-.128s privilege for this operation", "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", "Variable '%-.64s' doesn't have a default value", "Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", +"Wrong argument type to variable '%-.64s'", "Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", +"Wrong usage/placement of '%s'", "This version of MySQL doesn't yet support '%s'", "Got fatal error %d: '%-.128s' from master when reading data from binary log", "Slave SQL thread ignored the query because of replicate-*-table rules", "Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", +"Wrong foreign key definition for '%-.64s': %s", +"Key reference and table reference doesn't match", "ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×", "ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ ¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ", -"Unknown prepared statement handler (%.*s) given to %s", +"Unknown prepared statement handler (%ld) given to %s", "Help database is corrupt or does not exist", "ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ", "ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s", "ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)", -"Every derived table must have its own alias", +"Every derived table must have it's own alias", "Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", +"Table '%-.64s' from one of SELECT's can not be used in %-.32s", "Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", +"All parts of a SPATIAL KEY must be NOT NULL", "COLLATION '%s' is not valid for CHARACTER SET '%s'", "Slave is already running", "Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", +"Too big size of uncompressed data. The maximum size is %d. 
(probably, length of uncompressed data was corrupted)", +"Z_MEM_ERROR: Not enough memory available for zlib", +"Z_BUF_ERROR: Not enough room in the output buffer for zlib (probably, length of uncompressed data was corrupted)", +"Z_DATA_ERROR: Input data was corrupted for zlib", +"%d line(s) was(were) cut by group_concat()", "Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", +"Row %ld was truncated; It contained more data than there were input columns", +"Data truncated, NULL supplied to NOT NULL column '%s' at row %ld", +"Data truncated, out of range for column '%s' at row %ld", "Data truncated for column '%s' at row %ld", "Using storage engine %s for table '%s'", "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", @@ -294,13 +294,13 @@ character-set=koi8u "Can't revoke all privileges, grant for one or more of the requested users", "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", "Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", +"Variable '%-.64s' is not a variable component (Can't be used as XXXX.variable_name)", "Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", +"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started", "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", +"Wrong parameter or combination of parameters for START SLAVE UNTIL", +"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL, otherwise you are not safe in case of unexpected slave's mysqld restart", "SQL thread is not to be started so UNTIL options are ignored", "Incorrect index name '%-.100s'", "Incorrect catalog name '%-.100s'", @@ -315,8 +315,8 @@ character-set=koi8u "The MySQL server is running with the %s option so it cannot execute this statement", "Column '%-.100s' has duplicated value '%-.64s' in %s" "Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", +"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +"Invalid ON UPDATE clause for '%-.64s' field", "This command is not supported in the prepared statement protocol yet", "Got error %d '%-.100s' from %s", "Got temporary error %d '%-.100s' from %s", @@ -325,3 +325,71 @@ character-set=koi8u "Invalid %s character string: '%.64s'", "Result of %s() was larger than max_allowed_packet (%ld) - truncated" "Conflicting declarations: '%s%s' and '%s%s'" +"Can't create a %s from 
within another stored routine" +"%s %s already exists" +"%s %s does not exist" +"Failed to DROP %s %s" +"Failed to CREATE %s %s" +"%s with no matching label: %s" +"Redefining label %s" +"End-label %s without match" +"Referring to uninitialized variable %s" +"SELECT in a stored procedure must have INTO" +"RETURN is only allowed in a FUNCTION" +"Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +"The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" +"Query execution was interrupted" +"Incorrect number of arguments for %s %s; expected %u, got %u" +"Undefined CONDITION: %s" +"No RETURN found in FUNCTION %s" +"FUNCTION %s ended without RETURN" +"Cursor statement must be a SELECT" +"Cursor SELECT must not have INTO" +"Undefined CURSOR: %s" +"Cursor is already open" +"Cursor is not open" +"Undeclared variable: %s" +"Incorrect number of FETCH variables" +"No data to FETCH" +"Duplicate parameter: %s" +"Duplicate variable: %s" +"Duplicate condition: %s" +"Duplicate cursor: %s" +"Failed to ALTER %s %s" +"Subselect value not supported" +"USE is not allowed in a stored procedure" +"Variable or condition declaration after cursor or handler declaration" +"Cursor declaration after handler declaration" +"Case not found for CASE statement" +"úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'" +"îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'" +"îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'" +"ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')" +"îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'" +"EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ" +"æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ" +"'%-.64s.%-.64s' ÎÅ ¤ %s" +"óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ" +"View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM" +"View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'" +"View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ" +"View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'" +"View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØ˦ÓÔØ ÓËÏ×Âæ×" +"áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)" +"View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ" +"View '%-.64s.%-.64s' ÐÏÓÉÌÁ¤ÔÓÑ ÎÁ ÎŦÓÎÕÀÞ¦ ÔÁÂÌÉæ ÁÂÏ ÓÔÏ×Âæ" +"Can't drop a %s from within another stored routine" +"GOTO is not allowed in a stored procedure handler" +"Trigger already exists" +"Trigger does not exist" +"Trigger's '%-.64s' is view or temporary table" +"Updating of %s row is not allowed in %strigger" +"There is no %s row in %s trigger" +"Field '%-.64s' doesn't have a default value", +"Division by 0", +"Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld", +"Illegal %s '%-.64s' value found during parsing", +"CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ" +"ÐÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ" +"Access denied; you are not the procedure/function definer of '%s'" diff --git a/sql/slave.cc b/sql/slave.cc index b6ad35573ff..b25bdb7e926 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -74,7 +74,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); - /* Find out which replications threads are running @@ -218,6 +217,13 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, pos Position in relay log file 
need_data_lock Set to 1 if this functions should do mutex locks
errmsg Store pointer to error message here
+ look_for_description_event
+ 1 if we should look for such an event. We only need
+ this when the SQL thread starts and opens an existing
+ relay log and has to execute it (possibly from an
+ offset >4); then we need to read the first event of
+ the relay log to be able to parse the events we have
+ to execute.
DESCRIPTION
- Close old open relay log files.
@@ -235,15 +241,35 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len,
int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
ulonglong pos, bool need_data_lock,
- const char** errmsg)
+ const char** errmsg,
+ bool look_for_description_event)
{
DBUG_ENTER("init_relay_log_pos");
+ DBUG_PRINT("info", ("pos=%lu", pos));
*errmsg=0;
pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
if (need_data_lock)
pthread_mutex_lock(&rli->data_lock);
+
+ /*
+ Slave threads are not the only users of init_relay_log_pos(). CHANGE MASTER
+ is, too, and init_slave() too; these 2 functions allocate a description
+ event in init_relay_log_pos, which is not freed by the terminating SQL slave
+ thread as that thread is not started by these functions. So we have to free
+ the description_event here, in case, so that there is no memory leak in
+ running, say, CHANGE MASTER.
+ */
+ delete rli->relay_log.description_event_for_exec;
+ /*
+ By default the relay log is in binlog format 3 (4.0).
+ Even if format is 4, this will work enough to read the first event
+ (Format_desc) (remember that format 4 is just lengthened compared to format
+ 3; format 3 is a prefix of format 4).
+ */
+ rli->relay_log.description_event_for_exec= new
+ Format_description_log_event(3);
pthread_mutex_lock(log_lock);
@@ -283,9 +309,8 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
In this case, we will use the same IO_CACHE pointer to read data
as the IO thread is using to write data.
*/
- rli->cur_log= rli->relay_log.get_log_file();
- if (my_b_tell(rli->cur_log) == 0 &&
- check_binlog_magic(rli->cur_log, errmsg))
+ my_b_seek((rli->cur_log=rli->relay_log.get_log_file()), (off_t)0);
+ if (check_binlog_magic(rli->cur_log,errmsg))
goto err;
rli->cur_log_old_open_count=rli->relay_log.get_open_count();
}
@@ -299,8 +324,85 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
goto err;
rli->cur_log = &rli->cache_buf;
}
- if (pos >= BIN_LOG_HEADER_SIZE)
+ /*
+ In all cases, check_binlog_magic() has been called so we're at offset 4 for
+ sure.
+ */
+ if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */
+ {
+ Log_event* ev;
+ while (look_for_description_event)
+ {
+ /*
+ Read the possible Format_description_log_event; if position
+ was 4, no need, it will be read naturally.
+ */
+ DBUG_PRINT("info",("looking for a Format_description_log_event"));
+
+ if (my_b_tell(rli->cur_log) >= pos)
+ break;
+
+ /*
+ Because we have rli->data_lock and log_lock, we can safely read an
+ event
+ */
+ if (!(ev=Log_event::read_log_event(rli->cur_log,0,
+ rli->relay_log.description_event_for_exec)))
+ {
+ DBUG_PRINT("info",("could not read event, rli->cur_log->error=%d",
+ rli->cur_log->error));
+ if (rli->cur_log->error) /* not EOF */
+ {
+ *errmsg= "I/O error reading event at position 4";
+ goto err;
+ }
+ break;
+ }
+ else if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
+ {
+ DBUG_PRINT("info",("found Format_description_log_event"));
+ delete rli->relay_log.description_event_for_exec;
+ rli->relay_log.description_event_for_exec= (Format_description_log_event*) ev;
+ /*
+ As ev was returned by read_log_event, it has passed is_valid(), so
+ my_malloc() in ctor worked, no need to check again.
+ */
+ /*
+ Ok, we found a Format_description event. But it is not sure that this
+ describes the whole relay log; indeed, one can have this sequence
+ (starting from position 4):
+ Format_desc (of slave)
+ Rotate (of master)
+ Format_desc (of master)
+ So the Format_desc which really describes the rest of the relay log
+ is the 3rd event (it can't be further than that, because we rotate
+ the relay log when we queue a Rotate event from the master).
+ But what describes the Rotate is the first Format_desc.
+ So what we do is:
+ go on searching for Format_description events, until you exceed the
+ position (argument 'pos') or until you find an event other than Rotate
+ or Format_desc.
+ */
+ }
+ else
+ {
+ DBUG_PRINT("info",("found event of another type=%d",
+ ev->get_type_code()));
+ look_for_description_event= (ev->get_type_code() == ROTATE_EVENT);
+ delete ev;
+ }
+ }
my_b_seek(rli->cur_log,(off_t)pos);
+#ifndef DBUG_OFF
+ {
+ char llbuf1[22], llbuf2[22];
+ DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
+ llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(rli->event_relay_log_pos,llbuf2)));
+ }
+#endif
+
+ }
err:
/*
@@ -315,13 +417,15 @@ err:
if (need_data_lock)
pthread_mutex_unlock(&rli->data_lock);
+ if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg)
+ *errmsg= "Invalid Format_description log event; could be out of memory";
DBUG_RETURN ((*errmsg) ? 1 : 0);
}
/*
- Init functio to set up array for errors that should be skipped for slave
+ Init function to set up array for errors that should be skipped for slave
SYNOPSIS
init_slave_skip_errors()
@@ -360,16 +464,15 @@ void init_slave_skip_errors(const char* arg)
}
-void st_relay_log_info::inc_group_relay_log_pos(ulonglong val,
- ulonglong log_pos,
- bool skip_lock)
+void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
+ bool skip_lock)
{
if (!skip_lock)
pthread_mutex_lock(&data_lock);
- inc_event_relay_log_pos(val);
+ inc_event_relay_log_pos();
group_relay_log_pos= event_relay_log_pos;
strmake(group_relay_log_name,event_relay_log_name,
- sizeof(group_relay_log_name)-1);
+ sizeof(group_relay_log_name)-1);
notify_group_relay_log_name_update();
@@ -383,24 +486,31 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong val,
not advance as it should on the non-transactional slave (it advances by
big leaps, whereas it should advance by small leaps).
*/
+ /*
+ In 4.x we used the event's len to compute the positions here. This is
+ wrong if the event was 3.23/4.0 and has been converted to 5.0, because
+ then the event's len is not what it was in the master's binlog, so this
+ will make a wrong group_master_log_pos (yes it's a bug in 3.23->4.0
+ replication: Exec_master_log_pos is wrong). Only way to solve this is to
+ have the original offset of the end of the event in the relay log. This is
+ what we do in 5.0: log_pos has become "end_log_pos" (because the real use
+ of log_pos in 4.0 was to compute the end_log_pos; so better to store
+ end_log_pos instead of begin_log_pos.
+ If we had not done this fix here, the problem would also have appeared
+ when the slave and master are 5.0 but with different event length (for
+ example the slave is more recent than the master and features the event
+ UID). It would give false MASTER_POS_WAIT, false Exec_master_log_pos in
+ SHOW SLAVE STATUS, and so the user would do some CHANGE MASTER using this
+ value which would lead to badly broken replication.
+ Even the relay_log_pos will be corrupted in this case, because the len in
+ the relay log is not "val".
+ With the end_log_pos solution, we avoid computations involving lengths.
+ */
+ DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
+ (long) log_pos, (long) group_master_log_pos));
if (log_pos) // 3.23 binlogs don't have log_pos
{
-#if MYSQL_VERSION_ID < 50000
- /*
- If the event was converted from a 3.23 format, get_event_len() has
- grown by 6 bytes (at least for most events, except LOAD DATA INFILE
- which is already a big problem for 3.23->4.0 replication); 6 bytes is
- the difference between the header's size in 4.0 (LOG_EVENT_HEADER_LEN)
- and the header's size in 3.23 (OLD_HEADER_LEN). Note that using
- mi->old_format will not help if the I/O thread has not started yet.
- Yes this is a hack but it's just to make 3.23->4.x replication work;
- 3.23->5.0 replication is working much better.
- */
- group_master_log_pos= log_pos + val -
- (mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#else
- group_master_log_pos= log_pos+ val;
-#endif /* MYSQL_VERSION_ID < 5000 */
+ group_master_log_pos= log_pos;
}
pthread_cond_broadcast(&data_cond);
if (!skip_lock)
@@ -481,13 +591,16 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
sizeof(rli->group_relay_log_name)-1);
strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
sizeof(rli->event_relay_log_name)-1);
- // Just first log with magic number and nothing else
- rli->log_space_total= BIN_LOG_HEADER_SIZE;
rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
- rli->relay_log.reset_bytes_written();
+ if (count_relay_log_space(rli))
+ {
+ *errmsg= "Error counting relay log space";
+ goto err;
+ }
if (!just_reset)
- error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos,
- 0 /* do not need data lock */, errmsg);
+ error= init_relay_log_pos(rli, rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 0 /* do not need data lock */, errmsg, 0);
err:
#ifndef DBUG_OFF
@@ -635,7 +748,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
thd->exit_cond(old_msg);
pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released
if (thd->killed)
- DBUG_RETURN(ER_SERVER_SHUTDOWN);
+ DBUG_RETURN(thd->killed_errno());
}
}
if (start_lock)
@@ -754,6 +867,11 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
second call will make the decision (because all_tables_not_ok() =
!tables_ok(1st_list) && !tables_ok(2nd_list)).
+ Thought which arose from a question of a big customer "I want to include + all tables like "abc.%" except the "%.EFG"". This can't be done now. If we + supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/ + (I could not find an equivalent in the regex library MySQL uses). + RETURN VALUES 0 should not be logged/replicated 1 should be logged/replicated @@ -764,7 +882,7 @@ int tables_ok(THD* thd, TABLE_LIST* tables) bool some_tables_updating= 0; DBUG_ENTER("tables_ok"); - for (; tables; tables = tables->next) + for (; tables; tables= tables->next_global) { char hash_key[2*NAME_LEN+2]; char *end; @@ -1163,29 +1281,86 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) return 1; } +/* + Note that we rely on the master's version (3.23, 4.0.14 etc) instead of + relying on the binlog's version. This is not perfect: imagine an upgrade + of the master without waiting that all slaves are in sync with the master; + then a slave could be fooled about the binlog's format. This is what happens + when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0 + slaves are fooled. So we do this only to distinguish between 3.23 and more + recent masters (it's too late to change things for 3.23). + + RETURNS + 0 ok + 1 error +*/ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) { const char* errmsg= 0; - + /* - Note the following switch will bug when we have MySQL branch 30 ;) + Free old description_event_for_queue (that is needed if we are in + a reconnection). */ - switch (*mysql->server_version) { - case '3': - mi->old_format = - (strncmp(mysql->server_version, "3.23.57", 7) < 0) /* < .57 */ ? - BINLOG_FORMAT_323_LESS_57 : - BINLOG_FORMAT_323_GEQ_57 ; - break; - case '4': - mi->old_format = BINLOG_FORMAT_CURRENT; - break; - default: - /* 5.0 is not supported */ - errmsg = "Master reported an unrecognized MySQL version. Note that 4.1 \ -slaves can't replicate a 5.0 or newer master."; - break; + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; + + if (!my_isdigit(&my_charset_bin,*mysql->server_version)) + errmsg = "Master reported unrecognized MySQL version"; + else + { + /* + Note the following switch will bug when we have MySQL branch 30 ;) + */ + switch (*mysql->server_version) + { + case '0': + case '1': + case '2': + errmsg = "Master reported unrecognized MySQL version"; + break; + case '3': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(1, mysql->server_version); + break; + case '4': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3, mysql->server_version); + break; + default: + /* + Master is MySQL >=5.0. Give a default Format_desc event, so that we can + take the early steps (like tests for "is this a 3.23 master") which we + have to take before we receive the real master's Format_desc which will + override this one. Note that the Format_desc we create below is garbage + (it has the format of the *slave*); it's only good to help know if the + master is 3.23, 4.0, etc. + */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(4, mysql->server_version); + break; + } + } + + /* + This does not mean that a 5.0 slave will be able to read a 6.0 master; but + as we don't know yet, we don't want to forbid this for now. If a 5.0 slave + can't read a 6.0 master, this will show up when the slave can't read some + events sent by the master, and there will be error messages. 
+ */ + + if (errmsg) + { + sql_print_error(errmsg); + return 1; + } + + /* as we are here, we tried to allocate the event */ + if (!mi->rli.relay_log.description_event_for_queue) + { + sql_print_error("Slave I/O thread failed to create a default Format_description_log_event"); + return 1; } /* @@ -1534,7 +1709,7 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) if (open_log(&rli->relay_log, glob_hostname, opt_relay_logname, "-relay-bin", opt_relaylog_index_name, LOG_BIN, 1 /* read_append cache */, - 1 /* no auto events */, + 0 /* starting from 5.0 we want relay logs to have auto events */, max_relay_log_size ? max_relay_log_size : max_binlog_size)) { pthread_mutex_unlock(&rli->data_lock); @@ -1569,7 +1744,7 @@ file '%s', errno %d)", fname, my_errno); /* Init relay log with first entry in the relay index file */ if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */, - &msg)) + &msg, 0)) { sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; @@ -1634,7 +1809,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", rli->group_relay_log_name, rli->group_relay_log_pos, 0 /* no data lock*/, - &msg)) + &msg, 0)) { char llbuf[22]; sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)", @@ -1643,8 +1818,18 @@ Failed to open the existing relay log info file '%s' (errno %d)", goto err; } } - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + } +#endif + /* Now change the cache from READ to WRITE - must do this before flush_relay_log_info @@ -1961,7 +2146,7 @@ file '%s')", fname); goto errwithmsg; #ifndef HAVE_OPENSSL if (ssl) - sql_print_error("SSL information in the master info file " + sql_print_warning("SSL information in the master info file " "('%s') are ignored because this MySQL slave was compiled " "without SSL support.", fname); #endif /* HAVE_OPENSSL */ @@ -2148,7 +2333,8 @@ int show_master_info(THD* thd, MASTER_INFO* mi) field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); if (mi->host[0]) @@ -2378,17 +2564,16 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, ulong init_abort_pos_wait; int error=0; struct timespec abstime; // for timeout checking - set_timespec(abstime,timeout); - + const char *msg; DBUG_ENTER("wait_for_pos"); - DBUG_PRINT("enter",("group_master_log_name: '%s' pos: %lu timeout: %ld", - group_master_log_name, (ulong) group_master_log_pos, - (long) timeout)); + DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu", + log_name->c_ptr(), (ulong) log_pos, (ulong) timeout)); + set_timespec(abstime,timeout); pthread_mutex_lock(&data_lock); - const char *msg= thd->enter_cond(&data_cond, &data_lock, - "Waiting for the slave SQL thread to " - "advance position"); + msg= thd->enter_cond(&data_cond, &data_lock, + "Waiting for the slave SQL thread to " + "advance position"); /* This function will abort when it notices that some CHANGE MASTER or 
RESET MASTER has changed the master info. @@ -2444,6 +2629,12 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, bool pos_reached; int cmp_result= 0; + DBUG_PRINT("info", + ("init_abort_pos_wait: %ld abort_pos_wait: %ld", + init_abort_pos_wait, abort_pos_wait)); + DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu", + group_master_log_name, (ulong) group_master_log_pos)); + /* group_master_log_name can be "", if we are just after a fresh replication start or after a CHANGE MASTER TO MASTER_HOST/PORT @@ -2526,7 +2717,7 @@ err: thd->exit_cond(msg); DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \ improper_arguments: %d timed_out: %d", - (int) thd->killed, + thd->killed_errno(), (int) (init_abort_pos_wait != abort_pos_wait), (int) slave_running, (int) (error == -2), @@ -2539,6 +2730,11 @@ improper_arguments: %d timed_out: %d", DBUG_RETURN( error ? error : event_count ); } +void set_slave_thread_options(THD* thd) +{ + thd->options = ((opt_log_slave_updates) ? OPTION_BIN_LOG:0) | + OPTION_AUTO_IS_NULL; +} /* init_slave_thread() @@ -2556,6 +2752,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) thd->master_access= ~0; thd->priv_user = 0; thd->slave_thread = 1; + set_slave_thread_options(thd); /* It's nonsense to constrain the slave threads with max_join_size; if a query succeeded on master, we HAVE to execute it. So set @@ -2744,8 +2941,8 @@ server_errno=%d)", /* Check if eof packet */ if (len < 8 && mysql->net.read_pos[0] == 254) { - sql_print_error("Slave: received end packet from server, apparent\ - master shutdown: %s", + sql_print_information("Slave: received end packet from server, apparent " + "master shutdown: %s", mysql_error(mysql)); return packet_error; } @@ -2817,13 +3014,14 @@ bool st_relay_log_info::is_until_satisfied() if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { - /* - We have no cached comaprison results so we should compare log names - and cache result + /* + We have no cached comparison results so we should compare log names + and cache result. + If we are after RESET SLAVE, and the SQL slave thread has not processed + any event yet, it could be that group_master_log_name is "". In that case, + just wait for more events (as there is no sensible comparison to do). */ - DBUG_ASSERT(*log_name || log_pos == 0); - if (*log_name) { const char *basename= log_name + dirname_length(log_name); @@ -2898,28 +3096,63 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) int exec_res; /* - Skip queries originating from this server or number of - queries specified by the user in slave_skip_counter - We can't however skip event's that has something to do with the + Queries originating from this server must be skipped. + Low-level events (Format_desc, Rotate, Stop) from this server + must also be skipped. But for those we don't want to modify + group_master_log_pos, because these events did not exist on the master. + Format_desc is not completely skipped. + Skip queries specified by the user in slave_skip_counter. + We can't however skip events that has something to do with the log files themselves. + Filtering on own server id is extremely important, to ignore execution of + events created by the creation/rotation of the relay log (remember that + now the relay log starts with its Format_desc, has a Rotate etc). 
*/ - - if ((ev->server_id == (uint32) ::server_id && !replicate_same_server_id) || - (rli->slave_skip_counter && type_code != ROTATE_EVENT)) + + DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + + if ((ev->server_id == (uint32) ::server_id && + !replicate_same_server_id && + type_code != FORMAT_DESCRIPTION_EVENT) || + (rli->slave_skip_counter && + type_code != ROTATE_EVENT && type_code != STOP_EVENT && + type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) { - /* TODO: I/O thread should not even log events with the same server id */ - rli->inc_group_relay_log_pos(ev->get_event_len(), - type_code != STOP_EVENT ? ev->log_pos : LL(0), - 1/* skip lock*/); - flush_relay_log_info(rli); - + DBUG_PRINT("info", ("event skipped")); + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos((type_code == ROTATE_EVENT || + type_code == STOP_EVENT || + type_code == FORMAT_DESCRIPTION_EVENT) ? + LL(0) : ev->log_pos, + 1/* skip lock*/); + flush_relay_log_info(rli); + } + /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an failed auto-increment insert + Protect against common user error of setting the counter to 1 + instead of 2 while recovering from an insert which used auto_increment, + rand or user var. */ if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || type_code == STOP_EVENT) && - rli->slave_skip_counter == 1)) + !((type_code == INTVAR_EVENT || + type_code == RAND_EVENT || + type_code == USER_VAR_EVENT) && + rli->slave_skip_counter == 1) && + /* + The events from ourselves which have something to do with the relay + log itself must be skipped, true, but they mustn't decrement + rli->slave_skip_counter, because the user is supposed to not see + these events (they are not in the master's binlog) and if we + decremented, START SLAVE would for example decrement when it sees + the Rotate, so the event which the user probably wanted to skip + would not be skipped. + */ + !(ev->server_id == (uint32) ::server_id && + (type_code == ROTATE_EVENT || type_code == STOP_EVENT || + type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) --rli->slave_skip_counter; pthread_mutex_unlock(&rli->data_lock); delete ev; @@ -2935,7 +3168,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) ev->thd = thd; exec_res = ev->exec_event(rli); DBUG_ASSERT(rli->sql_thd==thd); - delete ev; + /* + Format_description_log_event should not be deleted because it will be + used to read info about the relay log's format; it will be deleted when + the SQL thread does not need it, i.e. when this thread terminates. + */ + if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + { + DBUG_PRINT("info", ("Deleting the event after it has been executed")); + delete ev; + } return exec_res; } else @@ -3026,7 +3268,7 @@ slave_begin: llstr(mi->master_log_pos,llbuff)); else { - sql_print_error("Slave I/O thread killed while connecting to master"); + sql_print_information("Slave I/O thread killed while connecting to master"); goto err; } @@ -3036,7 +3278,8 @@ connected: thd->proc_info = "Checking master version"; if (get_master_version_and_clock(mysql, mi)) goto err; - if (!mi->old_format) + + if (mi->rli.relay_log.description_event_for_queue->binlog_version > 1) { /* Register ourselves with the master. 
@@ -3058,7 +3301,7 @@ connected: sql_print_error("Failed on request_dump()"); if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while requesting master \ + sql_print_information("Slave I/O thread killed while requesting master \ dump"); goto err; } @@ -3082,7 +3325,7 @@ dump"); } if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while retrying master \ + sql_print_information("Slave I/O thread killed while retrying master \ dump"); goto err; } @@ -3095,7 +3338,7 @@ reconnecting to try again, log '%s' at postion %s", IO_RPL_LOG_NAME, if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed during or \ + sql_print_information("Slave I/O thread killed during or \ after reconnect"); goto err; } @@ -3117,7 +3360,7 @@ after reconnect"); if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while reading event"); + sql_print_information("Slave I/O thread killed while reading event"); goto err; } @@ -3154,20 +3397,20 @@ max_allowed_packet", if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while waiting to \ + sql_print_information("Slave I/O thread killed while waiting to \ reconnect after a failed read"); goto err; } thd->proc_info = "Reconnecting after a failed master event read"; if (!suppress_warnings) - sql_print_error("Slave I/O thread: Failed reading log event, \ + sql_print_information("Slave I/O thread: Failed reading log event, \ reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff)); if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed during or after a \ + sql_print_information("Slave I/O thread killed during or after a \ reconnect done to recover from failed read"); goto err; } @@ -3229,7 +3472,7 @@ log space"); // error = 0; err: // print the current replication position - sql_print_error("Slave I/O thread exiting, read up to log '%s', position %s", + sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query = thd->db = 0; // extra safety @@ -3244,6 +3487,9 @@ err: pthread_mutex_lock(&mi->run_lock); mi->slave_running = 0; mi->io_thd = 0; + /* Forget the relay log's format */ + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; // TODO: make rpl_status part of MASTER_INFO change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); mi->abort_slave = 0; // TODO: check if this is needed @@ -3339,15 +3585,38 @@ slave_begin: if (init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 1 /*need data lock*/, &errmsg)) + 1 /*need data lock*/, &errmsg, + 1 /*look for a description_event*/)) { sql_print_error("Error initializing relay log position: %s", errmsg); goto err; } THD_CHECK_SENTRY(thd); - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= 
BIN_LOG_HEADER_SIZE); + /* + Wonder if this is correct. I (Guilhem) wonder if my_b_tell() returns the + correct position when it's called just after my_b_seek() (the questionable + stuff is those "seek is done on next read" comments in the my_b_seek() + source code). + The crude reality is that this assertion randomly fails whereas + replication seems to work fine. And there is no easy explanation why it + fails (as we my_b_seek(rli->event_relay_log_pos) at the very end of + init_relay_log_pos() called above). Maybe the assertion would be + meaningful if we held rli->data_lock between the my_b_seek() and the + DBUG_ASSERT(). + */ +#ifdef SHOULD_BE_CHECKED + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#endif + } +#endif DBUG_ASSERT(rli->sql_thd == thd); DBUG_PRINT("master_info",("log_file_name: %s position: %s", @@ -3391,12 +3660,18 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ } /* Thread stopped. Print the current replication position to the log */ - sql_print_information("Slave SQL thread exiting, replication stopped in log \ - '%s' at position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); + sql_print_information("Slave SQL thread exiting, replication stopped in log " + "'%s' at position %s", + RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety + /* + Some extra safety, which should not been needed (normally, event deletion + should already have done these assignments (each event which sets these + variables is supposed to set them to 0 before terminating)). + */ + thd->query= thd->db= thd->catalog= 0; thd->query_length= thd->db_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; @@ -3406,11 +3681,9 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun /* When master_pos_wait() wakes up it will check this and terminate */ rli->slave_running= 0; - /* - Going out of the transaction. Necessary to mark it, in case the user - restarts replication from a non-transactional statement (with CHANGE - MASTER). - */ + /* Forget the relay log's format */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= 0; /* Wake up master_pos_wait() */ pthread_mutex_unlock(&rli->data_lock); DBUG_PRINT("info",("Signaling possibly waiting master_pos_wait() functions")); @@ -3508,7 +3781,7 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) if (unlikely(cev_not_written)) break; Execute_load_log_event xev(thd,0,0); - xev.log_pos = mi->master_log_pos; + xev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&xev))) { sql_print_error("Slave I/O: error writing Exec_load event to \ @@ -3522,7 +3795,6 @@ relay log"); { cev->block = (char*)net->read_pos; cev->block_len = num_bytes; - cev->log_pos = mi->master_log_pos; if (unlikely(mi->rli.relay_log.append(cev))) { sql_print_error("Slave I/O: error writing Create_file event to \ @@ -3536,7 +3808,7 @@ relay log"); { aev.block = (char*)net->read_pos; aev.block_len = num_bytes; - aev.log_pos = mi->master_log_pos; + aev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&aev))) { sql_print_error("Slave I/O: error writing Append_block event to \ @@ -3564,6 +3836,7 @@ err: DESCRIPTION Updates the master info with the place in the next binary log where we should start reading. 
+ Rotate the relay log to avoid mixed-format relay logs. NOTES We assume we already locked mi->data_lock @@ -3594,21 +3867,33 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) if (disconnect_slave_event_count) events_till_disconnect++; #endif + /* + If description_event_for_queue is format <4, there is conversion in the + relay log to the slave's format (4). And Rotate can mean upgrade or + nothing. If upgrade, it's to 5.0 or newer, so we will get a Format_desc, so + no need to reset description_event_for_queue now. And if it's nothing (same + master version as before), no need (still using the slave's format). + */ + if (mi->rli.relay_log.description_event_for_queue->binlog_version >= 4) + { + delete mi->rli.relay_log.description_event_for_queue; + /* start from format 3 (MySQL 4.0) again */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3); + } + /* + Rotate the relay log makes binlog format detection easier (at next slave + start or mysqlbinlog) + */ + rotate_relay_log(mi); /* will take the right mutexes */ DBUG_RETURN(0); } - /* - queue_old_event() - - Writes a 3.23 event to the relay log. - - TODO: - Test this code before release - it has to be tested on a separate - setup with 3.23 master + Reads a 3.23 event and converts it to the slave's format. This code was copied + from MySQL 4.0. */ - -static int queue_old_event(MASTER_INFO *mi, const char *buf, +static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, ulong event_len) { const char *errmsg = 0; @@ -3616,7 +3901,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, bool ignore_event= 0; char *tmp_buf = 0; RELAY_LOG_INFO *rli= &mi->rli; - DBUG_ENTER("queue_old_event"); + DBUG_ENTER("queue_binlog_ver_1_event"); /* If we get Load event, we need to pass a non-reusable buffer @@ -3648,7 +3933,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, connected to the master). */ Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, - 1 /*old format*/ ); + mi->rli.relay_log.description_event_for_queue); if (unlikely(!ev)) { sql_print_error("Read invalid event from master: '%s',\ @@ -3658,7 +3943,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(1); } pthread_mutex_lock(&mi->data_lock); - ev->log_pos = mi->master_log_pos; + ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { case STOP_EVENT: ignore_event= 1; @@ -3683,13 +3968,11 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, { /* We come here when and only when tmp_buf != 0 */ DBUG_ASSERT(tmp_buf); + inc_pos=event_len; + ev->log_pos+= inc_pos; int error = process_io_create_file(mi,(Create_file_log_event*)ev); delete ev; - /* - We had incremented event_len, but now when it is used to calculate the - position in the master's log, we must use the original value. - */ - mi->master_log_pos += --event_len; + mi->master_log_pos += inc_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); my_free((char*)tmp_buf, MYF(0)); @@ -3701,6 +3984,12 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, } if (likely(!ignore_event)) { + if (ev->log_pos) + /* + Don't do it for fake Rotate events (see comment in + Log_event::Log_event(const char* buf...) in log_event.cc). 
+ */ + ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */ if (unlikely(rli->relay_log.append(ev))) { delete ev; @@ -3716,10 +4005,98 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(0); } +/* + Reads a 4.0 event and converts it to the slave's format. This code was copied + from queue_binlog_ver_1_event(), with some affordable simplifications. +*/ +static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + const char *errmsg = 0; + ulong inc_pos; + char *tmp_buf = 0; + RELAY_LOG_INFO *rli= &mi->rli; + DBUG_ENTER("queue_binlog_ver_3_event"); + + /* read_log_event() will adjust log_pos to be end_log_pos */ + Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue); + if (unlikely(!ev)) + { + sql_print_error("Read invalid event from master: '%s',\ + master could be corrupt but a more likely cause of this is a bug", + errmsg); + my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } + pthread_mutex_lock(&mi->data_lock); + switch (ev->get_type_code()) { + case STOP_EVENT: + goto err; + case ROTATE_EVENT: + if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + inc_pos= 0; + break; + default: + inc_pos= event_len; + break; + } + if (unlikely(rli->relay_log.append(ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + delete ev; + mi->master_log_pos+= inc_pos; +err: + DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(0); +} + +/* + queue_old_event() + + Writes a 3.23 or 4.0 event to the relay log, after converting it to the 5.0 + (exactly, slave's) format. To do the conversion, we create a 5.0 event from + the 3.23/4.0 bytes, then write this event to the relay log. + + TODO: + Test this code before release - it has to be tested on a separate + setup with 3.23 master or 4.0 master +*/ + +static int queue_old_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + switch (mi->rli.relay_log.description_event_for_queue->binlog_version) + { + case 1: + return queue_binlog_ver_1_event(mi,buf,event_len); + case 3: + return queue_binlog_ver_3_event(mi,buf,event_len); + default: /* unsupported format; eg version 2 */ + DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + return 1; + } +} /* queue_event() + If the event is 3.23/4.0, passes it to queue_old_event() which will convert + it. Otherwise, writes a 5.0 (or newer) event to the relay log. Then there is + no format conversion, it's pure read/write of bytes. + So a 5.0.0 slave's relay log can contain events in the slave's format or in + any >=5.0.0 format. 
*/ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) @@ -3729,7 +4106,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) RELAY_LOG_INFO *rli= &mi->rli; DBUG_ENTER("queue_event"); - if (mi->old_format) + if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 && + buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */) DBUG_RETURN(queue_old_event(mi,buf,event_len)); pthread_mutex_lock(&mi->data_lock); @@ -3756,7 +4134,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) goto err; case ROTATE_EVENT: { - Rotate_log_event rev(buf,event_len,0); + Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue); if (unlikely(process_io_rotate(mi,&rev))) { error= 1; @@ -3769,6 +4147,42 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) inc_pos= 0; break; } + case FORMAT_DESCRIPTION_EVENT: + { + /* + Create an event, and save it (when we rotate the relay log, we will have + to write this event again). + */ + /* + We are the only thread which reads/writes description_event_for_queue. The + relay_log struct does not move (though some members of it can change), so + we needn't any lock (no rli->data_lock, no log lock). + */ + Format_description_log_event* tmp; + const char* errmsg; + if (!(tmp= (Format_description_log_event*) + Log_event::read_log_event(buf, event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue))) + { + error= 2; + goto err; + } + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= tmp; + /* + Though this does some conversion to the slave's format, this will + preserve the master's binlog format version, and number of event types. + */ + /* + If the event was not requested by the slave (the slave did not ask for + it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos + */ + inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0; + DBUG_PRINT("info",("binlog format is now %d", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + + } + break; default: inc_pos= event_len; break; @@ -3795,23 +4209,32 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) We still want to increment, so that we won't re-read this event from the master if the slave IO thread is now stopped/restarted (more efficient if the events we are ignoring are big LOAD DATA INFILE). + But events which were generated by this slave and which do not exist in + the master's binlog (i.e. Format_desc, Rotate & Stop) should not increment + mi->master_log_pos. 
*/ - mi->master_log_pos+= inc_pos; + if (buf[EVENT_TYPE_OFFSET]!=FORMAT_DESCRIPTION_EVENT && + buf[EVENT_TYPE_OFFSET]!=ROTATE_EVENT && + buf[EVENT_TYPE_OFFSET]!=STOP_EVENT) + mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); } else { /* write the event to the relay log */ - if (likely(!(error= rli->relay_log.appendv(buf,event_len,0)))) + if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } + else + error=3; } err: pthread_mutex_unlock(&mi->data_lock); + DBUG_PRINT("info", ("error=%d", error)); DBUG_RETURN(error); } @@ -3836,6 +4259,7 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) } rli->inited = 0; rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT); + rli->relay_log.harvest_bytes_written(&rli->log_space_total); /* Delete the slave's temporary tables from memory. In the future there will be other actions than this, to ensure persistance @@ -4057,6 +4481,7 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) relay_log_pos Current log pos pending Number of bytes already processed from the event */ + rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); my_b_seek(cur_log,rli->event_relay_log_pos); DBUG_RETURN(cur_log); } @@ -4115,28 +4540,40 @@ Log_event* next_event(RELAY_LOG_INFO* rli) hot_log=0; // Using old binary log } } + #ifndef DBUG_OFF { + /* This is an assertion which sometimes fails, let's try to track it */ char llbuf1[22], llbuf2[22]; - DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); - /* - The next assertion sometimes (very rarely) fails, let's try to track - it - */ - DBUG_PRINT("info", ("\ -Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", + DBUG_PRINT("info", ("my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", llstr(my_b_tell(cur_log),llbuf1), - llstr(rli->group_relay_log_pos,llbuf2))); - DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); } #endif /* Relay log is always in new format - if the master is 3.23, the - I/O thread will convert the format for us + I/O thread will convert the format for us. + A problem: the description event may be in a previous relay log. So if + the slave has been shutdown meanwhile, we would have to look in old relay + logs, which may even have been deleted. So we need to write this + description event at the beginning of the relay log. + When the relay log is created when the I/O thread starts, easy: the + master will send the description event and we will queue it. + But if the relay log is created by new_file(): then the solution is: + MYSQL_LOG::open() will write the buffered description event. 
*/ - if ((ev=Log_event::read_log_event(cur_log,0,(bool)0 /* new format */))) + if ((ev=Log_event::read_log_event(cur_log,0, + rli->relay_log.description_event_for_exec))) + { DBUG_ASSERT(thd==rli->sql_thd); + /* + read it while we have a lock, to avoid a mutex lock in + inc_event_relay_log_pos() + */ + rli->future_event_relay_log_pos= my_b_tell(cur_log); if (hot_log) pthread_mutex_unlock(log_lock); DBUG_RETURN(ev); @@ -4277,8 +4714,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", { #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is currently active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is currently active", + rli->linfo.log_file_name); #endif rli->cur_log= cur_log= rli->relay_log.get_log_file(); rli->cur_log_old_open_count= rli->relay_log.get_open_count(); @@ -4307,8 +4744,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", */ #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is not active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is not active", + rli->linfo.log_file_name); #endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, @@ -4334,7 +4771,11 @@ event(errno: %d cur_log->error: %d)", } } if (!errmsg && global_system_variables.log_warnings) - errmsg = "slave SQL thread was killed"; + { + sql_print_information("Error reading relay log event: %s", + "slave SQL thread was killed"); + DBUG_RETURN(0); + } err: if (errmsg) @@ -4354,8 +4795,9 @@ void rotate_relay_log(MASTER_INFO* mi) DBUG_ENTER("rotate_relay_log"); RELAY_LOG_INFO* rli= &mi->rli; - lock_slave_threads(mi); - pthread_mutex_lock(&rli->data_lock); + /* We don't lock rli->run_lock. This would lead to deadlocks. */ + pthread_mutex_lock(&mi->run_lock); + /* We need to test inited because otherwise, new_file() will attempt to lock LOCK_log, which may not be inited (if we're not a slave). 
@@ -4384,8 +4826,7 @@ void rotate_relay_log(MASTER_INFO* mi) */ rli->relay_log.harvest_bytes_written(&rli->log_space_total); end: - pthread_mutex_unlock(&rli->data_lock); - unlock_slave_threads(mi); + pthread_mutex_unlock(&mi->run_lock); DBUG_VOID_RETURN; } diff --git a/sql/slave.h b/sql/slave.h index 97f197fb50a..b8ff86cf301 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -93,11 +93,6 @@ extern my_bool opt_log_slave_updates; extern ulonglong relay_log_space_limit; struct st_master_info; -enum enum_binlog_formats { - BINLOG_FORMAT_CURRENT=0, /* 0 is important for easy 'if (mi->old_format)' */ - BINLOG_FORMAT_323_LESS_57, - BINLOG_FORMAT_323_GEQ_57 }; - /**************************************************************************** Replication SQL Thread @@ -199,6 +194,8 @@ typedef struct st_relay_log_info ulonglong group_relay_log_pos; char event_relay_log_name[FN_REFLEN]; ulonglong event_relay_log_pos; + ulonglong future_event_relay_log_pos; + /* Original log name and position of the group we're currently executing (whose coordinates are group_relay_log_name/pos in the relay log) @@ -302,12 +299,14 @@ typedef struct st_relay_log_info until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN; } - inline void inc_event_relay_log_pos(ulonglong val) + inline void inc_event_relay_log_pos() { - event_relay_log_pos+= val; + event_relay_log_pos= future_event_relay_log_pos; } - void inc_group_relay_log_pos(ulonglong val, ulonglong log_pos, bool skip_lock=0); + void inc_group_relay_log_pos(ulonglong log_pos, + bool skip_lock=0); + int wait_for_pos(THD* thd, String* log_name, longlong log_pos, longlong timeout); void close_temporary_tables(); @@ -384,7 +383,6 @@ typedef struct st_master_info int events_till_abort; #endif bool inited; - enum enum_binlog_formats old_format; volatile bool abort_slave, slave_running; volatile ulong slave_run_id; /* @@ -399,7 +397,7 @@ typedef struct st_master_info long clock_diff_with_master; st_master_info() - :ssl(0), fd(-1), io_thd(0), inited(0), old_format(BINLOG_FORMAT_CURRENT), + :ssl(0), fd(-1), io_thd(0), inited(0), abort_slave(0),slave_running(0), slave_run_id(0) { host[0] = 0; user[0] = 0; password[0] = 0; @@ -530,10 +528,12 @@ void lock_slave_threads(MASTER_INFO* mi); void unlock_slave_threads(MASTER_INFO* mi); void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse); int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, - bool need_data_lock, const char** errmsg); + bool need_data_lock, const char** errmsg, + bool look_for_description_event); int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); +void set_slave_thread_options(THD* thd); void rotate_relay_log(MASTER_INFO* mi); extern "C" pthread_handler_decl(handle_slave_io,arg); diff --git a/sql/sp.cc b/sql/sp.cc new file mode 100644 index 00000000000..a99a3ee8a2a --- /dev/null +++ b/sql/sp.cc @@ -0,0 +1,1225 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +#include "mysql_priv.h" +#include "sql_acl.h" +#include "sp.h" +#include "sp_head.h" +#include "sp_cache.h" + +static bool +create_string(THD *thd, String *buf, + int sp_type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics); + +/* + * + * DB storage of Stored PROCEDUREs and FUNCTIONs + * + */ + +enum +{ + MYSQL_PROC_FIELD_DB = 0, + MYSQL_PROC_FIELD_NAME, + MYSQL_PROC_FIELD_TYPE, + MYSQL_PROC_FIELD_SPECIFIC_NAME, + MYSQL_PROC_FIELD_LANGUAGE, + MYSQL_PROC_FIELD_ACCESS, + MYSQL_PROC_FIELD_DETERMINISTIC, + MYSQL_PROC_FIELD_SECURITY_TYPE, + MYSQL_PROC_FIELD_PARAM_LIST, + MYSQL_PROC_FIELD_RETURNS, + MYSQL_PROC_FIELD_BODY, + MYSQL_PROC_FIELD_DEFINER, + MYSQL_PROC_FIELD_CREATED, + MYSQL_PROC_FIELD_MODIFIED, + MYSQL_PROC_FIELD_SQL_MODE, + MYSQL_PROC_FIELD_COMMENT, + MYSQL_PROC_FIELD_COUNT +}; + +bool mysql_proc_table_exists= 1; + +/* *opened=true means we opened ourselves */ +static int +db_find_routine_aux(THD *thd, int type, sp_name *name, + enum thr_lock_type ltype, TABLE **tablep, bool *opened) +{ + TABLE *table; + byte key[64+64+1]; // db, name, type + uint keylen; + DBUG_ENTER("db_find_routine_aux"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + /* + Speed up things if mysql.proc doesn't exist + mysql_proc_table_exists is set when one creates a stored procedure + or on flush privileges + */ + if (!mysql_proc_table_exists && ltype == TL_READ) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + + // Put the key used to read the row together + keylen= name->m_db.length; + if (keylen > 64) + keylen= 64; + memcpy(key, name->m_db.str, keylen); + memset(key+keylen, (int)' ', 64-keylen); // Pad with space + keylen= name->m_name.length; + if (keylen > 64) + keylen= 64; + memcpy(key+64, name->m_name.str, keylen); + memset(key+64+keylen, (int)' ', 64-keylen); // Pad with space + key[128]= type; + keylen= sizeof(key); + + if (thd->lex->proc_table) + table= thd->lex->proc_table->table; + else + { + for (table= thd->open_tables ; table ; table= table->next) + if (strcmp(table->table_cache_key, "mysql") == 0 && + strcmp(table->real_name, "proc") == 0) + break; + } + if (table) + *opened= FALSE; + else + { + TABLE_LIST tables; + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + if (!
(table= open_ltable(thd, &tables, ltype))) + { + *tablep= NULL; + mysql_proc_table_exists= 0; + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + } + *opened= TRUE; + } + mysql_proc_table_exists= 1; + + if (table->file->index_read_idx(table->record[0], 0, + key, keylen, + HA_READ_KEY_EXACT)) + { + *tablep= NULL; + DBUG_RETURN(SP_KEY_NOT_FOUND); + } + *tablep= table; + + DBUG_RETURN(SP_OK); +} + + +static int +db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) +{ + extern int yyparse(void *thd); + TABLE *table; + const char *params, *returns, *body; + int ret; + bool opened; + const char *definer; + longlong created; + longlong modified; + st_sp_chistics chistics; + char *ptr; + uint length; + char buff[65]; + String str(buff, sizeof(buff), &my_charset_bin); + ulong sql_mode; + DBUG_ENTER("db_find_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_READ, &table, &opened); + if (ret != SP_OK) + goto done; + + if (table->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + bzero((char *)&chistics, sizeof(chistics)); + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_ACCESS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + switch (ptr[0]) { + case 'N': + chistics.daccess= SP_NO_SQL; + break; + case 'C': + chistics.daccess= SP_CONTAINS_SQL; + break; + case 'R': + chistics.daccess= SP_READS_SQL_DATA; + break; + case 'M': + chistics.daccess= SP_MODIFIES_SQL_DATA; + break; + default: + chistics.daccess= SP_CONTAINS_SQL; + } + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DETERMINISTIC])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + chistics.detistic= (ptr[0] == 'N' ? FALSE : TRUE); + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + chistics.suid= (ptr[0] == 'I' ? 
SP_IS_NOT_SUID : SP_IS_SUID); + + if ((params= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_PARAM_LIST])) == NULL) + { + params= ""; + } + + if (type == TYPE_ENUM_PROCEDURE) + returns= ""; + else if ((returns= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_RETURNS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + if ((body= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_BODY])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + // Get additional information + if ((definer= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DEFINER])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + modified= table->field[MYSQL_PROC_FIELD_MODIFIED]->val_int(); + created= table->field[MYSQL_PROC_FIELD_CREATED]->val_int(); + + sql_mode= (ulong) table->field[MYSQL_PROC_FIELD_SQL_MODE]->val_int(); + + table->field[MYSQL_PROC_FIELD_COMMENT]->val_str(&str, &str); + + ptr= 0; + if ((length= str.length())) + ptr= thd->strmake(str.ptr(), length); + chistics.comment.str= ptr; + chistics.comment.length= length; + + if (opened) + { + opened= FALSE; + close_thread_tables(thd, 0, 1); + } + + { + String defstr; + LEX *oldlex= thd->lex; + char olddb[128]; + bool dbchanged; + enum enum_sql_command oldcmd= thd->lex->sql_command; + ulong old_sql_mode= thd->variables.sql_mode; + ha_rows select_limit= thd->variables.select_limit; + + thd->variables.sql_mode= sql_mode; + thd->variables.select_limit= HA_POS_ERROR; + + defstr.set_charset(system_charset_info); + if (!create_string(thd, &defstr, + type, + name, + params, strlen(params), + returns, strlen(returns), + body, strlen(body), + &chistics)) + { + ret= SP_INTERNAL_ERROR; + goto done; + } + + dbchanged= FALSE; + if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb), + 1, &dbchanged))) + goto done; + + { + /* This is something of a kludge. We need to initialize some fields + * in thd->lex (the unit and master stuff), and the easiest way to + * do it is to call mysql_init_query(), but this unfortunately + * resets the value_list where we keep the CALL parameters. So we + * copy the list and then restore it.
+ */ + List<Item> vals= thd->lex->value_list; + + lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); + thd->lex->value_list= vals; + } + + if (yyparse(thd) || thd->is_fatal_error || thd->lex->sphead == NULL) + { + LEX *newlex= thd->lex; + sp_head *sp= newlex->sphead; + + if (dbchanged && (ret= sp_change_db(thd, olddb, 1))) + goto done; + if (sp) + { + if (oldlex != newlex) + sp->restore_lex(thd); + delete sp; + newlex->sphead= NULL; + } + ret= SP_PARSE_ERROR; + } + else + { + if (dbchanged && (ret= sp_change_db(thd, olddb, 1))) + goto done; + *sphp= thd->lex->sphead; + (*sphp)->set_info((char *)definer, (uint)strlen(definer), + created, modified, &chistics, sql_mode); + (*sphp)->optimize(); + } + thd->lex->sql_command= oldcmd; + thd->variables.sql_mode= old_sql_mode; + thd->variables.select_limit= select_limit; + } + + done: + + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_create_routine(THD *thd, int type, sp_head *sp) +{ + int ret; + TABLE *table; + TABLE_LIST tables; + char definer[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; + char olddb[128]; + bool dbchanged; + DBUG_ENTER("db_create_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s",type,sp->m_name.length,sp->m_name.str)); + + dbchanged= FALSE; + if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb), + 0, &dbchanged))) + { + ret= SP_NO_DB_ERROR; + goto done; + } + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + ret= SP_OPEN_TABLE_FAILED; + else + { + restore_record(table, default_values); // Get default values for fields + strxmov(definer, thd->priv_user, "@", thd->priv_host, NullS); + + if (table->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + table->field[MYSQL_PROC_FIELD_DB]-> + store(sp->m_db.str, sp->m_db.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_TYPE]-> + store((longlong)type); + table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + if (sp->m_chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + store((longlong)sp->m_chistics->daccess); + table->field[MYSQL_PROC_FIELD_DETERMINISTIC]-> + store((longlong)(sp->m_chistics->detistic ? 
1 : 2)); + if (sp->m_chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)sp->m_chistics->suid); + table->field[MYSQL_PROC_FIELD_PARAM_LIST]-> + store(sp->m_params.str, sp->m_params.length, system_charset_info); + if (sp->m_retstr.str) + table->field[MYSQL_PROC_FIELD_RETURNS]-> + store(sp->m_retstr.str, sp->m_retstr.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_BODY]-> + store(sp->m_body.str, sp->m_body.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_DEFINER]-> + store(definer, (uint)strlen(definer), system_charset_info); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_CREATED])->set_time(); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + table->field[MYSQL_PROC_FIELD_SQL_MODE]-> + store((longlong)thd->variables.sql_mode); + if (sp->m_chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]-> + store(sp->m_chistics->comment.str, sp->m_chistics->comment.length, + system_charset_info); + + ret= SP_OK; + if (table->file->write_row(table->record[0])) + ret= SP_WRITE_ROW_FAILED; + } + +done: + close_thread_tables(thd); + if (dbchanged) + (void)sp_change_db(thd, olddb, 1); + DBUG_RETURN(ret); +} + + +static int +db_drop_routine(THD *thd, int type, sp_name *name) +{ + TABLE *table; + int ret; + bool opened; + DBUG_ENTER("db_drop_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_WRITE, &table, &opened); + if (ret == SP_OK) + { + if (table->file->delete_row(table->record[0])) + ret= SP_DELETE_ROW_FAILED; + } + + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics) +{ + TABLE *table; + int ret; + bool opened; + DBUG_ENTER("db_update_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_WRITE, &table, &opened); + if (ret == SP_OK) + { + store_record(table,record[1]); + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + if (chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)chistics->suid); + if (chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + store((longlong)chistics->daccess); + if (chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]->store(chistics->comment.str, + chistics->comment.length, + system_charset_info); + if ((table->file->update_row(table->record[1],table->record[0]))) + ret= SP_WRITE_ROW_FAILED; + } + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +struct st_used_field +{ + const char *field_name; + uint field_length; + enum enum_field_types field_type; + Field *field; +}; + +static struct st_used_field init_fields[]= +{ + { "Db", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Name", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Type", 9, MYSQL_TYPE_STRING, 0}, + { "Definer", 77, MYSQL_TYPE_STRING, 0}, + { "Modified", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Created", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Security_type", 1, MYSQL_TYPE_STRING, 0}, + { "Comment", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { 0, 0, MYSQL_TYPE_STRING, 0} +}; + + +static int +print_field_values(THD *thd, TABLE *table, + struct st_used_field *used_fields, + int type, const char *wild) +{ + Protocol *protocol= thd->protocol; 
+ + if (table->field[MYSQL_PROC_FIELD_TYPE]->val_int() == type) + { + String db_string; + String name_string; + struct st_used_field *used_field= used_fields; + + if (get_field(thd->mem_root, used_field->field, &db_string)) + db_string.set_ascii("", 0); + used_field+= 1; + get_field(thd->mem_root, used_field->field, &name_string); + + if (!wild || !wild[0] || !wild_compare(name_string.ptr(), wild, 0)) + { + protocol->prepare_for_resend(); + protocol->store(&db_string); + protocol->store(&name_string); + for (used_field++; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + { + TIME tmp_time; + + bzero((char *)&tmp_time, sizeof(tmp_time)); + ((Field_timestamp *) used_field->field)->get_time(&tmp_time); + protocol->store(&tmp_time); + } + break; + default: + { + String tmp_string; + + get_field(thd->mem_root, used_field->field, &tmp_string); + protocol->store(&tmp_string); + } + break; + } + } + if (protocol->write()) + return SP_INTERNAL_ERROR; + } + } + return SP_OK; +} + + +static int +db_show_routine_status(THD *thd, int type, const char *wild) +{ + TABLE *table; + TABLE_LIST tables; + int res; + DBUG_ENTER("db_show_routine_status"); + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + + if (! (table= open_ltable(thd, &tables, TL_READ))) + { + res= SP_OPEN_TABLE_FAILED; + goto done; + } + else + { + Item *item; + List<Item> field_list; + struct st_used_field *used_field; + st_used_field used_fields[array_elements(init_fields)]; + + memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields)); + /* Init header */ + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + field_list.push_back(item=new Item_datetime(used_field->field_name)); + break; + default: + field_list.push_back(item=new Item_empty_string(used_field->field_name, + used_field-> + field_length)); + break; + } + } + /* Print header */ + if (thd->protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + { + res= SP_INTERNAL_ERROR; + goto err_case; + } + + /* + Init fields + + tables is not VIEW for sure => we can pass 0 as condition + */ + setup_tables(thd, &tables, 0); + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + Item_field *field= new Item_field("mysql", "proc", + used_field->field_name); + if (!(used_field->field= find_field_in_tables(thd, field, &tables, + 0, REPORT_ALL_ERRORS, 1))) + { + res= SP_INTERNAL_ERROR; + goto err_case1; + } + } + + table->file->ha_index_init(0); + if ((res= table->file->index_first(table->record[0]))) + { + res= (res == HA_ERR_END_OF_FILE) ? 
0 : SP_INTERNAL_ERROR; + goto err_case1; + } + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + while (!table->file->index_next(table->record[0])) + { + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + } + res= SP_OK; + } + +err_case1: + send_eof(thd); +err_case: + table->file->ha_index_end(); + close_thread_tables(thd); +done: + DBUG_RETURN(res); +} + + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db) +{ + TABLE *table; + byte key[64]; // db + uint keylen; + int ret; + DBUG_ENTER("sp_drop_db_routines"); + DBUG_PRINT("enter", ("db: %s", db)); + + // Put the key used to read the row together + keylen= strlen(db); + if (keylen > 64) + keylen= 64; + memcpy(key, db, keylen); + memset(key+keylen, (int)' ', 64-keylen); // Pad with space + keylen= sizeof(key); + + for (table= thd->open_tables ; table ; table= table->next) + if (strcmp(table->table_cache_key, "mysql") == 0 && + strcmp(table->real_name, "proc") == 0) + break; + if (! table) + { + TABLE_LIST tables; + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + } + + ret= SP_OK; + table->file->ha_index_init(0); + if (! table->file->index_read(table->record[0], + key, keylen, HA_READ_KEY_EXACT)) + { + int nxtres; + bool deleted= FALSE; + + do { + if (! table->file->delete_row(table->record[0])) + deleted= TRUE; /* We deleted something */ + else + { + ret= SP_DELETE_ROW_FAILED; + nxtres= 0; + break; + } + } while (! (nxtres= table->file->index_next_same(table->record[0], + key, keylen))); + if (nxtres != HA_ERR_END_OF_FILE) + ret= SP_KEY_NOT_FOUND; + if (deleted) + sp_cache_invalidate(); + } + table->file->ha_index_end(); + + close_thread_tables(thd); + + DBUG_RETURN(ret); +} + + +/***************************************************************************** + PROCEDURE +******************************************************************************/ + +sp_head * +sp_find_procedure(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_find_procedure"); + DBUG_PRINT("enter", ("name: %*s.%*s", + name->m_db.length, name->m_db.str, + name->m_name.length, name->m_name.str)); + + if (!(sp= sp_cache_lookup(&thd->sp_proc_cache, name))) + { + if (db_find_routine(thd, TYPE_ENUM_PROCEDURE, name, &sp) == SP_OK) + sp_cache_insert(&thd->sp_proc_cache, sp); + } + + DBUG_RETURN(sp); +} + + +int +sp_create_procedure(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_procedure"); + DBUG_PRINT("enter", ("name: %*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_PROCEDURE, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_procedure(THD *thd, sp_name *name) +{ + int ret; + bool found; + DBUG_ENTER("sp_drop_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_proc_cache, name); + ret= db_drop_routine(thd, TYPE_ENUM_PROCEDURE, name); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_procedure(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + bool found; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_proc_cache, name); + ret= db_update_routine(thd, TYPE_ENUM_PROCEDURE, name, chistics); + if (!found && !ret) + 
sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_procedure(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_show_create_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if ((sp= sp_find_procedure(thd, name))) + { + int ret= sp->show_create_procedure(thd); + + DBUG_RETURN(ret); + } + + DBUG_RETURN(SP_KEY_NOT_FOUND); +} + + +int +sp_show_status_procedure(THD *thd, const char *wild) +{ + int ret; + DBUG_ENTER("sp_show_status_procedure"); + + ret= db_show_routine_status(thd, TYPE_ENUM_PROCEDURE, wild); + DBUG_RETURN(ret); +} + + +/***************************************************************************** + FUNCTION +******************************************************************************/ + +sp_head * +sp_find_function(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_find_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if (!(sp= sp_cache_lookup(&thd->sp_func_cache, name))) + { + if (db_find_routine(thd, TYPE_ENUM_FUNCTION, name, &sp) != SP_OK) + sp= NULL; + else + sp_cache_insert(&thd->sp_func_cache, sp); + } + DBUG_RETURN(sp); +} + + +int +sp_create_function(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_function"); + DBUG_PRINT("enter", ("name: %*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_FUNCTION, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_function(THD *thd, sp_name *name) +{ + int ret; + bool found; + DBUG_ENTER("sp_drop_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_func_cache, name); + ret= db_drop_routine(thd, TYPE_ENUM_FUNCTION, name); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + bool found; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_func_cache, name); + ret= db_update_routine(thd, TYPE_ENUM_FUNCTION, name, chistics); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_function(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_show_create_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if ((sp= sp_find_function(thd, name))) + { + int ret= sp->show_create_function(thd); + + DBUG_RETURN(ret); + } + DBUG_RETURN(SP_KEY_NOT_FOUND); +} + + +int +sp_show_status_function(THD *thd, const char *wild) +{ + int ret; + DBUG_ENTER("sp_show_status_function"); + ret= db_show_routine_status(thd, TYPE_ENUM_FUNCTION, wild); + DBUG_RETURN(ret); +} + + +bool +sp_function_exists(THD *thd, sp_name *name) +{ + TABLE *table; + bool ret= FALSE; + bool opened= FALSE; + DBUG_ENTER("sp_function_exists"); + + if (sp_cache_lookup(&thd->sp_func_cache, name) || + db_find_routine_aux(thd, TYPE_ENUM_FUNCTION, + name, TL_READ, + &table, &opened) == SP_OK) + ret= TRUE; + if (opened) + close_thread_tables(thd, 0, 1); + thd->clear_error(); + DBUG_RETURN(ret); +} + + +byte * +sp_lex_spfuns_key(const byte *ptr, uint *plen, my_bool first) +{ + LEX_STRING *lsp= (LEX_STRING *)ptr; + *plen= lsp->length; + return (byte *)lsp->str; +} + + +void +sp_add_fun_to_lex(LEX *lex, sp_name *fun) +{ + if (! 
hash_search(&lex->spfuns, + (byte *)fun->m_qname.str, fun->m_qname.length)) + { + LEX_STRING *ls= (LEX_STRING *)sql_alloc(sizeof(LEX_STRING)); + ls->str= sql_strmake(fun->m_qname.str, fun->m_qname.length); + ls->length= fun->m_qname.length; + + my_hash_insert(&lex->spfuns, (byte *)ls); + } +} + + +void +sp_merge_funs(LEX *dst, LEX *src) +{ + for (uint i=0 ; i < src->spfuns.records ; i++) + { + LEX_STRING *ls= (LEX_STRING *)hash_element(&src->spfuns, i); + + if (! hash_search(&dst->spfuns, (byte *)ls->str, ls->length)) + my_hash_insert(&dst->spfuns, (byte *)ls); + } +} + + +int +sp_cache_functions(THD *thd, LEX *lex) +{ + HASH *h= &lex->spfuns; + int ret= 0; + + for (uint i=0 ; i < h->records ; i++) + { + LEX_STRING *ls= (LEX_STRING *)hash_element(h, i); + sp_name name(*ls); + + name.m_qname= *ls; + if (! sp_cache_lookup(&thd->sp_func_cache, &name)) + { + sp_head *sp; + LEX *oldlex= thd->lex; + LEX *newlex= new st_lex; + + thd->lex= newlex; + newlex->proc_table= oldlex->proc_table; // hint if mysql.oper is opened + name.m_name.str= strchr(name.m_qname.str, '.'); + name.m_db.length= name.m_name.str - name.m_qname.str; + name.m_db.str= strmake_root(thd->mem_root, + name.m_qname.str, name.m_db.length); + name.m_name.str+= 1; + name.m_name.length= name.m_qname.length - name.m_db.length - 1; + + if (db_find_routine(thd, TYPE_ENUM_FUNCTION, &name, &sp) + == SP_OK) + { + sp_cache_insert(&thd->sp_func_cache, sp); + ret= sp_cache_functions(thd, newlex); + delete newlex; + thd->lex= oldlex; + if (ret) + break; + } + else + { + delete newlex; + thd->lex= oldlex; + net_printf(thd, ER_SP_DOES_NOT_EXIST, "FUNCTION", ls->str); + ret= 1; + break; + } + } + } + return ret; +} + +/* + * Generates the CREATE... string from the table information. + * Returns TRUE on success, FALSE on (alloc) failure. + */ +static bool +create_string(THD *thd, String *buf, + int type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics) +{ + /* Make some room to begin with */ + if (buf->alloc(100 + name->m_qname.length + paramslen + returnslen + bodylen + + chistics->comment.length)) + return FALSE; + + buf->append("CREATE ", 7); + if (type == TYPE_ENUM_FUNCTION) + buf->append("FUNCTION ", 9); + else + buf->append("PROCEDURE ", 10); + append_identifier(thd, buf, name->m_db.str, name->m_db.length); + buf->append('.'); + append_identifier(thd, buf, name->m_name.str, name->m_name.length); + buf->append('('); + buf->append(params, paramslen); + buf->append(')'); + if (type == TYPE_ENUM_FUNCTION) + { + buf->append(" RETURNS ", 9); + buf->append(returns, returnslen); + } + buf->append('\n'); + switch (chistics->daccess) { + case SP_NO_SQL: + buf->append(" NO SQL\n"); + break; + case SP_READS_SQL_DATA: + buf->append(" READS SQL DATA\n"); + break; + case SP_MODIFIES_SQL_DATA: + buf->append(" MODIFIES SQL DATA\n"); + break; + case SP_DEFAULT_ACCESS: + case SP_CONTAINS_SQL: + /* Do nothing */ + break; + } + if (chistics->detistic) + buf->append(" DETERMINISTIC\n", 18); + if (chistics->suid == SP_IS_NOT_SUID) + buf->append(" SQL SECURITY INVOKER\n", 25); + if (chistics->comment.length) + { + buf->append(" COMMENT "); + append_unescaped(buf, chistics->comment.str, chistics->comment.length); + buf->append('\n'); + } + buf->append(body, bodylen); + return TRUE; +} + + +// +// Utilities... 
+// + +int +sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen, + bool no_access_check, bool *dbchangedp) +{ + bool changeit; + DBUG_ENTER("sp_use_new_db"); + DBUG_PRINT("enter", ("newdb: %s", newdb)); + + if (! newdb) + newdb= (char *)""; + if (thd->db && thd->db[0]) + { + if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0) + changeit= 0; + else + { + changeit= 1; + strnmov(olddb, thd->db, olddblen); + } + } + else + { // thd->db empty + if (newdb[0]) + changeit= 1; + else + changeit= 0; + olddb[0] = '\0'; + } + if (!changeit) + { + *dbchangedp= FALSE; + DBUG_RETURN(0); + } + else + { + int ret= sp_change_db(thd, newdb, no_access_check); + + if (! ret) + *dbchangedp= TRUE; + DBUG_RETURN(ret); + } +} + +/* + Change database. + + SYNOPSIS + sp_change_db() + thd Thread handler + name Database name + empty_is_ok True= it's ok with "" as name + no_access_check True= don't do access check + + DESCRIPTION + This is the same as mysql_change_db(), but with some extra + arguments for Stored Procedure usage; doing implicit "use" + when executing an SP in a different database. + We also use different error routines, since this might be + invoked from a function when executing a query or statement. + Note: We would have prefered to reuse mysql_change_db(), but + the error handling in particular made that too awkward, so + we (reluctantly) have a "copy" here. + + RETURN VALUES + 0 ok + 1 error +*/ + +int +sp_change_db(THD *thd, char *name, bool no_access_check) +{ + int length, db_length; + char *dbname=my_strdup((char*) name,MYF(MY_WME)); + char path[FN_REFLEN]; + ulong db_access; + HA_CREATE_INFO create; + DBUG_ENTER("sp_change_db"); + DBUG_PRINT("enter", ("db: %s, no_access_check: %d", name, no_access_check)); + + db_length= (!dbname ? 0 : strip_sp(dbname)); + if (dbname && db_length) + { + if ((db_length > NAME_LEN) || check_db_name(dbname)) + { + my_printf_error(ER_WRONG_DB_NAME, ER(ER_WRONG_DB_NAME), MYF(0), dbname); + x_free(dbname); + DBUG_RETURN(1); + } + } + + if (dbname && db_length) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (! no_access_check) + { + if (test_all_bits(thd->master_access,DB_ACLS)) + db_access=DB_ACLS; + else + db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | + thd->master_access); + if (!(db_access & DB_ACLS) && + (!grant_option || check_grant_db(thd,dbname))) + { + my_printf_error(ER_DBACCESS_DENIED_ERROR, ER(ER_DBACCESS_DENIED_ERROR), + MYF(0), + thd->priv_user, + thd->priv_host, + dbname); + mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), + thd->priv_user, + thd->priv_host, + dbname); + my_free(dbname,MYF(0)); + DBUG_RETURN(1); + } + } +#endif + (void) sprintf(path,"%s/%s",mysql_data_home,dbname); + length=unpack_dirname(path,path); // Convert if not unix + if (length && path[length-1] == FN_LIBCHAR) + path[length-1]=0; // remove ending '\' + if (access(path,F_OK)) + { + my_printf_error(ER_BAD_DB_ERROR, ER(ER_BAD_DB_ERROR), MYF(0), dbname); + my_free(dbname,MYF(0)); + DBUG_RETURN(1); + } + } + + x_free(thd->db); + thd->db=dbname; // THD::~THD will free this + thd->db_length=db_length; + + if (dbname && db_length) + { + strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); + thd->db_charset= create.default_table_charset ? 
+ create.default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; + } + DBUG_RETURN(0); +} diff --git a/sql/sp.h b/sql/sp.h new file mode 100644 index 00000000000..acdfe824b97 --- /dev/null +++ b/sql/sp.h @@ -0,0 +1,102 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_H_ +#define _SP_H_ + +// Return codes from sp_create_*, sp_drop_*, and sp_show_*: +#define SP_OK 0 +#define SP_KEY_NOT_FOUND -1 +#define SP_OPEN_TABLE_FAILED -2 +#define SP_WRITE_ROW_FAILED -3 +#define SP_DELETE_ROW_FAILED -4 +#define SP_GET_FIELD_FAILED -5 +#define SP_PARSE_ERROR -6 +#define SP_INTERNAL_ERROR -7 +#define SP_NO_DB_ERROR -8 + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db); + +sp_head * +sp_find_procedure(THD *thd, sp_name *name); + +int +sp_create_procedure(THD *thd, sp_head *sp); + +int +sp_drop_procedure(THD *thd, sp_name *name); + + +int +sp_update_procedure(THD *thd, sp_name *name, st_sp_chistics *chistics); + +int +sp_show_create_procedure(THD *thd, sp_name *name); + +int +sp_show_status_procedure(THD *thd, const char *wild); + +sp_head * +sp_find_function(THD *thd, sp_name *name); + +int +sp_create_function(THD *thd, sp_head *sp); + +int +sp_drop_function(THD *thd, sp_name *name); + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics); + +int +sp_show_create_function(THD *thd, sp_name *name); + +int +sp_show_status_function(THD *thd, const char *wild); + +bool +sp_function_exists(THD *thd, sp_name *name); + + +// This is needed since we have to read the functions before we +// do anything else. +void +sp_add_fun_to_lex(LEX *lex, sp_name *fun); +void +sp_merge_funs(LEX *dst, LEX *src); +int +sp_cache_functions(THD *thd, LEX *lex); + + +// +// Utilities... +// + +// Do a "use newdb". The current db is stored at olddb. +// If newdb is the same as the current one, nothing is changed. +// dbchangedp is set to true if the db was actually changed. +int +sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax, + bool no_access_check, bool *dbchangedp); + +// Like mysql_change_db() but handles empty db name and the send_ok() problem. +int +sp_change_db(THD *thd, char *db, bool no_access_check); + +#endif /* _SP_H_ */ diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc new file mode 100644 index 00000000000..056ac6d7e96 --- /dev/null +++ b/sql/sp_cache.cc @@ -0,0 +1,166 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#include "mysql_priv.h" +#include "sp_cache.h" +#include "sp_head.h" + +static pthread_mutex_t Cversion_lock; +static ulong Cversion = 0; + +void +sp_cache_init() +{ + pthread_mutex_init(&Cversion_lock, MY_MUTEX_INIT_FAST); +} + +void +sp_cache_clear(sp_cache **cp) +{ + sp_cache *c= *cp; + + if (c) + { + delete c; + *cp= NULL; + } +} + +void +sp_cache_insert(sp_cache **cp, sp_head *sp) +{ + sp_cache *c= *cp; + + if (! c) + c= new sp_cache(); + if (c) + { + ulong v; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + { + if (*cp) + c->remove_all(); + c->version= v; + } + c->insert(sp); + if (*cp == NULL) + *cp= c; + } +} + +sp_head * +sp_cache_lookup(sp_cache **cp, sp_name *name) +{ + ulong v; + sp_cache *c= *cp; + + if (! c) + return NULL; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + { + c->remove_all(); + c->version= v; + return NULL; + } + return c->lookup(name->m_qname.str, name->m_qname.length); +} + +bool +sp_cache_remove(sp_cache **cp, sp_name *name) +{ + sp_cache *c= *cp; + bool found= FALSE; + + if (c) + { + ulong v; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion++; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + c->remove_all(); + else + found= c->remove(name->m_qname.str, name->m_qname.length); + c->version= v+1; + } + return found; +} + +void +sp_cache_invalidate() +{ + pthread_mutex_lock(&Cversion_lock); // LOCK + Cversion++; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK +} + +static byte * +hash_get_key_for_sp_head(const byte *ptr, uint *plen, + my_bool first) +{ + sp_head *sp= (sp_head *)ptr; + + *plen= sp->m_qname.length; + return (byte*) sp->m_qname.str; +} + +static void +hash_free_sp_head(void *p) +{ + sp_head *sp= (sp_head *)p; + + delete sp; +} + +sp_cache::sp_cache() +{ + init(); +} + +sp_cache::~sp_cache() +{ + hash_free(&m_hashtable); +} + +void +sp_cache::init() +{ + hash_init(&m_hashtable, system_charset_info, 0, 0, 0, + hash_get_key_for_sp_head, hash_free_sp_head, 0); + version= 0; +} + +void +sp_cache::cleanup() +{ + hash_free(&m_hashtable); +} diff --git a/sql/sp_cache.h b/sql/sp_cache.h new file mode 100644 index 00000000000..754a987090e --- /dev/null +++ b/sql/sp_cache.h @@ -0,0 +1,107 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_CACHE_H_ +#define _SP_CACHE_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +class sp_head; +class sp_cache; + +/* Initialize the SP caching once at startup */ +void sp_cache_init(); + +/* Clear the cache *cp and set *cp to NULL */ +void sp_cache_clear(sp_cache **cp); + +/* Insert an SP to cache. If 'cp' points to NULL, it's set to a new cache */ +void sp_cache_insert(sp_cache **cp, sp_head *sp); + +/* Lookup an SP in cache */ +sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name); + +/* Remove an SP from cache. Returns true if something was removed */ +bool sp_cache_remove(sp_cache **cp, sp_name *name); + +/* Invalidate a cache */ +void sp_cache_invalidate(); + + +/* + * + * The cache class. Don't use this directly, use the C API above + * + */ + +class sp_cache +{ +public: + + ulong version; + + sp_cache(); + + ~sp_cache(); + + void + init(); + + void + cleanup(); + + inline void + insert(sp_head *sp) + { + my_hash_insert(&m_hashtable, (const byte *)sp); + } + + inline sp_head * + lookup(char *name, uint namelen) + { + return (sp_head *)hash_search(&m_hashtable, (const byte *)name, namelen); + } + + inline bool + remove(char *name, uint namelen) + { + sp_head *sp= lookup(name, namelen); + + if (sp) + { + hash_delete(&m_hashtable, (byte *)sp); + return TRUE; + } + return FALSE; + } + + inline void + remove_all() + { + cleanup(); + init(); + } + +private: + + HASH m_hashtable; + +}; // class sp_cache + +#endif /* _SP_CACHE_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc new file mode 100644 index 00000000000..47e3952bcdd --- /dev/null +++ b/sql/sp_head.cc @@ -0,0 +1,1865 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#include "mysql_priv.h" +#include "sql_acl.h" +#include "sp_head.h" +#include "sp.h" +#include "sp_pcontext.h" +#include "sp_rcontext.h" + +Item_result +sp_map_result_type(enum enum_field_types type) +{ + switch (type) + { + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_INT24: + return INT_RESULT; + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + return REAL_RESULT; + default: + return STRING_RESULT; + } +} + +/* + * Returns TRUE if the 'cmd' is a command that might result in + * multiple result sets being sent back. + * Note: This does not include SQLCOM_SELECT which is treated + * separately in sql_yacc.yy. 
+ */ +bool +sp_multi_results_command(enum enum_sql_command cmd) +{ + switch (cmd) { + case SQLCOM_ANALYZE: + case SQLCOM_CHECKSUM: + case SQLCOM_HA_READ: + case SQLCOM_SHOW_BINLOGS: + case SQLCOM_SHOW_BINLOG_EVENTS: + case SQLCOM_SHOW_CHARSETS: + case SQLCOM_SHOW_COLLATIONS: + case SQLCOM_SHOW_COLUMN_TYPES: + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_CREATE_DB: + case SQLCOM_SHOW_CREATE_FUNC: + case SQLCOM_SHOW_CREATE_PROC: + case SQLCOM_SHOW_DATABASES: + case SQLCOM_SHOW_ERRORS: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_SHOW_GRANTS: + case SQLCOM_SHOW_INNODB_STATUS: + case SQLCOM_SHOW_KEYS: + case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_MASTER_STAT: + case SQLCOM_SHOW_NEW_MASTER: + case SQLCOM_SHOW_OPEN_TABLES: + case SQLCOM_SHOW_PRIVILEGES: + case SQLCOM_SHOW_PROCESSLIST: + case SQLCOM_SHOW_SLAVE_HOSTS: + case SQLCOM_SHOW_SLAVE_STAT: + case SQLCOM_SHOW_STATUS: + case SQLCOM_SHOW_STATUS_FUNC: + case SQLCOM_SHOW_STATUS_PROC: + case SQLCOM_SHOW_STORAGE_ENGINES: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_VARIABLES: + case SQLCOM_SHOW_WARNS: + return TRUE; + default: + return FALSE; + } +} + +/* Evaluate a (presumed) func item. Always returns an item, the parameter +** if nothing else. +*/ +Item * +sp_eval_func_item(THD *thd, Item *it, enum enum_field_types type) +{ + DBUG_ENTER("sp_eval_func_item"); + it= it->this_item(); + DBUG_PRINT("info", ("type: %d", type)); + + if (!it->fixed && it->fix_fields(thd, 0, &it)) + { + DBUG_PRINT("info", ("fix_fields() failed")); + DBUG_RETURN(NULL); + } + + /* QQ How do we do this? Is there some better way? */ + if (type == MYSQL_TYPE_NULL) + it= new Item_null(); + else + { + switch (sp_map_result_type(type)) { + case INT_RESULT: + { + longlong i= it->val_int(); + + if (it->null_value) + { + DBUG_PRINT("info", ("INT_RESULT: null")); + it= new Item_null(); + } + else + { + DBUG_PRINT("info", ("INT_RESULT: %d", i)); + it= new Item_int(it->val_int()); + } + break; + } + case REAL_RESULT: + { + double d= it->val(); + + if (it->null_value) + { + DBUG_PRINT("info", ("REAL_RESULT: null")); + it= new Item_null(); + } + else + { + /* There's some difference between Item::new_item() and the + * constructor; the former crashes, the latter works... weird. */ + uint8 decimals= it->decimals; + uint32 max_length= it->max_length; + DBUG_PRINT("info", ("REAL_RESULT: %g", d)); + it= new Item_real(it->val()); + it->decimals= decimals; + it->max_length= max_length; + } + break; + } + default: + { + char buffer[MAX_FIELD_WIDTH]; + String tmp(buffer, sizeof(buffer), it->collation.collation); + String *s= it->val_str(&tmp); + + if (it->null_value) + { + DBUG_PRINT("info", ("default result: null")); + it= new Item_null(); + } + else + { + DBUG_PRINT("info",("default result: %*s",s->length(),s->c_ptr_quick())); + it= new Item_string(thd->strmake(s->c_ptr_quick(), s->length()), + s->length(), it->collation.collation); + } + break; + } + } + } + + DBUG_RETURN(it); +} + + +/* + * + * sp_name + * + */ + +void +sp_name::init_qname(THD *thd) +{ + m_qname.length= m_db.length+m_name.length+1; + m_qname.str= thd->alloc(m_qname.length+1); + sprintf(m_qname.str, "%*s.%*s", + m_db.length, (m_db.length ? m_db.str : ""), + m_name.length, m_name.str); +} + +sp_name * +sp_name_current_db_new(THD *thd, LEX_STRING name) +{ + sp_name *qname; + + if (! 
thd->db) + qname= new sp_name(name); + else + { + LEX_STRING db; + + db.length= strlen(thd->db); + db.str= thd->strmake(thd->db, db.length); + qname= new sp_name(db, name); + } + qname->init_qname(thd); + return qname; +} + + +/* ------------------------------------------------------------------ */ + + +/* + * + * sp_head + * + */ + +void * +sp_head::operator new(size_t size) +{ + DBUG_ENTER("sp_head::operator new"); + MEM_ROOT own_root; + sp_head *sp; + + init_alloc_root(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC); + sp= (sp_head *) alloc_root(&own_root, size); + sp->main_mem_root= own_root; + DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root)); + DBUG_RETURN(sp); +} + +void +sp_head::operator delete(void *ptr, size_t size) +{ + DBUG_ENTER("sp_head::operator delete"); + MEM_ROOT own_root; + sp_head *sp= (sp_head *) ptr; + + /* Make a copy of main_mem_root as free_root will free the sp */ + own_root= sp->main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx", + (ulong) &sp->mem_root, (ulong) &own_root)); + free_root(&own_root, MYF(0)); + + DBUG_VOID_RETURN; +} + + +sp_head::sp_head() + :Item_arena((bool)FALSE), m_returns_cs(NULL), m_has_return(FALSE), + m_simple_case(FALSE), m_multi_results(FALSE), m_in_handler(FALSE) +{ + DBUG_ENTER("sp_head::sp_head"); + + state= INITIALIZED; + m_backpatch.empty(); + m_lex.empty(); + DBUG_VOID_RETURN; +} + + +void +sp_head::init(LEX *lex) +{ + DBUG_ENTER("sp_head::init"); + + lex->spcont= m_pcont= new sp_pcontext(NULL); + my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8); + m_param_begin= m_param_end= m_returns_begin= m_returns_end= m_body_begin= 0; + m_qname.str= m_db.str= m_name.str= m_params.str= m_retstr.str= + m_body.str= m_defstr.str= 0; + m_qname.length= m_db.length= m_name.length= m_params.length= + m_retstr.length= m_body.length= m_defstr.length= 0; + m_returns_cs= NULL; + DBUG_VOID_RETURN; +} + +void +sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) +{ + DBUG_ENTER("sp_head::init_strings"); + uint n; /* Counter for nul trimming */ + /* During parsing, we must use thd->mem_root */ + MEM_ROOT *root= thd->mem_root; + + /* We have to copy strings to get them into the right memroot */ + if (name) + { + m_db.length= name->m_db.length; + if (name->m_db.length == 0) + m_db.str= NULL; + else + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_name.length= name->m_name.length; + m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + + if (name->m_qname.length == 0) + name->init_qname(thd); + m_qname.length= name->m_qname.length; + m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); + } + else if (thd->db) + { + m_db.length= thd->db_length; + m_db.str= strmake_root(root, thd->db, m_db.length); + } + + if (m_param_begin && m_param_end) + { + m_params.length= m_param_end - m_param_begin; + m_params.str= strmake_root(root, + (char *)m_param_begin, m_params.length); + } + + if (m_returns_begin && m_returns_end) + { + /* QQ KLUDGE: We can't seem to cut out just the type in the parser + (without the RETURNS), so we'll have to do it here. :-( + Furthermore, if there's a character type as well, it's not include + (beyond the m_returns_end pointer), in which case we need + m_returns_cs. */ + char *p= (char *)m_returns_begin+strspn((char *)m_returns_begin,"\t\n\r "); + p+= strcspn(p, "\t\n\r "); + p+= strspn(p, "\t\n\r "); + if (p < (char *)m_returns_end) + m_returns_begin= (uchar *)p; + /* While we're at it, trim the end too. 
*/ + p= (char *)m_returns_end-1; + while (p > (char *)m_returns_begin && + (*p == '\t' || *p == '\n' || *p == '\r' || *p == ' ')) + p-= 1; + m_returns_end= (uchar *)p+1; + if (m_returns_cs) + { + String s((char *)m_returns_begin, m_returns_end - m_returns_begin, + system_charset_info); + + s.append(' '); + s.append(m_returns_cs->csname); + m_retstr.length= s.length(); + m_retstr.str= strmake_root(root, s.ptr(), m_retstr.length); + } + else + { + m_retstr.length= m_returns_end - m_returns_begin; + m_retstr.str= strmake_root(root, + (char *)m_returns_begin, m_retstr.length); + } + } + m_body.length= lex->ptr - m_body_begin; + /* Trim nuls at the end */ + n= 0; + while (m_body.length && m_body_begin[m_body.length-1] == '\0') + { + m_body.length-= 1; + n+= 1; + } + m_body.str= strmake_root(root, (char *)m_body_begin, m_body.length); + m_defstr.length= lex->ptr - lex->buf; + m_defstr.length-= n; + m_defstr.str= strmake_root(root, (char *)lex->buf, m_defstr.length); + DBUG_VOID_RETURN; +} + +int +sp_head::create(THD *thd) +{ + DBUG_ENTER("sp_head::create"); + int ret; + + DBUG_PRINT("info", ("type: %d name: %s params: %s body: %s", + m_type, m_name.str, m_params.str, m_body.str)); + +#ifndef DBUG_OFF + optimize(); + { + String s; + sp_instr *i; + uint ip= 0; + while ((i = get_instr(ip))) + { + char buf[8]; + + sprintf(buf, "%4u: ", ip); + s.append(buf); + i->print(&s); + s.append('\n'); + ip+= 1; + } + s.append('\0'); + DBUG_PRINT("info", ("Code %s\n%s", m_qname.str, s.ptr())); + } +#endif + + if (m_type == TYPE_ENUM_FUNCTION) + ret= sp_create_function(thd, this); + else + ret= sp_create_procedure(thd, this); + + DBUG_RETURN(ret); +} + +sp_head::~sp_head() +{ + destroy(); + if (m_thd) + restore_thd_mem_root(m_thd); +} + +void +sp_head::destroy() +{ + DBUG_ENTER("sp_head::destroy"); + DBUG_PRINT("info", ("name: %s", m_name.str)); + sp_instr *i; + LEX *lex; + + for (uint ip = 0 ; (i = get_instr(ip)) ; ip++) + delete i; + delete_dynamic(&m_instr); + m_pcont->destroy(); + free_items(free_list); + while ((lex= (LEX *)m_lex.pop())) + { + if (lex != &m_thd->main_lex) // We got interrupted and have lex'es left + delete lex; + } + DBUG_VOID_RETURN; +} + +int +sp_head::execute(THD *thd) +{ + DBUG_ENTER("sp_head::execute"); + char olddb[128]; + bool dbchanged; + sp_rcontext *ctx; + int ret= 0; + uint ip= 0; + Item_arena *old_arena; + + +#ifndef EMBEDDED_LIBRARY + if (check_stack_overrun(thd, olddb)) + { + DBUG_RETURN(-1); + } +#endif + + dbchanged= FALSE; + if (m_db.length && + (ret= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0, &dbchanged))) + goto done; + + if ((ctx= thd->spcont)) + ctx->clear_handler(); + thd->query_error= 0; + old_arena= thd->current_arena; + thd->current_arena= this; + + do + { + sp_instr *i; + uint hip; // Handler ip + + i = get_instr(ip); // Returns NULL when we're done. + if (i == NULL) + break; + DBUG_PRINT("execute", ("Instruction %u", ip)); + ret= i->execute(thd, &ip); + if (i->free_list) + cleanup_items(i->free_list); + // Check if an exception has occurred and a handler has been found + // Note: We havo to check even if ret==0, since warnings (and some + // errors don't return a non-zero value. 
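+      // Dispatch to the innermost matching handler, if any: a CONTINUE
+      // handler first saves the frame's variables and pushes the current
+      // ip so a later hreturn can resume here, while an EXIT handler (the
+      // default case) simply jumps; in both cases the error state is
+      // cleared and execution continues at the handler's ip.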
+ if (!thd->killed && ctx) + { + uint hf; + + switch (ctx->found_handler(&hip, &hf)) + { + case SP_HANDLER_NONE: + break; + case SP_HANDLER_CONTINUE: + ctx->save_variables(hf); + ctx->push_hstack(ip); + // Fall through + default: + ip= hip; + ret= 0; + ctx->clear_handler(); + ctx->in_handler= TRUE; + continue; + } + } + } while (ret == 0 && !thd->killed && !thd->query_error); + + cleanup_items(thd->current_arena->free_list); + thd->current_arena= old_arena; + + done: + DBUG_PRINT("info", ("ret=%d killed=%d query_error=%d", + ret, thd->killed, thd->query_error)); + + if (thd->killed || thd->query_error) + ret= -1; + /* If the DB has changed, the pointer has changed too, but the + original thd->db will then have been freed */ + if (dbchanged) + { + if (! thd->killed) + ret= sp_change_db(thd, olddb, 0); + } + DBUG_RETURN(ret); +} + + +int +sp_head::execute_function(THD *thd, Item **argp, uint argcount, Item **resp) +{ + DBUG_ENTER("sp_head::execute_function"); + DBUG_PRINT("info", ("function %s", m_name.str)); + uint csize = m_pcont->max_pvars(); + uint params = m_pcont->current_pvars(); + uint hmax = m_pcont->max_handlers(); + uint cmax = m_pcont->max_cursors(); + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + uint i; + int ret; + + if (argcount != params) + { + // Need to use my_printf_error here, or it will not terminate the + // invoking query properly. + my_printf_error(ER_SP_WRONG_NO_OF_ARGS, ER(ER_SP_WRONG_NO_OF_ARGS), MYF(0), + "FUNCTION", m_name.str, params, argcount); + DBUG_RETURN(-1); + } + + // QQ Should have some error checking here? (types, etc...) + nctx= new sp_rcontext(csize, hmax, cmax); + for (i= 0 ; i < params && i < argcount ; i++) + { + sp_pvar_t *pvar = m_pcont->find_pvar(i); + Item *it= sp_eval_func_item(thd, *argp++, pvar->type); + + if (it) + nctx->push_item(it); + else + { + DBUG_RETURN(-1); + } + } +#ifdef NOT_WORKING + /* + Close tables opened for subselect in argument list + This can't be done as this will close all other tables used + by the query. + */ + close_thread_tables(thd); +#endif + // The rest of the frame are local variables which are all IN. + // Default all variables to null (those with default clauses will + // be set by an set instruction). + { + Item_null *nit= NULL; // Re-use this, and only create if needed + for (; i < csize ; i++) + { + if (! 
nit) + nit= new Item_null(); + nctx->push_item(nit); + } + } + thd->spcont= nctx; + + ret= execute(thd); + + if (m_type == TYPE_ENUM_FUNCTION && ret == 0) + { + /* We need result only in function but not in trigger */ + Item *it= nctx->get_result(); + + if (it) + *resp= it; + else + { + my_printf_error(ER_SP_NORETURNEND, ER(ER_SP_NORETURNEND), MYF(0), + m_name.str); + ret= -1; + } + } + + nctx->pop_all_cursors(); // To avoid memory leaks after an error + thd->spcont= octx; + DBUG_RETURN(ret); +} + +int +sp_head::execute_procedure(THD *thd, List<Item> *args) +{ + DBUG_ENTER("sp_head::execute_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + int ret= 0; + uint csize = m_pcont->max_pvars(); + uint params = m_pcont->current_pvars(); + uint hmax = m_pcont->max_handlers(); + uint cmax = m_pcont->max_cursors(); + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + my_bool tmp_octx = FALSE; // True if we have allocated a temporary octx + + if (args->elements != params) + { + net_printf(thd, ER_SP_WRONG_NO_OF_ARGS, "PROCEDURE", m_name.str, + params, args->elements); + DBUG_RETURN(-1); + } + + if (csize > 0 || hmax > 0 || cmax > 0) + { + Item_null *nit= NULL; // Re-use this, and only create if needed + uint i; + List_iterator_fast<Item> li(*args); + Item *it; + + nctx= new sp_rcontext(csize, hmax, cmax); + if (! octx) + { // Create a temporary old context + octx= new sp_rcontext(csize, hmax, cmax); + tmp_octx= TRUE; + } + // QQ: Should do type checking? + for (i = 0 ; (it= li++) && i < params ; i++) + { + sp_pvar_t *pvar = m_pcont->find_pvar(i); + + if (! pvar) + nctx->set_oindex(i, -1); // Shouldn't happen + else + { + if (pvar->mode == sp_param_out) + { + if (! nit) + nit= new Item_null(); + nctx->push_item(nit); // OUT + } + else + { + Item *it2= sp_eval_func_item(thd, it,pvar->type); + + if (it2) + nctx->push_item(it2); // IN or INOUT + else + { + ret= -1; // Eval failed + break; + } + } + // Note: If it's OUT or INOUT, it must be a variable. + // QQ: We can check for global variables here, or should we do it + // while parsing? + if (pvar->mode == sp_param_in) + nctx->set_oindex(i, -1); // IN + else // OUT or INOUT + nctx->set_oindex(i, static_cast<Item_splocal *>(it)->get_offset()); + } + } + // Close tables opened for subselect in argument list + close_thread_tables(thd); + + // The rest of the frame are local variables which are all IN. + // Default all variables to null (those with default clauses will + // be set by an set instruction). + for (; i < csize ; i++) + { + if (! nit) + nit= new Item_null(); + nctx->push_item(nit); + } + thd->spcont= nctx; + } + + if (! ret) + ret= execute(thd); + + // Don't copy back OUT values if we got an error + if (ret) + { + if (thd->net.report_error) + send_error(thd, 0, NullS); + } + else if (csize > 0) + { + List_iterator_fast<Item> li(*args); + Item *it; + + // Copy back all OUT or INOUT values to the previous frame, or + // set global user variables + for (uint i = 0 ; (it= li++) && i < params ; i++) + { + int oi = nctx->get_oindex(i); + + if (oi >= 0) + { + if (! tmp_octx) + octx->set_item(nctx->get_oindex(i), nctx->get_item(i)); + else + { + // QQ Currently we just silently ignore non-user-variable arguments. 
+ // We should check this during parsing, when setting up the call + // above + if (it->type() == Item::FUNC_ITEM) + { + Item_func *fi= static_cast<Item_func*>(it); + + if (fi->functype() == Item_func::GUSERVAR_FUNC) + { // A global user variable + Item *item= nctx->get_item(i); + Item_func_set_user_var *suv; + Item_func_get_user_var *guv= + static_cast<Item_func_get_user_var*>(fi); + + suv= new Item_func_set_user_var(guv->get_name(), item); + suv->fix_fields(thd, NULL, &item); + suv->fix_length_and_dec(); + suv->check(); + suv->update(); + } + } + } + } + } + } + + if (tmp_octx) + octx= NULL; + if (nctx) + nctx->pop_all_cursors(); // To avoid memory leaks after an error + thd->spcont= octx; + + DBUG_RETURN(ret); +} + + +// Reset lex during parsing, before we parse a sub statement. +void +sp_head::reset_lex(THD *thd) +{ + DBUG_ENTER("sp_head::reset_lex"); + LEX *sublex; + LEX *oldlex= thd->lex; + + (void)m_lex.push_front(oldlex); + thd->lex= sublex= new st_lex; + + /* Reset most stuff. The length arguments doesn't matter here. */ + lex_start(thd, oldlex->buf, (ulong) (oldlex->end_of_query - oldlex->ptr)); + + /* We must reset ptr and end_of_query again */ + sublex->ptr= oldlex->ptr; + sublex->end_of_query= oldlex->end_of_query; + sublex->tok_start= oldlex->tok_start; + sublex->yylineno= oldlex->yylineno; + /* And keep the SP stuff too */ + sublex->sphead= oldlex->sphead; + sublex->spcont= oldlex->spcont; + /* And trigger related stuff too */ + sublex->trg_chistics= oldlex->trg_chistics; + sublex->trg_table= oldlex->trg_table; + sublex->sp_lex_in_use= FALSE; + DBUG_VOID_RETURN; +} + +// Restore lex during parsing, after we have parsed a sub statement. +void +sp_head::restore_lex(THD *thd) +{ + DBUG_ENTER("sp_head::restore_lex"); + LEX *sublex= thd->lex; + LEX *oldlex= (LEX *)m_lex.pop(); + + if (! oldlex) + return; // Nothing to restore + + // Update some state in the old one first + oldlex->ptr= sublex->ptr; + oldlex->next_state= sublex->next_state; + + // Collect some data from the sub statement lex. + sp_merge_funs(oldlex, sublex); +#ifdef NOT_USED_NOW + // QQ We're not using this at the moment. + if (sublex.sql_command == SQLCOM_CALL) + { + // It would be slightly faster to keep the list sorted, but we need + // an "insert before" method to do that. + char *proc= sublex.udf.name.str; + + List_iterator_fast<char *> li(m_calls); + char **it; + + while ((it= li++)) + if (my_strcasecmp(system_charset_info, proc, *it) == 0) + break; + if (! it) + m_calls.push_back(&proc); + + } + // Merge used tables + // QQ ...or just open tables in thd->open_tables? + // This is not entirerly clear at the moment, but for now, we collect + // tables here. + for (sl= sublex.all_selects_list ; + sl ; + sl= sl->next_select()) + { + for (TABLE_LIST *tables= sl->get_table_list() ; + tables ; + tables= tables->next) + { + List_iterator_fast<char *> li(m_tables); + char **tb; + + while ((tb= li++)) + if (my_strcasecmp(system_charset_info, tables->real_name, *tb) == 0) + break; + if (! tb) + m_tables.push_back(&tables->real_name); + } + } +#endif + if (! 
sublex->sp_lex_in_use) + delete sublex; + thd->lex= oldlex; + DBUG_VOID_RETURN; +} + +void +sp_head::push_backpatch(sp_instr *i, sp_label_t *lab) +{ + bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t)); + + if (bp) + { + bp->lab= lab; + bp->instr= i; + (void)m_backpatch.push_front(bp); + } +} + +void +sp_head::backpatch(sp_label_t *lab) +{ + bp_t *bp; + uint dest= instructions(); + List_iterator_fast<bp_t> li(m_backpatch); + + while ((bp= li++)) + { + if (bp->lab == lab || + (bp->lab->type == SP_LAB_REF && + my_strcasecmp(system_charset_info, bp->lab->name, lab->name) == 0)) + { + if (bp->lab->type != SP_LAB_REF) + bp->instr->backpatch(dest, lab->ctx); + else + { + sp_label_t *dstlab= bp->lab->ctx->find_label(lab->name); + + if (dstlab) + { + bp->lab= lab; + bp->instr->backpatch(dest, dstlab->ctx); + } + } + } + } +} + +int +sp_head::check_backpatch(THD *thd) +{ + bp_t *bp; + List_iterator_fast<bp_t> li(m_backpatch); + + while ((bp= li++)) + { + if (bp->lab->type == SP_LAB_REF) + { + net_printf(thd, ER_SP_LILABEL_MISMATCH, "GOTO", bp->lab->name); + return -1; + } + } + return 0; +} + +void +sp_head::set_info(char *definer, uint definerlen, + longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode) +{ + char *p= strchr(definer, '@'); + uint len; + + if (! p) + p= definer; // Weird... + len= p-definer; + m_definer_user.str= strmake_root(mem_root, definer, len); + m_definer_user.length= len; + len= definerlen-len-1; + m_definer_host.str= strmake_root(mem_root, p+1, len); + m_definer_host.length= len; + m_created= created; + m_modified= modified; + m_chistics= (st_sp_chistics *) memdup_root(mem_root, (char*) chistics, + sizeof(*chistics)); + if (m_chistics->comment.length == 0) + m_chistics->comment.str= 0; + else + m_chistics->comment.str= strmake_root(mem_root, + m_chistics->comment.str, + m_chistics->comment.length); + m_sql_mode= sql_mode; +} + +void +sp_head::reset_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::reset_thd_mem_root"); + m_thd_root= thd->mem_root; + thd->mem_root= &main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to thd mem root 0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + free_list= thd->free_list; // Keep the old list + thd->free_list= NULL; // Start a new one + /* Copy the db, since substatements will point to it */ + m_thd_db= thd->db; + thd->db= thd->strmake(thd->db, thd->db_length); + m_thd= thd; + DBUG_VOID_RETURN; +} + +void +sp_head::restore_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::restore_thd_mem_root"); + Item *flist= free_list; // The old list + set_item_arena(thd); // Get new free_list and mem_root + state= INITIALIZED; + + DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + thd->free_list= flist; // Restore the old one + thd->db= m_thd_db; // Restore the original db pointer + thd->mem_root= m_thd_root; + m_thd= NULL; + DBUG_VOID_RETURN; +} + + +int +sp_head::show_create_procedure(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + ulong old_sql_mode; + sys_var *sql_mode_var; + byte *sql_mode_str; + ulong sql_mode_len; + + DBUG_ENTER("sp_head::show_create_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + old_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= m_sql_mode; + sql_mode_var= find_sys_var("SQL_MODE", 8); + if (sql_mode_var) + { + sql_mode_str= 
sql_mode_var->value_ptr(thd, OPT_SESSION, 0); + sql_mode_len= strlen((char*) sql_mode_str); + } + + field_list.push_back(new Item_empty_string("Procedure", NAME_LEN)); + if (sql_mode_var) + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Procedure", + max(buffer.length(), 1024))); + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + { + res= 1; + goto done; + } + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + if (sql_mode_var) + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + res= protocol->write(); + send_eof(thd); + + done: + thd->variables.sql_mode= old_sql_mode; + DBUG_RETURN(res); +} + + +/* + Add instruction to SP + + SYNOPSIS + sp_head::add_instr() + instr Instruction +*/ + +void sp_head::add_instr(sp_instr *instr) +{ + instr->free_list= m_thd->free_list; + m_thd->free_list= 0; + insert_dynamic(&m_instr, (gptr)&instr); +} + + +int +sp_head::show_create_function(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + ulong old_sql_mode; + sys_var *sql_mode_var; + byte *sql_mode_str; + ulong sql_mode_len; + DBUG_ENTER("sp_head::show_create_function"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + old_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= m_sql_mode; + sql_mode_var= find_sys_var("SQL_MODE", 8); + if (sql_mode_var) + { + sql_mode_str= sql_mode_var->value_ptr(thd, OPT_SESSION, 0); + sql_mode_len= strlen((char*) sql_mode_str); + } + + field_list.push_back(new Item_empty_string("Function",NAME_LEN)); + if (sql_mode_var) + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + field_list.push_back(new Item_empty_string("Create Function", + max(buffer.length(),1024))); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + { + res= 1; + goto done; + } + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + if (sql_mode_var) + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + res= protocol->write(); + send_eof(thd); + + done: + thd->variables.sql_mode= old_sql_mode; + DBUG_RETURN(res); +} + +void +sp_head::optimize() +{ + List<sp_instr> bp; + sp_instr *i; + uint src, dst; + + opt_mark(0); + + bp.empty(); + src= dst= 0; + while ((i= get_instr(src))) + { + if (! 
i->marked) + { + delete i; + src+= 1; + } + else + { + if (src != dst) + { + sp_instr *ibp; + List_iterator_fast<sp_instr> li(bp); + + set_dynamic(&m_instr, (gptr)&i, dst); + while ((ibp= li++)) + { + sp_instr_jump *ji= static_cast<sp_instr_jump *>(ibp); + if (ji->m_dest == src) + ji->m_dest= dst; + } + } + i->opt_move(dst, &bp); + src+= 1; + dst+= 1; + } + } + m_instr.elements= dst; + bp.empty(); +} + +void +sp_head::opt_mark(uint ip) +{ + sp_instr *i; + + while ((i= get_instr(ip)) && !i->marked) + ip= i->opt_mark(this); +} + +// ------------------------------------------------------------------ + +// +// sp_instr_stmt +// +sp_instr_stmt::~sp_instr_stmt() +{ + if (m_lex) + delete m_lex; +} + +int +sp_instr_stmt::execute(THD *thd, uint *nextp) +{ + char *query; + uint32 query_length; + DBUG_ENTER("sp_instr_stmt::execute"); + DBUG_PRINT("info", ("command: %d", m_lex->sql_command)); + int res; + + query= thd->query; + query_length= thd->query_length; + if (!(res= alloc_query(thd, m_query.str, m_query.length+1))) + { + if (query_cache_send_result_to_client(thd, + thd->query, thd->query_length) <= 0) + { + res= exec_stmt(thd, m_lex); + query_cache_end_of_result(thd); + } + thd->query= query; + thd->query_length= query_length; + } + *nextp = m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_stmt::print(String *str) +{ + str->reserve(12); + str->append("stmt "); + str->qs_append((uint)m_lex->sql_command); +} + + +int +sp_instr_stmt::exec_stmt(THD *thd, LEX *lex) +{ + LEX *olex; // The other lex + int res; + + olex= thd->lex; // Save the other lex + thd->lex= lex; // Use my own lex + thd->lex->thd = thd; // QQ Not reentrant! + thd->lex->unit.thd= thd; // QQ Not reentrant + thd->free_list= NULL; + + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->query_id= query_id++; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + + reset_stmt_for_execute(thd, lex); + + res= mysql_execute_command(thd); + + lex->unit.cleanup(); + if (thd->lock || thd->open_tables || thd->derived_tables) + { + thd->proc_info="closing tables"; + close_thread_tables(thd); /* Free tables */ + } + + thd->lex= olex; // Restore the other lex + + return res; +} + +// +// sp_instr_set +// +int +sp_instr_set::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_set::execute"); + DBUG_PRINT("info", ("offset: %u", m_offset)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_value, m_type); + if (! it) + res= -1; + else + { + res= 0; + thd->spcont->set_item(m_offset, it); + } + *nextp = m_ip+1; + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_set::print(String *str) +{ + str->reserve(12); + str->append("set "); + str->qs_append(m_offset); + str->append(' '); + m_value->print(str); +} + +// +// sp_instr_set_user_var +// +int +sp_instr_set_user_var::execute(THD *thd, uint *nextp) +{ + int res= 0; + + DBUG_ENTER("sp_instr_set_user_var::execute"); + /* + It is ok to pass 0 as 3rd argument to fix_fields() since + Item_func_set_user_var::fix_fields() won't use it. + QQ: Still unsure what should we return in case of error 1 or -1 ? 
+ */ + if (!m_set_var_item.fixed && m_set_var_item.fix_fields(thd, 0, 0) || + m_set_var_item.check() || m_set_var_item.update()) + res= -1; + *nextp= m_ip + 1; + DBUG_RETURN(res); +} + +void +sp_instr_set_user_var::print(String *str) +{ + m_set_var_item.print_as_stmt(str); +} + +// +// sp_instr_set_trigger_field +// +int +sp_instr_set_trigger_field::execute(THD *thd, uint *nextp) +{ + int res= 0; + + DBUG_ENTER("sp_instr_set_trigger_field::execute"); + /* QQ: Still unsure what should we return in case of error 1 or -1 ? */ + if (!value->fixed && value->fix_fields(thd, 0, &value) || + trigger_field.fix_fields(thd, 0, 0) || + (value->save_in_field(trigger_field.field, 0) < 0)) + res= -1; + *nextp= m_ip + 1; + DBUG_RETURN(res); +} + +void +sp_instr_set_trigger_field::print(String *str) +{ + str->append("set ", 4); + trigger_field.print(str); + str->append(":=", 2); + value->print(str); +} + +// +// sp_instr_jump +// +int +sp_instr_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + + *nextp= m_dest; + DBUG_RETURN(0); +} + +void +sp_instr_jump::print(String *str) +{ + str->reserve(12); + str->append("jump "); + str->qs_append(m_dest); +} + +uint +sp_instr_jump::opt_mark(sp_head *sp) +{ + m_dest= opt_shortcut_jump(sp, this); + if (m_dest != m_ip+1) /* Jumping to following instruction? */ + marked= 1; + m_optdest= sp->get_instr(m_dest); + return m_dest; +} + +uint +sp_instr_jump::opt_shortcut_jump(sp_head *sp, sp_instr *start) +{ + uint dest= m_dest; + sp_instr *i; + + while ((i= sp->get_instr(dest))) + { + uint ndest; + + if (start == i) + break; + ndest= i->opt_shortcut_jump(sp, start); + if (ndest == dest) + break; + dest= ndest; + } + return dest; +} + +void +sp_instr_jump::opt_move(uint dst, List<sp_instr> *bp) +{ + if (m_dest > m_ip) + bp->push_back(this); // Forward + else if (m_optdest) + m_dest= m_optdest->m_ip; // Backward + m_ip= dst; +} + +// +// sp_instr_jump_if +// +int +sp_instr_jump_if::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump_if::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_expr, MYSQL_TYPE_TINY); + if (!it) + res= -1; + else + { + res= 0; + if (it->val_int()) + *nextp = m_dest; + else + *nextp = m_ip+1; + } + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_jump_if::print(String *str) +{ + str->reserve(12); + str->append("jump_if "); + str->qs_append(m_dest); + str->append(' '); + m_expr->print(str); +} + +uint +sp_instr_jump_if::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_jump_if_not +// +int +sp_instr_jump_if_not::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump_if_not::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_expr, MYSQL_TYPE_TINY); + if (! it) + res= -1; + else + { + res= 0; + if (! 
it->val_int()) + *nextp = m_dest; + else + *nextp = m_ip+1; + } + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_jump_if_not::print(String *str) +{ + str->reserve(16); + str->append("jump_if_not "); + str->qs_append(m_dest); + str->append(' '); + m_expr->print(str); +} + +uint +sp_instr_jump_if_not::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_freturn +// +int +sp_instr_freturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_freturn::execute"); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_value, m_type); + if (! it) + res= -1; + else + { + res= 0; + thd->spcont->set_result(it); + } + *nextp= UINT_MAX; + DBUG_RETURN(res); +} + +void +sp_instr_freturn::print(String *str) +{ + str->reserve(12); + str->append("freturn "); + str->qs_append((uint)m_type); + str->append(' '); + m_value->print(str); +} + +// +// sp_instr_hpush_jump +// +int +sp_instr_hpush_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpush_jump::execute"); + List_iterator_fast<sp_cond_type_t> li(m_cond); + sp_cond_type_t *p; + + while ((p= li++)) + thd->spcont->push_handler(p, m_handler, m_type, m_frame); + + *nextp= m_dest; + DBUG_RETURN(0); +} + +void +sp_instr_hpush_jump::print(String *str) +{ + str->reserve(32); + str->append("hpush_jump "); + str->qs_append(m_dest); + str->append(" t="); + str->qs_append(m_type); + str->append(" f="); + str->qs_append(m_frame); + str->append(" h="); + str->qs_append(m_handler); +} + +uint +sp_instr_hpush_jump::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_hpop +// +int +sp_instr_hpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpop::execute"); + thd->spcont->pop_handlers(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +void +sp_instr_hpop::print(String *str) +{ + str->reserve(12); + str->append("hpop "); + str->qs_append(m_count); +} + +void +sp_instr_hpop::backpatch(uint dest, sp_pcontext *dst_ctx) +{ + m_count= m_ctx->diff_handlers(dst_ctx); +} + + +// +// sp_instr_hreturn +// +int +sp_instr_hreturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hreturn::execute"); + if (m_dest) + *nextp= m_dest; + else + { + thd->spcont->restore_variables(m_frame); + *nextp= thd->spcont->pop_hstack(); + } + thd->spcont->in_handler= FALSE; + DBUG_RETURN(0); +} + +void +sp_instr_hreturn::print(String *str) +{ + str->reserve(16); + str->append("hreturn "); + str->qs_append(m_frame); + if (m_dest) + str->qs_append(m_dest); +} + +uint +sp_instr_hreturn::opt_mark(sp_head *sp) +{ + if (m_dest) + return sp_instr_jump::opt_mark(sp); + else + { + marked= 1; + return UINT_MAX; + } +} + + +// +// sp_instr_cpush +// +int +sp_instr_cpush::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_cpush::execute"); + thd->spcont->push_cursor(m_lex); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +sp_instr_cpush::~sp_instr_cpush() +{ + if (m_lex) + delete m_lex; +} + +void +sp_instr_cpush::print(String *str) +{ + str->append("cpush"); +} + +// +// 
sp_instr_cpop +// +int +sp_instr_cpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_cpop::execute"); + thd->spcont->pop_cursors(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +void +sp_instr_cpop::print(String *str) +{ + str->reserve(12); + str->append("cpop "); + str->qs_append(m_count); +} + +void +sp_instr_cpop::backpatch(uint dest, sp_pcontext *dst_ctx) +{ + m_count= m_ctx->diff_cursors(dst_ctx); +} + +// +// sp_instr_copen +// +int +sp_instr_copen::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_copen::execute"); + + if (! c) + res= -1; + else + { + LEX *lex= c->pre_open(thd); + + if (! lex) + res= -1; + else + res= exec_stmt(thd, lex); + c->post_open(thd, (lex ? TRUE : FALSE)); + } + + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_copen::print(String *str) +{ + str->reserve(12); + str->append("copen "); + str->qs_append(m_cursor); +} + +// +// sp_instr_cclose +// +int +sp_instr_cclose::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_cclose::execute"); + + if (! c) + res= -1; + else + res= c->close(thd); + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_cclose::print(String *str) +{ + str->reserve(12); + str->append("cclose "); + str->qs_append(m_cursor); +} + +// +// sp_instr_cfetch +// +int +sp_instr_cfetch::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_cfetch::execute"); + + if (! c) + res= -1; + else + res= c->fetch(thd, &m_varlist); + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_cfetch::print(String *str) +{ + List_iterator_fast<struct sp_pvar> li(m_varlist); + sp_pvar_t *pv; + + str->reserve(12); + str->append("cfetch "); + str->qs_append(m_cursor); + while ((pv= li++)) + { + str->reserve(8); + str->append(' '); + str->qs_append(pv->offset); + } +} + +// +// sp_instr_error +// +int +sp_instr_error::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_error::execute"); + + my_error(m_errcode, MYF(0)); + *nextp= m_ip+1; + DBUG_RETURN(-1); +} + +void +sp_instr_error::print(String *str) +{ + str->reserve(12); + str->append("error "); + str->qs_append(m_errcode); +} + +/* ------------------------------------------------------------------ */ + + +// +// Security context swapping +// + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +void +sp_change_security_context(THD *thd, sp_head *sp, st_sp_security_context *ctxp) +{ + ctxp->changed= (sp->m_chistics->suid != SP_IS_NOT_SUID && + (strcmp(sp->m_definer_user.str, thd->priv_user) || + strcmp(sp->m_definer_host.str, thd->priv_host))); + + if (ctxp->changed) + { + ctxp->master_access= thd->master_access; + ctxp->db_access= thd->db_access; + ctxp->priv_user= thd->priv_user; + strncpy(ctxp->priv_host, thd->priv_host, sizeof(ctxp->priv_host)); + ctxp->user= thd->user; + ctxp->host= thd->host; + ctxp->ip= thd->ip; + + /* Change thise just to do the acl_getroot_no_password */ + thd->user= sp->m_definer_user.str; + thd->host= thd->ip = sp->m_definer_host.str; + + if (acl_getroot_no_password(thd)) + { // Failed, run as invoker for now + ctxp->changed= FALSE; + thd->master_access= ctxp->master_access; + thd->db_access= ctxp->db_access; + thd->priv_user= ctxp->priv_user; + strncpy(thd->priv_host, ctxp->priv_host, sizeof(thd->priv_host)); + } + + /* Restore these immiediately */ + thd->user= ctxp->user; + thd->host= ctxp->host; + thd->ip= ctxp->ip; + } +} + +void +sp_restore_security_context(THD 
*thd, sp_head *sp, st_sp_security_context *ctxp) +{ + if (ctxp->changed) + { + ctxp->changed= FALSE; + thd->master_access= ctxp->master_access; + thd->db_access= ctxp->db_access; + thd->priv_user= ctxp->priv_user; + strncpy(thd->priv_host, ctxp->priv_host, sizeof(thd->priv_host)); + } +} + +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ diff --git a/sql/sp_head.h b/sql/sp_head.h new file mode 100644 index 00000000000..4bfe1076f65 --- /dev/null +++ b/sql/sp_head.h @@ -0,0 +1,904 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_HEAD_H_ +#define _SP_HEAD_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +#include <stddef.h> + +// Values for the type enum. This reflects the order of the enum declaration +// in the CREATE TABLE command. +#define TYPE_ENUM_FUNCTION 1 +#define TYPE_ENUM_PROCEDURE 2 +#define TYPE_ENUM_TRIGGER 3 + +Item_result +sp_map_result_type(enum enum_field_types type); + +bool +sp_multi_results_command(enum enum_sql_command cmd); + +struct sp_label; +class sp_instr; +struct sp_cond_type; +struct sp_pvar; + +class sp_name : public Sql_alloc +{ +public: + + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_qname; + + sp_name(LEX_STRING name) + : m_name(name) + { + m_db.str= m_qname.str= 0; + m_db.length= m_qname.length= 0; + } + + sp_name(LEX_STRING db, LEX_STRING name) + : m_db(db), m_name(name) + { + m_qname.str= 0; + m_qname.length= 0; + } + + // Init. the qualified name from the db and name. + void init_qname(THD *thd); // thd for memroot allocation + + ~sp_name() + {} +}; + +sp_name * +sp_name_current_db_new(THD *thd, LEX_STRING name); + + +class sp_head :private Item_arena +{ + sp_head(const sp_head &); /* Prevent use of these */ + void operator=(sp_head &); + +public: + + int m_type; // TYPE_ENUM_FUNCTION or TYPE_ENUM_PROCEDURE + enum enum_field_types m_returns; // For FUNCTIONs only + CHARSET_INFO *m_returns_cs; // For FUNCTIONs only + my_bool m_has_return; // For FUNCTIONs only + my_bool m_simple_case; // TRUE if parsing simple case, FALSE otherwise + my_bool m_multi_results; // TRUE if a procedure with SELECT(s) + my_bool m_in_handler; // TRUE if parser in a handler body + uchar *m_tmp_query; // Temporary pointer to sub query string + uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value + st_sp_chistics *m_chistics; + ulong m_sql_mode; // For SHOW CREATE +#if NOT_USED_NOW + // QQ We're not using this at the moment. + List<char *> m_calls; // Called procedures. + List<char *> m_tables; // Used tables. 
+#endif + LEX_STRING m_qname; // db.name + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_params; + LEX_STRING m_retstr; // For FUNCTIONs only + LEX_STRING m_body; + LEX_STRING m_defstr; + LEX_STRING m_definer_user; + LEX_STRING m_definer_host; + longlong m_created; + longlong m_modified; + // Pointers set during parsing + uchar *m_param_begin, *m_param_end, *m_returns_begin, *m_returns_end, + *m_body_begin; + + static void * + operator new(size_t size); + + static void + operator delete(void *ptr, size_t size); + + sp_head(); + + // Initialize after we have reset mem_root + void + init(LEX *lex); + + // Initialize strings after parsing header + void + init_strings(THD *thd, LEX *lex, sp_name *name); + + int + create(THD *thd); + + virtual ~sp_head(); + + // Free memory + void + destroy(); + + int + execute_function(THD *thd, Item **args, uint argcount, Item **resp); + + int + execute_procedure(THD *thd, List<Item> *args); + + int + show_create_procedure(THD *thd); + + int + show_create_function(THD *thd); + + void + add_instr(sp_instr *instr); + + inline uint + instructions() + { + return m_instr.elements; + } + + inline sp_instr * + last_instruction() + { + sp_instr *i; + + get_dynamic(&m_instr, (gptr)&i, m_instr.elements-1); + return i; + } + + // Resets lex in 'thd' and keeps a copy of the old one. + void + reset_lex(THD *thd); + + // Restores lex in 'thd' from our copy, but keeps some status from the + // one in 'thd', like ptr, tables, fields, etc. + void + restore_lex(THD *thd); + + // Put the instruction on the backpatch list, associated with the label. + void + push_backpatch(sp_instr *, struct sp_label *); + + // Update all instruction with this label in the backpatch list to + // the current position. + void + backpatch(struct sp_label *); + + // Check that no unresolved references exist. + // If none found, 0 is returned, otherwise errors have been issued + // and -1 is returned. + // This is called by the parser at the end of a create procedure/function. + int + check_backpatch(THD *thd); + + char *name(uint *lenp = 0) const + { + if (lenp) + *lenp= m_name.length; + return m_name.str; + } + + char *create_string(THD *thd, ulong *lenp); + + inline Item_result result() + { + return sp_map_result_type(m_returns); + } + + void set_info(char *definer, uint definerlen, + longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode); + + void reset_thd_mem_root(THD *thd); + + void restore_thd_mem_root(THD *thd); + + void optimize(); + void opt_mark(uint ip); + + inline sp_instr * + get_instr(uint i) + { + sp_instr *ip; + + if (i < m_instr.elements) + get_dynamic(&m_instr, (gptr)&ip, i); + else + ip= NULL; + return ip; + } + +private: + + MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root + THD *m_thd; // Set if we have reset mem_root + char *m_thd_db; // Original thd->db pointer + + sp_pcontext *m_pcont; // Parse context + List<LEX> m_lex; // Temp. store for the other lex + DYNAMIC_ARRAY m_instr; // The "instructions" + typedef struct + { + struct sp_label *lab; + sp_instr *instr; + } bp_t; + List<bp_t> m_backpatch; // Instructions needing backpatching + + int + execute(THD *thd); + +}; // class sp_head : public Sql_alloc + + +// +// "Instructions"... 
+// + +class sp_instr : public Sql_alloc +{ + sp_instr(const sp_instr &); /* Prevent use of these */ + void operator=(sp_instr &); + +public: + + uint marked; + Item *free_list; // My Items + uint m_ip; // My index + sp_pcontext *m_ctx; // My parse context + + // Should give each a name or type code for debugging purposes? + sp_instr(uint ip, sp_pcontext *ctx) + :Sql_alloc(), marked(0), free_list(0), m_ip(ip), m_ctx(ctx) + {} + + virtual ~sp_instr() + { free_items(free_list); } + + // Execute this instrution. '*nextp' will be set to the index of the next + // instruction to execute. (For most instruction this will be the + // instruction following this one.) + // Returns 0 on success, non-zero if some error occured. + virtual int execute(THD *thd, uint *nextp) = 0; + + virtual void print(String *str) = 0; + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + {} + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return m_ip+1; + } + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + virtual void opt_move(uint dst, List<sp_instr> *ibp) + { + m_ip= dst; + } + +}; // class sp_instr : public Sql_alloc + + +// +// Call out to some prepared SQL statement. +// +class sp_instr_stmt : public sp_instr +{ + sp_instr_stmt(const sp_instr_stmt &); /* Prevent use of these */ + void operator=(sp_instr_stmt &); + +public: + + LEX_STRING m_query; // For thd->query + + sp_instr_stmt(uint ip, sp_pcontext *ctx) + : sp_instr(ip, ctx), m_lex(NULL) + { + m_query.str= 0; + m_query.length= 0; + } + + virtual ~sp_instr_stmt(); + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + inline void + set_lex(LEX *lex) + { + m_lex= lex; + } + + inline LEX * + get_lex() + { + return m_lex; + } + +protected: + + int exec_stmt(THD *thd, LEX *lex); // Execute a statement + +private: + + LEX *m_lex; // My own lex + +}; // class sp_instr_stmt : public sp_instr + + +class sp_instr_set : public sp_instr +{ + sp_instr_set(const sp_instr_set &); /* Prevent use of these */ + void operator=(sp_instr_set &); + +public: + + TABLE_LIST *tables; + + sp_instr_set(uint ip, sp_pcontext *ctx, + uint offset, Item *val, enum enum_field_types type) + : sp_instr(ip, ctx), + tables(NULL), m_offset(offset), m_value(val), m_type(type) + {} + + virtual ~sp_instr_set() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_offset; // Frame offset + Item *m_value; + enum enum_field_types m_type; // The declared type + +}; // class sp_instr_set : public sp_instr + + +/* + Set user variable instruction. + Used in functions and triggers to set user variables because we don't + want use sp_instr_stmt + "SET @a:=..." statement in this case since + latter will close all tables and thus will ruin execution of statement + calling/invoking this function/trigger. +*/ +class sp_instr_set_user_var : public sp_instr +{ + sp_instr_set_user_var(const sp_instr_set_user_var &); + void operator=(sp_instr_set_user_var &); + +public: + + sp_instr_set_user_var(uint ip, sp_pcontext *ctx, LEX_STRING var, Item *val) + : sp_instr(ip, ctx), m_set_var_item(var, val) + {} + + virtual ~sp_instr_set_user_var() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + Item_func_set_user_var m_set_var_item; +}; // class sp_instr_set_user_var : public sp_instr + + +/* + Set NEW/OLD row field value instruction. Used in triggers. 
+*/ +class sp_instr_set_trigger_field : public sp_instr +{ + sp_instr_set_trigger_field(const sp_instr_set_trigger_field &); + void operator=(sp_instr_set_trigger_field &); + +public: + + sp_instr_set_trigger_field(uint ip, sp_pcontext *ctx, + LEX_STRING field_name, Item *val) + : sp_instr(ip, ctx), + trigger_field(Item_trigger_field::NEW_ROW, field_name.str), + value(val) + {} + + virtual ~sp_instr_set_trigger_field() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + bool setup_field(THD *thd, TABLE *table, enum trg_event_type event) + { + return trigger_field.setup_field(thd, table, event); + } +private: + + Item_trigger_field trigger_field; + Item *value; +}; // class sp_instr_trigger_field : public sp_instr + + +class sp_instr_jump : public sp_instr +{ + sp_instr_jump(const sp_instr_jump &); /* Prevent use of these */ + void operator=(sp_instr_jump &); + +public: + + uint m_dest; // Where we will go + + sp_instr_jump(uint ip, sp_pcontext *ctx) + : sp_instr(ip, ctx), m_dest(0), m_optdest(0) + {} + + sp_instr_jump(uint ip, sp_pcontext *ctx, uint dest) + : sp_instr(ip, ctx), m_dest(dest), m_optdest(0) + {} + + virtual ~sp_instr_jump() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start); + + virtual void opt_move(uint dst, List<sp_instr> *ibp); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + { + if (m_dest == 0) // Don't reset + m_dest= dest; + } + +protected: + + sp_instr *m_optdest; // Used during optimization + +}; // class sp_instr_jump : public sp_instr + + +class sp_instr_jump_if : public sp_instr_jump +{ + sp_instr_jump_if(const sp_instr_jump_if &); /* Prevent use of these */ + void operator=(sp_instr_jump_if &); + +public: + + TABLE_LIST *tables; + + sp_instr_jump_if(uint ip, sp_pcontext *ctx, Item *i) + : sp_instr_jump(ip, ctx), tables(NULL), m_expr(i) + {} + + sp_instr_jump_if(uint ip, sp_pcontext *ctx, Item *i, uint dest) + : sp_instr_jump(ip, ctx, dest), tables(NULL), m_expr(i) + {} + + virtual ~sp_instr_jump_if() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + +private: + + Item *m_expr; // The condition + +}; // class sp_instr_jump_if : public sp_instr_jump + + +class sp_instr_jump_if_not : public sp_instr_jump +{ + sp_instr_jump_if_not(const sp_instr_jump_if_not &); /* Prevent use of these */ + void operator=(sp_instr_jump_if_not &); + +public: + + TABLE_LIST *tables; + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i) + : sp_instr_jump(ip, ctx), tables(NULL), m_expr(i) + {} + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i, uint dest) + : sp_instr_jump(ip, ctx, dest), tables(NULL), m_expr(i) + {} + + virtual ~sp_instr_jump_if_not() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + +private: + + Item *m_expr; // The condition + +}; // class sp_instr_jump_if_not : public sp_instr_jump + + +class sp_instr_freturn : public sp_instr +{ + sp_instr_freturn(const sp_instr_freturn &); /* Prevent use of these */ + void operator=(sp_instr_freturn &); + +public: + + TABLE_LIST *tables; + + sp_instr_freturn(uint ip, sp_pcontext *ctx, + 
Item *val, enum enum_field_types type) + : sp_instr(ip, ctx), tables(NULL), m_value(val), m_type(type) + {} + + virtual ~sp_instr_freturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return UINT_MAX; + } + +protected: + + Item *m_value; + enum enum_field_types m_type; + +}; // class sp_instr_freturn : public sp_instr + + +class sp_instr_hpush_jump : public sp_instr_jump +{ + sp_instr_hpush_jump(const sp_instr_hpush_jump &); /* Prevent use of these */ + void operator=(sp_instr_hpush_jump &); + +public: + + sp_instr_hpush_jump(uint ip, sp_pcontext *ctx, int htype, uint fp) + : sp_instr_jump(ip, ctx), m_type(htype), m_frame(fp) + { + m_handler= ip+1; + m_cond.empty(); + } + + virtual ~sp_instr_hpush_jump() + { + m_cond.empty(); + } + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + inline void add_condition(struct sp_cond_type *cond) + { + m_cond.push_front(cond); + } + +private: + + int m_type; // Handler type + uint m_frame; + uint m_handler; // Location of handler + List<struct sp_cond_type> m_cond; + +}; // class sp_instr_hpush_jump : public sp_instr_jump + + +class sp_instr_hpop : public sp_instr +{ + sp_instr_hpop(const sp_instr_hpop &); /* Prevent use of these */ + void operator=(sp_instr_hpop &); + +public: + + sp_instr_hpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_hpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx); + + virtual uint opt_mark(sp_head *sp) + { + if (m_count) + marked= 1; + return m_ip+1; + } + +private: + + uint m_count; + +}; // class sp_instr_hpop : public sp_instr + + +class sp_instr_hreturn : public sp_instr_jump +{ + sp_instr_hreturn(const sp_instr_hreturn &); /* Prevent use of these */ + void operator=(sp_instr_hreturn &); + +public: + + sp_instr_hreturn(uint ip, sp_pcontext *ctx, uint fp) + : sp_instr_jump(ip, ctx), m_frame(fp) + {} + + virtual ~sp_instr_hreturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + +private: + + uint m_frame; + +}; // class sp_instr_hreturn : public sp_instr + + +class sp_instr_cpush : public sp_instr +{ + sp_instr_cpush(const sp_instr_cpush &); /* Prevent use of these */ + void operator=(sp_instr_cpush &); + +public: + + sp_instr_cpush(uint ip, sp_pcontext *ctx, LEX *lex) + : sp_instr(ip, ctx), m_lex(lex) + {} + + virtual ~sp_instr_cpush(); + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + LEX *m_lex; + +}; // class sp_instr_cpush : public sp_instr + + +class sp_instr_cpop : public sp_instr +{ + sp_instr_cpop(const sp_instr_cpop &); /* Prevent use of these */ + void operator=(sp_instr_cpop &); + +public: + + sp_instr_cpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_cpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx); + + virtual uint opt_mark(sp_head *sp) + { + if (m_count) + marked= 1; + return m_ip+1; + } + +private: + + uint m_count; + +}; // class sp_instr_cpop : public sp_instr + + +class 
sp_instr_copen : public sp_instr_stmt +{ + sp_instr_copen(const sp_instr_copen &); /* Prevent use of these */ + void operator=(sp_instr_copen &); + +public: + + sp_instr_copen(uint ip, sp_pcontext *ctx, uint c) + : sp_instr_stmt(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_copen() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; // Stack index + +}; // class sp_instr_copen : public sp_instr_stmt + + +class sp_instr_cclose : public sp_instr +{ + sp_instr_cclose(const sp_instr_cclose &); /* Prevent use of these */ + void operator=(sp_instr_cclose &); + +public: + + sp_instr_cclose(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_cclose() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; + +}; // class sp_instr_cclose : public sp_instr + + +class sp_instr_cfetch : public sp_instr +{ + sp_instr_cfetch(const sp_instr_cfetch &); /* Prevent use of these */ + void operator=(sp_instr_cfetch &); + +public: + + sp_instr_cfetch(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + { + m_varlist.empty(); + } + + virtual ~sp_instr_cfetch() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + void add_to_varlist(struct sp_pvar *var) + { + m_varlist.push_back(var); + } + +private: + + uint m_cursor; + List<struct sp_pvar> m_varlist; + +}; // class sp_instr_cfetch : public sp_instr + + +class sp_instr_error : public sp_instr +{ + sp_instr_error(const sp_instr_error &); /* Prevent use of these */ + void operator=(sp_instr_error &); + +public: + + sp_instr_error(uint ip, sp_pcontext *ctx, int errcode) + : sp_instr(ip, ctx), m_errcode(errcode) + {} + + virtual ~sp_instr_error() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return UINT_MAX; + } + +private: + + int m_errcode; + +}; // class sp_instr_error : public sp_instr + + +struct st_sp_security_context +{ + bool changed; + uint master_access; + uint db_access; + char *priv_user; + char priv_host[MAX_HOSTNAME]; + char *user; + char *host; + char *ip; +}; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +void +sp_change_security_context(THD *thd, sp_head *sp, st_sp_security_context *ctxp); +void +sp_restore_security_context(THD *thd, sp_head *sp,st_sp_security_context *ctxp); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + +#endif /* _SP_HEAD_H_ */ diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc new file mode 100644 index 00000000000..3b60f0936f9 --- /dev/null +++ b/sql/sp_pcontext.cc @@ -0,0 +1,274 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "mysql_priv.h" +#include "sp_pcontext.h" +#include "sp_head.h" + +sp_pcontext::sp_pcontext(sp_pcontext *prev) + : Sql_alloc(), m_psubsize(0), m_csubsize(0), m_hsubsize(0), + m_handlers(0), m_parent(prev) +{ + VOID(my_init_dynamic_array(&m_pvar, sizeof(sp_pvar_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_cond, sizeof(sp_cond_type_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_cursor, sizeof(LEX_STRING), 16, 8)); + m_label.empty(); + m_children.empty(); + if (!prev) + m_poffset= m_coffset= 0; + else + { + m_poffset= prev->current_pvars(); + m_coffset= prev->current_cursors(); + } +} + +void +sp_pcontext::destroy() +{ + List_iterator_fast<sp_pcontext> li(m_children); + sp_pcontext *child; + + while ((child= li++)) + child->destroy(); + + m_children.empty(); + m_label.empty(); + delete_dynamic(&m_pvar); + delete_dynamic(&m_cond); + delete_dynamic(&m_cursor); +} + +sp_pcontext * +sp_pcontext::push_context() +{ + sp_pcontext *child= new sp_pcontext(this); + + if (child) + m_children.push_back(child); + return child; +} + +sp_pcontext * +sp_pcontext::pop_context() +{ + uint submax= max_pvars(); + + if (submax > m_parent->m_psubsize) + m_parent->m_psubsize= submax; + submax= max_handlers(); + if (submax > m_parent->m_hsubsize) + m_parent->m_hsubsize= submax; + submax= max_cursors(); + if (submax > m_parent->m_csubsize) + m_parent->m_csubsize= submax; + return m_parent; +} + +uint +sp_pcontext::diff_handlers(sp_pcontext *ctx) +{ + uint n= 0; + sp_pcontext *pctx= this; + + while (pctx && pctx != ctx) + { + n+= pctx->m_handlers; + pctx= pctx->parent_context(); + } + if (pctx) + return n; + return 0; // Didn't find ctx +} + +uint +sp_pcontext::diff_cursors(sp_pcontext *ctx) +{ + uint n= 0; + sp_pcontext *pctx= this; + + while (pctx && pctx != ctx) + pctx= pctx->parent_context(); + if (pctx) + return ctx->current_cursors() - pctx->current_cursors(); + return 0; // Didn't find ctx +} + +/* This does a linear search (from newer to older variables, in case +** we have shadowed names). +** It's possible to have a more efficient allocation and search method, +** but it might not be worth it. The typical number of parameters and +** variables will in most cases be low (a handful). +** ...and, this is only called during parsing. +*/ +sp_pvar_t * +sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped) +{ + uint i= m_pvar.elements; + + while (i--) + { + sp_pvar_t *p; + + get_dynamic(&m_pvar, (gptr)&p, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)p->name.str, p->name.length) == 0) + { + return p; + } + } + if (!scoped && m_parent) + return m_parent->find_pvar(name, scoped); + return NULL; +} + +void +sp_pcontext::push_pvar(LEX_STRING *name, enum enum_field_types type, + sp_param_mode_t mode) +{ + sp_pvar_t *p= (sp_pvar_t *)sql_alloc(sizeof(sp_pvar_t)); + + if (p) + { + if (m_pvar.elements == m_psubsize) + m_psubsize+= 1; + p->name.str= name->str; + p->name.length= name->length; + p->type= type; + p->mode= mode; + p->offset= current_pvars(); + p->isset= (mode == sp_param_out ?
FALSE : TRUE); + p->dflt= NULL; + insert_dynamic(&m_pvar, (gptr)&p); + } +} + +sp_label_t * +sp_pcontext::push_label(char *name, uint ip) +{ + sp_label_t *lab = (sp_label_t *)sql_alloc(sizeof(sp_label_t)); + + if (lab) + { + lab->name= name; + lab->ip= ip; + lab->type= SP_LAB_GOTO; + lab->ctx= this; + m_label.push_front(lab); + } + return lab; +} + +sp_label_t * +sp_pcontext::find_label(char *name) +{ + List_iterator_fast<sp_label_t> li(m_label); + sp_label_t *lab; + + while ((lab= li++)) + if (my_strcasecmp(system_charset_info, name, lab->name) == 0) + return lab; + + if (m_parent) + return m_parent->find_label(name); + return NULL; +} + +void +sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val) +{ + sp_cond_t *p= (sp_cond_t *)sql_alloc(sizeof(sp_cond_t)); + + if (p) + { + p->name.str= name->str; + p->name.length= name->length; + p->val= val; + insert_dynamic(&m_cond, (gptr)&p); + } +} + +/* + * See comment for find_pvar() above + */ +sp_cond_type_t * +sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped) +{ + uint i= m_cond.elements; + + while (i--) + { + sp_cond_t *p; + + get_dynamic(&m_cond, (gptr)&p, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)p->name.str, p->name.length) == 0) + { + return p->val; + } + } + if (!scoped && m_parent) + return m_parent->find_cond(name, scoped); + return NULL; +} + +void +sp_pcontext::push_cursor(LEX_STRING *name) +{ + LEX_STRING n; + + if (m_cursor.elements == m_csubsize) + m_csubsize+= 1; + n.str= name->str; + n.length= name->length; + insert_dynamic(&m_cursor, (gptr)&n); +} + +/* + * See comment for find_pvar() above + */ +my_bool +sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped) +{ + uint i= m_cursor.elements; + + while (i--) + { + LEX_STRING n; + + get_dynamic(&m_cursor, (gptr)&n, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)n.str, n.length) == 0) + { + *poff= i; + return TRUE; + } + } + if (!scoped && m_parent) + return m_parent->find_cursor(name, poff, scoped); + return FALSE; +} diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h new file mode 100644 index 00000000000..36e4ed06aa7 --- /dev/null +++ b/sql/sp_pcontext.h @@ -0,0 +1,298 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_PCONTEXT_H_ +#define _SP_PCONTEXT_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +typedef enum +{ + sp_param_in, + sp_param_out, + sp_param_inout +} sp_param_mode_t; + +typedef struct sp_pvar +{ + LEX_STRING name; + enum enum_field_types type; + sp_param_mode_t mode; + uint offset; // Offset in current frame + my_bool isset; + Item *dflt; +} sp_pvar_t; + + +#define SP_LAB_REF 0 // Unresolved reference (for goto) +#define SP_LAB_GOTO 1 // Free goto label +#define SP_LAB_BEGIN 2 // Label at BEGIN +#define SP_LAB_ITER 3 // Label at iteration control + +typedef struct sp_label +{ + char *name; + uint ip; // Instruction index + int type; // begin/iter or ref/free + struct sp_pcontext *ctx; // The label's context +} sp_label_t; + +typedef struct sp_cond_type +{ + enum { number, state, warning, notfound, exception } type; + char sqlstate[6]; + uint mysqlerr; +} sp_cond_type_t; + +typedef struct sp_cond +{ + LEX_STRING name; + sp_cond_type_t *val; +} sp_cond_t; + +class sp_pcontext : public Sql_alloc +{ + sp_pcontext(const sp_pcontext &); /* Prevent use of these */ + void operator=(sp_pcontext &); + + public: + + sp_pcontext(sp_pcontext *prev); + + // Free memory + void + destroy(); + + sp_pcontext * + push_context(); + + // Returns the previous context, not the one we pop + sp_pcontext * + pop_context(); + + sp_pcontext * + parent_context() + { + return m_parent; + } + + uint + diff_handlers(sp_pcontext *ctx); + + uint + diff_cursors(sp_pcontext *ctx); + + + // + // Parameters and variables + // + + inline uint + max_pvars() + { + return m_psubsize + m_pvar.elements; + } + + inline uint + current_pvars() + { + return m_poffset + m_pvar.elements; + } + + inline uint + context_pvars() + { + return m_pvar.elements; + } + + inline uint + pvar_context2index(uint i) + { + return m_poffset + i; + } + + inline void + set_type(uint i, enum enum_field_types type) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->type= type; + } + + inline void + set_isset(uint i, my_bool val) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->isset= val; + } + + inline void + set_default(uint i, Item *it) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->dflt= it; + } + + void + push_pvar(LEX_STRING *name, enum enum_field_types type, sp_param_mode_t mode); + + // Pop the last 'num' slots of the frame + inline void + pop_pvar(uint num = 1) + { + while (num--) + pop_dynamic(&m_pvar); + } + + // Find by name + sp_pvar_t * + find_pvar(LEX_STRING *name, my_bool scoped=0); + + // Find by index + sp_pvar_t * + find_pvar(uint i) + { + sp_pvar_t *p; + + if (i < m_pvar.elements) + get_dynamic(&m_pvar, (gptr)&p, i); + else + p= NULL; + return p; + } + + // + // Labels + // + + sp_label_t * + push_label(char *name, uint ip); + + sp_label_t * + find_label(char *name); + + inline sp_label_t * + last_label() + { + sp_label_t *lab= m_label.head(); + + if (!lab && m_parent) + lab= m_parent->last_label(); + return lab; + } + + inline sp_label_t * + pop_label() + { + return m_label.pop(); + } + + // + // Conditions + // + + void + push_cond(LEX_STRING *name, sp_cond_type_t *val); + + inline void + pop_cond(uint num) + { + while (num--) + pop_dynamic(&m_cond); + } + + sp_cond_type_t * + find_cond(LEX_STRING *name, my_bool scoped=0); + + // + // Handlers + // + + inline void + add_handler() 
+ { + m_handlers+= 1; + } + + inline uint + max_handlers() + { + return m_hsubsize + m_handlers; + } + + inline void + push_handlers(uint n) + { + m_handlers+= n; + } + + // + // Cursors + // + + void + push_cursor(LEX_STRING *name); + + my_bool + find_cursor(LEX_STRING *name, uint *poff, my_bool scoped=0); + + inline uint + max_cursors() + { + return m_csubsize + m_cursor.elements; + } + + inline uint + current_cursors() + { + return m_coffset + m_cursor.elements; + } + +protected: + + // The maximum sub context's framesizes + uint m_psubsize; + uint m_csubsize; + uint m_hsubsize; + uint m_handlers; // No. of handlers in this context + +private: + + sp_pcontext *m_parent; // Parent context + + uint m_poffset; // Variable offset for this context + uint m_coffset; // Cursor offset for this context + + DYNAMIC_ARRAY m_pvar; // Parameters/variables + DYNAMIC_ARRAY m_cond; // Conditions + DYNAMIC_ARRAY m_cursor; // Cursors + + List<sp_label_t> m_label; // The label list + + List<sp_pcontext> m_children; // Children contexts, used for destruction + +}; // class sp_pcontext : public Sql_alloc + + +#endif /* _SP_PCONTEXT_H_ */ diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc new file mode 100644 index 00000000000..51a1bb2e550 --- /dev/null +++ b/sql/sp_rcontext.cc @@ -0,0 +1,268 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "mysql_priv.h" +#include "mysql.h" +#include "sp_head.h" +#include "sp_rcontext.h" +#include "sp_pcontext.h" + +sp_rcontext::sp_rcontext(uint fsize, uint hmax, uint cmax) + : m_count(0), m_fsize(fsize), m_result(NULL), m_hcount(0), m_hsp(0), + m_hfound(-1), m_ccount(0) +{ + in_handler= FALSE; + m_frame= (Item **)sql_alloc(fsize * sizeof(Item*)); + m_outs= (int *)sql_alloc(fsize * sizeof(int)); + m_handler= (sp_handler_t *)sql_alloc(hmax * sizeof(sp_handler_t)); + m_hstack= (uint *)sql_alloc(hmax * sizeof(uint)); + m_cstack= (sp_cursor **)sql_alloc(cmax * sizeof(sp_cursor *)); + m_saved.empty(); +} + +int +sp_rcontext::set_item_eval(uint idx, Item *i, enum_field_types type) +{ + extern Item *sp_eval_func_item(THD *thd, Item *it, enum_field_types type); + Item *it= sp_eval_func_item(current_thd, i, type); + + if (! 
it) + return -1; + else + { + set_item(idx, it); + return 0; + } +} + +bool +sp_rcontext::find_handler(uint sql_errno, + MYSQL_ERROR::enum_warning_level level) +{ + if (in_handler) + return 0; // Already executing a handler + if (m_hfound >= 0) + return 1; // Already got one + + const char *sqlstate= mysql_errno_to_sqlstate(sql_errno); + int i= m_hcount, found= -1; + + while (i--) + { + sp_cond_type_t *cond= m_handler[i].cond; + + switch (cond->type) + { + case sp_cond_type_t::number: + if (sql_errno == cond->mysqlerr) + found= i; // Always the most specific + break; + case sp_cond_type_t::state: + if (strcmp(sqlstate, cond->sqlstate) == 0 && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::number)) + found= i; + break; + case sp_cond_type_t::warning: + if ((sqlstate[0] == '0' && sqlstate[1] == '1' || + level == MYSQL_ERROR::WARN_LEVEL_WARN) && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + case sp_cond_type_t::notfound: + if (sqlstate[0] == '0' && sqlstate[1] == '2' && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + case sp_cond_type_t::exception: + if ((sqlstate[0] != '0' || sqlstate[1] > '2' || + level == MYSQL_ERROR::WARN_LEVEL_ERROR) && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + } + } + if (found < 0) + return FALSE; + m_hfound= found; + return TRUE; +} + +void +sp_rcontext::save_variables(uint fp) +{ + while (fp < m_count) + m_saved.push_front(m_frame[fp++]); +} + +void +sp_rcontext::restore_variables(uint fp) +{ + uint i= m_count; + + while (i-- > fp) + m_frame[i]= m_saved.pop(); +} + +void +sp_rcontext::push_cursor(LEX *lex) +{ + m_cstack[m_ccount++]= new sp_cursor(lex); +} + +void +sp_rcontext::pop_cursors(uint count) +{ + while (count--) + { + delete m_cstack[--m_ccount]; + } +} + + +/* + * + * sp_cursor + * + */ + +// We have split this in two to make it easy for sp_instr_copen +// to reuse the sp_instr::exec_stmt() code. +LEX * +sp_cursor::pre_open(THD *thd) +{ + if (m_isopen) + { + send_error(thd, ER_SP_CURSOR_ALREADY_OPEN); + return NULL; + } + + bzero((char *)&m_mem_root, sizeof(m_mem_root)); + init_alloc_root(&m_mem_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC); + if ((m_prot= new Protocol_cursor(thd, &m_mem_root)) == NULL) + return NULL; + + m_oprot= thd->protocol; // Save the original protocol + thd->protocol= m_prot; + + m_nseof= thd->net.no_send_eof; + thd->net.no_send_eof= TRUE; + return m_lex; +} + +void +sp_cursor::post_open(THD *thd, my_bool was_opened) +{ + thd->net.no_send_eof= m_nseof; // Restore the originals + thd->protocol= m_oprot; + if (was_opened) + { + m_isopen= was_opened; + m_current_row= m_prot->data; + } +} + +int +sp_cursor::close(THD *thd) +{ + if (! m_isopen) + { + send_error(thd, ER_SP_CURSOR_NOT_OPEN); + return -1; + } + destroy(); + return 0; +} + +void +sp_cursor::destroy() +{ + if (m_prot) + { + delete m_prot; + m_prot= NULL; + free_root(&m_mem_root, MYF(0)); + bzero((char *)&m_mem_root, sizeof(m_mem_root)); + } + m_isopen= FALSE; +} + +int +sp_cursor::fetch(THD *thd, List<struct sp_pvar> *vars) +{ + List_iterator_fast<struct sp_pvar> li(*vars); + sp_pvar_t *pv; + MYSQL_ROW row; + uint fldcount; + + if (! 
m_isopen) + { + send_error(thd, ER_SP_CURSOR_NOT_OPEN); + return -1; + } + if (m_current_row == NULL) + { + send_error(thd, ER_SP_FETCH_NO_DATA); + return -1; + } + + row= m_current_row->data; + for (fldcount= 0 ; (pv= li++) ; fldcount++) + { + Item *it; + const char *s; + + if (fldcount >= m_prot->get_field_count()) + { + send_error(thd, ER_SP_WRONG_NO_OF_FETCH_ARGS); + return -1; + } + s= row[fldcount]; + if (!s) + it= new Item_null(); + else + switch (sp_map_result_type(pv->type)) + { + case INT_RESULT: + it= new Item_int(s); + break; + case REAL_RESULT: + it= new Item_real(s, strlen(s)); + break; + default: + { + uint len= strlen(s); + it= new Item_string(thd->strmake(s, len), len, thd->db_charset); + break; + } + } + thd->spcont->set_item(pv->offset, it); + } + if (fldcount < m_prot->get_field_count()) + { + send_error(thd, ER_SP_WRONG_NO_OF_FETCH_ARGS); + return -1; + } + m_current_row= m_current_row->next; + return 0; +} diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h new file mode 100644 index 00000000000..8e818ab76d1 --- /dev/null +++ b/sql/sp_rcontext.h @@ -0,0 +1,255 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_RCONTEXT_H_ +#define _SP_RCONTEXT_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +struct sp_cond_type; +class sp_cursor; +struct sp_pvar; + +#define SP_HANDLER_NONE 0 +#define SP_HANDLER_EXIT 1 +#define SP_HANDLER_CONTINUE 2 +#define SP_HANDLER_UNDO 3 + +typedef struct +{ + struct sp_cond_type *cond; + uint handler; // Location of handler + int type; + uint foffset; // Frame offset for the handlers declare level +} sp_handler_t; + +class sp_rcontext : public Sql_alloc +{ + sp_rcontext(const sp_rcontext &); /* Prevent use of these */ + void operator=(sp_rcontext &); + + public: + + bool in_handler; + + sp_rcontext(uint fsize, uint hmax, uint cmax); + + ~sp_rcontext() + { + // Not needed? 
+ //sql_element_free(m_frame); + //m_saved.empty(); + } + + inline void + push_item(Item *i) + { + if (m_count < m_fsize) + m_frame[m_count++] = i; + } + + inline void + set_item(uint idx, Item *i) + { + if (idx < m_count) + m_frame[idx] = i; + } + + /* Returns 0 on success, -1 on (eval) failure */ + int + set_item_eval(uint idx, Item *i, enum_field_types type); + + inline Item * + get_item(uint idx) + { + return m_frame[idx]; + } + + inline void + set_oindex(uint idx, int oidx) + { + m_outs[idx] = oidx; + } + + inline int + get_oindex(uint idx) + { + return m_outs[idx]; + } + + inline void + set_result(Item *it) + { + m_result= it; + } + + inline Item * + get_result() + { + return m_result; + } + + inline void + push_handler(struct sp_cond_type *cond, uint h, int type, uint f) + { + m_handler[m_hcount].cond= cond; + m_handler[m_hcount].handler= h; + m_handler[m_hcount].type= type; + m_handler[m_hcount].foffset= f; + m_hcount+= 1; + } + + inline void + pop_handlers(uint count) + { + m_hcount-= count; + } + + // Returns 1 if a handler was found, 0 otherwise. + bool + find_handler(uint sql_errno,MYSQL_ERROR::enum_warning_level level); + + // Returns handler type and sets *ip to location if one was found + inline int + found_handler(uint *ip, uint *fp) + { + if (m_hfound < 0) + return SP_HANDLER_NONE; + *ip= m_handler[m_hfound].handler; + *fp= m_handler[m_hfound].foffset; + return m_handler[m_hfound].type; + } + + // Clears the handler find state + inline void + clear_handler() + { + m_hfound= -1; + } + + inline void + push_hstack(uint h) + { + m_hstack[m_hsp++]= h; + } + + inline uint + pop_hstack() + { + return m_hstack[--m_hsp]; + } + + // Save variables starting at fp and up + void + save_variables(uint fp); + + // Restore variables down to fp + void + restore_variables(uint fp); + + void + push_cursor(LEX *lex); + + void + pop_cursors(uint count); + + void + pop_all_cursors() + { + pop_cursors(m_ccount); + } + + inline sp_cursor * + get_cursor(uint i) + { + return m_cstack[i]; + } + +private: + + uint m_count; + uint m_fsize; + Item **m_frame; + int *m_outs; + + Item *m_result; // For FUNCTIONs + + sp_handler_t *m_handler; + uint m_hcount; + uint *m_hstack; + uint m_hsp; + int m_hfound; // Set by find_handler; -1 if not found + List<Item> m_saved; // Saved variables + + sp_cursor **m_cstack; + uint m_ccount; + +}; // class sp_rcontext : public Sql_alloc + + +class sp_cursor : public Sql_alloc +{ +public: + + sp_cursor(LEX *lex) + : m_lex(lex), m_prot(NULL), m_isopen(0), m_current_row(NULL) + { + /* Empty */ + } + + virtual ~sp_cursor() + { + destroy(); + } + + // We have split this in two to make it easy for sp_instr_copen + // to reuse the sp_instr::exec_stmt() code. 
+ LEX * + pre_open(THD *thd); + void + post_open(THD *thd, my_bool was_opened); + + int + close(THD *thd); + + inline my_bool + is_open() + { + return m_isopen; + } + + int + fetch(THD *, List<struct sp_pvar> *vars); + +private: + + MEM_ROOT m_mem_root; // My own mem_root + LEX *m_lex; + Protocol_cursor *m_prot; + my_bool m_isopen; + my_bool m_nseof; // Original no_send_eof + Protocol *m_oprot; // Original protcol + MYSQL_ROWS *m_current_row; + + void + destroy(); + +}; // class sp_cursor : public Sql_alloc + +#endif /* _SP_RCONTEXT_H_ */ diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 98dde1071c3..3e3642dd0bd 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -17,8 +17,8 @@ /* The privileges are saved in the following tables: - mysql/user ; super user who are allowed to do almoust anything - mysql/host ; host priviliges. This is used if host is empty in mysql/db. + mysql/user ; super user who are allowed to do almost anything + mysql/host ; host privileges. This is used if host is empty in mysql/db. mysql/db ; database privileges / user data in tables is sorted according to how many not-wild-cards there is @@ -140,7 +140,6 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) MYSQL_LOCK *lock; my_bool return_val=1; bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; - DBUG_ENTER("acl_init"); if (!acl_cache) @@ -153,6 +152,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) } priv_version++; /* Privileges updated */ + mysql_proc_table_exists= 1; // Assume mysql.proc exists /* To be able to run this from boot, we allocate a temporary THD @@ -168,8 +168,8 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) tables[0].alias=tables[0].real_name=(char*) "host"; tables[1].alias=tables[1].real_name=(char*) "user"; tables[2].alias=tables[2].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=tables+2; + tables[0].next_local= tables[0].next_global= tables+1; + tables[1].next_local= tables[1].next_global= tables+2; tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; tables[0].db=tables[1].db=tables[2].db=thd->db; @@ -301,9 +301,16 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) { uint next_field; user.access= get_access(table,3,&next_field) & GLOBAL_ACLS; + /* + if it is pre 5.0.1 privilege table then map CREATE privilege on + CREATE VIEW & SHOW VIEW privileges + */ + if (table->fields <= 31 && (user.access & CREATE_ACL)) + user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL); user.sort= get_sort(2,user.host.hostname,user.user); user.hostname_length= (user.host.hostname ? (uint) strlen(user.host.hostname) : 0); + if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ { char *ssl_type=get_field(&mem, table->field[next_field++]); @@ -492,8 +499,8 @@ void acl_reload(THD *thd) Get all access bits from table after fieldnr IMPLEMENTATION - We know that the access privileges ends when there is no more fields - or the field is not an enum with two elements. + We know that the access privileges ends when there is no more fields + or the field is not an enum with two elements. SYNOPSIS get_access() @@ -596,7 +603,7 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) thd->host, thd->ip, thd->user are used for checks. 
mqh user resources; on success mqh is reset, else unchanged - passwd scrambled & crypted password, recieved from client + passwd scrambled & crypted password, received from client (to check): thd->scramble or thd->scramble_323 is used to decrypt passwd, so they must contain original random string, @@ -607,7 +614,7 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) RETURN VALUE 0 success: thd->priv_user, thd->priv_host, thd->master_access, mqh are updated - 1 user not found or authentification failure + 1 user not found or authentication failure 2 user found, has long (4.1.1) salt, but passwd is in old (3.23) format. -1 user found, has short (3.23) salt, but passwd is in new (4.1.1) format. */ @@ -747,7 +754,7 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, break; } DBUG_PRINT("info",("checkpoint 2")); - /* If X509 issuer is speified, we check it... */ + /* If X509 issuer is specified, we check it... */ if (acl_user->x509_issuer) { DBUG_PRINT("info",("checkpoint 3")); @@ -806,6 +813,86 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, } +/* + * This is like acl_getroot() above, but it doesn't check password, + * and we don't care about the user resources. + * Used to get access rights for SQL SECURITY DEFINER invocation of + * stored procedures. + */ +int acl_getroot_no_password(THD *thd) +{ + ulong user_access= NO_ACCESS; + int res= 1; + uint i; + ACL_USER *acl_user= 0; + DBUG_ENTER("acl_getroot_no_password"); + + if (!initialized) + { + /* + here if mysqld's been started with --skip-grant-tables option. + */ + thd->priv_user= (char *) ""; // privileges for + *thd->priv_host= '\0'; // the user are unknown + thd->master_access= ~NO_ACCESS; // everything is allowed + DBUG_RETURN(0); + } + + VOID(pthread_mutex_lock(&acl_cache->lock)); + + thd->master_access= 0; + thd->db_access= 0; + + /* + Find acl entry in user database. + This is specially tailored to suit the check we do for CALL of + a stored procedure; thd->user is set to what is actually a + priv_user, which can be ''. + */ + for (i=0 ; i < acl_users.elements ; i++) + { + acl_user= dynamic_element(&acl_users,i,ACL_USER*); + if ((!acl_user->user && (!thd->user || !thd->user[0])) || + (acl_user->user && strcmp(thd->user, acl_user->user) == 0)) + { + if (compare_hostname(&acl_user->host, thd->host, thd->ip)) + { + res= 0; + break; + } + } + } + + if (acl_user) + { + for (i=0 ; i < acl_dbs.elements ; i++) + { + ACL_DB *acl_db= dynamic_element(&acl_dbs, i, ACL_DB*); + if (!acl_db->user || + (thd->user && thd->user[0] && !strcmp(thd->user, acl_db->user))) + { + if (compare_hostname(&acl_db->host, thd->host, thd->ip)) + { + if (!acl_db->db || (thd->db && !strcmp(acl_db->db, thd->db))) + { + thd->db_access= acl_db->access; + break; + } + } + } + } + thd->master_access= acl_user->access; + thd->priv_user= acl_user->user ? thd->user : (char *) ""; + + if (acl_user->host.hostname) + strmake(thd->priv_host, acl_user->host.hostname, MAX_HOSTNAME); + else + *thd->priv_host= 0; + } + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(res); +} + static byte* check_get_key(ACL_USER *buff,uint *length, my_bool not_used __attribute__((unused))) { @@ -1226,7 +1313,6 @@ bool change_password(THD *thd, const char *host, const char *user, acl_user->host.hostname ? 
acl_user->host.hostname : "", new_password)); thd->clear_error(); - mysql_update_log.write(thd, buff, query_length); Query_log_event qinfo(thd, buff, query_length, 0); mysql_bin_log.write(&qinfo); DBUG_RETURN(0); @@ -1464,13 +1550,28 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, (byte*) table->field[0]->ptr,0, HA_READ_KEY_EXACT)) { - if (!create_user) + /* what == 'N' means revoke */ + if (what == 'N') { - if (what == 'N') - my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); - else - my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), - thd->user, thd->host_or_ip); + my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); + goto end; + } + /* + There are four options which affect the process of creation of + a new user(mysqld option --safe-create-user, 'insert' privilege + on 'mysql.user' table, using 'GRANT' with 'IDENTIFIED BY' and + SQL_MODE flag NO_AUTO_CREATE_USER). Below is the simplified rule + how it should work. + if (safe-user-create && ! INSERT_priv) => reject + else if (identified_by) => create + else if (no_auto_create_user) => reject + else create + */ + else if (((thd->variables.sql_mode & MODE_NO_AUTO_CREATE_USER) && + !password_len) || !create_user) + { + my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), + thd->user, thd->host_or_ip); goto end; } old_row_exists = 0; @@ -1484,6 +1585,17 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, } else { + /* + Check that the user isn't trying to change a password for another + user if he doesn't have UPDATE privilege to the MySQL database + */ + DBUG_ASSERT(combo.host.str); + if (thd->user && combo.password.str && + (strcmp(thd->user,combo.user.str) || + my_strcasecmp(&my_charset_latin1, + combo.host.str, thd->host_or_ip)) && + check_access(thd, UPDATE_ACL, "mysql",0,1,0)) + goto end; old_row_exists = 1; store_record(table,record[1]); // Save copy for update if (combo.password.str) // If password given @@ -1751,7 +1863,7 @@ GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, { /* Host given by user */ orig_host= strdup_root(&memex,h); - /* Convert empty hostname to '%' for easy comparision */ + /* Convert empty hostname to '%' for easy comparison */ host= orig_host[0] ? 
orig_host : (char*) "%"; db = strdup_root(&memex,d); user = strdup_root(&memex,u); @@ -1820,7 +1932,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) col_privs->field[1]->pack_length()+ col_privs->field[2]->pack_length()+ col_privs->field[3]->pack_length()); - key_copy(key,col_privs,0,key_len); + key_copy(key,col_privs->record[0],col_privs->key_info,key_len); col_privs->field[4]->store("",0, &my_charset_latin1); col_privs->file->ha_index_init(0); if (col_privs->file->index_read(col_privs->record[0], @@ -1932,7 +2044,7 @@ static int replace_column_table(GRANT_TABLE *g_t, table->field[3]->store(table_name,(uint) strlen(table_name), &my_charset_latin1); key_length=(table->field[0]->pack_length()+ table->field[1]->pack_length()+ table->field[2]->pack_length()+ table->field[3]->pack_length()); - key_copy(key,table,0,key_length); + key_copy(key,table->record[0],table->key_info,key_length); rights &= COL_ACLS; // Only ACL for columns @@ -1945,7 +2057,7 @@ static int replace_column_table(GRANT_TABLE *g_t, { ulong privileges = xx->rights; bool old_row_exists=0; - key_restore(table,key,0,key_length); + key_restore(table->record[0],key,table->key_info,key_length); table->field[4]->store(xx->column.ptr(),xx->column.length(), &my_charset_latin1); @@ -1961,7 +2073,7 @@ static int replace_column_table(GRANT_TABLE *g_t, } old_row_exists = 0; restore_record(table,default_values); // Get empty record - key_restore(table,key,0,key_length); + key_restore(table->record[0],key,table->key_info,key_length); table->field[4]->store(xx->column.ptr(),xx->column.length(), &my_charset_latin1); } @@ -2218,6 +2330,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, LEX_USER *Str; TABLE_LIST tables[3]; bool create_new_users=0; + char *db_name, *real_name; DBUG_ENTER("mysql_table_grant"); if (!initialized) @@ -2234,17 +2347,19 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (columns.elements && !revoke_grant) { - TABLE *table; class LEX_COLUMN *column; List_iterator <LEX_COLUMN> column_iter(columns); + int res; - if (!(table=open_ltable(thd,table_list,TL_READ))) - DBUG_RETURN(-1); + if ((res= open_and_lock_tables(thd, table_list))) + DBUG_RETURN(res); + while ((column = column_iter++)) { uint unused_field_idx= NO_CACHED_FIELD_INDEX; - if (!find_field_in_table(thd,table,column->column.ptr(), - column->column.length(),0,0, + if (!find_field_in_table(thd, table_list, column->column.ptr(), + column->column.ptr(), + column->column.length(), 0, 0, 0, 0, &unused_field_idx)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), @@ -2274,11 +2389,13 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, tables[0].alias=tables[0].real_name= (char*) "user"; tables[1].alias=tables[1].real_name= (char*) "tables_priv"; tables[2].alias=tables[2].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; + tables[0].next_local= tables[0].next_global= tables+1; /* Don't open column table if we don't need it ! */ - tables[1].next=((column_priv || - (revoke_grant && ((rights & COL_ACLS) || columns.elements))) - ? tables+2 : 0); + tables[1].next_local= + tables[1].next_global= ((column_priv || + (revoke_grant && + ((rights & COL_ACLS) || columns.elements))) + ? tables+2 : 0); tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_WRITE; tables[0].db=tables[1].db=tables[2].db=(char*) "mysql"; @@ -2334,10 +2451,16 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, continue; // Add next user } + db_name= (table_list->view_db.length ? 
+ table_list->view_db.str : + table_list->db); + real_name= (table_list->view_name.length ? + table_list->view_name.str : + table_list->real_name); + /* Find/create cached table grant */ - grant_table= table_hash_search(Str->host.str,NullS,table_list->db, - Str->user.str, - table_list->real_name,1); + grant_table= table_hash_search(Str->host.str, NullS, db_name, + Str->user.str, real_name, 1); if (!grant_table) { if (revoke_grant) @@ -2347,9 +2470,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, result= -1; continue; } - grant_table = new GRANT_TABLE (Str->host.str,table_list->db, - Str->user.str, - table_list->real_name, + grant_table = new GRANT_TABLE (Str->host.str, db_name, + Str->user.str, real_name, rights, column_priv); if (!grant_table) // end of memory @@ -2394,9 +2516,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* update table and columns */ - if (replace_table_table(thd,grant_table,tables[1].table,*Str, - table_list->db, - table_list->real_name, + if (replace_table_table(thd, grant_table, tables[1].table, *Str, + db_name, real_name, rights, column_priv, revoke_grant)) { /* Should only happen if table is crashed */ @@ -2404,10 +2525,9 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, } else if (tables[2].table) { - if ((replace_column_table(grant_table,tables[2].table, *Str, + if ((replace_column_table(grant_table, tables[2].table, *Str, columns, - table_list->db, - table_list->real_name, + db_name, real_name, rights, revoke_grant))) { result= -1; @@ -2451,11 +2571,9 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, bzero((char*) &tables,sizeof(tables)); tables[0].alias=tables[0].real_name=(char*) "user"; tables[1].alias=tables[1].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=0; + tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; - tables[0].table=tables[1].table=0; #ifdef HAVE_REPLICATION /* @@ -2573,7 +2691,7 @@ my_bool grant_init(THD *org_thd) bzero((char*) &tables, sizeof(tables)); tables[0].alias=tables[0].real_name= (char*) "tables_priv"; tables[1].alias=tables[1].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; + tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_READ; tables[0].db=tables[1].db=thd->db; @@ -2694,7 +2812,22 @@ void grant_reload(THD *thd) /**************************************************************************** Check table level grants - All errors are written directly to the client if no_errors is given ! + + SYNOPSIS + bool check_grant() + thd Thread handler + want_access Bits of privileges user needs to have + tables List of tables to check. The user should have 'want_access' + to all tables in list. + show_table <> 0 if we are in show table. In this case it's enough to have + any privilege for the table + number Check at most this number of tables. + no_errors If 0 then we write an error. 
The error is sent directly to + the client + + RETURN + 0 ok + 1 Error: User did not have the requested privileges ****************************************************************************/ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, @@ -2703,23 +2836,27 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, TABLE_LIST *table; char *user = thd->priv_user; DBUG_ENTER("check_grant"); + DBUG_ASSERT(number > 0); - want_access &= ~thd->master_access; + want_access&= ~thd->master_access; if (!want_access) DBUG_RETURN(0); // ok rw_rdlock(&LOCK_grant); - for (table= tables; table && number--; table= table->next) + for (table= tables; table && number--; table= table->next_global) { + GRANT_TABLE *grant_table; if (!(~table->grant.privilege & want_access) || table->derived) { + /* + It is a subquery in the FROM clause. VIEW sets table->derived after + table opening, but this function is always called before table opening. + */ - table->grant.want_privilege=0; + table->grant.want_privilege= 0; continue; // Already checked } - GRANT_TABLE *grant_table = table_hash_search(thd->host,thd->ip, - table->db,user, - table->real_name,0); - if (!grant_table) + if (!(grant_table= table_hash_search(thd->host,thd->ip, + table->db,user, table->real_name,0))) { want_access &= ~table->grant.privilege; goto err; // No grants @@ -2768,6 +2905,10 @@ err: command= "index"; else if (want_access & GRANT_ACL) command= "grant"; + else if (want_access & CREATE_VIEW_ACL) + command= "create view"; + else if (want_access & SHOW_VIEW_ACL) + command= "show create view"; net_printf(thd,ER_TABLEACCESS_DENIED_ERROR, command, thd->priv_user, @@ -2778,13 +2919,14 @@ err: } -bool check_grant_column(THD *thd,TABLE *table, const char *name, - uint length, uint show_tables) +bool check_grant_column(THD *thd, GRANT_INFO *grant, + char *db_name, char *table_name, + const char *name, uint length, uint show_tables) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; - ulong want_access=table->grant.want_privilege; + ulong want_access= grant->want_privilege & ~grant->privilege; if (!want_access) return 0; // Already checked @@ -2792,15 +2934,15 @@ bool check_grant_column(THD *thd,TABLE *table, const char *name, /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name, 0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: deadcode */ grant_column=column_hash_search(grant_table, name, length); @@ -2810,7 +2952,7 @@ bool check_grant_column(THD *thd,TABLE *table, const char *name, return 0; } #ifdef NOT_USED - if (show_tables && (grant_column || table->grant.privilege & COL_ACLS)) + if (show_tables && (grant_column || grant->privilege & COL_ACLS)) { rw_unlock(&LOCK_grant); /* purecov: deadcode */ return 0; /* purecov: deadcode */ @@ -2831,47 +2973,47 @@ err: thd->priv_user, thd->host_or_ip, name, - table ?
table->real_name : "unknown"); + table_name); } return 1; } -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table) +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + char* db_name, char *table_name, + Field_iterator *fields) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; - Field *field=0,**ptr; + Field *field=0; - want_access &= ~table->grant.privilege; + want_access &= ~grant->privilege; if (!want_access) return 0; // Already checked if (!grant_option) - { - field= table->field[0]; // To give a meaningful error message goto err2; - } rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } /* The following should always be true */ - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: inspected */ - for (ptr=table->field; (field= *ptr) ; ptr++) + for (; !fields->end_of_fields(); fields->next()) { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + const char *field_name= fields->name(); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column || (~grant_column->rights & want_access)) goto err; } @@ -2893,8 +3035,8 @@ err2: command, thd->priv_user, thd->host_or_ip, - field ? 
field->field_name : "unknown", - table->real_name); + fields->name(), + table_name); return 1; } @@ -2961,7 +3103,9 @@ ulong get_table_grant(THD *thd, TABLE_LIST *table) } -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) +ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; @@ -2969,30 +3113,31 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->db, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) - priv=table->grant.privilege; + if (!(grant_table= grant->grant_table)) + priv= grant->privilege; else { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column) - priv=table->grant.privilege; + priv= grant->privilege; else - priv=table->grant.privilege | grant_column->rights; + priv= grant->privilege | grant_column->rights; } rw_unlock(&LOCK_grant); return priv; } + /* Help function for mysql_show_grants */ static void add_user_option(String *grant, ulong value, const char *name) @@ -3010,15 +3155,16 @@ static void add_user_option(String *grant, ulong value, const char *name) static const char *command_array[]= { - "SELECT", "INSERT","UPDATE","DELETE","CREATE", "DROP", "RELOAD","SHUTDOWN", - "PROCESS","FILE","GRANT","REFERENCES","INDEX", "ALTER", "SHOW DATABASES", - "SUPER", "CREATE TEMPORARY TABLES", "LOCK TABLES", "EXECUTE", - "REPLICATION SLAVE", "REPLICATION CLIENT", + "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "RELOAD", + "SHUTDOWN", "PROCESS","FILE", "GRANT", "REFERENCES", "INDEX", + "ALTER", "SHOW DATABASES", "SUPER", "CREATE TEMPORARY TABLES", + "LOCK TABLES", "EXECUTE", "REPLICATION SLAVE", "REPLICATION CLIENT", + "CREATE VIEW", "SHOW VIEW" }; static uint command_lengths[]= { - 6,6,6,6,6,4,6,8,7,4,5,10,5,5,14,5,23,11,7,17,18 + 6, 6, 6, 6, 6, 4, 6, 8, 7, 4, 5, 10, 5, 5, 14, 5, 23, 11, 7, 17, 18, 11, 9 }; @@ -3085,7 +3231,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) strxmov(buff,"Grants for ",lex_user->user.str,"@", lex_user->host.str,NullS); field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); rw_wrlock(&LOCK_grant); @@ -3417,10 +3564,9 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables) (tables+1)->alias= (tables+1)->real_name= (char*) "db"; (tables+2)->alias= (tables+2)->real_name= (char*) "tables_priv"; (tables+3)->alias= (tables+3)->real_name= (char*) "columns_priv"; - tables->next= tables+1; - (tables+1)->next= tables+2; - (tables+2)->next= tables+3; - (tables+3)->next= 0; + tables->next_local= tables->next_global= tables+1; + (tables+1)->next_local= (tables+1)->next_global= tables+2; + (tables+2)->next_local= (tables+2)->next_global= tables+3; tables->lock_type= (tables+1)->lock_type= 
(tables+2)->lock_type= (tables+3)->lock_type= TL_WRITE; tables->db= (tables+1)->db= (tables+2)->db= (tables+3)->db=(char*) "mysql"; @@ -3777,3 +3923,51 @@ int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr) DBUG_RETURN (*str != '\0'); } +#ifndef NO_EMBEDDED_ACCESS_CHECKS +/* + fill effective privileges for table + + SYNOPSIS + fill_effective_table_privileges() + thd thread handler + grant grants table descriptor + db db name + table table name +*/ + +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table) +{ + /* --skip-grants */ + if (!initialized) + { + grant->privilege= ~NO_ACCESS; // everything is allowed + return; + } + + /* global privileges */ + grant->privilege= thd->master_access; + + /* db privileges */ + grant->privilege|= acl_get(thd->host, thd->ip, thd->priv_user, db, 0); + + if (!grant_option) + return; + + /* table privileges */ + if (grant->version != grant_version) + { + rw_rdlock(&LOCK_grant); + grant->grant_table= + table_hash_search(thd->host, thd->ip, db, + thd->priv_user, + table, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ + rw_unlock(&LOCK_grant); + } + if (grant->grant_table != 0) + { + grant->privilege|= grant->grant_table->privs; + } +} +#endif diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 68cb1476eb5..390106c1546 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -35,7 +35,8 @@ #define EXECUTE_ACL (1L << 18) #define REPL_SLAVE_ACL (1L << 19) #define REPL_CLIENT_ACL (1L << 20) - +#define CREATE_VIEW_ACL (1L << 21) +#define SHOW_VIEW_ACL (1L << 22) /* don't forget to update static struct show_privileges_st sys_privileges[] @@ -45,11 +46,13 @@ #define DB_ACLS \ (UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | LOCK_TABLES_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \ + LOCK_TABLES_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL) #define TABLE_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_VIEW_ACL | \ + SHOW_VIEW_ACL) #define COL_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | REFERENCES_ACL) @@ -59,7 +62,7 @@ RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL | GRANT_ACL | \ REFERENCES_ACL | INDEX_ACL | ALTER_ACL | SHOW_DB_ACL | SUPER_ACL | \ CREATE_TMP_ACL | LOCK_TABLES_ACL | REPL_SLAVE_ACL | REPL_CLIENT_ACL | \ - EXECUTE_ACL) + EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL) #define EXTRA_ACL (1L << 29) #define NO_ACCESS (1L << 30) @@ -72,13 +75,21 @@ /* Continius bit-segments that needs to be shifted */ #define DB_REL1 (RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL) #define DB_REL2 (GRANT_ACL | REFERENCES_ACL) +#define DB_REL3 (INDEX_ACL | ALTER_ACL) /* Privileges that needs to be reallocated (in continous chunks) */ #define DB_CHUNK1 (GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) #define DB_CHUNK2 (CREATE_TMP_ACL | LOCK_TABLES_ACL) - -#define fix_rights_for_db(A) (((A) & 63) | (((A) & DB_REL1) << 4) | (((A) & DB_REL2) << 6)) -#define get_rights_for_db(A) (((A) & 63) | (((A) & DB_CHUNK1) >> 4) | (((A) & DB_CHUNK2) >> 6)) +#define DB_CHUNK3 (CREATE_VIEW_ACL | SHOW_VIEW_ACL) + +#define fix_rights_for_db(A) (((A) & 63) | \ + (((A) & DB_REL1) << 4) | \ + (((A) & DB_REL2) << 6) | \ + (((A) & DB_REL3) << 9)) +#define get_rights_for_db(A) (((A) & 63) | \ + (((A) & 
DB_CHUNK1) >> 4) | \ + (((A) & DB_CHUNK2) >> 6) | \ + (((A) & DB_CHUNK3) >> 9)) #define fix_rights_for_table(A) (((A) & 63) | (((A) & ~63) << 4)) #define get_rights_for_table(A) (((A) & 63) | (((A) & ~63) >> 4)) #define fix_rights_for_column(A) (((A) & 7) | (((A) & ~7) << 8)) @@ -141,6 +152,7 @@ ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern); int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd, uint passwd_len); +int acl_getroot_no_password(THD *thd); bool acl_check_host(const char *host, const char *ip); bool check_change_password(THD *thd, const char *host, const char *user, char *password); @@ -156,17 +168,24 @@ void grant_free(void); void grant_reload(THD *thd); bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint show_command, uint number, bool dont_print_error); -bool check_grant_column (THD *thd,TABLE *table, const char *name, uint length, - uint show_command=0); -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table); +bool check_grant_column (THD *thd, GRANT_INFO *grant, + char *db_name, char *table_name, + const char *name, uint length, uint show_command=0); +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + char* db_name, char *table_name, + Field_iterator *fields); bool check_grant_db(THD *thd,const char *db); ulong get_table_grant(THD *thd, TABLE_LIST *table); -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field); +ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name); int mysql_show_grants(THD *thd, LEX_USER *user); void get_privilege_desc(char *to, uint max_length, ulong access); void get_mqh(const char *user, const char *host, USER_CONN *uc); int mysql_drop_user(THD *thd, List <LEX_USER> &list); int mysql_revoke_all(THD *thd, List <LEX_USER> &list); +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table); #ifdef NO_EMBEDDED_ACCESS_CHECKS #define check_grant(A,B,C,D,E,F) 0 diff --git a/sql/sql_base.cc b/sql/sql_base.cc index a5db02478ac..b518af80a43 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -20,6 +20,8 @@ #include "mysql_priv.h" #include "sql_acl.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" #include <m_ctype.h> #include <my_dir.h> #include <hash.h> @@ -32,11 +34,17 @@ TABLE *unused_tables; /* Used by mysql_test */ HASH open_cache; /* Used by mysql_test */ HASH assign_cache; -static int open_unireg_entry(THD *thd,TABLE *entry,const char *db, - const char *name, const char *alias); +static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, + const char *name, const char *alias, + TABLE_LIST *table_list, MEM_ROOT *mem_root); static void free_cache_entry(TABLE *entry); static void mysql_rm_tmp_tables(void); - +static my_bool open_new_frm(const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, + TABLE_LIST *table_desc, MEM_ROOT *mem_root); +static void relink_tables_for_multidelete(THD *thd); extern "C" byte *table_cache_key(const byte *record,uint *length, my_bool not_used __attribute__((unused))) @@ -201,6 +209,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) void intern_close_table(TABLE *table) { // Free all structures free_io_cache(table); + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file } @@ -277,7 +286,7 @@ bool 
close_cached_tables(THD *thd, bool if_wait_for_refresh, else { bool found=0; - for (TABLE_LIST *table=tables ; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_local) { if (remove_table_from_cache(thd, table->db, table->real_name, 1)) found=1; @@ -328,7 +337,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, result=reopen_tables(thd,1,1); thd->in_lock_tables=0; /* Set version for table */ - for (TABLE *table=thd->open_tables; table ; table=table->next) + for (TABLE *table=thd->open_tables; table ; table= table->next) table->version=refresh_version; } VOID(pthread_mutex_unlock(&LOCK_open)); @@ -396,7 +405,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - DBUG_PRINT("info", ("thd->open_tables=%p", thd->open_tables)); + DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables)); found_old_table= 0; while (thd->open_tables) @@ -427,6 +436,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) bool found_old_table= 0; TABLE *table= *table_ptr; DBUG_ASSERT(table->key_read == 0); + DBUG_ASSERT(table->file->inited == handler::NONE); *table_ptr=table->next; if (table->version != refresh_version || @@ -540,56 +550,119 @@ void close_temporary_tables(THD *thd) thd->temporary_tables=0; } + /* - Find first suitable table by alias in given list. + Find table in list. SYNOPSIS find_table_in_list() - table - pointer to table list - db_name - data base name or 0 for any - table_name - table name or 0 for any + table Pointer to table list + offset Offset to which list in table structure to use + db_name Data base name + table_name Table name + + NOTES: + This is called by find_table_in_local_list() and + find_table_in_global_list(). RETURN VALUES NULL Table not found # Pointer to found table. */ -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name) +TABLE_LIST *find_table_in_list(TABLE_LIST *table, + uint offset, + const char *db_name, + const char *table_name) { - for (; table; table= table->next) - if ((!db_name || !strcmp(table->db, db_name)) && - (!table_name || !my_strcasecmp(table_alias_charset, - table->alias, table_name))) - break; + if (lower_case_table_names) + { + for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) + { + if ((!strcmp(table->db, db_name) && + !strcmp(table->real_name, table_name)) || + (table->view && + table->table->table_cache_key && // it is not temporary table + !my_strcasecmp(table_alias_charset, + table->table->table_cache_key, db_name) && + !my_strcasecmp(table_alias_charset, + table->table->table_name, table_name))) + break; + } + } + else + { + for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) + { + if ((!strcmp(table->db, db_name) && + !strcmp(table->real_name, table_name)) || + (table->view && // it is VIEW and + table->table->table_cache_key && // it is not temporary table + !strcmp(table->table->table_cache_key, db_name) && + !strcmp(table->table->table_name, table_name))) + break; + } + } return table; } + /* - Find real table in given list. + Test that table is unique SYNOPSIS - find_real_table_in_list() - table - pointer to table list - db_name - data base name - table_name - table name + unique_table() + table table which should be checked + table_list list of tables - RETURN VALUES - NULL Table not found - # Pointer to found table.
+ RETURN + found duplicate + 0 if table is unique */ -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name) +TABLE_LIST* unique_table(TABLE_LIST *table, TABLE_LIST *table_list) { - for (; table; table= table->next) - if (!strcmp(table->db, db_name) && - !strcmp(table->real_name, table_name)) + DBUG_ENTER("unique_table"); + DBUG_PRINT("enter", ("table alias: %s", table->alias)); + TABLE_LIST *res; + const char *d_name= table->db, *t_name= table->real_name; + char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; + /* temporary table is always unique */ + if (table->table && table->table->tmp_table != NO_TMP_TABLE) + return 0; + if (table->view) + { + /* it is view and table opened */ + if (lower_case_table_names) + { + strmov(t_name_buff, table->table->table_name); + my_casedn_str(files_charset_info, t_name_buff); + t_name= t_name_buff; + strmov(d_name_buff, table->table->table_cache_key); + my_casedn_str(files_charset_info, d_name_buff); + d_name= d_name_buff; + } + else + { + d_name= table->table->table_cache_key; + t_name= table->table->table_name; + } + } + + DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name)); + for(;;) + { + if (!(res= find_table_in_global_list(table_list, d_name, t_name)) || + !res->table || res->table != table->table) break; - return table; + /* if we found entry of this table try again. */ + table_list= res->next_global; + DBUG_PRINT("info", ("found same copy of table")); + } + DBUG_RETURN(res); } + TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; @@ -749,10 +822,12 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; pthread_mutex_lock(&LOCK_open); - if (open_unireg_entry(thd, table, db, table_name, table_name) || + if (open_unireg_entry(thd, table, db, table_name, table_name, 0, + thd->mem_root) || !(table->table_cache_key =memdup_root(&table->mem_root,(char*) key, key_length))) { + delete table->triggers; closefrm(table); pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(0); @@ -787,12 +862,13 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) ******************************************************************************/ -TABLE *open_table(THD *thd,const char *db,const char *table_name, - const char *alias,bool *refresh) +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, + bool *refresh) { reg1 TABLE *table; char key[MAX_DBKEY_LENGTH]; uint key_length; + char *alias= table_list->alias; DBUG_ENTER("open_table"); /* find a unused table in the open table cache */ @@ -800,27 +876,31 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, *refresh=0; if (thd->killed) DBUG_RETURN(0); - key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + key_length= (uint) (strmov(strmov(key, table_list->db)+1, + table_list->real_name)-key)+1; int4store(key + key_length, thd->server_id); int4store(key + key_length + 4, thd->variables.pseudo_thread_id); - for (table=thd->temporary_tables; table ; table=table->next) + if (!table_list->skip_temporary) { - if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && - !memcmp(table->table_cache_key, key, - key_length + TMP_TABLE_KEY_EXTRA)) + for (table= thd->temporary_tables; table ; table=table->next) { - if (table->query_id == thd->query_id) + if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && + !memcmp(table->table_cache_key, key, + key_length + 
TMP_TABLE_KEY_EXTRA)) { - my_printf_error(ER_CANT_REOPEN_TABLE, - ER(ER_CANT_REOPEN_TABLE),MYF(0),table->table_name); - DBUG_RETURN(0); + if (table->query_id == thd->query_id) + { + my_printf_error(ER_CANT_REOPEN_TABLE, + ER(ER_CANT_REOPEN_TABLE), MYF(0), table->table_name); + DBUG_RETURN(0); + } + table->query_id= thd->query_id; + table->clear_query_id= 1; + thd->tmp_table_used= 1; + DBUG_PRINT("info",("Using temporary table")); + goto reset; } - table->query_id=thd->query_id; - table->clear_query_id=1; - thd->tmp_table_used= 1; - DBUG_PRINT("info",("Using temporary table")); - goto reset; } } @@ -838,6 +918,37 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, goto reset; } } + /* + is it view? + (it is work around to allow to open view with locked tables, + real fix will be made after definition cache will be made) + */ + { + char path[FN_REFLEN]; + strxnmov(path, FN_REFLEN, mysql_data_home, "/", table_list->db, "/", + table_list->real_name, reg_ext, NullS); + (void) unpack_filename(path, path); + if (mysql_frm_type(path) == FRMTYPE_VIEW) + { + TABLE tab;// will not be used (because it's VIEW) but have to be passed + table= &tab; + VOID(pthread_mutex_lock(&LOCK_open)); + if (open_unireg_entry(thd, table, table_list->db, + table_list->real_name, + alias, table_list, mem_root)) + { + table->next=table->prev=table; + free_cache_entry(table); + } + else + { + DBUG_ASSERT(table_list->view); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } + VOID(pthread_mutex_unlock(&LOCK_open)); + } + } my_printf_error(ER_TABLE_NOT_LOCKED,ER(ER_TABLE_NOT_LOCKED),MYF(0),alias); DBUG_RETURN(0); } @@ -855,7 +966,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } /* close handler tables which are marked for flush */ - mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE); + if (thd->handler_tables) + mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE); for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ; table && table->in_use ; @@ -871,7 +983,9 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, if (table->in_use != thd) wait_for_refresh(thd); else + { VOID(pthread_mutex_unlock(&LOCK_open)); + } if (refresh) *refresh=1; DBUG_RETURN(0); @@ -887,7 +1001,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } table->prev->next=table->next; /* Remove from unused list */ table->next->prev=table->prev; - + table->in_use= thd; } else { @@ -901,15 +1015,23 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } - if (open_unireg_entry(thd, table,db,table_name,alias) || - !(table->table_cache_key=memdup_root(&table->mem_root,(char*) key, - key_length))) + if (open_unireg_entry(thd, table, table_list->db, table_list->real_name, + alias, table_list, mem_root) || + (!table_list->view && + !(table->table_cache_key= memdup_root(&table->mem_root, (char*) key, + key_length)))) { table->next=table->prev=table; free_cache_entry(table); VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } + if (table_list->view) + { + my_free((gptr)table, MYF(0)); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } table->key_length=key_length; table->version=refresh_version; table->flush_version=flush_version; @@ -917,9 +1039,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, VOID(my_hash_insert(&open_cache,(byte*) table)); } - table->in_use=thd; check_unused(); // Debugging call - + 
VOID(pthread_mutex_unlock(&LOCK_open)); if (refresh) { @@ -974,6 +1095,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->used_keys= table->keys_for_keyread; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); + table_list->updatable= 1; // It is not derived table nor non-updatable VIEW DBUG_ASSERT(table->key_read == 0); DBUG_RETURN(table); } @@ -1020,13 +1142,15 @@ bool reopen_table(TABLE *table,bool locked) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - if (open_unireg_entry(current_thd,&tmp,db,table_name,table->table_name)) + if (open_unireg_entry(table->in_use, &tmp, db, table_name, + table->table_name, 0, table->in_use->mem_root)) goto end; free_io_cache(table); if (!(tmp.table_cache_key= memdup_root(&tmp.mem_root,db, table->key_length))) { + delete tmp.triggers; closefrm(&tmp); // End of memory goto end; } @@ -1055,6 +1179,7 @@ bool reopen_table(TABLE *table,bool locked) tmp.next= table->next; tmp.prev= table->prev; + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file, free everything @@ -1337,6 +1462,8 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) db Database name name Table name alias Alias name + table_desc TABLE_LIST descriptor (used with views) + mem_root temporary mem_root for parsing NOTES Extra argument for open is taken from thd->open_options @@ -1345,9 +1472,9 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) 0 ok # Error */ - static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, - const char *name, const char *alias) + const char *name, const char *alias, + TABLE_LIST *table_desc, MEM_ROOT *mem_root) { char path[FN_REFLEN]; int error; @@ -1355,19 +1482,28 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, DBUG_ENTER("open_unireg_entry"); strxmov(path, mysql_data_home, "/", db, "/", name, NullS); - while (openfrm(path,alias, - (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | - HA_TRY_READ_ONLY), - READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, - thd->open_options, entry)) + while ((error= openfrm(thd, path, alias, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY | + NO_ERR_ON_NEW_FRM), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, + thd->open_options, entry)) && + (error != 5 || + (fn_format(path, path, 0, reg_ext, MY_UNPACK_FILENAME), + open_new_frm(path, alias, db, name, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, + thd->open_options, entry, table_desc, mem_root)))) + { if (!entry->crashed) { /* - Frm file could not be found on disk - Since it does not exist, no one can be using it - LOCK_open has been locked to protect from someone else - trying to discover the table at the same time. + Frm file could not be found on disk + Since it does not exist, no one can be using it + LOCK_open has been locked to protect from someone else + trying to discover the table at the same time. 
*/ if (discover_retry_count++ != 0) goto err; @@ -1401,7 +1537,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, pthread_mutex_unlock(&LOCK_open); thd->clear_error(); // Clear error message error= 0; - if (openfrm(path,alias, + if (openfrm(thd, path, alias, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, @@ -1426,6 +1562,13 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, goto err; break; } + + if (error == 5) + DBUG_RETURN(0); // we have just opened VIEW + + if (Table_triggers_list::check_n_load(thd, db, name, entry)) + goto err; + /* If we are here, there was no fatal error (but error may be still unitialized). @@ -1454,6 +1597,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, */ sql_print_error("When opening HEAP table, could not allocate \ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); + delete entry->triggers; if (entry->file) closefrm(entry); goto err; @@ -1462,6 +1606,14 @@ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); } DBUG_RETURN(0); err: + /* Hide "Table doesn't exist" errors if table belong to view */ + if (thd->net.last_errno == ER_NO_SUCH_TABLE && + table_desc && table_desc->belong_to_view) + { + TABLE_LIST * view= table_desc->belong_to_view; + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), view->view_db.str, view->view_name.str); + } DBUG_RETURN(1); } @@ -1485,12 +1637,18 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) bool refresh; int result=0; DBUG_ENTER("open_tables"); + MEM_ROOT new_frm_mem; + /* + temporary mem_root for new .frm parsing. + TODO: variables for size + */ + init_alloc_root(&new_frm_mem, 8024, 8024); thd->current_tablenr= 0; restart: *counter= 0; thd->proc_info="Opening tables"; - for (tables=start ; tables ; tables=tables->next) + for (tables= start; tables ;tables= tables->next_global) { /* Ignore placeholders for derived tables. After derived tables @@ -1500,11 +1658,15 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) continue; (*counter)++; if (!tables->table && - !(tables->table= open_table(thd, - tables->db, - tables->real_name, - tables->alias, &refresh))) + !(tables->table= open_table(thd, tables, &new_frm_mem, &refresh))) { + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + if (tables->view) + { + (*counter)--; + continue; //VIEW placeholder + } + if (refresh) // Refresh in progress { /* close all 'old' tables used by this thread */ @@ -1514,7 +1676,7 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) thd->version=refresh_version; TABLE **prev_table= &thd->open_tables; bool found=0; - for (TABLE_LIST *tmp=start ; tmp ; tmp=tmp->next) + for (TABLE_LIST *tmp= start; tmp; tmp= tmp->next_global) { /* Close normal (not temporary) changed tables */ if (tmp->table && ! tmp->table->tmp_table) @@ -1542,11 +1704,15 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) result= -1; // Fatal error break; } + else + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + if (tables->lock_type != TL_UNLOCK && ! 
thd->locked_tables) tables->table->reginfo.lock_type=tables->lock_type; tables->table->grant= tables->grant; } thd->proc_info=0; + free_root(&new_frm_mem, MYF(0)); // Free pre-alloced block DBUG_RETURN(result); } @@ -1614,9 +1780,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) thd->proc_info="Opening table"; thd->current_tablenr= 0; - while (!(table=open_table(thd,table_list->db, - table_list->real_name,table_list->alias, - &refresh)) && refresh) ; + /* open_ltable can be used only for BASIC TABLEs */ + table_list->required_type= FRMTYPE_TABLE; + while (!(table= open_table(thd, table_list, thd->mem_root, &refresh)) && + refresh) + ; if (table) { @@ -1687,6 +1855,7 @@ int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables) RETURN 0 - ok -1 - error + 1 - error reported to user NOTE The lock will automaticly be freed by close_thread_tables() @@ -1696,19 +1865,23 @@ int open_and_lock_tables(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("open_and_lock_tables"); uint counter; - if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) - DBUG_RETURN(-1); /* purecov: inspected */ - relink_tables_for_derived(thd); - DBUG_RETURN(mysql_handle_derived(thd->lex)); + if (open_tables(thd, tables, &counter) || + lock_tables(thd, tables, counter) || + mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, &mysql_derived_filling))) + DBUG_RETURN(thd->net.report_error ? -1 : 1); /* purecov: inspected */ + relink_tables_for_multidelete(thd); + DBUG_RETURN(0); } /* Let us propagate pointers to open tables from global table list - to table lists in particular selects if needed. + to table lists for multi-delete */ -void relink_tables_for_derived(THD *thd) +static void relink_tables_for_multidelete(THD *thd) { if (thd->lex->all_selects_list->next_select_in_list() || thd->lex->time_zone_tables_used) @@ -1716,11 +1889,15 @@ void relink_tables_for_derived(THD *thd) for (SELECT_LEX *sl= thd->lex->all_selects_list; sl; sl= sl->next_select_in_list()) + { for (TABLE_LIST *cursor= (TABLE_LIST *) sl->table_list.first; cursor; - cursor=cursor->next) - if (cursor->table_list) - cursor->table= cursor->table_list->table; + cursor=cursor->next_local) + { + if (cursor->correspondent_table) + cursor->table= cursor->correspondent_table->table; + } + } } } @@ -1756,9 +1933,9 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count) TABLE **start,**ptr; if (!(ptr=start=(TABLE**) sql_alloc(sizeof(TABLE*)*count))) return -1; - for (table = tables ; table ; table=table->next) + for (table= tables; table; table= table->next_global) { - if (!table->derived) + if (!table->placeholder()) *(ptr++)= table->table; } if (!(thd->lock=mysql_lock_tables(thd,start, (uint) (ptr - start)))) @@ -1766,9 +1943,9 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count) } else { - for (table = tables ; table ; table=table->next) + for (table= tables; table; table= table->next_global) { - if (!table->derived && + if (!table->placeholder() && check_lock_and_start_stmt(thd, table->table, table->lock_type)) { ha_rollback_stmt(thd); @@ -1804,7 +1981,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, MYF(MY_WME)))) DBUG_RETURN(0); /* purecov: inspected */ - if (openfrm(path, table_name, + if (openfrm(thd, path, table_name, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, @@ -1815,7 +1992,6 @@ TABLE *open_temporary_table(THD *thd, 
const char *path, const char *db, } tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked - tmp_table->in_use= thd; tmp_table->tmp_table = (tmp_table->file->has_transactions() ? TRANSACTIONAL_TMP_TABLE : TMP_TABLE); tmp_table->table_cache_key=(char*) (tmp_table+1); @@ -1868,11 +2044,124 @@ bool rm_temporary_table(enum db_type base, char *path) ** return unique field ******************************************************************************/ +/* Special Field pointers for find_field_in_tables returning */ +const Field *not_found_field= (Field*) 0x1; +const Field *view_ref_found= (Field*) 0x2; + #define WRONG_GRANT (Field*) -1 -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grants, bool allow_rowid, - uint *cached_field_index_ptr) + +/* + Find field in table or view + + SYNOPSIS + find_field_in_table() + thd thread handler + table_list table where to find + name name of field + item_name name of item if it will be created (VIEW) + length length of name + ref expression substituted in VIEW should be + passed using this reference (return + view_ref_found) + check_grants_table do check columns grants for table? + check_grants_view do check columns grants for view? + allow_rowid do allow finding of "_rowid" field? + cached_field_index_ptr cached position in field list (used to + speedup prepared tables field finding) + + RETURN + 0 field is not found + view_ref_found found value in VIEW (real result is in *ref) + # pointer to field +*/ + +Field * +find_field_in_table(THD *thd, TABLE_LIST *table_list, + const char *name, const char *item_name, + uint length, Item **ref, + bool check_grants_table, bool check_grants_view, + bool allow_rowid, + uint *cached_field_index_ptr) +{ + Field *fld; + if (table_list->field_translation) + { + DBUG_ASSERT(ref != 0 && table_list->view != 0); + uint num= table_list->view->select_lex.item_list.elements; + Item **trans= table_list->field_translation; + for (uint i= 0; i < num; i ++) + { + if (strcmp(trans[i]->name, name) == 0) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_grants_view && + check_grant_column(thd, &table_list->grant, + table_list->view_db.str, + table_list->view_name.str, + name, length)) + return WRONG_GRANT; +#endif + if (thd->lex->current_select->no_wrap_view_item) + *ref= trans[i]; + else + { + Item_ref *item_ref= new Item_ref(trans + i, table_list->view_name.str, + item_name); + /* as far as Item_ref have defined reference it do not need tables */ + if (item_ref) + { + thd->change_item_tree(ref, item_ref); + (*ref)->fix_fields(thd, 0, ref); + } + } + return (Field*) view_ref_found; + } + } + return 0; + } + fld= find_field_in_real_table(thd, table_list->table, name, length, + check_grants_table, allow_rowid, + cached_field_index_ptr); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* check for views with temporary table algorithm */ + if (check_grants_view && table_list->view && + fld && fld != WRONG_GRANT && + check_grant_column(thd, &table_list->grant, + table_list->view_db.str, + table_list->view_name.str, + name, length)) + { + return WRONG_GRANT; + } +#endif + return fld; +} + + +/* + Find field in table + + SYNOPSIS + find_field_in_real_table() + thd thread handler + table_list table where to find + name name of field + length length of name + check_grants do check columns grants? + allow_rowid do allow finding of "_rowid" field? 
+ cached_field_index_ptr cached position in field list (used to + speedup prepared tables field finding) + + RETURN + 0 field is not found + # pointer to field +*/ + +Field *find_field_in_real_table(THD *thd, TABLE *table, + const char *name, uint length, + bool check_grants, bool allow_rowid, + uint *cached_field_index_ptr) { Field **field_ptr, *field; uint cached_field_index= *cached_field_index_ptr; @@ -1919,7 +2208,9 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, thd->dupp_field=field; } #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (check_grants && check_grant_column(thd,table,name,length)) + if (check_grants && check_grant_column(thd, &table->grant, + table->table_cache_key, + table->real_name, name, length)) return WRONG_GRANT; #endif return field; @@ -1933,26 +2224,33 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, find_field_in_tables() thd Pointer to current thread structure item Field item that should be found - tables Tables for scanning - where Table where field found will be returned via - this parameter - report_error If FALSE then do not report error if item not found - and return not_found_field + tables Tables to be searched for item + ref If 'item' is resolved to a view field, ref is set to + point to the found view field + report_error Degree of error reporting: + - IGNORE_ERRORS then do not report any error + - IGNORE_EXCEPT_NON_UNIQUE report only non-unique + fields, suppress all other errors + - REPORT_EXCEPT_NON_UNIQUE report all other errors + except when non-unique fields were found + - REPORT_ALL_ERRORS + check_privileges need to check privileges RETURN VALUES - 0 Field is not found or field is not unique- error - message is reported - not_found_field Function was called with report_error == FALSE and - field was not found. no error message reported. - found field + 0 If error: the found field is not unique, or there are + no sufficient access priviliges for the found field, + or the field is qualified with non-existing table. + not_found_field The function was called with report_error == + (IGNORE_ERRORS || IGNORE_EXCEPT_NON_UNIQUE) and a + field was not found. + view_ref_found View field is found, item passed through ref parameter + found field If a item was resolved to some field */ -// Special Field pointer for find_field_in_tables returning -const Field *not_found_field= (Field*) 0x1; - Field * find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error) + Item **ref, find_item_error_report_type report_error, + bool check_privileges) { Field *found=0; const char *db=item->db_name; @@ -1969,18 +2267,19 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, TABLE_LIST *tables is not changed during query execution (which is true for all queries except RENAME but luckily RENAME doesn't use fields...) so we can rely on reusing pointer to its member. - With this optimisation we also miss case when addition of one more - field makes some prepared query ambiguous and so erronous, but we + With this optimization we also miss case when addition of one more + field makes some prepared query ambiguous and so erroneous, but we accept this trade off. 
*/ - found= find_field_in_table(thd, item->cached_table->table, name, length, - test(item->cached_table-> - table->grant.want_privilege), - 1, &(item->cached_field_index)); + found= find_field_in_real_table(thd, item->cached_table->table, + name, length, + test(item->cached_table-> + table->grant.want_privilege) && + check_privileges, + 1, &(item->cached_field_index)); if (found) { - (*where)= tables; if (found == WRONG_GRANT) return (Field*) 0; return found; @@ -1990,7 +2289,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (db && lower_case_table_names) { /* - convert database to lower case for comparision. + convert database to lower case for comparison. We can't do this in Item_field as this would change the 'name' of the item which may be used in the select list */ @@ -2002,19 +2301,23 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (table_name && table_name[0]) { /* Qualified field */ bool found_table=0; - for (; tables ; tables=tables->next) + for (; tables; tables= tables->next_local) { if (!my_strcasecmp(table_alias_charset, tables->alias, table_name) && (!db || !tables->db || !tables->db[0] || !strcmp(db,tables->db))) { found_table=1; - Field *find=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant. - want_privilege), - 1, &(item->cached_field_index)); + Field *find= find_field_in_table(thd, tables, name, item->name, + length, ref, + (test(tables->table->grant. + want_privilege) && + check_privileges), + (test(tables->grant.want_privilege) && + check_privileges), + 1, &(item->cached_field_index)); if (find) { - (*where)= item->cached_table= tables; + item->cached_table= tables; if (!tables->cacheable_table) item->cached_table= 0; if (find == WRONG_GRANT) @@ -2023,8 +2326,10 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, return find; if (found) { - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - item->full_name(),thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == IGNORE_EXCEPT_NON_UNIQUE) + my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), + item->full_name(),thd->where); return (Field*) 0; } found=find; @@ -2033,7 +2338,8 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, } if (found) return found; - if (!found_table && report_error) + if (!found_table && (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE)) { char buff[NAME_LEN*2+1]; if (db && db[0]) @@ -2041,49 +2347,58 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, strxnmov(buff,sizeof(buff)-1,db,".",table_name,NullS); table_name=buff; } - if (report_error) - { + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), table_name, thd->where); - } else return (Field*) not_found_field; } else - if (report_error) + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), item->full_name(),thd->where); else return (Field*) not_found_field; return (Field*) 0; } - bool allow_rowid= tables && !tables->next; // Only one table - for (; tables ; tables=tables->next) + + bool allow_rowid= tables && !tables->next_local; // Only one table + for (; tables ; tables= tables->next_local) { if (!tables->table) { - if (report_error) + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) 
my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), item->full_name(),thd->where); return (Field*) not_found_field; } - Field *field=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant.want_privilege), - allow_rowid, &(item->cached_field_index)); + Field *field= find_field_in_table(thd, tables, name, item->name, + length, ref, + (test(tables->table->grant. + want_privilege) && + check_privileges), + (test(tables->grant.want_privilege) && + check_privileges), + allow_rowid, &(item->cached_field_index)); if (field) { if (field == WRONG_GRANT) return (Field*) 0; - (*where)= item->cached_table= tables; + item->cached_table= tables; if (!tables->cacheable_table) item->cached_table= 0; if (found) { if (!thd->where) // Returns first found break; - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - name,thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == IGNORE_EXCEPT_NON_UNIQUE) + my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), + name,thd->where); return (Field*) 0; } found=field; @@ -2091,7 +2406,8 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, } if (found) return found; - if (report_error) + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), item->full_name(), thd->where); else @@ -2130,7 +2446,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, found field */ -// Special Item pointer for find_item_in_list returning +/* Special Item pointer to serve as a return value from find_item_in_list(). */ const Item **not_found_item= (const Item**) 0x1; @@ -2306,28 +2622,29 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num) { - bool is_stmt_prepare; - DBUG_ENTER("setup_wild"); if (!wild_num) - DBUG_RETURN(0); + return(0); - reg2 Item *item; + Item *item; List_iterator<Item> it(fields); Item_arena *arena, backup; + DBUG_ENTER("setup_wild"); + /* - If we are in preparing prepared statement phase then we have change - temporary mem_root to statement mem root to save changes of SELECT list + Don't use arena if we are not in prepared statements or stored procedures + For PS/SP we have to use arena to remember the changes */ arena= thd->change_arena_if_needed(&backup); while (wild_num && (item= it++)) - { + { if (item->type() == Item::FIELD_ITEM && ((Item_field*) item)->field_name && ((Item_field*) item)->field_name[0] == '*' && !((Item_field*) item)->field) { uint elem= fields.elements; + bool any_privileges= ((Item_field *) item)->any_privileges; Item_subselect *subsel= thd->lex->current_select->master_unit()->item; if (subsel && subsel->substype() == Item_subselect::EXISTS_SUBS) @@ -2340,9 +2657,10 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, it.replace(new Item_int("Not_used", (longlong) 1, 21)); } else if (insert_fields(thd,tables,((Item_field*) item)->db_name, - ((Item_field*) item)->table_name, &it)) + ((Item_field*) item)->table_name, &it, + any_privileges, arena != 0)) { - if (arena) + if (arena) thd->restore_backup_item_arena(arena, &backup); DBUG_RETURN(-1); } @@ -2359,7 +2677,14 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, } } if (arena) + { + /* make * substituting permanent */ + SELECT_LEX *select_lex= thd->lex->current_select; + select_lex->with_wild= 0; + select_lex->item_list= fields; + thd->restore_backup_item_arena(arena, &backup); + } 
DBUG_RETURN(0); } @@ -2373,6 +2698,7 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, { reg2 Item *item; List_iterator<Item> it(fields); + SELECT_LEX *select_lex= thd->lex->current_select; DBUG_ENTER("setup_fields"); thd->set_query_id=set_query_id; @@ -2384,7 +2710,10 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, { if (!item->fixed && item->fix_fields(thd, tables, it.ref()) || (item= *(it.ref()))->check_cols(1)) + { + select_lex->no_wrap_view_item= 0; DBUG_RETURN(-1); /* purecov: inspected */ + } if (ref) *(ref++)= item; if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && @@ -2401,28 +2730,33 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, SYNOPSIS setup_tables() - tables table list - + thd Thread handler + tables Table list + conds Condition of current SELECT (can be changed by VIEW) - NOTE - Remap table numbers if INSERT ... SELECT - Check also that the 'used keys' and 'ignored keys' exists and set up the - table structure accordingly + NOTE + Remap table numbers if INSERT ... SELECT + Check also that the 'used keys' and 'ignored keys' exists and set up the + table structure accordingly - This has to be called for all tables that are used by items, as otherwise - table->map is not set and all Item_field will be regarded as const items. + This has to be called for all tables that are used by items, as otherwise + table->map is not set and all Item_field will be regarded as const items. - RETURN - 0 ok; In this case *map will includes the choosed index - 1 error + RETURN + 0 ok; In this case *map will includes the choosed index + 1 error */ -bool setup_tables(TABLE_LIST *tables) +bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds) { DBUG_ENTER("setup_tables"); + if (!tables || tables->setup_is_done) + DBUG_RETURN(0); + tables->setup_is_done= 1; uint tablenr=0; - for (TABLE_LIST *table_list=tables ; table_list ; - table_list=table_list->next,tablenr++) + for (TABLE_LIST *table_list= tables; + table_list; + table_list= table_list->next_local, tablenr++) { TABLE *table= table_list->table; setup_table_map(table, table_list, tablenr); @@ -2444,6 +2778,10 @@ bool setup_tables(TABLE_LIST *tables) table->keys_in_use_for_query.subtract(map); } table->used_keys.intersect(table->keys_in_use_for_query); + if (table_list->ancestor && + table_list->setup_ancestor(thd, conds, + table_list->effective_with_check)) + DBUG_RETURN(1); } if (tablenr > MAX_TABLES) { @@ -2478,8 +2816,9 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, map->clear_all(); while ((name=it++)) { - if ((pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= - 0) + if (table->keynames.type_names == 0 || + (pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= + 0) { my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), name->c_ptr(), table->real_name); @@ -2492,17 +2831,37 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, } -/**************************************************************************** - This just drops in all fields instead of current '*' field - Returns pointer to last inserted field if ok -****************************************************************************/ +/* + Drops in all fields instead of current '*' field + + SYNOPSIS + insert_fields() + thd Thread handler + tables List of tables + db_name Database name in case of 'database_name.table_name.*' + table_name Table name in case of 'table_name.*' + it Pointer to '*' + any_privileges 0 If we should ensure 
that we have SELECT privileges + for all columns + 1 If any privilege is ok + allocate_view_names if true view names will be copied to current Item_arena + memory (made for SP/PS) + RETURN + 0 ok + 'it' is updated to point at last inserted + 1 error. Error message is generated but not sent to client +*/ bool -insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, - const char *table_name, List_iterator<Item> *it) +insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges, bool allocate_view_names) { - char name_buff[NAME_LEN+1]; + /* allocate variables on stack to avoid pool alloaction */ + Field_iterator_table table_iter; + Field_iterator_view view_iter; uint found; + char name_buff[NAME_LEN+1]; DBUG_ENTER("insert_fields"); if (db_name && lower_case_table_names) @@ -2517,66 +2876,173 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, db_name= name_buff; } - - found=0; - for (; tables ; tables=tables->next) + found= 0; + for (; tables; tables= tables->next_local) { - TABLE *table=tables->table; + Field_iterator *iterator; + TABLE_LIST *natural_join_table; + Field *field; + TABLE_LIST *embedded; + TABLE_LIST *last; + TABLE_LIST *embedding; + TABLE *table= tables->table; + if (!table_name || (!my_strcasecmp(table_alias_charset, table_name, tables->alias) && (!db_name || !strcmp(tables->db,db_name)))) { #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Ensure that we have access right to all columns */ - if (!(table->grant.privilege & SELECT_ACL) && - check_grant_all_columns(thd,SELECT_ACL,table)) - DBUG_RETURN(-1); + if (!(table->grant.privilege & SELECT_ACL) && !any_privileges) + { + if (tables->view) + { + view_iter.set(tables); + if (check_grant_all_columns(thd, SELECT_ACL, &tables->grant, + tables->view_db.str, + tables->view_name.str, + &view_iter)) + goto err; + } + else + { + table_iter.set(tables); + if (check_grant_all_columns(thd, SELECT_ACL, &table->grant, + table->table_cache_key, table->real_name, + &table_iter)) + goto err; + } + } #endif - Field **ptr=table->field,*field; - TABLE *natural_join_table= 0; + natural_join_table= 0; + thd->used_tables|= table->map; + last= embedded= tables; - thd->used_tables|=table->map; - if (!table->outer_join && - tables->natural_join && - !tables->natural_join->table->outer_join) - natural_join_table= tables->natural_join->table; + while ((embedding= embedded->embedding) && + embedding->join_list->elements != 1) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(embedding->nested_join->join_list); + last= it++; + while ((next= it++)) + last= next; + if (last != tables) + break; + embedded= embedding; + } - while ((field = *ptr++)) + if (tables == last && + !embedded->outer_join && + embedded->natural_join && + !embedded->natural_join->outer_join) { + embedding= embedded->natural_join; + while (embedding->nested_join) + embedding= embedding->nested_join->join_list.head(); + natural_join_table= embedding; + } + if (tables->field_translation) + iterator= &view_iter; + else + iterator= &table_iter; + iterator->set(tables); + + for (; !iterator->end_of_fields(); iterator->next()) + { + Item *not_used_item; uint not_used_field_index= NO_CACHED_FIELD_INDEX; + const char *field_name= iterator->name(); /* Skip duplicate field names if NATURAL JOIN is used */ if (!natural_join_table || - !find_field_in_table(thd, natural_join_table, field->field_name, - strlen(field->field_name), 0, 0, + !find_field_in_table(thd, natural_join_table, field_name, + 
field_name, + strlen(field_name), ¬_used_item, 0, 0, 0, ¬_used_field_index)) { - Item_field *item= new Item_field(thd, field); + Item *item= iterator->item(thd); if (!found++) (void) it->replace(item); // Replace '*' else it->after(item); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (any_privileges) + { + /* + In time of view creation MEGRGE algorithm for underlying + VIEWs can't be used => it should be Item_field + */ + DBUG_ASSERT(item->type() == Item::FIELD_ITEM); + Item_field *fld= (Item_field*)item; + char *db, *tab; + if (tables->view) + { + db= tables->view_db.str; + tab= tables->view_name.str; + } + else + { + db= tables->db; + tab= tables->real_name; + } + if (!(fld->have_privileges= (get_column_grant(thd, + &table->grant, + db, + tab, + fld->field_name) & + VIEW_ANY_ACL))) + { + my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, + ER(ER_COLUMNACCESS_DENIED_ERROR), + MYF(0), + "ANY", + thd->priv_user, + thd->host_or_ip, + fld->field_name, + tab); + goto err; + } + } +#endif + } + if ((field= iterator->field())) + { + /* + Mark if field used before in this select. + Used by 'insert' to verify if a field name is used twice + */ + if (field->query_id == thd->query_id) + thd->dupp_field=field; + field->query_id=thd->query_id; + table->used_keys.intersect(field->part_of_key); + } + else if (allocate_view_names && + thd->lex->current_select->first_execution) + { + Item_field *item= new Item_field(thd->strdup(tables->view_db.str), + thd->strdup(tables->view_name.str), + thd->strdup(field_name)); + /* + during cleunup() this item will be put in list to replace + expression from VIEW + */ + thd->nocheck_register_item_tree_change(it->ref(), item, + thd->mem_root); } - /* - Mark if field used before in this select. - Used by 'insert' to verify if a field name is used twice - */ - if (field->query_id == thd->query_id) - thd->dupp_field=field; - field->query_id=thd->query_id; - table->used_keys.intersect(field->part_of_key); } /* All fields are used */ table->used_fields=table->fields; } } - if (!found) - { - if (!table_name) - my_error(ER_NO_TABLES_USED,MYF(0)); - else - my_error(ER_BAD_TABLE_ERROR,MYF(0),table_name); - } - DBUG_RETURN(!found); + if (found) + DBUG_RETURN(0); + + if (!table_name) + my_error(ER_NO_TABLES_USED, MYF(0)); + else + my_error(ER_BAD_TABLE_ERROR, MYF(0), table_name); + +err: + DBUG_RETURN(1); } @@ -2587,144 +3053,204 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) { table_map not_null_tables= 0; - Item_arena *arena= 0, backup; + SELECT_LEX *select_lex= thd->lex->current_select; + Item_arena *arena= thd->current_arena, backup; + bool save_wrapper= thd->lex->current_select->no_wrap_view_item; DBUG_ENTER("setup_conds"); + if (select_lex->conds_processed_with_permanent_arena || + arena->is_conventional()) + arena= 0; // For easier test + thd->set_query_id=1; - thd->lex->current_select->cond_count= 0; + + thd->lex->current_select->no_wrap_view_item= 1; + select_lex->cond_count= 0; if (*conds) { thd->where="where clause"; if (!(*conds)->fixed && (*conds)->fix_fields(thd, tables, conds) || (*conds)->check_cols(1)) - DBUG_RETURN(1); - not_null_tables= (*conds)->not_null_tables(); + goto err_no_arena; } - /* Check if we are using outer joins */ - for (TABLE_LIST *table=tables ; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_local) { - if (table->on_expr) + TABLE_LIST *embedded; + TABLE_LIST *embedding= table; + do { - /* Make a join an a expression */ - thd->where="on 
clause"; - - if (!table->on_expr->fixed && - table->on_expr->fix_fields(thd, tables, &table->on_expr) || - table->on_expr->check_cols(1)) - DBUG_RETURN(1); - thd->lex->current_select->cond_count++; - - /* - If it's a normal join or a LEFT JOIN which can be optimized away - add the ON/USING expression to the WHERE - */ - if (!table->outer_join || - ((table->table->map & not_null_tables) && - !(specialflag & SPECIAL_NO_NEW_FUNC))) + embedded= embedding; + if (embedded->on_expr) { - table->outer_join= 0; - arena= thd->change_arena_if_needed(&backup); - *conds= and_conds(*conds, table->on_expr); - table->on_expr=0; - if (arena) - { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err - } - if ((*conds) && !(*conds)->fixed && - (*conds)->fix_fields(thd, tables, conds)) - DBUG_RETURN(1); + /* Make a join an a expression */ + thd->where="on clause"; + if (!embedded->on_expr->fixed && + embedded->on_expr->fix_fields(thd, tables, &embedded->on_expr) || + embedded->on_expr->check_cols(1)) + goto err_no_arena; + select_lex->cond_count++; } - } - if (table->natural_join) - { - arena= thd->change_arena_if_needed(&backup); - /* Make a join of all fields with have the same name */ - TABLE *t1= table->table; - TABLE *t2= table->natural_join->table; - Item_cond_and *cond_and= new Item_cond_and(); - if (!cond_and) // If not out of memory - goto err; - cond_and->top_level_item(); - - Field **t1_field, *t2_field; - for (t1_field= t1->field; (*t1_field); t1_field++) + if (embedded->natural_join) { - const char *t1_field_name= (*t1_field)->field_name; - uint not_used_field_index= NO_CACHED_FIELD_INDEX; - - if ((t2_field= find_field_in_table(thd, t2, t1_field_name, - strlen(t1_field_name), 0, 0, - ¬_used_field_index))) + /* Make a join of all fields wich have the same name */ + TABLE_LIST *tab1= embedded; + TABLE_LIST *tab2= embedded->natural_join; + if (!(embedded->outer_join & JOIN_TYPE_RIGHT)) { - Item_func_eq *tmp=new Item_func_eq(new Item_field(thd, *t1_field), - new Item_field(thd, t2_field)); - if (!tmp) - goto err; - /* Mark field used for table cache */ - (*t1_field)->query_id= t2_field->query_id= thd->query_id; - cond_and->list.push_back(tmp); - t1->used_keys.intersect((*t1_field)->part_of_key); - t2->used_keys.intersect(t2_field->part_of_key); + while (tab1->nested_join) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(tab1->nested_join->join_list); + tab1= it++; + while ((next= it++)) + tab1= next; + } + } + else + { + while (tab1->nested_join) + tab1= tab1->nested_join->join_list.head(); + } + if (embedded->outer_join & JOIN_TYPE_RIGHT) + { + while (tab2->nested_join) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(tab2->nested_join->join_list); + tab2= it++; + while ((next= it++)) + tab2= next; + } + } + else + { + while (tab2->nested_join) + tab2= tab2->nested_join->join_list.head(); } - } - thd->lex->current_select->cond_count+= cond_and->list.elements; - // to prevent natural join processing during PS re-execution - table->natural_join= 0; + if (arena) + arena= thd->change_arena_if_needed(&backup); + + TABLE *t1=tab1->table; + TABLE *t2=tab2->table; + Field_iterator_table table_iter; + Field_iterator_view view_iter; + Field_iterator *iterator; + Field *t1_field, *t2_field; + Item *item_t2; + Item_cond_and *cond_and=new Item_cond_and(); + + if (!cond_and) // If not out of memory + goto err_no_arena; + cond_and->top_level_item(); + + if (table->field_translation) + { + iterator= &view_iter; + view_iter.set(tab1); + } + else + { + iterator= 
&table_iter; + table_iter.set(tab1); + } - if (cond_and->list.elements) - { - if (!table->outer_join) // Not left join + for (; !iterator->end_of_fields(); iterator->next()) { - *conds= and_conds(*conds, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (*conds && !(*conds)->fixed) + const char *t1_field_name= iterator->name(); + uint not_used_field_index= NO_CACHED_FIELD_INDEX; + + if ((t2_field= find_field_in_table(thd, tab2, t1_field_name, + t1_field_name, + strlen(t1_field_name), &item_t2, + 0, 0, 0, + ¬_used_field_index))) { - if ((*conds)->fix_fields(thd, tables, conds)) - DBUG_RETURN(1); + if (t2_field != view_ref_found) + { + if (!(item_t2= new Item_field(thd, t2_field))) + goto err; + /* Mark field used for table cache */ + t2_field->query_id= thd->query_id; + t2->used_keys.intersect(t2_field->part_of_key); + } + if ((t1_field= iterator->field())) + { + /* Mark field used for table cache */ + t1_field->query_id= thd->query_id; + t1->used_keys.intersect(t1_field->part_of_key); + } + Item_func_eq *tmp= new Item_func_eq(iterator->item(thd), + item_t2); + if (!tmp) + goto err; + cond_and->list.push_back(tmp); } } - else + select_lex->cond_count+= cond_and->list.elements; + + // to prevent natural join processing during PS re-execution + embedding->natural_join= 0; + + if (cond_and->list.elements) { - table->on_expr= and_conds(table->on_expr, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (table->on_expr && !table->on_expr->fixed) + COND *on_expr= cond_and; + on_expr->fix_fields(thd, 0, &on_expr); + if (!embedded->outer_join) // Not left join { - if (table->on_expr->fix_fields(thd, tables, &table->on_expr)) - DBUG_RETURN(1); + *conds= and_conds(*conds, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (*conds && !(*conds)->fixed) + { + if ((*conds)->fix_fields(thd, tables, conds)) + goto err_no_arena; + } + } + else + { + embedded->on_expr= and_conds(embedded->on_expr, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (embedded->on_expr && !embedded->on_expr->fixed) + { + if (embedded->on_expr->fix_fields(thd, tables, &table->on_expr)) + goto err_no_arena; + } } } + else if (arena) + thd->restore_backup_item_arena(arena, &backup); } - else if (arena) - { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err - } + embedding= embedded->embedding; } + while (embedding && + embedding->nested_join->join_list.head() == embedded); } - if (thd->current_arena->is_stmt_prepare()) + if (!thd->current_arena->is_conventional()) { /* We are in prepared statement preparation code => we should store WHERE clause changing for next executions. - We do this ON -> WHERE transformation only once per PS statement. + We do this ON -> WHERE transformation only once per PS/SP statement. 
*/ - thd->lex->current_select->where= *conds; + select_lex->where= *conds; + select_lex->conds_processed_with_permanent_arena= 1; } + thd->lex->current_select->no_wrap_view_item= save_wrapper; DBUG_RETURN(test(thd->net.report_error)); err: if (arena) thd->restore_backup_item_arena(arena, &backup); +err_no_arena: + thd->lex->current_select->no_wrap_view_item= save_wrapper; DBUG_RETURN(1); } @@ -2898,7 +3424,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) && ! in_use->killed) { - in_use->killed=1; + in_use->killed= THD::KILL_CONNECTION; pthread_mutex_lock(&in_use->mysys_var->mutex); if (in_use->mysys_var->current_cond) { @@ -2964,3 +3490,62 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) } return 0; } + + +/* + open new .frm format table + + SYNOPSIS + open_new_frm() + path path to .frm + alias alias for table + db database + table_name name of table + db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..) + can be 0 (example in ha_example_table) + prgflag READ_ALL etc.. + ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc.. + outparam result table + table_desc TABLE_LIST descriptor + mem_root temporary MEM_ROOT for parsing +*/ + +static my_bool +open_new_frm(const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc, + MEM_ROOT *mem_root) +{ + LEX_STRING pathstr; + File_parser *parser; + DBUG_ENTER("open_new_frm"); + + pathstr.str= (char*) path; + pathstr.length= strlen(path); + + if ((parser= sql_parse_prepare(&pathstr, mem_root, 1))) + { + if (!strncmp("VIEW", parser->type()->str, parser->type()->length)) + { + if (table_desc == 0 || table_desc->required_type == FRMTYPE_TABLE) + { + my_error(ER_WRONG_OBJECT, MYF(0), db, table_name, "BASE TABLE"); + goto err; + } + if (mysql_make_view(parser, table_desc)) + goto err; + } + else + { + /* only VIEWs are supported now */ + my_error(ER_FRM_UNKNOWN_TYPE, MYF(0), path, parser->type()->str); + goto err; + } + DBUG_RETURN(0); + } + +err: + bzero(outparam, sizeof(TABLE)); // do not run repair + DBUG_RETURN(1); +} diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 1bf8d179770..39ee8f61f1a 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -611,6 +611,7 @@ void query_cache_insert(NET *net, const char *packet, ulong length) DBUG_VOID_RETURN; } header->result(result); + header->last_pkt_nr= net->pkt_nr; BLOCK_UNLOCK_WR(query_block); } else @@ -935,11 +936,16 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) /* Test if the query is a SELECT - (pre-space is removed in dispatch_command) + (pre-space is removed in dispatch_command). + + A first '/' looks like a comment before the command; such queries do not + appear frequently in real life, so we can afford + to check all of them, too. 
*/ - if (my_toupper(system_charset_info, sql[0]) != 'S' || - my_toupper(system_charset_info, sql[1]) != 'E' || - my_toupper(system_charset_info,sql[2]) !='L') + if ((my_toupper(system_charset_info, sql[0]) != 'S' || + my_toupper(system_charset_info, sql[1]) != 'E' || + my_toupper(system_charset_info,sql[2]) !='L') && + sql[0] != '/') { DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached")); goto err; @@ -1115,6 +1121,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) ALIGN_SIZE(sizeof(Query_cache_result)))) break; // Client aborted result_block = result_block->next; + thd->net.pkt_nr= query->last_pkt_nr; // Keep packet number updated } while (result_block != first_result_block); #else { @@ -1153,7 +1160,7 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, using_transactions = using_transactions && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - for (; tables_used; tables_used=tables_used->next) + for (; tables_used; tables_used= tables_used->next_local) { DBUG_ASSERT(!using_transactions || tables_used->table!=0); if (tables_used->derived) @@ -1185,7 +1192,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) if (query_cache_size > 0) { DUMP(this); - for (; tables_used; tables_used=tables_used->next) + for (; tables_used; tables_used= tables_used->next) { invalidate_table((byte*) tables_used->key, tables_used->key_length); DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key, @@ -1218,7 +1225,7 @@ void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used) if (query_cache_size > 0) { DUMP(this); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_local) { if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE)) invalidate_table(tables_used->table); @@ -2104,7 +2111,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, Query_cache_block_table *block_table = block->table(0); - for (n=0; tables_used; tables_used=tables_used->next, n++, block_table++) + for (n= 0; + tables_used; + tables_used= tables_used->next_global, n++, block_table++) { DBUG_PRINT("qcache", ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", @@ -2651,7 +2660,7 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, lex->select_lex.options, (int) thd->variables.query_cache_type)); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_global) { table_count++; DBUG_PRINT("qcache", ("table %s, db %s, type %u", @@ -2718,7 +2727,7 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, { DBUG_ENTER("Query_cache::ask_handler_allowance"); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_global) { TABLE *table= tables_used->table; if (!ha_caching_allowed(thd, table->table_cache_key, @@ -3167,7 +3176,7 @@ void Query_cache::wreck(uint line, const char *message) DBUG_PRINT("warning", ("%5d QUERY CACHE WRECK => DISABLED",line)); DBUG_PRINT("warning", ("==================================")); if (thd) - thd->killed = 1; + thd->killed= THD::KILL_CONNECTION; cache_dump(); /* check_integrity(0); */ /* Can't call it here because of locks */ bins_dump(); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..93d89aeae4f 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -112,6 +112,7 @@ struct Query_cache_query NET *wri; ulong len; uint8 tbls_type; + unsigned int last_pkt_nr; inline void init_n_lock(); void 
unlock_n_destroy(); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index eda60b5cfdb..15e1cc7e212 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -36,6 +36,9 @@ #endif #include <mysys_err.h> +#include "sp_rcontext.h" +#include "sp_cache.h" + /* The following is used to initialise Table_ident with a internal table name @@ -157,18 +160,22 @@ bool foreign_key_prefix(Key *a, Key *b) THD::THD() :user_time(0), global_read_lock(0), is_fatal_error(0), - last_insert_id_used(0), - insert_id_used(0), rand_used(0), time_zone_used(0), - in_lock_tables(0), bootstrap(0) + rand_used(0), time_zone_used(0), + last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0), + in_lock_tables(0), bootstrap(0), spcont(NULL) { current_arena= this; - host= user= priv_user= db= ip=0; +#ifndef DBUG_OFF + backup_arena= 0; +#endif + host= user= priv_user= db= ip= 0; + catalog= (char*)"std"; // the only catalog we have for now host_or_ip= "connecting host"; locked=some_tables_deleted=no_errors=password= 0; - killed=0; query_start_used= 0; count_cuted_fields= CHECK_FIELD_IGNORE; - db_length= col_access= 0; + killed= NOT_KILLED; + db_length= col_access=0; query_error= tmp_table_used= 0; next_insert_id=last_insert_id=0; open_tables= temporary_tables= handler_tables= derived_tables= 0; @@ -197,7 +204,7 @@ THD::THD() #endif net.last_error[0]=0; // If error on boot ull=0; - system_thread=cleanup_done=0; + system_thread= cleanup_done= abort_on_warning= 0; peer_port= 0; // For SHOW PROCESSLIST transaction.changed_tables = 0; #ifdef __WIN__ @@ -224,9 +231,12 @@ THD::THD() /* Initialize sub structures */ init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE); user_connect=(USER_CONN *)0; - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, - (hash_free_key) free_user_var,0); + (hash_free_key) free_user_var, 0); + + sp_proc_cache= NULL; + sp_func_cache= NULL; /* For user vars replication*/ if (opt_bin_log) @@ -254,7 +264,7 @@ THD::THD() if (open_cached_file(&transaction.trans_log, mysql_tmpdir, LOG_PREFIX, binlog_cache_size, MYF(MY_WME))) - killed=1; + killed= KILL_CONNECTION; transaction.trans_log.end_of_file= max_binlog_cache_size; } #endif @@ -292,6 +302,7 @@ void THD::init(void) bzero((char*) warn_count, sizeof(warn_count)); total_warn_count= 0; update_charset(); + bzero((char *) &status_var, sizeof(status_var)); } @@ -330,9 +341,11 @@ void THD::change_user(void) cleanup_done= 0; init(); stmt_map.reset(); - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, (hash_free_key) free_user_var, 0); + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); } @@ -356,6 +369,8 @@ void THD::cleanup(void) my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR)); delete_dynamic(&user_var_events); hash_free(&user_vars); + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); if (global_read_lock) unlock_global_read_lock(this); if (ull) @@ -365,6 +380,7 @@ void THD::cleanup(void) pthread_mutex_unlock(&LOCK_user_locks); ull= 0; } + cleanup_done=1; DBUG_VOID_RETURN; } @@ -377,6 +393,7 @@ THD::~THD() /* Ensure that no one is using THD */ pthread_mutex_lock(&LOCK_delete); pthread_mutex_unlock(&LOCK_delete); + add_to_status(&global_status_var, &status_var); /* Close connection */ #ifndef EMBEDDED_LIBRARY @@ -396,6 +413,9 @@ THD::~THD() } #endif + 
sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); + DBUG_PRINT("info", ("freeing host")); if (host != my_localhost) // If not pointer to constant safeFree(host); @@ -408,7 +428,7 @@ THD::~THD() mysys_var=0; // Safety (shouldn't be needed) pthread_mutex_destroy(&LOCK_delete); #ifndef DBUG_OFF - dbug_sentry = THD_SENTRY_GONE; + dbug_sentry= THD_SENTRY_GONE; #endif /* Reset stmt_backup.mem_root to not double-free memory from thd.mem_root */ clear_alloc_root(&stmt_backup.main_mem_root); @@ -416,14 +436,35 @@ THD::~THD() } -void THD::awake(bool prepare_to_die) +/* + Add to one status variable another status variable + + NOTES + This function assumes that all variables are long/ulong. + If this assumption will change, then we have to explictely add + the other variables after the while loop +*/ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var) +{ + ulong *end= (ulong*) ((byte*) to_var + offsetof(STATUS_VAR, + last_system_status_var) + + sizeof(ulong)); + ulong *to= (ulong*) to_var, *from= (ulong*) from_var; + + while (to != end) + *(to++)+= *(from++); +} + + +void THD::awake(THD::killed_state state_to_set) { THD_CHECK_SENTRY(this); safe_mutex_assert_owner(&LOCK_delete); - if (prepare_to_die) - killed = 1; - thr_alarm_kill(real_id); + killed= state_to_set; + if (state_to_set != THD::KILL_QUERY) + thr_alarm_kill(real_id); #ifdef SIGNAL_WITH_VIO_CLOSE close_active_vio(); #endif @@ -482,6 +523,24 @@ bool THD::store_globals() } +/* Cleanup after a query */ + +void THD::cleanup_after_query() +{ + if (clear_next_insert_id) + { + clear_next_insert_id= 0; + next_insert_id= 0; + } + /* Free Items that were created during this execution */ + free_items(free_list); + /* + In the rest of code we assume that free_list never points to garbage: + Keep this predicate true. 
+ */ + free_list= 0; +} + /* Convert a string to another character set @@ -644,7 +703,7 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length) { my_error(EE_OUTOFMEMORY, MYF(ME_BELL), ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1); - killed= 1; + killed= KILL_CONNECTION; return 0; } @@ -671,15 +730,16 @@ int THD::send_explain_fields(select_result *result) item->maybe_null=1; field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs)); item->maybe_null=1; - field_list.push_back(item=new Item_return_int("key_len",3, - MYSQL_TYPE_LONGLONG)); + field_list.push_back(item=new Item_empty_string("key_len", + NAME_LEN*MAX_KEY)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("ref", NAME_LEN*MAX_REF_PARTS, cs)); item->maybe_null=1; field_list.push_back(new Item_return_int("rows", 10, MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Extra", 255, cs)); - return (result->send_fields(field_list,1)); + return (result->send_fields(field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)); } #ifdef SIGNAL_WITH_VIO_CLOSE @@ -740,10 +800,13 @@ void THD::rollback_item_tree_changes() { I_List_iterator<Item_change_record> it(change_list); Item_change_record *change; + DBUG_ENTER("rollback_item_tree_changes"); + while ((change= it++)) *change->place= change->old_value; /* We can forget about changes memory: it's allocated in runtime memroot */ change_list.empty(); + DBUG_VOID_RETURN; } @@ -780,9 +843,9 @@ sql_exchange::sql_exchange(char *name,bool flag) escaped= &default_escaped; } -bool select_send::send_fields(List<Item> &list,uint flag) +bool select_send::send_fields(List<Item> &list, uint flags) { - return thd->protocol->send_fields(&list,flag); + return thd->protocol->send_fields(&list, flags); } /* Send data to client. 
Returns 0 if ok */ @@ -1349,27 +1412,35 @@ bool select_exists_subselect::send_data(List<Item> &items) int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) { List_iterator_fast<Item> li(list); - List_iterator_fast<LEX_STRING> gl(var_list); + List_iterator_fast<my_var> gl(var_list); Item *item; - LEX_STRING *ls; + + local_vars.empty(); // Clear list if SP + unit= u; + row_count= 0; + if (var_list.elements != list.elements) { my_error(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, MYF(0)); return 1; } - unit=u; while ((item=li++)) { - ls= gl++; - Item_func_set_user_var *xx = new Item_func_set_user_var(*ls,item); - /* - Item_func_set_user_var can't substitute something else on its place => - 0 can be passed as last argument (reference on item) - */ - xx->fix_fields(thd,(TABLE_LIST*) thd->lex->select_lex.table_list.first, - 0); - xx->fix_length_and_dec(); - vars.push_back(xx); + my_var *mv= gl++; + if (mv->local) + (void)local_vars.push_back(new Item_splocal(mv->s, mv->offset)); + else + { + Item_func_set_user_var *xx = new Item_func_set_user_var(mv->s, item); + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + xx->fix_fields(thd, (TABLE_LIST*) thd->lex->select_lex.table_list.first, + 0); + xx->fix_length_and_dec(); + vars.push_back(xx); + } } return 0; } @@ -1430,7 +1501,7 @@ Item_arena::Item_arena(bool init_mem_root) Item_arena::Type Item_arena::type() const { - DBUG_ASSERT("Item_arena::type()" == "abstract"); + DBUG_ASSERT(0); /* Should never be called */ return STATEMENT; } @@ -1446,7 +1517,8 @@ Statement::Statement(THD *thd) allow_sum_func(0), lex(&main_lex), query(0), - query_length(0) + query_length(0), + cursor(0) { name.str= NULL; } @@ -1464,7 +1536,8 @@ Statement::Statement() allow_sum_func(0), /* initialized later */ lex(&main_lex), query(0), /* these two are set */ - query_length(0) /* in alloc_query() */ + query_length(0), /* in alloc_query() */ + cursor(0) { } @@ -1483,6 +1556,7 @@ void Statement::set_statement(Statement *stmt) lex= stmt->lex; query= stmt->query; query_length= stmt->query_length; + cursor= stmt->cursor; } @@ -1507,8 +1581,8 @@ void THD::end_statement() lex_end(lex); delete lex->result; lex->result= 0; - free_items(free_list); - free_list= 0; + /* Note that free_list is freed in cleanup_after_query() */ + /* Don't free mem_root, as mem_root is freed in the end of dispatch_command (once for any command). @@ -1519,8 +1593,12 @@ void THD::end_statement() void Item_arena::set_n_backup_item_arena(Item_arena *set, Item_arena *backup) { DBUG_ENTER("Item_arena::set_n_backup_item_arena"); + DBUG_ASSERT(backup_arena == 0); backup->set_item_arena(this); set_item_arena(set); +#ifndef DBUG_OFF + backup_arena= 1; +#endif DBUG_VOID_RETURN; } @@ -1530,6 +1608,9 @@ void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup) DBUG_ENTER("Item_arena::restore_backup_item_arena"); set->set_item_arena(this); set_item_arena(backup); +#ifndef DBUG_OFF + backup_arena= 0; +#endif #ifdef NOT_NEEDED_NOW /* Reset backup mem_root to avoid its freeing. 
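/*
  Aside (reviewer's sketch, not part of the patch): the add_to_status()
  function introduced earlier in this sql_class.cc hunk folds a thread's
  per-connection counters into the global set by treating STATUS_VAR as a
  flat array of ulong and walking it up to the last_system_status_var
  marker.  A minimal standalone illustration of the same technique, using a
  made-up two-member counter struct (all names here are hypothetical):
*/
#include <cstddef>

struct counters                          /* stand-in for STATUS_VAR */
{
  unsigned long bytes_sent;
  unsigned long bytes_received;          /* must stay the last ulong member */
};

static void add_counters(counters *to_var, const counters *from_var)
{
  /* one past the last ulong member, computed as in add_to_status() */
  unsigned long *end= (unsigned long*) ((char*) to_var +
                                        offsetof(counters, bytes_received) +
                                        sizeof(unsigned long));
  unsigned long *to= (unsigned long*) to_var;
  const unsigned long *from= (const unsigned long*) from_var;

  while (to != end)                      /* add every counter in one pass */
    *(to++)+= *(from++);
}
/*
  A counter added after the marker is never summed, and a member that is not
  a ulong would throw the walk off -- the caveat the NOTES section above
  spells out.
*/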
@@ -1620,8 +1701,19 @@ int Statement_map::insert(Statement *statement) bool select_dumpvar::send_data(List<Item> &items) { List_iterator_fast<Item_func_set_user_var> li(vars); + List_iterator_fast<Item_splocal> var_li(local_vars); + List_iterator_fast<my_var> my_li(var_list); + List_iterator_fast<Item> it(items); Item_func_set_user_var *xx; + Item_splocal *yy; + Item *item; + my_var *zz; DBUG_ENTER("send_data"); + if (unit->offset_limit_cnt) + { // using limit offset,count + unit->offset_limit_cnt--; + DBUG_RETURN(0); + } if (unit->offset_limit_cnt) { // Using limit offset,count @@ -1633,26 +1725,34 @@ bool select_dumpvar::send_data(List<Item> &items) my_error(ER_TOO_MANY_ROWS, MYF(0)); DBUG_RETURN(1); } - while ((xx=li++)) + while ((zz=my_li++) && (item=it++)) { - xx->check(); - xx->update(); + if (zz->local) + { + if ((yy=var_li++)) + { + if (thd->spcont->set_item_eval(yy->get_offset(), item, zz->type)) + DBUG_RETURN(1); + } + } + else + { + if ((xx=li++)) + { + xx->check(); + xx->update(); + } + } } DBUG_RETURN(0); } bool select_dumpvar::send_eof() { - if (row_count) - { - ::send_ok(thd,row_count); - return 0; - } - else - { - my_error(ER_EMPTY_QUERY,MYF(0)); - return 1; - } + if (! row_count) + send_warning(thd, ER_SP_FETCH_NO_DATA); + ::send_ok(thd,row_count); + return 0; } /**************************************************************************** @@ -1665,3 +1765,27 @@ void TMP_TABLE_PARAM::init() group_parts= group_length= group_null_parts= 0; quick_group= 1; } + + +void thd_increment_bytes_sent(ulong length) +{ + current_thd->status_var.bytes_sent+= length; +} + + +void thd_increment_bytes_received(ulong length) +{ + current_thd->status_var.bytes_received+= length; +} + + +void thd_increment_net_big_packet_count(ulong length) +{ + current_thd->status_var.net_big_packet_count+= length; +} + + +void THD::set_status_var_init() +{ + bzero((char*) &status_var, sizeof(status_var)); +} diff --git a/sql/sql_class.h b/sql/sql_class.h index 312d9de9794..85a18b10671 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -26,6 +26,9 @@ class Query_log_event; class Load_log_event; class Slave_log_event; +class Format_description_log_event; +class sp_rcontext; +class sp_cache; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; @@ -97,7 +100,14 @@ class MYSQL_LOG enum cache_type io_cache_type; bool write_error, inited; bool need_start_event; - bool no_auto_events; // For relay binlog + /* + no_auto_events means we don't want any of these automatic events : + Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we don't want + a Rotate_log event to be written to the relay log. When we start a relay log + etc. So in 4.x this is 1 for relay logs, 0 for binlogs. + In 5.0 it's 0 for relay logs too! + */ + bool no_auto_events; /* The max size before rotation (usable only if log_type == LOG_BIN: binary logs and relay logs). @@ -114,6 +124,18 @@ class MYSQL_LOG public: MYSQL_LOG(); ~MYSQL_LOG(); + + /* + These describe the log's format. This is used only for relay logs. + _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's + necessary to have 2 distinct objects, because the I/O thread may be reading + events in a different format from what the SQL thread is reading (consider + the case of a master which has been upgraded from 5.0 to 5.1 without doing + RESET MASTER, or from 4.x to 5.0). 
+ */ + Format_description_log_event *description_event_for_exec, + *description_event_for_queue; + void reset_bytes_written() { bytes_written = 0; @@ -142,7 +164,8 @@ public: bool open(const char *log_name,enum_log_type log_type, const char *new_name, const char *index_file_name_arg, enum cache_type io_cache_type_arg, - bool no_auto_events_arg, ulong max_size); + bool no_auto_events_arg, ulong max_size, + bool null_created); void new_file(bool need_lock= 1); bool write(THD *thd, enum enum_server_command command, const char *format,...); @@ -204,6 +227,9 @@ typedef struct st_copy_info { /* for INSERT ... UPDATE */ List<Item> *update_fields; List<Item> *update_values; +/* for VIEW ... WITH CHECK OPTION */ + TABLE_LIST *view; + bool ignore; } COPY_INFO; @@ -350,6 +376,7 @@ struct system_variables ulonglong myisam_max_sort_file_size; ha_rows select_limit; ha_rows max_join_size; + ulong auto_increment_increment, auto_increment_offset; ulong bulk_insert_buff_size; ulong join_buff_size; ulong long_query_time; @@ -368,6 +395,8 @@ struct system_variables ulong net_retry_count; ulong net_wait_timeout; ulong net_write_timeout; + ulong optimizer_prune_level; + ulong optimizer_search_depth; ulong preload_buff_size; ulong query_cache_type; ulong read_buff_size; @@ -378,6 +407,8 @@ struct system_variables ulong tx_isolation; /* Determines which non-standard SQL behaviour should be enabled */ ulong sql_mode; + /* check of key presence in updatable view */ + ulong updatable_views_with_limit; ulong default_week_format; ulong max_seeks_for_key; ulong range_alloc_block_size; @@ -418,6 +449,61 @@ struct system_variables DATE_TIME_FORMAT *time_format; }; + +/* per thread status variables */ + +typedef struct system_status_var +{ + ulong bytes_received; + ulong bytes_sent; + ulong com_other; + ulong com_stat[(uint) SQLCOM_END]; + ulong created_tmp_disk_tables; + ulong created_tmp_tables; + ulong ha_commit_count; + ulong ha_delete_count; + ulong ha_read_first_count; + ulong ha_read_last_count; + ulong ha_read_key_count; + ulong ha_read_next_count; + ulong ha_read_prev_count; + ulong ha_read_rnd_count; + ulong ha_read_rnd_next_count; + ulong ha_rollback_count; + ulong ha_update_count; + ulong ha_write_count; + + /* KEY_CACHE parts. These are copies of the original */ + ulong key_blocks_changed; + ulong key_blocks_used; + ulong key_cache_r_requests; + ulong key_cache_read; + ulong key_cache_w_requests; + ulong key_cache_write; + /* END OF KEY_CACHE parts */ + + ulong net_big_packet_count; + ulong opened_tables; + ulong select_full_join_count; + ulong select_full_range_join_count; + ulong select_range_count; + ulong select_range_check_count; + ulong select_scan_count; + ulong long_query_count; + ulong filesort_merge_passes; + ulong filesort_range_count; + ulong filesort_rows; + ulong filesort_scan_count; +} STATUS_VAR; + +/* + This is used for 'show status'. 
It must be updated to the last ulong + variable in system_status_var +*/ + +#define last_system_status_var filesort_scan_count + + void free_tmp_table(THD *thd, TABLE *entry); @@ -431,6 +517,9 @@ public: Item *free_list; MEM_ROOT main_mem_root; MEM_ROOT *mem_root; // Pointer to current memroot +#ifndef DBUG_OFF + bool backup_arena; +#endif enum enum_state { INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2, @@ -467,7 +556,7 @@ public: inline bool is_first_stmt_execute() const { return state == PREPARED; } inline bool is_stmt_execute() const { return state == PREPARED || state == EXECUTED; } - inline bool is_conventional_execution() const + inline bool is_conventional() const { return state == CONVENTIONAL_EXECUTION; } inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); } inline gptr calloc(unsigned int size) @@ -497,6 +586,8 @@ public: }; +class Cursor; + /* State of a single command executed against this connection. One connection can contain a lot of simultaneously running statements, @@ -570,6 +661,7 @@ public: */ char *query; uint32 query_length; // current query length + Cursor *cursor; public: @@ -675,7 +767,7 @@ typedef I_List<Item_change_record> Item_change_list; a thread/connection descriptor */ -class THD :public ilink, +class THD :public ilink, public Statement { public: @@ -700,6 +792,7 @@ public: struct sockaddr_in remote; // client socket address struct rand_struct rand; // used for authentication struct system_variables variables; // Changeable local variables + struct system_status_var status_var; // Per thread statistic vars pthread_mutex_t LOCK_delete; // Locked before thd is deleted /* all prepared statements and cursors of this connection */ Statement_map stmt_map; @@ -720,9 +813,17 @@ public: the connection priv_user - The user privilege we are using. May be '' for anonymous user. db - currently selected database + catalog - currently selected catalog ip - client IP + WARNING: some members of THD (currently 'db', 'catalog' and 'query') are + set and alloced by the slave SQL thread (for the THD of that thread); that + thread is (and must remain, for now) the only responsible for freeing these + 3 members. If you add members here, and you add code to set them in + replication, don't forget to free_them_and_set_them_to_0 in replication + properly. For details see the 'err:' label of the pthread_handler_decl of + the slave SQL thread, in sql/slave.cc. */ - char *host,*user,*priv_user,*db,*ip; + char *host,*user,*priv_user,*db,*catalog,*ip; char priv_host[MAX_HOSTNAME]; /* remote (peer) port */ uint16 peer_port; @@ -837,6 +938,8 @@ public: generated auto_increment value in handler.cc */ ulonglong next_insert_id; + /* Remember last next_insert_id to reset it if something went wrong */ + ulonglong prev_insert_id; /* The insert_id used for the last statement or set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#). 
Used for binary log and returned by @@ -891,6 +994,9 @@ public: /* for user variables replication*/ DYNAMIC_ARRAY user_var_events; + enum killed_state { NOT_KILLED=0, KILL_BAD_DATA=1, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED }; + killed_state volatile killed; + /* scramble - random string sent to client on handshake */ char scramble[SCRAMBLE_LENGTH+1]; @@ -898,14 +1004,18 @@ public: bool locked, some_tables_deleted; bool last_cuted_field; bool no_errors, password, is_fatal_error; - bool query_start_used,last_insert_id_used,insert_id_used,rand_used; - bool time_zone_used; + bool query_start_used, rand_used, time_zone_used; + bool last_insert_id_used,insert_id_used, clear_next_insert_id; bool in_lock_tables; bool query_error, bootstrap, cleanup_done; bool tmp_table_used; bool charset_is_system_charset, charset_is_collation_connection; bool slow_command; - my_bool volatile killed; + bool no_trans_update, abort_on_warning; + longlong row_count_func; /* For the ROW_COUNT() function */ + sp_rcontext *spcont; // SP runtime context + sp_cache *sp_proc_cache; + sp_cache *sp_func_cache; /* If we do a purge of binary logs, log index info of the threads @@ -937,6 +1047,7 @@ public: void init_for_queries(); void change_user(void); void cleanup(void); + void cleanup_after_query(); bool store_globals(); #ifdef SIGNAL_WITH_VIO_CLOSE inline void set_active_vio(Vio* vio) @@ -953,7 +1064,7 @@ public: } void close_active_vio(); #endif - void awake(bool prepare_to_die); + void awake(THD::killed_state state_to_set); /* For enter_cond() / exit_cond() to work the mutex must be got before enter_cond() (in 4.1 an assertion will soon ensure this); this mutex is @@ -1017,8 +1128,16 @@ public: return 0; #endif } - inline gptr trans_alloc(unsigned int size) - { + inline bool only_prepare() + { + return command == COM_PREPARE; + } + inline bool fill_derived_tables() + { + return !only_prepare() && !lex->only_view_structure(); + } + inline gptr trans_alloc(unsigned int size) + { return alloc_root(&transaction.mem_root,size); } @@ -1059,7 +1178,7 @@ public: use new arena if we are in a prepared statements and we have not already changed to use this arena. */ - if (current_arena->is_stmt_prepare() && + if (!current_arena->is_conventional() && mem_root != ¤t_arena->main_mem_root) { set_n_backup_item_arena(current_arena, backup); @@ -1071,7 +1190,7 @@ public: void change_item_tree(Item **place, Item *new_value) { /* TODO: check for OOM condition here */ - if (!current_arena->is_conventional_execution()) + if (!current_arena->is_conventional()) nocheck_register_item_tree_change(place, *place, mem_root); *place= new_value; } @@ -1084,6 +1203,22 @@ public: state after execution of a non-prepared SQL statement. */ void end_statement(); + inline int killed_errno() const + { + return killed != KILL_BAD_DATA ? 
killed : 0; + } + inline void send_kill_message() const + { + my_error(killed_errno(), MYF(0)); + } + /* return TRUE if we will abort query if we make a warning now */ + inline bool really_abort_on_warning() + { + return (abort_on_warning && + (!no_trans_update || + (variables.sql_mode & MODE_STRICT_ALL_TABLES))); + } + void set_status_var_init(); }; /* Flags for the THD::system_thread (bitmap) variable */ @@ -1137,11 +1272,12 @@ public: */ virtual uint field_count(List<Item> &fields) const { return fields.elements; } - virtual bool send_fields(List<Item> &list,uint flag)=0; + virtual bool send_fields(List<Item> &list, uint flags)=0; virtual bool send_data(List<Item> &items)=0; virtual bool initialize_tables (JOIN *join=0) { return 0; } virtual void send_error(uint errcode,const char *err); virtual bool send_eof()=0; + virtual bool simple_select() { return 0; } virtual void abort() {} /* Cleanup instance of this class for next execution of a prepared @@ -1168,9 +1304,10 @@ public: class select_send :public select_result { public: select_send() {} - bool send_fields(List<Item> &list,uint flag); + bool send_fields(List<Item> &list, uint flags); bool send_data(List<Item> &items); bool send_eof(); + bool simple_select() { return 1; } }; @@ -1214,18 +1351,16 @@ public: class select_insert :public select_result_interceptor { public: + TABLE_LIST *table_list; TABLE *table; List<Item> *fields; ulonglong last_insert_id; COPY_INFO info; + bool insert_into_view; - select_insert(TABLE *table_par, List<Item> *fields_par, - enum_duplicates duplic) - :table(table_par), fields(fields_par), last_insert_id(0) - { - bzero((char*) &info,sizeof(info)); - info.handle_duplicates=duplic; - } + select_insert(TABLE_LIST *table_list_par, TABLE *table_par, + List<Item> *fields_par, enum_duplicates duplic, + bool ignore_check_option_errors); ~select_insert(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); @@ -1238,22 +1373,21 @@ class select_insert :public select_result_interceptor { class select_create: public select_insert { ORDER *group; - const char *db; - const char *name; + TABLE_LIST *create_table; List<create_field> *extra_fields; List<Key> *keys; HA_CREATE_INFO *create_info; MYSQL_LOCK *lock; Field **field; public: - select_create(const char *db_name, const char *table_name, - HA_CREATE_INFO *create_info_par, - List<create_field> &fields_par, - List<Key> &keys_par, - List<Item> &select_fields,enum_duplicates duplic) - :select_insert (NULL, &select_fields, duplic), db(db_name), - name(table_name), extra_fields(&fields_par),keys(&keys_par), - create_info(create_info_par), lock(0) + select_create (TABLE_LIST *table, + HA_CREATE_INFO *create_info_par, + List<create_field> &fields_par, + List<Key> &keys_par, + List<Item> &select_fields,enum_duplicates duplic) + :select_insert (NULL, NULL, &select_fields, duplic, 0), create_table(table), + extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par), + lock(0) {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &values); @@ -1436,8 +1570,13 @@ class user_var_entry DTCollation collation; }; - -/* Class for unique (removing of duplicates) */ +/* + Unique -- class for unique (removing of duplicates). + Puts all values to the TREE. If the tree becomes too big, + it's dumped to the file. User can request sorted values, or + just iterate through them. In the last case tree merging is performed in + memory simultaneously with iteration, so it should be ~2-3x faster. 
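  A rough usage sketch (reviewer's illustration, not from the patch itself),
  modelled on how multi_delete collects row positions in the sql_delete.cc
  hunks further down; "more_rows" is only a placeholder condition:

    Unique *uniq= new Unique(refpos_order_cmp, (void*) table->file,
                             table->file->ref_length,    // fixed key size
                             MEM_STRIP_BUF_SIZE);        // max in-memory tree
    while (more_rows)
      uniq->unique_add(table->file->ref);  // spills to a temp file when full
    uniq->get(table);                      // merge-sorted stream into 'table'
                                           // (or walk() to just iterate)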
+ */ class Unique :public Sql_alloc { @@ -1451,10 +1590,10 @@ class Unique :public Sql_alloc public: ulong elements; - Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, + Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg, uint size_arg, ulong max_in_memory_size_arg); ~Unique(); - inline bool unique_add(gptr ptr) + inline bool unique_add(void *ptr) { if (tree.elements_in_tree > max_elements && flush()) return 1; @@ -1462,6 +1601,18 @@ public: } bool get(TABLE *table); + static double get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulong max_in_memory_size); + inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size, + ulong max_in_memory_size) + { + register ulong max_elems_in_tree= + (1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)); + return sizeof(uint)*(1 + nkeys/max_elems_in_tree); + } + + void reset(); + bool walk(tree_walk_action action, void *walk_action_arg); friend int unique_write_to_file(gptr key, element_count count, Unique *unique); friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique); @@ -1515,16 +1666,32 @@ public: bool send_eof(); }; +class my_var : public Sql_alloc { +public: + LEX_STRING s; + bool local; + uint offset; + enum_field_types type; + my_var (LEX_STRING& j, bool i, uint o, enum_field_types t) + :s(j), local(i), offset(o), type(t) + {} + ~my_var() {} +}; class select_dumpvar :public select_result_interceptor { ha_rows row_count; public: - List<LEX_STRING> var_list; + List<my_var> var_list; List<Item_func_set_user_var> vars; - select_dumpvar(void) { var_list.empty(); vars.empty(); row_count=0;} + List<Item_splocal> local_vars; + select_dumpvar(void) { var_list.empty(); local_vars.empty(); vars.empty(); row_count=0;} ~select_dumpvar() {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool send_eof(); void cleanup(); }; + +/* Functions in sql_class.cc */ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 350a7432990..ad74bb8f833 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -20,6 +20,7 @@ #include "mysql_priv.h" #include <mysys_err.h> #include "sql_acl.h" +#include "sp.h" #include <my_dir.h> #include <m_ctype.h> #ifdef __WIN__ @@ -33,7 +34,8 @@ static TYPELIB deletable_extentions= static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, const char *path, uint level); - +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path); +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); /* Database options hash */ static HASH dboptions; static my_bool dboptions_init= 0; @@ -464,7 +466,6 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, query= thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0); @@ -514,7 +515,6 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) thd->variables.collation_database= thd->db_charset; } - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0); @@ -547,7 +547,6 @@ exit2: -1 Error generated */ - int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { long deleted=0; @@ -622,7 +621,6 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) query =thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, 
query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0); @@ -633,6 +631,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } exit: + (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ start_waiting_global_read_lock(thd); /* If this database was the client's selected database, we silently change the @@ -701,9 +700,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, continue; /* Check if file is a raid directory */ - if ((my_isdigit(&my_charset_latin1, file->name[0]) || + if ((my_isdigit(system_charset_info, file->name[0]) || (file->name[0] >= 'a' && file->name[0] <= 'f')) && - (my_isdigit(&my_charset_latin1, file->name[1]) || + (my_isdigit(system_charset_info, file->name[1]) || (file->name[1] >= 'a' && file->name[1] <= 'f')) && !file->name[2] && !level) { @@ -729,6 +728,25 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, found_other_files++; continue; } + else if (file->name[0] == 'a' && file->name[1] == 'r' && + file->name[2] == 'c' && file->name[3] == '\0') + { + /* .frm archive */ + char newpath[FN_REFLEN], *copy_of_path; + MY_DIR *new_dirp; + uint length; + strxmov(newpath, org_path, "/", "arc", NullS); + length= unpack_filename(newpath, newpath); + if ((new_dirp = my_dir(newpath, MYF(MY_DONT_SORT)))) + { + DBUG_PRINT("my",("Archive subdir found: %s", newpath)); + if ((mysql_rm_arc_files(thd, new_dirp, newpath)) < 0) + goto err; + continue; + } + found_other_files++; + continue; + } extension= fn_ext(file->name); if (find_type(extension, &deletable_extentions,1+2) <= 0) { @@ -751,7 +769,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, table_list->alias= table_list->real_name; // If lower_case_table_names=2 /* Link into list */ (*tot_list_next)= table_list; - tot_list_next= &table_list->next; + tot_list_next= &table_list->next_local; deleted++; } else @@ -788,44 +806,140 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } else { - char tmp_path[FN_REFLEN], *pos; - char *path= tmp_path; - unpack_filename(tmp_path,org_path); + /* Don't give errors if we can't delete 'RAID' directory */ + if (rm_dir_w_symlink(org_path, level == 0)) + DBUG_RETURN(-1); + } + + DBUG_RETURN(deleted); + +err: + my_dirend(dirp); + DBUG_RETURN(-1); +} + + +/* + Remove directory with symlink + + SYNOPSIS + rm_dir_w_symlink() + org_path path of derictory + send_error send errors + RETURN + 0 OK + 1 ERROR +*/ + +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) +{ + char tmp_path[FN_REFLEN], tmp2_path[FN_REFLEN], *pos; + char *path= tmp_path; + DBUG_ENTER("rm_dir_w_symlink"); + unpack_filename(tmp_path, org_path); #ifdef HAVE_READLINK - int error; - - /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ - pos=strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; + int error; - if ((error=my_readlink(filePath, path, MYF(MY_WME))) < 0) - DBUG_RETURN(-1); - if (!error) + /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ + pos= strend(path); + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + + if ((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0) + DBUG_RETURN(1); + if (!error) + { + if (my_delete(path, MYF(send_error ? MY_WME : 0))) { - if (my_delete(path,MYF(!level ? 
MY_WME : 0))) - { - /* Don't give errors if we can't delete 'RAID' directory */ - if (level) - DBUG_RETURN(deleted); - DBUG_RETURN(-1); - } - /* Delete directory symbolic link pointed at */ - path= filePath; + DBUG_RETURN(send_error); } + /* Delete directory symbolic link pointed at */ + path= tmp2_path; + } #endif - /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ - pos=strend(path); + /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ + pos= strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; - /* Don't give errors if we can't delete 'RAID' directory */ - if (rmdir(path) < 0 && !level) + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + if (rmdir(path) < 0 && send_error) + { + my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + + +/* + Remove .frm archives from directory + + SYNOPSIS + thd thread handler + dirp list of files in archive directory + db data base name + org_path path of archive directory + + RETURN + > 0 number of removed files + -1 error +*/ +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, + const char *org_path) +{ + long deleted= 0; + ulong found_other_files= 0; + char filePath[FN_REFLEN]; + DBUG_ENTER("mysql_rm_arc_files"); + DBUG_PRINT("enter", ("path: %s", org_path)); + + for (uint idx=0 ; + idx < (uint) dirp->number_off_files && !thd->killed ; + idx++) + { + FILEINFO *file=dirp->dir_entry+idx; + char *extension, *revision; + DBUG_PRINT("info",("Examining: %s", file->name)); + + /* skiping . and .. */ + if (file->name[0] == '.' && (!file->name[1] || + (file->name[1] == '.' && !file->name[2]))) + continue; + + extension= fn_ext(file->name); + if (extension[0] != '.' || + extension[1] != 'f' || extension[2] != 'r' || + extension[3] != 'm' || extension[4] != '-') { - my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); - DBUG_RETURN(-1); + found_other_files++; + continue; + } + revision= extension+5; + while (*revision && my_isdigit(system_charset_info, *revision)) + revision++; + if (*revision) + { + found_other_files++; + continue; + } + strxmov(filePath, org_path, "/", file->name, NullS); + if (my_delete_with_symlink(filePath,MYF(MY_WME))) + { + goto err; } } + if (thd->killed) + goto err; + + my_dirend(dirp); + + /* + If the directory is a symbolic link, remove the link first, then + remove the directory the symbolic link pointed at + */ + if (!found_other_files && + rm_dir_w_symlink(org_path, 0)) + DBUG_RETURN(-1); DBUG_RETURN(deleted); err: diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 09893970803..8bdf19195f3 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -26,6 +26,8 @@ #include "mysql_priv.h" #include "ha_innodb.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, ha_rows limit, ulong options) @@ -39,8 +41,8 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, ha_rows deleted; DBUG_ENTER("mysql_delete"); - if ((open_and_lock_tables(thd, table_list))) - DBUG_RETURN(-1); + if ((error= open_and_lock_tables(thd, table_list))) + DBUG_RETURN(error); table= table_list->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); thd->proc_info="init"; @@ -88,6 +90,7 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, { delete select; free_underlaid_joins(thd, &thd->lex->select_lex); + thd->row_count_func= 0; send_ok(thd,0L); DBUG_RETURN(0); // Nothing to delete } @@ -142,6 +145,13 @@ int 
mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, select= 0; } + /* If quick select is used, initialize it before retrieving rows. */ + if (select && select->quick && select->quick->reset()) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(-1); // This will force out message + } init_read_record(&info,thd,table,select,1,1); deleted=0L; init_ftfuncs(thd, &thd->lex->select_lex, 1); @@ -152,6 +162,11 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, // thd->net.report_error is tested to disallow delete row on error if (!(select && select->skip_record())&& !thd->net.report_error ) { + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE); + if (!(error=table->file->delete_row(table->record[0]))) { deleted++; @@ -175,6 +190,10 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, error= 1; break; } + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER); } else table->file->unlock_row(); // Row failed selection, release lock on it @@ -210,7 +229,6 @@ cleanup: */ if ((deleted || (error < 0)) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -236,9 +254,10 @@ cleanup: } free_underlaid_joins(thd, &thd->lex->select_lex); if (error >= 0 || thd->net.report_error) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN: 0); + send_error(thd,thd->killed_errno()); else { + thd->row_count_func= deleted; send_ok(thd,deleted); DBUG_PRINT("info",("%d records deleted",deleted)); } @@ -252,7 +271,7 @@ cleanup: SYNOPSIS mysql_prepare_delete() thd - thread handler - table_list - global table list + table_list - global/local table list conds - conditions RETURN VALUE @@ -262,19 +281,24 @@ cleanup: */ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) { - TABLE_LIST *delete_table_list= ((TABLE_LIST*) thd->lex-> - select_lex.table_list.first); + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_delete"); - if (setup_conds(thd, delete_table_list, conds) || - setup_ftfuncs(&thd->lex->select_lex)) + if (setup_tables(thd, table_list, conds) || + setup_conds(thd, table_list, conds) || + setup_ftfuncs(select_lex)) + DBUG_RETURN(-1); + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); DBUG_RETURN(-1); - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + } + if (unique_table(table_list, table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); DBUG_RETURN(-1); } + select_lex->fix_prepare_information(thd, conds); DBUG_RETURN(0); } @@ -285,12 +309,79 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) #define MEM_STRIP_BUF_SIZE current_thd->variables.sortbuff_size -extern "C" int refposcmp2(void* arg, const void *a,const void *b) +extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b) { - /* arg is a pointer to file->ref_length */ - return memcmp(a,b, *(int*) arg); + handler *file= (handler*)arg; + return file->cmp_ref((const byte*)a, (const byte*)b); +} + +/* + make delete specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_delete_prepare() + thd thread handler + + RETURN + 0 OK + -1 Error +*/ + +int mysql_multi_delete_prepare(THD 
*thd) +{ + LEX *lex= thd->lex; + TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxilliary_table_list.first; + TABLE_LIST *target_tbl; + int res= 0; + DBUG_ENTER("mysql_multi_delete_prepare"); + + /* + setup_tables() need for VIEWs. JOIN::prepare() will not do it second + time. + + lex->query_tables also point on local list of DELETE SELECT_LEX + */ + if (setup_tables(thd, lex->query_tables, &lex->select_lex.where)) + DBUG_RETURN(-1); + + /* Fix tables-to-be-deleted-from list to point at opened tables */ + for (target_tbl= (TABLE_LIST*) aux_tables; + target_tbl; + target_tbl= target_tbl->next_local) + { + target_tbl->table= target_tbl->correspondent_table->table; + if (!target_tbl->correspondent_table->updatable || + check_key_in_view(thd, target_tbl->correspondent_table)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), target_tbl->real_name, + "DELETE"); + DBUG_RETURN(-1); + } + /* + Check are deleted table used somewhere inside subqueries. + + Multi-delete can't be constructed over-union => we always have + single SELECT on top and have to check underlaying SELECTs of it + */ + for (SELECT_LEX_UNIT *un= lex->select_lex.first_inner_unit(); + un; + un= un->next_unit()) + { + if (un->first_select()->linkage != DERIVED_TABLE_TYPE && + un->check_updateable(target_tbl->correspondent_table->db, + target_tbl->correspondent_table->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), + target_tbl->correspondent_table->real_name); + res= -1; + break; + } + } + } + DBUG_RETURN(res); } + multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt, uint num_of_tables_arg) : delete_tables(dt), thd(thd_arg), deleted(0), found(0), @@ -323,7 +414,7 @@ multi_delete::initialize_tables(JOIN *join) DBUG_RETURN(1); table_map tables_to_delete_from=0; - for (walk= delete_tables ; walk ; walk=walk->next) + for (walk= delete_tables; walk; walk= walk->next_local) tables_to_delete_from|= walk->table->map; walk= delete_tables; @@ -335,7 +426,7 @@ multi_delete::initialize_tables(JOIN *join) { /* We are going to delete from this table */ TABLE *tbl=walk->table=tab->table; - walk=walk->next; + walk= walk->next_local; /* Don't use KEYREAD optimization on this table */ tbl->no_keyread=1; /* Don't use record cache */ @@ -351,11 +442,11 @@ multi_delete::initialize_tables(JOIN *join) } walk= delete_tables; tempfiles_ptr= tempfiles; - for (walk=walk->next ; walk ; walk=walk->next) + for (walk= walk->next_local ;walk ;walk= walk->next_local) { TABLE *table=walk->table; - *tempfiles_ptr++= new Unique (refposcmp2, - (void *) &table->file->ref_length, + *tempfiles_ptr++= new Unique (refpos_order_cmp, + (void *) table->file, table->file->ref_length, MEM_STRIP_BUF_SIZE); } @@ -366,9 +457,9 @@ multi_delete::initialize_tables(JOIN *join) multi_delete::~multi_delete() { - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next) + for (table_being_deleted= delete_tables; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local) { TABLE *t=table_being_deleted->table; free_io_cache(t); // Alloced by unique @@ -388,9 +479,9 @@ bool multi_delete::send_data(List<Item> &values) int secure_counter= -1; DBUG_ENTER("multi_delete::send_data"); - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, secure_counter++) + for (table_being_deleted= delete_tables; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local, secure_counter++) { TABLE *table=table_being_deleted->table; @@ -407,7 +498,8 @@ 
bool multi_delete::send_data(List<Item> &values) table->status|= STATUS_DELETED; if (!(error=table->file->delete_row(table->record[0]))) deleted++; - else if (!table_being_deleted->next || table_being_deleted->table->file->has_transactions()) + else if (!table_being_deleted->next_local || + table_being_deleted->table->file->has_transactions()) { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -477,9 +569,9 @@ int multi_delete::do_deletes(bool from_send_error) if (from_send_error) { /* Found out table number for 'table_being_deleted*/ - for (TABLE_LIST *aux=delete_tables; + for (TABLE_LIST *aux= delete_tables; aux != table_being_deleted; - aux=aux->next) + aux= aux->next_local) counter++; } else @@ -488,9 +580,9 @@ int multi_delete::do_deletes(bool from_send_error) do_delete= 0; if (!found) DBUG_RETURN(0); - for (table_being_deleted=table_being_deleted->next; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, counter++) + for (table_being_deleted= table_being_deleted->next_local; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local, counter++) { TABLE *table = table_being_deleted->table; if (tempfiles[counter]->get(table)) @@ -547,7 +639,9 @@ bool multi_delete::send_eof() ha_autocommit_... */ if (deleted) + { query_cache_invalidate3(thd, delete_tables, 1); + } /* Write the SQL statement to the binlog if we deleted @@ -559,7 +653,6 @@ bool multi_delete::send_eof() */ if (deleted && (error <= 0 || normal_tables)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -580,7 +673,10 @@ bool multi_delete::send_eof() if (local_error) ::send_error(thd); else + { + thd->row_count_func= deleted; ::send_ok(thd, deleted); + } return 0; } @@ -669,7 +765,6 @@ end: { if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 3259e0a4f22..10ef6a081b6 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -25,15 +25,15 @@ #include "sql_select.h" #include "sql_acl.h" -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, - TABLE_LIST *t); + /* - Resolve derived tables in all queries + call given derived table processor (preparing or filling tables) SYNOPSIS mysql_handle_derived() lex LEX for this thread + processor procedure of derived table processing RETURN 0 ok @@ -42,7 +42,7 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, */ int -mysql_handle_derived(LEX *lex) +mysql_handle_derived(LEX *lex, int (*processor)(THD*, LEX*, TABLE_LIST*)) { if (lex->derived_tables) { @@ -52,15 +52,11 @@ mysql_handle_derived(LEX *lex) { for (TABLE_LIST *cursor= sl->get_table_list(); cursor; - cursor= cursor->next) + cursor= cursor->next_local) { int res; - if (cursor->derived && (res=mysql_derived(lex->thd, lex, - cursor->derived, - cursor))) - { + if ((res= (*processor)(lex->thd, lex, cursor))) return res; - } } if (lex->describe) { @@ -78,20 +74,16 @@ mysql_handle_derived(LEX *lex) /* - Resolve derived tables in all queries + Create temporary table structure (but do not fill it) SYNOPSIS mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, TABLE_LIST *t) thd Thread handle lex LEX for this thread - unit node that contains all SELECT's for derived tables - t TABLE_LIST for the upper SELECT + orig_table_list TABLE_LIST for the upper SELECT IMPLEMENTATION - Derived table is resolved with temporary table. It is created based on the - queries defined. 
After temporary table is created, if this is not EXPLAIN, - then the entire unit / node is deleted. unit is deleted if UNION is used - for derived table and node is deleted is it is a simple SELECT. + Derived table is resolved with temporary table. After table creation, the above TABLE_LIST is updated with a new table. @@ -105,59 +97,126 @@ mysql_handle_derived(LEX *lex) 0 ok 1 Error -1 Error and error message given -*/ - + */ -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, - TABLE_LIST *org_table_list) +int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) { - SELECT_LEX *first_select= unit->first_select(); - TABLE *table; - int res; - select_union *derived_result; - bool is_union= first_select->next_select() && - first_select->next_select()->linkage == UNION_TYPE; - SELECT_LEX *save_current_select= lex->current_select; - DBUG_ENTER("mysql_derived"); - - if (!(derived_result= new select_union(0))) - DBUG_RETURN(1); // out of memory - - // st_select_lex_unit::prepare correctly work for single select - if ((res= unit->prepare(thd, derived_result, 0))) - goto exit; - - - derived_result->tmp_table_param.init(); - derived_result->tmp_table_param.field_count= unit->types.elements; - /* - Temp table is created so that it hounours if UNION without ALL is to be - processed - */ - if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, - unit->types, (ORDER*) 0, - is_union && unit->union_distinct, 1, - (first_select->options | thd->options | - TMP_TABLE_ALL_COLUMNS), - HA_POS_ERROR, - org_table_list->alias))) + SELECT_LEX_UNIT *unit= orig_table_list->derived; + int res= 0; + if (unit) { - res= -1; - goto exit; + SELECT_LEX *first_select= unit->first_select(); + TABLE *table= 0; + select_union *derived_result; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; + DBUG_ENTER("mysql_derived"); + + if (!(derived_result= new select_union(0))) + DBUG_RETURN(1); // out of memory + + // st_select_lex_unit::prepare correctly work for single select + if ((res= unit->prepare(thd, derived_result, 0))) + goto exit; + + + derived_result->tmp_table_param.init(); + derived_result->tmp_table_param.field_count= unit->types.elements; + /* + Temp table is created so that it hounours if UNION without ALL is to be + processed + */ + if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, + unit->types, (ORDER*) 0, + is_union && unit->union_distinct, 1, + (first_select->options | thd->options | + TMP_TABLE_ALL_COLUMNS), + HA_POS_ERROR, + orig_table_list->alias))) + { + res= -1; + goto exit; + } + derived_result->set_table(table); + +exit: + /* + if it is preparation PS only or commands that need only VIEW structure + then we do not need real data and we can skip execution (and parameters + is not defined, too) + */ + if (res) + { + if (table) + free_tmp_table(thd, table); + delete derived_result; + } + else + { + if (!thd->fill_derived_tables()) + delete derived_result; + orig_table_list->derived_result= derived_result; + orig_table_list->table= table; + orig_table_list->real_name= table->real_name; + table->derived_select_number= first_select->select_number; + table->tmp_table= TMP_TABLE; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table->grant.privilege= SELECT_ACL; +#endif + orig_table_list->db= (char *)""; + // Force read of table stats in the optimizer + table->file->info(HA_STATUS_VARIABLE); + /* Add new temporary table to list of open derived tables */ + table->next= thd->derived_tables; + 
thd->derived_tables= table; + } } - derived_result->set_table(table); + else if (orig_table_list->ancestor) + orig_table_list->set_ancestor(); + return (res); +} + + +/* + fill derived table - /* - if it is preparation PS only then we do not need real data and we - can skip execution (and parameters is not defined, too) + SYNOPSIS + mysql_derived_filling() + thd Thread handle + lex LEX for this thread + unit node that contains all SELECT's for derived tables + orig_table_list TABLE_LIST for the upper SELECT + + IMPLEMENTATION + Derived table is resolved with temporary table. It is created based on the + queries defined. After temporary table is filled, if this is not EXPLAIN, + then the entire unit / node is deleted. unit is deleted if UNION is used + for derived table and node is deleted is it is a simple SELECT. + + RETURN + 0 ok + 1 Error + -1 Error and error message given */ - if (! thd->current_arena->is_stmt_prepare()) + +int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) +{ + TABLE *table= orig_table_list->table; + SELECT_LEX_UNIT *unit= orig_table_list->derived; + int res= 0; + + /*check that table creation pass without problem and it is derived table */ + if (table && unit) { + SELECT_LEX *first_select= unit->first_select(); + select_union *derived_result= orig_table_list->derived_result; + SELECT_LEX *save_current_select= lex->current_select; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; if (is_union) { // execute union without clean up - if (!(res= unit->prepare(thd, derived_result, SELECT_NO_UNLOCK))) - res= unit->exec(); + res= unit->exec(); } else { @@ -170,7 +229,7 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, first_select->options&= ~OPTION_FOUND_ROWS; lex->current_select= first_select; - res= mysql_select(thd, &first_select->ref_pointer_array, + res= mysql_select(thd, &first_select->ref_pointer_array, (TABLE_LIST*) first_select->table_list.first, first_select->with_wild, first_select->item_list, first_select->where, @@ -183,54 +242,27 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, SELECT_NO_UNLOCK), derived_result, unit, first_select); } - } - if (!res) - { - /* - Here we entirely fix both TABLE_LIST and list of SELECT's as - there were no derived tables - */ - if (derived_result->flush()) - res= 1; - else + if (!res) { - org_table_list->real_name= table->real_name; - org_table_list->table= table; - if (org_table_list->table_list) - { - org_table_list->table_list->real_name= table->real_name; - org_table_list->table_list->table= table; - } - table->derived_select_number= first_select->select_number; - table->tmp_table= TMP_TABLE; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.privilege= SELECT_ACL; -#endif - org_table_list->db= (char *)""; - // Force read of table stats in the optimizer - table->file->info(HA_STATUS_VARIABLE); - } + /* + Here we entirely fix both TABLE_LIST and list of SELECT's as + there were no derived tables + */ + if (derived_result->flush()) + res= 1; - if (!lex->describe) - unit->cleanup(); - if (res) - free_tmp_table(thd, table); + if (!lex->describe) + unit->cleanup(); + } else { - /* Add new temporary table to list of open derived tables */ - table->next= thd->derived_tables; - thd->derived_tables= table; + free_tmp_table(thd, table); + unit->cleanup(); } + lex->current_select= save_current_select; + if (res) + free_tmp_table(thd, table); } - else - { - free_tmp_table(thd, table); - unit->cleanup(); - } - -exit: - 
delete derived_result; - lex->current_select= save_current_select; - DBUG_RETURN(res); + return res; } diff --git a/sql/sql_error.cc b/sql/sql_error.cc index eab5ec890df..3fbefdd37ef 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -43,6 +43,7 @@ This file contains the implementation of error and warnings related ***********************************************************************/ #include "mysql_priv.h" +#include "sp_rcontext.h" /* Store a new message in an error object @@ -104,8 +105,27 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, { MYSQL_ERROR *err= 0; DBUG_ENTER("push_warning"); + if (thd->query_id != thd->warn_id) mysql_reset_errors(thd); + if (thd->spcont && + thd->spcont->find_handler(code, + ((int) level >= + (int) MYSQL_ERROR::WARN_LEVEL_WARN && + thd->really_abort_on_warning()) ? + MYSQL_ERROR::WARN_LEVEL_ERROR : level)) + { + DBUG_RETURN(NULL); + } + + /* Abort if we are using strict mode and we are not using IGNORE */ + if ((int) level >= (int) MYSQL_ERROR::WARN_LEVEL_WARN && + thd->really_abort_on_warning()) + { + thd->killed= THD::KILL_BAD_DATA; + my_message(code, msg, MYF(0)); + DBUG_RETURN(NULL); + } if (thd->warn_list.elements < thd->variables.max_error_count) { @@ -115,8 +135,7 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, */ MEM_ROOT *old_root= thd->mem_root; thd->mem_root= &thd->warn_root; - err= new MYSQL_ERROR(thd, code, level, msg); - if (err) + if ((err= new MYSQL_ERROR(thd, code, level, msg))) thd->warn_list.push_back(err); thd->mem_root= old_root; } @@ -180,7 +199,8 @@ my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Message",MYSQL_ERRMSG_SIZE)); - if (thd->protocol->send_fields(&field_list,1)) + if (thd->protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); MYSQL_ERROR *err; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index f98bb0a9131..edb895fd24a 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -64,7 +64,7 @@ #define HANDLER_TABLES_HASH_SIZE 120 static enum enum_ha_read_modes rkey_to_rnext[]= - { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; +{ RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; #define HANDLER_TABLES_HACK(thd) { \ TABLE *tmp=thd->open_tables; \ @@ -149,7 +149,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) TABLE_LIST *hash_tables; char *db, *name, *alias; uint dblen, namelen, aliaslen, counter; - int err; + int error; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", tables->db, tables->real_name, tables->alias, @@ -185,9 +185,13 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) */ DBUG_ASSERT(! tables->table); HANDLER_TABLES_HACK(thd); - err=open_tables(thd, tables, &counter); + + /* for now HANDLER can be used only for real TABLES */ + tables->required_type= FRMTYPE_TABLE; + error= open_tables(thd, tables, &counter); + HANDLER_TABLES_HACK(thd); - if (err) + if (error) goto err; /* There can be only one table in '*tables'. 
*/ @@ -277,47 +281,26 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) */ for (table_ptr= &(thd->handler_tables); *table_ptr && (*table_ptr != hash_tables->table); - table_ptr= &(*table_ptr)->next); + table_ptr= &(*table_ptr)->next) + ; -#if MYSQL_VERSION_ID < 40100 - if (*tables->db && strcmp(hash_tables->db, tables->db)) + if (*table_ptr) { - DBUG_PRINT("info",("wrong db")); - hash_tables= NULL; - } - else -#endif - { - if (*table_ptr) + (*table_ptr)->file->ha_index_or_rnd_end(); + VOID(pthread_mutex_lock(&LOCK_open)); + if (close_thread_table(thd, table_ptr)) { - (*table_ptr)->file->ha_index_or_rnd_end(); - VOID(pthread_mutex_lock(&LOCK_open)); - if (close_thread_table(thd, table_ptr)) - { - /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); - } - VOID(pthread_mutex_unlock(&LOCK_open)); + /* Tell threads waiting for refresh that something has happened */ + VOID(pthread_cond_broadcast(&COND_refresh)); } - - hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); + VOID(pthread_mutex_unlock(&LOCK_open)); } + hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); } - - if (! hash_tables) + else { -#if MYSQL_VERSION_ID < 40100 - char buff[MAX_DBKEY_LENGTH]; - if (*tables->db) - strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); - else - strncpy(buff, tables->alias, sizeof(buff)); - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - buff, "HANDLER"); -#else my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), tables->alias, "HANDLER"); -#endif DBUG_PRINT("exit",("ERROR")); DBUG_RETURN(-1); } @@ -349,9 +332,10 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) */ int mysql_ha_read(THD *thd, TABLE_LIST *tables, - enum enum_ha_read_modes mode, char *keyname, List<Item> *key_expr, - enum ha_rkey_function ha_rkey_mode, Item *cond, - ha_rows select_limit,ha_rows offset_limit) + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, + enum ha_rkey_function ha_rkey_mode, Item *cond, + ha_rows select_limit,ha_rows offset_limit) { TABLE_LIST *hash_tables; TABLE *table; @@ -360,7 +344,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, Protocol *protocol= thd->protocol; char buff[MAX_FIELD_WIDTH]; String buffer(buff, sizeof(buff), system_charset_info); - int err, keyno= -1; + int error, keyno= -1; uint num_rows; byte *key; uint key_len; @@ -439,25 +423,25 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, if ((keyno=find_type(keyname, &table->keynames, 1+2)-1)<0) { my_printf_error(ER_KEY_DOES_NOT_EXITS,ER(ER_KEY_DOES_NOT_EXITS),MYF(0), - keyname,tables->alias); + keyname,tables->alias); goto err0; } table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno); } - if (insert_fields(thd,tables,tables->db,tables->alias,&it)) + if (insert_fields(thd, tables, tables->db, tables->alias, &it, 0, 0)) goto err0; select_limit+=offset_limit; - protocol->send_fields(&list,1); + protocol->send_fields(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); HANDLER_TABLES_HACK(thd); lock= mysql_lock_tables(thd, &tables->table, 1); HANDLER_TABLES_HACK(thd); if (!lock) - goto err0; // mysql_lock_tables() printed error message already + goto err0; // mysql_lock_tables() printed error message already /* In ::external_lock InnoDB resets the fields which tell it that @@ -472,33 +456,33 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, switch (mode) { case RFIRST: if (keyname) - err=table->file->index_first(table->record[0]); + error= 
table->file->index_first(table->record[0]); else { table->file->ha_index_or_rnd_end(); - if (!(err=table->file->ha_rnd_init(1))) - err=table->file->rnd_next(table->record[0]); + if (!(error= table->file->ha_rnd_init(1))) + error= table->file->rnd_next(table->record[0]); } mode=RNEXT; break; case RLAST: DBUG_ASSERT(keyname != 0); - err=table->file->index_last(table->record[0]); + error= table->file->index_last(table->record[0]); mode=RPREV; break; case RNEXT: - err=keyname ? - table->file->index_next(table->record[0]) : - table->file->rnd_next(table->record[0]); + error= (keyname ? + table->file->index_next(table->record[0]) : + table->file->rnd_next(table->record[0])); break; case RPREV: DBUG_ASSERT(keyname != 0); - err=table->file->index_prev(table->record[0]); + error= table->file->index_prev(table->record[0]); break; case RNEXT_SAME: /* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) */ DBUG_ASSERT(keyname != 0); - err= table->file->index_next_same(table->record[0], key, key_len); + error= table->file->index_next_same(table->record[0], key, key_len); break; case RKEY: { @@ -532,8 +516,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, send_error(thd,ER_OUTOFMEMORY); goto err; } - key_copy(key, table, keyno, key_len); - err=table->file->index_read(table->record[0], + key_copy(key, table->record[0], table->key_info + keyno, key_len); + error= table->file->index_read(table->record[0], key,key_len,ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; @@ -543,15 +527,15 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, goto err; } - if (err == HA_ERR_RECORD_DELETED) - continue; - if (err) + if (error) { - if (err != HA_ERR_KEY_NOT_FOUND && err != HA_ERR_END_OF_FILE) + if (error == HA_ERR_RECORD_DELETED) + continue; + if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { sql_print_error("mysql_ha_read: Got error %d when reading table '%s'", - err, tables->real_name); - table->file->print_error(err,MYF(0)); + error, tables->real_name); + table->file->print_error(error,MYF(0)); goto err; } goto ok; @@ -629,7 +613,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags) if (tables) { /* Close all tables in the list. 
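The rewritten fetch loop in mysql_ha_read() tests the single error variable once per row: HA_ERR_RECORD_DELETED means skip the row, key-not-found and end-of-file end the scan normally, anything else is reported and aborts. A self-contained sketch of the same classification, with made-up cursor and status names:

#include <cstdio>
#include <vector>

// Simplified stand-ins for the handler error codes used in the loop.
enum fetch_status { FETCH_OK, FETCH_ROW_DELETED, FETCH_KEY_NOT_FOUND,
                    FETCH_END_OF_FILE, FETCH_IO_ERROR };

// Emulate a cursor that yields a fixed sequence of statuses.
struct fake_cursor {
  std::vector<fetch_status> script;
  size_t pos;
  fetch_status next() { return pos < script.size() ? script[pos++] : FETCH_END_OF_FILE; }
};

// Returns the number of rows "sent"; -1 on a hard error.
static int scan(fake_cursor &c, int limit)
{
  int sent= 0;
  while (sent < limit)
  {
    fetch_status st= c.next();
    if (st == FETCH_ROW_DELETED)
      continue;                                   // silently skip ghost rows
    if (st == FETCH_KEY_NOT_FOUND || st == FETCH_END_OF_FILE)
      break;                                      // normal end of the scan
    if (st != FETCH_OK)
      return -1;                                  // report and abort on real errors
    sent++;                                       // row accepted, send it
  }
  return sent;
}

int main()
{
  fake_cursor c= { { FETCH_OK, FETCH_ROW_DELETED, FETCH_OK, FETCH_END_OF_FILE }, 0 };
  printf("rows sent: %d\n", scan(c, 10));         // prints 2
  return 0;
}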
*/ - for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next) + for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next_local) { DBUG_PRINT("info-in-tables-list",("'%s'.'%s' as '%s'", tmp_tables->db, tmp_tables->real_name, diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 04ecbbd43b9..aed86b78e93 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -84,12 +84,11 @@ static bool init_fields(THD *thd, TABLE_LIST *tables, DBUG_ENTER("init_fields"); for (; count-- ; find_fields++) { - TABLE_LIST *not_used; /* We have to use 'new' here as field will be re_linked on free */ Item_field *field= new Item_field("mysql", find_fields->table_name, find_fields->field_name); if (!(find_fields->field= find_field_in_tables(thd, field, tables, - ¬_used, TRUE))) + 0, REPORT_ALL_ERRORS, 1))) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -427,7 +426,8 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) field_list.push_back(new Item_empty_string("description",1000)); field_list.push_back(new Item_empty_string("example",1000)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); protocol->prepare_for_resend(); @@ -469,7 +469,8 @@ int send_header_2(Protocol *protocol, bool for_category) field_list.push_back(new Item_empty_string("source_category_name",64)); field_list.push_back(new Item_empty_string("name",64)); field_list.push_back(new Item_empty_string("is_it_category",1)); - DBUG_RETURN(protocol->send_fields(&field_list,1)); + DBUG_RETURN(protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)); } /* @@ -622,16 +623,15 @@ int mysqld_help(THD *thd, const char *mask) bzero((gptr)tables,sizeof(tables)); tables[0].alias= tables[0].real_name= (char*) "help_topic"; tables[0].lock_type= TL_READ; - tables[0].next= &tables[1]; + tables[0].next_global= tables[0].next_local= &tables[1]; tables[1].alias= tables[1].real_name= (char*) "help_category"; tables[1].lock_type= TL_READ; - tables[1].next= &tables[2]; + tables[1].next_global= tables[1].next_local= &tables[2]; tables[2].alias= tables[2].real_name= (char*) "help_relation"; tables[2].lock_type= TL_READ; - tables[2].next= &tables[3]; + tables[2].next_global= tables[2].next_local= &tables[3]; tables[3].alias= tables[3].real_name= (char*) "help_keyword"; tables[3].lock_type= TL_READ; - tables[3].next= 0; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; List<String> topics_list, categories_list, subcategories_list; @@ -640,13 +640,14 @@ int mysqld_help(THD *thd, const char *mask) uint mlen= strlen(mask); MEM_ROOT *mem_root= thd->mem_root; - if (open_and_lock_tables(thd, tables)) - { - res= -1; + if ((res= open_and_lock_tables(thd, tables))) goto end; - } - /* Init tables and fields to be usable from items */ - setup_tables(tables); + /* + Init tables and fields to be usable from items + + tables do not contain VIEWs => we can pass 0 as conds + */ + setup_tables(thd, tables, 0); memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields)); if (init_fields(thd, tables, used_fields, array_elements(used_fields))) { diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 822ce622b1b..6e0260d1acb 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -19,12 +19,14 @@ #include "mysql_priv.h" #include "sql_acl.h" +#include "sp_head.h" +#include "sql_trigger.h" static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD 
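A recurring theme of this patch is the split of the old TABLE_LIST::next pointer into next_local (tables of one SELECT) and next_global (all tables touched by the statement), which is why mysqld_help() now assigns both. A toy illustration of one node living on the two chains; only the two field names are taken from the patch, the rest is invented:

#include <cstdio>

struct table_ref {
  const char *name;
  table_ref *next_local;   // chain of tables belonging to one SELECT
  table_ref *next_global;  // chain of all tables used by the statement
};

int main()
{
  table_ref topic=    { "help_topic",    0, 0 };
  table_ref category= { "help_category", 0, 0 };
  table_ref keyword=  { "help_keyword",  0, 0 };

  // One statement, one SELECT: the two chains happen to coincide here,
  // as in mysqld_help(); a view expansion would later add entries to the
  // global chain only.
  topic.next_local=    topic.next_global=    &category;
  category.next_local= category.next_global= &keyword;

  for (table_ref *t= &topic; t; t= t->next_global)
    printf("global: %s\n", t->name);
  for (table_ref *t= &topic; t; t= t->next_local)
    printf("local:  %s\n", t->name);
  return 0;
}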
*thd,TABLE_LIST *table_list); static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, - char *query, uint query_length, int log_on); + char *query, uint query_length, bool log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); static void unlink_blobs(register TABLE *table); @@ -40,19 +42,18 @@ static void unlink_blobs(register TABLE *table); #define my_safe_afree(ptr, size, min_length) if (size > min_length) my_free(ptr,MYF(0)) #endif -#define DELAYED_LOG_UPDATE 1 -#define DELAYED_LOG_BIN 2 - /* Check if insert fields are correct. Sets table->timestamp_field_type to TIMESTAMP_NO_AUTO_SET or leaves it as is, depending on if timestamp should be updated or not. */ -int -check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter) +static int +check_insert_fields(THD *thd, TABLE_LIST *table_list, List<Item> &fields, + List<Item> &values, ulong counter, bool check_unique) { + TABLE *table= table_list->table; + if (fields.elements == 0 && values.elements != 0) { if (values.elements != table->fields) @@ -63,14 +64,22 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, return -1; } #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (grant_option && - check_grant_all_columns(thd,INSERT_ACL,table)) - return -1; + if (grant_option) + { + Field_iterator_table fields; + fields.set_table(table); + if (check_grant_all_columns(thd, INSERT_ACL, &table->grant, + table->table_cache_key, table->real_name, + &fields)) + return -1; + } #endif table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; } else { // Part field list + TABLE_LIST *save_next; + int res; if (fields.elements != values.elements) { my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, @@ -78,19 +87,18 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, MYF(0),counter); return -1; } - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= table->table_cache_key; - table_list.real_name= table_list.alias= table->table_name; - table_list.table=table; - table_list.grant=table->grant; thd->dupp_field=0; - if (setup_tables(&table_list) || - setup_fields(thd, 0, &table_list,fields,1,0,0)) + thd->lex->select_lex.no_wrap_view_item= 1; + save_next= table_list->next_local; // fields only from first table + table_list->next_local= 0; + res= setup_fields(thd, 0, table_list, fields, 1, 0, 0); + table_list->next_local= save_next; + thd->lex->select_lex.no_wrap_view_item= 0; + if (res) return -1; - if (thd->dupp_field) + if (check_unique && thd->dupp_field) { my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name); return -1; @@ -120,8 +128,9 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, By default, both logs are enabled (this won't cause problems if the server runs without --log-update or --log-bin). 
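check_insert_fields() now resolves the INSERT column list against the first (target) table only, by saving next_local, cutting the chain, calling setup_fields(), and restoring the link afterwards. The save/cut/restore pattern on its own, with a faked resolver standing in for setup_fields():

#include <cstdio>
#include <cstring>

struct table_ref { const char *name; table_ref *next_local; };

// Pretend column resolver: succeeds only if the named table is still
// reachable on the local chain.
static bool resolve(table_ref *tables, const char *table_name)
{
  for (table_ref *t= tables; t; t= t->next_local)
    if (strcmp(t->name, table_name) == 0)
      return true;
  return false;
}

int main()
{
  table_ref t2= { "t2", 0 };
  table_ref t1= { "t1", &t2 };          // INSERT target first, SELECT source after

  table_ref *save_next= t1.next_local;  // fields may come only from t1
  t1.next_local= 0;
  bool ok_target= resolve(&t1, "t1");   // true
  bool ok_source= resolve(&t1, "t2");   // false: t2 is hidden for the field list
  t1.next_local= save_next;             // restore the full chain afterwards

  printf("t1: %d, t2: %d\n", ok_target, ok_source);
  return 0;
}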
*/ - int log_on= DELAYED_LOG_UPDATE | DELAYED_LOG_BIN ; + bool log_on= (thd->options & OPTION_BIN_LOG) || (!(thd->master_access & SUPER_ACL)); bool transactional_table, log_delayed; + bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); uint value_count; ulong counter = 1; ulonglong id; @@ -133,14 +142,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, char *query= thd->query; #endif thr_lock_type lock_type = table_list->lock_type; - TABLE_LIST *insert_table_list= (TABLE_LIST*) - thd->lex->select_lex.table_list.first; DBUG_ENTER("mysql_insert"); - if (!(thd->options & OPTION_UPDATE_LOG)) - log_on&= ~(int) DELAYED_LOG_UPDATE; - if (!(thd->options & OPTION_BIN_LOG)) - log_on&= ~(int) DELAYED_LOG_BIN; /* in safe mode or with skip-new change delayed insert to be regular if we are told to replace duplicates, the insert cannot be concurrent @@ -177,8 +180,14 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if ((table= delayed_get_table(thd,table_list)) && !thd->is_fatal_error) { res= 0; - if (table_list->next) /* if sub select */ - res= open_and_lock_tables(thd, table_list->next); + if (table_list->next_global) /* if sub select */ + res= open_and_lock_tables(thd, table_list->next_global); + /* + First is not processed by open_and_lock_tables() => we need set + updateability flags "by hands". + */ + if (!table_list->derived && !table_list->view) + table_list->updatable= 1; // usual table } else { @@ -201,17 +210,17 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if (duplic == DUP_UPDATE && !table->insert_values) { /* it should be allocated before Item::fix_fields() */ - table->insert_values= + table->insert_values= (byte *)alloc_root(thd->mem_root, table->rec_buff_length); if (!table->insert_values) goto abort; } - if (mysql_prepare_insert(thd, table_list, insert_table_list, table, - fields, values, update_fields, - update_values, duplic)) + if (mysql_prepare_insert(thd, table_list, table, fields, values, + update_fields, update_values, duplic)) goto abort; + // is table which we are changing used somewhere in other parts of query value_count= values->elements; while ((values= its++)) { @@ -223,7 +232,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, MYF(0),counter); goto abort; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + if (setup_fields(thd, 0, table_list, *values, 0, 0, 0)) goto abort; } its.rewind (); @@ -235,12 +244,15 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, info.handle_duplicates=duplic; info.update_fields=&update_fields; info.update_values=&update_values; + info.view= (table_list->view ? table_list : 0); + info.ignore= ignore_err; /* Count warnings for all inserts. For single line insert, generate an error if try to set a NOT NULL field - to NULL + to NULL. */ - thd->count_cuted_fields= ((values_list.elements == 1) ? + thd->count_cuted_fields= ((values_list.elements == 1 && + duplic != DUP_IGNORE) ? 
CHECK_FIELD_ERROR_FOR_NULL : CHECK_FIELD_WARN); thd->cuted_fields = 0L; @@ -262,13 +274,24 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if (lock_type != TL_WRITE_DELAYED) table->file->start_bulk_insert(values_list.elements); + thd->no_trans_update= 0; + thd->abort_on_warning= (duplic != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + + if (fields.elements && check_that_all_fields_are_given_values(thd, table)) + { + /* thd->net.report_error is now set, which will abort the next loop */ + error= 1; + } + while ((values= its++)) { if (fields.elements || !value_count) { restore_record(table,default_values); // Get empty record - if (fill_record(fields, *values, 0)|| thd->net.report_error || - check_null_fields(thd,table)) + if (fill_record(fields, *values, 0)|| thd->net.report_error) { if (values_list.elements != 1 && !thd->net.report_error) { @@ -296,6 +319,26 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, break; } } + + /* + FIXME: Actually we should do this before + check_that_all_fields_are_given_values Or even go into write_record ? + */ + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_INSERT, + TRG_ACTION_BEFORE); + + if ((res= table_list->view_check_option(thd, + (values_list.elements == 1 ? + 0 : + ignore_err))) == + VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { @@ -304,9 +347,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, } else #endif - error=write_record(table,&info); - if (error) - break; + error=write_record(thd, table ,&info); /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the update log. @@ -317,7 +358,12 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, { // Get auto increment value id= thd->last_insert_id; } + if (error) + break; thd->row_count++; + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_INSERT, TRG_ACTION_AFTER); } /* @@ -354,7 +400,9 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, before binlog writing and ha_autocommit_... 
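Within the row loop, view_check_option() yields one of three outcomes and the loop reacts to each differently: skip the row, abort the statement, or write the row. A compact model of that branch; the VIEW_CHECK_* names come from the patch, the predicate itself is invented:

#include <cstdio>

enum view_check_result { VIEW_CHECK_OK, VIEW_CHECK_SKIP, VIEW_CHECK_ERROR };

// Stand-in for the per-row predicate: accept even values, skip odd ones,
// and fail hard on negative input.
static view_check_result view_check_option(int row)
{
  if (row < 0) return VIEW_CHECK_ERROR;
  return (row % 2 == 0) ? VIEW_CHECK_OK : VIEW_CHECK_SKIP;
}

int main()
{
  const int rows[]= { 2, 3, 4, -1, 6 };
  int written= 0, error= 0;
  for (int row : rows)
  {
    view_check_result res= view_check_option(row);
    if (res == VIEW_CHECK_SKIP)
      continue;                 // CHECK OPTION with IGNORE: drop the row
    if (res == VIEW_CHECK_ERROR)
    {
      error= 1;                 // strict CHECK OPTION: abort the whole INSERT
      break;
    }
    written++;                  // row is visible through the view, write it
  }
  printf("written=%d error=%d\n", written, error);   // written=2 error=1
  return 0;
}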
*/ if (info.copied || info.deleted || info.updated) + { query_cache_invalidate3(thd, table_list, 1); + } transactional_table= table->file->has_transactions(); @@ -362,7 +410,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if ((info.copied || info.deleted || info.updated) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -401,7 +448,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, goto abort; if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) || !thd->cuted_fields)) - send_ok(thd,info.copied+info.deleted+info.updated,id); + { + thd->row_count_func= info.copied+info.deleted+info.updated; + send_ok(thd, (ulong) thd->row_count_func, id); + } else { char buff[160]; @@ -412,10 +462,12 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,(ulonglong)id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); } free_underlaid_joins(thd, &thd->lex->select_lex); table->insert_values=0; + thd->abort_on_warning= 0; DBUG_RETURN(0); abort: @@ -425,43 +477,176 @@ abort: #endif free_underlaid_joins(thd, &thd->lex->select_lex); table->insert_values=0; + thd->abort_on_warning= 0; DBUG_RETURN(-1); } /* + Additional check for insertability for VIEW + + SYNOPSIS + check_view_insertability() + view - reference on VIEW + + IMPLEMENTATION + A view is insertable if the folloings are true: + - All columns in the view are columns from a table + - All not used columns in table have a default values + - All field in view are unique (not referring to the same column) + + RETURN + FALSE - OK + view->contain_auto_increment is 1 if and only if the view contains an + auto_increment field + + TRUE - can't be used for insert +*/ + +static bool check_view_insertability(TABLE_LIST *view, ulong query_id) +{ + uint num= view->view->select_lex.item_list.elements; + TABLE *table= view->table; + Item **trans_start= view->field_translation, **trans_end=trans_start+num; + Item **trans; + Field **field_ptr= table->field; + ulong other_query_id= query_id - 1; + DBUG_ENTER("check_key_in_view"); + + DBUG_ASSERT(view->table != 0 && view->field_translation != 0); + + view->contain_auto_increment= 0; + /* check simplicity and prepare unique test of view */ + for (trans= trans_start; trans != trans_end; trans++) + { + Item_field *field; + /* simple SELECT list entry (field without expression) */ + if (!(field= (*trans)->filed_for_view_update())) + DBUG_RETURN(TRUE); + if (field->field->unireg_check == Field::NEXT_NUMBER) + view->contain_auto_increment= 1; + /* prepare unique test */ + field->field->query_id= other_query_id; + *trans= field; // remove collation if we have it + } + /* unique test */ + for (trans= trans_start; trans != trans_end; trans++) + { + /* Thanks to test above, we know that all columns are of type Item_field */ + Item_field *field= (Item_field *)(*trans); + if (field->field->query_id == query_id) + DBUG_RETURN(TRUE); + field->field->query_id= query_id; + } + + /* VIEW contain all fields without default value */ + for (; *field_ptr; field_ptr++) + { + Field *field= *field_ptr; + /* field have not default value */ + if ((field->type() == FIELD_TYPE_BLOB) && + (table->timestamp_field != field || + field->unireg_check == 
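check_view_insertability() detects two view columns that map to the same base column without any auxiliary structure: it first stamps every referenced field with a value that cannot be the current query id, then walks the list again, restamping with the real id and flagging any field that already carries it. The trick in isolation, on plain structs (the real code stores the mark in Field::query_id):

#include <cstdio>
#include <vector>

struct field { const char *name; unsigned long query_id; };

// Returns true if the same underlying field appears twice in `refs`.
static bool has_duplicate_column(std::vector<field*> &refs, unsigned long query_id)
{
  unsigned long other_id= query_id - 1;     // guaranteed != query_id
  for (field *f : refs)                     // pass 1: clear any stale marks
    f->query_id= other_id;
  for (field *f : refs)                     // pass 2: mark and detect repeats
  {
    if (f->query_id == query_id)
      return true;                          // already seen in this query
    f->query_id= query_id;
  }
  return false;
}

int main()
{
  field a= { "a", 0 }, b= { "b", 0 };
  std::vector<field*> unique_refs= { &a, &b };
  std::vector<field*> dup_refs=    { &a, &b, &a };
  printf("%d %d\n", has_duplicate_column(unique_refs, 42),
                    has_duplicate_column(dup_refs, 43));   // 0 1
  return 0;
}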
Field::TIMESTAMP_UN_FIELD)) + { + for (trans= trans_start; ; trans++) + { + if (trans == trans_end) + DBUG_RETURN(TRUE); // Field was not part of view + if (((Item_field *)(*trans))->field == *field_ptr) + break; // ok + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + Check if table can be updated + + SYNOPSIS + mysql_prepare_insert_check_table() + thd Thread handle + table_list Table list (only one table) + fields List of fields to be updated + where Pointer to where clause + + RETURN + 0 ok + 1 ERROR +*/ + +static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, + List<Item> &fields, COND **where) +{ + bool insert_into_view= (table_list->view != 0); + DBUG_ENTER("mysql_prepare_insert_check_table"); + + if (setup_tables(thd, table_list, where)) + DBUG_RETURN(1); + + if (insert_into_view && !fields.elements) + { + thd->lex->empty_field_list_on_rset= 1; + insert_view_fields(&fields, table_list); + } + + if (!table_list->updatable || + check_key_in_view(thd, table_list) || + (insert_into_view && + check_view_insertability(table_list, thd->query_id))) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* Prepare items in INSERT statement SYNOPSIS mysql_prepare_insert() - thd - thread handler - table_list - global table list - insert_table_list - local table list of INSERT SELECT_LEX + thd Thread handler + table_list Global/local table list RETURN VALUE - 0 - OK - -1 - error (message is not sent to user) + 0 OK + -1 error (message is not sent to user) */ -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, TABLE *table, + +int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic) { + bool insert_into_view= (table_list->view != 0); + /* TODO: use this condition for 'WITH CHECK OPTION' */ + Item *unused_conds= 0; + int res; DBUG_ENTER("mysql_prepare_insert"); - if (check_insert_fields(thd, table, fields, *values, 1) || - setup_tables(insert_table_list) || - setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0) || + + if (mysql_prepare_insert_check_table(thd, table_list, fields, &unused_conds)) + DBUG_RETURN(-1); + + if (check_insert_fields(thd, table_list, fields, *values, 1, + !insert_into_view) || + setup_fields(thd, 0, table_list, *values, 0, 0, 0) || (duplic == DUP_UPDATE && - (setup_fields(thd, 0, insert_table_list, update_fields, 0, 0, 0) || - setup_fields(thd, 0, insert_table_list, update_values, 0, 0, 0)))) + ((thd->lex->select_lex.no_wrap_view_item= 1, + (res= setup_fields(thd, 0, table_list, update_fields, 0, 0, 0)), + thd->lex->select_lex.no_wrap_view_item= 0, + res) || + setup_fields(thd, 0, table_list, update_values, 0, 0, 0)))) DBUG_RETURN(-1); - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + + if (unique_table(table_list, table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); DBUG_RETURN(-1); } + thd->lex->select_lex.first_execution= 0; DBUG_RETURN(0); } @@ -479,10 +664,12 @@ static int last_uniq_key(TABLE *table,uint keynr) /* Write a record to table with optional deleting of conflicting records + + Sets thd->no_trans_update if table which is updated didn't have transactions */ -int write_record(TABLE *table,COPY_INFO *info) +int write_record(THD *thd, TABLE *table,COPY_INFO *info) { int error; char *key=0; @@ -494,9 +681,10 @@ int 
write_record(TABLE *table,COPY_INFO *info) { while ((error=table->file->write_row(table->record[0]))) { + uint key_nr; if (error != HA_WRITE_SKIP) goto err; - uint key_nr; + table->file->restore_auto_increment(); if ((int) (key_nr = table->file->get_dup_key(error)) < 0) { error=HA_WRITE_SKIP; /* Database can't find key */ @@ -534,7 +722,7 @@ int write_record(TABLE *table,COPY_INFO *info) goto err; } } - key_copy((byte*) key,table,key_nr,0); + key_copy((byte*) key,table->record[0],table->key_info+key_nr,0); if ((error=(table->file->index_read_idx(table->record[1],key_nr, (byte*) key, table->key_info[key_nr]. @@ -544,6 +732,7 @@ int write_record(TABLE *table,COPY_INFO *info) } if (info->handle_duplicates == DUP_UPDATE) { + int res= 0; /* we don't check for other UNIQUE keys - the first row that matches, is updated. If update causes a conflict again, an error is returned @@ -552,6 +741,15 @@ int write_record(TABLE *table,COPY_INFO *info) restore_record(table,record[1]); if (fill_record(*info->update_fields, *info->update_values, 0)) goto err; + + /* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */ + if (info->view && + (res= info->view->view_check_option(current_thd, info->ignore)) == + VIEW_CHECK_SKIP) + break; + else if (res == VIEW_CHECK_ERROR) + goto err; + if ((error=table->file->update_row(table->record[1],table->record[0]))) goto err; info->updated++; @@ -581,6 +779,8 @@ int write_record(TABLE *table,COPY_INFO *info) else if ((error=table->file->delete_row(table->record[1]))) goto err; info->deleted++; + if (!table->file->has_transactions()) + thd->no_trans_update= 1; } } info->copied++; @@ -590,11 +790,14 @@ int write_record(TABLE *table,COPY_INFO *info) if (info->handle_duplicates != DUP_IGNORE || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; + table->file->restore_auto_increment(); } else info->copied++; if (key) my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); + if (!table->file->has_transactions()) + thd->no_trans_update= 1; DBUG_RETURN(0); err: @@ -608,26 +811,24 @@ err: /****************************************************************************** Check that all fields with arn't null_fields are used - If DONT_USE_DEFAULT_FIELDS isn't defined use default value for not set - fields. 
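write_record() condenses to: try the insert, and on a duplicate key either fail, overwrite the conflicting row (REPLACE), or apply the ON DUPLICATE KEY UPDATE assignments. A toy version over a map; the mode names are invented, and it deliberately omits auto_increment restoration, key buffers and the CHECK OPTION step shown above:

#include <cstdio>
#include <map>

typedef std::map<int,int> table_t;        // toy single-key table: key -> payload

enum dup_mode { DUP_ERROR_MODE, DUP_REPLACE_MODE, DUP_UPDATE_MODE };

// Condensed shape of the function: attempt the insert, and on a duplicate
// key either fail, overwrite the conflicting row, or update it in place.
static int toy_write_record(table_t &t, int key, int value, dup_mode mode,
                            int &copied, int &updated, int &deleted)
{
  if (t.count(key) == 0)
  {
    t[key]= value;
    copied++;
    return 0;
  }
  switch (mode)
  {
  case DUP_UPDATE_MODE:          // INSERT ... ON DUPLICATE KEY UPDATE
    t[key]+= value;
    updated++;
    return 0;
  case DUP_REPLACE_MODE:         // REPLACE: old row goes away, new one comes in
    t.erase(key);
    deleted++;
    t[key]= value;
    copied++;
    return 0;
  default:                       // plain INSERT: duplicate key is an error
    return 1;
  }
}

int main()
{
  table_t t;
  int copied= 0, updated= 0, deleted= 0;
  toy_write_record(t, 1, 10, DUP_ERROR_MODE,   copied, updated, deleted);
  toy_write_record(t, 1, 5,  DUP_UPDATE_MODE,  copied, updated, deleted);
  toy_write_record(t, 1, 99, DUP_REPLACE_MODE, copied, updated, deleted);
  printf("copied=%d updated=%d deleted=%d value=%d\n",
         copied, updated, deleted, t[1]);   // copied=2 updated=1 deleted=1 value=99
  return 0;
}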
******************************************************************************/ -static int check_null_fields(THD *thd __attribute__((unused)), - TABLE *entry __attribute__((unused))) +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry) { -#ifdef DONT_USE_DEFAULT_FIELDS + if (!thd->abort_on_warning) // No check if not strict mode + return 0; + for (Field **field=entry->field ; *field ; field++) { - if ((*field)->query_id != thd->query_id && !(*field)->maybe_null() && - *field != entry->timestamp_field && - *field != entry->next_number_field) + if ((*field)->query_id != thd->query_id && + ((*field)->flags & NO_DEFAULT_VALUE_FLAG)) { - my_printf_error(ER_BAD_NULL_ERROR, ER(ER_BAD_NULL_ERROR),MYF(0), + my_printf_error(ER_NO_DEFAULT_FOR_FIELD, + ER(ER_NO_DEFAULT_FOR_FIELD),MYF(0), (*field)->field_name); return 1; } } -#endif return 0; } @@ -643,13 +844,12 @@ public: char *record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used; - int log_query; + bool query_start_used,last_insert_id_used,insert_id_used, log_query; ulonglong last_insert_id; timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, int log_query_arg) + delayed_row(enum_duplicates dup_arg, bool log_query_arg) :record(0),query(0),dup(dup_arg),log_query(log_query_arg) {} ~delayed_row() { @@ -963,7 +1163,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, - char *query, uint query_length, int log_on) + char *query, uint query_length, bool log_on) { delayed_row *row=0; delayed_insert *di=thd->di; @@ -1043,7 +1243,7 @@ void kill_delayed_threads(void) { /* Ensure that the thread doesn't kill itself while we are looking at it */ pthread_mutex_lock(&tmp->mutex); - tmp->thd.killed=1; + tmp->thd.killed= THD::KILL_CONNECTION; if (tmp->thd.mysys_var) { pthread_mutex_lock(&tmp->thd.mysys_var->mutex); @@ -1082,7 +1282,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) thd->thread_id=thread_id++; thd->end_time(); threads.append(thd); - thd->killed=abort_loop; + thd->killed=abort_loop ? 
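check_that_all_fields_are_given_values() replaces the old compile-time NULL check: it only runs in strict mode and rejects columns that were not assigned in this statement and carry NO_DEFAULT_VALUE_FLAG. A self-contained approximation, where the query_id comparison stands in for "was this column mentioned in the INSERT":

#include <cstdio>
#include <vector>

struct column {
  const char *name;
  unsigned long query_id;   // last statement that assigned the column
  bool no_default_value;    // NOT NULL without an explicit DEFAULT
};

// Mirrors the shape of the check: skip it entirely outside strict mode,
// otherwise flag every untouched column that cannot fall back to a default.
static int check_all_columns_given_values(std::vector<column> &cols,
                                           unsigned long query_id,
                                           bool strict_mode)
{
  if (!strict_mode)
    return 0;
  for (column &c : cols)
    if (c.query_id != query_id && c.no_default_value)
    {
      printf("Field '%s' doesn't have a default value\n", c.name);
      return 1;
    }
  return 0;
}

int main()
{
  std::vector<column> cols= { { "id", 7, true }, { "note", 3, true }, { "ts", 3, false } };
  // Statement 7 assigned only `id`; `note` has no default, so strict mode fails.
  printf("rc=%d\n", check_all_columns_given_values(cols, 7, true));
  return 0;
}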
THD::KILL_CONNECTION : THD::NOT_KILLED; pthread_mutex_unlock(&LOCK_thread_count); pthread_mutex_lock(&di->mutex); @@ -1135,7 +1335,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) for (;;) { - if (thd->killed) + if (thd->killed == THD::KILL_CONNECTION) { uint lock_count; /* @@ -1183,7 +1383,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) break; if (error == ETIME || error == ETIMEDOUT) { - thd->killed=1; + thd->killed= THD::KILL_CONNECTION; break; } } @@ -1202,8 +1402,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) /* request for new delayed insert */ if (!(thd->lock=mysql_lock_tables(thd,&di->table,1))) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } pthread_cond_broadcast(&di->cond_client); } @@ -1211,8 +1412,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) { if (di->handle_inserts()) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Some fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } } di->status=0; @@ -1242,7 +1444,7 @@ end: close_thread_tables(thd); // Free the table di->table=0; di->dead= 1; // If error - thd->killed= 1; + thd->killed= THD::KILL_CONNECTION; // If error pthread_cond_broadcast(&di->cond_client); // Safety pthread_mutex_unlock(&di->mutex); @@ -1311,7 +1513,7 @@ bool delayed_insert::handle_inserts(void) max_rows=delayed_insert_limit; if (thd.killed || table->version != refresh_version) { - thd.killed=1; + thd.killed= THD::KILL_CONNECTION; max_rows= ~0; // Do as much as possible } @@ -1344,7 +1546,7 @@ bool delayed_insert::handle_inserts(void) using_ignore=1; } thd.clear_error(); // reset error for binlog - if (write_record(table,&info)) + if (write_record(&thd, table, &info)) { info.error_count++; // Ignore errors thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); @@ -1355,15 +1557,10 @@ bool delayed_insert::handle_inserts(void) using_ignore=0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); } - if (row->query) + if (row->query && row->log_query && using_bin_log) { - if (row->log_query & DELAYED_LOG_UPDATE) - mysql_update_log.write(&thd,row->query, row->query_length); - if (row->log_query & DELAYED_LOG_BIN && using_bin_log) - { - Query_log_event qinfo(&thd, row->query, row->query_length,0); - mysql_bin_log.write(&qinfo); - } + Query_log_event qinfo(&thd, row->query, row->query_length,0); + mysql_bin_log.write(&qinfo); } if (table->blob_fields) free_delayed_insert_blobs(table); @@ -1379,7 +1576,7 @@ bool delayed_insert::handle_inserts(void) on this table until all entries has been processed */ if (group_count++ >= max_rows && (row= rows.head()) && - (!(row->log_query & DELAYED_LOG_BIN && using_bin_log) || + (!(row->log_query & using_bin_log) || row->query)) { group_count=0; @@ -1443,13 +1640,59 @@ bool delayed_insert::handle_inserts(void) Store records in INSERT ... 
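The delayed-insert worker now tests the enumerated THD kill states instead of a boolean, so "abort this data" and "terminate the connection" can be told apart. A rough sketch of a worker reacting to such an enum; the recovery behaviour shown for KILL_BAD_DATA is illustrative only and not taken from the patch:

#include <cstdio>

// Reduced version of the new kill states seen in the patch.
enum killed_state { NOT_KILLED= 0, KILL_BAD_DATA, KILL_CONNECTION };

struct worker { killed_state killed; int pending_rows; };

// Drain work until told to go away: a data-level kill only abandons the
// current batch, a connection kill ends the loop for good.
static void run(worker &w)
{
  while (w.killed != KILL_CONNECTION && w.pending_rows > 0)
  {
    if (w.killed == KILL_BAD_DATA)
    {
      printf("batch aborted, %d rows dropped\n", w.pending_rows);
      w.killed= NOT_KILLED;     // recover and wait for the next batch
      break;
    }
    w.pending_rows--;           // handle one queued row
  }
  printf("rows left: %d\n", w.pending_rows);
}

int main()
{
  worker w= { NOT_KILLED, 3 };
  run(w);                       // processes all three rows
  return 0;
}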
SELECT * ***************************************************************************/ + +/* + make insert specific preparation and checks after opening tables + + SYNOPSIS + mysql_insert_select_prepare() + thd thread handler + + RETURN + 0 OK + -1 Error +*/ + +int mysql_insert_select_prepare(THD *thd) +{ + LEX *lex= thd->lex; + DBUG_ENTER("mysql_insert_select_prepare"); + /* + SELECT_LEX do not belong to INSERT statement, so we can't add WHERE + clasue if table is VIEW + */ + lex->query_tables->no_where_clause= 1; + if (mysql_prepare_insert_check_table(thd, lex->query_tables, + lex->field_list, + &lex->select_lex.where)) + DBUG_RETURN(-1); + DBUG_RETURN(0); +} + + +select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, + List<Item> *fields_par, enum_duplicates duplic, + bool ignore_check_option_errors) + :table_list(table_list_par), table(table_par), fields(fields_par), + last_insert_id(0), + insert_into_view(table_list_par && table_list_par->view != 0) +{ + bzero((char*) &info,sizeof(info)); + info.handle_duplicates=duplic; + if (table_list_par) + info.view= (table_list_par->view ? table_list_par : 0); + info.ignore= ignore_check_option_errors; +} + + int select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { DBUG_ENTER("select_insert::prepare"); unit= u; - if (check_insert_fields(thd,table,*fields,values,1)) + if (check_insert_fields(thd, table_list, *fields, values, 1, + !insert_into_view)) DBUG_RETURN(1); restore_record(table,default_values); // Get empty record @@ -1460,7 +1703,13 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); - DBUG_RETURN(0); + thd->no_trans_update= 0; + thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + DBUG_RETURN(fields->elements && + check_that_all_fields_are_given_values(thd, table)); } @@ -1472,12 +1721,15 @@ void select_insert::cleanup() select_insert::~select_insert() { + DBUG_ENTER("~select_insert"); if (table) { table->next_number_field=0; table->file->reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; + thd->abort_on_warning= 0; + DBUG_VOID_RETURN; } @@ -1493,7 +1745,15 @@ bool select_insert::send_data(List<Item> &values) fill_record(*fields, values, 1); else fill_record(table->field, values, 1); - if (thd->net.report_error || write_record(table,&info)) + switch (table_list->view_check_option(thd, + thd->lex->duplicates == DUP_IGNORE)) + { + case VIEW_CHECK_SKIP: + DBUG_RETURN(0); + case VIEW_CHECK_ERROR: + DBUG_RETURN(1); + } + if (thd->net.report_error || write_record(thd, table, &info)) DBUG_RETURN(1); if (table->next_number_field) // Clear for next record { @@ -1532,7 +1792,6 @@ void select_insert::send_error(uint errcode,const char *err) { if (last_insert_id) thd->insert_id(last_insert_id); // For binary log - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, @@ -1543,7 +1802,9 @@ void select_insert::send_error(uint errcode,const char *err) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (info.copied || info.deleted || info.updated) + { query_cache_invalidate3(thd, table, 1); + } ha_rollback_stmt(thd); DBUG_VOID_RETURN; } @@ -1572,7 +1833,6 @@ bool select_insert::send_eof() if (last_insert_id) thd->insert_id(last_insert_id); // For binary log /* Write to binlog before 
commiting transaction */ - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (!error) @@ -1597,7 +1857,8 @@ bool select_insert::send_eof() else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,last_insert_id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff); DBUG_RETURN(0); } @@ -1612,7 +1873,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) DBUG_ENTER("select_create::prepare"); unit= u; - table= create_table_from_items(thd, create_info, db, name, + table= create_table_from_items(thd, create_info, create_table, extra_fields, keys, &values, &lock); if (!table) DBUG_RETURN(-1); // abort() deletes table @@ -1634,13 +1895,19 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) table->next_number_field=table->found_next_number_field; restore_record(table,default_values); // Get empty record - thd->count_cuted_fields= CHECK_FIELD_WARN; // count warnings + /* Count warnings. This is reset in ~select_insert() */ + thd->count_cuted_fields= CHECK_FIELD_WARN; thd->cuted_fields=0; if (info.handle_duplicates == DUP_IGNORE || info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); - DBUG_RETURN(0); + thd->no_trans_update= 0; + thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + DBUG_RETURN(check_that_all_fields_are_given_values(thd, table)); } @@ -1652,7 +1919,7 @@ bool select_create::send_data(List<Item> &values) return 0; } fill_record(field, values, 1); - if (thd->net.report_error ||write_record(table,&info)) + if (thd->net.report_error || write_record(thd, table, &info)) return 1; if (table->next_number_field) // Clear for next record { @@ -1711,13 +1978,13 @@ void select_create::abort() ulong version= table->version; hash_delete(&open_cache,(byte*) table); if (!create_info->table_existed) - quick_rm_table(table_type, db, name); + quick_rm_table(table_type, create_table->db, create_table->real_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) VOID(pthread_cond_broadcast(&COND_refresh)); } else if (!create_info->table_existed) - close_temporary_table(thd, db, name); + close_temporary_table(thd, create_table->db, create_table->real_name); table=0; } VOID(pthread_mutex_unlock(&LOCK_open)); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 679ffb2140e..86219abc632 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -21,7 +21,14 @@ #include "item_create.h" #include <m_ctype.h> #include <hash.h> +#include "sp.h" +#include "sp_head.h" +/* + We are using pointer to this variable for distinguishing between assignment + to NEW row field (when parsing trigger definition) and structured variable. 
+*/ +sys_var_long_ptr trg_new_row_fake_var(0, 0); /* Fake table list object, pointer to which is used as special value for @@ -119,11 +126,11 @@ void lex_start(THD *thd, uchar *buf,uint length) LEX *lex= thd->lex; lex->unit.init_query(); lex->unit.init_select(); - lex->thd= thd; - lex->unit.thd= thd; + lex->thd= lex->unit.thd= thd; lex->select_lex.init_query(); lex->value_list.empty(); lex->param_list.empty(); + lex->view_list.empty(); lex->unit.next= lex->unit.master= lex->unit.link_next= lex->unit.return_to= 0; lex->unit.prev= lex->unit.link_prev= 0; @@ -134,15 +141,24 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); lex->select_lex.options= 0; + lex->select_lex.init_order(); + lex->select_lex.group_list.empty(); lex->describe= 0; lex->derived_tables= FALSE; + lex->view_prepare_mode= FALSE; lex->lock_option= TL_READ; lex->found_colon= 0; lex->safe_to_cache_query= 1; lex->time_zone_tables_used= 0; + lex->proc_table= lex->query_tables= 0; + lex->query_tables_last= &lex->query_tables; + lex->variables_used= 0; + lex->select_lex.parent_lex= lex; + lex->empty_field_list_on_rset= 0; lex->select_lex.select_number= 1; lex->next_state=MY_LEX_START; - lex->end_of_query=(lex->ptr=buf)+length; + lex->buf= lex->ptr= buf; + lex->end_of_query=buf+length; lex->yylineno = 1; lex->in_comment=0; lex->length=0; @@ -157,6 +173,13 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); lex->sql_command=SQLCOM_END; lex->duplicates= DUP_ERROR; + lex->sphead= NULL; + lex->spcont= NULL; + lex->trg_table= NULL; + lex->proc_list.first= 0; + + if (lex->spfuns.records) + hash_reset(&lex->spfuns); } void lex_end(LEX *lex) @@ -179,27 +202,6 @@ static int find_keyword(LEX *lex, uint len, bool function) lex->yylval->symbol.length=len; return symbol->tok; } -#ifdef HAVE_DLOPEN - udf_func *udf; - if (function && using_udf_functions && (udf=find_udf((char*) tok, len))) - { - lex->safe_to_cache_query=0; - lex->yylval->udf=udf; - switch (udf->returns) { - case STRING_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_CHAR_FUNC : UDA_CHAR_SUM; - case REAL_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_FLOAT_FUNC : UDA_FLOAT_SUM; - case INT_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? 
UDF_INT_FUNC : UDA_INT_SUM; - case ROW_RESULT: - default: - // This case should never be choosen - DBUG_ASSERT(0); - return 0; - } - } -#endif return 0; } @@ -284,7 +286,8 @@ static char *get_text(LEX *lex) continue; } #endif - if (c == '\\') + if (c == '\\' && + !(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)) { // Escaped character found_escape=1; if (lex->ptr == lex->end_of_query) @@ -1029,24 +1032,31 @@ void st_select_lex_unit::init_query() cleaned= 0; item_list.empty(); describe= 0; + found_rows_for_union= 0; } void st_select_lex::init_query() { st_select_lex_node::init_query(); table_list.empty(); + top_join_list.empty(); + join_list= &top_join_list; + embedding= 0; item_list.empty(); join= 0; - where= 0; + where= prep_where= 0; olap= UNSPECIFIED_OLAP_TYPE; having_fix_field= 0; resolve_mode= NOMATTER_MODE; cond_count= with_wild= 0; + conds_processed_with_permanent_arena= 0; ref_pointer_array= 0; select_n_having_items= 0; - prep_where= 0; subquery_in_having= explicit_limit= 0; + first_execution= 1; + first_cond_optimization= 1; parsing_place= NO_MATTER; + no_wrap_view_item= 0; } void st_select_lex::init_select() @@ -1308,147 +1318,6 @@ bool st_select_lex::test_limit() return(0); } -/* - Interface method of table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - result pointer on result list of tables pointer - check_derived force derived table chacking (used for creating - table list for derived query) - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table entry in all used tables are set to point - to the entries in this list. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit::create_total_list(THD *thd_arg, st_lex *lex, - TABLE_LIST **result_arg) -{ - *result_arg= 0; - if (!(res= create_total_list_n_last_return(thd_arg, lex, &result_arg))) - { - /* - If time zone tables were used implicitly in statement we should add - them to global table list. - */ - if (lex->time_zone_tables_used) - { - /* - Altough we are modifying lex data, it won't raise any problem in - case when this lex belongs to some prepared statement or stored - procedure: such modification does not change any invariants imposed - by requirement to reuse the same lex for multiple executions. - */ - if ((lex->time_zone_tables_used= my_tz_get_table_list(thd)) != - &fake_time_zone_tables_list) - { - *result_arg= lex->time_zone_tables_used; - } - else - { - send_error(thd, 0); - res= 1; - } - } - } - return res; -} - -/* - Table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - lex pointer on LEX stricture - result pointer on pointer on result list of tables pointer - - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table_list in all tables of global list are set to point - to the local SELECT_LEX entries. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit:: -create_total_list_n_last_return(THD *thd_arg, - st_lex *lex, - TABLE_LIST ***result_arg) -{ - TABLE_LIST *slave_list_first=0, **slave_list_last= &slave_list_first; - TABLE_LIST **new_table_list= *result_arg, *aux; - SELECT_LEX *sl= (SELECT_LEX*)slave; - - /* - iterate all inner selects + fake_select (if exists), - fake_select->next_select() always is 0 - */ - for (; - sl; - sl= (sl->next_select() ? - sl->next_select() : - (sl == fake_select_lex ? 
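get_text() now honours the backslash only when NO_BACKSLASH_ESCAPES is off. A tiny standalone scanner showing the effect of that mode bit on a quoted literal; escape handling is deliberately reduced to "backslash-X yields X", unlike the real lexer:

#include <cstdio>
#include <string>

// Return the contents of a single-quoted literal, honouring the mode bit.
static std::string scan_literal(const std::string &src, bool no_backslash_escapes)
{
  std::string out;
  for (size_t i= 1; i + 1 < src.size(); i++)   // skip the opening/closing quotes
  {
    char c= src[i];
    if (c == '\\' && !no_backslash_escapes && i + 2 < src.size())
    {
      out+= src[++i];                          // consume the escaped character
      continue;
    }
    out+= c;
  }
  return out;
}

int main()
{
  std::string lit= "'a\\nb'";
  printf("[%s]\n", scan_literal(lit, false).c_str()); // [anb]   backslash eaten
  printf("[%s]\n", scan_literal(lit, true).c_str());  // [a\nb]  backslash kept literally
  return 0;
}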
- 0 : - fake_select_lex))) - { - // check usage of ORDER BY in union - if (sl->order_list.first && sl->next_select() && !sl->braces && - sl->linkage != GLOBAL_OPTIONS_TYPE) - { - net_printf(thd_arg,ER_WRONG_USAGE,"UNION","ORDER BY"); - return 1; - } - - for (SELECT_LEX_UNIT *inner= sl->first_inner_unit(); - inner; - inner= inner->next_unit()) - { - if (inner->create_total_list_n_last_return(thd, lex, - &slave_list_last)) - return 1; - } - - if ((aux= (TABLE_LIST*) sl->table_list.first)) - { - TABLE_LIST *next_table; - for (; aux; aux= next_table) - { - TABLE_LIST *cursor; - next_table= aux->next; - /* Add to the total table list */ - if (!(cursor= (TABLE_LIST *) thd->memdup((char*) aux, - sizeof(*aux)))) - { - send_error(thd,0); - return 1; - } - *new_table_list= cursor; - cursor->table_list= aux; - new_table_list= &cursor->next; - *new_table_list= 0; // end result list - aux->table_list= cursor; - } - } - } - - if (slave_list_first) - { - *new_table_list= slave_list_first; - new_table_list= slave_list_last; - } - *result_arg= new_table_list; - return 0; -} - st_select_lex_unit* st_select_lex_unit::master_unit() { @@ -1470,7 +1339,9 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc) bool st_select_lex::add_item_to_list(THD *thd, Item *item) { - return item_list.push_back(item); + DBUG_ENTER("st_select_lex::add_item_to_list"); + DBUG_PRINT("info", ("Item: %p", item)); + DBUG_RETURN(item_list.push_back(item)); } @@ -1558,10 +1429,10 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) */ Item_arena *arena= thd->current_arena; return (ref_pointer_array= - (Item **)arena->alloc(sizeof(Item*) * - (item_list.elements + - select_n_having_items + - order_group_num)* 5)) == 0; + (Item **)arena->alloc(sizeof(Item*) * + (item_list.elements + + select_n_having_items + + order_group_num)* 5)) == 0; } @@ -1603,7 +1474,7 @@ bool st_select_lex_unit::check_updateable(char *db, char *table) bool st_select_lex::check_updateable(char *db, char *table) { - if (find_real_table_in_list(get_table_list(), db, table)) + if (find_table_in_local_list(get_table_list(), db, table)) return 1; return check_updateable_in_subqueries(db, table); @@ -1670,7 +1541,14 @@ void st_select_lex::print_order(String *str, ORDER *order) { for (; order; order= order->next) { - (*order->item)->print(str); + if (order->counter_used) + { + char buffer[20]; + my_snprintf(buffer, 20, "%u", order->counter); + str->append(buffer); + } + else + (*order->item)->print(str); if (!order->asc) str->append(" desc", 5); if (order->next) @@ -1681,6 +1559,17 @@ void st_select_lex::print_order(String *str, ORDER *order) void st_select_lex::print_limit(THD *thd, String *str) { + SELECT_LEX_UNIT *unit= master_unit(); + Item_subselect *item= unit->item; + if (item && unit->global_parameters == this && + (item->substype() == Item_subselect::EXISTS_SUBS || + item->substype() == Item_subselect::IN_SUBS || + item->substype() == Item_subselect::ALL_SUBS)) + { + DBUG_ASSERT(select_limit == 1L && offset_limit == 0L); + return; + } + if (!thd) thd= current_thd; @@ -1702,46 +1591,256 @@ void st_select_lex::print_limit(THD *thd, String *str) } -st_lex::st_lex() - :result(0) -{} +/* + Check whether the merging algorithm can be used on this VIEW + + SYNOPSIS + st_lex::can_be_merged() + + DESCRIPTION + We can apply merge algorithm if it is single SELECT view with + subqueries only in WHERE clause (we do not count SELECTs of underlying + views, and second level subqueries) and we have not grpouping, ordering, + HAVING clause, 
aggregate functions, DISTINCT clause, LIMIT clause and + several underlying tables. + + RETURN + FALSE - only temporary table algorithm can be used + TRUE - merge algorithm can be used +*/ + +bool st_lex::can_be_merged() +{ + // TODO: do not forget implement case when select_lex.table_list.elements==0 + + /* find non VIEW subqueries/unions */ + bool selects_allow_merge= select_lex.next_select() == 0; + if (selects_allow_merge) + { + for (SELECT_LEX_UNIT *unit= select_lex.first_inner_unit(); + unit; + unit= unit->next_unit()) + { + if (unit->first_select()->parent_lex == this && + (unit->item == 0 || unit->item->place() != IN_WHERE)) + { + selects_allow_merge= 0; + break; + } + } + } + + return (selects_allow_merge && + select_lex.order_list.elements == 0 && + select_lex.group_list.elements == 0 && + select_lex.having == 0 && + select_lex.with_sum_func == 0 && + select_lex.table_list.elements == 1 && + !(select_lex.options & SELECT_DISTINCT) && + select_lex.select_limit == HA_POS_ERROR); +} /* - Unlink first table from global table list and first table from outer select - list (lex->select_lex) + check if command can use VIEW with MERGE algorithm (for top VIEWs) + + SYNOPSIS + st_lex::can_use_merged() + + DESCRIPTION + Only listed here commands can use merge algorithm in top level + SELECT_LEX (for subqueries will be used merge algorithm if + st_lex::can_not_use_merged() is not TRUE). + + RETURN + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used +*/ + +bool st_lex::can_use_merged() +{ + switch (sql_command) + { + case SQLCOM_SELECT: + case SQLCOM_CREATE_TABLE: + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_LOAD: + return TRUE; + default: + return FALSE; + } +} + +/* + Check if command can't use merged views in any part of command + + SYNOPSIS + st_lex::can_not_use_merged() + + DESCRIPTION + Temporary table algorithm will be used on all SELECT levels for queries + listed here (see also st_lex::can_use_merged()). 
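can_be_merged() is essentially a conjunction of structural properties of the view's single SELECT: no UNION, no subquery outside WHERE, no grouping, ordering, HAVING, aggregates, DISTINCT or LIMIT, and exactly one underlying table. The same decision expressed over a flat struct; the field names only roughly mirror the members tested above, this is a paraphrase rather than the server code:

#include <cstdio>

// A flattened description of one view SELECT, just enough for the test.
struct view_select {
  bool is_union;               // more than one SELECT joined by UNION
  bool subquery_outside_where; // e.g. a subquery in the select list
  int  order_by_elements;
  int  group_by_elements;
  bool has_having;
  bool has_aggregates;
  int  table_count;
  bool is_distinct;
  bool has_limit;
};

// MERGE is possible only when nothing forces materialisation.
static bool can_be_merged(const view_select &v)
{
  return !v.is_union &&
         !v.subquery_outside_where &&
         v.order_by_elements == 0 &&
         v.group_by_elements == 0 &&
         !v.has_having &&
         !v.has_aggregates &&
         v.table_count == 1 &&
         !v.is_distinct &&
         !v.has_limit;
}

int main()
{
  view_select simple=    { false, false, 0, 0, false, false, 1, false, false };
  view_select aggregate= { false, false, 0, 1, false, true,  1, false, false };
  printf("%d %d\n", can_be_merged(simple), can_be_merged(aggregate)); // 1 0
  return 0;
}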
+ + RETURN + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used +*/ + +bool st_lex::can_not_use_merged() +{ + switch (sql_command) + { + case SQLCOM_CREATE_VIEW: + case SQLCOM_SHOW_CREATE: + return TRUE; + default: + return FALSE; + } +} + +/* + Detect that we need only table structure of derived table/view + + SYNOPSIS + only_view_structure() + + RETURN + TRUE yes, we need only structure + FALSE no, we need data +*/ +bool st_lex::only_view_structure() +{ + switch(sql_command) + { + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_REVOKE_ALL: + case SQLCOM_REVOKE: + case SQLCOM_GRANT: + case SQLCOM_CREATE_VIEW: + return TRUE; + default: + return FALSE; + } +} + + +/* + initialize limit counters + + SYNOPSIS + st_select_lex_unit::set_limit() + values - SELECT_LEX with initial values for counters + sl - SELECT_LEX for options set +*/ + +void st_select_lex_unit::set_limit(SELECT_LEX *values, + SELECT_LEX *sl) +{ + offset_limit_cnt= values->offset_limit; + select_limit_cnt= values->select_limit+values->offset_limit; + if (select_limit_cnt < values->select_limit) + select_limit_cnt= HA_POS_ERROR; // no limit + if (select_limit_cnt == HA_POS_ERROR) + sl->options&= ~OPTION_FOUND_ROWS; +} + + +/* + Unlink the first table from the global table list and the first table from + outer select (lex->select_lex) local list SYNOPSIS unlink_first_table() - tables Global table list - global_first Save first global table here - local_first Save first local table here + link_to_local Set to 1 if caller should link this table to local list - NORES - global_first & local_first are used to save result for link_first_table_back + NOTES + We assume that first tables in both lists is the same table or the local + list is empty. RETURN - global list without first table + 0 If 'query_tables' == 0 + unlinked table + In this case link_to_local is set. */ -TABLE_LIST *st_lex::unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first) +TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local) { - *global_first= tables; - *local_first= (TABLE_LIST*)select_lex.table_list.first; - /* - Exclude from global table list - */ - tables= tables->next; - /* - and from local list if it is not the same - */ - select_lex.table_list.first= ((&select_lex != all_selects_list) ? - (byte*) (*local_first)->next : - (byte*) tables); - (*global_first)->next= 0; - return tables; + TABLE_LIST *first; + if ((first= query_tables)) + { + /* + Exclude from global table list + */ + if ((query_tables= query_tables->next_global)) + query_tables->prev_global= &query_tables; + first->next_global= 0; + + /* + and from local list if it is not empty + */ + if ((*link_to_local= test(select_lex.table_list.first))) + { + select_lex.table_list.first= (byte*) first->next_local; + select_lex.table_list.elements--; //safety + first->next_local= 0; + /* + Ensure that the global list has the same first table as the local + list. + */ + first_lists_tables_same(); + } + } + return first; +} + + +/* + Bring first local table of first most outer select to first place in global + table list + + SYNOPSYS + st_lex::first_lists_tables_same() + + NOTES + In many cases (for example, usual INSERT/DELETE/...) the first table of + main SELECT_LEX have special meaning => check that it is the first table + in global list and re-link to be first in the global list if it is + necessary. 
We need such re-linking only for queries with sub-queries in + the select list, as only in this case tables of sub-queries will go to + the global list first. +*/ + +void st_lex::first_lists_tables_same() +{ + TABLE_LIST *first_table= (TABLE_LIST*) select_lex.table_list.first; + if (query_tables != first_table && first_table != 0) + { + TABLE_LIST *next; + if (query_tables_last == &first_table->next_global) + query_tables_last= first_table->prev_global; + + if ((next= *first_table->prev_global= first_table->next_global)) + next->prev_global= first_table->prev_global; + /* include in new place */ + first_table->next_global= query_tables; + /* + We are sure that query_tables is not 0, because first_table was not + first table in the global list => we can use + query_tables->prev_global without check of query_tables + */ + query_tables->prev_global= &first_table->next_global; + first_table->prev_global= &query_tables; + query_tables= first_table; + } } @@ -1750,36 +1849,54 @@ TABLE_LIST *st_lex::unlink_first_table(TABLE_LIST *tables, SYNOPSIS link_first_table_back() - tables Global table list - global_first Saved first global table - local_first Saved first local table + link_to_local do we need link this table to local RETURN global list */ -TABLE_LIST *st_lex::link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first) + +void st_lex::link_first_table_back(TABLE_LIST *first, + bool link_to_local) { - global_first->next= tables; - if (&select_lex != all_selects_list) + if (first) { - /* - we do not touch local table 'next' field => we need just - put the table in the list - */ - select_lex.table_list.first= (byte*) local_first; + if ((first->next_global= query_tables)) + query_tables->prev_global= &first->next_global; + query_tables= first; + + if (link_to_local) + { + first->next_local= (TABLE_LIST*) select_lex.table_list.first; + select_lex.table_list.first= (byte*) first; + select_lex.table_list.elements++; //safety + } + } +} + + +/* + fix some structures at the end of preparation + + SYNOPSIS + st_select_lex::fix_prepare_information + thd thread handler + conds pointer on conditions which will be used for execution statement +*/ + +void st_select_lex::fix_prepare_information(THD *thd, Item **conds) +{ + if (!thd->current_arena->is_conventional() && first_execution) + { + first_execution= 0; + prep_where= where; } - else - select_lex.table_list.first= (byte*) global_first; - return global_first; } /* - There are st_select_lex::add_table_to_list & + There are st_select_lex::add_table_to_list & st_select_lex::set_lock_for_tables are in sql_parse.cc - st_select_lex::print is in sql_select.h + st_select_lex::print is in sql_select.cc st_select_lex_unit::prepare, st_select_lex_unit::exec, st_select_lex_unit::cleanup, st_select_lex_unit::reinit_exec_mechanism, diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 90c020b3e93..fd1c1dee0b7 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -21,6 +21,10 @@ class Table_ident; class sql_exchange; class LEX_COLUMN; +class sp_head; +class sp_name; +class sp_instr; +class sp_pcontext; /* The following hack is needed because mysql_yacc.cc does not define @@ -75,8 +79,13 @@ enum enum_sql_command { SQLCOM_SHOW_WARNS, SQLCOM_EMPTY_QUERY, SQLCOM_SHOW_ERRORS, SQLCOM_SHOW_COLUMN_TYPES, SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES, SQLCOM_HELP, SQLCOM_DROP_USER, SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, - + SQLCOM_CREATE_PROCEDURE, SQLCOM_CREATE_SPFUNCTION, SQLCOM_CALL, + SQLCOM_DROP_PROCEDURE, 
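unlink_first_table(), link_first_table_back() and first_lists_tables_same() all splice the head of the global table chain through prev_global, the address of the pointer that points at the element, so removal and re-insertion need no special cases. A stripped-down version of that intrusive-list surgery; only the global chain is modelled and nothing here is the server's API:

#include <cstdio>

struct table_ref {
  const char *name;
  table_ref  *next_global;
  table_ref **prev_global;   // address of the pointer that points at us
};

// Append keeping prev_global consistent, in the spirit of add_to_query_tables().
static void append(table_ref **&tail, table_ref *t)
{
  *(t->prev_global= tail)= t;
  tail= &t->next_global;
  t->next_global= 0;
}

// Detach the current head and fix the new head's back pointer.
static table_ref *unlink_first(table_ref *&head)
{
  table_ref *first= head;
  if (first && (head= first->next_global))
    head->prev_global= &head;
  if (first)
    first->next_global= 0;
  return first;
}

int main()
{
  table_ref *head= 0, **tail= &head;
  table_ref a= { "t1", 0, 0 }, b= { "t2", 0, 0 };
  append(tail, &a);
  append(tail, &b);

  table_ref *first= unlink_first(head);        // take "t1" out
  printf("unlinked %s, new head %s\n", first->name, head->name);

  // Put it back in front, mirroring link_first_table_back().
  if ((first->next_global= head))
    head->prev_global= &first->next_global;
  head= first;
  first->prev_global= &head;
  for (table_ref *t= head; t; t= t->next_global)
    printf("%s\n", t->name);
  return 0;
}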
SQLCOM_ALTER_PROCEDURE,SQLCOM_ALTER_FUNCTION, + SQLCOM_SHOW_CREATE_PROC, SQLCOM_SHOW_CREATE_FUNC, + SQLCOM_SHOW_STATUS_PROC, SQLCOM_SHOW_STATUS_FUNC, SQLCOM_PREPARE, SQLCOM_EXECUTE, SQLCOM_DEALLOCATE_PREPARE, + SQLCOM_CREATE_VIEW, SQLCOM_DROP_VIEW, + SQLCOM_CREATE_TRIGGER, SQLCOM_DROP_TRIGGER, /* This should be the last !!! */ SQLCOM_END }; @@ -85,6 +94,39 @@ enum enum_sql_command { #define DESCRIBE_NORMAL 1 #define DESCRIBE_EXTENDED 2 +enum enum_sp_suid_behaviour +{ + SP_IS_DEFAULT_SUID= 0, + SP_IS_NOT_SUID, + SP_IS_SUID +}; + +enum enum_sp_data_access +{ + SP_DEFAULT_ACCESS= 0, + SP_CONTAINS_SQL, + SP_NO_SQL, + SP_READS_SQL_DATA, + SP_MODIFIES_SQL_DATA +}; + + +#define DERIVED_SUBQUERY 1 +#define DERIVED_VIEW 2 + +enum enum_view_create_mode +{ + VIEW_CREATE_NEW, // check that there are not such VIEW/table + VIEW_ALTER, // check that VIEW .frm with such name exists + VIEW_CREATE_OR_REPLACE // check only that there are not such table +}; + +enum enum_drop_mode +{ + DROP_DEFAULT, // mode is not specified + DROP_CASCADE, // CASCADE option + DROP_RESTRICT // RESTRICT option +}; typedef List<Item> List_item; @@ -276,6 +318,8 @@ public: friend class st_select_lex_unit; friend bool mysql_new_select(struct st_lex *lex, bool move_down); + friend my_bool mysql_make_view (File_parser *parser, + TABLE_LIST *table); private: void fast_exclude(); }; @@ -340,7 +384,6 @@ public: Procedure *last_procedure; /* Pointer to procedure, if such exists */ void init_query(); - bool create_total_list(THD *thd, st_lex *lex, TABLE_LIST **result); st_select_lex_unit* master_unit(); st_select_lex* outer_select(); st_select_lex* first_select() @@ -371,12 +414,10 @@ public: ulong init_prepare_fake_select_lex(THD *thd); int change_result(select_subselect *result, select_subselect *old_result); + void set_limit(st_select_lex *values, st_select_lex *sl); friend void lex_start(THD *thd, uchar *buf, uint length); friend int subselect_union_engine::exec(); -private: - bool create_total_list_n_last_return(THD *thd, st_lex *lex, - TABLE_LIST ***result); }; typedef class st_select_lex_unit SELECT_LEX_UNIT; @@ -389,6 +430,8 @@ public: char *db, *db1, *table1, *db2, *table2; /* For outer join using .. 
*/ Item *where, *having; /* WHERE & HAVING clauses */ Item *prep_where; /* saved WHERE clause for prepared statement processing */ + /* point on lex in which it was created, used in view subquery detection */ + st_lex *parent_lex; enum olap_type olap; SQL_LIST table_list, group_list; /* FROM & GROUP BY clauses */ List<Item> item_list; /* list of fields & expressions */ @@ -401,7 +444,10 @@ public: List<Item_func_match> *ftfunc_list; List<Item_func_match> ftfunc_list_alloc; JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ - const char *type; /* type of select for EXPLAIN */ + List<TABLE_LIST> top_join_list; /* join list of the top level */ + List<TABLE_LIST> *join_list; /* list for the currently parsed join */ + TABLE_LIST *embedding; /* table embedding to the above list */ + const char *type; /* type of select for EXPLAIN */ SQL_LIST order_list; /* ORDER clause */ List<List_item> expr_list; @@ -420,6 +466,11 @@ public: uint cond_count; /* number of arguments of and/or/xor in where/having */ enum_parsing_place parsing_place; /* where we are parsing expression */ bool with_sum_func; /* sum function indicator */ + /* + PS or SP cond natural joins was alredy processed with permanent + arena and all additional items which we need alredy stored in it + */ + bool conds_processed_with_permanent_arena; ulong table_join_options; uint in_sum_expr; @@ -435,6 +486,10 @@ public: query processing end even if we use temporary table */ bool subquery_in_having; + bool first_execution; /* first execution in SP or PS */ + bool first_cond_optimization; + /* do not wrap view fields with Item_ref */ + bool no_wrap_view_item; /* SELECT for SELECT command st_select_lex. Used to privent scaning @@ -497,6 +552,12 @@ public: List<String> *ignore_index= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); + bool init_nested_join(THD *thd); + TABLE_LIST *end_nested_join(THD *thd); + TABLE_LIST *nest_last_join(THD *thd); + void save_names_for_using_list(TABLE_LIST *tab1, TABLE_LIST *tab2); + void add_joined_table(TABLE_LIST *table); + TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); List<String>* get_use_index(); List<String>* get_ignore_index(); @@ -524,6 +585,7 @@ public: void print(THD *thd, String *str); static void print_order(String *str, ORDER *order); void print_limit(THD *thd, String *str); + void fix_prepare_information(THD *thd, Item **conds); }; typedef class st_select_lex SELECT_LEX; @@ -535,6 +597,9 @@ typedef class st_select_lex SELECT_LEX; #define ALTER_RENAME 32 #define ALTER_ORDER 64 #define ALTER_OPTIONS 128 +#define ALTER_CHANGE_COLUMN_DEFAULT 256 +#define ALTER_KEYS_ONOFF 512 +#define ALTER_CONVERT 1024 typedef struct st_alter_info { @@ -543,13 +608,29 @@ typedef struct st_alter_info uint flags; enum enum_enable_or_disable keys_onoff; enum tablespace_op_type tablespace_op; - bool is_simple; st_alter_info(){clear();} void clear(){keys_onoff= LEAVE_AS_IS;tablespace_op= NO_TABLESPACE_OP;} void reset(){drop_list.empty();alter_list.empty();clear();} } ALTER_INFO; +struct st_sp_chistics +{ + LEX_STRING comment; + enum enum_sp_suid_behaviour suid; + bool detistic; + enum enum_sp_data_access daccess; +}; + + +struct st_trg_chistics +{ + enum trg_action_time_type action_time; + enum trg_event_type event; +}; + +extern sys_var_long_ptr trg_new_row_fake_var; + /* The state of the lex parsing. 
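The new st_select_lex members top_join_list, join_list and embedding represent nested joins as a list of table references in which a bracketed group carries its own join list, and every member points back at its enclosing group. A bare-bones rendering of that shape; the remark about list ordering is an assumption, not taken from the patch:

#include <cstdio>
#include <list>
#include <string>

// Every table reference is either a real table or a bracketed group
// (nested join) owning its own join list; `embedding` points at the
// group a reference belongs to, 0 at the top level.
struct table_ref {
  std::string alias;
  std::list<table_ref*> *nested;
  table_ref *embedding;
};

static void print_join(const std::list<table_ref*> &join_list, int depth)
{
  for (table_ref *t : join_list)
  {
    printf("%*s%s\n", depth * 2, "", t->alias.c_str());
    if (t->nested)
      print_join(*t->nested, depth + 1);
  }
}

int main()
{
  // t1 JOIN (t2 JOIN t3); elements are kept roughly in parse order here.
  table_ref t2= { "t2", 0, 0 }, t3= { "t3", 0, 0 };
  std::list<table_ref*> inner= { &t2, &t3 };
  table_ref group= { "(nested)", &inner, 0 };
  t2.embedding= t3.embedding= &group;
  table_ref t1= { "t1", 0, 0 };
  std::list<table_ref*> top= { &t1, &group };

  print_join(top, 0);
  return 0;
}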
This is saved in the THD struct */ typedef struct st_lex @@ -562,6 +643,7 @@ typedef struct st_lex SELECT_LEX *current_select; /* list of all SELECT_LEX */ SELECT_LEX *all_selects_list; + uchar *buf; /* The beginning of string, used by SPs */ uchar *ptr,*tok_start,*tok_end,*end_of_query; char *length,*dec,*change,*name; char *help_arg; @@ -579,6 +661,14 @@ typedef struct st_lex gptr yacc_yyss,yacc_yyvs; THD *thd; CHARSET_INFO *charset; + TABLE_LIST *query_tables; /* global list of all tables in this query */ + /* + last element next_global of previous list (used only for list building + during parsing and VIEW processing. This pointer is not valid in + mysql_execute_command + */ + TABLE_LIST **query_tables_last; + TABLE_LIST *proc_table; /* refer to mysql.proc if it was opened by VIEW */ List<key_part_spec> col_list; List<key_part_spec> ref_list; @@ -591,6 +681,7 @@ typedef struct st_lex List<List_item> many_values; List<set_var_base> var_list; List<Item_param> param_list; + List<LEX_STRING> view_list; // view list (list of field names in view) SQL_LIST proc_list, auxilliary_table_list, save_list; TYPELIB *interval; create_field *last_field; @@ -610,15 +701,22 @@ typedef struct st_lex enum enum_ha_read_modes ha_read_mode; enum ha_rkey_function ha_rkey_mode; enum enum_var_type option_type; + enum enum_view_create_mode create_view_mode; + enum enum_drop_mode drop_mode; uint uint_geom_type; uint grant, grant_tot_col, which_columns; uint fk_delete_opt, fk_update_opt, fk_match_option; uint slave_thd_opt, start_transaction_opt; uint8 describe; + uint8 derived_tables; + uint8 create_view_algorithm; + uint8 create_view_check; bool drop_if_exists, drop_temporary, local_file, one_shot_set; bool in_comment, ignore_space, verbose, no_write_to_binlog; - bool derived_tables; + /* special JOIN::prepare mode: changing of query is prohibited */ + bool view_prepare_mode; bool safe_to_cache_query; + bool variables_used; ALTER_INFO alter_info; /* Prepared statements SQL syntax:*/ LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */ @@ -637,7 +735,39 @@ typedef struct st_lex list of those tables after they are opened. */ TABLE_LIST *time_zone_tables_used; - st_lex(); + sp_head *sphead; + sp_name *spname; + bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ + sp_pcontext *spcont; + HASH spfuns; /* Called functions */ + st_sp_chistics sp_chistics; + bool only_view; /* used for SHOW CREATE TABLE/VIEW */ + /* + field_list was created for view and should be removed before PS/SP + rexecuton + */ + bool empty_field_list_on_rset; + /* Characterstics of trigger being created */ + st_trg_chistics trg_chistics; + /* + Points to table being opened when we are parsing trigger definition + while opening table. 0 if we are parsing user provided CREATE TRIGGER + or any other statement. Used for NEW/OLD row field lookup in trigger. 
+ */ + TABLE *trg_table; + + st_lex() :result(0) + { + extern byte *sp_lex_spfuns_key(const byte *ptr, uint *plen, my_bool first); + hash_init(&spfuns, system_charset_info, 0, 0, 0, sp_lex_spfuns_key, 0, 0); + } + + ~st_lex() + { + if (spfuns.array.buffer) + hash_free(&spfuns); + } + inline void uncacheable(uint8 cause) { safe_to_cache_query= 0; @@ -657,15 +787,34 @@ typedef struct st_lex un->uncacheable|= cause; } } - TABLE_LIST *unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first); - TABLE_LIST *link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first); + TABLE_LIST *unlink_first_table(bool *link_to_local); + void link_first_table_back(TABLE_LIST *first, bool link_to_local); + void first_lists_tables_same(); + inline void add_to_query_tables(TABLE_LIST *table) + { + *(table->prev_global= query_tables_last)= table; + query_tables_last= &table->next_global; + } + + bool can_be_merged(); + bool can_use_merged(); + bool can_not_use_merged(); + bool only_view_structure(); } LEX; extern TABLE_LIST fake_time_zone_tables_list; +struct st_lex_local: public st_lex +{ + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { + return (void*) alloc_root(mem_root, (uint) size); + } + static void operator delete(void *ptr,size_t size) {} +}; void lex_init(void); void lex_free(void); diff --git a/sql/sql_list.h b/sql/sql_list.h index 38c5af2a4f7..141742c3d4a 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -113,6 +113,16 @@ public: } return 1; } + inline bool push_back(void *info, MEM_ROOT *mem_root) + { + if (((*last)=new (mem_root) list_node(info, &end_of_list))) + { + last= &(*last)->next; + elements++; + return 0; + } + return 1; + } inline bool push_front(void *info) { list_node *node=new list_node(info,first); @@ -129,10 +139,12 @@ public: void remove(list_node **prev) { list_node *node=(*prev)->next; - delete *prev; - *prev=node; if (!--elements) last= &first; + else if (last == &(*prev)->next) + last= prev; + delete *prev; + *prev=node; } inline void *pop(void) { @@ -143,6 +155,39 @@ public: last= &first; return tmp->info; } + inline void concat(base_list *list) + { + if (!list->is_empty()) + { + *last= list->first; + last= list->last; + elements+= list->elements; + } + } + inline void disjoin(base_list *list) + { + list_node **prev= &first; + list_node *node= first; + list_node *list_first= list->first; + elements=0; + while (node && node != list_first) + { + prev= &node->next; + node= node->next; + elements++; + } + *prev= *last; + last= prev; + } + inline void prepand(base_list *list) + { + if (!list->is_empty()) + { + *list->last= first; + first= list->first; + elements+= list->elements; + } + } inline list_node* last_node() { return *last; } inline list_node* first_node() { return first;} inline void *head() { return first->info; } @@ -249,10 +294,15 @@ public: inline List() :base_list() {} inline List(const List<T> &tmp) :base_list(tmp) {} inline bool push_back(T *a) { return base_list::push_back(a); } + inline bool push_back(T *a, MEM_ROOT *mem_root) + { return base_list::push_back(a, mem_root); } inline bool push_front(T *a) { return base_list::push_front(a); } inline T* head() {return (T*) base_list::head(); } inline T** head_ref() {return (T**) base_list::head_ref(); } inline T* pop() {return (T*) base_list::pop(); } + inline void concat(List<T> *list) { base_list::concat(list); } + inline void 
disjoin(List<T> *list) { base_list::disjoin(list); } + inline void prepand(List<T> *list) { base_list::prepand(list); } void delete_elements(void) { list_node *element,*next; @@ -273,6 +323,8 @@ public: inline T* operator++(int) { return (T*) base_list_iterator::next(); } inline T *replace(T *a) { return (T*) base_list_iterator::replace(a); } inline T *replace(List<T> &a) { return (T*) base_list_iterator::replace(a); } + inline void rewind(void) { base_list_iterator::rewind(); } + inline void remove() { base_list_iterator::remove(); } inline void after(T *a) { base_list_iterator::after(a); } inline T** ref(void) { return (T**) base_list_iterator::ref(); } }; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 17ab472c87b..9c2a025e089 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -71,16 +71,19 @@ public: void set_io_cache_arg(void* arg) { cache.arg = arg; } }; -static int read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table, +static int read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - ulong skip_lines); -static int read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, + ulong skip_lines, + bool ignore_check_option_errors); +static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines); + String &enclosed, ulong skip_lines, + bool ignore_check_option_errors); int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, - bool read_file_from_client,thr_lock_type lock_type) + bool read_file_from_client,thr_lock_type lock_type, + bool ignore_check_option_errors) { char name[FN_REFLEN]; File file; @@ -88,6 +91,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, int error; String *field_term=ex->field_term,*escaped=ex->escaped; String *enclosed=ex->enclosed; + Item *unused_conds= 0; bool is_fifo=0; #ifndef EMBEDDED_LIBRARY LOAD_FILE_INFO lf_info; @@ -99,8 +103,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, loaded is located */ char *tdb= thd->db ? 
thd->db : db; // Result is never null - bool transactional_table, log_delayed; ulong skip_lines= ex->skip_lines; + int res; + bool transactional_table, log_delayed; DBUG_ENTER("mysql_load"); #ifdef EMBEDDED_LIBRARY @@ -113,8 +118,17 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, MYF(0)); DBUG_RETURN(-1); } - if (!(table = open_ltable(thd,table_list,lock_type))) + table_list->lock_type= lock_type; + if ((res= open_and_lock_tables(thd, table_list))) + DBUG_RETURN(res); + if (setup_tables(thd, table_list, &unused_conds)) + DBUG_RETURN(-1); + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "LOAD"); DBUG_RETURN(-1); + } + table= table_list->table; transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); @@ -127,7 +141,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, else { // Part field list thd->dupp_field=0; - if (setup_tables(table_list) || + /* TODO: use this conds for 'WITH CHECK OPTIONS' */ + Item *unused_conds= 0; + if (setup_tables(thd, table_list, &unused_conds) || setup_fields(thd, 0, table_list, fields, 1, 0, 0)) DBUG_RETURN(-1); if (thd->dupp_field) @@ -135,6 +151,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name); DBUG_RETURN(-1); } + if (check_that_all_fields_are_given_values(thd, table)) + DBUG_RETURN(1); } uint tot_length=0; @@ -273,12 +291,20 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, ha_enable_transaction(thd, FALSE); table->file->start_bulk_insert((ha_rows) 0); table->copy_blobs=1; + + thd->no_trans_update= 0; + thd->abort_on_warning= (handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + if (!field_term->length() && !enclosed->length()) - error=read_fixed_length(thd,info,table,fields,read_info, - skip_lines); + error= read_fixed_length(thd, info, table_list, fields,read_info, + skip_lines, ignore_check_option_errors); else - error=read_sep_field(thd,info,table,fields,read_info,*enclosed, - skip_lines); + error= read_sep_field(thd, info, table_list, fields, read_info, + *enclosed, skip_lines, + ignore_check_option_errors); if (table->file->end_bulk_insert()) error=1; /* purecov: inspected */ ha_enable_transaction(thd, TRUE); @@ -345,9 +371,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); send_ok(thd,info.copied+info.deleted,0L,name); - // on the slave thd->query is never initialized - if (!thd->slave_thread) - mysql_update_log.write(thd,thd->query,thd->query_length); if (!log_delayed) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; @@ -369,12 +392,14 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, #endif /*!EMBEDDED_LIBRARY*/ if (transactional_table) error=ha_autocommit_or_rollback(thd,error); + err: if (thd->lock) { mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -383,12 +408,15 @@ err: ****************************************************************************/ static int -read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, - READ_INFO &read_info, ulong skip_lines) +read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields, 
READ_INFO &read_info, ulong skip_lines, + bool ignore_check_option_errors) { List_iterator_fast<Item> it(fields); Item_field *sql_field; + TABLE *table= table_list->table; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_fixed_length"); id= 0; @@ -401,7 +429,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } if (skip_lines) @@ -420,6 +448,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, #ifdef HAVE_purify read_info.row_end[0]=0; #endif + no_trans_update= !table->file->has_transactions(); while ((sql_field= (Item_field*) it++)) { Field *field= sql_field->field; @@ -439,7 +468,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, field->field_length) length=field->field_length; save_chr=pos[length]; pos[length]='\0'; // Safeguard aganst malloc - field->store((char*) pos,length,read_info.read_charset); + field->store((char*) pos,length,read_info.read_charset); pos[length]=save_chr; if ((pos+=length) > read_info.row_end) pos= read_info.row_end; /* Fills rest with space */ @@ -452,8 +481,21 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } - if (write_record(table,&info)) + + switch(table_list->view_check_option(thd, + ignore_check_option_errors)) + { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + if (thd->killed || write_record(thd,table,&info)) DBUG_RETURN(1); + thd->no_trans_update= no_trans_update; + /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the binary/update log. 
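Note (illustrative, not part of the patch): both row readers in this file -- read_fixed_length() above and read_sep_field() below -- now consult the view's WITH CHECK OPTION once per row, before write_record() is called. table_list->view_check_option(thd, ignore_check_option_errors) may answer VIEW_CHECK_SKIP, in which case the row is silently dropped and the reader advances to the next input line, or VIEW_CHECK_ERROR, which aborts the whole load; any other value (a "check passed" code defined outside this diff) falls through to write_record(). Together with the new thd->abort_on_warning setting in mysql_load(), this appears to be what lets LOAD DATA honour both updatable-view checks and the strict SQL modes. The per-row pattern, using only names visible in this diff:

    /* sketch of the flow added to read_fixed_length()/read_sep_field();
       not itself an addition to the patch */
    switch (table_list->view_check_option(thd, ignore_check_option_errors))
    {
    case VIEW_CHECK_SKIP:            /* check failed, errors ignored: drop the row */
      read_info.next_line();
      goto continue_loop;
    case VIEW_CHECK_ERROR:           /* check failed, not ignored: abort the load  */
      DBUG_RETURN(-1);
    }
    if (thd->killed || write_record(thd, table, &info))
      DBUG_RETURN(1);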
@@ -474,6 +516,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -483,24 +526,28 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, static int -read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, +read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines) + String &enclosed, ulong skip_lines, + bool ignore_check_option_errors) { List_iterator_fast<Item> it(fields); Item_field *sql_field; + TABLE *table= table_list->table; uint enclosed_length; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_sep_field"); enclosed_length=enclosed.length(); id= 0; + no_trans_update= !table->file->has_transactions(); for (;;it.rewind()) { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } while ((sql_field=(Item_field*) it++)) @@ -556,7 +603,19 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); } } - if (write_record(table,&info)) + + switch(table_list->view_check_option(thd, + ignore_check_option_errors)) + { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + + if (thd->killed || write_record(thd, table, &info)) DBUG_RETURN(1); /* If auto_increment values are used, save the first one @@ -568,6 +627,7 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, id= thd->last_insert_id; if (table->next_number_field) table->next_number_field->reset(); // Clear for next record + thd->no_trans_update= no_trans_update; if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) @@ -576,8 +636,11 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); + if (thd->killed) + DBUG_RETURN(1); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc index 46f1e6c156e..20fd7fe2ee0 100644 --- a/sql/sql_olap.cc +++ b/sql/sql_olap.cc @@ -152,7 +152,8 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) List<Item> all_fields(select_lex->item_list); - if (setup_tables((TABLE_LIST *)select_lex->table_list.first) || + if (setup_tables(lex->thd, (TABLE_LIST *)select_lex->table_list.first + &select_lex->where) || setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, select_lex->item_list, 1, &all_fields,1) || setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index d81ed2cd014..1c23162b212 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -71,9 +71,13 @@ Long data handling: #include "sql_acl.h" #include "sql_select.h" // for JOIN #include <m_ctype.h> // for isspace() +#include "sp_head.h" +#include "sp.h" #ifdef EMBEDDED_LIBRARY /* include MYSQL_BIND headers */ #include <mysql.h> +#else +#include <mysql_com.h> #endif /****************************************************************************** @@ -105,7 +109,7 @@ public: }; static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context); + String *expanded_query); 
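Note on the changed declaration just above (illustrative, not part of the patch): execute_stmt() loses its bool set_context argument because the context switch has moved to its callers, as the later hunks in this file show. mysql_stmt_execute() no longer goes through execute_stmt() at all: it backs up the current statement, switches to the prepared statement and its arena, and runs mysql_execute_command() inline, apparently so that the new CURSOR_TYPE_READ_ONLY path can keep the statement's tables and items alive after execution. mysql_sql_stmt_execute(), in turn, performs the switch itself before calling execute_stmt(thd, stmt, &expanded_query). Roughly, on the SQL EXECUTE path (argument lists abbreviated, only names that appear in this diff):

    /* sketch only */
    thd->set_n_backup_statement(stmt, &thd->stmt_backup); /* caller saves current context */
    thd->set_statement(stmt);                              /* and switches to the stmt     */
    ...
    execute_stmt(thd, stmt, &expanded_query);              /* no set_context flag needed   */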
/****************************************************************************** Implementation @@ -167,7 +171,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) (stmt->param_count && stmt->thd->protocol_simple.send_fields((List<Item> *) &stmt->lex->param_list, - 0))); + Protocol::SEND_EOF))); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -411,6 +415,7 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len) *pos+= length; } + static void set_param_date(Item_param *param, uchar **pos, ulong len) { MYSQL_TIME tm; @@ -874,13 +879,14 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, SYNOPSIS mysql_test_insert() stmt prepared statemen handler - tables list of tables queries + tables global/local table list RETURN VALUE 0 ok 1 error, sent to the client -1 error, not sent to client */ + static int mysql_test_insert(Prepared_statement *stmt, TABLE_LIST *table_list, List<Item> &fields, @@ -893,9 +899,7 @@ static int mysql_test_insert(Prepared_statement *stmt, LEX *lex= stmt->lex; List_iterator_fast<List_item> its(values_list); List_item *values; - int res= -1; - TABLE_LIST *insert_table_list= - (TABLE_LIST*) lex->select_lex.table_list.first; + int res; DBUG_ENTER("mysql_test_insert"); if ((res= insert_precheck(thd, table_list))) @@ -905,9 +909,9 @@ static int mysql_test_insert(Prepared_statement *stmt, open temporary memory pool for temporary data allocated by derived tables & preparation procedure */ - if (open_and_lock_tables(thd, table_list)) + if ((res= open_and_lock_tables(thd, table_list))) { - DBUG_RETURN(-1); + DBUG_RETURN(res); } if ((values= its++)) @@ -915,9 +919,9 @@ static int mysql_test_insert(Prepared_statement *stmt, uint value_count; ulong counter= 0; - if ((res= mysql_prepare_insert(thd, table_list, insert_table_list, - table_list->table, fields, values, - update_fields, update_values, duplic))) + if ((res= mysql_prepare_insert(thd, table_list, table_list->table, + fields, values, update_fields, + update_values, duplic))) goto error; value_count= values->elements; @@ -933,7 +937,7 @@ static int mysql_test_insert(Prepared_statement *stmt, MYF(0), counter); goto error; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + if (setup_fields(thd, 0, table_list, *values, 0, 0, 0)) goto error; } } @@ -969,22 +973,26 @@ static int mysql_test_update(Prepared_statement *stmt, if ((res= update_precheck(thd, table_list))) DBUG_RETURN(res); - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + if (!(res=open_and_lock_tables(thd, table_list))) { - TABLE_LIST *update_table_list= (TABLE_LIST *)select->table_list.first; if (!(res= mysql_prepare_update(thd, table_list, - update_table_list, &select->where, select->order_list.elements, (ORDER *) select->order_list.first))) { - if (setup_fields(thd, 0, update_table_list, - select->item_list, 1, 0, 0) || - setup_fields(thd, 0, update_table_list, - stmt->lex->value_list, 0, 0, 0)) - res= -1; + thd->lex->select_lex.no_wrap_view_item= 1; + if (setup_fields(thd, 0, table_list, select->item_list, 1, 0, 0)) + { + res= -1; + thd->lex->select_lex.no_wrap_view_item= 0; + } + else + { + thd->lex->select_lex.no_wrap_view_item= 0; + if (setup_fields(thd, 0, table_list, + stmt->lex->value_list, 0, 0, 0)) + res= -1; + } } stmt->lex->unit.cleanup(); } @@ -1017,9 +1025,7 @@ static int mysql_test_delete(Prepared_statement *stmt, if ((res= delete_precheck(thd, table_list))) DBUG_RETURN(res); - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + if 
(!(res=open_and_lock_tables(thd, table_list))) { res= mysql_prepare_delete(thd, table_list, &lex->select_lex.where); lex->unit.cleanup(); @@ -1051,7 +1057,7 @@ static int mysql_test_select(Prepared_statement *stmt, THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX_UNIT *unit= &lex->unit; - int result= 1; + int result; DBUG_ENTER("mysql_test_select"); #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1071,11 +1077,13 @@ static int mysql_test_select(Prepared_statement *stmt, goto err; } - if (open_and_lock_tables(thd, tables)) + if ((result= open_and_lock_tables(thd, tables))) { + result= 1; // Error sent send_error(thd); goto err; } + result= 1; thd->used_tables= 0; // Updated by setup_fields @@ -1107,7 +1115,7 @@ static int mysql_test_select(Prepared_statement *stmt, prepared in unit->prepare call above. */ if (send_prep_stmt(stmt, lex->result->field_count(fields)) || - lex->result->send_fields(fields, 0) || + lex->result->send_fields(fields, Protocol::SEND_EOF) || thd->protocol->flush()) goto err_prep; } @@ -1207,8 +1215,9 @@ error: SYNOPSIS select_like_statement_test() - stmt - prepared table handler - tables - global list of tables + stmt - prepared table handler + tables - global list of tables + specific_prepare - function of command specific prepare RETURN VALUE 0 success @@ -1216,7 +1225,8 @@ error: -1 error, not sent to client */ static int select_like_statement_test(Prepared_statement *stmt, - TABLE_LIST *tables) + TABLE_LIST *tables, + int (*specific_prepare)(THD *thd)) { DBUG_ENTER("select_like_statement_test"); THD *thd= stmt->thd; @@ -1226,6 +1236,9 @@ static int select_like_statement_test(Prepared_statement *stmt, if (tables && (res= open_and_lock_tables(thd, tables))) goto end; + if (specific_prepare && (res= (*specific_prepare)(thd))) + goto end; + thd->used_tables= 0; // Updated by setup_fields // JOIN::prepare calls @@ -1252,31 +1265,29 @@ end: 1 error, sent to client -1 error, not sent to client */ -static int mysql_test_create_table(Prepared_statement *stmt, - TABLE_LIST *tables) + +static int mysql_test_create_table(Prepared_statement *stmt) { DBUG_ENTER("mysql_test_create_table"); THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; int res= 0; - /* Skip first table, which is the table we are creating */ - TABLE_LIST *create_table, *create_table_local; - tables= lex->unlink_first_table(tables, &create_table, - &create_table_local); + bool link_to_local; + TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; if (!(res= create_table_precheck(thd, tables, create_table)) && select_lex->item_list.elements) { select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); + res= select_like_statement_test(stmt, tables, 0); select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; } /* put tables back for PS rexecuting */ - tables= lex->link_first_table_back(tables, create_table, - create_table_local); + lex->link_first_table_back(create_table, link_to_local); DBUG_RETURN(res); } @@ -1300,7 +1311,11 @@ static int mysql_test_multiupdate(Prepared_statement *stmt, int res; if ((res= multi_update_precheck(stmt->thd, tables))) return res; - return select_like_statement_test(stmt, tables); + /* + here we do not pass tables for opening, tables will be opened and locked + by mysql_multi_update_prepare + */ + return select_like_statement_test(stmt, 0, &mysql_multi_update_prepare); } @@ -1328,7 +1343,7 @@ static int mysql_test_multidelete(Prepared_statement *stmt, uint 
fake_counter; if ((res= multi_delete_precheck(stmt->thd, tables, &fake_counter))) return res; - return select_like_statement_test(stmt, tables); + return select_like_statement_test(stmt, tables, &mysql_multi_delete_prepare); } @@ -1354,14 +1369,15 @@ static int mysql_test_insert_select(Prepared_statement *stmt, return res; TABLE_LIST *first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; + DBUG_ASSERT(first_local_table != 0); /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= (byte*) first_local_table->next; + lex->select_lex.table_list.first= (byte*) first_local_table->next_local; /* insert/replace from SELECT give its SELECT_LEX for SELECT, and item_list belong to SELECT */ lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); + res= select_like_statement_test(stmt, tables, &mysql_insert_select_prepare); /* revert changes*/ lex->select_lex.table_list.first= (byte*) first_local_table; lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; @@ -1383,17 +1399,29 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *tables=(TABLE_LIST*) select_lex->table_list.first; + TABLE_LIST *tables; enum enum_sql_command sql_command= lex->sql_command; int res= 0; DBUG_ENTER("send_prepare_results"); DBUG_PRINT("enter",("command: %d, param_count: %ld", sql_command, stmt->param_count)); - if ((&lex->select_lex != lex->all_selects_list || - lex->time_zone_tables_used) && - lex->unit.create_total_list(thd, lex, &tables)) - DBUG_RETURN(1); + lex->first_lists_tables_same(); + tables= lex->query_tables; + + /* + Preopen 'proc' system table and cache all functions used in this + statement. We must do that before we open ordinary tables to avoid + deadlocks. We can't open and lock any table once query tables were + opened. + */ + if (lex->sql_command != SQLCOM_CREATE_PROCEDURE && + lex->sql_command != SQLCOM_CREATE_SPFUNCTION) + { + /* the error is print inside */ + if (sp_cache_functions(thd, lex)) + DBUG_RETURN(1); + } switch (sql_command) { case SQLCOM_REPLACE: @@ -1419,7 +1447,7 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) DBUG_RETURN(0); case SQLCOM_CREATE_TABLE: - res= mysql_test_create_table(stmt, tables); + res= mysql_test_create_table(stmt); break; case SQLCOM_DO: @@ -1461,6 +1489,12 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_SHOW_GRANTS: case SQLCOM_DROP_TABLE: case SQLCOM_RENAME_TABLE: + case SQLCOM_ALTER_TABLE: + case SQLCOM_COMMIT: + case SQLCOM_CREATE_INDEX: + case SQLCOM_DROP_INDEX: + case SQLCOM_ROLLBACK: + case SQLCOM_TRUNCATE: break; default: @@ -1476,7 +1510,7 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) thd->protocol->flush())); error: if (res < 0) - send_error(thd, thd->killed ? 
ER_SERVER_SHUTDOWN : 0); + send_error(thd,thd->killed_errno()); DBUG_RETURN(1); } @@ -1625,13 +1659,19 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, /* restore to WAIT_PRIOR: QUERY_PRIOR is set inside alloc_query */ if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),WAIT_PRIOR); + if (error && thd->lex->sphead) + { + if (lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } lex_end(lex); thd->restore_backup_statement(stmt, &thd->stmt_backup); cleanup_items(stmt->free_list); close_thread_tables(thd); - free_items(thd->free_list); thd->rollback_item_tree_changes(); - thd->free_list= 0; + thd->cleanup_after_query(); thd->current_arena= thd; if (error) @@ -1652,61 +1692,54 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, optimisation. */ for (; sl; sl= sl->next_select_in_list()) - { sl->prep_where= sl->where; - } stmt->state= Item_arena::PREPARED; } - DBUG_RETURN(!stmt); } + /* Reinit statement before execution */ -static void reset_stmt_for_execute(Prepared_statement *stmt) +void reset_stmt_for_execute(THD *thd, LEX *lex) { - THD *thd= stmt->thd; - LEX *lex= stmt->lex; SELECT_LEX *sl= lex->all_selects_list; + DBUG_ENTER("reset_stmt_for_execute"); + if (lex->empty_field_list_on_rset) + { + lex->empty_field_list_on_rset= 0; + lex->field_list.empty(); + } for (; sl; sl= sl->next_select_in_list()) { - /* remove option which was put by mysql_explain_union() */ - sl->options&= ~SELECT_DESCRIBE; - /* - Copy WHERE clause pointers to avoid damaging they by optimisation - */ - if (sl->prep_where) + if (!sl->first_execution) { - sl->where= sl->prep_where->copy_andor_structure(thd); - sl->where->cleanup(); - } - DBUG_ASSERT(sl->join == 0); - ORDER *order; - /* Fix GROUP list */ - for (order= (ORDER *)sl->group_list.first; order; order= order->next) - order->item= &order->item_ptr; - /* Fix ORDER list */ - for (order= (ORDER *)sl->order_list.first; order; order= order->next) - order->item= &order->item_ptr; + /* remove option which was put by mysql_explain_union() */ + sl->options&= ~SELECT_DESCRIBE; - /* - TODO: When the new table structure is ready, then have a status bit - to indicate the table is altered, and re-do the setup_* - and open the tables back. - */ - for (TABLE_LIST *tables= (TABLE_LIST*) sl->table_list.first; - tables; - tables= tables->next) - { /* - Reset old pointers to TABLEs: they are not valid since the tables - were closed in the end of previous prepare or execute call. + Copy WHERE clause pointers to avoid damaging they by optimisation */ - tables->table= 0; - tables->table_list= 0; + if (sl->prep_where) + { + sl->where= sl->prep_where->copy_andor_structure(thd); + sl->where->cleanup(); + } + DBUG_ASSERT(sl->join == 0); + ORDER *order; + /* Fix GROUP list */ + for (order= (ORDER *)sl->group_list.first; order; order= order->next) + order->item= &order->item_ptr; + /* Fix ORDER list */ + for (order= (ORDER *)sl->order_list.first; order; order= order->next) + order->item= &order->item_ptr; + } + { + TABLE_LIST *tables= (TABLE_LIST *)sl->table_list.first; + if (tables) + tables->setup_is_done= 0; } - { SELECT_LEX_UNIT *unit= sl->master_unit(); unit->unclean(); @@ -1715,9 +1748,27 @@ static void reset_stmt_for_execute(Prepared_statement *stmt) unit->reinit_exec_mechanism(); } } + + /* + TODO: When the new table structure is ready, then have a status bit + to indicate the table is altered, and re-do the setup_* + and open the tables back. 
+ */ + for (TABLE_LIST *tables= lex->query_tables; + tables; + tables= tables->next_global) + { + /* + Reset old pointers to TABLEs: they are not valid since the tables + were closed in the end of previous prepare or execute call. + */ + tables->table= 0; + } lex->current_select= &lex->select_lex; if (lex->result) lex->result->cleanup(); + + DBUG_VOID_RETURN; } @@ -1752,6 +1803,7 @@ static void reset_stmt_params(Prepared_statement *stmt) void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) { ulong stmt_id= uint4korr(packet); + ulong flags= (ulong) ((uchar) packet[4]); /* Query text for binary log, or empty string if the query is not put into binary log. @@ -1780,6 +1832,30 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) DBUG_ASSERT(thd->free_list == NULL); mysql_reset_thd_for_next_command(thd); + if (flags & (ulong) CURSOR_TYPE_READ_ONLY) + { + if (!stmt->lex->result || !stmt->lex->result->simple_select()) + { + DBUG_PRINT("info",("Cursor asked for not SELECT stmt")); + /* + If lex->result is set in the parser, this is not a SELECT + statement: we can't open a cursor for it. + */ + flags= 0; + } + else + { + DBUG_PRINT("info",("Using READ_ONLY cursor")); + if (!stmt->cursor && + !(stmt->cursor= new (&stmt->main_mem_root) Cursor())) + { + send_error(thd, ER_OUT_OF_RESOURCES); + DBUG_VOID_RETURN; + } + /* If lex->result is set, mysql_execute_command will use it */ + stmt->lex->result= &stmt->cursor->result; + } + } #ifndef EMBEDDED_LIBRARY if (stmt->param_count) { @@ -1798,14 +1874,51 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query)) goto set_params_data_err; #endif + thd->stmt_backup.set_statement(thd); + thd->set_statement(stmt); + thd->current_arena= stmt; + reset_stmt_for_execute(thd, stmt->lex); + /* From now cursors assume that thd->mem_root is clean */ + if (expanded_query.length() && + alloc_query(thd, (char *)expanded_query.ptr(), + expanded_query.length()+1)) + { + my_error(ER_OUTOFMEMORY, 0, expanded_query.length()); + goto err; + } + thd->protocol= &thd->protocol_prep; // Switch to binary protocol - execute_stmt(thd, stmt, &expanded_query, TRUE); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + mysql_execute_command(thd); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); thd->protocol= &thd->protocol_simple; // Use normal protocol + + if (flags & (ulong) CURSOR_TYPE_READ_ONLY) + { + if (stmt->cursor->is_open()) + stmt->cursor->init_from_thd(thd); + stmt->cursor->state= stmt->state; + } + else + { + thd->lex->unit.cleanup(); + cleanup_items(stmt->free_list); + reset_stmt_params(stmt); + close_thread_tables(thd); /* to close derived tables */ + thd->rollback_item_tree_changes(); + thd->cleanup_after_query(); + } + + thd->set_statement(&thd->stmt_backup); + thd->current_arena= thd; DBUG_VOID_RETURN; set_params_data_err: reset_stmt_params(stmt); my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_execute"); +err: send_error(thd); DBUG_VOID_RETURN; } @@ -1826,6 +1939,8 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) String expanded_query; DBUG_ENTER("mysql_sql_stmt_execute"); + DBUG_ASSERT(thd->free_list == NULL); + if (!(stmt= (Prepared_statement*)thd->stmt_map.find_by_name(stmt_name))) { my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_name->length, @@ -1841,10 +1956,10 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) DBUG_VOID_RETURN; } - 
DBUG_ASSERT(thd->free_list == NULL); /* Must go before setting variables, as it clears thd->user_var_events */ mysql_reset_thd_for_next_command(thd); thd->set_n_backup_statement(stmt, &thd->stmt_backup); + thd->set_statement(stmt); if (stmt->set_params_from_vars(stmt, thd->stmt_backup.lex->prepared_stmt_params, &expanded_query)) @@ -1852,7 +1967,7 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); send_error(thd); } - execute_stmt(thd, stmt, &expanded_query, FALSE); + execute_stmt(thd, stmt, &expanded_query); DBUG_VOID_RETURN; } @@ -1867,17 +1982,15 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) placeholders replaced with actual values. Otherwise empty string. NOTES - Caller must set parameter values and thd::protocol. - thd->free_list is assumed to be garbage. + Caller must set parameter values and thd::protocol. */ static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context) + String *expanded_query) { DBUG_ENTER("execute_stmt"); - if (set_context) - thd->set_n_backup_statement(stmt, &thd->stmt_backup); - reset_stmt_for_execute(stmt); + + reset_stmt_for_execute(thd, stmt->lex); if (expanded_query->length() && alloc_query(thd, (char *)expanded_query->ptr(), @@ -1896,21 +2009,74 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),QUERY_PRIOR); mysql_execute_command(thd); - thd->lex->unit.cleanup(); if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); - /* Free Items that were created during this execution of the PS. */ - free_items(thd->free_list); - thd->free_list= 0; - if (stmt->state == Item_arena::PREPARED) - stmt->state= Item_arena::EXECUTED; + thd->lex->unit.cleanup(); thd->current_arena= thd; cleanup_items(stmt->free_list); thd->rollback_item_tree_changes(); reset_stmt_params(stmt); close_thread_tables(thd); // to close derived tables thd->set_statement(&thd->stmt_backup); + thd->cleanup_after_query(); + + if (stmt->state == Item_arena::PREPARED) + { + thd->current_arena= thd; + stmt->state= Item_arena::EXECUTED; + } + DBUG_VOID_RETURN; +} + + +/* + COM_FETCH handler: fetches requested amount of rows from cursor + + SYNOPSIS + mysql_stmt_fetch() + thd Thread handler + packet Packet from client (with stmt_id & num_rows) + packet_length Length of packet +*/ + +void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length) +{ + /* assume there is always place for 8-16 bytes */ + ulong stmt_id= uint4korr(packet); + ulong num_rows= uint4korr(packet+=4); + Statement *stmt; + int error; + DBUG_ENTER("mysql_stmt_fetch"); + + thd->current_arena= stmt; + if (!(stmt= thd->stmt_map.find(stmt_id)) || + !stmt->cursor || + !stmt->cursor->is_open()) + { + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_id, "fetch"); + send_error(thd); + DBUG_VOID_RETURN; + } + + thd->set_n_backup_statement(stmt, &thd->stmt_backup); + stmt->cursor->init_thd(thd); + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), QUERY_PRIOR); + + thd->protocol= &thd->protocol_prep; // Switch to binary protocol + error= stmt->cursor->fetch(num_rows); + thd->protocol= &thd->protocol_simple; // Use normal protocol + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); + + /* Restore THD state */ + stmt->cursor->reset_thd(thd); + thd->restore_backup_statement(stmt, &thd->stmt_backup); + thd->current_arena= thd; + DBUG_VOID_RETURN; 
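  /*
    Review note, not part of the patch: a few lines above in mysql_stmt_fetch(),
    "thd->current_arena= stmt;" runs before stmt has been looked up with
    thd->stmt_map.find(stmt_id), so an uninitialized local pointer is stored
    into thd->current_arena.  The assignment presumably belongs after the
    successful lookup, along the lines of:

      if (!(stmt= thd->stmt_map.find(stmt_id)) || !stmt->cursor ||
          !stmt->cursor->is_open())
      {
        ... error path as in the patch ...
      }
      thd->current_arena= stmt;
  */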
} @@ -1920,7 +2086,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, SYNOPSIS mysql_stmt_reset() thd Thread handle - packet Packet with stmt id + packet Packet with stmt id DESCRIPTION This function resets statement to the state it was right after prepare. @@ -2090,8 +2256,11 @@ void Prepared_statement::setup_set_params() } } + Prepared_statement::~Prepared_statement() { + if (cursor) + cursor->Cursor::~Cursor(); free_items(free_list); delete lex->result; } @@ -2101,4 +2270,3 @@ Item_arena::Type Prepared_statement::type() const { return PREPARED_STATEMENT; } - diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index afaf2ed0923..3298eb68a91 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -64,10 +64,10 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) table_list= reverse_table_list(table_list); /* Find the last renamed table */ - for (table=table_list ; - table->next != ren_table ; - table=table->next->next) ; - table=table->next->next; // Skip error table + for (table= table_list; + table->next_local != ren_table ; + table= table->next_local->next_local) ; + table= table->next_local->next_local; // Skip error table /* Revert to old names */ rename_tables(thd, table, 1); @@ -80,7 +80,6 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) /* Lets hope this doesn't fail as the result will be messy */ if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -90,7 +89,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) send_ok(thd); } - unlock_table_names(thd,table_list); + unlock_table_names(thd, table_list); err: pthread_mutex_unlock(&LOCK_open); @@ -115,8 +114,8 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) while (table_list) { - TABLE_LIST *next= table_list->next; - table_list->next= prev; + TABLE_LIST *next= table_list->next_local; + table_list->next_local= prev; prev= table_list; table_list= next; } @@ -135,13 +134,13 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) TABLE_LIST *ren_table,*new_table; DBUG_ENTER("rename_tables"); - for (ren_table=table_list ; ren_table ; ren_table=new_table->next) + for (ren_table= table_list; ren_table; ren_table= new_table->next_local) { db_type table_type; char name[FN_REFLEN]; const char *new_alias, *old_alias; - new_table=ren_table->next; + new_table= ren_table->next_local; if (lower_case_table_names == 2) { old_alias= ren_table->alias; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index a84b63c270b..10f0c23f54d 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -46,16 +46,34 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg) return 0; } + /* + fake_rotate_event() builds a fake (=which does not exist physically in any + binlog) Rotate event, which contains the name of the binlog we are going to + send to the slave (because the slave may not know it if it just asked for + MASTER_LOG_FILE='', MASTER_LOG_POS=4). + < 4.0.14, fake_rotate_event() was called only if the requested pos was + 4. After this version we always call it, so that a 3.23.58 slave can rely on + it to detect if the master is 4.0 (and stop) (the _fake_ Rotate event has + zeros in the good positions which, by chance, make it possible for the 3.23 + slave to detect that this event is unexpected) (this is luck which happens + because the master and slave disagree on the size of the header of + Log_event). 
+ + Relying on the event length of the Rotate event instead of these well-placed + zeros was not possible as Rotate events have a variable-length part. +*/ + static int fake_rotate_event(NET* net, String* packet, char* log_file_name, - ulonglong position, const char**errmsg) + ulonglong position, const char** errmsg) { + DBUG_ENTER("fake_rotate_event"); char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN]; - memset(header, 0, 4); // when does not matter + memset(header, 0, 4); // 'when' (the timestamp) does not matter, is set to 0 header[EVENT_TYPE_OFFSET] = ROTATE_EVENT; char* p = log_file_name+dirname_length(log_file_name); uint ident_len = (uint) strlen(p); - ulong event_len = ident_len + ROTATE_EVENT_OVERHEAD; + ulong event_len = ident_len + LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN; int4store(header + SERVER_ID_OFFSET, server_id); int4store(header + EVENT_LEN_OFFSET, event_len); int2store(header + FLAGS_OFFSET, 0); @@ -70,9 +88,9 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name, if (my_net_write(net, (char*)packet->ptr(), packet->length())) { *errmsg = "failed on my_net_write()"; - return -1; + DBUG_RETURN(-1); } - return 0; + DBUG_RETURN(0); } static int send_file(THD *thd) @@ -313,6 +331,36 @@ int purge_master_logs_before_date(THD* thd, time_t purge_time) mysql_bin_log.purge_logs_before_date(purge_time)); } +int test_for_non_eof_log_read_errors(int error, const char **errmsg) +{ + if (error == LOG_READ_EOF) + return 0; + my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; + switch (error) { + case LOG_READ_BOGUS: + *errmsg = "bogus data in log event"; + break; + case LOG_READ_TOO_LARGE: + *errmsg = "log event entry exceeded max_allowed_packet; \ +Increase max_allowed_packet on master"; + break; + case LOG_READ_IO: + *errmsg = "I/O error reading log event"; + break; + case LOG_READ_MEM: + *errmsg = "memory allocation failed reading log event"; + break; + case LOG_READ_TRUNC: + *errmsg = "binlog truncated in the middle of event"; + break; + default: + *errmsg = "unknown error reading log event on the master"; + break; + } + return error; +} + + /* TODO: Clean up loop to only have one call to send_file() */ @@ -329,6 +377,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, int error; const char *errmsg = "Unknown error"; NET* net = &thd->net; + pthread_mutex_t *log_lock; #ifndef DBUG_OFF int left_events = max_binlog_dump_events; #endif @@ -388,18 +437,25 @@ impossible position"; goto err; } - my_b_seek(&log, pos); // Seek will done on next read /* We need to start a packet with something other than 255 - to distiquish it from error + to distinguish it from error */ - packet->set("\0", 1, &my_charset_bin); + packet->set("\0", 1, &my_charset_bin); /* This is the start of a new packet */ /* + Tell the client about the log name with a fake Rotate event; + this is needed even if we also send a Format_description_log_event just + after, because that event does not contain the binlog's name. + Note that as this Rotate event is sent before Format_description_log_event, + the slave cannot have any info to understand this event's format, so the + header len of Rotate_log_event is FROZEN + (so in 5.0 it will have a header shorter than other events except + FORMAT_DESCRIPTION_EVENT). Before 4.0.14 we called fake_rotate_event below only if (pos == BIN_LOG_HEADER_SIZE), because if this is false then the slave already knows the binlog's name. 
- Now we always call fake_rotate_event; if the slave already knew the log's + Since, we always call fake_rotate_event; if the slave already knew the log's name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is useless but does not harm much. It is nice for 3.23 (>=.58) slaves which test Rotate events to see if the master is 4.0 (then they choose to stop because they can't @@ -416,15 +472,72 @@ impossible position"; */ if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg)) { + /* + This error code is not perfect, as fake_rotate_event() does not read + anything from the binlog; if it fails it's because of an error in + my_net_write(), fortunately it will say it in errmsg. + */ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } packet->set("\0", 1, &my_charset_bin); + /* + We can set log_lock now, it does not move (it's a member of mysql_bin_log, + and it's already inited, and it will be destroyed only at shutdown). + */ + log_lock = mysql_bin_log.get_log_lock(); + if (pos > BIN_LOG_HEADER_SIZE) + { + /* Try to find a Format_description_log_event at the beginning of the binlog */ + if (!(error = Log_event::read_log_event(&log, packet, log_lock))) + { + /* + The packet has offsets equal to the normal offsets in a binlog event + +1 (the first character is \0). + */ + DBUG_PRINT("info", + ("Looked for a Format_description_log_event, found event type %d", + (*packet)[EVENT_TYPE_OFFSET+1])); + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) + { + /* + mark that this event with "log_pos=0", so the slave + should not increment master's binlog position + (rli->group_master_log_pos) + */ + int4store(packet->c_ptr() +LOG_POS_OFFSET+1,0); + /* send it */ + if (my_net_write(net, (char*)packet->ptr(), packet->length())) + { + errmsg = "Failed on my_net_write()"; + my_errno= ER_UNKNOWN_ERROR; + goto err; + } + /* + No need to save this event. We are only doing simple reads (no real + parsing of the events) so we don't need it. And so we don't need the + artificial Format_description_log_event of 3.23&4.x. + */ + } + } + else + if (test_for_non_eof_log_read_errors(error, &errmsg)) + goto err; + /* + else: it's EOF, nothing to do, go on reading next events, the + Format_description_log_event will be found naturally if it is written. + */ + /* reset the packet as we wrote to it in any case */ + packet->set("\0", 1, &my_charset_bin); + } /* end of if (pos > BIN_LOG_HEADER_SIZE); if false, the Format_description_log_event + event will be found naturally. */ + + /* seek to the requested position, to start the requested dump */ + my_b_seek(&log, pos); // Seek will done on next read + while (!net->error && net->vio != 0 && !thd->killed) { - pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock(); - while (!(error = Log_event::read_log_event(&log, packet, log_lock))) { #ifndef DBUG_OFF @@ -436,7 +549,7 @@ impossible position"; goto err; } #endif - if (my_net_write(net, (char*)packet->ptr(), packet->length()) ) + if (my_net_write(net, (char*)packet->ptr(), packet->length())) { errmsg = "Failed on my_net_write()"; my_errno= ER_UNKNOWN_ERROR; @@ -457,34 +570,14 @@ impossible position"; } /* TODO: now that we are logging the offset, check to make sure - the recorded offset and the actual match + the recorded offset and the actual match. + Guilhem 2003-06: this is not true if this master is a slave <4.0.15 + running with --log-slave-updates, because then log_pos may be the offset + in the-master-of-this-master's binlog. 
*/ - if (error != LOG_READ_EOF) - { - my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; - switch (error) { - case LOG_READ_BOGUS: - errmsg = "bogus data in log event"; - break; - case LOG_READ_TOO_LARGE: - errmsg = "log event entry exceeded max_allowed_packet; \ -Increase max_allowed_packet on master"; - break; - case LOG_READ_IO: - errmsg = "I/O error reading log event"; - break; - case LOG_READ_MEM: - errmsg = "memory allocation failed reading log event"; - break; - case LOG_READ_TRUNC: - errmsg = "binlog truncated in the middle of event"; - break; - default: - errmsg = "unknown error reading log event on the master"; - break; - } + + if (test_for_non_eof_log_read_errors(error, &errmsg)) goto err; - } if (!(flags & BINLOG_DUMP_NON_BLOCK) && mysql_bin_log.is_active(log_file_name)) @@ -618,8 +711,13 @@ Increase max_allowed_packet on master"; (void) my_close(file, MYF(MY_WME)); /* - Even if the previous log contained a Rotate_log_event, we still fake - one. + Call fake_rotate_event() in case the previous log (the one which we have + just finished reading) did not contain a Rotate event (for example (I + don't know any other example) the previous log was the last one before + the master was shutdown & restarted). + This way we tell the slave about the new log's name and position. + If the binlog is 5.0, the next event we are going to read and send is + Format_description_log_event. */ if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 || fake_rotate_event(net, packet, log_file_name, BIN_LOG_HEADER_SIZE, &errmsg)) @@ -953,7 +1051,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id) it will be slow because it will iterate through the list again. We just to do kill the thread ourselves. */ - tmp->awake(1/*prepare to die*/); + tmp->awake(THD::KILL_QUERY); pthread_mutex_unlock(&tmp->LOCK_delete); } } @@ -1113,7 +1211,7 @@ int change_master(THD* thd, MASTER_INFO* mi) mi->rli.group_relay_log_name, mi->rli.group_relay_log_pos, 0 /*no data lock*/, - &msg)) + &msg, 0)) { net_printf(thd,0,"Failed initializing relay log position: %s",msg); unlock_slave_threads(mi); @@ -1198,9 +1296,12 @@ int show_binlog_events(THD* thd) const char *errmsg = 0; IO_CACHE log; File file = -1; + Format_description_log_event *description_event= new + Format_description_log_event(3); /* MySQL 4.0 by default */ Log_event::init_show_field_list(&field_list); - if (protocol-> send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); if (mysql_bin_log.is_open()) @@ -1236,10 +1337,38 @@ int show_binlog_events(THD* thd) goto err; pthread_mutex_lock(log_lock); + + /* + open_binlog() sought to position 4. + Read the first event in case it's a Format_description_log_event, to know the + format. If there's no such event, we are 3.23 or 4.x. This code, like + before, can't read 3.23 binlogs. + This code will fail on a mixed relay log (one which has Format_desc then + Rotate then Format_desc). 
+ */ + + ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event); + if (ev) + { + if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT) + { + delete description_event; + description_event= (Format_description_log_event*) ev; + } + else + delete ev; + } + my_b_seek(&log, pos); + if (!description_event->is_valid()) + { + errmsg="Invalid Format_description event; could be out of memory"; + goto err; + } + for (event_count = 0; - (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,0)); ) + (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event)); ) { if (event_count >= limit_start && ev->net_send(protocol, linfo.log_file_name, pos)) @@ -1268,6 +1397,7 @@ int show_binlog_events(THD* thd) } err: + delete description_event; if (file >= 0) { end_io_cache(&log); @@ -1300,7 +1430,8 @@ int show_binlog_info(THD* thd) field_list.push_back(new Item_empty_string("Binlog_Do_DB",255)); field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); protocol->prepare_for_resend(); @@ -1350,7 +1481,8 @@ int show_binlogs(THD* thd) } field_list.push_back(new Item_empty_string("Log_name", 255)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); mysql_bin_log.lock_index(); index_file=mysql_bin_log.get_index_file(); diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 3c17540b664..66fdfb4c022 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -36,7 +36,7 @@ extern I_List<i_string> binlog_do_db, binlog_ignore_db; extern int max_binlog_dump_events; extern my_bool opt_sporadic_binlog_dump_fail; -#define KICK_SLAVE(thd) { pthread_mutex_lock(&(thd)->LOCK_delete); (thd)->awake(0 /* do not prepare to die*/); pthread_mutex_unlock(&(thd)->LOCK_delete); } +#define KICK_SLAVE(thd) { pthread_mutex_lock(&(thd)->LOCK_delete); (thd)->awake(THD::NOT_KILLED); pthread_mutex_unlock(&(thd)->LOCK_delete); } File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9162cd30d63..924b74e2e6e 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -30,7 +30,8 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", "MAYBE_REF","ALL","range","index","fulltext", - "ref_or_null","unique_subquery","index_subquery" + "ref_or_null","unique_subquery","index_subquery", + "index_merge" }; const key_map key_map_empty(0); @@ -42,12 +43,31 @@ static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, JOIN_TAB *join_tab, uint tables, COND *conds, + COND_EQUAL *cond_equal, table_map table_map, SELECT_LEX *select_lex); static int sort_keyuse(KEYUSE *a,KEYUSE *b); static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key); static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, table_map used_tables); -static void find_best_combination(JOIN *join,table_map rest_tables); +static void choose_plan(JOIN *join,table_map join_tables); + +static void best_access_path(JOIN *join, JOIN_TAB *s, THD *thd, + table_map remaining_tables, uint idx, + double record_count, double read_time); +static void optimize_straight_join(JOIN *join, table_map join_tables); +static void greedy_search(JOIN *join, table_map remaining_tables, + uint depth, uint prune_level); 
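Note (illustrative, not part of the patch): the forward declarations being added around this point replace the single find_best_combination() entry point with a family of join-order planners -- choose_plan() on top, optimize_straight_join() for the fixed-order case, and greedy_search() driving best_extension_by_limited_search() with a lookahead depth (determine_search_depth()) and a prune_level heuristic -- while the TODO a few lines below keeps the old exhaustive find_best() "only temporarily until 'greedy_search' is tested and approved". As a generic, self-contained illustration of a greedy depth-one ordering (an assumption about the general idea, not the MySQL implementation; extension_cost() is a placeholder for the real cost model, best_access_path() in the patch):

    #include <cstddef>
    #include <limits>
    #include <vector>

    /* Placeholder cost model; the patch uses best_access_path() instead. */
    static double extension_cost(const std::vector<int> &prefix, int table)
    {
      return (double) table + (double) prefix.size();
    }

    /* Repeatedly commit the cheapest single-table extension of the current
       partial plan until every table has been placed. */
    static std::vector<int> greedy_join_order(std::vector<int> tables)
    {
      std::vector<int> plan;
      while (!tables.empty())
      {
        std::size_t best= 0;
        double best_cost= std::numeric_limits<double>::max();
        for (std::size_t i= 0; i < tables.size(); i++)
        {
          double cost= extension_cost(plan, tables[i]);
          if (cost < best_cost)
          {
            best_cost= cost;
            best= i;
          }
        }
        plan.push_back(tables[best]);
        tables.erase(tables.begin() + (std::ptrdiff_t) best);
      }
      return plan;
    }

A larger depth, as in best_extension_by_limited_search(), looks ahead more than one table before committing each step; prune_level presumably cuts off extensions whose partial cost already exceeds the best complete plan found so far.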
+static void best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, double record_count, + double read_time, uint depth, + uint prune_level); +static uint determine_search_depth(JOIN* join); +static int join_tab_cmp(const void* ptr1, const void* ptr2); +/* + TODO: 'find_best' is here only temporarily until 'greedy_search' is + tested and approved. +*/ static void find_best(JOIN *join,table_map rest_tables,uint index, double record_count,double read_time); static uint cache_record_length(JOIN *join,uint index); @@ -58,6 +78,7 @@ static store_key *get_store_key(THD *thd, KEY_PART_INFO *key_part, char *key_buff, uint maybe_null); static bool make_simple_join(JOIN *join,TABLE *tmp_table); +static void make_outerjoin_info(JOIN *join); static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item); static void make_join_readinfo(JOIN *join,uint options); static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables); @@ -70,8 +91,19 @@ static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables, uint select_options, const char *info, Item *having, Procedure *proc, SELECT_LEX_UNIT *unit); -static COND *optimize_cond(THD *thd, COND *conds, +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref); +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx); +static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, + COND *conds, bool top); +static COND *optimize_cond(JOIN *join, COND *conds, + List<TABLE_LIST> *join_list, Item::cond_result *cond_value); +static bool resolve_nested_join (TABLE_LIST *table); static COND *remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value); static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); @@ -116,7 +148,7 @@ static int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); -static uint find_shortest_key(TABLE *table, const key_map *usable_keys); +uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit, bool no_changes); static int create_sort_index(THD *thd, JOIN *join, ORDER *order, @@ -127,6 +159,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, ulong offset,Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, uint field_count, Field **first_field, + ulong key_length,Item *having); static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count); static ulong used_blob_length(CACHE_FIELD **ptr); @@ -159,11 +192,13 @@ static void init_tmptable_sum_functions(Item_sum **func); static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table); static void copy_sum_funcs(Item_sum **func_ptr); static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab); +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr); static bool init_sum_functions(Item_sum **func, Item_sum **end); static bool update_sum_func(Item_sum **func); static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, bool distinct, const char *message=NullS); static Item *remove_additional_cond(Item* conds); +static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab); /* @@ -177,8 +212,11 @@ int handle_select(THD *thd, LEX *lex, 
select_result *result) DBUG_ENTER("handle_select"); if (select_lex->next_select()) - res=mysql_union(thd, lex, result, &lex->unit); + res= mysql_union(thd, lex, result, &lex->unit); else + { + SELECT_LEX_UNIT *unit= &lex->unit; + unit->set_limit(unit->global_parameters, select_lex); res= mysql_select(thd, &select_lex->ref_pointer_array, (TABLE_LIST*) select_lex->table_list.first, select_lex->with_wild, select_lex->item_list, @@ -190,12 +228,11 @@ int handle_select(THD *thd, LEX *lex, select_result *result) select_lex->having, (ORDER*) lex->proc_list.first, select_lex->options | thd->options, - result, &(lex->unit), &(lex->select_lex)); - - /* Don't set res if it's -1 as we may want this later */ + result, unit, select_lex); + } DBUG_PRINT("info",("res: %d report_error: %d", res, thd->net.report_error)); - if (thd->net.report_error || res<0) + if (thd->net.report_error || res < 0) { result->send_error(0, NullS); result->abort(); @@ -264,11 +301,12 @@ JOIN::prepare(Item ***rref_pointer_array, tables_list= tables_init; select_lex= select_lex_arg; select_lex->join= this; + join_list= &select_lex->top_join_list; union_part= (unit_arg->first_select()->next_select() != 0); /* Check that all tables, fields, conds and order are ok */ - if (setup_tables(tables_list) || + if (setup_tables(thd, tables_list, &conds) || setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || setup_fields(thd, (*rref_pointer_array), tables_list, fields_list, 1, @@ -295,15 +333,19 @@ JOIN::prepare(Item ***rref_pointer_array, having->split_sum_func(thd, ref_pointer_array, all_fields); } - // Is it subselect + if (!thd->lex->view_prepare_mode) { Item_subselect *subselect; + /* Is it subselect? */ if ((subselect= select_lex->master_unit()->item)) { Item_subselect::trans_res res; if ((res= subselect->select_transformer(this)) != Item_subselect::RES_OK) + { + select_lex->fix_prepare_information(thd, &conds); DBUG_RETURN((res == Item_subselect::RES_ERROR)); + } } } @@ -337,7 +379,7 @@ JOIN::prepare(Item ***rref_pointer_array, } } TABLE_LIST *table_ptr; - for (table_ptr= tables_list ; table_ptr ; table_ptr= table_ptr->next) + for (table_ptr= tables_list; table_ptr; table_ptr= table_ptr->next_local) tables++; } { @@ -406,6 +448,7 @@ JOIN::prepare(Item ***rref_pointer_array, if (alloc_func_list()) goto err; + select_lex->fix_prepare_information(thd, &conds); DBUG_RETURN(0); // All OK err: @@ -451,7 +494,6 @@ bool JOIN::test_in_subselect(Item **where) return 0; } - /* global select optimisation. return 0 - success @@ -488,8 +530,32 @@ JOIN::optimize() } } #endif + SELECT_LEX *sel= thd->lex->current_select; + if (sel->first_cond_optimization) + { + /* + The following code will allocate the new items in a permanent + MEMROOT for prepared statements and stored procedures. + */ + + Item_arena *arena= thd->current_arena, backup; + if (arena->is_conventional()) + arena= 0; // For easier test + else + thd->set_n_backup_item_arena(arena, &backup); + + sel->first_cond_optimization= 0; + + /* Convert all outer joins to inner joins if possible */ + conds= simplify_joins(this, join_list, conds, TRUE); + + sel->prep_where= conds ? 
conds->copy_andor_structure(thd) : 0; + + if (arena) + thd->restore_backup_item_arena(arena, &backup); + } - conds= optimize_cond(thd, conds, &cond_value); + conds= optimize_cond(this, conds, join_list, &cond_value); if (thd->net.report_error) { error= 1; @@ -500,6 +566,7 @@ JOIN::optimize() if (cond_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) { /* Impossible cond */ + DBUG_PRINT("info", ("Impossible WHERE")); zero_result_cause= "Impossible WHERE"; error= 0; DBUG_RETURN(0); @@ -517,20 +584,24 @@ JOIN::optimize() { if (res > 1) { + DBUG_PRINT("error",("Error from opt_sum_query")); DBUG_RETURN(1); } if (res < 0) { + DBUG_PRINT("info",("No matching min/max row")); zero_result_cause= "No matching min/max row"; error=0; DBUG_RETURN(0); } + DBUG_PRINT("info",("Select tables optimized away")); zero_result_cause= "Select tables optimized away"; tables_list= 0; // All tables resolved } } if (!tables_list) { + DBUG_PRINT("info",("No tables")); error= 0; DBUG_RETURN(0); } @@ -576,20 +647,49 @@ JOIN::optimize() if (const_tables && !thd->locked_tables && !(select_options & SELECT_NO_UNLOCK)) mysql_unlock_some_tables(thd, table, const_tables); - if (!conds && outer_join) { /* Handle the case where we have an OUTER JOIN without a WHERE */ conds=new Item_int((longlong) 1,1); // Always true } select=make_select(*table, const_table_map, - const_table_map, conds, &error); + const_table_map, conds, &error, true); if (error) { /* purecov: inspected */ error= -1; /* purecov: inspected */ DBUG_PRINT("error",("Error: make_select() failed")); DBUG_RETURN(1); } + + make_outerjoin_info(this); + + /* + Among the equal fields belonging to the same multiple equality + choose the one that is to be retrieved first and substitute + all references to these in where condition for a reference for + the selected field. + */ + if (conds) + { + conds= substitute_for_best_equal_field(conds, cond_equal, map2table); + conds->update_used_tables(); + DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal");); + } + /* + Permorm the the optimization on fields evaluation mentioned above + for all on expressions. + */ + for (JOIN_TAB *tab= join_tab + const_tables; tab < join_tab + tables ; tab++) + { + if (*tab->on_expr_ref) + { + *tab->on_expr_ref= substitute_for_best_equal_field(*tab->on_expr_ref, + tab->cond_equal, + map2table); + (*tab->on_expr_ref)->update_used_tables(); + } + } + if (make_join_select(this, select, conds)) { zero_result_cause= @@ -891,13 +991,15 @@ JOIN::optimize() if (create_sort_index(thd, this, group_list, HA_POS_ERROR, HA_POS_ERROR) || alloc_group_fields(this, group_list) || - make_sum_func_list(all_fields, fields_list, 1)) + make_sum_func_list(all_fields, fields_list, 1) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); group_list=0; } else { - if (make_sum_func_list(all_fields, fields_list, 0)) + if (make_sum_func_list(all_fields, fields_list, 0) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); if (!group_list && ! 
exec_tmp_table1->distinct && order && simple_order) { @@ -964,16 +1066,16 @@ JOIN::reinit() { DBUG_ENTER("JOIN::reinit"); /* TODO move to unit reinit */ - unit->offset_limit_cnt =select_lex->offset_limit; - unit->select_limit_cnt =select_lex->select_limit+select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; - - if (setup_tables(tables_list)) - DBUG_RETURN(1); - + unit->set_limit(select_lex, select_lex); + + /* conds should not be used here, it is added just for safety */ + if (tables_list) + { + tables_list->setup_is_done= 0; + if (setup_tables(thd, tables_list, &conds)) + DBUG_RETURN(1); + } + /* Reset of sum functions */ first_record= 0; @@ -1051,7 +1153,8 @@ JOIN::exec() (zero_result_cause?zero_result_cause:"No tables used")); else { - result->send_fields(fields_list,1); + result->send_fields(fields_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); if (!having || having->val_int()) { if (do_send_rows && (procedure ? (procedure->send_row(fields_list) || @@ -1271,7 +1374,8 @@ JOIN::exec() } } if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1) || + 1, TRUE) || + setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) { @@ -1359,7 +1463,9 @@ JOIN::exec() set_items_ref_array(items3); if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1) || thd->is_fatal_error) + 1, TRUE) || + setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || + thd->is_fatal_error) DBUG_VOID_RETURN; } if (curr_join->group_list || curr_join->order) @@ -1424,11 +1530,9 @@ JOIN::exec() /* table->keyuse is set in the case there was an original WHERE clause on the table that was optimized away. - table->on_expr tells us that it was a LEFT JOIN and there will be - at least one row generated from the table. */ if (curr_table->select_cond || - (curr_table->keyuse && !curr_table->on_expr)) + (curr_table->keyuse && !curr_table->first_inner)) { /* We have to sort all rows */ curr_join->select_limit= HA_POS_ERROR; @@ -1458,12 +1562,45 @@ JOIN::exec() DBUG_VOID_RETURN; } } + /* XXX: When can we have here thd->net.report_error not zero? */ + if (thd->net.report_error) + { + error= thd->net.report_error; + DBUG_VOID_RETURN; + } curr_join->having= curr_join->tmp_having; - thd->proc_info="Sending data"; - error= thd->net.report_error ? -1 : - do_select(curr_join, curr_fields_list, NULL, procedure); - thd->limit_found_rows= curr_join->send_records; - thd->examined_row_count= curr_join->examined_rows; + curr_join->fields= curr_fields_list; + curr_join->procedure= procedure; + + if (unit == &thd->lex->unit && + (unit->fake_select_lex == 0 || select_lex == unit->fake_select_lex) && + thd->cursor && tables != const_tables) + { + /* + We are here if this is JOIN::exec for the last select of the main unit + and the client requested to open a cursor. + We check that not all tables are constant because this case is not + handled by do_select() separately, and this case is not implemented + for cursors yet. + */ + DBUG_ASSERT(error == 0); + /* + curr_join is used only for reusable joins - that is, + to perform SELECT for each outer row (like in subselects). + This join is main, so we know for sure that curr_join == join. 
+ */ + DBUG_ASSERT(curr_join == this); + /* Open cursor for the last join sweep */ + error= thd->cursor->open(this); + } + else + { + thd->proc_info="Sending data"; + error= do_select(curr_join, curr_fields_list, NULL, procedure); + thd->limit_found_rows= curr_join->send_records; + thd->examined_row_count= curr_join->examined_rows; + } + DBUG_VOID_RETURN; } @@ -1492,6 +1629,7 @@ JOIN::cleanup() tmp_table_param.copy_field=0; DBUG_RETURN(tmp_join->cleanup()); } + cond_equal= 0; lock=0; // It's faster to unlock later join_free(1); @@ -1512,6 +1650,296 @@ JOIN::cleanup() } +/************************* Cursor ******************************************/ + +void +Cursor::init_from_thd(THD *thd) +{ + /* + We need to save and reset thd->mem_root, otherwise it'll be freed + later in mysql_parse. + + We can't just change the thd->mem_root here as we want to keep the things + that is already allocated in thd->mem_root for Cursor::fetch() + */ + main_mem_root= *thd->mem_root; + /* Allocate new memory root for thd */ + init_sql_alloc(thd->mem_root, + thd->variables.query_alloc_block_size, + thd->variables.query_prealloc_size); + + /* + The same is true for open tables and lock: save tables and zero THD + pointers to prevent table close in close_thread_tables (This is a part + of the temporary solution to make cursors work with minimal changes to + the current source base). + */ + derived_tables= thd->derived_tables; + open_tables= thd->open_tables; + lock= thd->lock; + query_id= thd->query_id; + free_list= thd->free_list; + reset_thd(thd); + /* + XXX: thd->locked_tables is not changed. + What problems can we have with it if cursor is open? + */ + /* + TODO: grab thd->free_list here? + */ +} + + +void +Cursor::init_thd(THD *thd) +{ + DBUG_ASSERT(thd->derived_tables == 0); + thd->derived_tables= derived_tables; + + DBUG_ASSERT(thd->open_tables == 0); + thd->open_tables= open_tables; + + DBUG_ASSERT(thd->lock== 0); + thd->lock= lock; + thd->query_id= query_id; +} + + +void +Cursor::reset_thd(THD *thd) +{ + thd->derived_tables= 0; + thd->open_tables= 0; + thd->lock= 0; + thd->free_list= 0; +} + + +int +Cursor::open(JOIN *join_arg) +{ + join= join_arg; + THD *thd= join->thd; + /* First non-constant table */ + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + DBUG_ENTER("Cursor::open"); + + /* + Send fields description to the client; server_status is sent + in 'EOF' packet, which ends send_fields(). + */ + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + join->result->send_fields(*join->fields, Protocol::SEND_NUM_ROWS); + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + + /* Prepare JOIN for reading rows. */ + + Next_select_func end_select= join->sort_and_group || join->procedure && + join->procedure->flags & PROC_GROUP ? 
+ end_send_group : end_send; + + join->join_tab[join->tables-1].next_select= end_select; + join->send_records= 0; + join->fetch_limit= join->unit->offset_limit_cnt; + + /* Disable JOIN CACHE as it is not working with cursors yet */ + for (JOIN_TAB *tab= join_tab; + tab != join->join_tab + join->tables - 1; + tab++) + { + if (tab->next_select == sub_select_cache) + tab->next_select= sub_select; + } + + DBUG_ASSERT(join_tab->table->reginfo.not_exists_optimize == 0); + DBUG_ASSERT(join_tab->not_used_in_distinct == 0); + /* + null_row is set only if row not found and it's outer join: should never + happen for the first table in join_tab list + */ + DBUG_ASSERT(join_tab->table->null_row == 0); + + DBUG_RETURN(join_tab->read_first_record(join_tab)); +} + + +/* + DESCRIPTION + Fetch next num_rows rows from the cursor and sent them to the client + PRECONDITION: + Cursor is open + RETURN VALUES: + -4 there are more rows, send_eof sent to the client + 0 no more rows, send_eof was sent to the client, cursor is closed + other fatal fetch error, cursor is closed (error is not reported) +*/ + +int +Cursor::fetch(ulong num_rows) +{ + THD *thd= join->thd; + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + COND *on_expr= *join_tab->on_expr_ref; + COND *select_cond= join_tab->select_cond; + READ_RECORD *info= &join_tab->read_record; + int error= 0; + + /* save references to memory, allocated during fetch */ + thd->set_n_backup_item_arena(this, &thd->stmt_backup); + + join->fetch_limit+= num_rows; + + /* + Run while there are new rows in the first table; + For each row, satisfying ON and WHERE clauses (those parts of them which + can be evaluated early), call next_select. + */ + do + { + int no_more_rows; + + join->examined_rows++; + + if (thd->killed) /* Aborted by user */ + { + my_error(ER_SERVER_SHUTDOWN,MYF(0)); + return -1; + } + + if (on_expr == 0 || on_expr->val_int()) + { + if (select_cond == 0 || select_cond->val_int()) + { + /* + TODO: call table->unlock_row() to unlock row failed selection, + when this feature will be used. + */ + error= join_tab->next_select(join, join_tab + 1, 0); + DBUG_ASSERT(error <= 0); + if (error) + { + /* real error or LIMIT/FETCH LIMIT worked */ + if (error == -4) + { + /* + FETCH LIMIT, read ahead one row, and close cursor + if there is no more rows XXX: to be fixed to support + non-equi-joins! + */ + if ((no_more_rows= info->read_record(info))) + error= no_more_rows > 0 ? 
-1: 0; + } + break; + } + } + } + /* read next row; break loop if there was an error */ + if ((no_more_rows= info->read_record(info))) + { + if (no_more_rows > 0) + error= -1; + else + { + enum { END_OF_RECORDS= 1 }; + error= join_tab->next_select(join, join_tab+1, (int) END_OF_RECORDS); + } + break; + } + } + while (thd->net.report_error == 0); + + if (thd->net.report_error) + error= -1; + + if (error == -3) /* LIMIT clause worked */ + error= 0; + +#ifdef USING_TRANSACTIONS + if (thd->transaction.all.innobase_tid) + ha_release_temporary_latches(thd); +#endif + + thd->restore_backup_item_arena(this, &thd->stmt_backup); + if (error == -4) + { + /* Fetch limit worked, possibly more rows are there */ + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + } + else + { + close(); + if (error == 0) + { + thd->server_status|= SERVER_STATUS_LAST_ROW_SENT; + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_LAST_ROW_SENT; + } + else + send_error(thd, ER_OUT_OF_RESOURCES); + /* free cursor memory */ + free_items(free_list); + free_list= 0; + free_root(&main_mem_root, MYF(0)); + } + return error; +} + + +void +Cursor::close() +{ + THD *thd= join->thd; + join->join_free(0); + if (unit) + { + /* In case of UNIONs JOIN is freed inside unit->cleanup() */ + unit->cleanup(); + } + else + { + join->cleanup(); + delete join; + } + /* XXX: Another hack: closing tables used in the cursor */ + { + DBUG_ASSERT(lock || open_tables || derived_tables); + + TABLE *tmp_open_tables= thd->open_tables; + TABLE *tmp_derived_tables= thd->derived_tables; + MYSQL_LOCK *tmp_lock= thd->lock; + + thd->open_tables= open_tables; + thd->derived_tables= derived_tables; + thd->lock= lock; + close_thread_tables(thd); + + thd->open_tables= tmp_derived_tables; + thd->derived_tables= tmp_derived_tables; + thd->lock= tmp_lock; + } + join= 0; + unit= 0; +} + + +Cursor::~Cursor() +{ + if (is_open()) + close(); + free_items(free_list); + /* + Must be last, as some memory might be allocated for free purposes, + like in free_tmp_table() (TODO: fix this issue) + */ + free_root(&main_mem_root, MYF(0)); +} + +/*********************************************************************/ + + int mysql_select(THD *thd, Item ***rref_pointer_array, TABLE_LIST *tables, uint wild_num, List<Item> &fields, @@ -1583,6 +2011,16 @@ mysql_select(THD *thd, Item ***rref_pointer_array, join->exec(); + if (thd->cursor && thd->cursor->is_open()) + { + /* + A cursor was opened for the last sweep in exec(). + We are here only if this is mysql_select for top-level SELECT_LEX_UNIT + and there were no error. 
+ */ + free_join= 0; + } + if (thd->lex->describe & DESCRIBE_EXTENDED) { select_lex->where= join->conds_history; @@ -1644,6 +2082,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, DYNAMIC_ARRAY *keyuse_array) { int error; + TABLE *table; uint i,table_count,const_count,key; table_map found_const_table_map, all_table_map, found_ref, refs; key_map const_ref, eq_part; @@ -1667,15 +2106,18 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, found_const_table_map= all_table_map=0; const_count=0; - for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++) + for (s= stat, i= 0; + tables; + s++, tables= tables->next_local, i++) { - TABLE *table; + TABLE_LIST *embedding= tables->embedding; stat_vector[i]=s; s->keys.init(); s->const_keys.init(); s->checked_keys.init(); s->needed_reg.init(); table_vector[i]=s->table=table=tables->table; + table->pos_in_table_list= tables; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);// record count table->quick_keys.clear_all(); table->reginfo.join_tab=s; @@ -1684,29 +2126,37 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, all_table_map|= table->map; s->join=join; s->info=0; // For describe - if ((s->on_expr=tables->on_expr)) + + s->dependent= tables->dep_tables; + s->key_dependent= 0; + + s->on_expr_ref= &tables->on_expr; + if (*s->on_expr_ref) { - /* Left join */ + /* s is the only inner table of an outer join */ if (!table->file->records) { // Empty table - s->key_dependent=s->dependent=0; // Ignore LEFT JOIN depend. + s->dependent= 0; // Ignore LEFT JOIN depend. set_position(join,const_count++,s,(KEYUSE*) 0); continue; } - s->key_dependent=s->dependent= - s->on_expr->used_tables() & ~(table->map); - if (table->outer_join & JOIN_TYPE_LEFT) - s->dependent|=stat_vector[i-1]->dependent | table_vector[i-1]->map; - if (tables->outer_join & JOIN_TYPE_RIGHT) - s->dependent|=tables->next->table->map; - outer_join|=table->map; + outer_join|= table->map; continue; } - if (tables->straight) // We don't have to move this - s->dependent= table_vector[i-1]->map | stat_vector[i-1]->dependent; - else - s->dependent=(table_map) 0; - s->key_dependent=(table_map) 0; + if (embedding) + { + /* s belongs to a nested join, maybe to several embedded joins */ + do + { + NESTED_JOIN *nested_join= embedding->nested_join; + s->dependent|= embedding->dep_tables; + embedding= embedding->embedding; + outer_join|= nested_join->used_tables; + } + while (embedding); + continue; + } + if ((table->system || table->file->records <= 1) && ! s->dependent && !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && !table->fulltext_searched) @@ -1717,45 +2167,42 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, stat_vector[i]=0; join->outer_join=outer_join; - /* - If outer join: Re-arrange tables in stat_vector so that outer join - tables are after all tables it is dependent of. - For example: SELECT * from A LEFT JOIN B ON B.c=C.c, C WHERE A.C=C.C - Will shift table B after table C. - */ - if (outer_join) + if (join->outer_join) { - table_map used_tables=0L; - for (i=0 ; i < join->tables-1 ; i++) + /* + Build transitive closure for relation 'to be dependent on'. + This will speed up the plan search for many cases with outer joins, + as well as allow us to catch illegal cross references/ + Warshall's algorithm is used to build the transitive closure. + As we use bitmaps to represent the relation the complexity + of the algorithm is O((number of tables)^2). 
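A rough standalone illustration of the closure step described here (toy uint64 bitmaps standing in for table_map, made-up dependencies; not the server's JOIN_TAB loop):

#include <cstdint>
#include <cstdio>

int main()
{
  const int tables= 4;
  /* bit k set in dep[i] means: table i must come after table k */
  uint64_t dep[tables]= { 0, 1ULL << 0, 1ULL << 1, 0 };   /* t1->t0, t2->t1 */

  /* Warshall over bitmap rows: if i depends on k, i inherits dep[k] */
  for (int k= 0; k < tables; k++)
    for (int i= 0; i < tables; i++)
      if (dep[i] & (1ULL << k))
        dep[i]|= dep[k];

  for (int i= 0; i < tables; i++)
  {
    if (dep[i] & (1ULL << i))               /* illegal cross reference */
      printf("t%d: cyclic dependency\n", i);
    else
      printf("t%d depends on bitmap 0x%llx\n", i, (unsigned long long) dep[i]);
  }
  return 0;
}

After the loops, dep[2] contains both t1 and t0, which is what lets the dependency check below reject plans that would read a table before one it depends on.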
+ */ + for (i= 0, s= stat ; i < table_count ; i++, s++) { - if (stat_vector[i]->dependent & ~used_tables) + for (uint j= 0 ; j < table_count ; j++) { - JOIN_TAB *save= stat_vector[i]; - uint j; - for (j=i+1; - j < join->tables && stat_vector[j]->dependent & ~used_tables; - j++) - { - JOIN_TAB *tmp=stat_vector[j]; // Move element up - stat_vector[j]=save; - save=tmp; - } - if (j == join->tables) - { - join->tables=0; // Don't use join->table - my_error(ER_WRONG_OUTER_JOIN,MYF(0)); - DBUG_RETURN(1); - } - stat_vector[i]=stat_vector[j]; - stat_vector[j]=save; + table= stat[j].table; + if (s->dependent & table->map) + s->dependent |= table->reginfo.join_tab->dependent; } - used_tables|= stat_vector[i]->table->map; + } + /* Catch illegal cross references for outer joins */ + for (i= 0, s= stat ; i < table_count ; i++, s++) + { + if (s->dependent & s->table->map) + { + join->tables=0; // Don't use join->table + my_error(ER_WRONG_OUTER_JOIN,MYF(0)); + DBUG_RETURN(1); + } + s->key_dependent= s->dependent; } } if (conds || outer_join) if (update_ref_and_keys(join->thd, keyuse_array, stat, join->tables, - conds, ~outer_join, join->select_lex)) + conds, join->cond_equal, + ~outer_join, join->select_lex)) DBUG_RETURN(1); /* Read tables with 0 or 1 rows (system tables) */ @@ -1792,14 +2239,15 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++) { - TABLE *table=s->table; + table=s->table; if (s->dependent) // If dependent on some table { // All dep. must be constants if (s->dependent & ~(found_const_table_map)) continue; if (table->file->records <= 1L && - !(table->file->table_flags() & HA_NOT_EXACT_COUNT)) + !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && + !table->pos_in_table_list->embedding) { // system table int tmp= 0; s->type=JT_SYSTEM; @@ -1898,14 +2346,23 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; - if (! s->const_keys.is_clear_all()) + /* + Add to stat->const_keys those indexes for which all group fields or + all select distinct fields participate in one index. + */ + add_group_and_distinct_keys(join, s); + + if (!s->const_keys.is_clear_all() && + !s->table->pos_in_table_list->embedding) { ha_rows records; SQL_SELECT *select; select= make_select(s->table, found_const_table_map, found_const_table_map, - s->on_expr ? s->on_expr : conds, - &error); + *s->on_expr_ref ? *s->on_expr_ref : conds, + &error, true); + if (!select) + DBUG_RETURN(1); records= get_quick_record_count(join->thd, select, s->table, &s->const_keys, join->row_limit); s->quick=select->quick; @@ -1922,7 +2379,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, join->const_table_map|= s->table->map; set_position(join,const_count++,s,(KEYUSE*) 0); s->type= JT_CONST; - if (s->on_expr) + if (*s->on_expr_ref) { /* Generate empty row */ s->info= "Impossible ON condition"; @@ -1940,17 +2397,17 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } } - /* Find best combination and return it */ join->join_tab=stat; join->map2table=stat_ref; join->table= join->all_tables=table_vector; join->const_tables=const_count; join->found_const_table_map=found_const_table_map; + /* Find an optimal join order of the non-constant tables. 
*/ if (join->const_tables != join->tables) { optimize_keyuse(join, keyuse_array); - find_best_combination(join,all_table_map & ~join->const_table_map); + choose_plan(join, all_table_map & ~join->const_table_map); } else { @@ -1958,6 +2415,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, sizeof(POSITION)*join->const_tables); join->best_read=1.0; } + /* Generate an execution plan from the found optimal join order. */ DBUG_RETURN(join->thd->killed || get_best_combination(join)); } @@ -2085,6 +2543,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, add_key_field() key_fields Pointer to add key, if usable and_level And level, to be stored in KEY_FIELD + cond Condition predicate field Field used in comparision eq_func True if we used =, <=> or IS NULL value Value used for comparison with field @@ -2100,8 +2559,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, */ static void -add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, - Field *field,bool eq_func,Item **value, uint num_values, +add_key_field(KEY_FIELD **key_fields, uint and_level, COND *cond, + Field *field, bool eq_func, Item **value, uint num_values, table_map usable_tables) { uint exists_optimize= 0; @@ -2154,6 +2613,10 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, bool is_const=1; for (uint i=0; i<num_values; i++) + /* + TODO: This looks like a bug. It should be + is_const&= (value[i])->const_item(); + */ is_const&= (*value)->const_item(); if (is_const) stat[0].const_keys.merge(possible_keys); @@ -2177,7 +2640,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, We can't use indexes if the effective collation of the operation differ from the field collation. - We can also not used index on a text column, as the column may + We also cannot use index on a text column, as the column may contain 'x' 'x\t' 'x ' and 'read_next_same' will stop after 'x' when searching for WHERE col='x ' */ @@ -2206,6 +2669,57 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, } +/* + Add possible keys to array of possible keys originated from a simple predicate + + SYNPOSIS + add_key_equal_fields() + key_fields Pointer to add key, if usable + and_level And level, to be stored in KEY_FIELD + cond Condition predicate + field Field used in comparision + eq_func True if we used =, <=> or IS NULL + value Value used for comparison with field + Is NULL for BETWEEN and IN + usable_tables Tables which can be used for key optimization + + NOTES + If field items f1 and f2 belong to the same multiple equality and + a key is added for f1, the the same key is added for f2. 
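    For instance, a minimal standalone sketch of that propagation, with
    hypothetical structs rather than the server's Item_equal/Field classes:

#include <cstdio>
#include <string>
#include <vector>

struct ToyField { std::string name; };

int main()
{
  // Equality class produced by WHERE t1.a = t2.b AND t1.a = 5
  std::vector<ToyField> equal_class= { {"t1.a"}, {"t2.b"} };
  const char *const_value= "5";

  std::vector<std::string> key_candidates;
  for (const ToyField &f : equal_class)         // one add_key_field() per member
    key_candidates.push_back(f.name + " = " + const_value);

  for (const std::string &c : key_candidates)
    printf("possible ref access: %s\n", c.c_str());
  return 0;
}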
+ + RETURN + *key_fields is incremented if we stored a key in the array +*/ + +static void +add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, + COND *cond, Item_field *field_item, + bool eq_func, Item **val, + uint num_values, table_map usable_tables) +{ + Field *field= field_item->field; + add_key_field(key_fields, and_level, cond, field, + eq_func, val, num_values, usable_tables); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + /* + Add to the set of possible key values every substitution of + the field for an equal field included into item_equal + */ + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, and_level, cond, item->field, + eq_func, val, num_values, usable_tables); + } + } + } +} + static void add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, COND *cond, table_map usable_tables) @@ -2248,16 +2762,26 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, case Item_func::OPTIMIZE_NONE: break; case Item_func::OPTIMIZE_KEY: - // BETWEEN, IN, NOT + { + // BETWEEN, IN, NE if (cond_func->key_item()->real_item()->type() == Item::FIELD_ITEM && !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*)(cond_func->key_item()->real_item()))->field, - cond_func->argument_count() == 2 && - cond_func->functype() == Item_func::IN_FUNC, - cond_func->arguments()+1, cond_func->argument_count()-1, - usable_tables); + { + Item **values= cond_func->arguments()+1; + if (cond_func->functype() == Item_func::NE_FUNC && + cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && + !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) + values--; + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->key_item()->real_item()), + cond_func->argument_count() == 2 && + cond_func->functype() == Item_func::IN_FUNC, + values, + cond_func->argument_count()-1, + usable_tables); + } break; + } case Item_func::OPTIMIZE_OP: { bool equal_func=(cond_func->functype() == Item_func::EQ_FUNC || @@ -2266,21 +2790,19 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM && !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, - equal_func, - cond_func->arguments()+1, 1, usable_tables); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), + equal_func, + cond_func->arguments()+1, 1, usable_tables); } if (cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && cond_func->functype() != Item_func::LIKE_FUNC && !(cond_func->arguments()[1]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[1])->real_item()) - ->field, - equal_func, - cond_func->arguments(),1,usable_tables); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[1])->real_item(), + equal_func, + cond_func->arguments(),1,usable_tables); } break; } @@ -2292,15 +2814,55 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, Item *tmp=new Item_null; if (unlikely(!tmp)) // Should never be true return; - add_key_field(key_fields,*and_level,cond_func, - 
((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), cond_func->functype() == Item_func::ISNULL_FUNC, &tmp, 1, usable_tables); } break; + case Item_func::OPTIMIZE_EQUAL: + Item_equal *item_equal= (Item_equal *) cond; + Item *const_item= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item_field *item; + if (const_item) + { + /* + For each field field1 from item_equal consider the equality + field1=const_item as a condition allowing an index access of the table + with field1 by the keys value of field1. + */ + while ((item= it++)) + { + add_key_field(key_fields, *and_level, cond, item->field, + TRUE, &const_item, 1, usable_tables); + } + } + else + { + /* + Consider all pairs of different fields included into item_equal. + For each of them (field1, field1) consider the equality + field1=field2 as a condition allowing an index access of the table + with field1 by the keys value of field2. + */ + Item_equal_iterator fi(*item_equal); + while ((item= fi++)) + { + Field *field= item->field; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, *and_level, cond, field, + TRUE, (Item **) &item, 1, usable_tables); + } + } + it.rewind(); + } + } + break; } - return; } /* @@ -2443,15 +3005,19 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, - uint tables, COND *cond, table_map normal_tables, - SELECT_LEX *select_lex) + uint tables, COND *cond, COND_EQUAL *cond_equal, + table_map normal_tables, SELECT_LEX *select_lex) { uint and_level,i,found_eq_constant; KEY_FIELD *key_fields, *end, *field; + uint m= 1; + + if (cond_equal && cond_equal->max_members) + m= cond_equal->max_members; if (!(key_fields=(KEY_FIELD*) thd->alloc(sizeof(key_fields[0])* - (thd->lex->current_select->cond_count+1)*2))) + (thd->lex->current_select->cond_count+1)*2*m))) return TRUE; /* purecov: inspected */ and_level= 0; field= end= key_fields; @@ -2471,11 +3037,32 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } for (i=0 ; i < tables ; i++) { - if (join_tab[i].on_expr) - { - add_key_fields(join_tab,&end,&and_level,join_tab[i].on_expr, + /* + Block the creation of keys for inner tables of outer joins. + Here only the outer joins that can not be converted to + inner joins are left and all nests that can be eliminated + are flattened. + In the future when we introduce conditional accesses + for inner tables in outer joins these keys will be taken + into account as well. 
+ */ + if (*join_tab[i].on_expr_ref) + { + add_key_fields(join_tab,&end,&and_level,*join_tab[i].on_expr_ref, join_tab[i].table->map); } + else + { + TABLE_LIST *tab= join_tab[i].table->pos_in_table_list; + TABLE_LIST *embedding= tab->embedding; + if (embedding) + { + NESTED_JOIN *nested_join= embedding->nested_join; + if (nested_join->join_list.head() == tab) + add_key_fields(join_tab, &end, &and_level, embedding->on_expr, + nested_join->used_tables); + } + } } /* fill keyuse with found key parts */ for ( ; field != end ; field++) @@ -2538,7 +3125,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } /* - Update some values in keyuse for faster find_best_combination() loop + Update some values in keyuse for faster choose_plan() loop */ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) @@ -2579,6 +3166,68 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) } +/* + Discover the indexes that can be used for GROUP BY or DISTINCT queries. + + SYNOPSIS + add_group_and_distinct_keys() + join + join_tab + + DESCRIPTION + If the query has a GROUP BY clause, find all indexes that contain all + GROUP BY fields, and add those indexes to join->const_keys. + If the query has a DISTINCT clause, find all indexes that contain all + SELECT fields, and add those indexes to join->const_keys. + This allows later on such queries to be processed by a + QUICK_GROUP_MIN_MAX_SELECT. + + RETURN + None +*/ + +static void +add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab) +{ + List<Item_field> indexed_fields; + List_iterator<Item_field> indexed_fields_it(indexed_fields); + ORDER *cur_group; + Item_field *cur_item; + key_map possible_keys(0); + + if (join->group_list) + { /* Collect all query fields referenced in the GROUP clause. */ + for (cur_group= join->group_list; cur_group; cur_group= cur_group->next) + (*cur_group->item)->walk(&Item::collect_item_field_processor, + (byte*) &indexed_fields); + } + else if (join->select_distinct) + { /* Collect all query fields referenced in the SELECT clause. */ + List<Item> &select_items= join->fields_list; + List_iterator<Item> select_items_it(select_items); + Item *item; + while ((item= select_items_it++)) + item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields); + } + else + return; + + if (indexed_fields.elements == 0) + return; + + /* Intersect the keys of all group fields. */ + cur_item= indexed_fields_it++; + possible_keys.merge(cur_item->field->part_of_key); + while ((cur_item= indexed_fields_it++)) + { + possible_keys.intersect(cur_item->field->part_of_key); + } + + if (!possible_keys.is_clear_all()) + join_tab->const_keys.merge(possible_keys); +} + + /***************************************************************************** Go through all combinations of not marked tables and find the one which uses least records @@ -2606,16 +3255,956 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key) } +/* + Find the best access path for an extension of a partial execution plan and + add this path to the plan. 
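Before the access-path code below, a rough bitmap illustration of the index intersection performed by add_group_and_distinct_keys() above (plain uint64 instead of the server's key_map class, made-up index assignments):

#include <cstdint>
#include <cstdio>

int main()
{
  /* bit k set means: index k covers the field (hypothetical table) */
  uint64_t part_of_key_f1= 0x3;   /* GROUP BY field f1 is in indexes 0 and 1 */
  uint64_t part_of_key_f2= 0x6;   /* GROUP BY field f2 is in indexes 1 and 2 */

  uint64_t possible_keys= part_of_key_f1;
  possible_keys&= part_of_key_f2; /* keep only indexes covering every field */

  printf("indexes usable for GROUP BY: 0x%llx\n",
         (unsigned long long) possible_keys);   /* 0x2 -> index 1 */
  return 0;
}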
+ + SYNOPSIS + best_access_path() + join pointer to the structure providing all context info + for the query + s the table to be joined by the function + thd thread for the connection that submitted the query + remaining_tables set of tables not included into the partial plan yet + idx the length of the partial plan + record_count estimate for the number of records returned by the partial + plan + read_time the cost of the partial plan + + DESCRIPTION + The function finds the best access path to table 's' from the passed + partial plan where an access path is the general term for any means to + access the data in 's'. An access path may use either an index or a scan, + whichever is cheaper. The input partial plan is passed via the array + 'join->positions' of length 'idx'. The chosen access method for 's' and its + cost are stored in 'join->positions[idx]'. + + RETURN + None +*/ + static void -find_best_combination(JOIN *join, table_map rest_tables) +best_access_path(JOIN *join, + JOIN_TAB *s, + THD *thd, + table_map remaining_tables, + uint idx, + double record_count, + double read_time) +{ + KEYUSE *best_key= 0; + uint best_max_key_part= 0; + my_bool found_constraint= 0; + double best= DBL_MAX; + double best_time= DBL_MAX; + double records= DBL_MAX; + double tmp; + ha_rows rec; + + DBUG_ENTER("best_access_path"); + + if (s->keyuse) + { /* Use key if possible */ + TABLE *table= s->table; + KEYUSE *keyuse,*start_key=0; + double best_records= DBL_MAX; + uint max_key_part=0; + + /* Test how we can use keys */ + rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key + for (keyuse=s->keyuse ; keyuse->table == table ;) + { + key_part_map found_part= 0; + table_map found_ref= 0; + uint found_ref_or_null= 0; + uint key= keyuse->key; + KEY *keyinfo= table->key_info+key; + bool ft_key= (keyuse->keypart == FT_KEYPART); + + /* Calculate how many key segments of the current key we can use */ + start_key= keyuse; + do + { /* for each keypart */ + uint keypart= keyuse->keypart; + uint found_part_ref_or_null= KEY_OPTIMIZE_REF_OR_NULL; + do + { + if (!(remaining_tables & keyuse->used_tables) && + !(found_ref_or_null & keyuse->optimize)) + { + found_part|= keyuse->keypart_map; + found_ref|= keyuse->used_tables; + if (rec > keyuse->ref_table_rows) + rec= keyuse->ref_table_rows; + found_part_ref_or_null&= keyuse->optimize; + } + keyuse++; + found_ref_or_null|= found_part_ref_or_null; + } while (keyuse->table == table && keyuse->key == key && + keyuse->keypart == keypart); + } while (keyuse->table == table && keyuse->key == key); + + /* + Assume that that each key matches a proportional part of table. + */ + if (!found_part && !ft_key) + continue; // Nothing usable found + + if (rec < MATCHING_ROWS_IN_OTHER_TABLE) + rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables + + /* + ft-keys require special treatment + */ + if (ft_key) + { + /* + Really, there should be records=0.0 (yes!) 
+ but 1.0 would be probably safer + */ + tmp= prev_record_reads(join, found_ref); + records= 1.0; + } + else + { + found_constraint= 1; + /* + Check if we found full key + */ + if (found_part == PREV_BITS(uint,keyinfo->key_parts) && + !found_ref_or_null) + { /* use eq key */ + max_key_part= (uint) ~0; + if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + tmp = prev_record_reads(join, found_ref); + records=1.0; + } + else + { + if (!found_ref) + { /* We found a const key */ + if (table->quick_keys.is_set(key)) + records= (double) table->quick_rows[key]; + else + { + /* quick_range couldn't use key! */ + records= (double) s->records/rec; + } + } + else + { + if (!(records=keyinfo->rec_per_key[keyinfo->key_parts-1])) + { /* Prefer longer keys */ + records= + ((double) s->records / (double) rec * + (1.0 + + ((double) (table->max_key_length-keyinfo->key_length) / + (double) table->max_key_length))); + if (records < 2.0) + records=2.0; /* Can't be as good as a unique */ + } + } + /* Limit the number of matched rows */ + tmp = records; + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp = record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp = record_count*min(tmp,s->worst_seeks); + } + } + else + { + /* + Use as much key-parts as possible and a uniq key is better + than a not unique key + Set tmp to (previous record count) * (records / combination) + */ + if ((found_part & 1) && + (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) || + found_part == PREV_BITS(uint,keyinfo->key_parts))) + { + max_key_part=max_part_bit(found_part); + /* + Check if quick_range could determinate how many rows we + will match + */ + if (table->quick_keys.is_set(key) && + table->quick_key_parts[key] == max_key_part) + tmp= records= (double) table->quick_rows[key]; + else + { + /* Check if we have statistic about the distribution */ + if ((records = keyinfo->rec_per_key[max_key_part-1])) + tmp = records; + else + { + /* + Assume that the first key part matches 1% of the file + and that the hole key matches 10 (duplicates) or 1 + (unique) records. + Assume also that more key matches proportionally more + records + This gives the formula: + records = (x * (b-a) + a*c-b)/(c-1) + + b = records matched by whole key + a = records matched by first key part (10% of all records?) + c = number of key parts in key + x = used key parts (1 <= x <= c) + */ + double rec_per_key; + if (!(rec_per_key=(double) + keyinfo->rec_per_key[keyinfo->key_parts-1])) + rec_per_key=(double) s->records/rec+1; + + if (!s->records) + tmp = 0; + else if (rec_per_key/(double) s->records >= 0.01) + tmp = rec_per_key; + else + { + double a=s->records*0.01; + tmp = (max_key_part * (rec_per_key - a) + + a*keyinfo->key_parts - rec_per_key)/ + (keyinfo->key_parts-1); + set_if_bigger(tmp,1.0); + } + records = (ulong) tmp; + } + /* + If quick_select was used on a part of this key, we know + the maximum number of rows that the key can match. 
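                A standalone numeric sketch of this interpolation (plain doubles
                and made-up statistics; not the server's structures):

#include <algorithm>
#include <cstdio>

int main()
{
  double table_records= 100000;   /* rows in the table                    */
  double b= 10;                   /* rec_per_key for the whole key        */
  double c= 4;                    /* key parts in the index               */
  double x= 2;                    /* key parts usable from the plan       */
  double a= table_records * 0.01; /* assumed match of the first key part  */

  double records= (x * (b - a) + a * c - b) / (c - 1);
  records= std::max(records, 1.0);

  /* x=1 yields a (1% of the table), x=c yields exactly b; here: 670 rows */
  printf("estimated matching rows: %g\n", records);
  return 0;
}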
+ */ + if (table->quick_keys.is_set(key) && + table->quick_key_parts[key] <= max_key_part && + records > (double) table->quick_rows[key]) + tmp= records= (double) table->quick_rows[key]; + else if (found_ref_or_null) + { + /* We need to do two key searches to find key */ + tmp *= 2.0; + records *= 2.0; + } + } + /* Limit the number of matched rows */ + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp = record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp = record_count*min(tmp,s->worst_seeks); + } + else + tmp = best_time; // Do nothing + } + } /* not ft_key */ + if (tmp < best_time - records/(double) TIME_FOR_COMPARE) + { + best_time= tmp + records/(double) TIME_FOR_COMPARE; + best= tmp; + best_records= records; + best_key= start_key; + best_max_key_part= max_key_part; + } + } + records= best_records; + } + + /* + Don't test table scan if it can't be better. + Prefer key lookup if we would use the same key for scanning. + + Don't do a table scan on InnoDB tables, if we can read the used + parts of the row from any of the used index. + This is because table scans uses index and we would not win + anything by using a table scan. + */ + if ((records >= s->found_records || best > s->read_time) && + !(s->quick && best_key && s->quick->index == best_key->key && + best_max_key_part >= s->table->quick_key_parts[best_key->key]) && + !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && + ! s->table->used_keys.is_clear_all() && best_key) && + !(s->table->force_index && best_key)) + { // Check full join + ha_rows rnd_records= s->found_records; + /* + If there is a restriction on the table, assume that 25% of the + rows can be skipped on next part. + This is to force tables that this table depends on before this + table + */ + if (found_constraint) + rnd_records-= rnd_records/4; + + /* + Range optimizer never proposes a RANGE if it isn't better + than FULL: so if RANGE is present, it's always preferred to FULL. + Here we estimate its cost. + */ + if (s->quick) + { + /* + For each record we: + - read record range through 'quick' + - skip rows which does not satisfy WHERE constraints + */ + tmp= record_count * + (s->quick->read_time + + (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* Estimate cost of reading table. */ + tmp= s->table->file->scan_time(); + if (s->table->map & join->outer_join) // Can't use join cache + { + /* + For each record we have to: + - read the whole table record + - skip rows which does not satisfy join condition + */ + tmp= record_count * + (tmp + + (s->records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* We read the table as many times as join buffer becomes full. */ + tmp*= (1.0 + floor((double) cache_record_length(join,idx) * + record_count / + (double) thd->variables.join_buff_size)); + /* + We don't make full cartesian product between rows in the scanned + table and existing records because we skip all rows from the + scanned table, which does not satisfy join condition when + we read the table (see flush_cached_records for details). Here we + take into account cost to read and skip these records. + */ + tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + } + } + + /* + We estimate the cost of evaluating WHERE clause for found records + as record_count * rnd_records / TIME_FOR_COMPARE. 
This cost plus + tmp give us total cost of using TABLE SCAN + */ + if (best == DBL_MAX || + (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records < + best + record_count/(double) TIME_FOR_COMPARE*records)) + { + /* + If the table has a range (s->quick is set) make_join_select() + will ensure that this will be used + */ + best= tmp; + records= rows2double(rnd_records); + best_key= 0; + } + } + + /* Update the cost information for the current partial plan */ + join->positions[idx].records_read= records; + join->positions[idx].read_time= best; + join->positions[idx].key= best_key; + join->positions[idx].table= s; + + if (!best_key && + idx == join->const_tables && + s->table == join->sort_by_table && + join->unit->select_limit_cnt >= records) + join->sort_by_table= (TABLE*) 1; // Must use temporary table + + DBUG_VOID_RETURN; +} + + +/* + Selects and invokes a search strategy for an optimal query plan. + + SYNOPSIS + choose_plan() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + The function checks user-configurable parameters that control the search + strategy for an optimal plan, selects the search method and then invokes + it. Each specific optimization procedure stores the final optimal plan in + the array 'join->best_positions', and the cost of the plan in + 'join->best_read'. + + RETURN + None +*/ + +static void +choose_plan(JOIN *join, table_map join_tables) { - DBUG_ENTER("find_best_combination"); - join->best_read=DBL_MAX; - find_best(join,rest_tables, join->const_tables,1.0,0.0); + uint search_depth= join->thd->variables.optimizer_search_depth; + uint prune_level= join->thd->variables.optimizer_prune_level; + + DBUG_ENTER("choose_plan"); + + if (join->select_options & SELECT_STRAIGHT_JOIN) + { + optimize_straight_join(join, join_tables); + } + else + { + /* + Heuristic: pre-sort all access plans with respect to the number of + records accessed. + */ + qsort(join->best_ref + join->const_tables, join->tables - join->const_tables, + sizeof(JOIN_TAB*), join_tab_cmp); + + if (search_depth == MAX_TABLES+2) + { /* + TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before + the greedy version. Will be removed when greedy_search is approved. + */ + join->best_read= DBL_MAX; + find_best(join, join_tables, join->const_tables, 1.0, 0.0); + } + else + { + if (search_depth == 0) + /* Automatically determine a reasonable value for 'search_depth' */ + search_depth= determine_search_depth(join); + greedy_search(join, join_tables, search_depth, prune_level); + } + } + + /* Store the cost of this query into a user variable */ + last_query_cost= join->best_read; + DBUG_VOID_RETURN; } +/* + Compare two JOIN_TAB objects based on the number of accessed records. + + SYNOPSIS + join_tab_cmp() + ptr1 pointer to first JOIN_TAB object + ptr2 pointer to second JOIN_TAB object + + RETURN + 1 if first is bigger + -1 if second is bigger + 0 if equal +*/ + +static int +join_tab_cmp(const void* ptr1, const void* ptr2) +{ + JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; + JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + + if (jt1->dependent & jt2->table->map) + return 1; + if (jt2->dependent & jt1->table->map) + return -1; + if (jt1->found_records > jt2->found_records) + return 1; + if (jt1->found_records < jt2->found_records) + return -1; + return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0); +} + + +/* + Heuristic procedure to automatically guess a reasonable degree of + exhaustiveness for the greedy search procedure. 
+ + SYNOPSIS + determine_search_depth() + join pointer to the structure providing all context info for the query + + DESCRIPTION + The procedure estimates the optimization time and selects a search depth + big enough to result in a near-optimal QEP, that doesn't take too long to + find. If the number of tables in the query exceeds some constant, then + search_depth is set to this constant. + + NOTES + This is an extremely simplistic implementation that serves as a stub for a + more advanced analysis of the join. Ideally the search depth should be + determined by learning from previous query optimizations, because it will + depend on the CPU power (and other factors). + + RETURN + A positive integer that specifies the search depth (and thus the + exhaustiveness) of the depth-first search algorithm used by + 'greedy_search'. +*/ + +static uint +determine_search_depth(JOIN *join) +{ + uint table_count= join->tables - join->const_tables; + uint search_depth; + /* TODO: this value should be determined dynamically, based on statistics: */ + uint max_tables_for_exhaustive_opt= 7; + + if (table_count <= max_tables_for_exhaustive_opt) + search_depth= table_count+1; // use exhaustive for small number of tables + else + /* + TODO: this value could be determined by some mapping of the form: + depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE] + */ + search_depth= max_tables_for_exhaustive_opt; // use greedy search + + return search_depth; +} + + +/* + Select the best ways to access the tables in a query without reordering them. + + SYNOPSIS + optimize_straight_join() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + Find the best access paths for each query table and compute their costs + according to their order in the array 'join->best_ref' (thus without + reordering the join tables). The function calls sequentially + 'best_access_path' for each table in the query to select the best table + access method. The final optimal plan is stored in the array + 'join->best_positions', and the corresponding cost in 'join->best_read'. + + NOTES + This function can be applied to: + - queries with STRAIGHT_JOIN + - internally to compute the cost of an arbitrary QEP + Thus 'optimize_straight_join' can be used at any stage of the query + optimization process to finalize a QEP as it is. + + RETURN + None +*/ + +static void +optimize_straight_join(JOIN *join, table_map join_tables) +{ + JOIN_TAB *s; + uint idx= join->const_tables; + double record_count= 1.0; + double read_time= 0.0; + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, join->thd, join_tables, idx, record_count, read_time); + /* compute the cost of the new plan extended with 's' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + join_tables&= ~(s->table->map); + ++idx; + } + + read_time+= record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != join->positions[join->const_tables].table->table) + read_time+= record_count; // We have to make a temp table + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION)*idx); + join->best_read= read_time; +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a greedy search. 
+ + SYNOPSIS + join pointer to the structure providing all context info + for the query + remaining_tables set of tables not included into the partial plan yet + search_depth controlls the exhaustiveness of the search + prune_level the pruning heuristics that should be applied during + search + + DESCRIPTION + The search procedure uses a hybrid greedy/exhaustive search with controlled + exhaustiveness. The search is performed in N = card(remaining_tables) + steps. Each step evaluates how promising is each of the unoptimized tables, + selects the most promising table, and extends the current partial QEP with + that table. Currenly the most 'promising' table is the one with least + expensive extension. + There are two extreme cases: + 1. When (card(remaining_tables) < search_depth), the estimate finds the best + complete continuation of the partial QEP. This continuation can be + used directly as a result of the search. + 2. When (search_depth == 1) the 'best_extension_by_limited_search' + consideres the extension of the current QEP with each of the remaining + unoptimized tables. + All other cases are in-between these two extremes. Thus the parameter + 'search_depth' controlls the exhaustiveness of the search. The higher the + value, the longer the optimizaton time and possibly the better the + resulting plan. The lower the value, the fewer alternative plans are + estimated, but the more likely to get a bad QEP. + + All intermediate and final results of the procedure are stored in 'join': + join->positions modified for every partial QEP that is explored + join->best_positions modified for the current best complete QEP + join->best_read modified for the current best complete QEP + join->best_ref might be partially reordered + The final optimal plan is stored in 'join->best_positions', and its + corresponding cost in 'join->best_read'. + + NOTES + The following pseudocode describes the algorithm of 'greedy_search': + + procedure greedy_search + input: remaining_tables + output: pplan; + { + pplan = <>; + do { + (t, a) = best_extension(pplan, remaining_tables); + pplan = concat(pplan, (t, a)); + remaining_tables = remaining_tables - t; + } while (remaining_tables != {}) + return pplan; + } + + where 'best_extension' is a placeholder for a procedure that selects the + most "promising" of all tables in 'remaining_tables'. + Currently this estimate is performed by calling + 'best_extension_by_limited_search' to evaluate all extensions of the + current QEP of size 'search_depth', thus the complexity of 'greedy_search' + mainly depends on that of 'best_extension_by_limited_search'. + + If 'best_extension()' == 'best_extension_by_limited_search()', then the + worst-case complexity of this algorithm is <= + O(N*N^search_depth/search_depth). When serch_depth >= N, then the + complexity of greedy_search is O(N!). + + In the future, 'greedy_search' might be extended to support other + implementations of 'best_extension', e.g. some simpler quadratic procedure. 
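    A toy standalone rendering of this loop, using the simplest possible
    best_extension (one-table lookahead and a made-up cost model; the real
    procedure below calls best_extension_by_limited_search instead):

#include <cstdio>
#include <string>
#include <vector>

struct ToyTable { std::string name; double rows; };

int main()
{
  std::vector<ToyTable> remaining= { {"t1", 1000}, {"t2", 10}, {"t3", 200} };
  std::vector<ToyTable> plan;
  double record_count= 1.0;

  while (!remaining.empty())
  {
    size_t best= 0;                        // "most promising" = fewest rows here
    for (size_t i= 1; i < remaining.size(); i++)
      if (remaining[i].rows < remaining[best].rows)
        best= i;
    record_count*= remaining[best].rows;   // cost of the extended partial plan
    plan.push_back(remaining[best]);
    remaining.erase(remaining.begin() + best);
  }

  for (const ToyTable &t : plan)
    printf("%s ", t.name.c_str());
  printf("-> estimated rows: %g\n", record_count);
  return 0;
}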
+ + RETURN + None +*/ + +static void +greedy_search(JOIN *join, + table_map remaining_tables, + uint search_depth, + uint prune_level) +{ + double record_count= 1.0; + double read_time= 0.0; + uint idx= join->const_tables; // index into 'join->best_ref' + uint best_idx; + uint rem_size; // cardinality of remaining_tables + POSITION best_pos; + JOIN_TAB *best_table; // the next plan node to be added to the curr QEP + + DBUG_ENTER("greedy_search"); + + /* number of tables that remain to be optimized */ + rem_size= my_count_bits(remaining_tables); + + do { + /* Find the extension of the current QEP with the lowest cost */ + join->best_read= DBL_MAX; + best_extension_by_limited_search(join, remaining_tables, idx, record_count, + read_time, search_depth, prune_level); + + if (rem_size <= search_depth) + { + /* + 'join->best_positions' contains a complete optimal extension of the + current partial QEP. + */ + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, + join->tables, "optimal");); + DBUG_VOID_RETURN; + } + + /* select the first table in the optimal extension as most promising */ + best_pos= join->best_positions[idx]; + best_table= best_pos.table; + /* + Each subsequent loop of 'best_extension_by_limited_search' uses + 'join->positions' for cost estimates, therefore we have to update its + value. + */ + join->positions[idx]= best_pos; + + /* find the position of 'best_table' in 'join->best_ref' */ + best_idx= idx; + JOIN_TAB *pos= join->best_ref[best_idx]; + while (pos && best_table != pos) + pos= join->best_ref[++best_idx]; + DBUG_ASSERT((pos != NULL)); // should always find 'best_table' + /* move 'best_table' at the first free position in the array of joins */ + swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]); + + /* compute the cost of the new plan extended with 'best_table' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + + remaining_tables&= ~(best_table->table->map); + --rem_size; + ++idx; + + DBUG_EXECUTE("opt", + print_plan(join, read_time, record_count, idx, "extended");); + } while (TRUE); +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a possibly + exhaustive search. + + SYNOPSIS + best_extension_by_limited_search() + join pointer to the structure providing all context info for + the query + remaining_tables set of tables not included into the partial plan yet + idx length of the partial QEP in 'join->positions'; + since a depth-first search is used, also corresponds to + the current depth of the search tree; + also an index in the array 'join->best_ref'; + record_count estimate for the number of records returned by the best + partial plan + read_time the cost of the best partial plan + search_depth maximum depth of the recursion and thus size of the found + optimal plan (0 < search_depth <= join->tables+1). + prune_level pruning heuristics that should be applied during optimization + (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS) + + DESCRIPTION + The procedure searches for the optimal ordering of the query tables in set + 'remaining_tables' of size N, and the corresponding optimal access paths to each + table. The choice of a table order and an access path for each table + constitutes a query execution plan (QEP) that fully specifies how to + execute the query. + + The maximal size of the found plan is controlled by the parameter + 'search_depth'. When search_depth == N, the resulting plan is complete and + can be used directly as a QEP. 
If search_depth < N, the found plan consists + of only some of the query tables. Such "partial" optimal plans are useful + only as input to query optimization procedures, and cannot be used directly + to execute a query. + + The algorithm begins with an empty partial plan stored in 'join->positions' + and a set of N tables - 'remaining_tables'. Each step of the algorithm + evaluates the cost of the partial plan extended by all access plans for + each of the relations in 'remaining_tables', expands the current partial + plan with the access plan that results in lowest cost of the expanded + partial plan, and removes the corresponding relation from + 'remaining_tables'. The algorithm continues until it either constructs a + complete optimal plan, or constructs an optimal plartial plan with size = + search_depth. + + The final optimal plan is stored in 'join->best_positions'. The + corresponding cost of the optimal plan is in 'join->best_read'. + + NOTES + The procedure uses a recursive depth-first search where the depth of the + recursion (and thus the exhaustiveness of the search) is controlled by the + parameter 'search_depth'. + + The pseudocode below describes the algorithm of + 'best_extension_by_limited_search'. The worst-case complexity of this + algorithm is O(N*N^search_depth/search_depth). When serch_depth >= N, then + the complexity of greedy_search is O(N!). + + procedure best_extension_by_limited_search( + pplan in, // in, partial plan of tables-joined-so-far + pplan_cost, // in, cost of pplan + remaining_tables, // in, set of tables not referenced in pplan + best_plan_so_far, // in/out, best plan found so far + best_plan_so_far_cost,// in/out, cost of best_plan_so_far + search_depth) // in, maximum size of the plans being considered + { + for each table T from remaining_tables + { + // Calculate the cost of using table T as above + cost = complex-series-of-calculations; + + // Add the cost to the cost so far. + pplan_cost+= cost; + + if (pplan_cost >= best_plan_so_far_cost) + // pplan_cost already too great, stop search + continue; + + pplan= expand pplan by best_access_method; + remaining_tables= remaining_tables - table T; + if (remaining_tables is not an empty set + and + search_depth > 1) + { + best_extension_by_limited_search(pplan, pplan_cost, + remaining_tables, + best_plan_so_far, + best_plan_so_far_cost, + search_depth - 1); + } + else + { + best_plan_so_far_cost= pplan_cost; + best_plan_so_far= pplan; + } + } + } + + IMPLEMENTATION + When 'best_extension_by_limited_search' is called for the first time, + 'join->best_read' must be set to the largest possible value (e.g. DBL_MAX). + The actual implementation provides a way to optionally use pruning + heuristic (controlled by the parameter 'prune_level') to reduce the search + space by skipping some partial plans. + The parameter 'search_depth' provides control over the recursion + depth, and thus the size of the resulting optimal plan. + + RETURN + None +*/ + +static void +best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, + double record_count, + double read_time, + uint search_depth, + uint prune_level) +{ + THD *thd= join->thd; + if (thd->killed) // Abort + return; + + DBUG_ENTER("best_extension_by_limited_search"); + + /* + 'join' is a partial plan with lower cost than the best plan so far, + so continue expanding it further with the tables in 'remaining_tables'. 
+ */ + JOIN_TAB *s; + double best_record_count= DBL_MAX; + double best_read_time= DBL_MAX; + + DBUG_EXECUTE("opt", + print_plan(join, read_time, record_count, idx, "part_plan");); + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + table_map real_table_bit= s->table->map; + if ((remaining_tables & real_table_bit) && !(remaining_tables & s->dependent)) + { + double current_record_count, current_read_time; + + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, thd, remaining_tables, idx, record_count, read_time); + /* Compute the cost of extending the plan with 's' */ + current_record_count= record_count * join->positions[idx].records_read; + current_read_time= read_time + join->positions[idx].read_time; + + /* Expand only partial plans with lower cost than the best QEP so far */ + if ((current_read_time + + current_record_count / (double) TIME_FOR_COMPARE) >= join->best_read) + { + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, idx, + "prune_by_cost");); + continue; + } + + /* + Prune some less promising partial plans. This heuristic may miss + the optimal QEPs, thus it results in a non-exhaustive search. + */ + if (prune_level == 1) + { + if (best_record_count > current_record_count || + best_read_time > current_read_time || + idx == join->const_tables && // 's' is the first table in the QEP + s->table == join->sort_by_table) + { + if (best_record_count >= current_record_count && + best_read_time >= current_read_time && + /* TODO: What is the reasoning behind this condition? */ + (!(s->key_dependent & remaining_tables) || + join->positions[idx].records_read < 2.0)) + { + best_record_count= current_record_count; + best_read_time= current_read_time; + } + } + else + { + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, idx, + "pruned_by_heuristic");); + continue; + } + } + + if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) ) + { /* Recursively expand the current partial plan */ + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + best_extension_by_limited_search(join, + remaining_tables & ~real_table_bit, + idx + 1, + current_record_count, + current_read_time, + search_depth - 1, + prune_level); + if (thd->killed) + DBUG_VOID_RETURN; + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + } + else + { /* + 'join' is either the best partial QEP with 'search_depth' relations, + or the best complete QEP so far, whichever is smaller. + */ + current_read_time+= current_record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != join->positions[join->const_tables].table->table) + /* We have to make a temp table */ + current_read_time+= current_record_count; + if ((search_depth == 1) || (current_read_time < join->best_read)) + { + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION) * (idx + 1)); + join->best_read= current_read_time - 0.001; + } + DBUG_EXECUTE("opt", + print_plan(join, current_read_time, current_record_count, idx, "full_plan");); + } + } + } + DBUG_VOID_RETURN; +} + + +/* + TODO: this function is here only temporarily until 'greedy_search' is + tested and accepted. +*/ static void find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, double read_time) @@ -2935,7 +4524,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { /* Estimate cost of reading table. 
*/ tmp= s->table->file->scan_time(); - if (s->on_expr) // Can't use join cache + if (s->table->map & join->outer_join) // Can't use join cache { /* For each record we have to: @@ -3125,7 +4714,7 @@ get_best_combination(JOIN *join) form=join->table[tablenr]=j->table; used_tables|= form->map; form->reginfo.join_tab=j; - if (!j->on_expr) + if (!*j->on_expr_ref) form->reginfo.not_exists_optimize=0; // Only with LEFT JOIN if (j->type == JT_CONST) continue; // Handled in make_join_stat.. @@ -3380,7 +4969,9 @@ make_simple_join(JOIN *join,TABLE *tmp_table) join_tab->type= JT_ALL; /* Map through all records */ join_tab->keys.init(~0); /* test everything in quick */ join_tab->info=0; - join_tab->on_expr=0; + join_tab->on_expr_ref=0; + join_tab->last_inner= 0; + join_tab->first_unmatched= 0; join_tab->ref.key = -1; join_tab->not_used_in_distinct=0; join_tab->read_first_record= join_init_read_record; @@ -3392,6 +4983,134 @@ make_simple_join(JOIN *join,TABLE *tmp_table) } +/* + Build a predicate guarded by match variables for embedding outer joins + + SYNOPSIS + add_found_match_trig_cond() + tab the first inner table for most nested outer join + cond the predicate to be guarded + root_tab the first inner table to stop + + DESCRIPTION + The function recursively adds guards for predicate cond + assending from tab to the first inner table next embedding + nested outer join and so on until it reaches root_tab + (root_tab can be 0). + + RETURN VALUE + pointer to the guarded predicate, if success + 0, otherwise +*/ + +static COND* +add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab) +{ + COND *tmp; + if (tab == root_tab || !cond) + return cond; + if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab))) + { + tmp= new Item_func_trig_cond(tmp, &tab->found); + } + if (tmp) + tmp->quick_fix_field(); + return tmp; +} + + +/* + Fill in outer join related info for the execution plan structure + + SYNOPSIS + make_outerjoin_info() + join - reference to the info fully describing the query + + DESCRIPTION + For each outer join operation left after simplification of the + original query the function set up the following pointers in the linear + structure join->join_tab representing the selected execution plan. + The first inner table t0 for the operation is set to refer to the last + inner table tk through the field t0->last_inner. + Any inner table ti for the operation are set to refer to the first + inner table ti->first_inner. + The first inner table t0 for the operation is set to refer to the + first inner table of the embedding outer join operation, if there is any, + through the field t0->first_upper. + The on expression for the outer join operation is attached to the + corresponding first inner table through the field t0->on_expr_ref. + Here ti are structures of the JOIN_TAB type. + + EXAMPLE + For the query: + SELECT * FROM t1 + LEFT JOIN + (t2, t3 LEFT JOIN t4 ON t3.a=t4.a) + ON (t1.a=t2.a AND t1.b=t3.b) + WHERE t1.c > 5, + given the execution plan with the table order t1,t2,t3,t4 + is selected, the following references will be set; + t4->last_inner=[t4], t4->first_inner=[t4], t4->first_upper=[t2] + t2->last_inner=[t4], t2->first_inner=t3->first_inner=[t2], + on expression (t1.a=t2.a AND t1.b=t3.b) will be attached to + *t2->on_expr_ref, while t3.a=t4.a will be attached to *t4->on_expr_ref. + + NOTES + The function assumes that the simplification procedure has been + already applied to the join query (see simplify_joins). 
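(Illustrative aside, not part of the patch.) The pointer wiring described above can be pictured with a small standalone structure; the JoinTab struct below is a simplified stand-in for JOIN_TAB, and the links are set by hand to mirror the EXAMPLE query rather than computed as make_outerjoin_info() would.

// Standalone sketch of the first_inner/last_inner/first_upper links that the
// comment above describes; the struct is an assumption of this sketch only.
#include <cstdio>
#include <initializer_list>

struct JoinTab
{
  const char *name;
  JoinTab *first_inner;   // first inner table of the innermost outer join
  JoinTab *last_inner;    // last inner table (set on the first inner table)
  JoinTab *first_upper;   // first inner table of the embedding outer join
};

int main()
{
  // Plan order t1,t2,t3,t4 for:
  //   t1 LEFT JOIN (t2, t3 LEFT JOIN t4 ON t3.a=t4.a)
  //      ON (t1.a=t2.a AND t1.b=t3.b)
  JoinTab t1= {"t1", nullptr, nullptr, nullptr};
  JoinTab t2= {"t2", nullptr, nullptr, nullptr};
  JoinTab t3= {"t3", nullptr, nullptr, nullptr};
  JoinTab t4= {"t4", nullptr, nullptr, nullptr};

  t2.first_inner= &t2;                      // outer join #1 spans t2..t4
  t3.first_inner= &t2;
  t2.last_inner= &t4;
  t4.first_inner= &t4;                      // t4 is the only inner table of #2
  t4.last_inner= &t4;
  t4.first_upper= &t2;                      // join #2 is embedded in join #1

  for (JoinTab *t : {&t1, &t2, &t3, &t4})
    std::printf("%s first_inner=%s last_inner=%s first_upper=%s\n",
                t->name,
                t->first_inner ? t->first_inner->name : "-",
                t->last_inner  ? t->last_inner->name  : "-",
                t->first_upper ? t->first_upper->name : "-");
  return 0;
}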
+ This function can be called only after the execution plan + has been chosen. +*/ + +static void +make_outerjoin_info(JOIN *join) +{ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + TABLE *table=tab->table; + TABLE_LIST *tbl= table->pos_in_table_list; + TABLE_LIST *embedding= tbl->embedding; + + if (tbl->outer_join) + { + /* + Table tab is the only one inner table for outer join. + (Like table t4 for the table reference t3 LEFT JOIN t4 ON t3.a=t4.a + is in the query above.) + */ + tab->last_inner= tab->first_inner= tab; + tab->on_expr_ref= &tbl->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding) + tab->first_upper= embedding->nested_join->first_nested; + } + for ( ; embedding ; embedding= embedding->embedding) + { + NESTED_JOIN *nested_join= embedding->nested_join; + if (!nested_join->counter) + { + /* + Table tab is the first inner table for nested_join. + Save reference to it in the nested join structure. + */ + nested_join->first_nested= tab; + tab->on_expr_ref= &embedding->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding->embedding) + tab->first_upper= embedding->embedding->nested_join->first_nested; + } + if (!tab->first_inner) + tab->first_inner= nested_join->first_nested; + if (++nested_join->counter < nested_join->join_list.elements) + break; + /* Table tab is the last inner table for nested join. */ + nested_join->first_nested->last_inner= tab; + } + } +} + + static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { @@ -3399,20 +5118,45 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (select) { table_map used_tables; - if (join->tables > 1) - cond->update_used_tables(); // Tablenr may have changed - if (join->const_tables == join->tables && - join->thd->lex->current_select->master_unit() == - &join->thd->lex->unit) // not upper level SELECT - join->const_table_map|=RAND_TABLE_BIT; - { // Check const tables - COND *const_cond= - make_cond_for_table(cond,join->const_table_map,(table_map) 0); - DBUG_EXECUTE("where",print_where(const_cond,"constants");); - if (const_cond && !const_cond->val_int()) - { - DBUG_PRINT("info",("Found impossible WHERE condition")); - DBUG_RETURN(1); // Impossible const condition + if (cond) /* Because of QUICK_GROUP_MIN_MAX_SELECT */ + { /* there may be a select without a cond. */ + if (join->tables > 1) + cond->update_used_tables(); // Tablenr may have changed + if (join->const_tables == join->tables && + join->thd->lex->current_select->master_unit() == + &join->thd->lex->unit) // not upper level SELECT + join->const_table_map|=RAND_TABLE_BIT; + { // Check const tables + COND *const_cond= + make_cond_for_table(cond,join->const_table_map,(table_map) 0); + DBUG_EXECUTE("where",print_where(const_cond,"constants");); + for (JOIN_TAB *tab= join->join_tab+join->const_tables; + tab < join->join_tab+join->tables ; tab++) + { + if (*tab->on_expr_ref) + { + JOIN_TAB *cond_tab= tab->first_inner; + COND *tmp= make_cond_for_table(*tab->on_expr_ref, + join->const_table_map, + ( table_map) 0); + if (!tmp) + continue; + tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl); + if (!tmp) + DBUG_RETURN(1); + tmp->quick_fix_field(); + cond_tab->select_cond= !cond_tab->select_cond ? 
tmp : + new Item_cond_and(cond_tab->select_cond,tmp); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + if (const_cond && !const_cond->val_int()) + { + DBUG_PRINT("info",("Found impossible WHERE condition")); + DBUG_RETURN(1); // Impossible const condition + } } } used_tables=((select->const_tables=join->const_table_map) | @@ -3420,14 +5164,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) for (uint i=join->const_tables ; i < join->tables ; i++) { JOIN_TAB *tab=join->join_tab+i; + JOIN_TAB *first_inner_tab= tab->first_inner; table_map current_map= tab->table->map; + bool use_quick_range=0; /* Following force including random expression in last table condition. It solve problem with select like SELECT * FROM t1 WHERE rand() > 0.5 */ if (i == join->tables-1) current_map|= OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; - bool use_quick_range=0; used_tables|=current_map; if (tab->type == JT_REF && tab->quick && @@ -3443,23 +5188,57 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) join->best_positions[i].records_read= rows2double(tab->quick->records); } - COND *tmp=make_cond_for_table(cond,used_tables,current_map); - if (!tmp && tab->quick) + COND *tmp= NULL; + if (cond) + tmp= make_cond_for_table(cond,used_tables,current_map); + if (cond && !tmp && tab->quick) { // Outer join - /* - Hack to handle the case where we only refer to a table - in the ON part of an OUTER JOIN. - */ - tmp=new Item_int((longlong) 1,1); // Always true + if (tab->type != JT_ALL) + { + /* + Don't use the quick method + We come here in the case where we have 'key=constant' and + the test is removed by make_cond_for_table() + */ + delete tab->quick; + tab->quick= 0; + } + else + { + /* + Hack to handle the case where we only refer to a table + in the ON part of an OUTER JOIN. In this case we want the code + below to check if we should use 'quick' instead. + */ + tmp= new Item_int((longlong) 1,1); // Always true + } + } - if (tmp) + if (tmp || !cond) { DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) DBUG_RETURN(1); // End of memory - tab->select_cond=sel->cond=tmp; + /* + If tab is an inner table of an outer join operation, + add a match guard to the pushed down predicate. + The guard will turn the predicate on only after + the first match for outer tables is encountered. + */ + if (cond) + {/* + Because of QUICK_GROUP_MIN_MAX_SELECT there may be a select without + a cond, so neutralize the hack above. + */ + if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0))) + DBUG_RETURN(1); + tab->select_cond=sel->cond=tmp; + } + else + tab->select_cond= sel->cond= NULL; + sel->head=tab->table; if (tab->quick) { @@ -3467,7 +5246,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) with key reading */ if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF && tab->type != JT_FT && (tab->type != JT_REF || - (uint) tab->ref.key == tab->quick->index)) + (uint) tab->ref.key == tab->quick->index)) { sel->quick=tab->quick; // Use value from get_quick_... 
sel->quick_keys.clear_all(); @@ -3498,7 +5277,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) the index if we are using limit and this is the first table */ - if ((!tab->keys.is_subset(tab->const_keys) && i > 0) || + if (cond && + (!tab->keys.is_subset(tab->const_keys) && i > 0) || (!tab->const_keys.is_clear_all() && i == join->const_tables && join->unit->select_limit_cnt < join->best_positions[i].records_read && @@ -3506,7 +5286,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { /* Join with outer join condition */ COND *orig_cond=sel->cond; - sel->cond= and_conds(sel->cond, tab->on_expr); + sel->cond= and_conds(sel->cond, *tab->on_expr_ref); /* We can't call sel->cond->fix_fields, @@ -3530,7 +5310,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) we have to check isn't it only "impossible ON" instead */ sel->cond=orig_cond; - if (!tab->on_expr || + if (!*tab->on_expr_ref || sel->test_quick_select(join->thd, tab->keys, used_tables & ~ current_map, (join->select_options & @@ -3565,7 +5345,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (i != join->const_tables && tab->use_quick != 2) { /* Read with cache */ - if ((tmp=make_cond_for_table(cond, + if (cond && + (tmp=make_cond_for_table(cond, join->const_table_map | current_map, current_map))) @@ -3578,17 +5359,70 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } } } + } + + /* + Push down all predicates from on expressions. + Each of these predicated are guarded by a variable + that turns if off just before null complemented row for + outer joins is formed. Thus, the predicates from an + 'on expression' are guaranteed not to be checked for + the null complemented row. + */ + JOIN_TAB *last_tab= tab; + while (first_inner_tab && first_inner_tab->last_inner == last_tab) + { + /* + Table tab is the last inner table of an outer join. + An on expression is always attached to it. + */ + COND *on_expr= *first_inner_tab->on_expr_ref; + + table_map used_tables= join->const_table_map | + OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; + for (tab= join->join_tab+join->const_tables; tab <= last_tab ; tab++) + { + current_map= tab->table->map; + used_tables|= current_map; + COND *tmp= make_cond_for_table(on_expr, used_tables, current_map); + if (tmp) + { + JOIN_TAB *cond_tab= tab < first_inner_tab ? first_inner_tab : tab; + /* + First add the guards for match variables of + all embedding outer join operations. + */ + if (!(tmp= add_found_match_trig_cond(cond_tab->first_inner, + tmp, first_inner_tab))) + DBUG_RETURN(1); + /* + Now add the guard turning the predicate off for + the null complemented row. + */ + tmp= new Item_func_trig_cond(tmp, + &first_inner_tab->not_null_compl); + if (tmp) + tmp->quick_fix_field(); + /* Add the predicate to other pushed down predicates */ + cond_tab->select_cond= !cond_tab->select_cond ? 
tmp : + new Item_cond_and(cond_tab->select_cond,tmp); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + first_inner_tab= first_inner_tab->first_upper; } } } DBUG_RETURN(0); } - static void make_join_readinfo(JOIN *join, uint options) { uint i; + bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); DBUG_ENTER("make_join_readinfo"); @@ -3672,7 +5506,7 @@ make_join_readinfo(JOIN *join, uint options) */ table->status=STATUS_NO_RECORD; if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) && - tab->use_quick != 2 && !tab->on_expr) + tab->use_quick != 2 && !tab->first_inner) { if ((options & SELECT_DESCRIBE) || !join_init_cache(join->thd,join->join_tab+join->const_tables, @@ -3687,7 +5521,8 @@ make_join_readinfo(JOIN *join, uint options) join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED; tab->read_first_record= join_init_quick_read_record; if (statistics) - statistic_increment(select_range_check_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_check_count, + &LOCK_status); } else { @@ -3697,13 +5532,15 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_range_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_count, + &LOCK_status); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_scan_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_scan_count, + &LOCK_status); } } else @@ -3711,13 +5548,15 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_full_range_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_range_join_count, + &LOCK_status); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_full_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_join_count, + &LOCK_status); } } if (!table->no_keyread) @@ -4051,7 +5890,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) table_map not_const_tables= ~join->const_table_map; table_map ref; prev_ptr= &first_order; - *simple_order= join->join_tab[join->const_tables].on_expr ? 0 : 1; + *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 
0 : 1; /* NOTE: A variable of not_const_tables ^ first_table; breaks gcc 2.7 */ @@ -4118,12 +5957,13 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, if (send_row) { - for (TABLE_LIST *table=tables; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_local) mark_as_null_row(table->table); // All fields are NULL if (having && having->val_int() == 0) send_row=0; } - if (!(result->send_fields(fields,1))) + if (!(result->send_fields(fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))) { if (send_row) { @@ -4174,6 +6014,790 @@ template class List<Item_func_match>; template class List_iterator<Item_func_match>; #endif + +/* + Find the multiple equality predicate containing a field + + SYNOPSIS + find_item_equal() + cond_equal multiple equalities to search in + field field to look for + inherited_fl :out set up to TRUE if multiple equality is found + on upper levels (not on current level of cond_equal) + + DESCRIPTION + The function retrieves the multiple equalities accessed through + the con_equal structure from current level and up looking for + an equality containing field. It stops retrieval as soon as the equality + is found and set up inherited_fl to TRUE if it's found on upper levels. + + RETURN + Item_equal for the found multiple equality predicate if a success; + NULL - otherwise. +*/ + +Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field, + bool *inherited_fl) +{ + Item_equal *item= 0; + bool in_upper_level= FALSE; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + goto finish; + } + in_upper_level= TRUE; + cond_equal= cond_equal->upper_levels; + } + in_upper_level= FALSE; +finish: + *inherited_fl= in_upper_level; + return item; +} + + +/* + Check whether an item is a simple equality predicate and if so + create/find a multiple equality for this predicate + + SYNOPSIS + check_equality() + item item to check + cond_equal multiple equalities that must hold together with the predicate + + DESCRIPTION + This function first checks whether an item is a simple equality i.e. + the one that equates a field with another field or a constant + (item=constant_item or item=field_item). + If this is the case the function looks a for a multiple equality + in the lists referenced directly or indirectly by cond_equal inferring + the given simple equality. If it doesn't find any, it builds a multiple + equality that covers the predicate, i.e. the predicate can be inferred + from it. + The built multiple equality could be obtained in such a way: + create a binary multiple equality equivalent to the predicate, then + merge it, if possible, with one of old multiple equalities. + This guarantees that the set of multiple equalities covering equality + predicates will + be minimal. + + EXAMPLE + For the where condition + WHERE a=b AND b=c AND + (b=2 OR f=e) + the check_equality will be called for the following equality + predicates a=b, b=c, b=2 and f=e. + For a=b it will be called with *cond_equal=(0,[]) and will transform + *cond_equal into (0,[Item_equal(a,b)]). + For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)]) + and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]). + For b=2 it will be called with *cond_equal=(ptr(CE),[]) + and will transform *cond_equal into (ptr(CE,[Item_equal(2,a,b,c)]). 
+    For f=e it will be called with *cond_equal=(ptr(CE), [])
+    and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]).
+
+  NOTES
+    Now only fields that have the same type definitions (verified by
+    the Field::eq_def method) are placed in the same multiple equalities.
+    Because of this some equality predicates are not eliminated and
+    can be used in the constant propagation procedure.
+    We could weaken the equality test as soon as at least one of the
+    equal fields is to be equal to a constant. It would require a
+    more complicated implementation: we would have to store, in the
+    general case, its own constant for each field from the multiple
+    equality. But at the same time it would allow us to get rid
+    of constant propagation completely: it would be done by the call
+    to build_equal_items_for_cond.
+
+  IMPLEMENTATION
+    The implementation does not follow exactly the above rules to
+    build a new multiple equality for the equality predicate.
+    If it processes an equality of the form field1=field2, it
+    looks for multiple equalities me1 containing field1 and me2 containing
+    field2. If only one of them is found, the function expands it with
+    the lacking field. If multiple equalities for both fields are
+    found, they are merged. If both searches fail, a new multiple equality
+    containing just field1 and field2 is added to the existing
+    multiple equalities.
+    If the function processes a predicate of the form field1=const,
+    it looks for a multiple equality containing field1. If found, the
+    function checks the constant of the multiple equality. If the value
+    is unknown, it is set to const. Otherwise the value is compared with
+    const and the evaluation of the equality predicate is performed.
+    When expanding/merging equality predicates from the upper levels
+    the function first copies them for the current level. This looks
+    acceptable, as it happens rarely. The implementation without
+    copying would be much more complicated.
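(Illustrative aside, not part of the patch.) The find/expand/merge behaviour described in IMPLEMENTATION can be sketched with ordinary containers; the MultEqual struct, the string field names and the helper functions below are assumptions of this toy example, not the server's Item_equal machinery.

// Toy illustration of folding simple equalities into multiple equalities,
// in the spirit of check_equality(); data layout is assumed for the demo.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct MultEqual { std::set<std::string> fields; std::string constant; };

static MultEqual *find_group(std::vector<MultEqual> &eqs, const std::string &f)
{
  for (MultEqual &e : eqs)
    if (e.fields.count(f))
      return &e;
  return nullptr;
}

// field=field: expand an existing group, merge two groups, or start a new one
static void add_field_eq(std::vector<MultEqual> &eqs,
                         const std::string &f1, const std::string &f2)
{
  MultEqual *g1= find_group(eqs, f1), *g2= find_group(eqs, f2);
  if (g1 && g2 && g1 != g2)                 // merge two multiple equalities
  {
    g1->fields.insert(g2->fields.begin(), g2->fields.end());
    eqs.erase(eqs.begin() + (g2 - &eqs[0]));
  }
  else if (g1) g1->fields.insert(f2);
  else if (g2) g2->fields.insert(f1);
  else eqs.push_back({{f1, f2}, ""});
}

// field=const: attach the constant to the group containing the field
static void add_const_eq(std::vector<MultEqual> &eqs,
                         const std::string &f, const std::string &c)
{
  MultEqual *g= find_group(eqs, f);
  if (g) g->constant= c; else eqs.push_back({{f}, c});
}

int main()
{
  std::vector<MultEqual> eqs;
  add_field_eq(eqs, "a", "b");              // =(a,b)
  add_field_eq(eqs, "b", "c");              // =(a,b,c)
  add_const_eq(eqs, "b", "2");              // =(2,a,b,c)
  add_field_eq(eqs, "f", "e");              // separate group =(e,f)
  for (const MultEqual &e : eqs)
  {
    std::string out= "=(";
    if (!e.constant.empty()) out+= e.constant;
    for (const std::string &f : e.fields)
    {
      if (out.size() > 2) out+= ",";
      out+= f;
    }
    std::printf("%s)\n", out.c_str());      // prints =(2,a,b,c) and =(e,f)
  }
  return 0;
}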
+ + RETURN + TRUE - if the predicate is a simple equality predicate + FALSE - otherwise +*/ + +static bool check_equality(Item *item, COND_EQUAL *cond_equal) +{ + if (item->type() == Item::FUNC_ITEM && + ((Item_func*) item)->functype() == Item_func::EQ_FUNC) + { + Item *left_item= ((Item_func*) item)->arguments()[0]; + Item *right_item= ((Item_func*) item)->arguments()[1]; + if (left_item->type() == Item::FIELD_ITEM && + right_item->type() == Item::FIELD_ITEM) + { + /* The predicate the form field1=field2 is processed */ + + Field *left_field= ((Item_field*) left_item)->field; + Field *right_field= ((Item_field*) right_item)->field; + + if (!left_field->eq_def(right_field)) + return FALSE; + + if (left_field->eq(right_field)) /* f = f */ + return TRUE; + + /* Search for multiple equalities containing field1 and/or field2 */ + bool left_copyfl, right_copyfl; + Item_equal *left_item_equal= + find_item_equal(cond_equal, left_field, &left_copyfl); + Item_equal *right_item_equal= + find_item_equal(cond_equal, right_field, &right_copyfl); + + if (left_item_equal && left_item_equal == right_item_equal) + { + /* + The equality predicate is inference of one of the existing + multiple equalities, i.e the condition is already covered + by upper level equalities + */ + return TRUE; + } + + /* Copy the found multiple equalities at the current level if needed */ + if (left_copyfl) + { + /* left_item_equal of an upper level contains left_item */ + left_item_equal= new Item_equal(left_item_equal); + cond_equal->current_level.push_back(left_item_equal); + } + if (right_copyfl) + { + /* right_item_equal of an upper level contains right_item */ + right_item_equal= new Item_equal(right_item_equal); + cond_equal->current_level.push_back(right_item_equal); + } + + if (left_item_equal) + { + /* left item was found in the current or one of the upper levels */ + if (! 
right_item_equal) + left_item_equal->add((Item_field *) right_item); + else + { + /* Merge two multiple equalities forming a new one */ + left_item_equal->merge(right_item_equal); + /* Remove the merged multiple equality from the list */ + List_iterator<Item_equal> li(cond_equal->current_level); + while ((li++) != right_item_equal); + li.remove(); + } + } + else + { + /* left item was not found neither the current nor in upper levels */ + if (right_item_equal) + right_item_equal->add((Item_field *) left_item); + else + { + /* None of the fields was found in multiple equalities */ + Item_equal *item= new Item_equal((Item_field *) left_item, + (Item_field *) right_item); + cond_equal->current_level.push_back(item); + } + } + return TRUE; + } + + { + /* The predicate of the form field=const/const=field is processed */ + Item *const_item= 0; + Item_field *field_item= 0; + if (left_item->type() == Item::FIELD_ITEM && + right_item->const_item()) + { + field_item= (Item_field*) left_item; + const_item= right_item; + } + else if (right_item->type() == Item::FIELD_ITEM && + left_item->const_item()) + { + field_item= (Item_field*) right_item; + const_item= left_item; + } + if (const_item && + field_item->result_type() == const_item->result_type()) + { + bool copyfl; + + if (field_item->result_type() == STRING_RESULT && + ((Field_str *) field_item->field)->charset() != + ((Item_cond *) item)->compare_collation()) + return FALSE; + + Item_equal *item_equal = find_item_equal(cond_equal, + field_item->field, ©fl); + if (copyfl) + { + item_equal= new Item_equal(item_equal); + cond_equal->current_level.push_back(item_equal); + } + if (item_equal) + { + /* + The flag cond_false will be set to 1 after this, if item_equal + already contains a constant and its value is not equal to + the value of const_item. + */ + item_equal->add(const_item); + } + else + { + item_equal= new Item_equal(const_item, field_item); + cond_equal->current_level.push_back(item_equal); + } + return TRUE; + } + } + } + return FALSE; +} + +/* + Replace all equality predicates in a condition by multiple equality items + + SYNOPSIS + build_equal_items_for_cond() + cond condition(expression) where to make replacement + inherited path to all inherited multiple equality items + + DESCRIPTION + At each 'and' level the function detects items for equality predicates + and replaced them by a set of multiple equality items of class Item_equal, + taking into account inherited equalities from upper levels. + If an equality predicate is used not in a conjunction it's just + replaced by a multiple equality predicate. + For each 'and' level the function set a pointer to the inherited + multiple equalities in the cond_equal field of the associated + object of the type Item_cond_and. + The function also traverses the cond tree and and for each field reference + sets a pointer to the multiple equality item containing the field, if there + is any. If this multiple equality equates fields to a constant the + function replace the field reference by the constant. + The function also determines the maximum number of members in + equality lists of each Item_cond_and object assigning it to + cond_equal->max_members of this object and updating accordingly + the upper levels COND_EQUAL structures. + + NOTES + Multiple equality predicate =(f1,..fn) is equivalent to the conjuction of + f1=f2, .., fn-1=fn. It substitutes any inference from these + equality predicates that is equivalent to the conjunction. 
+ Thus, =(a1,a2,a3) can substitute for ((a1=a3) AND (a2=a3) AND (a2=a1)) as + it is equivalent to ((a1=a2) AND (a2=a3)). + The function always makes a substitution of all equality predicates occured + in a conjuction for a minimal set of multiple equality predicates. + This set can be considered as a canonical representation of the + sub-conjunction of the equality predicates. + E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by + (=(t1.a,t2.b,t3.c) AND t2.b>5), not by + (=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5); + while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by + (=(t1.a,t2.b) AND =(t3.c=t4.d) AND t2.b>5), + but if additionally =(t4.d,t2.b) is inherited, it + will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5) + + IMPLEMENTATION + The function performs the substitution in a recursive descent by + the condtion tree, passing to the next AND level a chain of multiple + equality predicates which have been built at the upper levels. + The Item_equal items built at the level are attached to other + non-equality conjucts as a sublist. The pointer to the inherited + multiple equalities is saved in the and condition object (Item_cond_and). + This chain allows us for any field reference occurence easyly to find a + multiple equality that must be held for this occurence. + For each AND level we do the following: + - scan it for all equality predicate (=) items + - join them into disjoint Item_equal() groups + - process the included OR conditions recursively to do the same for + lower AND levels. + We need to do things in this order as lower AND levels need to know about + all possible Item_equal objects in upper levels. + + RETURN + pointer to the transformed condition +*/ + +static COND *build_equal_items_for_cond(COND *cond, + COND_EQUAL *inherited) +{ + Item_equal *item_equal; + uint members; + COND_EQUAL cond_equal; + cond_equal.upper_levels= inherited; + + if (cond->type() == Item::COND_ITEM) + { + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + List<Item> *args= ((Item_cond*) cond)->argument_list(); + + List_iterator<Item> li(*args); + Item *item; + + if (and_level) + { + /* + Retrieve all conjucts of this level detecting the equality + that are subject to substitution by multiple equality items and + removing each such predicate from the conjunction after having + found/created a multiple equality whose inference the predicate is. + */ + while ((item= li++)) + { + if (check_equality(item, &cond_equal)) + li.remove(); + } + + List_iterator_fast<Item_equal> it(cond_equal.current_level); + while ((item_equal= it++)) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + members= item_equal->members(); + if (cond_equal.max_members < members) + cond_equal.max_members= members; + } + members= cond_equal.max_members; + if (inherited && inherited->max_members < members) + { + do + { + inherited->max_members= members; + inherited= inherited->upper_levels; + } + while (inherited); + } + + ((Item_cond_and*)cond)->cond_equal= cond_equal; + inherited= &(((Item_cond_and*)cond)->cond_equal); + } + /* + Make replacement of equality predicates for lower levels + of the condition expression. 
+ */ + li.rewind(); + while((item= li++)) + { + Item *new_item; + if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + { + /* This replacement happens only for standalone equalities */ + li.replace(new_item); + } + } + if (and_level) + args->concat((List<Item> *)&cond_equal.current_level); + } + else if (cond->type() == Item::FUNC_ITEM) + { + /* + If an equality predicate forms the whole and level, + we call it standalone equality and it's processed here. + E.g. in the following where condition + WHERE a=5 AND (b=5 or a=c) + (b=5) and (a=c) are standalone equalities. + In general we can't leave alone standalone eqalities: + for WHERE a=b AND c=d AND (b=c OR d=5) + b=c is replaced by =(a,b,c,d). + */ + if (check_equality(cond, &cond_equal) && + (item_equal= cond_equal.current_level.pop())) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + return item_equal; + } + /* + For each field reference in cond, not from equalitym predicates, + set a pointer to the multiple equality if belongs to (if there is any) + */ + cond= cond->transform(&Item::equal_fields_propagator, + (byte *) inherited); + cond->update_used_tables(); + } + return cond; +} + + +/* + Build multiple equalities for a condition and all on expressions that + inherit these multiple equalities + + SYNOPSIS + build_equal_items() + thd Thread handler + cond condition to build the multiple equalities for + inherited path to all inherited multiple equality items + join_list list of join tables to which the condition refers to + cond_equal_ref :out pointer to the structure to place built equalities in + + DESCRIPTION + The function first applies the build_equal_items_for_cond function + to build all multiple equalities for condition cond utilizing equalities + referred through the parameter inherited. The extended set of + equalities is returned in the structure referred by the cond_equal_ref + parameter. After this the function calls itself recursively for + all on expressions whose direct references can be found in join_list + and who inherit directly the multiple equalities just having built. + + NOTES + The on expression used in an outer join operation inherits all equalities + from the on expression of the embedding join, if there is any, or + otherwise - from the where condition. + This fact is not obvious, but presumably can be proved. + Consider the following query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t2.a=t4.a + WHERE t1.a=t2.a; + If the on expression in the query inherits =(t1.a,t2.a), then we + can build the multiple equality =(t1.a,t2.a,t3.a,t4.a) that infers + the equality t3.a=t4.a. Although the on expression + t1.a=t3.a AND t2.a=t4.a AND t3.a=t4.a is not equivalent to the one + in the query the latter can be replaced by the former: the new query + will return the same result set as the original one. 
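(Illustrative aside, not part of the patch.) A minimal sketch of the inheritance just described: a lookup that searches the current level of multiple equalities first and then the inherited upper levels, in the spirit of find_item_equal(); the Level struct and the field naming are assumptions of the sketch, not the real COND_EQUAL.

// Sketch of a lookup that walks an upper_levels-style chain; data layout
// is an assumption of this illustration.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Level
{
  std::vector<std::set<std::string>> equalities;  // multiple equalities
  const Level *upper;                             // inherited levels
};

// Returns the equality containing 'field' and reports whether it came from
// an upper (inherited) level.
static const std::set<std::string> *find_equal(const Level *level,
                                               const std::string &field,
                                               bool *inherited)
{
  *inherited= false;
  for (; level; level= level->upper, *inherited= true)
    for (const std::set<std::string> &eq : level->equalities)
      if (eq.count(field))
        return &eq;
  *inherited= false;
  return nullptr;
}

int main()
{
  // WHERE level holds =(t1.a,t2.a); the ON level of the outer join inherits it.
  Level where_level= {{{"t1.a", "t2.a"}}, nullptr};
  Level on_level= {{{"t3.a", "t4.a"}}, &where_level};

  bool inherited;
  if (find_equal(&on_level, "t1.a", &inherited))
    std::printf("t1.a found, inherited=%d\n", inherited);   // inherited=1
  if (find_equal(&on_level, "t3.a", &inherited))
    std::printf("t3.a found, inherited=%d\n", inherited);   // inherited=0
  return 0;
}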
+ + Interesting that multiple equality =(t1.a,t2.a,t3.a,t4.a) allows us + to use t1.a=t3.a AND t3.a=t4.a under the on condition: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a + WHERE t1.a=t2.a + This query equivalent to: + SELECT * FROM (t1 LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a),t2 + WHERE t1.a=t2.a + Similarly the original query can be rewritten to the query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t2.a=t4.a AND t3.a=t4.a + WHERE t1.a=t2.a + that is equivalent to: + SELECT * FROM (t2 LEFT JOIN (t3,t4)ON t2.a=t4.a AND t3.a=t4.a), t1 + WHERE t1.a=t2.a + Thus, applying equalities from the where condition we basically + can get more freedom in performing join operations. + Althogh we don't use this property now, it probably makes sense to use + it in the future. + + RETURN + pointer to the transformed condition containing multiple equalities +*/ + +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref) +{ + COND_EQUAL *cond_equal= 0; + + if (cond) + { + cond= build_equal_items_for_cond(cond, inherited); + cond->update_used_tables(); + if (cond->type() == Item::COND_ITEM && + ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + cond_equal= &((Item_cond_and*) cond)->cond_equal; + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + cond_equal= new COND_EQUAL; + cond_equal->current_level.push_back((Item_equal *) cond); + } + } + if (cond_equal) + { + cond_equal->upper_levels= inherited; + inherited= cond_equal; + } + *cond_equal_ref= cond_equal; + + if (join_list) + { + TABLE_LIST *table; + List_iterator<TABLE_LIST> li(*join_list); + + while ((table= li++)) + { + if (table->on_expr) + { + Item *expr; + List<TABLE_LIST> *join_list= table->nested_join ? + &table->nested_join->join_list : NULL; + expr= build_equal_items(thd, table->on_expr, inherited, join_list, + &table->cond_equal); + if (expr != table->on_expr) + thd->change_item_tree(&table->on_expr, expr); + } + } + } + + return cond; +} + + +/* + Compare field items by table order in the execution plan + + SYNOPSIS + compare_fields_by_table_order() + field1 first field item to compare + field2 second field item to compare + table_join_idx index to tables determining table order + + DESCRIPTION + field1 considered as better than field2 if the table containing + field1 is accessed earlier than the table containing field2. + The function finds out what of two fields is better according + this criteria. + + RETURN + 1, if field1 is better than field2 + -1, if field2 is better than field1 + 0, otherwise +*/ + +static int compare_fields_by_table_order(Item_field *field1, + Item_field *field2, + void *table_join_idx) +{ + int cmp= 0; + bool outer_ref= 0; + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp= -1; + } + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp++; + } + if (outer_ref) + return cmp; + JOIN_TAB **idx= (JOIN_TAB **) table_join_idx; + cmp= idx[field2->field->table->tablenr]-idx[field1->field->table->tablenr]; + return cmp < 0 ? -1 : (cmp ? 
1 : 0); }
+
+
+/*
+  Generate minimal set of simple equalities equivalent to a multiple equality
+
+  SYNOPSIS
+    eliminate_item_equal()
+    cond            condition to add the generated equality to
+    upper_levels    structure to access multiple equality of upper levels
+    item_equal      multiple equality to generate simple equality from
+
+  DESCRIPTION
+    The function retrieves the fields of the multiple equality item
+    item_equal and for each field f:
+      - if item_equal contains const it generates the equality f=const_item;
+      - otherwise, if f is not the first field, generates the equality
+        f=item_equal->get_first().
+    All generated equalities are added to the cond conjunction.
+
+  NOTES
+    Before generating an equality the function checks that it has not
+    been generated for multiple equalities of the upper levels.
+    E.g. for the following where condition
+    WHERE a=5 AND ((a=b AND b=c) OR c>4)
+    the upper level AND condition will contain =(5,a),
+    while the lower level AND condition will contain =(5,a,b,c).
+    When splitting =(5,a,b,c) into separate equality predicates
+    we should omit 5=a, as we have it already in the upper level.
+    The following where condition gives us a more complicated case:
+    WHERE t1.a=t2.b AND t3.c=t4.d AND (t2.b=t3.c OR t4.e>5 ...) AND ...
+    Given the tables are accessed in the order t1->t2->t3->t4 for
+    the selected query execution plan, the lower level multiple
+    equality =(t1.a,t2.b,t3.c,t4.d) formally should be converted to
+    t1.a=t2.b AND t1.a=t3.c AND t1.a=t4.d. But t1.a=t2.b will be
+    generated for the upper level. Also t3.c=t4.d will be generated there.
+    So only t1.a=t3.c should be left in the lower level.
+    If cond is equal to 0, then no more than one equality is generated
+    and a pointer to it is returned as the result of the function.
+
+  RETURN
+    The condition with generated simple equalities or
+    a pointer to the simple generated equality, if success.
+    0, otherwise.
+*/ + +static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, + Item_equal *item_equal) +{ + List<Item> eq_list; + Item_func_eq *eq_item= 0; + if (((Item *) item_equal)->const_item() && !item_equal->val_int()) + return new Item_int((longlong) 0,1); + Item *item_const= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item *head; + if (item_const) + head= item_const; + else + { + head= item_equal->get_first(); + it++; + } + Item_field *item_field; + while ((item_field= it++)) + { + Item_equal *upper= item_field->find_item_equal(upper_levels); + Item_field *item= item_field; + if (upper) + { + if (item_const && upper->get_const()) + item= 0; + else + { + Item_equal_iterator li(*item_equal); + while ((item= li++) != item_field) + { + if (item->find_item_equal(upper_levels) == upper) + break; + } + } + } + if (item == item_field) + { + if (eq_item) + eq_list.push_back(eq_item); + eq_item= new Item_func_eq(item_field, head); + if (!eq_item) + return 0; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + } + } + + if (!cond && !eq_list.head()) + { + if (!eq_item) + return new Item_int((longlong) 1,1); + return eq_item; + } + + if (eq_item) + eq_list.push_back(eq_item); + if (!cond) + cond= new Item_cond_and(eq_list); + else + ((Item_cond *) cond)->add_at_head(&eq_list); + + cond->quick_fix_field(); + cond->update_used_tables(); + + return cond; +} + + +/* + Substitute every field reference in a condition by the best equal field + and eliminate all multiplle equality predicates + + SYNOPSIS + substitute_for_best_equal_field() + cond condition to process + cond_equal multiple equalities to take into consideration + table_join_idx index to tables determining field preference + + DESCRIPTION + The function retrieves the cond condition and for each encountered + multiple equality predicate it sorts the field references in it + according to the order of tables specified by the table_join_idx + parameter. Then it eliminates the multiple equality predicate it + replacing it by the conjunction of simple equality predicates + equating every field from the multiple equality to the first + field in it, or to the constant, if there is any. + After this the function retrieves all other conjuncted + predicates substitute every field reference by the field reference + to the first equal field or equal constant if there are any. + + NOTES + At the first glance full sort of fields in multiple equality + seems to be an overkill. Yet it's not the case due to possible + new fields in multiple equality item of lower levels. We want + the order in them to comply with the order of upper levels. 
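(Illustrative aside, not part of the patch.) The expansion described above can be sketched as follows: order the members of a multiple equality by join order, pick the constant (if any) or the field of the earliest table as the head, and equate every other member to it; the FieldRef struct, the explicit join_order numbers and the printed form are assumptions of this example, not the server's implementation.

// Toy expansion of a multiple equality into a minimal set of simple
// equalities ordered by join order.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct FieldRef { std::string name; int join_order; };

int main()
{
  // =(t1.a,t2.b,t3.c) with plan order t1,t2,t3 and no known constant.
  std::vector<FieldRef> equal= {{"t3.c", 3}, {"t1.a", 1}, {"t2.b", 2}};
  std::string constant;                      // empty: no constant in the group

  // Earlier tables first, mirroring compare_fields_by_table_order()
  std::sort(equal.begin(), equal.end(),
            [](const FieldRef &a, const FieldRef &b)
            { return a.join_order < b.join_order; });

  std::string head= constant.empty() ? equal[0].name : constant;
  size_t first= constant.empty() ? 1 : 0;    // skip the head field itself
  for (size_t i= first; i < equal.size(); i++)
    std::printf("%s = %s\n", equal[i].name.c_str(), head.c_str());
  // prints: t2.b = t1.a
  //         t3.c = t1.a
  return 0;
}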
+ + RETURN + The transformed condition +*/ + +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx) +{ + Item_equal *item_equal; + + if (cond->type() == Item::COND_ITEM) + { + List<Item> *cond_list= ((Item_cond*) cond)->argument_list(); + + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + if (and_level) + { + cond_equal= &((Item_cond_and *) cond)->cond_equal; + cond_list->disjoin((List<Item> *) &cond_equal->current_level); + + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + } + } + + List_iterator<Item> li(*cond_list); + Item *item; + while ((item= li++)) + { + Item *new_item =substitute_for_best_equal_field(item, cond_equal, + table_join_idx); + if (new_item != item) + li.replace(new_item); + } + + if (and_level) + { + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + eliminate_item_equal(cond, cond_equal->upper_levels, item_equal); + } + } + } + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + item_equal= (Item_equal *) cond; + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + if (cond_equal && cond_equal->current_level.head() == item_equal) + cond_equal= 0; + return eliminate_item_equal(0, cond_equal, item_equal); + } + else + cond->walk(&Item::replace_equal_field_processor, 0); + return cond; +} + + /* change field = field to field = const for each found field = const in the and_level @@ -4346,28 +6970,284 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, } +/* + Simplify joins replacing outer joins by inner joins whenever it's possible + + SYNOPSIS + simplify_joins() + join reference to the query info + join_list list representation of the join to be converted + conds conditions to add on expressions for converted joins + top true <=> conds is the where condition + + DESCRIPTION + The function, during a retrieval of join_list, eliminates those + outer joins that can be converted into inner join, possibly nested. + It also moves the on expressions for the converted outer joins + and from inner joins to conds. + The function also calculates some attributes for nested joins: + - used_tables + - not_null_tables + - dep_tables. + - on_expr_dep_tables + The first two attributes are used to test whether an outer join can + be substituted for an inner join. The third attribute represents the + relation 'to be dependent on' for tables. If table t2 is dependent + on table t1, then in any evaluated execution plan table access to + table t2 must precede access to table t2. This relation is used also + to check whether the query contains invalid cross-references. + The forth attribute is an auxiliary one and is used to calculate + dep_tables. + As the attribute dep_tables qualifies possibles orders of tables in the + execution plan, the dependencies required by the straight join + modifiers are reflected in this attribute as well. + The function also removes all braces that can be removed from the join + expression without changing its meaning. + + NOTES + An outer join can be replaced by an inner join if the where condition + or the on expression for an embedding nested join contains a conjunctive + predicate rejecting null values for some attribute of the inner tables. + + E.g. 
in the query: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + the predicate t2.b < 5 rejects nulls. + The query is converted first to: + SELECT * FROM t1 INNER JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + then to the equivalent form: + SELECT * FROM t1, t2 ON t2.a=t1.a WHERE t2.b < 5 AND t2.a=t1.a. + + Similarly the following query: + SELECT * from t1 LEFT JOIN (t2, t3) ON t2.a=t1.a t3.b=t1.b + WHERE t2.c < 5 + is converted to: + SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a t3.b=t1.b + + One conversion might trigger another: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a + LEFT JOIN t3 ON t3.b=t2.b + WHERE t3 IS NOT NULL => + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b => + SELECT * FROM t1, t2, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a + + The function removes all unnecessary braces from the expression + produced by the conversions. + E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + finally is converted to: + SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + + It also will remove braces from the following queries: + SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b + SELECT * from (t1, (t2,t3)) WHERE t1.a=t2.a AND t2.b=t3.b. + + The benefit of this simplification procedure is that it might return + a query for which the optimizer can evaluate execution plan with more + join orders. With a left join operation the optimizer does not + consider any plan where one of the inner tables is before some of outer + tables. + + IMPLEMENTATION. + The function is implemented by a recursive procedure. On the recursive + ascent all attributes are calculated, all outer joins that can be + converted are replaced and then all unnecessary braces are removed. + As join list contains join tables in the reverse order sequential + elimination of outer joins does not requite extra recursive calls. + + EXAMPLES + Here is an example of a join query with invalid cross references: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t3.a LEFT JOIN ON t3.b=t1.b + + RETURN VALUE + The new condition, if success + 0, otherwise +*/ + static COND * -optimize_cond(THD *thd, COND *conds, Item::cond_result *cond_value) +simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) { - SELECT_LEX *select= thd->lex->current_select; + TABLE_LIST *table; + NESTED_JOIN *nested_join; + TABLE_LIST *prev_table= 0; + List_iterator<TABLE_LIST> li(*join_list); + DBUG_ENTER("simplify_joins"); + + /* + Try to simplify join operations from join_list. + The most outer join operation is checked for conversion first. + */ + while ((table= li++)) + { + table_map used_tables; + table_map not_null_tables= (table_map) 0; + + if ((nested_join= table->nested_join)) + { + /* + If the element of join_list is a nested join apply + the procedure to its nested join list first. + */ + if (table->on_expr) + { + Item *expr; + /* + If an on expression E is attached to the table, + check all null rejected predicates in this expression. + If such a predicate over an attribute belonging to + an inner table of an embedded outer join is found, + the outer join is converted to an inner join and + the corresponding on expression is added to E. 
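(Illustrative aside, not part of the patch.) The conversion criterion boils down to a bitmap test: if the tables used on the inner side of an outer join intersect the tables for which the condition rejects NULLs, the outer join can be made inner; the sketch below assumes plain bitmasks for table_map and a hard-coded example.

// Simplified illustration of the null-rejection test that decides whether an
// outer join can be converted to an inner join.
#include <cstdio>

typedef unsigned long long table_map;

int main()
{
  const table_map T2= 1ULL << 1;       // bit for table t2 (t1 would be bit 0)

  // SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5
  table_map inner_side_tables= T2;     // tables on the inner side of the join
  table_map not_null_tables= T2;       // WHERE t2.b < 5 rejects NULLs for t2

  if (inner_side_tables & not_null_tables)
    std::printf("convert the LEFT JOIN to an inner join, "
                "add the ON expression to WHERE\n");
  else
    std::printf("keep the outer join\n");
  return 0;
}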
+ */ + expr= simplify_joins(join, &nested_join->join_list, + table->on_expr, FALSE); + table->on_expr= expr; + } + nested_join->used_tables= (table_map) 0; + nested_join->not_null_tables=(table_map) 0; + conds= simplify_joins(join, &nested_join->join_list, conds, top); + used_tables= nested_join->used_tables; + not_null_tables= nested_join->not_null_tables; + } + else + { + used_tables= table->table->map; + if (conds) + not_null_tables= conds->not_null_tables(); + } + + if (table->embedding) + { + table->embedding->nested_join->used_tables|= used_tables; + table->embedding->nested_join->not_null_tables|= not_null_tables; + } + + if (!table->outer_join || (used_tables & not_null_tables)) + { + /* + For some of the inner tables there are conjunctive predicates + that reject nulls => the outer join can be replaced by an inner join. + */ + table->outer_join= 0; + if (table->on_expr) + { + /* Add on expression to the where condition. */ + if (conds) + { + conds= and_conds(conds, table->on_expr); + conds->fix_fields(join->thd, 0, &conds); + } + else + conds= table->on_expr; + table->on_expr= 0; + } + } + + if (!top) + continue; + + /* + Only inner tables of non-convertible outer joins + remain with on_expr. + */ + if (table->on_expr) + { + table->dep_tables|= table->on_expr->used_tables(); + if (table->embedding) + { + table->dep_tables&= ~table->embedding->nested_join->used_tables; + /* + Embedding table depends on tables used + in embedded on expressions. + */ + table->embedding->on_expr_dep_tables|= table->on_expr->used_tables(); + } + else + table->dep_tables&= ~table->table->map; + } + + if (prev_table) + { + /* The order of tables is reverse: prev_table follows table */ + if (prev_table->straight) + prev_table->dep_tables|= used_tables; + if (prev_table->on_expr) + { + prev_table->dep_tables|= table->on_expr_dep_tables; + table_map prev_used_tables= prev_table->nested_join ? + prev_table->nested_join->used_tables : + prev_table->table->map; + /* + If on expression contains only references to inner tables + we still make the inner tables dependent on the outer tables. + It would be enough to set dependency only on one outer table + for them. Yet this is really a rare case. + */ + if (!(prev_table->on_expr->used_tables() & ~prev_used_tables)) + prev_table->dep_tables|= used_tables; + } + } + prev_table= table; + } + + /* Flatten nested joins that can be flattened. */ + li.rewind(); + while((table= li++)) + { + nested_join= table->nested_join; + if (nested_join && !table->on_expr) + { + TABLE_LIST *tbl; + List_iterator<TABLE_LIST> it(nested_join->join_list); + while ((tbl= it++)) + { + tbl->embedding= table->embedding; + tbl->join_list= table->join_list; + } + li.replace(nested_join->join_list); + } + } + DBUG_RETURN(conds); +} + + +static COND * +optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, + Item::cond_result *cond_value) +{ + THD *thd= join->thd; + SELECT_LEX *select= thd->lex->current_select; DBUG_ENTER("optimize_cond"); - if (conds) + + if (!conds) + { + *cond_value= Item::COND_TRUE; + select->prep_where= 0; + } + else { + /* + Build all multiple equality predicates and eliminate equality + predicates that can be inferred from these multiple equalities. + For each reference of a field included into a multiple equality + that occurs in a function set a pointer to the multiple equality + predicate. Substitute a constant instead of this field if the + multiple equality contains a constant. 
+ */ DBUG_EXECUTE("where", print_where(conds, "original");); + conds= build_equal_items(join->thd, conds, NULL, join_list, + &join->cond_equal); + DBUG_EXECUTE("where",print_where(conds,"after equal_items");); + /* change field = field to field = const for each found field = const */ propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds); /* Remove all instances of item == item Remove all and-levels where CONST item != CONST item */ - DBUG_EXECUTE("where", print_where(conds, "after const change");); - conds= remove_eq_conds(thd, conds, cond_value); - DBUG_EXECUTE("info", print_where(conds, "after remove");); - } - else - { - *cond_value= Item::COND_TRUE; - select->prep_where= 0; + DBUG_EXECUTE("where",print_where(conds,"after const change");); + conds= remove_eq_conds(thd, conds, cond_value) ; + DBUG_EXECUTE("info",print_where(conds,"after remove");); } DBUG_RETURN(conds); } @@ -4492,6 +7372,11 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) } } } + if (cond->const_item()) + { + *cond_value= eval_const_cond(cond) ? Item::COND_TRUE : Item::COND_FALSE; + return (COND*) 0; + } } else if (cond->const_item()) { @@ -4571,7 +7456,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) return 0; } - /**************************************************************************** Create internal temporary table ****************************************************************************/ @@ -4856,7 +7740,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, (int) distinct, (int) save_sum_fields, (ulong) rows_limit,test(group))); - statistic_increment(created_tmp_tables, &LOCK_status); + statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status); if (use_temp_pool) temp_pool_slot = bitmap_set_next(&temp_pool); @@ -5445,7 +8329,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, table->db_stat=0; goto err; } - statistic_increment(created_tmp_disk_tables, &LOCK_status); + statistic_increment(table->in_use->status_var.created_tmp_disk_tables, + &LOCK_status); table->db_record_offset=1; DBUG_RETURN(0); err: @@ -5597,7 +8482,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) { int error= 0; JOIN_TAB *join_tab; - int (*end_select)(JOIN *, struct st_join_table *,bool); + Next_select_func end_select; DBUG_ENTER("do_select"); join->procedure=procedure; @@ -5605,7 +8490,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) Tell the client how many fields there are in a row */ if (!table) - join->result->send_fields(*fields,1); + join->result->send_fields(*fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); else { VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); @@ -5645,11 +8531,18 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) } else { - if (join->sort_and_group || (join->procedure && - join->procedure->flags & PROC_GROUP)) - end_select=end_send_group; + /* Test if data is accessed via QUICK_GROUP_MIN_MAX_SELECT. 
*/ + bool is_using_quick_group_min_max_select= + (join->join_tab->select && join->join_tab->select->quick && + (join->join_tab->select->quick->get_type() == + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)); + + if ((join->sort_and_group || + (join->procedure && join->procedure->flags & PROC_GROUP)) && + !is_using_quick_group_min_max_select) + end_select= end_send_group; else - end_select=end_send; + end_select= end_send; } join->join_tab[join->tables-1].next_select=end_select; @@ -5658,7 +8551,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) if (join->tables == join->const_tables) { /* - HAVING will be chcked after processing aggregate functions, + HAVING will be checked after processing aggregate functions, But WHERE should checkd here (we alredy have read tables) */ if (!join->conds || join->conds->val_int()) @@ -5734,7 +8627,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) } if (join->thd->killed) // If aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; /* purecov: inspected */ } if (join_tab->use_quick != 2 || test_if_quick_select(join_tab) <= 0) @@ -5748,20 +8641,148 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return sub_select(join,join_tab,end_of_records); /* Use ordinary select */ } +/* + Retrieve records that extend a given partial join record from the result of a join + + SYNOPSIS + sub_select() + join pointer to the structure providing all context info for the query + join_tab the first next table of the execution plan to be retrieved + end_of_records true when we need to perform the final steps of retrieval + + DESCRIPTION + For a given partial join record consisting of records from the tables + preceding the table join_tab in the execution plan, the function + retrieves all matching full records from the result set and + sends them to the result set stream. + + NOTES + The function effectively implements the final (n-k) nested loops + of the nested loops join algorithm, where k is the ordinal number of + the join_tab table and n is the total number of tables in the join query. + It performs nested loops joins with all conjunctive predicates from + the where condition pushed as low to the tables as possible. + E.g. for the query + SELECT * FROM t1,t2,t3 + WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9 + the predicate (t1.a BETWEEN 5 AND 9) will be pushed to table t1, + given the selected plan prescribes to nest retrievals of the + joined tables in the following order: t1,t2,t3. + A pushed down predicate is attached to the table it is pushed to, + in the field select_cond. + When executing a nested loop of level k the function runs through + the rows of 'join_tab' and for each row checks the pushed condition + attached to the table. + If it is false the function moves to the next row of the + table. If the condition is true the function recursively executes (n-k-1) + remaining embedded nested loops. + The situation becomes more complicated if outer joins are involved in + the execution plan. In this case the pushed down predicates can be + checked only under certain conditions. + Suppose for the query + SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a + WHERE t1.a>2 AND (t2.b>5 OR t2.b IS NULL) + the optimizer has chosen a plan with the table order t1,t2,t3. + The predicate P1=t1.a>2 will be pushed down to the table t1, while the + predicate P2=(t2.b>5 OR t2.b IS NULL) will be attached to the table + t2.
But the second predicate cannot be unconditionally tested right + after a row from t2 has been read. This can be done only after the + first row with t3.a=t1.a has been encountered. + Thus, the second predicate P2 is supplied with a guard value that is + stored in the field 'found' of the first inner table for the outer join + (table t2). When the first row with t3.a=t1.a for the current row + of table t1 appears, the value becomes true. From now on the predicate + is evaluated immediately after the row of table t2 has been read. + When the first row with t3.a=t1.a has been encountered, all + conditions attached to the inner tables t2,t3 must be evaluated. + Only when all of them are true is the row sent to the output stream. + If not, the function returns to the lowest nest level that has a false + attached condition. + The predicates from on expressions are also pushed down. If in the + above example the on expression were (t3.a=t1.a AND t2.a=t1.a), + then t1.a=t2.a would be pushed down to table t2, and without any + guard. + If after the run through all rows of table t2, the first inner table + for the outer join operation, it turns out that no matches are + found for the current row of t1, then the current row from table t1 + is complemented by nulls for t2 and t3. Then the pushed down predicates + are checked for the composed row almost in the same way as it had + been done for the first row with a match. The only difference is that + the predicates from on expressions are not checked. + + IMPLEMENTATION + The function forms output rows for a current partial join of k + tables recursively. + For each partial join record ending with a certain row from + join_tab it calls sub_select that builds all possible matching + tails from the result set. + To be able to check predicates conditionally, items of the class + Item_func_trig_cond are employed. + An object of this class is constructed from an item of class COND + and a pointer to a guarding boolean variable. + When the value of the guard variable is true the value of the object + is the same as the value of the predicate, otherwise it just returns + true. + To carry out a return to a nested loop level of join table t the pointer + to t is remembered in the field 'return_tab' of the join structure. + Consider the following query: + SELECT * FROM t1 + LEFT JOIN + (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a) + ON t4.a=t2.a + WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL) + Suppose the chosen execution plan dictates the order t1,t2,t3,t4,t5 + and suppose that for given joined rows from tables t1,t2,t3 there are + no rows in the result set yet. + When the first row from t5 that satisfies the on condition + t5.a=t3.a is found, the pushed down predicate t4.b=2 OR t4.b IS NULL + becomes 'activated', as does the predicate t4.a=t2.a. But + the predicate (t2.b=5 OR t2.b IS NULL) cannot be checked until + t4.a=t2.a becomes true. + In order not to re-evaluate the predicates that were already evaluated + as attached pushed down predicates, a pointer to the first + most inner unmatched table is maintained in join_tab->first_unmatched. + Thus, when the first row from t5 with t5.a=t3.a is found + this pointer for t5 is changed from t4 to t2. + + STRUCTURE NOTES + join_tab->first_unmatched always points backwards to the first inner + table of the embedding nested join, if any.
+ + RETURN + 0, if success + # of the error, otherwise +*/ static int sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { - join_tab->table->null_row=0; if (end_of_records) return (*join_tab->next_select)(join,join_tab+1,end_of_records); - /* Cache variables for faster loop */ int error; - bool found=0; - COND *on_expr=join_tab->on_expr, *select_cond=join_tab->select_cond; + JOIN_TAB *first_unmatched; + JOIN_TAB *tab; + bool found= 0; + /* Cache variables for faster loop */ + COND *select_cond= join_tab->select_cond; + JOIN_TAB *first_inner_tab= join_tab->first_inner; + my_bool *report_error= &(join->thd->net.report_error); + join->return_tab= join_tab; + + if (join_tab->last_inner) + { + /* join_tab is the first inner table for an outer join operation. */ + + /* Set initial state of guard variables for this table.*/ + join_tab->found=0; + join_tab->not_null_compl= 1; + + /* Set first_unmatched for the last inner table of this group */ + join_tab->last_inner->first_unmatched= join_tab; + } if (!(error=(*join_tab->read_first_record)(join_tab))) { @@ -5775,20 +8796,80 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; /* purecov: inspected */ } - join->examined_rows++; - join->thd->row_count++; - if (!on_expr || on_expr->val_int()) + if (!select_cond || select_cond->val_int()) { - found=1; - if (not_exists_optimize) - break; // Searching after not null columns - if (!select_cond || select_cond->val_int()) - { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) + /* + There is no select condition or the attached pushed down + condition is true => a match is found. + */ + bool found= 1; + while (join_tab->first_unmatched && found) + { + /* + The while condition is always false if join_tab is not + the last inner join table of an outer join operation. + */ + first_unmatched= join_tab->first_unmatched; + /* + Mark that a match for the current outer table is found. + This activates push down conditional predicates attached + to all inner tables of the outer join. + */ + first_unmatched->found= 1; + for (tab= first_unmatched; tab <= join_tab; tab++) + { + /* Check all predicates that have just been activated. */ + /* + Actually all predicates not guarded by first_unmatched->found + will be re-evaluated. It could be fixed, but, probably, + it's not worth doing now. + */ + if (tab->select_cond && !tab->select_cond->val_int()) + { + /* The condition attached to table tab is false */ + if (tab == join_tab) + found= 0; + else + { + /* + Set a return point if the rejected predicate is not attached + to the last table of the current nest level. + */ + join->return_tab= tab; + return 0; + } + } + } + /* + Check whether join_tab is not the last inner table + for another embedding outer join. + */ + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + } + + /* + It was not just a return to a lower loop level when one + of the newly activated predicates is evaluated as false + (See above join->return_tab= tab). + */ + join->examined_rows++; + join->thd->row_count++; + + if (found) + { + if (not_exists_optimize) + break; + /* A match from join_tab is found for the current partial join.
*/ + if ((error=(*join_tab->next_select)(join, join_tab+1, 0)) < 0) return error; + if (join->return_tab < join_tab) + return 0; /* Test if this was a SELECT DISTINCT query on a table that was not in the field list; In this case we can abort if @@ -5798,30 +8879,77 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return 0; } else - { - /* - This row failed selection, release lock on it. - XXX: There is no table handler in MySQL which makes use of this - call. It's kept from Gemini times. A lot of new code was added - recently (i. e. subselects) without having it in mind. - */ - info->file->unlock_row(); - } + info->file->unlock_row(); + } + else + { + /* + The condition pushed down to the table join_tab rejects all rows + with the beginning coinciding with the current partial join. + */ + join->examined_rows++; + join->thd->row_count++; } + } while (!(error=info->read_record(info)) && !(*report_error)); } if (error > 0 || (*report_error)) // Fatal error return -1; - if (!found && on_expr) - { // OUTER JOIN - restore_record(join_tab->table,default_values); // Make empty record - mark_as_null_row(join_tab->table); // For group by without error - if (!select_cond || select_cond->val_int()) - { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) - return error; /* purecov: inspected */ + if (join_tab->last_inner && !join_tab->found) + { + /* + The table join_tab is the first inner table of an outer join operation + and no matches have been found for the current outer row. + */ + JOIN_TAB *last_inner_tab= join_tab->last_inner; + for ( ; join_tab <= last_inner_tab ; join_tab++) + { + /* Change the values of guard predicate variables. */ + join_tab->found= 1; + join_tab->not_null_compl= 0; + /* The outer row is complemented by nulls for each inner table */ + restore_record(join_tab->table,default_values); // Make empty record + mark_as_null_row(join_tab->table); // For group by without error + select_cond= join_tab->select_cond; + /* Check all attached conditions for inner table rows. */ + if (select_cond && !select_cond->val_int()) + return 0; + } + join_tab--; + /* + The row complemented by nulls might be the first row + of embedding outer joins. + If so, perform the same actions as in the code + for the first regular outer join row above. + */ + for ( ; ; ) + { + first_unmatched= join_tab->first_unmatched; + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + if (!first_unmatched) + break; + first_unmatched->found= 1; + for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++) + { + if (tab->select_cond && !tab->select_cond->val_int()) + { + join->return_tab= tab; + return 0; + } + } } + /* + The row complemented by nulls satisfies all conditions + attached to inner tables. + Send the row complemented by nulls to be joined with the + remaining tables.
+ */ + if ((error=(*join_tab->next_select)(join, join_tab+1 ,0)) < 0) + return error; } return 0; } @@ -5863,7 +8991,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { if (join->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; // Aborted by user /* purecov: inspected */ } SQL_SELECT *select=join_tab->select; @@ -5979,9 +9107,9 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) table->file->extra(HA_EXTRA_NO_KEYREAD); } } - if (tab->on_expr && !table->null_row) + if (*tab->on_expr_ref && !table->null_row) { - if ((table->null_row= test(tab->on_expr->val_int() == 0))) + if ((table->null_row= test((*tab->on_expr_ref)->val_int() == 0))) mark_as_null_row(table); } if (!table->null_row) @@ -6015,6 +9143,19 @@ join_read_system(JOIN_TAB *tab) } +/* + Read a table when there is at most one matching row + + SYNOPSIS + join_read_const() + tab Table to read + + RETURN + 0 Row was found + -1 Row was not found + 1 Got an error (other than row not found) during read +*/ + static int join_read_const(JOIN_TAB *tab) { @@ -6022,6 +9163,7 @@ join_read_const(JOIN_TAB *tab) TABLE *table= tab->table; if (table->status & STATUS_GARBAGE) // If first read { + table->status= 0; if (cp_buffer_from_ref(&tab->ref)) error=HA_ERR_KEY_NOT_FOUND; else @@ -6032,6 +9174,7 @@ join_read_const(JOIN_TAB *tab) } if (error) { + table->status= STATUS_NOT_FOUND; table->null_row=1; empty_record(table); if (error != HA_ERR_KEY_NOT_FOUND) @@ -6195,8 +9338,8 @@ test_if_quick_select(JOIN_TAB *tab) static int join_init_read_record(JOIN_TAB *tab) { - if (tab->select && tab->select->quick) - tab->select->quick->reset(); + if (tab->select && tab->select->quick && tab->select->quick->reset()) + return 1; init_read_record(&tab->read_record, tab->join->thd, tab->table, tab->select,1,1); return (*tab->read_record.read_record)(&tab->read_record); @@ -6404,6 +9547,14 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } DBUG_RETURN(-3); // Abort nicely } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows for + this fetch request are sent. + */ + DBUG_RETURN(-4); + } } else { @@ -6478,6 +9629,14 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), join->do_send_rows=0; join->unit->select_limit_cnt = HA_POS_ERROR; } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows + for this fetch request are sent. 
+ */ + DBUG_RETURN(-4); + } } } else @@ -6516,7 +9675,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } if (!end_of_records) @@ -6584,7 +9743,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } @@ -6653,7 +9812,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } @@ -6700,7 +9859,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->thd->killed) { // Aborted by user - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } if (!join->first_record || end_of_records || @@ -6980,7 +10139,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, } -static uint find_shortest_key(TABLE *table, const key_map *usable_keys) +uint find_shortest_key(TABLE *table, const key_map *usable_keys) { uint min_length= (uint) ~0; uint best= MAX_KEY; @@ -7117,6 +10276,17 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } else if (select && select->quick) // Range found by opt_range { + int quick_type= select->quick->get_type(); + /* + assume results are not ordered when index merge is used + TODO: sergeyp: Results of all index merge selects actually are ordered + by clustered PK values. + */ + + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) + DBUG_RETURN(0); ref_key= select->quick->index; ref_key_parts= select->quick->used_key_parts; } @@ -7150,7 +10320,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } else { - select->quick->file->ha_index_end(); + select->quick->head->file->ha_index_end(); + /* + We have verified above that select->quick is not + index_merge quick select. 
+ */ select->quick->index= new_ref_key; select->quick->init(); } @@ -7172,8 +10346,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ if (!select->quick->reverse_sorted()) { - // ORDER BY range_key DESC - QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick, + int quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + DBUG_RETURN(0); // Use filesort + + /* ORDER BY range_key DESC */ + QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), used_key_parts); if (!tmp || tmp->error) { @@ -7321,8 +10502,11 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, { select->quick=tab->quick; tab->quick=0; - /* We can only use 'Only index' if quick key is same as ref_key */ - if (table->key_read && (uint) tab->ref.key != select->quick->index) + /* + We can only use 'Only index' if quick key is same as ref_key + and in index_merge 'Only index' cannot be used + */ + if (table->key_read && ((uint) tab->ref.key != select->quick->index)) { table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); @@ -7353,6 +10537,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tab->select= 0; } tab->select_cond=0; + tab->last_inner= 0; + tab->first_unmatched= 0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; tab->join->examined_rows+=examined_rows; @@ -7510,7 +10696,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -7622,7 +10808,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -7740,6 +10926,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } if (!(cache->field=(CACHE_FIELD*) sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)* + sizeof(CACHE_FIELD*)))) { my_free((gptr) cache->buff,MYF(0)); /* purecov: inspected */ @@ -7981,62 +11168,123 @@ cp_buffer_from_ref(TABLE_REF *ref) *****************************************************************************/ /* - Find order/group item in requested columns and change the item to point at - it. If item doesn't exists, add it first in the field list - Return 0 if ok. + Resolve an ORDER BY or GROUP BY column reference. + + SYNOPSIS + find_order_in_list() + thd [in] Pointer to current thread structure + ref_pointer_array [in/out] All select, group and order by fields + tables [in] List of tables to search in (usually FROM clause) + order [in] Column reference to be resolved + fields [in] List of fields to search in (usually SELECT list) + all_fields [in/out] All select, group and order by fields + is_group_field [in] True if order is a GROUP field, false if + ORDER by field + + DESCRIPTION + Given a column reference (represented by 'order') from a GROUP BY or ORDER + BY clause, find the actual column it represents. If the column being + resolved is from the GROUP BY clause, the procedure searches the SELECT + list 'fields' and the columns in the FROM list 'tables'. If 'order' is from + the ORDER BY clause, only the SELECT list is being searched. + + If 'order' is resolved to an Item, then order->item is set to the found + Item. 
If there is no item for the found column (that is, it was resolved + into a table field), order->item is 'fixed' and is added to all_fields and + ref_pointer_array. + + RETURN + 0 if OK + 1 if error occurred */ static int -find_order_in_list(THD *thd, Item **ref_pointer_array, - TABLE_LIST *tables,ORDER *order, List<Item> &fields, - List<Item> &all_fields) +find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + ORDER *order, List<Item> &fields, List<Item> &all_fields, + bool is_group_field) { - Item *it= *order->item; - if (it->type() == Item::INT_ITEM) + Item *order_item= *order->item; /* The item from the GROUP/ORDER clause. */ + Item::Type order_item_type; + Item **select_item; /* The corresponding item from the SELECT clause. */ + Field *from_field; /* The corresponding field from the FROM clause. */ + + if (order_item->type() == Item::INT_ITEM) { /* Order by position */ - uint count= (uint) it->val_int(); + uint count= (uint) order_item->val_int(); if (!count || count > fields.elements) { my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR), - MYF(0), it->full_name(), thd->where); + MYF(0), order_item->full_name(), thd->where); return 1; } - order->item= ref_pointer_array + count-1; + order->item= ref_pointer_array + count - 1; order->in_field_list= 1; + order->counter= count; + order->counter_used= 1; return 0; } + /* Lookup the current GROUP/ORDER field in the SELECT clause. */ uint counter; bool unaliased; - Item **item= find_item_in_list(it, fields, &counter, + select_item= find_item_in_list(order_item, fields, &counter, REPORT_EXCEPT_NOT_FOUND, &unaliased); - if (!item) - return 1; + if (!select_item) + return 1; /* Some error occurred. */ + - if (item != (Item **)not_found_item) + /* Check whether the resolved field is not ambiguous. */ + if (select_item != not_found_item) { /* If we have found field not by its alias in select list but by its original field name, we should additionaly check if we have conflict for this name (in case if we would perform lookup in all tables). */ - if (unaliased && !it->fixed && it->fix_fields(thd, tables, order->item)) + if (unaliased && !order_item->fixed && order_item->fix_fields(thd, tables, order->item)) return 1; - order->item= ref_pointer_array + counter; - order->in_field_list=1; - return 0; + /* Lookup the current GROUP field in the FROM clause. */ + order_item_type= order_item->type(); + if (is_group_field && + order_item_type == Item::FIELD_ITEM || order_item_type == Item::REF_ITEM) + { + Item **view_ref= NULL; + from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables, + view_ref, IGNORE_ERRORS, TRUE); + if(!from_field) + from_field= (Field*) not_found_field; + } + else + from_field= (Field*) not_found_field; + + if (from_field == not_found_field || + from_field && from_field != view_ref_found && + (*select_item)->type() == Item::FIELD_ITEM && + ((Item_field*) (*select_item))->field->eq(from_field)) + /* + If there is no such field in the FROM clause, or it is the same field as + the one found in the SELECT clause, then use the Item created for the + SELECT field. As a result, if there was a derived field that 'shadowed' + a table field with the same name, the table field will be chosen over + the derived field.
+ */ + { + order->item= ref_pointer_array + counter; + order->in_field_list=1; + return 0; + } } order->in_field_list=0; /* - We check it->fixed because Item_func_group_concat can put + We check order_item->fixed because Item_func_group_concat can put arguments for which fix_fields already was called. 'it' reassigned in if condition because fix_field can change it. */ - if (!it->fixed && - (it->fix_fields(thd, tables, order->item) || - (it= *order->item)->check_cols(1) || + if (!order_item->fixed && + (order_item->fix_fields(thd, tables, order->item) || + (order_item= *order->item)->check_cols(1) || thd->is_fatal_error)) return 1; // Wrong field uint el= all_fields.elements; @@ -8046,6 +11294,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, return 0; } + /* Change order to point at item in select list. If item isn't a number and doesn't exits in the select list, add it the the field list. @@ -8058,7 +11307,7 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + all_fields, FALSE)) return 1; } return 0; @@ -8110,7 +11359,7 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + all_fields, TRUE)) return 1; (*order->item)->marker=1; /* Mark found */ if ((*order->item)->with_sum_func) @@ -8321,7 +11570,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables) if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT))) DBUG_RETURN(0); - for (; !(map & tables->table->map) ; tables=tables->next) ; + for (; !(map & tables->table->map); tables= tables->next_local); if (map != tables->table->map) DBUG_RETURN(0); // More than one table DBUG_PRINT("exit",("sort by table: %d",tables->table->tablenr)); @@ -8625,9 +11874,7 @@ bool JOIN::alloc_func_list() field_list All items send_fields Items in select list before_group_by Set to 1 if this is called before GROUP BY handling - - NOTES - Calls ::setup() for all item_sum objects in field_list + recompute Set to TRUE if sum_funcs must be recomputed RETURN 0 ok @@ -8635,23 +11882,21 @@ bool JOIN::alloc_func_list() */ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, - bool before_group_by) + bool before_group_by, bool recompute) { List_iterator_fast<Item> it(field_list); Item_sum **func; Item *item; DBUG_ENTER("make_sum_func_list"); + if (*sum_funcs && !recompute) + DBUG_RETURN(FALSE); /* We have already initialized sum_funcs. 
*/ + func= sum_funcs; while ((item=it++)) { if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) - { *func++= (Item_sum*) item; - /* let COUNT(DISTINCT) create the temporary table */ - if (((Item_sum*) item)->setup(thd)) - DBUG_RETURN(TRUE); - } } if (before_group_by && rollup.state == ROLLUP::STATE_INITED) { @@ -8796,6 +12041,30 @@ change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, Code for calculating functions ******************************************************************************/ + +/* + Call ::setup for all sum functions + + SYNOPSIS + setup_sum_funcs() + thd thread handler + func_ptr sum function list + + RETURN + FALSE ok + TRUE error +*/ + +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr) +{ + Item_sum *func; + while ((func= *(func_ptr++))) + if (func->setup(thd)) + return TRUE; + return FALSE; +} + + static void init_tmptable_sum_functions(Item_sum **func_ptr) { @@ -9173,6 +12442,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, select_result *result=join->result; Item *item_null= new Item_null(); CHARSET_INFO *cs= system_charset_info; + int quick_type; DBUG_ENTER("select_describe"); DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s", (ulong)join->select_lex, join->select_lex->type, @@ -9264,14 +12534,20 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, { JOIN_TAB *tab=join->join_tab+i; TABLE *table=tab->table; - char buff[512],*buff_ptr=buff; - char buff1[512], buff2[512]; + char buff[512]; + char buff1[512], buff2[512], buff3[512]; + char keylen_str_buf[64]; + String extra(buff, sizeof(buff),cs); char table_name_buffer[NAME_LEN]; String tmp1(buff1,sizeof(buff1),cs); String tmp2(buff2,sizeof(buff2),cs); + String tmp3(buff3,sizeof(buff3),cs); + extra.length(0); tmp1.length(0); tmp2.length(0); + tmp3.length(0); + quick_type= -1; item_list.empty(); /* id */ item_list.push_back(new Item_uint((uint32) @@ -9281,7 +12557,15 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, strlen(join->select_lex->type), cs)); if (tab->type == JT_ALL && tab->select && tab->select->quick) - tab->type= JT_RANGE; + { + quick_type= tab->select->quick->get_type(); + if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION)) + tab->type = JT_INDEX_MERGE; + else + tab->type = JT_RANGE; + } /* table */ if (table->derived_select_number) { @@ -9300,7 +12584,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, strlen(join_type_str[tab->type]), cs)); uint j; - /* possible_keys */ + /* Build "possible_keys" value and add it to item_list */ if (!tab->keys.is_clear_all()) { for (j=0 ; j < table->keys ; j++) @@ -9319,14 +12603,19 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length(),cs)); else item_list.push_back(item_null); - /* key key_len ref */ + + /* Build "key", "key_len", and "ref" values and add them to item_list */ if (tab->ref.key_parts) { KEY *key_info=table->key_info+ tab->ref.key; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name), system_charset_info)); - item_list.push_back(new Item_int((int32) tab->ref.key_length)); + length= longlong2str(tab->ref.key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, length, + system_charset_info)); for 
(store_key **ref=tab->ref.key_copy ; *ref ; ref++) { if (tmp2.length()) @@ -9339,18 +12628,21 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, else if (tab->type == JT_NEXT) { KEY *key_info=table->key_info+ tab->index; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) key_info->key_length)); + length= longlong2str(key_info->key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, + length, + system_charset_info)); item_list.push_back(item_null); } else if (tab->select && tab->select->quick) { - KEY *key_info=table->key_info+ tab->select->quick->index; - item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) tab->select->quick-> - max_used_key_length)); + tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3); + item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); + item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs)); item_list.push_back(item_null); } else @@ -9359,52 +12651,73 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(item_null); item_list.push_back(item_null); } - /* rows */ + /* Add "rows" field to item_list. */ item_list.push_back(new Item_int((longlong) (ulonglong) join->best_positions[i]. records_read, 21)); - /* extra */ + /* Build "Extra" field and add it to item_list. */ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && table->used_keys.is_set(tab->index)) key_read=1; - + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT && + !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row) + key_read=1; + if (tab->info) item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs)); else { + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) + { + extra.append("; Using "); + tab->select->quick->add_info_string(&extra); + } if (tab->select) { if (tab->use_quick == 2) { char buf[MAX_KEY/8+1]; - sprintf(buff_ptr,"; Range checked for each record (index map: 0x%s)", - tab->keys.print(buf)); - buff_ptr=strend(buff_ptr); + extra.append("; Range checked for each record (index map: 0x"); + extra.append(tab->keys.print(buf)); + extra.append(')'); } - else - buff_ptr=strmov(buff_ptr,"; Using where"); + else if (tab->select->cond) + extra.append("; Using where"); } if (key_read) - buff_ptr= strmov(buff_ptr,"; Using index"); + { + if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + extra.append("; Using index for group-by"); + else + extra.append("; Using index"); + } if (table->reginfo.not_exists_optimize) - buff_ptr= strmov(buff_ptr,"; Not exists"); + extra.append("; Not exists"); if (need_tmp_table) { need_tmp_table=0; - buff_ptr= strmov(buff_ptr,"; Using temporary"); + extra.append("; Using temporary"); } if (need_order) { need_order=0; - buff_ptr= strmov(buff_ptr,"; Using filesort"); + extra.append("; Using filesort"); } if (distinct & test_all_bits(used_tables,thd->used_tables)) - buff_ptr= strmov(buff_ptr,"; Distinct"); - if (buff_ptr == buff) - buff_ptr+= 2; // Skip inital "; " - item_list.push_back(new Item_string(buff+2,(uint) (buff_ptr - buff)-2, - cs)); + extra.append("; Distinct"); + + /* Skip initial "; "*/ + const char *str= extra.ptr(); + uint32 len= extra.length(); + if 
(len) + { + str += 2; + len -= 2; + } + item_list.push_back(new Item_string(str, len, cs)); } // For next iteration used_tables|=table->map; @@ -9463,6 +12776,7 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) else { thd->lex->current_select= first; + unit->set_limit(unit->global_parameters, first); res= mysql_select(thd, &first->ref_pointer_array, (TABLE_LIST*) first->table_list.first, first->with_wild, first->item_list, @@ -9482,14 +12796,109 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) } +/* + Print joins from the FROM clause + + SYNOPSIS + print_join() + thd thread handler + str string where table should be printed + tables list of tables in join +*/ + +static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables) +{ + /* List is reversed => we should reverse it before using */ + List_iterator_fast<TABLE_LIST> ti(*tables); + TABLE_LIST **table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) * + tables->elements); + if (table == 0) + return; // out of memory + + for (TABLE_LIST **t= table + (tables->elements - 1); t >= table; t--) + *t= ti++; + + DBUG_ASSERT(tables->elements >= 1); + (*table)->print(thd, str); + + TABLE_LIST **end= table + tables->elements; + for(TABLE_LIST **tbl= table + 1; tbl < end; tbl++) + { + TABLE_LIST *curr= *tbl; + if (curr->outer_join) + str->append(" left join ", 11); // MySQL converts right to left joins + else if (curr->straight) + str->append(" straight_join ", 15); + else + str->append(" join ", 6); + curr->print(thd, str); + if (curr->on_expr) + { + str->append(" on(", 4); + curr->on_expr->print(str); + str->append(')'); + } + } +} + + +/* + Print table as it should be in join list + + SYNOPSIS + st_table_list::print() + thd thread handler + str string where table should be printed +*/ + +void st_table_list::print(THD *thd, String *str) +{ + if (nested_join) + { + str->append('('); + print_join(thd, str, &nested_join->join_list); + str->append(')'); + } + else + { + const char *cmp_name; // Name to compare with alias + if (view_name.str) + { + append_identifier(thd, str, view_db.str, view_db.length); + str->append('.'); + append_identifier(thd, str, view_name.str, view_name.length); + cmp_name= view_name.str; + } + else if (derived) + { + str->append('('); + derived->print(str); + str->append(')'); + cmp_name= ""; // Force printing of alias + } + else + { + append_identifier(thd, str, db, db_length); + str->append('.'); + append_identifier(thd, str, real_name, real_name_length); + cmp_name= real_name; + } + if (my_strcasecmp(table_alias_charset, cmp_name, alias)) + { + str->append(' '); + append_identifier(thd, str, alias, strlen(alias)); + } + } +} + + void st_select_lex::print(THD *thd, String *str) { if (!thd) thd= current_thd; str->append("select ", 7); - - //options + + /* First add options */ if (options & SELECT_STRAIGHT_JOIN) str->append("straight_join ", 14); if ((thd->lex->lock_option == TL_READ_HIGH_PRIORITY) && @@ -9530,60 +12939,8 @@ void st_select_lex::print(THD *thd, String *str) if (table_list.elements) { str->append(" from ", 6); - Item *next_on= 0; - for (TABLE_LIST *table= (TABLE_LIST *) table_list.first; - table; - table= table->next) - { - if (table->derived) - { - str->append('('); - table->derived->print(str); - str->append(") "); - str->append(table->alias); - } - else - { - str->append(table->db); - str->append('.'); - str->append(table->real_name); - if (my_strcasecmp(table_alias_charset, table->real_name, table->alias)) - { - str->append(' '); -
str->append(table->alias); - } - } - - if (table->on_expr && ((table->outer_join & JOIN_TYPE_LEFT) || - !(table->outer_join & JOIN_TYPE_RIGHT))) - next_on= table->on_expr; - - if (next_on) - { - str->append(" on(", 4); - next_on->print(str); - str->append(')'); - next_on= 0; - } - - TABLE_LIST *next_table; - if ((next_table= table->next)) - { - if (table->outer_join & JOIN_TYPE_RIGHT) - { - str->append(" right join ", 12); - if (!(table->outer_join & JOIN_TYPE_LEFT) && - table->on_expr) - next_on= table->on_expr; - } - else if (next_table->straight) - str->append(" straight_join ", 15); - else if (next_table->outer_join & JOIN_TYPE_LEFT) - str->append(" left join ", 11); - else - str->append(" join ", 6); - } - } + /* go through join tree */ + print_join(thd, str, &top_join_list); } // Where diff --git a/sql/sql_select.h b/sql/sql_select.h index bbd169d1850..e1bf60f6896 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -75,23 +75,33 @@ typedef struct st_join_cache { /* ** The structs which holds the join connections and join states */ - enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF, JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL, - JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY}; + JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY, JT_INDEX_MERGE}; class JOIN; +typedef int (*Next_select_func)(JOIN *,struct st_join_table *,bool); +typedef int (*Read_record_func)(struct st_join_table *tab); + + typedef struct st_join_table { TABLE *table; KEYUSE *keyuse; /* pointer to first used key */ SQL_SELECT *select; COND *select_cond; - QUICK_SELECT *quick; - Item *on_expr; + QUICK_SELECT_I *quick; + Item **on_expr_ref; /* pointer to the associated on expression */ + COND_EQUAL *cond_equal; /* multiple equalities for the on expression */ + st_join_table *first_inner; /* first inner table for including outerjoin */ + bool found; /* true after all matches or null complement */ + bool not_null_compl;/* true before null complement is added */ + st_join_table *last_inner; /* last table table for embedding outer join */ + st_join_table *first_upper; /* first inner table for embedding outer join */ + st_join_table *first_unmatched; /* used for optimization purposes only */ const char *info; - int (*read_first_record)(struct st_join_table *tab); - int (*next_select)(JOIN *,struct st_join_table *,bool); + Read_record_func read_first_record; + Next_select_func next_select; READ_RECORD read_record; double worst_seeks; key_map const_keys; /* Keys with constant part */ @@ -116,6 +126,7 @@ typedef struct st_join_table { typedef struct st_position /* Used in find_best */ { double records_read; + double read_time; JOIN_TAB *table; KEYUSE *key; } POSITION; @@ -133,8 +144,9 @@ typedef struct st_rollup class JOIN :public Sql_alloc { public: - JOIN_TAB *join_tab,**best_ref,**map2table; - JOIN_TAB *join_tab_save; //saved join_tab for subquery reexecution + JOIN_TAB *join_tab,**best_ref; + JOIN_TAB **map2table; // mapping between table indexes and JOIN_TABs + JOIN_TAB *join_tab_save; // saved join_tab for subquery reexecution TABLE **table,**all_tables,*sort_by_table; uint tables,const_tables; uint send_group_parts; @@ -142,6 +154,16 @@ class JOIN :public Sql_alloc bool do_send_rows; table_map const_table_map,found_const_table_map,outer_join; ha_rows send_records,found_records,examined_rows,row_limit, select_limit; + /* + Used to fetch no more than given amount of rows per one + fetch operation of server side cursor. 
+ The value is checked in end_send and end_send_group in fashion, similar + to offset_limit_cnt: + - fetch_limit= HA_POS_ERROR if there is no cursor. + - when we open a cursor, we set fetch_limit to 0, + - on each fetch iteration we add num_rows to fetch to fetch_limit + */ + ha_rows fetch_limit; POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1]; double best_read; List<Item> *fields; @@ -199,8 +221,11 @@ class JOIN :public Sql_alloc ORDER *order, *group_list, *proc_param; //hold parameters of mysql_select COND *conds; // ---"--- Item *conds_history; // store WHERE for explain - TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_selec + TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_select + List<TABLE_LIST> *join_list; // list of joined tables in reverse order + COND_EQUAL *cond_equal; SQL_SELECT *select; //created in optimisation phase + JOIN_TAB *return_tab; //used only for outer joins Item **ref_pointer_array; //used pointer reference for this select // Copy of above to be used with different lists Item **items0, **items1, **items2, **items3, **current_ref_pointer_array; @@ -224,11 +249,13 @@ class JOIN :public Sql_alloc table= 0; tables= 0; const_tables= 0; + join_list= 0; sort_and_group= 0; first_record= 0; do_send_rows= 1; send_records= 0; found_records= 0; + fetch_limit= HA_POS_ERROR; examined_rows= 0; exec_tmp_table1= 0; exec_tmp_table2= 0; @@ -254,10 +281,12 @@ class JOIN :public Sql_alloc fields_list= fields_arg; error= 0; select= 0; + return_tab= 0; ref_pointer_array= items0= items1= items2= items3= 0; ref_pointer_array_size= 0; zero_result_cause= 0; optimized= 0; + cond_equal= 0; fields_list= fields_arg; bzero((char*) &keyuse,sizeof(keyuse)); @@ -277,7 +306,7 @@ class JOIN :public Sql_alloc void restore_tmp(); bool alloc_func_list(); bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields, - bool before_group_by); + bool before_group_by, bool recompute= FALSE); inline void set_items_ref_array(Item **ptr) { @@ -308,6 +337,44 @@ class JOIN :public Sql_alloc }; +/* + Server-side cursor (now stands only for basic read-only cursor) + See class implementation in sql_select.cc +*/ + +class Cursor: public Sql_alloc, public Item_arena +{ + JOIN *join; + SELECT_LEX_UNIT *unit; + + TABLE *open_tables; + MYSQL_LOCK *lock; + TABLE *derived_tables; + /* List of items created during execution */ + ulong query_id; +public: + select_send result; + + /* Temporary implementation as now we replace THD state by value */ + /* Save THD state into cursor */ + void init_from_thd(THD *thd); + /* Restore THD from cursor to continue cursor execution */ + void init_thd(THD *thd); + /* bzero cursor state in THD */ + void reset_thd(THD *thd); + + int open(JOIN *join); + int fetch(ulong num_rows); + void reset() { join= 0; } + bool is_open() const { return join != 0; } + void close(); + + void set_unit(SELECT_LEX_UNIT *unit_arg) { unit= unit_arg; } + Cursor() :join(0), unit(0) {} + ~Cursor(); +}; + + typedef struct st_select_check { uint const_ref,reg_ref; } SELECT_CHECK; @@ -332,10 +399,15 @@ void copy_fields(TMP_TABLE_PARAM *param); void copy_funcs(Item **func_ptr); bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int error, bool ignore_last_dupp_error); +uint find_shortest_key(TABLE *table, const key_map *usable_keys); /* functions from opt_sum.cc */ +bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +/* from sql_delete.cc, used by 
opt_range.cc */ +extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); + /* class to copying an field/item to a key struct */ class store_key :public Sql_alloc @@ -403,7 +475,7 @@ public: {} bool copy() { - return item->save_in_field(to_field, 1) || err != 0; + return item->save_in_field_no_warnings(to_field, 1) || err != 0; } const char *name() const { return "func"; } }; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index bda490e2916..557ec1bd5d2 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -39,6 +39,8 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), static int store_create_info(THD *thd, TABLE *table, String *packet); +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *packet); /* @@ -64,7 +66,8 @@ mysqld_show_dbs(THD *thd,const char *wild) strxmov(end," (",wild,")",NullS); field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,NullS,mysql_data_home,wild,1)) DBUG_RETURN(1); @@ -105,7 +108,8 @@ int mysqld_show_open_tables(THD *thd,const char *wild) field_list.push_back(new Item_return_int("In_use", 1, MYSQL_TYPE_TINY)); field_list.push_back(new Item_return_int("Name_locked", 4, MYSQL_TYPE_TINY)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); if (!(open_list=list_open_tables(thd,wild)) && thd->is_fatal_error) @@ -133,7 +137,8 @@ int mysqld_show_open_tables(THD *thd,const char *wild) ** A table is a .frm file in the current databasedir ***************************************************************************/ -int mysqld_show_tables(THD *thd,const char *db,const char *wild) +int mysqld_show_tables(THD *thd, const char *db, const char *wild, + bool show_type) { Item_string *field=new Item_string("",0,thd->charset()); List<Item> field_list; @@ -141,6 +146,7 @@ int mysqld_show_tables(THD *thd,const char *db,const char *wild) List<char> files; char *file_name; Protocol *protocol= thd->protocol; + uint len; DBUG_ENTER("mysqld_show_tables"); field->name=(char*) thd->alloc(20+(uint) strlen(db)+ @@ -149,10 +155,14 @@ int mysqld_show_tables(THD *thd,const char *db,const char *wild) if (wild && wild[0]) strxmov(end," (",wild,")",NullS); field->max_length=NAME_LEN; - (void) sprintf(path,"%s/%s",mysql_data_home,db); - (void) unpack_dirname(path,path); + (void) my_snprintf(path, FN_LEN, "%s/%s", mysql_data_home, db); + end= path + (len= unpack_dirname(path,path)); + len= FN_LEN - len; field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) + if (show_type) + field_list.push_back(new Item_empty_string("Table_type", 10)); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,db,path,wild,0)) DBUG_RETURN(-1); @@ -161,6 +171,24 @@ int mysqld_show_tables(THD *thd,const char *db,const char *wild) { protocol->prepare_for_resend(); protocol->store(file_name, system_charset_info); + if (show_type) + { + my_snprintf(end, len, "/%s%s", file_name, reg_ext); + switch (mysql_frm_type(path)) + { + case FRMTYPE_ERROR: + protocol->store("ERROR", system_charset_info); + break; + case FRMTYPE_TABLE: + protocol->store("BASE TABLE", system_charset_info); + break; + case FRMTYPE_VIEW: + protocol->store("VIEW", system_charset_info); + break; + default: + DBUG_ASSERT(0); // this should be impossible 
+ } + } if (protocol->write()) DBUG_RETURN(-1); } @@ -182,7 +210,8 @@ int mysqld_show_storage_engines(THD *thd) field_list.push_back(new Item_empty_string("Support",10)); field_list.push_back(new Item_empty_string("Comment",80)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); const char *default_type_name= @@ -221,10 +250,11 @@ struct show_privileges_st { static struct show_privileges_st sys_privileges[]= { {"Alter", "Tables", "To alter the table"}, - {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, {"Create", "Databases,Tables,Indexes", "To create new databases and tables"}, + {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Create view", "Tables", "To create new views"}, {"Delete", "Tables", "To delete existing rows"}, - {"Drop", "Databases,Tables", "To drop databases and tables"}, + {"Drop", "Databases,Tables", "To drop databases, tables, and views"}, {"File", "File access on server", "To read and write files on the server"}, {"Grant option", "Databases,Tables", "To give to other users those privileges you possess"}, {"Index", "Tables", "To create or drop indexes"}, @@ -237,7 +267,8 @@ static struct show_privileges_st sys_privileges[]= {"Replication slave","Server Admin","To read binary log events from the master"}, {"Select", "Tables", "To retrieve rows from table"}, {"Show databases","Server Admin","To see all databases with SHOW DATABASES"}, - {"Shutdown","Server Admin", "To shutdown the server"}, + {"Show view","Tables","To see views with SHOW CREATE VIEW"}, + {"Shutdown","Server Admin", "To shut down the server"}, {"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."}, {"Update", "Tables", "To update existing rows"}, {"Usage","Server Admin","No privileges - allow connect only"}, @@ -254,7 +285,8 @@ int mysqld_show_privileges(THD *thd) field_list.push_back(new Item_empty_string("Context",15)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); show_privileges_st *privilege= sys_privileges; @@ -329,7 +361,8 @@ int mysqld_show_column_types(THD *thd) field_list.push_back(new Item_empty_string("Default",NAME_LEN)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); /* TODO: Change the loop to not use 'i' */ @@ -463,6 +496,7 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) TABLE *table; Protocol *protocol= thd->protocol; TIME time; + int res; DBUG_ENTER("mysqld_extend_show_tables"); (void) sprintf(path,"%s/%s",mysql_data_home,db); @@ -502,7 +536,8 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) item->maybe_null=1; field_list.push_back(item=new Item_empty_string("Comment",80)); item->maybe_null=1; - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); if (mysql_find_files(thd,&files,db,path,wild,0)) @@ -516,20 +551,32 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) protocol->store(file_name, system_charset_info); table_list.db=(char*) db; table_list.real_name= table_list.alias= file_name; + table_list.select_lex= 
&thd->lex->select_lex; if (lower_case_table_names) my_casedn_str(files_charset_info, file_name); - if (!(table = open_ltable(thd, &table_list, TL_READ))) + if ((res= open_and_lock_tables(thd, &table_list))) { for (uint i=2 ; i < field_list.elements ; i++) protocol->store_null(); - // Send error to Comment field - protocol->store(thd->net.last_error, system_charset_info); - thd->clear_error(); + // Send error to Comment field if possible + if (res < 0) + { + protocol->store(thd->net.last_error, system_charset_info); + thd->clear_error(); + } + else + DBUG_RETURN(1); + } + else if (table_list.view) + { + for (uint i= 2; i < field_list.elements; i++) + protocol->store_null(); + protocol->store("view", system_charset_info); } else { const char *str; - handler *file=table->file; + handler *file= (table= table_list.table)->file; file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_NO_LOCK); protocol->store(file->table_type(), system_charset_info); protocol->store((ulonglong) table->frm_version); @@ -630,8 +677,8 @@ int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) if (comment != table->comment) my_free(comment,MYF(0)); } - close_thread_tables(thd,0); } + close_thread_tables(thd, 0); if (protocol->write()) DBUG_RETURN(-1); } @@ -654,15 +701,19 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, char tmp1[MAX_FIELD_WIDTH]; Item *item; Protocol *protocol= thd->protocol; + int res; DBUG_ENTER("mysqld_show_fields"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, table_list->real_name)); - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) + table_list->lock_type= TL_UNLOCK; + if ((res= open_and_lock_tables(thd, table_list))) { - send_error(thd); + if (res < 0) + send_error(thd); DBUG_RETURN(1); } + table= table_list->table; file=table->file; file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -685,7 +736,7 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, } // Send first number of fields and records if (protocol->send_records_num(&field_list, (ulonglong)file->records) || - protocol->send_fields(&field_list,0)) + protocol->send_fields(&field_list, Protocol::SEND_EOF)) DBUG_RETURN(1); restore_record(table,default_values); // Get empty record @@ -730,12 +781,13 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, { /* We have NOW() as default value but we use CURRENT_TIMESTAMP form - because it is more SQL standard comatible + because it is more SQL standard compatible */ protocol->store("CURRENT_TIMESTAMP", system_charset_info); } else if (field->unireg_check != Field::NEXT_NUMBER && - !field->is_null()) + !field->is_null() && + !(field->flags & NO_DEFAULT_VALUE_FLAG)) { // Not null by default /* Note: we have to convert the default value into @@ -773,7 +825,10 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, /* Add grant options & comments */ end=tmp; #ifndef NO_EMBEDDED_ACCESS_CHECKS - col_access= get_column_grant(thd,table_list,field) & COL_ACLS; + col_access= get_column_grant(thd, &table_list->grant, + table_list->db, + table_list->real_name, + field->field_name) & COL_ACLS; for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) { if (col_access & 1) @@ -807,34 +862,67 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) Protocol *protocol= thd->protocol; char buff[2048]; String buffer(buff, sizeof(buff), system_charset_info); + int res; DBUG_ENTER("mysqld_show_create"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, 
table_list->real_name)); - /* Only one table for now */ - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) + /* Only one table for now, but VIEW can involve several tables */ + if ((res= open_and_lock_tables(thd, table_list))) { - send_error(thd); + if (res < 0) + send_error(thd); DBUG_RETURN(1); } + /* TODO: add environment variables show when it become possible */ + if (thd->lex->only_view && !table_list->view) + { + my_error(ER_WRONG_OBJECT, MYF(0), table_list->db, + table_list->real_name, "VIEW"); + DBUG_RETURN(-1); + } + + table= table_list->table; - if (store_create_info(thd, table, &buffer)) + if ((table_list->view ? + view_store_create_info(thd, table_list, &buffer) : + store_create_info(thd, table, &buffer))) DBUG_RETURN(-1); List<Item> field_list; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - // 1024 is for not to confuse old clients - field_list.push_back(new Item_empty_string("Create Table", - max(buffer.length(),1024))); + if (table_list->view) + { + field_list.push_back(new Item_empty_string("View",NAME_LEN)); + field_list.push_back(new Item_empty_string("Create View", + max(buffer.length(),1024))); + } + else + { + field_list.push_back(new Item_empty_string("Table",NAME_LEN)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Table", + max(buffer.length(),1024))); + } - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); protocol->prepare_for_resend(); - protocol->store(table->table_name, system_charset_info); buffer.length(0); - if (store_create_info(thd, table, &buffer)) - DBUG_RETURN(-1); + if (table_list->view) + { + protocol->store(table_list->view_name.str, system_charset_info); + if (view_store_create_info(thd, table_list, &buffer)) + DBUG_RETURN(-1); + } + else + { + protocol->store(table->table_name, system_charset_info); + if (store_create_info(thd, table, &buffer)) + DBUG_RETURN(-1); + } protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + if (protocol->write()) DBUG_RETURN(1); send_eof(thd); @@ -901,7 +989,8 @@ int mysqld_show_create_db(THD *thd, char *dbname, field_list.push_back(new Item_empty_string("Database",NAME_LEN)); field_list.push_back(new Item_empty_string("Create Database",1024)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); protocol->prepare_for_resend(); @@ -943,7 +1032,8 @@ mysqld_show_logs(THD *thd) field_list.push_back(new Item_empty_string("Type",10)); field_list.push_back(new Item_empty_string("Status",10)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); #ifdef HAVE_BERKELEY_DB @@ -992,7 +1082,8 @@ mysqld_show_keys(THD *thd, TABLE_LIST *table_list) field_list.push_back(new Item_empty_string("Comment",255)); item->maybe_null=1; - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); KEY *key_info=table->key_info; @@ -1061,14 +1152,19 @@ void mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) { TABLE *table; + int res; DBUG_ENTER("mysqld_list_fields"); DBUG_PRINT("enter",("table: %s",table_list->real_name)); - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) + table_list->lock_type= TL_UNLOCK; + if ((res= open_and_lock_tables(thd, 
table_list))) { - send_error(thd); + if (res < 0) + send_error(thd); DBUG_VOID_RETURN; } + table= table_list->table; + List<Item> field_list; Field **ptr,*field; @@ -1079,7 +1175,8 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) field_list.push_back(new Item_field(field)); } restore_record(table,default_values); // Get empty record - if (thd->protocol->send_fields(&field_list,2)) + if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS | + Protocol::SEND_EOF)) DBUG_VOID_RETURN; thd->protocol->flush(); DBUG_VOID_RETURN; @@ -1115,7 +1212,7 @@ mysqld_dump_create_info(THD *thd, TABLE *table, int fd) /* Go through all character combinations and ensure that sql_lex.cc can - parse it as an identifer. + parse it as an identifier. SYNOPSIS require_quotes() @@ -1321,6 +1418,7 @@ store_create_info(THD *thd, TABLE *table, String *packet) field->unireg_check != Field::TIMESTAMP_UN_FIELD; has_default= (field->type() != FIELD_TYPE_BLOB && + !(field->flags & NO_DEFAULT_VALUE_FLAG) && field->unireg_check != Field::NEXT_NUMBER && !((foreign_db_mode || limited_mysql_mode) && has_now_default)); @@ -1522,6 +1620,51 @@ store_create_info(THD *thd, TABLE *table, String *packet) } +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *buff) +{ + my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | + MODE_ORACLE | + MODE_MSSQL | + MODE_DB2 | + MODE_MAXDB | + MODE_ANSI)) != 0; + buff->append("CREATE ", 7); + if (!foreign_db_mode) + { + buff->append("ALGORITHM=", 10); + switch(table->algorithm) + { + case VIEW_ALGORITHM_UNDEFINED: + buff->append("UNDEFINED ", 10); + break; + case VIEW_ALGORITHM_TMPTABLE: + buff->append("TEMPTABLE ", 10); + break; + case VIEW_ALGORITHM_MERGE: + buff->append("MERGE ", 6); + break; + default: + DBUG_ASSERT(0); // never should happen + } + } + buff->append("VIEW ", 5); + append_identifier(thd, buff, table->view_db.str, table->view_db.length); + buff->append('.'); + append_identifier(thd, buff, table->view_name.str, table->view_name.length); + buff->append(" AS ", 4); + buff->append(table->query.str, table->query.length); + if (table->with_check != VIEW_CHECK_NONE) + { + if (table->with_check == VIEW_CHECK_LOCAL) + buff->append(" WITH LOCAL CHECK OPTION", 24); + else + buff->append(" WITH CASCADED CHECK OPTION", 27); + } + return 0; +} + + /**************************************************************************** Return info about all processes returns for each thread: thread id, user, host, db, command, info @@ -1565,7 +1708,8 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) field->maybe_null=1; field_list.push_back(field=new Item_empty_string("Info",max_query_length)); field->maybe_null=1; - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_VOID_RETURN; VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list @@ -1598,7 +1742,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) thd_info->command=(int) tmp->command; if ((mysys_var= tmp->mysys_var)) pthread_mutex_lock(&mysys_var->mutex); - thd_info->proc_info= (char*) (tmp->killed ? "Killed" : 0); + thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0); #ifndef EMBEDDED_LIBRARY thd_info->state_info= (char*) (tmp->locked ? "Locked" : tmp->net.reading_or_writing ? 
@@ -1701,7 +1845,8 @@ int mysqld_show_collations(THD *thd, const char *wild) field_list.push_back(new Item_empty_string("Compiled",30)); field_list.push_back(new Item_return_int("Sortlen",3, FIELD_TYPE_SHORT)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) @@ -1754,7 +1899,8 @@ int mysqld_show_charsets(THD *thd, const char *wild) field_list.push_back(new Item_empty_string("Default collation",60)); field_list.push_back(new Item_return_int("Maxlen",3, FIELD_TYPE_SHORT)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) @@ -1778,7 +1924,8 @@ err: int mysqld_show(THD *thd, const char *wild, show_var_st *variables, enum enum_var_type value_type, - pthread_mutex_t *mutex) + pthread_mutex_t *mutex, + struct system_status_var *status_var) { char buff[1024]; List<Item> field_list; @@ -1788,7 +1935,8 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables, field_list.push_back(new Item_empty_string("Variable_name",30)); field_list.push_back(new Item_empty_string("Value",256)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); /* purecov: inspected */ null_lex_str.str= 0; // For sys_var->value_ptr() null_lex_str.length= 0; @@ -1815,6 +1963,10 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables, pos= end= buff; switch (show_type) { + case SHOW_LONG_STATUS: + case SHOW_LONG_CONST_STATUS: + value= ((char *) status_var + (uint) value); + /* fall through */ case SHOW_LONG: case SHOW_LONG_CONST: end= int10_to_str(*(long*) value, buff, 10); @@ -1879,6 +2031,11 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables, end= strend(pos); break; } + case SHOW_DOUBLE: + { + end= buff + sprintf(buff, "%f", *(double*) value); + break; + } #ifdef HAVE_OPENSSL /* First group - functions relying on CTX */ case SHOW_SSL_CTX_SESS_ACCEPT: @@ -2079,6 +2236,31 @@ int mysqld_show(THD *thd, const char *wild, show_var_st *variables, DBUG_RETURN(1); } + +/* collect status for all running threads */ + +void calc_sum_of_all_status(STATUS_VAR *to) +{ + DBUG_ENTER("calc_sum_of_all_status"); + + /* Ensure that thread id not killed during loop */ + VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list + + I_List_iterator<THD> it(threads); + THD *tmp; + + /* Get global values as base */ + *to= global_status_var; + + /* Add to this status from existing threads */ + while ((tmp= it++)) + add_to_status(to, &tmp->status_var); + + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + DBUG_VOID_RETURN; +} + + #ifdef __GNUC__ template class List_iterator_fast<char>; template class List<char>; diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 9f95ffa4884..1831c4a2f3d 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -78,3 +78,4 @@ int merge_buffers(SORTPARAM *param,IO_CACHE *from_file, IO_CACHE *to_file, uchar *sort_buffer, BUFFPEK *lastbuff,BUFFPEK *Fb, BUFFPEK *Tb,int flag); +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length); diff --git a/sql/sql_string.cc b/sql/sql_string.cc index c1701e7e9bf..69b530911f4 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -684,6 +684,19 @@ void String::qs_append(double *d) qs_append(ld); } +void 
String::qs_append(int i) +{ + char *buff = Ptr + str_length; + sprintf(buff,"%d", i); + str_length += strlen(buff); +} + +void String::qs_append(uint i) +{ + char *buff = Ptr + str_length; + sprintf(buff,"%u", i); + str_length += strlen(buff); +} /* Compare strings according to collation, without end space. diff --git a/sql/sql_string.h b/sql/sql_string.h index a8fb9574c0b..cb5b1fb25fd 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -284,6 +284,8 @@ public: Ptr[str_length]= c; str_length++; } + void qs_append(int i); + void qs_append(uint i); /* Inline (general) functions used by the protocol functions */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c69235a5647..e73bf81eb74 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -95,7 +95,7 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, } } - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, 0); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); err: pthread_mutex_unlock(&LOCK_open); @@ -140,8 +140,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, thd->mysys_var->current_cond= &COND_refresh; VOID(pthread_mutex_lock(&LOCK_open)); - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, - dont_log_query); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 1, + dont_log_query); pthread_mutex_unlock(&LOCK_open); @@ -163,6 +163,7 @@ int mysql_rm_table_part2_with_lock(THD *thd, if_exists If set, don't give an error if table doesn't exists. In this case we give an warning of level 'NOTE' drop_temporary Only drop temporary tables + drop_view Allow to delete VIEW .frm dont_log_query Don't log the query TODO: @@ -182,7 +183,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, */ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool dont_log_query) + bool drop_temporary, bool drop_view, + bool dont_log_query) { TABLE_LIST *table; char path[FN_REFLEN], *alias; @@ -194,7 +196,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (lock_table_names(thd, tables)) DBUG_RETURN(1); - for (table=tables ; table ; table=table->next) + for (table= tables; table; table= table->next_local) { char *db=table->db; mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL); @@ -222,8 +224,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, strxmov(path, mysql_data_home, "/", db, "/", alias, reg_ext, NullS); (void) unpack_filename(path,path); } - if (drop_temporary || - (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE))) + if (drop_temporary || + (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE)) || + (!drop_view && mysql_frm_type(path) != FRMTYPE_TABLE)) { if (if_exists) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, @@ -274,17 +277,13 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (some_tables_deleted || tmp_table_deleted || !error) { query_cache_invalidate3(thd, tables, 0); - if (!dont_log_query) + if (!dont_log_query && mysql_bin_log.is_open()) { - mysql_update_log.write(thd, thd->query,thd->query_length); - if (mysql_bin_log.is_open()) - { - if (!error) - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - tmp_table_deleted && !some_tables_deleted); - mysql_bin_log.write(&qinfo); - } + if (!error) + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + tmp_table_deleted && !some_tables_deleted); + mysql_bin_log.write(&qinfo); } } @@ -617,7 
+616,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, break; } if (!(sql_field->flags & NOT_NULL_FLAG)) - sql_field->pack_flag|=FIELDFLAG_MAYBE_NULL; + sql_field->pack_flag|= FIELDFLAG_MAYBE_NULL; + if (sql_field->flags & NO_DEFAULT_VALUE_FLAG) + sql_field->pack_flag|= FIELDFLAG_NO_DEFAULT; sql_field->offset= pos; if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER) auto_increment++; @@ -1099,7 +1100,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, (From ALTER TABLE) DESCRIPTION - If one creates a temporary table, this is automaticly opened + If one creates a temporary table, this is automatically opened no_log is needed for the case of CREATE ... SELECT, as the logging will be done later in sql_insert.cc @@ -1283,18 +1284,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, } thd->tmp_table_used= 1; } - if (!tmp_table) + if (!tmp_table && mysql_bin_log.is_open()) { - // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - test(create_info->options & - HA_LEX_CREATE_TMP_TABLE)); - mysql_bin_log.write(&qinfo); - } + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + test(create_info->options & + HA_LEX_CREATE_TMP_TABLE)); + mysql_bin_log.write(&qinfo); } error=0; end: @@ -1348,7 +1344,7 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end) ****************************************************************************/ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, + TABLE_LIST *create_table, List<create_field> *extra_fields, List<Key> *keys, List<Item> *items, @@ -1378,7 +1374,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, field=item->tmp_table_field(&tmp_table); else field=create_tmp_field(thd, &tmp_table, item, item->type(), - (Item ***) 0, &tmp_field, 0, 0, 0); + (Item ***) 0, &tmp_field,0,0,0); if (!field || !(cr_field=new create_field(field,(item->type() == Item::FIELD_ITEM ? ((Item_field *)item)->field : @@ -1386,34 +1382,42 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(0); extra_fields->push_back(cr_field); } - /* create and lock table */ - /* QQ: create and open should be done atomic ! */ /* + create and lock table + We don't log the statement, it will be logged later. + If this is a HEAP table, the automatic DELETE FROM which is written to the binlog when a HEAP table is opened for the first time since startup, must not be written: 1) it would be wrong (imagine we're in CREATE SELECT: we don't want to delete from it) 2) it would be written before the CREATE TABLE, which is a wrong order. So we keep binary logging disabled when we open_table(). + TODO: create and open should be done atomic ! 
*/ - tmp_disable_binlog(thd); - if (!mysql_create_table(thd,db,name,create_info,*extra_fields, - *keys,0,select_field_count)) { - if (!(table=open_table(thd,db,name,name,(bool*) 0))) - quick_rm_table(create_info->db_type,db,table_case_name(create_info,name)); + tmp_disable_binlog(thd); + if (!mysql_create_table(thd, create_table->db, create_table->real_name, + create_info, *extra_fields, *keys, 0, + select_field_count)) + { + if (!(table= open_table(thd, create_table, thd->mem_root, (bool*) 0))) + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->real_name)); + } + reenable_binlog(thd); + if (!table) // open failed + DBUG_RETURN(0); } - reenable_binlog(thd); - if (!table) - DBUG_RETURN(0); + table->reginfo.lock_type=TL_WRITE; - if (!((*lock)=mysql_lock_tables(thd,&table,1))) + if (!((*lock)= mysql_lock_tables(thd, &table,1))) { VOID(pthread_mutex_lock(&LOCK_open)); hash_delete(&open_cache,(byte*) table); VOID(pthread_mutex_unlock(&LOCK_open)); - quick_rm_table(create_info->db_type,db,table_case_name(create_info, name)); + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->real_name)); DBUG_RETURN(0); } table->file->extra(HA_EXTRA_WRITE_CACHE); @@ -1435,11 +1439,12 @@ mysql_rename_table(enum db_type base, { char from[FN_REFLEN], to[FN_REFLEN]; char tmp_from[NAME_LEN+1], tmp_to[NAME_LEN+1]; - handler *file=get_new_handler((TABLE*) 0, base); + handler *file=(base == DB_TYPE_UNKNOWN ? 0 : get_new_handler((TABLE*) 0, base)); int error=0; DBUG_ENTER("mysql_rename_table"); - if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) + if (lower_case_table_names == 2 && file && + !(file->table_flags() & HA_FILE_BASED)) { /* Table handler expects to get all file names as lower case */ strmov(tmp_from, old_name); @@ -1457,13 +1462,15 @@ mysql_rename_table(enum db_type base, fn_format(from,from,"","",4); fn_format(to,to, "","",4); - if (!(error=file->rename_table((const char*) from,(const char *) to))) + if (!file || + !(error=file->rename_table((const char*) from,(const char *) to))) { if (rename_file_ext(from,to,reg_ext)) { error=my_errno; /* Restore old file name */ - file->rename_table((const char*) to,(const char *) from); + if (file) + file->rename_table((const char*) to,(const char *) from); } } delete file; @@ -1530,7 +1537,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, Win32 clients must also have a WRITE LOCK on the table ! 
*/ -static bool close_cached_table(THD *thd, TABLE *table) +void close_cached_table(THD *thd, TABLE *table) { DBUG_ENTER("close_cached_table"); @@ -1546,7 +1553,6 @@ static bool close_cached_table(THD *thd, TABLE *table) /* When lock on LOCK_open is freed other threads can continue */ pthread_cond_broadcast(&COND_refresh); - DBUG_RETURN(0); } static int send_check_errmsg(THD *thd, TABLE_LIST* table, @@ -1643,7 +1649,7 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, char name[FN_REFLEN]; strxmov(name, mysql_data_home, "/", table_list->db, "/", table_list->real_name, NullS); - if (openfrm(name, "", 0, 0, 0, &tmp_table)) + if (openfrm(thd, name, "", 0, 0, 0, &tmp_table)) DBUG_RETURN(0); // Can't open frm file table= &tmp_table; } @@ -1765,11 +1771,12 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, item->maybe_null = 1; field_list.push_back(item = new Item_empty_string("Msg_text", 255)); item->maybe_null = 1; - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); mysql_ha_flush(thd, tables, MYSQL_HA_CLOSE_FINAL); - for (table = tables; table; table = table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; char* db = (table->db) ? table->db : thd->db; @@ -1911,8 +1918,9 @@ send_result_message: reopen the table and do ha_innobase::analyze() on it. */ close_thread_tables(thd); - TABLE_LIST *save_next= table->next; - table->next= 0; + TABLE_LIST *save_next_local= table->next_local, + *save_next_global= table->next_global; + table->next_local= table->next_global= 0; result_code= mysql_recreate_table(thd, table, 0); close_thread_tables(thd); if (!result_code) // recreation went ok @@ -1922,7 +1930,8 @@ send_result_message: result_code= 0; // analyze went ok } result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; - table->next= save_next; + table->next_local= save_next_local; + table->next_global= save_next_global; goto send_result_message; } @@ -2136,9 +2145,9 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, DBUG_RETURN(-1); } + bzero((gptr)&src_tables_list, sizeof(src_tables_list)); src_tables_list.db= table_ident->db.str ? 
table_ident->db.str : thd->db; src_tables_list.real_name= table_ident->table.str; - src_tables_list.next= 0; if (lock_and_wait_for_table_name(thd, &src_tables_list)) goto err; @@ -2212,7 +2221,6 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, } // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -2230,7 +2238,7 @@ table_exists: char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TABLE_EXISTS_ERROR), table_name); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR,warn_buff); res= 0; } @@ -2326,7 +2334,6 @@ mysql_discard_or_import_tablespace(THD *thd, error=1; if (error) goto err; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0); @@ -2560,7 +2567,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, enum enum_duplicates handle_duplicates, ALTER_INFO *alter_info, bool do_send_ok) { - TABLE *table,*new_table; + TABLE *table,*new_table=0; int error; char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN]; char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias; @@ -2569,6 +2576,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, ulonglong next_insert_id; uint db_create_options, used_fields; enum db_type old_db_type,new_db_type; + bool need_copy_table; DBUG_ENTER("mysql_alter_table"); thd->proc_info="init"; @@ -2658,7 +2666,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->row_type=table->row_type; thd->proc_info="setup"; - if (alter_info->is_simple && !table->tmp_table) + if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) && + !table->tmp_table) // no need to touch frm { error=0; if (new_name != table_name || new_db != db) @@ -2686,23 +2695,24 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { switch (alter_info->keys_onoff) { case LEAVE_AS_IS: - break; + break; case ENABLE: - VOID(pthread_mutex_lock(&LOCK_open)); - wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); - VOID(pthread_mutex_unlock(&LOCK_open)); - error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); - /* COND_refresh will be signaled in close_thread_tables() */ - break; + VOID(pthread_mutex_lock(&LOCK_open)); + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); + VOID(pthread_mutex_unlock(&LOCK_open)); + error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + /* COND_refresh will be signaled in close_thread_tables() */ + break; case DISABLE: - VOID(pthread_mutex_lock(&LOCK_open)); - wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); - VOID(pthread_mutex_unlock(&LOCK_open)); - error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); - /* COND_refresh will be signaled in close_thread_tables() */ - break; + VOID(pthread_mutex_lock(&LOCK_open)); + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); + VOID(pthread_mutex_unlock(&LOCK_open)); + error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + /* COND_refresh will be signaled in close_thread_tables() */ + break; } } + if (error == HA_ERR_WRONG_COMMAND) { push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, @@ -2712,7 +2722,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } if (!error) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { 
thd->clear_error(); @@ -2734,7 +2743,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Full alter table */ - /* let new create options override the old ones */ + /* Let new create options override the old ones */ if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) create_info->min_rows=table->min_rows; if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) @@ -2995,6 +3004,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->options|=HA_LEX_CREATE_TMP_TABLE; /* + better have a negative test here, instead of positive, like + alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|... + so that ALTER TABLE won't break when somebody will add new flag + */ + need_copy_table=(alter_info->flags & ~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) || + create_info->used_fields & ~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD) || + table->tmp_table); + create_info->frm_only= !need_copy_table; + + /* Handling of symlinked tables: If no rename: Create new data file and index file on the same disk as the @@ -3040,8 +3059,9 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } else create_info->data_file_name=create_info->index_file_name=0; + + /* We don't log the statement, it will be logged later. */ { - /* We don't log the statement, it will be logged later. */ tmp_disable_binlog(thd); error= mysql_create_table(thd, new_db, tmp_name, create_info,create_list,key_list,1,0); @@ -3049,35 +3069,45 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (error) DBUG_RETURN(error); } - if (table->tmp_table) - new_table=open_table(thd,new_db,tmp_name,tmp_name,0); - else + if (need_copy_table) { - char path[FN_REFLEN]; - my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home, - new_db, tmp_name); - fn_format(path,path,"","",4); - new_table=open_temporary_table(thd, path, new_db, tmp_name,0); - } - if (!new_table) - { - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - goto err; + if (table->tmp_table) + { + TABLE_LIST tbl; + bzero((void*) &tbl, sizeof(tbl)); + tbl.db= new_db; + tbl.real_name= tbl.alias= tmp_name; + new_table= open_table(thd, &tbl, thd->mem_root, 0); + } + else + { + char path[FN_REFLEN]; + my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home, + new_db, tmp_name); + fn_format(path,path,"","",4); + new_table=open_temporary_table(thd, path, new_db, tmp_name,0); + } + if (!new_table) + { + VOID(quick_rm_table(new_db_type,new_db,tmp_name)); + goto err; + } } - /* We don't want update TIMESTAMP fields during ALTER TABLE. 
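The need_copy_table logic above is intentionally a negative test: anything outside a short whitelist of frm-only changes forces a full table copy, so a flag added later defaults to the safe path. A minimal standalone sketch of that pattern (the flag names below are invented for illustration, not the server's real constants):

#include <cstdint>
#include <cstdio>

// Hypothetical ALTER flags; only the first two are treated as "frm-only" cheap changes.
enum : uint32_t {
  ALTER_CHANGE_DEFAULT = 1u << 0,
  ALTER_TABLE_OPTIONS  = 1u << 1,
  ALTER_ADD_COLUMN     = 1u << 2,
  ALTER_ADD_INDEX      = 1u << 3,
  // Any flag added later automatically falls outside the cheap set below.
};

static bool need_copy(uint32_t flags)
{
  const uint32_t cheap = ALTER_CHANGE_DEFAULT | ALTER_TABLE_OPTIONS;
  // Negative test: copy unless every requested change is in the cheap set.
  return (flags & ~cheap) != 0;
}

int main()
{
  std::printf("%d\n", need_copy(ALTER_CHANGE_DEFAULT));                    // 0: frm-only
  std::printf("%d\n", need_copy(ALTER_CHANGE_DEFAULT | ALTER_ADD_COLUMN)); // 1: full copy
}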
*/ - new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - new_table->next_number_field=new_table->found_next_number_field; thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; thd->proc_info="copy to tmp table"; - next_insert_id=thd->next_insert_id; // Remember for loggin + next_insert_id=thd->next_insert_id; // Remember for logging copied=deleted=0; - if (!new_table->is_view) + if (new_table && !new_table->is_view) + { + new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + new_table->next_number_field=new_table->found_next_number_field; error=copy_data_between_tables(table,new_table,create_list, handle_duplicates, order_num, order, &copied, &deleted); + } thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -3107,7 +3137,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, my_free((gptr) new_table,MYF(0)); goto err; } - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -3117,8 +3146,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto end_temporary; } - intern_close_table(new_table); /* close temporary table */ - my_free((gptr) new_table,MYF(0)); + if (new_table) + { + intern_close_table(new_table); /* close temporary table */ + my_free((gptr) new_table,MYF(0)); + } VOID(pthread_mutex_lock(&LOCK_open)); if (error) { @@ -3130,7 +3162,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Data is copied. Now we rename the old table to a temp name, rename the new one to the old name, remove all entries from the old table - from the cash, free all locks, close the old table and remove it. + from the cache, free all locks, close the old table and remove it. 
*/ thd->proc_info="rename result table"; @@ -3159,12 +3191,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, close the original table at before doing the rename */ table_name=thd->strdup(table_name); // must be saved - if (close_cached_table(thd, table)) - { // Aborted - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - VOID(pthread_mutex_unlock(&LOCK_open)); - goto err; - } + close_cached_table(thd, table); table=0; // Marker that table is closed } #if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2)) @@ -3174,6 +3201,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, error=0; + if (!need_copy_table) + new_db_type=old_db_type=DB_TYPE_UNKNOWN; // this type cannot happen in regular ALTER if (mysql_rename_table(old_db_type,db,table_name,db,old_name)) { error=1; @@ -3242,7 +3271,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } thd->proc_info="end"; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -3397,7 +3425,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error= 1; break; } @@ -3423,6 +3451,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, to->file->print_error(error,MYF(0)); break; } + to->file->restore_auto_increment(); delete_count++; } else @@ -3483,11 +3512,12 @@ int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, lex->key_list.empty(); lex->col_list.empty(); lex->alter_info.reset(); - lex->alter_info.is_simple= 0; // Force full recreate bzero((char*) &create_info,sizeof(create_info)); create_info.db_type=DB_TYPE_DEFAULT; create_info.row_type=ROW_TYPE_DEFAULT; create_info.default_table_charset=default_charset_info; + /* Force alter table to recreate table */ + lex->alter_info.flags= ALTER_CHANGE_COLUMN; DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, lex->create_list, lex->key_list, 0, (ORDER *) 0, @@ -3507,10 +3537,11 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) item->maybe_null= 1; field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); item->maybe_null= 1; - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(-1); - for (table= tables; table; table= table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; TABLE *t; diff --git a/sql/sql_test.cc b/sql/sql_test.cc index d992c93f8fc..6cffa9df2c6 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -181,9 +181,10 @@ TEST_join(JOIN *join) " quick select checked for each record (keys: %s)\n", tab->select->quick_keys.print(buf)); else if (tab->select->quick) - fprintf(DBUG_FILE," quick select used on key %s, length: %d\n", - form->key_info[tab->select->quick->index].name, - tab->select->quick->max_used_key_length); + { + fprintf(DBUG_FILE, " quick select used:\n"); + tab->select->quick->dbug_dump(18, false); + } else VOID(fputs(" select used\n",DBUG_FILE)); } @@ -202,6 +203,104 @@ TEST_join(JOIN *join) DBUG_VOID_RETURN; } + +/* + Print the current state during query optimization. 
+ + SYNOPSIS + print_plan() + join pointer to the structure providing all context info for + the query + read_time the cost of the best partial plan + record_count estimate for the number of records returned by the best + partial plan + idx length of the partial QEP in 'join->positions'; + also an index in the array 'join->best_ref'; + info comment string to appear above the printout + + DESCRIPTION + This function prints to the log file DBUG_FILE the members of 'join' that + are used during query optimization (join->positions, join->best_positions, + and join->best_ref) and few other related variables (read_time, + record_count). + Useful to trace query optimizer functions. + + RETURN + None +*/ + +void +print_plan(JOIN* join, double read_time, double record_count, + uint idx, const char *info) +{ + uint i; + POSITION pos; + JOIN_TAB *join_table; + JOIN_TAB **plan_nodes; + TABLE* table; + + if (info == 0) + info= ""; + + DBUG_LOCK_FILE; + if (join->best_read == DBL_MAX) + { + fprintf(DBUG_FILE,"%s; idx:%u, best: DBL_MAX, current:%g\n", + info, idx, read_time); + } + else + { + fprintf(DBUG_FILE,"%s; idx: %u, best: %g, current: %g\n", + info, idx, join->best_read, read_time); + } + + /* Print the tables in JOIN->positions */ + fputs(" POSITIONS: ", DBUG_FILE); + for (i= 0; i < idx ; i++) + { + pos = join->positions[i]; + table= pos.table->table; + if (table) + fputs(table->real_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + /* + Print the tables in JOIN->best_positions only if at least one complete plan + has been found. An indicator for this is the value of 'join->best_read'. + */ + fputs("BEST_POSITIONS: ", DBUG_FILE); + if (join->best_read < DBL_MAX) + { + for (i= 0; i < idx ; i++) + { + pos= join->best_positions[i]; + table= pos.table->table; + if (table) + fputs(table->real_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + } + fputc('\n', DBUG_FILE); + + /* Print the tables in JOIN->best_ref */ + fputs(" BEST_REF: ", DBUG_FILE); + for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++) + { + join_table= (*plan_nodes); + fputs(join_table->table->real_name, DBUG_FILE); + fprintf(DBUG_FILE, "(%lu,%lu,%lu)", + (ulong) join_table->found_records, + (ulong) join_table->records, + (ulong) join_table->read_time); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + DBUG_UNLOCK_FILE; +} + #endif typedef struct st_debug_lock @@ -369,16 +468,20 @@ read_first: %10lu\n\ write: %10lu\n\ delete %10lu\n\ update: %10lu\n", - ha_read_key_count, ha_read_next_count, - ha_read_rnd_count, ha_read_first_count, - ha_write_count, ha_delete_count, ha_update_count); + thd->status_var.ha_read_key_count, + thd->status_var.ha_read_next_count, + thd->status_var.ha_read_rnd_count, + thd->status_var.ha_read_first_count, + thd->status_var.ha_write_count, + thd->status_var.ha_delete_count, + thd->status_var.ha_update_count); pthread_mutex_unlock(&LOCK_status); printf("\nTable status:\n\ Opened tables: %10lu\n\ Open tables: %10lu\n\ Open files: %10lu\n\ Open streams: %10lu\n", - opened_tables, + thd->status_var.opened_tables, (ulong) cached_tables(), (ulong) my_file_opened, (ulong) my_stream_opened); diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc new file mode 100644 index 00000000000..5d175be74e1 --- /dev/null +++ b/sql/sql_trigger.cc @@ -0,0 +1,425 @@ +#include "mysql_priv.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "parse_file.h" +#include "sql_acl.h" + + +static const LEX_STRING triggers_file_type= {(char *)"TRIGGERS", 8}; +static const char * const 
triggers_file_ext= ".TRG"; + +/* + Table of .TRG file field descriptors. + We have here only one field now because in nearest future .TRG + files will be merged into .FRM files (so we don't need something + like md5 or created fields). +*/ +static File_option triggers_file_parameters[]= +{ + {{(char*)"triggers", 8}, offsetof(class Table_triggers_list, definitions_list), + FILE_OPTIONS_STRLIST}, + {{0, 0}, 0, FILE_OPTIONS_STRING} +}; + + +/* + Create or drop trigger for table. + + SYNOPSIS + mysql_create_or_drop_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one table for which trigger is created. + create - whenever we create (true) or drop (false) trigger + + NOTE + This function is mainly responsible for opening and locking of table and + invalidation of all its instances in table cache after trigger creation. + Real work on trigger creation/dropping is done inside Table_triggers_list + methods. + + RETURN VALUE + 0 - Success, non-0 in case of error. +*/ +int mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) +{ + TABLE *table; + int result= 0; + + DBUG_ENTER("mysql_create_or_drop_trigger"); + + /* + QQ: This function could be merged in mysql_alter_table() function + But do we want this ? + */ + + if (open_and_lock_tables(thd, tables)) + DBUG_RETURN(-1); + + /* + TODO: We should check if user has TRIGGER privilege for table here. + Now we just require SUPER privilege for creating/dropping because + we don't have proper privilege checking for triggers in place yet. + */ + if (check_global_access(thd, SUPER_ACL)) + DBUG_RETURN(1); + + table= tables->table; + + /* + We do not allow creation of triggers on views or temporary tables. + We have to do this check here and not in + Table_triggers_list::create_trigger() because we want to avoid messing + with table cash for views and temporary tables. + */ + if (tables->view || table->tmp_table != NO_TMP_TABLE) + { + my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias); + DBUG_RETURN(-1); + } + + if (!table->triggers) + { + if (!create) + { + my_error(ER_TRG_DOES_NOT_EXIST, MYF(0)); + DBUG_RETURN(-1); + } + + if (!(table->triggers= new (&table->mem_root) Table_triggers_list())) + DBUG_RETURN(-1); + } + + /* + We don't want perform our operations while global read lock is held + so we have to wait until its end and then prevent it from occuring + again until we are done. (Acquiring LOCK_open is not enough because + global read lock is held without helding LOCK_open). + */ + if (wait_if_global_read_lock(thd, 0, 0)) + DBUG_RETURN(-1); + + VOID(pthread_mutex_lock(&LOCK_open)); + if ((create ? table->triggers->create_trigger(thd, tables): + table->triggers->drop_trigger(thd, tables))) + result= -1; + + /* It is sensible to invalidate table in any case */ + close_cached_table(thd, table); + VOID(pthread_mutex_unlock(&LOCK_open)); + start_waiting_global_read_lock(thd); + + if (!result) + send_ok(thd); + + DBUG_RETURN(result); +} + + +/* + Create trigger for table. + + SYNOPSIS + create_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one open table for which trigger is + created. 
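The triggers_file_parameters[] table above lets the generic parse_file.cc code read and write class members it knows nothing about, by pairing a parameter name with a member offset. A rough standalone sketch of that descriptor-table idea, with invented struct and field names and none of the real File_option machinery:

#include <cstddef>
#include <cstdio>

struct TriggerStore {                 // stand-in for the object backed by the definition file
  const char *definitions;            // stand-in for definitions_list
  const char *names;
};

struct FieldDesc {                    // stand-in for File_option: parameter name -> member offset
  const char *name;
  size_t offset;
};

static const FieldDesc fields[] = {
  {"triggers", offsetof(TriggerStore, definitions)},
  {"names",    offsetof(TriggerStore, names)},
};

// Generic writer: walks the descriptor table instead of naming members directly,
// so one reader/writer can serve many definition-file layouts.
static void dump(const TriggerStore &s)
{
  for (const FieldDesc &f : fields)
  {
    const char *value =
        *reinterpret_cast<const char *const *>(
            reinterpret_cast<const char *>(&s) + f.offset);
    std::printf("%s=%s\n", f.name, value);
  }
}

int main()
{
  TriggerStore s = {"CREATE TRIGGER t1 BEFORE INSERT ...", "t1"};
  dump(s);
}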
+ + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables) +{ + LEX *lex= thd->lex; + TABLE *table= tables->table; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + LEX_STRING *trg_def, *name; + List_iterator_fast<LEX_STRING> it(names_list); + + /* We don't allow creation of several triggers of the same type yet */ + if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) + { + my_error(ER_TRG_ALREADY_EXISTS, MYF(0)); + return 1; + } + + /* Let us check if trigger with the same name exists */ + while ((name= it++)) + { + if (my_strcasecmp(system_charset_info, lex->name_and_length.str, + name->str) == 0) + { + my_error(ER_TRG_ALREADY_EXISTS, MYF(0)); + return 1; + } + } + + /* + Here we are creating file with triggers and save all triggers in it. + sql_create_definition_file() files handles renaming and backup of older + versions + */ + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + triggers_file_ext, NullS) - file_buff; + file.str= file_buff; + + /* + Soon we will invalidate table object and thus Table_triggers_list object + so don't care about place to which trg_def->ptr points and other + invariants (e.g. we don't bother to update names_list) + + QQ: Hmm... probably we should not care about setting up active thread + mem_root too. + */ + if (!(trg_def= (LEX_STRING *)alloc_root(&table->mem_root, + sizeof(LEX_STRING))) || + definitions_list.push_back(trg_def, &table->mem_root)) + return 1; + + trg_def->str= thd->query; + trg_def->length= thd->query_length; + + return sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)this, triggers_file_parameters, 3); +} + + +/* + Drop trigger for table. + + SYNOPSIS + drop_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one open table for which trigger is + dropped. + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables) +{ + LEX *lex= thd->lex; + LEX_STRING *name; + List_iterator_fast<LEX_STRING> it_name(names_list); + List_iterator<LEX_STRING> it_def(definitions_list); + + while ((name= it_name++)) + { + it_def++; + + if (my_strcasecmp(system_charset_info, lex->name_and_length.str, + name->str) == 0) + { + /* + Again we don't care much about other things required for + clean trigger removing since table will be reopened anyway. + */ + it_def.remove(); + + if (definitions_list.is_empty()) + { + char path[FN_REFLEN]; + + /* + TODO: Probably instead of removing .TRG file we should move + to archive directory but this should be done as part of + parse_file.cc functionality (because we will need it + elsewhere). 
+ */ + strxnmov(path, FN_REFLEN, mysql_data_home, "/", tables->db, "/", + tables->real_name, triggers_file_ext, NullS); + unpack_filename(path, path); + return my_delete(path, MYF(MY_WME)); + } + else + { + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, + "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + triggers_file_ext, NullS) - file_buff; + file.str= file_buff; + + return sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)this, + triggers_file_parameters, 3); + } + } + } + + my_error(ER_TRG_DOES_NOT_EXIST, MYF(0)); + return 1; +} + + +Table_triggers_list::~Table_triggers_list() +{ + for (int i= 0; i < 3; i++) + for (int j= 0; j < 2; j++) + delete bodies[i][j]; + + if (old_field) + for (Field **fld_ptr= old_field; *fld_ptr; fld_ptr++) + delete *fld_ptr; +} + + +/* + Check whenever .TRG file for table exist and load all triggers it contains. + + SYNOPSIS + check_n_load() + thd - current thread context + db - table's database name + table_name - table's name + table - pointer to table object + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::check_n_load(THD *thd, const char *db, + const char *table_name, TABLE *table) +{ + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + + DBUG_ENTER("Table_triggers_list::check_n_load"); + + strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/", table_name, + triggers_file_ext, NullS); + path.length= unpack_filename(path_buff, path_buff); + path.str= path_buff; + + // QQ: should we analyze errno somehow ? + if (access(path_buff, F_OK)) + DBUG_RETURN(0); + + /* + File exists so we got to load triggers + FIXME: A lot of things to do here e.g. how about other funcs and being + more paranoical ? + */ + + if ((parser= sql_parse_prepare(&path, &table->mem_root, 1))) + { + if (!strncmp(triggers_file_type.str, parser->type()->str, + parser->type()->length)) + { + Field **fld, **old_fld; + Table_triggers_list *triggers= + new (&table->mem_root) Table_triggers_list(); + + if (!triggers) + DBUG_RETURN(1); + + if (parser->parse((gptr)triggers, &table->mem_root, + triggers_file_parameters, 1)) + DBUG_RETURN(1); + + table->triggers= triggers; + + /* + We have to prepare array of Field objects which will represent OLD.* + row values by referencing to record[1] instead of record[0] + + TODO: This could be avoided if there is no ON UPDATE trigger. + */ + if (!(triggers->old_field= + (Field **)alloc_root(&table->mem_root, (table->fields + 1) * + sizeof(Field*)))) + DBUG_RETURN(1); + + for (fld= table->field, old_fld= triggers->old_field; *fld; + fld++, old_fld++) + { + /* + QQ: it is supposed that it is ok to use this function for field + cloning... + */ + if (!(*old_fld= (*fld)->new_field(&table->mem_root, table))) + DBUG_RETURN(1); + (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] - + table->record[0])); + } + *old_fld= 0; + + List_iterator_fast<LEX_STRING> it(triggers->definitions_list); + LEX_STRING *trg_create_str, *trg_name_str; + char *trg_name_buff; + LEX *old_lex= thd->lex, lex; + + thd->lex= &lex; + + while ((trg_create_str= it++)) + { + lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length); + lex.trg_table= table; + if (yyparse((void *)thd) || thd->is_fatal_error) + { + /* + Free lex associated resources + QQ: Do we really need all this stuff here ? 
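The OLD.* preparation above clones every Field and then shifts the clone by the byte distance between record[1] and record[0], so the copies read the previous row image at the same offsets. A toy standalone version of that buffer-shift idea (all names invented for illustration):

#include <cstddef>
#include <cstdio>
#include <cstring>

// One flat allocation holding two row images, like record[0] (new) and record[1] (old).
static unsigned char rows[16];
static unsigned char *const record0 = rows;      // new row image
static unsigned char *const record1 = rows + 8;  // old row image

struct Column {                // minimal stand-in for Field: an offset plus a base buffer
  size_t offset;
  unsigned char *base;

  int read() const
  {
    int v;
    std::memcpy(&v, base + offset, sizeof v);
    return v;
  }
  void move(std::ptrdiff_t diff) { base += diff; }   // counterpart of Field::move_field
};

int main()
{
  int old_val = 1, new_val = 2;
  std::memcpy(record1, &old_val, sizeof old_val);
  std::memcpy(record0, &new_val, sizeof new_val);

  Column col_new{0, record0};
  Column col_old = col_new;             // clone the accessor for the same column...
  col_old.move(record1 - record0);      // ...and shift it onto the old row image

  std::printf("NEW=%d OLD=%d\n", col_new.read(), col_old.read());
}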
+ */ + if (lex.sphead) + { + if (&lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete lex.sphead; + } + goto err_with_lex_cleanup; + } + + triggers->bodies[lex.trg_chistics.event] + [lex.trg_chistics.action_time]= lex.sphead; + lex.sphead= 0; + + if (!(trg_name_buff= alloc_root(&table->mem_root, + sizeof(LEX_STRING) + + lex.name_and_length.length + 1))) + goto err_with_lex_cleanup; + + trg_name_str= (LEX_STRING *)trg_name_buff; + trg_name_buff+= sizeof(LEX_STRING); + memcpy(trg_name_buff, lex.name_and_length.str, + lex.name_and_length.length + 1); + trg_name_str->str= trg_name_buff; + trg_name_str->length= lex.name_and_length.length; + + if (triggers->names_list.push_back(trg_name_str, &table->mem_root)) + goto err_with_lex_cleanup; + + lex_end(&lex); + } + thd->lex= old_lex; + + DBUG_RETURN(0); + +err_with_lex_cleanup: + // QQ: anything else ? + lex_end(&lex); + thd->lex= old_lex; + DBUG_RETURN(1); + } + + /* + We don't care about this error message much because .TRG files will + be merged into .FRM anyway. + */ + my_error(ER_WRONG_OBJECT, MYF(0), table_name, triggers_file_ext, + "TRIGGER"); + DBUG_RETURN(1); + } + + DBUG_RETURN(1); +} diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h new file mode 100644 index 00000000000..8ab2ab003f8 --- /dev/null +++ b/sql/sql_trigger.h @@ -0,0 +1,62 @@ +/* + This class holds all information about triggers of table. + + QQ: Will it be merged into TABLE in future ? +*/ +class Table_triggers_list: public Sql_alloc +{ + /* Triggers as SPs grouped by event, action_time */ + sp_head *bodies[3][2]; + /* + Copy of TABLE::Field array with field pointers set to old version + of record, used for OLD values in trigger on UPDATE. + */ + Field **old_field; + /* + Names of triggers. + Should correspond to order of triggers on definitions_list, + used in CREATE/DROP TRIGGER for looking up trigger by name. + */ + List<LEX_STRING> names_list; + +public: + /* + Field responsible for storing triggers definitions in file. + It have to be public because we are using it directly from parser. + */ + List<LEX_STRING> definitions_list; + + Table_triggers_list(): + old_field(0) + { + bzero((char *)bodies, sizeof(bodies)); + } + ~Table_triggers_list(); + + bool create_trigger(THD *thd, TABLE_LIST *table); + bool drop_trigger(THD *thd, TABLE_LIST *table); + bool process_triggers(THD *thd, trg_event_type event, + trg_action_time_type time_type) + { + int res= 0; + + if (bodies[event][time_type]) + { + /* + Similar to function invocation we don't need to surpress sending of + ok packets here because don't allow execute statements from trigger. + + FIXME: We should juggle with security context here (because trigger + should be invoked with creator rights). 
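Table_triggers_list above stores at most one trigger body per (event, action time) slot, and process_triggers() simply runs whatever occupies the matching slot. A compact standalone sketch of that two-dimensional dispatch table (event and timing names are illustrative only):

#include <cstdio>
#include <functional>

enum TrgEvent  { TRG_INSERT, TRG_UPDATE, TRG_DELETE, TRG_EVENT_MAX };
enum TrgTiming { TRG_BEFORE, TRG_AFTER, TRG_TIMING_MAX };

struct TriggerSet {
  // One optional body per (event, timing) pair, like bodies[3][2] in the patch.
  std::function<void()> bodies[TRG_EVENT_MAX][TRG_TIMING_MAX];

  void fire(TrgEvent e, TrgTiming t) const
  {
    if (bodies[e][t])            // empty slot: nothing to execute
      bodies[e][t]();
  }
};

int main()
{
  TriggerSet ts;
  ts.bodies[TRG_UPDATE][TRG_BEFORE] = [] { std::puts("BEFORE UPDATE body"); };

  ts.fire(TRG_UPDATE, TRG_BEFORE);   // runs the registered body
  ts.fire(TRG_INSERT, TRG_AFTER);    // no-op, slot is empty
}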
+ */ + res= bodies[event][time_type]->execute_function(thd, 0, 0, 0); + } + + return res; + } + + static bool check_n_load(THD *thd, const char *db, const char *table_name, + TABLE *table); + + friend class Item_trigger_field; +}; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index e0e8f8d42c5..792e2cdf775 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -28,11 +28,17 @@ int mysql_union(THD *thd, LEX *lex, select_result *result, SELECT_LEX_UNIT *unit) { DBUG_ENTER("mysql_union"); - int res= 0; + int res, res_cln; if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK))) res= unit->exec(); - res|= unit->cleanup(); - DBUG_RETURN(res); + if (res == 0 && thd->cursor && thd->cursor->is_open()) + { + thd->cursor->set_unit(unit); + res_cln= 0; + } + else + res_cln= unit->cleanup(); + DBUG_RETURN(res?res:res_cln); } @@ -71,7 +77,7 @@ bool select_union::send_data(List<Item> &values) return 0; } fill_record(table->field, values, 1); - if (thd->net.report_error || write_record(table,&info)) + if (thd->net.report_error || write_record(thd, table,&info)) { if (thd->net.last_errno == ER_RECORD_FILE_FULL) { @@ -136,7 +142,7 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd) fake_select_lex->ftfunc_list= &fake_select_lex->ftfunc_list_alloc; fake_select_lex->table_list.link_in_list((byte *)&result_table_list, (byte **) - &result_table_list.next); + &result_table_list.next_local); return options_tmp; } @@ -210,20 +216,17 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, goto err; thd_arg->lex->current_select= sl; - offset_limit_cnt= sl->offset_limit; - select_limit_cnt= sl->select_limit+sl->offset_limit; - if (select_limit_cnt < sl->select_limit) - select_limit_cnt= HA_POS_ERROR; // no limit - if (select_limit_cnt == HA_POS_ERROR || sl->braces) + set_limit(sl, sl); + if (sl->braces) sl->options&= ~OPTION_FOUND_ROWS; can_skip_order_by= is_union && (!sl->braces || select_limit_cnt == HA_POS_ERROR); - + res= join->prepare(&sl->ref_pointer_array, (TABLE_LIST*) sl->table_list.first, sl->with_wild, sl->where, - (can_skip_order_by ? 0 : sl->order_list.elements) + + (can_skip_order_by ? 0 : sl->order_list.elements) + sl->group_list.elements, can_skip_order_by ? (ORDER*) 0 : (ORDER *)sl->order_list.first, @@ -235,6 +238,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, /* There are no * in the statement anymore (for PS) */ sl->with_wild= 0; last_procedure= join->procedure; + if (res || thd_arg->is_fatal_error) goto err; if (sl == first_select) @@ -279,6 +283,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, List_iterator_fast<Item> tp(types); Item_arena *arena= thd->current_arena; Item *type; + while ((type= tp++)) { if (type->result_type() == STRING_RESULT && @@ -309,14 +314,10 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, thd_arg->lex->current_select= lex_select_save; if (!item_list.elements) { - /* - We're in statement prepare or in execution - of a conventional statement. 
- */ + Field **field; Item_arena *tmp_arena,backup; tmp_arena= thd->change_arena_if_needed(&backup); - Field **field; for (field= table->field; *field; field++) { Item_field *item= new Item_field(*field); @@ -333,6 +334,11 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { /* prepare fake select to initialize it correctly */ ulong options_tmp= init_prepare_fake_select_lex(thd); + /* + it should be done only once (because item_list builds only onece + per statement) + */ + DBUG_ASSERT(fake_select_lex->join == 0); if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, result))) { @@ -354,11 +360,11 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, fake_select_lex->table_list.empty(); } } - else if (arena->is_stmt_execute()) + else if (!arena->is_conventional()) { /* - We're in execution of a prepared statement: reset field items - to point at fields from the created temporary table. + We're in execution of a prepared statement or stored procedure: + reset field items to point at fields from the created temporary table. */ List_iterator_fast<Item> it(item_list); for (Field **field= table->field; *field; field++) @@ -509,8 +515,8 @@ int st_select_lex_unit::exec() allocate JOIN for fake select only once (prevent mysql_select automatic allocation) */ - if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, - result))) + if (!(fake_select_lex->join= new JOIN(thd, item_list, + fake_select_lex->options, result))) { fake_select_lex->table_list.empty(); DBUG_RETURN(-1); @@ -525,12 +531,14 @@ int st_select_lex_unit::exec() else { JOIN_TAB *tab,*end; - for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++) + for (tab=join->join_tab, end=tab+join->tables ; + tab && tab != end ; + tab++) { delete tab->select; delete tab->quick; } - join->init(thd, item_list, thd->options, result); + join->init(thd, item_list, fake_select_lex->options, result); } res= mysql_select(thd, &fake_select_lex->ref_pointer_array, &result_table_list, @@ -538,7 +546,7 @@ int st_select_lex_unit::exec() global_parameters->order_list.elements, (ORDER*)global_parameters->order_list.first, (ORDER*) NULL, NULL, (ORDER*) NULL, - options_tmp | SELECT_NO_UNLOCK, + fake_select_lex->options | SELECT_NO_UNLOCK, result, this, fake_select_lex); fake_select_lex->table_list.empty(); @@ -561,6 +569,7 @@ int st_select_lex_unit::exec() int st_select_lex_unit::cleanup() { int error= 0; + JOIN *join; DBUG_ENTER("st_select_lex_unit::cleanup"); if (cleaned) @@ -577,9 +586,8 @@ int st_select_lex_unit::cleanup() free_tmp_table(thd, table); table= 0; // Safety } - JOIN *join; - SELECT_LEX *sl= first_select_in_union(); - for (; sl; sl= sl->next_select()) + + for (SELECT_LEX *sl= first_select_in_union(); sl; sl= sl->next_select()) { if ((join= sl->join)) { @@ -604,6 +612,7 @@ int st_select_lex_unit::cleanup() error|= join->cleanup(); delete join; } + DBUG_RETURN(error); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index d3597f274dc..561480bd289 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -23,6 +23,8 @@ #include "mysql_priv.h" #include "sql_acl.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields); @@ -48,6 +50,42 @@ static bool compare_record(TABLE *table, ulong query_id) } +/* + check that all fields are real fields + + SYNOPSIS + check_fields() + thd thread handler + items Items for check + + RETURN + TRUE Items can't be used in UPDATE + FALSE Items are OK 
+*/ + +static bool check_fields(THD *thd, List<Item> &items) +{ + List_iterator<Item> it(items); + Item *item; + Item_field *field; + while ((item= it++)) + { + if (!(field= item->filed_for_view_update())) + { + /* item has name, because it comes from VIEW SELECT list */ + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name); + return TRUE; + } + /* + we make temporary copy of Item_field, to avoid influence of changing + result_field on Item_ref which refer on this field + */ + thd->change_item_tree(it.ref(), new Item_field(thd, field)); + } + return FALSE; +} + + int mysql_update(THD *thd, TABLE_LIST *table_list, List<Item> &fields, @@ -57,10 +95,12 @@ int mysql_update(THD *thd, ha_rows limit, enum enum_duplicates handle_duplicates) { - bool using_limit=limit != HA_POS_ERROR; + bool using_limit= limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; bool used_key_is_modified, transactional_table, log_delayed; + bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); int error=0; + int res; uint used_index; #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; @@ -71,15 +111,13 @@ int mysql_update(THD *thd, TABLE *table; SQL_SELECT *select; READ_RECORD info; - TABLE_LIST *update_table_list= ((TABLE_LIST*) - thd->lex->select_lex.table_list.first); DBUG_ENTER("mysql_update"); LINT_INIT(used_index); LINT_INIT(timestamp_query_id); - if ((open_and_lock_tables(thd, table_list))) - DBUG_RETURN(-1); + if ((error= open_and_lock_tables(thd, table_list))) + DBUG_RETURN(error); thd->proc_info="init"; table= table_list->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); @@ -89,10 +127,12 @@ int mysql_update(THD *thd, table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS - want_privilege= table->grant.want_privilege; + /* In case of view TABLE_LIST contain right privilages request */ + want_privilege= (table_list->view ? 
+ table_list->grant.want_privilege : + table->grant.want_privilege); #endif - if ((error= mysql_prepare_update(thd, table_list, update_table_list, - &conds, order_num, order))) + if ((error= mysql_prepare_update(thd, table_list, &conds, order_num, order))) DBUG_RETURN(error); old_used_keys= table->used_keys; // Keys used in WHERE @@ -108,10 +148,24 @@ int mysql_update(THD *thd, /* Check the fields we are going to modify */ #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege=want_privilege; + table_list->grant.want_privilege= table->grant.want_privilege= want_privilege; #endif - if (setup_fields(thd, 0, update_table_list, fields, 1, 0, 0)) - DBUG_RETURN(-1); /* purecov: inspected */ + { + thd->lex->select_lex.no_wrap_view_item= 1; + res= setup_fields(thd, 0, table_list, fields, 1, 0, 0); + thd->lex->select_lex.no_wrap_view_item= 0; + if (res) + DBUG_RETURN(-1); /* purecov: inspected */ + } + if (table_list->view && check_fields(thd, fields)) + { + DBUG_RETURN(-1); + } + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE"); + DBUG_RETURN(-1); + } if (table->timestamp_field) { // Don't set timestamp column if this is modified @@ -123,9 +177,10 @@ int mysql_update(THD *thd, #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check values */ - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); #endif - if (setup_fields(thd, 0, update_table_list, values, 0, 0, 0)) + if (setup_fields(thd, 0, table_list, values, 0, 0, 0)) { free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(-1); /* purecov: inspected */ @@ -159,11 +214,13 @@ int mysql_update(THD *thd, } init_ftfuncs(thd, &thd->lex->select_lex, 1); /* Check if we are modifying a key that we are used to search with */ + if (select && select->quick) + { + used_index= select->quick->index; used_key_is_modified= (!select->quick->unique_key_range() && - check_if_key_used(table, - (used_index=select->quick->index), - fields)); + select->quick->check_if_keys_used(&fields)); + } else if ((used_index=table->file->key_used_on_scan) < MAX_KEY) used_key_is_modified=check_if_key_used(table, used_index, fields); else @@ -175,7 +232,7 @@ int mysql_update(THD *thd, matching rows before updating the table! */ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (old_used_keys.is_set(used_index)) + if ( (used_index != MAX_KEY) && old_used_keys.is_set(used_index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); @@ -221,8 +278,12 @@ int mysql_update(THD *thd, if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME))) goto err; - + + /* If quick select is used, initialize it before retrieving rows. 
*/ + if (select && select->quick && select->quick->reset()) + goto err; init_read_record(&info,thd,table,select,0,1); + thd->proc_info="Searching rows for update"; uint tmp_limit= limit; @@ -277,6 +338,9 @@ int mysql_update(THD *thd, if (handle_duplicates == DUP_IGNORE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + + if (select && select->quick && select->quick->reset()) + goto err; init_read_record(&info,thd,table,select,0,1); updated= found= 0; @@ -285,6 +349,13 @@ int mysql_update(THD *thd, thd->proc_info="Updating"; query_id=thd->query_id; + transactional_table= table->file->has_transactions(); + thd->no_trans_update= 0; + thd->abort_on_warning= test(handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + while (!(error=info.read_record(&info)) && !thd->killed) { if (!(select && select->skip_record())) @@ -293,12 +364,29 @@ int mysql_update(THD *thd, if (fill_record(fields,values, 0) || thd->net.report_error) break; /* purecov: inspected */ found++; + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_BEFORE); + if (compare_record(table, query_id)) { + if ((res= table_list->view_check_option(thd, ignore_err)) != + VIEW_CHECK_OK) + { + found--; + if (res == VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } + } if (!(error=table->file->update_row((byte*) table->record[1], (byte*) table->record[0]))) { updated++; + thd->no_trans_update= !transactional_table; } else if (handle_duplicates != DUP_IGNORE || error != HA_ERR_FOUND_DUPP_KEY) @@ -309,6 +397,10 @@ int mysql_update(THD *thd, break; } } + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER); + if (!--limit && using_limit) { error= -1; // Simulate end of file @@ -332,13 +424,13 @@ int mysql_update(THD *thd, This must be before binlog writing and ha_autocommit_... */ if (updated) + { query_cache_invalidate3(thd, table_list, 1); + } - transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); if ((updated || (error < 0)) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -365,18 +457,20 @@ int mysql_update(THD *thd, free_underlaid_joins(thd, &thd->lex->select_lex); if (error >= 0) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */ + send_error(thd,thd->killed_errno()); /* purecov: inspected */ else { char buff[80]; sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; + send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? 
thd->insert_id() : 0L,buff); DBUG_PRINT("info",("%d records updated",updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ + thd->abort_on_warning= 0; free_io_cache(table); DBUG_RETURN(0); @@ -388,6 +482,7 @@ err: table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); } + thd->abort_on_warning= 0; DBUG_RETURN(-1); } @@ -397,8 +492,7 @@ err: SYNOPSIS mysql_prepare_update() thd - thread handler - table_list - global table list - update_table_list - local table list of UPDATE SELECT_LEX + table_list - global/local table list conds - conditions order_num - number of ORDER BY list entries order - ORDER BY clause list @@ -409,38 +503,38 @@ err: -1 - error (message is not sent to user) */ int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, Item **conds, uint order_num, ORDER *order) { TABLE *table= table_list->table; TABLE_LIST tables; List<Item> all_fields; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_update"); #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); #endif bzero((char*) &tables,sizeof(tables)); // For ORDER BY tables.table= table; tables.alias= table_list->alias; - if (setup_tables(update_table_list) || - setup_conds(thd, update_table_list, conds) || - thd->lex->select_lex.setup_ref_array(thd, order_num) || - setup_order(thd, thd->lex->select_lex.ref_pointer_array, - update_table_list, all_fields, all_fields, order) || - setup_ftfuncs(&thd->lex->select_lex)) + if (setup_tables(thd, table_list, conds) || + setup_conds(thd, table_list, conds) || + select_lex->setup_ref_array(thd, order_num) || + setup_order(thd, select_lex->ref_pointer_array, + table_list, all_fields, all_fields, order) || + setup_ftfuncs(select_lex)) DBUG_RETURN(-1); /* Check that we are not using table that we are updating in a sub select */ - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + if (unique_table(table_list, table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); DBUG_RETURN(-1); } - + select_lex->fix_prepare_information(thd, conds); DBUG_RETURN(0); } @@ -466,203 +560,212 @@ static table_map get_table_map(List<Item> *items) } - /* - Setup multi-update handling and call SELECT to do the join + make update specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_update_prepare() + thd thread handler + + RETURN + 0 OK + -1 Error */ -int mysql_multi_update(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields, - List<Item> *values, - COND *conds, - ulong options, - enum enum_duplicates handle_duplicates, - SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) +int mysql_multi_update_prepare(THD *thd) { - int res; - multi_update *result; + LEX *lex= thd->lex; + ulong opened_tables; + TABLE_LIST *table_list= lex->query_tables; + List<Item> *fields= &lex->select_lex.item_list; TABLE_LIST *tl; - TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; - List<Item> total_list; + table_map tables_for_update; + int res; + bool update_view= 0; + uint table_count; const bool using_lock_tables= thd->locked_tables != 0; - bool initialized_dervied= 0; - DBUG_ENTER("mysql_multi_update"); - - select_lex->select_limit= HA_POS_ERROR; + DBUG_ENTER("mysql_multi_update_prepare"); + /* open tables and create derived ones, but do not lock and fill them */ + 
if (open_tables(thd, table_list, & table_count) || + mysql_handle_derived(lex, &mysql_derived_prepare)) + DBUG_RETURN(thd->net.report_error ? -1 : 1); /* - The following loop is here to to ensure that we only lock tables - that we are going to update with a write lock + Ensure that we have update privilege for all tables and columns in the + SET part */ - for (;;) + for (tl= table_list; tl; tl= tl->next_local) { - table_map update_tables, derived_tables=0; - uint tnr, table_count; - - if ((res=open_tables(thd, table_list, &table_count))) - DBUG_RETURN(res); + TABLE *table= tl->table; + /* + Update of derived tables is checked later + We don't check privileges here, becasue then we would get error + "UPDATE command denided .. for column N" instead of + "Target table ... is not updatable" + */ + if (!tl->derived) + tl->grant.want_privilege= table->grant.want_privilege= + (UPDATE_ACL & ~table->grant.privilege); + } - /* Only need to call lock_tables if we are not using LOCK TABLES */ - if (!using_lock_tables && - ((res= lock_tables(thd, table_list, table_count)))) - DBUG_RETURN(res); + /* + setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables() + second time, but this call will do nothing (there are check for second + call in setup_tables()). + */ + if (setup_tables(thd, table_list, &lex->select_lex.where) || + (lex->select_lex.no_wrap_view_item= 1, + res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0), + lex->select_lex.no_wrap_view_item= 0, + res)) + DBUG_RETURN(-1); - if (!initialized_dervied) + for (tl= table_list; tl ; tl= tl->next_local) + { + if (tl->view) { - initialized_dervied= 1; - relink_tables_for_derived(thd); - if ((res= mysql_handle_derived(thd->lex))) - DBUG_RETURN(res); + update_view= 1; + break; } + } - /* - Ensure that we have update privilege for all tables and columns in the - SET part - While we are here, initialize the table->map field to check which - tables are updated and updatability of derived tables - */ - for (tl= update_list, tnr=0 ; tl ; tl=tl->next) - { - TABLE *table= tl->table; - /* - Update of derived tables is checked later - We don't check privileges here, becasue then we would get error - "UPDATE command denided .. for column N" instead of - "Target table ... 
is not updatable" - */ - if (!tl->derived) - table->grant.want_privilege= (UPDATE_ACL & ~table->grant.privilege); - table->map= (table_map) 1 << (tnr++); - } + if (update_view && check_fields(thd, *fields)) + { + DBUG_RETURN(-1); + } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); + tables_for_update= get_table_map(fields); - update_tables= get_table_map(fields); + /* + Setup timestamp handling and locking mode + */ + for (tl= table_list; tl ; tl= tl->next_local) + { + TABLE *table= tl->table; - /* Unlock the tables in preparation for relocking */ - if (!using_lock_tables) - { - mysql_unlock_tables(thd, thd->lock); - thd->lock= 0; - } + /* We only need SELECT privilege for columns in the values list */ + tl->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); + // Only set timestamp column if this is not modified + if (table->timestamp_field && + table->timestamp_field->query_id == thd->query_id) + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - /* - Count tables and setup timestamp handling - Set also the table locking strategy according to the update map - */ - for (tl= update_list; tl; tl= tl->next) + /* if table will be updated then check that it is unique */ + if (table->map & tables_for_update) { - TABLE *table= tl->table; - /* if table will be updated then check that it is unique */ - if (table->map & update_tables) - { - /* - Multi-update can't be constructed over-union => we always have - single SELECT on top and have to check underlaying SELECTs of it - */ - if (select_lex->check_updateable_in_subqueries(tl->db, - tl->real_name)) - { - my_error(ER_UPDATE_TABLE_USED, MYF(0), - tl->real_name); - DBUG_RETURN(-1); - } - DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); - tl->lock_type= thd->lex->multi_lock_option; - tl->updating= 1; - } - else + if (!tl->updatable || check_key_in_view(thd, tl)) { - DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); - tl->lock_type= TL_READ; - tl->updating= 0; + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE"); + DBUG_RETURN(-1); } - if (tl->derived) - derived_tables|= table->map; - else if (!using_lock_tables) - tl->table->reginfo.lock_type= tl->lock_type; - } - if (thd->lex->derived_tables && (update_tables & derived_tables)) - { - // find derived table which cause error - for (tl= update_list; tl; tl= tl->next) + /* + Multi-update can't be constructed over-union => we always have + single SELECT on top and have to check underlaying SELECTs of it + */ + if (lex->select_lex.check_updateable_in_subqueries(tl->db, + tl->real_name)) { - if (tl->derived && (update_tables & tl->table->map)) - { - my_printf_error(ER_NON_UPDATABLE_TABLE, ER(ER_NON_UPDATABLE_TABLE), - MYF(0), tl->alias, "UPDATE"); - DBUG_RETURN(-1); - } + my_error(ER_UPDATE_TABLE_USED, MYF(0), tl->real_name); + DBUG_RETURN(-1); } - } - - /* Relock the tables with the correct modes */ - res= lock_tables(thd, table_list, table_count); - if (using_lock_tables) + DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); + tl->lock_type= lex->multi_lock_option; + tl->updating= 1; + } + else { - if (res) - DBUG_RETURN(res); - break; // Don't have to do setup_field() + DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); + tl->lock_type= TL_READ; + tl->updating= 0; } + if (!using_lock_tables) + tl->table->reginfo.lock_type= tl->lock_type; + } + + opened_tables= thd->status_var.opened_tables; + /* now lock and fill tables */ + if (lock_tables(thd, 
table_list, table_count)) + DBUG_RETURN(thd->net.report_error ? -1 : 1); + /* + we have to re-call fix_fields() for fixed items, because locking may have + reopened tables + */ + if (opened_tables != thd->status_var.opened_tables) + { /* - We must setup fields again as the file may have been reopened - during lock_tables + Field items cleanup(). There are only Item_fields in the list, so we + do not need to do Item tree walking */ + List_iterator_fast<Item> it(*fields); + Item *item; + while (item= it++) { - List_iterator_fast<Item> field_it(*fields); - Item_field *item; - - while ((item= (Item_field *) field_it++)) - { - item->field->query_id= 0; - item->cleanup(); - } + item->cleanup(); } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); - /* - If lock succeded and the table map didn't change since the above lock - we can continue. - */ - if (!res && update_tables == get_table_map(fields)) - break; - /* - There was some very unexpected changes in the table definition between - open tables and lock tables. Close tables and try again. - */ - close_thread_tables(thd); - } + /* We have to clean up the translation tables of views. */ + for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global) + tbl->cleanup_items(); - /* Setup timestamp handling */ - for (tl= update_list; tl; tl= tl->next) - { - TABLE *table= tl->table; - /* Only set timestamp column if this is not modified */ - if (table->timestamp_field && - table->timestamp_field->query_id == thd->query_id) - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + /* undo setup_tables() */ + table_list->setup_is_done= 0; - /* We only need SELECT privilege for columns in the values list */ - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + if (setup_tables(thd, table_list, &lex->select_lex.where) || + (lex->select_lex.no_wrap_view_item= 1, + res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0), + lex->select_lex.no_wrap_view_item= 0, + res)) + DBUG_RETURN(-1); } + if (thd->fill_derived_tables() && + mysql_handle_derived(lex, &mysql_derived_filling)) + DBUG_RETURN(thd->net.report_error ?
-1 : 1); + DBUG_RETURN (0); +} + - if (!(result=new multi_update(thd, update_list, fields, values, - handle_duplicates))) +/* + Setup multi-update handling and call SELECT to do the join +*/ + +int mysql_multi_update(THD *thd, + TABLE_LIST *table_list, + List<Item> *fields, + List<Item> *values, + COND *conds, + ulong options, + enum enum_duplicates handle_duplicates, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) +{ + int res; + multi_update *result; + DBUG_ENTER("mysql_multi_update"); + + if ((res= mysql_multi_update_prepare(thd))) + DBUG_RETURN(res); + + if (!(result= new multi_update(thd, table_list, fields, values, + handle_duplicates))) DBUG_RETURN(-1); + thd->no_trans_update= 0; + thd->abort_on_warning= test(thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + + List<Item> total_list; res= mysql_select(thd, &select_lex->ref_pointer_array, - select_lex->get_table_list(), select_lex->with_wild, + table_list, select_lex->with_wild, total_list, conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, (ORDER *)NULL, options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, result, unit, select_lex); delete result; + thd->abort_on_warning= 0; DBUG_RETURN(res); } @@ -720,7 +823,7 @@ int multi_update::prepare(List<Item> ¬_used_values, */ update.empty(); - for (table_ref= all_tables; table_ref; table_ref=table_ref->next) + for (table_ref= all_tables; table_ref; table_ref= table_ref->next_local) { TABLE *table=table_ref->table; if (tables_to_update & table->map) @@ -729,7 +832,7 @@ int multi_update::prepare(List<Item> ¬_used_values, sizeof(*tl)); if (!tl) DBUG_RETURN(1); - update.link_in_list((byte*) tl, (byte**) &tl->next); + update.link_in_list((byte*) tl, (byte**) &tl->next_local); tl->shared= table_count++; table->no_keyread=1; table->used_keys.clear_all(); @@ -788,11 +891,11 @@ int multi_update::prepare(List<Item> ¬_used_values, which will cause an error when reading a row. 
(This issue is mostly relevent for MyISAM tables) */ - for (table_ref= all_tables; table_ref; table_ref=table_ref->next) + for (table_ref= all_tables; table_ref; table_ref= table_ref->next_local) { TABLE *table=table_ref->table; if (!(tables_to_update & table->map) && - find_real_table_in_list(update_tables, table_ref->db, + find_table_in_local_list(update_tables, table_ref->db, table_ref->real_name)) table->no_cache= 1; // Disable row cache } @@ -823,7 +926,7 @@ multi_update::initialize_tables(JOIN *join) table_to_update= 0; /* Create a temporary table for keys to all tables, except main table */ - for (table_ref= update_tables; table_ref; table_ref=table_ref->next) + for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local) { TABLE *table=table_ref->table; uint cnt= table_ref->shared; @@ -919,8 +1022,7 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields) case JT_ALL: /* If range search on index */ if (join_tab->quick) - return !check_if_key_used(table, join_tab->quick->index, - *fields); + return !join_tab->quick->check_if_keys_used(fields); /* If scanning in clustered key */ if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) && table->primary_key < MAX_KEY) @@ -936,7 +1038,7 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields) multi_update::~multi_update() { TABLE_LIST *table; - for (table= update_tables ; table; table= table->next) + for (table= update_tables ; table; table= table->next_local) table->table->no_keyread= table->table->no_cache= 0; if (tmp_tables) @@ -961,9 +1063,10 @@ multi_update::~multi_update() bool multi_update::send_data(List<Item> ¬_used_values) { TABLE_LIST *cur_table; + bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); DBUG_ENTER("multi_update::send_data"); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { TABLE *table= cur_table->table; /* @@ -993,6 +1096,15 @@ bool multi_update::send_data(List<Item> ¬_used_values) if (compare_record(table, thd->query_id)) { int error; + if ((error= cur_table->view_check_option(thd, ignore_err)) != + VIEW_CHECK_OK) + { + found--; + if (error == VIEW_CHECK_SKIP) + continue; + else if (error == VIEW_CHECK_ERROR) + DBUG_RETURN(1); + } if (!updated++) { /* @@ -1006,7 +1118,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) table->record[0]))) { updated--; - if (handle_duplicates != DUP_IGNORE || + if (handle_duplicates != DUP_IGNORE || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message @@ -1014,6 +1126,8 @@ bool multi_update::send_data(List<Item> ¬_used_values) DBUG_RETURN(1); } } + else if (!table->file->has_transactions()) + thd->no_trans_update= 1; } } else @@ -1081,7 +1195,7 @@ int multi_update::do_updates(bool from_send_error) do_update= 0; // Don't retry this function if (!found) DBUG_RETURN(0); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { byte *ref_pos; @@ -1215,7 +1329,6 @@ bool multi_update::send_eof() if (updated && (local_error <= 0 || !trans_safe)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (local_error <= 0) @@ -1247,8 +1360,9 @@ bool multi_update::send_eof() sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - ::send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? 
found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; + ::send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->insert_id() : 0L,buff); return 0; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc new file mode 100644 index 00000000000..760ada9cdbd --- /dev/null +++ b/sql/sql_view.cc @@ -0,0 +1,1063 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include "mysql_priv.h" +#include "sql_acl.h" +#include "sql_select.h" +#include "parse_file.h" +#include "sp.h" + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode); + +const char *updatable_views_with_limit_names[]= { "NO", "YES", NullS }; +TYPELIB updatable_views_with_limit_typelib= +{ + array_elements(updatable_views_with_limit_names)-1, "", + updatable_views_with_limit_names, + 0 +}; + + +/* + Creating/altering VIEW procedure + + SYNOPSIS + mysql_create_view() + thd - thread handler + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN VALUE + 0 OK + -1 Error + 1 Error and error message given +*/ + +int mysql_create_view(THD *thd, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + bool link_to_local; + /* first table in list is the target VIEW name => cut it off */ + TABLE_LIST *view= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; + TABLE_LIST *tbl; + SELECT_LEX *select_lex= &lex->select_lex, *sl; + SELECT_LEX_UNIT *unit= &lex->unit; + int res= 0; + DBUG_ENTER("mysql_create_view"); + + if (lex->proc_list.first || + lex->result) + { + my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), (lex->result ? + "INTO" : + "PROCEDURE")); + res= -1; + goto err; + } + if (lex->derived_tables || + lex->variables_used || lex->param_list.elements) + { + my_error((lex->derived_tables ? + ER_VIEW_SELECT_DERIVED : + ER_VIEW_SELECT_VARIABLE), MYF(0)); + res= -1; + goto err; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Privilege check for view creation: + - the user has the CREATE VIEW privilege on the view table + - the user has the DELETE privilege in case of ALTER VIEW or CREATE OR REPLACE + VIEW + - the user has some (SELECT/UPDATE/INSERT/DELETE) privileges on columns of + underlying tables used at the top of the SELECT list (because they can be + (theoretically) updated, so it is enough to have the UPDATE privilege on + them, for example) + - the user has the SELECT privilege on columns used in expressions of the VIEW select + - for columns of underlying tables used at the top of the SELECT list it will also be + checked that the user does not get more privileges on the corresponding column of the view + table (i.e.
user will not get some privileges by view creation) + */ + if ((check_access(thd, CREATE_VIEW_ACL, view->db, &view->grant.privilege, + 0, 0) || + grant_option && check_grant(thd, CREATE_VIEW_ACL, view, 0, 1, 0)) || + (mode != VIEW_CREATE_NEW && + (check_access(thd, DELETE_ACL, view->db, &view->grant.privilege, + 0, 0) || + grant_option && check_grant(thd, DELETE_ACL, view, 0, 1, 0)))) + DBUG_RETURN(1); + for (sl= select_lex; sl; sl= sl->next_select()) + { + for (tbl= sl->get_table_list(); tbl; tbl= tbl->next_local) + { + /* + Ensure that we have some privileges on this table, more strict check + will be done on column level after preparation, + */ + if (check_some_access(thd, VIEW_ANY_ACL, tbl)) + { + my_printf_error(ER_TABLEACCESS_DENIED_ERROR, + ER(ER_TABLEACCESS_DENIED_ERROR), + MYF(0), + "ANY", + thd->priv_user, + thd->host_or_ip, + tbl->real_name); + DBUG_RETURN(-1); + } + /* + Mark this table as a table which will be checked after the prepare + phase + */ + tbl->table_in_first_from_clause= 1; + + /* + We need to check only SELECT_ACL for all normal fields, fields for + which we need "any" (SELECT/UPDATE/INSERT/DELETE) privilege will be + checked later + */ + tbl->grant.want_privilege= SELECT_ACL; + /* + Make sure that all rights are loaded to the TABLE::grant field. + + tbl->real_name will be correct name of table because VIEWs are + not opened yet. + */ + fill_effective_table_privileges(thd, &tbl->grant, tbl->db, + tbl->real_name); + } + } + + if (&lex->select_lex != lex->all_selects_list) + { + /* check tables of subqueries */ + for (tbl= tables; tbl; tbl= tbl->next_global) + { + if (!tbl->table_in_first_from_clause) + { + if (check_access(thd, SELECT_ACL, tbl->db, + &tbl->grant.privilege, 0, 0) || + grant_option && check_grant(thd, SELECT_ACL, tbl, 0, 1, 0)) + { + res= 1; + goto err; + } + } + } + } + /* + Mark fields for special privilege check ("any" privilege) + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + List_iterator_fast<Item> it(sl->item_list); + Item *item; + while ((item= it++)) + { + Item_field *field; + if ((field= item->filed_for_view_update())) + field->any_privileges= 1; + } + } +#endif + + if ((res= open_and_lock_tables(thd, tables))) + DBUG_RETURN(res); + + /* + check that tables are not temporary and this VIEW do not used in query + (it is possible with ALTERing VIEW) + */ + for (tbl= tables; tbl; tbl= tbl->next_global) + { + /* is this table temporary and is not view? */ + if (tbl->table->tmp_table != NO_TMP_TABLE && !tbl->view) + { + my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias); + res= -1; + goto err; + } + + /* is this table view and the same view which we creates now? 
*/ + if (tbl->view && + strcmp(tbl->view_db.str, view->db) == 0 && + strcmp(tbl->view_name.str, view->real_name) == 0) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), tbl->view_db.str, tbl->view_name.str); + res= -1; + goto err; + } + + /* + Copy the privileges of the underlying VIEWs which were filled by + fill_effective_table_privileges + (they were not copied at derived tables processing) + */ + tbl->table->grant.privilege= tbl->grant.privilege; + } + + /* prepare select to resolve all fields */ + lex->view_prepare_mode= 1; + if (unit->prepare(thd, 0, 0)) + { + /* + some errors from prepare are reported to user, if is not then + it will be checked after err: label + */ + res= 1; + goto err; + } + + /* view list (list of view fields names) */ + if (lex->view_list.elements) + { + List_iterator_fast<Item> it(select_lex->item_list); + List_iterator_fast<LEX_STRING> nm(lex->view_list); + Item *item; + LEX_STRING *name; + + if (lex->view_list.elements != select_lex->item_list.elements) + { + my_message(ER_VIEW_WRONG_LIST, ER(ER_VIEW_WRONG_LIST), MYF(0)); + goto err; + } + while ((item= it++, name= nm++)) + item->set_name(name->str, name->length, system_charset_info); + } + + /* Test absence of duplicates names */ + { + Item *item; + List_iterator_fast<Item> it(select_lex->item_list); + it++; + while ((item= it++)) + { + Item *check; + List_iterator_fast<Item> itc(select_lex->item_list); + while ((check= itc++) && check != item) + { + if (strcmp(item->name, check->name) == 0) + { + my_error(ER_DUP_FIELDNAME, MYF(0), item->name); + DBUG_RETURN(-1); + } + } + } + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Compare/check grants on view with grants of underlying tables + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + char *db= view->db ? view->db : thd->db; + List_iterator_fast<Item> it(sl->item_list); + Item *item; + fill_effective_table_privileges(thd, &view->grant, db, + view->real_name); + while ((item= it++)) + { + Item_field *fld; + uint priv= (get_column_grant(thd, &view->grant, db, + view->real_name, item->name) & + VIEW_ANY_ACL); + if ((fld= item->filed_for_view_update())) + { + /* + Do we have more privileges on view field then underlying table field? 
+ */ + if ((~fld->have_privileges & priv)) + { + /* VIEW column has more privileges */ + my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, + ER(ER_COLUMNACCESS_DENIED_ERROR), + MYF(0), + "create view", + thd->priv_user, + thd->host_or_ip, + item->name, + view->real_name); + DBUG_RETURN(-1); + } + } + } + } +#endif + + if (wait_if_global_read_lock(thd, 0, 0)) + { + res= -1; + goto err; + } + VOID(pthread_mutex_lock(&LOCK_open)); + res= mysql_register_view(thd, view, mode); + VOID(pthread_mutex_unlock(&LOCK_open)); + start_waiting_global_read_lock(thd); + if (res) + goto err; + + send_ok(thd); + lex->link_first_table_back(view, link_to_local); + return 0; + +err: + thd->proc_info= "end"; + lex->link_first_table_back(view, link_to_local); + unit->cleanup(); + if (thd->net.report_error) + res= -1; + DBUG_RETURN(res); +} + + +/* index of revision number in following table */ +static const int revision_number_position= 5; +/* index of last required parameter for making view */ +static const int required_view_parameters= 7; + +/* + table of VIEW .frm field descriptors + + Note that one should NOT change the order for this, as it's used by + parse() +*/ +static File_option view_parameters[]= +{{{(char*) "query", 5}, offsetof(TABLE_LIST, query), + FILE_OPTIONS_STRING}, + {{(char*) "md5", 3}, offsetof(TABLE_LIST, md5), + FILE_OPTIONS_STRING}, + {{(char*) "updatable", 9}, offsetof(TABLE_LIST, updatable_view), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "algorithm", 9}, offsetof(TABLE_LIST, algorithm), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "with_check_option", 17}, offsetof(TABLE_LIST, with_check), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "revision", 8}, offsetof(TABLE_LIST, revision), + FILE_OPTIONS_REV}, + {{(char*) "timestamp", 9}, offsetof(TABLE_LIST, timestamp), + FILE_OPTIONS_TIMESTAMP}, + {{(char*)"create-version", 14},offsetof(TABLE_LIST, file_version), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "source", 6}, offsetof(TABLE_LIST, source), + FILE_OPTIONS_ESTRING}, + {{NullS, 0}, 0, + FILE_OPTIONS_STRING} +}; + +static LEX_STRING view_file_type[]= {{(char*)"VIEW", 4}}; + + +/* + Register VIEW (write .frm & process .frm's history backups) + + SYNOPSIS + mysql_register_view() + thd - thread handler + view - view description + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN + 0 OK + -1 Error + 1 Error and error message given +*/ + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + char buff[4096]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + char md5[33]; + bool can_be_merged; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + DBUG_ENTER("mysql_register_view"); + + /* print query */ + str.length(0); + { + ulong sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES; + thd->variables.sql_mode&= ~MODE_ANSI_QUOTES; + lex->unit.print(&str); + thd->variables.sql_mode|= sql_mode; + } + str.append('\0'); + DBUG_PRINT("VIEW", ("View: %s", str.ptr())); + + /* print file name */ + (void) my_snprintf(dir_buff, FN_REFLEN, "%s/%s/", + mysql_data_home, view->db); + unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + dir.length= strlen(dir_buff); + + file.str= file_buff; + file.length= (strxnmov(file_buff, FN_REFLEN, view->real_name, reg_ext, + NullS) - file_buff); + /* init timestamp */ + if (!view->timestamp.str) + view->timestamp.str= view->timestamp_buffer; + + /* check old .frm */ + { + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + + path.str= path_buff; + 
fn_format(path_buff, file.str, dir.str, 0, MY_UNPACK_FILENAME); + path.length= strlen(path_buff); + + if (!access(path.str, F_OK)) + { + if (mode == VIEW_CREATE_NEW) + { + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), view->alias); + DBUG_RETURN(-1); + } + + if (!(parser= sql_parse_prepare(&path, thd->mem_root, 0))) + DBUG_RETURN(1); + + if (!parser->ok() || + strncmp("VIEW", parser->type()->str, parser->type()->length)) + { + my_error(ER_WRONG_OBJECT, MYF(0), (view->db ? view->db : thd->db), + view->real_name, "VIEW"); + DBUG_RETURN(-1); + } + + /* + read revision number + + TODO: read dependency list, too, to process cascade/restrict + TODO: special cascade/restrict procedure for alter? + */ + if (parser->parse((gptr)view, thd->mem_root, + view_parameters + revision_number_position, 1)) + { + DBUG_RETURN(thd->net.report_error? -1 : 0); + } + } + else + { + if (mode == VIEW_ALTER) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), view->db, view->alias); + DBUG_RETURN(-1); + } + } + } + /* fill structure */ + view->query.str= (char*)str.ptr(); + view->query.length= str.length()-1; // we do not need last \0 + view->source.str= thd->query; + view->source.length= thd->query_length; + view->file_version= 1; + view->calc_md5(md5); + view->md5.str= md5; + view->md5.length= 32; + can_be_merged= lex->can_be_merged(); + if (lex->create_view_algorithm == VIEW_ALGORITHM_MERGE && + !lex->can_be_merged()) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE, + ER(ER_WARN_VIEW_MERGE)); + lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + } + view->algorithm= lex->create_view_algorithm; + view->with_check= lex->create_view_check; + if ((view->updatable_view= (can_be_merged && + view->algorithm != VIEW_ALGORITHM_TMPTABLE))) + { + /* TODO: change this when we support UNIONs */ + for (TABLE_LIST *tbl= (TABLE_LIST *)lex->select_lex.table_list.first; + tbl; + tbl= tbl->next_local) + { + if (tbl->view && !tbl->updatable_view) + { + view->updatable_view= 0; + break; + } + } + } + + /* + Check that the table of the main select is not used in subqueries. + + This test can catch only very simple cases of such non-updatable views; + all others will be detected before execution of updating commands. + (it is more an optimisation than a real check) + + NOTE: this skips cases of using the table via VIEWs, joined VIEWs, VIEWs with + UNION + */ + if (view->updatable_view && + !lex->select_lex.next_select() && + !((TABLE_LIST*)lex->select_lex.table_list.first)->next_local && + find_table_in_global_list(lex->query_tables->next_global, + lex->query_tables->db, + lex->query_tables->real_name)) + { + view->updatable_view= 0; + } + + if (view->with_check != VIEW_CHECK_NONE && + !view->updatable_view) + { + my_error(ER_VIEW_NONUPD_CHECK, MYF(0), view->db, view->real_name); + DBUG_RETURN(-1); + } + + if (sql_create_definition_file(&dir, &file, view_file_type, + (gptr)view, view_parameters, 3)) + { + DBUG_RETURN(thd->net.report_error?
-1 : 1); + } + DBUG_RETURN(0); +} + + +/* + read VIEW .frm and create structures + + SYNOPSIS + mysql_make_view() + parser - parser object; + table - TABLE_LIST structure for filling + + RETURN + 0 ok + 1 error +*/ + +my_bool +mysql_make_view(File_parser *parser, TABLE_LIST *table) +{ + DBUG_ENTER("mysql_make_view"); + + if (table->view) + { + DBUG_PRINT("info", + ("VIEW %s.%s is already processed on previous PS/SP execution", + table->view_db.str, table->view_name.str)); + DBUG_RETURN(0); + } + + SELECT_LEX *end; + THD *thd= current_thd; + LEX *old_lex= thd->lex, *lex; + int res= 0; + + /* + For now we assume that tables will not be changed during PS life (it + will be TRUE as far as we make new table cache). + */ + Item_arena *arena= thd->current_arena, backup; + if (arena->is_conventional()) + arena= 0; + else + thd->set_n_backup_item_arena(arena, &backup); + + /* init timestamp */ + if (!table->timestamp.str) + table->timestamp.str= table->timestamp_buffer; + /* + TODO: when VIEWs will be stored in cache, table mem_root should + be used here + */ + if (parser->parse((gptr)table, thd->mem_root, view_parameters, + required_view_parameters)) + goto err; + + /* + Save VIEW parameters, which will be wiped out by derived table + processing + */ + table->view_db.str= table->db; + table->view_db.length= table->db_length; + table->view_name.str= table->real_name; + table->view_name.length= table->real_name_length; + + /*TODO: md5 test here and warning if it is differ */ + + /* + TODO: TABLE mem root should be used here when VIEW will be stored in + TABLE cache + + now Lex placed in statement memory + */ + table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; + lex_start(thd, (uchar*)table->query.str, table->query.length); + lex->select_lex.select_number= ++thd->select_number; + old_lex->derived_tables|= DERIVED_VIEW; + { + ulong options= thd->options; + /* switch off modes which can prevent normal parsing of VIEW + - MODE_REAL_AS_FLOAT affect only CREATE TABLE parsing + + MODE_PIPES_AS_CONCAT affect expression parsing + + MODE_ANSI_QUOTES affect expression parsing + + MODE_IGNORE_SPACE affect expression parsing + - MODE_NOT_USED not used :) + * MODE_ONLY_FULL_GROUP_BY affect execution + * MODE_NO_UNSIGNED_SUBTRACTION affect execution + - MODE_NO_DIR_IN_CREATE affect table creation only + - MODE_POSTGRESQL compounded from other modes + - MODE_ORACLE compounded from other modes + - MODE_MSSQL compounded from other modes + - MODE_DB2 compounded from other modes + - MODE_MAXDB affect only CREATE TABLE parsing + - MODE_NO_KEY_OPTIONS affect only SHOW + - MODE_NO_TABLE_OPTIONS affect only SHOW + - MODE_NO_FIELD_OPTIONS affect only SHOW + - MODE_MYSQL323 affect only SHOW + - MODE_MYSQL40 affect only SHOW + - MODE_ANSI compounded from other modes + (+ transaction mode) + ? MODE_NO_AUTO_VALUE_ON_ZERO affect UPDATEs + + MODE_NO_BACKSLASH_ESCAPES affect expression parsing + */ + thd->options&= ~(MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | MODE_NO_BACKSLASH_ESCAPES); + CHARSET_INFO *save_cs= thd->variables.character_set_client; + thd->variables.character_set_client= system_charset_info; + res= yyparse((void *)thd); + thd->variables.character_set_client= save_cs; + thd->options= options; + } + if (!res && !thd->is_fatal_error) + { + TABLE_LIST *top_view= (table->belong_to_view ? 
+ table->belong_to_view : + table); + TABLE_LIST *view_tables= lex->query_tables; + TABLE_LIST *view_tables_tail= 0; + + if (lex->spfuns.records) + { + /* move SP to main LEX */ + sp_merge_funs(old_lex, lex); + /* open mysql.proc for functions which are not in the cache */ + if (old_lex->proc_table == 0 && + (old_lex->proc_table= + (TABLE_LIST*)thd->calloc(sizeof(TABLE_LIST))) != 0) + { + TABLE_LIST *table= old_lex->proc_table; + table->db= (char*)"mysql"; + table->db_length= 5; + table->real_name= table->alias= (char*)"proc"; + table->real_name_length= 4; + table->cacheable_table= 1; + old_lex->add_to_query_tables(table); + } + } + /* cleanup LEX */ + if (lex->spfuns.array.buffer) + hash_free(&lex->spfuns); + + /* + mark tables to avoid temporary table usage, put the view reference, and find + the last view table + */ + for (TABLE_LIST *tbl= view_tables; + tbl; + tbl= (view_tables_tail= tbl)->next_global) + { + tbl->skip_temporary= 1; + tbl->belong_to_view= top_view; + } + + /* + check rights to run commands (EXPLAIN SELECT & SHOW CREATE) which show + underlying tables + */ + if ((old_lex->sql_command == SQLCOM_SELECT && old_lex->describe)) + { + if (check_table_access(thd, SELECT_ACL, view_tables, 1) && + check_table_access(thd, SHOW_VIEW_ACL, table, 1)) + { + my_error(ER_VIEW_NO_EXPLAIN, MYF(0)); + goto err; + } + } + else if (old_lex->sql_command == SQLCOM_SHOW_CREATE) + { + if (check_table_access(thd, SHOW_VIEW_ACL, table, 0)) + goto err; + } + + /* move SQL_NO_CACHE & Co to whole query */ + old_lex->safe_to_cache_query= (old_lex->safe_to_cache_query && + lex->safe_to_cache_query); + /* move SQL_CACHE to whole query */ + if (lex->select_lex.options & OPTION_TO_QUERY_CACHE) + old_lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + + /* + Put tables of VIEW after VIEW TABLE_LIST + + NOTE: It is important for UPDATE/INSERT/DELETE checks to have these + tables just after the VIEW instead of at the tail of the list, to be able to check + that a table is unique. Also we store the old next table for the same purpose. + */ + if (view_tables) + { + if (table->next_global) + { + view_tables_tail->next_global= table->next_global; + table->next_global->prev_global= &view_tables_tail->next_global; + } + else + { + lex->query_tables_last= &view_tables_tail->next_global; + } + view_tables->prev_global= &table->next_global; + table->next_global= view_tables; + } + + /* + check MERGE algorithm applicability: + - algorithm is not explicitly TEMPORARY TABLE + - VIEW SELECT allows merging + - VIEW is used in a subquery or a command that supports the MERGE algorithm + */ + if (table->algorithm != VIEW_ALGORITHM_TMPTABLE && + lex->can_be_merged() && + (table->select_lex->master_unit() != &old_lex->unit || + old_lex->can_use_merged()) && + !old_lex->can_not_use_merged()) + { + /* + TODO: support multi-table substitutions + */ + /* lex should contain at least one table */ + DBUG_ASSERT(view_tables != 0); + + table->effective_algorithm= VIEW_ALGORITHM_MERGE; + DBUG_PRINT("info", ("algorithm: MERGE")); + table->updatable= (table->updatable_view != 0); + table->effective_with_check= table->with_check; + + table->ancestor= view_tables; + /* + the next table should include SELECT_LEX under this table's SELECT_LEX + + TODO: here should be a loop for multi-table substitution + */ + table->ancestor->select_lex= table->select_lex; + /* + move lock type (TODO: should we issue an error in case of TMPTABLE + algorithm and non-read locking)?
+ */ + view_tables->lock_type= table->lock_type; + + /* Store WHERE clause for post-processing in setup_ancestor */ + table->where= lex->select_lex.where; + + /* + Add subqueries units to SELECT in which we merging current view. + + NOTE: we do not support UNION here, so we take only one select + */ + for (SELECT_LEX_UNIT *unit= lex->select_lex.first_inner_unit(); + unit; + unit= unit->next_unit()) + { + SELECT_LEX_NODE *save_slave= unit->slave; + unit->include_down(table->select_lex); + unit->slave= save_slave; // fix include_down initialisation + } + + /* + This SELECT_LEX will be linked in global SELECT_LEX list + to make it processed by mysql_handle_derived(), + but it will not be included to SELECT_LEX tree, because it + will not be executed + */ + goto ok; + } + + table->effective_algorithm= VIEW_ALGORITHM_TMPTABLE; + DBUG_PRINT("info", ("algorithm: TEMPORARY TABLE")); + lex->select_lex.linkage= DERIVED_TABLE_TYPE; + table->updatable= 0; + table->effective_with_check= VIEW_CHECK_NONE; + + /* SELECT tree link */ + lex->unit.include_down(table->select_lex); + lex->unit.slave= &lex->select_lex; // fix include_down initialisation + + table->derived= &lex->unit; + } + else + goto err; + +ok: + if (arena) + thd->restore_backup_item_arena(arena, &backup); + /* global SELECT list linking */ + end= &lex->select_lex; // primary SELECT_LEX is always last + end->link_next= old_lex->all_selects_list; + old_lex->all_selects_list->link_prev= &end->link_next; + old_lex->all_selects_list= lex->all_selects_list; + lex->all_selects_list->link_prev= + (st_select_lex_node**)&old_lex->all_selects_list; + + thd->lex= old_lex; + DBUG_RETURN(0); + +err: + if (arena) + thd->restore_backup_item_arena(arena, &backup); + table->view= 0; // now it is not VIEW placeholder + thd->lex= old_lex; + DBUG_RETURN(1); +} + + +/* + drop view + + SYNOPSIS + mysql_drop_view() + thd - thread handler + views - views to delete + drop_mode - cascade/check + + RETURN VALUE + 0 OK + -1 Error + 1 Error and error message given +*/ + +int mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) +{ + DBUG_ENTER("mysql_drop_view"); + char path[FN_REFLEN]; + TABLE_LIST *view; + bool type= 0; + + for (view= views; view; view= view->next_local) + { + strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/", + view->real_name, reg_ext, NullS); + (void) unpack_filename(path, path); + VOID(pthread_mutex_lock(&LOCK_open)); + if (access(path, F_OK) || (type= (mysql_frm_type(path) != FRMTYPE_VIEW))) + { + char name[FN_REFLEN]; + my_snprintf(name, sizeof(name), "%s.%s", view->db, view->real_name); + if (thd->lex->drop_if_exists) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), + name); + VOID(pthread_mutex_unlock(&LOCK_open)); + continue; + } + if (type) + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->real_name, "VIEW"); + else + my_error(ER_BAD_TABLE_ERROR, MYF(0), name); + goto err; + } + if (my_delete(path, MYF(MY_WME))) + goto err; + VOID(pthread_mutex_unlock(&LOCK_open)); + } + send_ok(thd); + DBUG_RETURN(0); + +err: + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(-1); + +} + + +/* + Check type of .frm if we are not going to parse it + + SYNOPSIS + mysql_frm_type() + path path to file + + RETURN + FRMTYPE_ERROR error + FRMTYPE_TABLE table + FRMTYPE_VIEW view +*/ + +frm_type_enum mysql_frm_type(char *path) +{ + File file; + char header[10]; //"TYPE=VIEW\n" it is 10 characters + int length; + DBUG_ENTER("mysql_frm_type"); + + if ((file= my_open(path, 
O_RDONLY | O_SHARE, MYF(MY_WME))) < 0) + { + DBUG_RETURN(FRMTYPE_ERROR); + } + length= my_read(file, (byte*) header, 10, MYF(MY_WME)); + my_close(file, MYF(MY_WME)); + if (length == (int) MY_FILE_ERROR) + DBUG_RETURN(FRMTYPE_ERROR); + if (!strncmp(header, "TYPE=VIEW\n", 10)) + DBUG_RETURN(FRMTYPE_VIEW); + DBUG_RETURN(FRMTYPE_TABLE); // Is probably a .frm table +} + + +/* + check of key (primary or unique) presence in updatable view + + SYNOPSIS + check_key_in_view() + thd thread handler + view view for check with opened table + + DESCRIPTION + If it is a VIEW and the query has a LIMIT clause, then check that the underlying + table of the view contains one of the following: + 1) primary key of the underlying table + 2) unique key of the underlying table with fields for which a NULL value is + impossible + 3) all fields of the underlying table + + RETURN + FALSE OK + TRUE view does not contain a key or all fields +*/ + +bool check_key_in_view(THD *thd, TABLE_LIST *view) +{ + TABLE *table; + Item **trans; + KEY *key_info, *key_info_end; + uint i, elements_in_view; + DBUG_ENTER("check_key_in_view"); + + /* + we do not support updatable UNIONs in VIEWs, so we can just check the LIMIT of + LEX::select_lex + */ + if (!view->view || thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->select_lex.select_limit == HA_POS_ERROR) + DBUG_RETURN(FALSE); /* it is a normal table or a query without LIMIT */ + table= view->table; + trans= view->field_translation; + key_info_end= (key_info= table->key_info)+ table->keys; + + elements_in_view= view->view->select_lex.item_list.elements; + DBUG_ASSERT(view->table != 0 && view->field_translation != 0); + + /* Loop over all keys to see if a unique-not-null key is used */ + for (;key_info != key_info_end ; key_info++) + { + if ((key_info->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *key_part_end= key_part + key_info->key_parts; + + /* check that all key parts are used */ + for (;;) + { + uint k; + for (k= 0; k < elements_in_view; k++) + { + Item_field *field; + if ((field= trans[k]->filed_for_view_update()) && + field->field == key_part->field) + break; + } + if (k == elements_in_view) + break; // Key is not possible + if (++key_part == key_part_end) + DBUG_RETURN(FALSE); // Found usable key + } + } + } + + DBUG_PRINT("info", ("checking if all fields of table are used")); + /* check presence of all fields */ + { + Field **field_ptr; + for (field_ptr= table->field; *field_ptr; field_ptr++) + { + for (i= 0; i < elements_in_view; i++) + { + Item_field *field; + if ((field= trans[i]->filed_for_view_update()) && + field->field == *field_ptr) + break; + } + if (i == elements_in_view) // If field doesn't exist + { + /* + Keys or all fields of underlying tables are not found => we have + to check the variable updatable_views_with_limit to decide whether we should + issue an error or just a warning + */ + if (thd->variables.updatable_views_with_limit) + { + /* update allowed, but issue warning */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_WARN_VIEW_WITHOUT_KEY, ER(ER_WARN_VIEW_WITHOUT_KEY)); + DBUG_RETURN(FALSE); + } + /* prohibit update */ + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + insert fields from VIEW (MERGE algorithm) into given list + + SYNOPSIS + insert_view_fields() + list list for insertion + view view for processing +*/ + +void insert_view_fields(List<Item> *list, TABLE_LIST *view) +{ + uint elements_in_view= view->view->select_lex.item_list.elements; + Item **trans; + DBUG_ENTER("insert_view_fields"); + + if
(!(trans= view->field_translation)) + DBUG_VOID_RETURN; + + for (uint i= 0; i < elements_in_view; i++) + { + Item_field *fld; + if ((fld= trans[i]->filed_for_view_update())) + list->push_back(fld); + } + DBUG_VOID_RETURN; +} diff --git a/sql/sql_view.h b/sql/sql_view.h new file mode 100644 index 00000000000..538f548d97b --- /dev/null +++ b/sql/sql_view.h @@ -0,0 +1,35 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +int mysql_create_view(THD *thd, + enum_view_create_mode mode); + +my_bool mysql_make_view(File_parser *parser, TABLE_LIST *table); + +int mysql_drop_view(THD *thd, TABLE_LIST *view, enum_drop_mode drop_mode); + +bool check_key_in_view(THD *thd, TABLE_LIST * view); + +void insert_view_fields(List<Item> *list, TABLE_LIST *view); + +frm_type_enum mysql_frm_type(char *path); + +extern TYPELIB updatable_views_with_limit_typelib; + +#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL) + diff --git a/sql/structs.h b/sql/structs.h index c30d85f59cb..cc053e2e2fd 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -158,8 +158,8 @@ typedef struct st_known_date_time_format { enum SHOW_TYPE { SHOW_UNDEF, - SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, SHOW_BOOL, - SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, + SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, SHOW_DOUBLE, + SHOW_BOOL, SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE, SHOW_SYS, SHOW_HA_ROWS, #ifdef HAVE_OPENSSL SHOW_SSL_CTX_SESS_ACCEPT, SHOW_SSL_CTX_SESS_ACCEPT_GOOD, @@ -176,7 +176,8 @@ enum SHOW_TYPE SHOW_SSL_GET_CIPHER_LIST, #endif /* HAVE_OPENSSL */ SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, - SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG + SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, + SHOW_LONG_STATUS, SHOW_LONG_CONST_STATUS }; enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED}; diff --git a/sql/table.cc b/sql/table.cc index 20ac714020d..0e8045abacf 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -20,7 +20,8 @@ #include "mysql_priv.h" #include <errno.h> #include <m_ctype.h> - +#include "md5.h" +#include "sql_acl.h" /* Functions defined in this file */ @@ -57,10 +58,11 @@ static byte* get_field_name(Field **buff,uint *length, 2 Error (see frm_error) 3 Wrong data in .frm file 4 Error (see frm_error) + 5 It is new format of .frm file */ -int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, - uint ha_open_flags, TABLE *outparam) +int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, + uint prgflag, uint ha_open_flags, TABLE *outparam) { reg1 uint i; reg2 uchar *strpos; @@ -83,17 +85,45 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, SQL_CRYPT *crypted=0; MEM_ROOT **root_ptr, *old_root; DBUG_ENTER("openfrm"); - 
DBUG_PRINT("enter",("name: '%s' form: %lx",name,outparam)); + DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam)); + + error=1; + disk_buff=NULL; + root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); + old_root= *root_ptr; + + if ((file=my_open(fn_format(index_file, name, "", reg_ext, + MY_UNPACK_FILENAME), + O_RDONLY | O_SHARE, + MYF(0))) + < 0) + { + goto err_w_init; + } + + if (my_read(file,(byte*) head,64,MYF(MY_NABP))) + { + goto err_w_init; + } + + if (memcmp(head, "TYPE=", 5) == 0) + { + // new .frm + my_close(file,MYF(MY_WME)); + + if (db_stat & NO_ERR_ON_NEW_FRM) + DBUG_RETURN(5); + + // caller can't process new .frm + error= 4; + goto err_w_init; + } bzero((char*) outparam,sizeof(*outparam)); + outparam->in_use= thd; outparam->blob_ptr_size=sizeof(char*); - disk_buff=NULL; record= NULL; keynames=NullS; outparam->db_stat = db_stat; - error=1; - init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); - root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); - old_root= *root_ptr; *root_ptr= &outparam->mem_root; outparam->real_name=strdup_root(&outparam->mem_root, @@ -103,19 +133,11 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (!outparam->real_name || !outparam->table_name) goto err_end; - if ((file=my_open(fn_format(index_file,name,"",reg_ext,MY_UNPACK_FILENAME), - O_RDONLY | O_SHARE, - MYF(0))) - < 0) - { - goto err_end; /* purecov: inspected */ - } error=4; if (!(outparam->path= strdup_root(&outparam->mem_root,name))) goto err_not_open; *fn_ext(outparam->path)='\0'; // Remove extension - if (my_read(file,(byte*) head,64,MYF(MY_NABP))) goto err_not_open; if (head[0] != (uchar) 254 || head[1] != 1 || (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3)) goto err_not_open; /* purecov: inspected */ @@ -148,7 +170,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->db_record_offset=1; if (db_create_options & HA_OPTION_LONG_BLOB_PTR) outparam->blob_ptr_size=portable_sizeof_char_ptr; - /* Set temporaryly a good value for db_low_byte_first */ + /* Set temporarily a good value for db_low_byte_first */ outparam->db_low_byte_first=test(outparam->db_type != DB_TYPE_ISAM); error=4; outparam->max_rows=uint4korr(head+18); @@ -458,6 +480,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, field_length= (uint) strpos[3]; recpos= uint2korr(strpos+4), pack_flag= uint2korr(strpos+6); + pack_flag&= ~FIELDFLAG_NO_DEFAULT; // Safety for old files unireg_type= (uint) strpos[8]; interval_nr= (uint) strpos[10]; @@ -494,6 +517,8 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, null_bit=1; } } + if (f_no_default(pack_flag)) + reg_field->flags|= NO_DEFAULT_VALUE_FLAG; if (reg_field->unireg_check == Field::NEXT_NUMBER) outparam->found_next_number_field= reg_field; if (outparam->timestamp_field == reg_field) @@ -704,7 +729,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->blob_field= (Field_blob**) (outparam->field+outparam->fields); // Point at null ptr - /* The table struct is now initialzed; Open the table */ + /* The table struct is now initialized; Open the table */ error=2; if (db_stat) { @@ -739,13 +764,18 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->db_low_byte_first=outparam->file->low_byte_first(); *root_ptr= old_root; - opened_tables++; + thd->status_var.opened_tables++; #ifndef DBUG_OFF if (use_hash) (void) 
hash_check(&outparam->name_hash); #endif DBUG_RETURN (0); + err_w_init: + /* Avoid problem with uninitialized data */ + bzero((char*) outparam,sizeof(*outparam)); + outparam->real_name= (char*)name+dirname_length(name); + err_not_open: x_free((gptr) disk_buff); if (file > 0) @@ -756,7 +786,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, *root_ptr= old_root; frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG); delete outparam->file; - outparam->file=0; // For easyer errorchecking + outparam->file=0; // For easier errorchecking outparam->db_stat=0; hash_free(&outparam->name_hash); free_root(&outparam->mem_root,MYF(0)); @@ -785,7 +815,7 @@ int closefrm(register TABLE *table) table->fields=0; } delete table->file; - table->file=0; /* For easyer errorchecking */ + table->file=0; /* For easier errorchecking */ hash_free(&table->name_hash); free_root(&table->mem_root,MYF(0)); DBUG_RETURN(error); @@ -982,7 +1012,7 @@ static void frm_error(int error, TABLE *form, const char *name, myf errortype) /* ** fix a str_type to a array type - ** typeparts sepearated with some char. differents types are separated + ** typeparts separated with some char. differents types are separated ** with a '\0' */ @@ -1074,7 +1104,7 @@ static uint find_field(TABLE *form,uint start,uint length) } - /* Check that the integer is in the internvall */ + /* Check that the integer is in the internal */ int set_zone(register int nr, int min_zone, int max_zone) { @@ -1138,7 +1168,7 @@ void append_unescaped(String *res, const char *pos, uint length) res->append('n'); break; case '\r': - res->append('\\'); /* This gives better readbility */ + res->append('\\'); /* This gives better readability */ res->append('r'); break; case '\\': @@ -1454,6 +1484,335 @@ db_type get_table_type(const char *name) } +/* + calculate md5 of query + + SYNOPSIS + st_table_list::calc_md5() + buffer buffer for md5 writing +*/ + +void st_table_list::calc_md5(char *buffer) +{ + my_MD5_CTX context; + uchar digest[16]; + my_MD5Init(&context); + my_MD5Update(&context,(uchar *) query.str, query.length); + my_MD5Final(digest, &context); + sprintf((char *) buffer, + "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", + digest[0], digest[1], digest[2], digest[3], + digest[4], digest[5], digest[6], digest[7], + digest[8], digest[9], digest[10], digest[11], + digest[12], digest[13], digest[14], digest[15]); +} + + +/* + set ancestor TABLE for table place holder of VIEW + + SYNOPSIS + st_table_list::set_ancestor() +*/ + +void st_table_list::set_ancestor() +{ + if (ancestor->ancestor) + ancestor->set_ancestor(); + table= ancestor->table; + ancestor->table->grant= grant; +} + + +/* + setup fields of placeholder of merged VIEW + + SYNOPSIS + st_table_list::setup_ancestor() + thd - thread handler + conds - condition of this JOIN + check_opt_type - WHITH CHECK OPTION type (VIEW_CHECK_NONE, + VIEW_CHECK_LOCAL, VIEW_CHECK_CASCADED) + + DESCRIPTION + It is: + - preparing translation table for view columns (fix_fields() for every + call and creation for first call) + - preparing WHERE, ON and CHECK OPTION condition (fix_fields() for every + call and merging for first call). + If there are underlying view(s) procedure first will be called for them. 
+ + RETURN + 0 - OK + 1 - error +*/ + +bool st_table_list::setup_ancestor(THD *thd, Item **conds, + uint8 check_opt_type) +{ + Item **transl; + SELECT_LEX *select= &view->select_lex; + SELECT_LEX *current_select_save= thd->lex->current_select; + Item *item; + List_iterator_fast<Item> it(select->item_list); + uint i= 0; + bool save_set_query_id= thd->set_query_id; + bool save_wrapper= thd->lex->select_lex.no_wrap_view_item; + bool save_allow_sum_func= thd->allow_sum_func; + DBUG_ENTER("st_table_list::setup_ancestor"); + + if (ancestor->ancestor && + ancestor->setup_ancestor(thd, conds, + (check_opt_type == VIEW_CHECK_CASCADED ? + VIEW_CHECK_CASCADED : + VIEW_CHECK_NONE))) + DBUG_RETURN(1); + + if (field_translation) + { + /* prevent look up in SELECTs tree */ + thd->lex->current_select= &thd->lex->select_lex; + thd->set_query_id= 1; + /* this view was prepared already on previous PS/SP execution */ + Item **end= field_translation + select->item_list.elements; + for (Item **item= field_translation; item < end; item++) + { + /* TODO: fix for several tables in VIEW */ + uint want_privilege= ancestor->table->grant.want_privilege; + /* real rights will be checked in VIEW field */ + ancestor->table->grant.want_privilege= 0; + /* aggregate function are allowed */ + thd->allow_sum_func= 1; + if (!(*item)->fixed && (*item)->fix_fields(thd, ancestor, item)) + goto err; + ancestor->table->grant.want_privilege= want_privilege; + } + goto ok; + } + + /* view fields translation table */ + if (!(transl= + (Item**)(thd->current_arena->alloc(select->item_list.elements * sizeof(Item*))))) + { + DBUG_RETURN(1); + } + + /* prevent look up in SELECTs tree */ + thd->lex->current_select= &thd->lex->select_lex; + thd->lex->select_lex.no_wrap_view_item= 1; + + /* + Resolve all view items against ancestor table. + + TODO: do it only for real used fields "on demand" to mark really + used fields correctly. + */ + thd->set_query_id= 1; + while ((item= it++)) + { + /* TODO: fix for several tables in VIEW */ + uint want_privilege= ancestor->table->grant.want_privilege; + /* real rights will be checked in VIEW field */ + ancestor->table->grant.want_privilege= 0; + /* aggregate function are allowed */ + thd->allow_sum_func= 1; + if (!item->fixed && item->fix_fields(thd, ancestor, &item)) + goto err; + ancestor->table->grant.want_privilege= want_privilege; + transl[i++]= item; + } + field_translation= transl; + /* TODO: sort this list? 
Use hash for big number of fields */ + + if (where || + (check_opt_type == VIEW_CHECK_CASCADED && + ancestor->check_option)) + { + Item_arena *arena= thd->current_arena, backup; + TABLE_LIST *tbl= this; + if (arena->is_conventional()) + arena= 0; // For easier test + + if (where && !where->fixed && where->fix_fields(thd, ancestor, &where)) + goto err; + + if (arena) + thd->set_n_backup_item_arena(arena, &backup); + + if (check_opt_type) + { + if (where) + check_option= where->copy_andor_structure(thd); + if (check_opt_type == VIEW_CHECK_CASCADED) + { + check_option= and_conds(check_option, ancestor->check_option); + } + } + + /* + check that it is not VIEW in which we insert with INSERT SELECT + (in this case we can't add view WHERE condition to main SELECT_LEX) + */ + if (where && !no_where_clause) + { + /* Go up to join tree and try to find left join */ + for (; tbl; tbl= tbl->embedding) + { + if (tbl->outer_join) + { + /* + Store WHERE condition to ON expression for outer join, because + we can't use WHERE to correctly execute left joins on VIEWs and + this expression will not be moved to WHERE condition (i.e. will + be clean correctly for PS/SP) + */ + tbl->on_expr= and_conds(tbl->on_expr, where); + break; + } + } + if (tbl == 0) + { + if (outer_join) + { + /* + Store WHERE condition to ON expression for outer join, because + we can't use WHERE to correctly execute left joins on VIEWs and + this expression will not be moved to WHERE condition (i.e. will + be clean correctly for PS/SP) + */ + on_expr= and_conds(on_expr, where); + } + else + { + /* + It is conds of JOIN, but it will be stored in + st_select_lex::prep_where for next reexecution + */ + *conds= and_conds(*conds, where); + } + } + } + + if (arena) + thd->restore_backup_item_arena(arena, &backup); + } + /* + fix_fields do not need tables, because new are only AND operation and we + just need recollect statistics + */ + if (check_option && !check_option->fixed && + check_option->fix_fields(thd, 0, &check_option)) + goto err; + + /* full text function moving to current select */ + if (view->select_lex.ftfunc_list->elements) + { + Item_func_match *ifm; + List_iterator_fast<Item_func_match> + li(*(view->select_lex.ftfunc_list)); + while ((ifm= li++)) + current_select_save->ftfunc_list->push_front(ifm); + } + +ok: + thd->lex->select_lex.no_wrap_view_item= save_wrapper; + thd->lex->current_select= current_select_save; + thd->set_query_id= save_set_query_id; + thd->allow_sum_func= save_allow_sum_func; + DBUG_RETURN(0); + +err: + /* Hide "Unknown column" error */ + if (thd->net.last_errno == ER_BAD_FIELD_ERROR) + { + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), view_db.str, view_name.str); + } + thd->lex->select_lex.no_wrap_view_item= save_wrapper; + thd->lex->current_select= current_select_save; + thd->set_query_id= save_set_query_id; + thd->allow_sum_func= save_allow_sum_func; + DBUG_RETURN(1); +} + + +/* + cleunup items belonged to view fields translation table + + SYNOPSIS + st_table_list::cleanup_items() +*/ + +void st_table_list::cleanup_items() +{ + if (!field_translation) + return; + + Item **end= field_translation + view->select_lex.item_list.elements; + for (Item **item= field_translation; item < end; item++) + (*item)->walk(&Item::cleanup_processor, 0); +} + + +/* + check CHECK OPTION condition + + SYNOPSIS + check_option() + ignore_failure ignore check option fail + + RETURN + VIEW_CHECK_OK OK + VIEW_CHECK_ERROR FAILED + VIEW_CHECK_SKIP FAILED, but continue +*/ + +int st_table_list::view_check_option(THD *thd, 
bool ignore_failure) +{ + if (check_option && check_option->val_int() == 0) + { + if (ignore_failure) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED), + view_db.str, view_name.str); + return(VIEW_CHECK_SKIP); + } + else + { + my_error(ER_VIEW_CHECK_FAILED, MYF(0), view_db.str, view_name.str); + return(VIEW_CHECK_ERROR); + } + } + return(VIEW_CHECK_OK); +} + + +void Field_iterator_view::set(TABLE_LIST *table) +{ + ptr= table->field_translation; + array_end= ptr + table->view->select_lex.item_list.elements; +} + + +const char *Field_iterator_table::name() +{ + return (*ptr)->field_name; +} + + +Item *Field_iterator_table::item(THD *thd) +{ + return new Item_field(thd, *ptr); +} + + +const char *Field_iterator_view::name() +{ + return (*ptr)->name; +} + + /***************************************************************************** ** Instansiate templates *****************************************************************************/ diff --git a/sql/table.h b/sql/table.h index 2eb854f553d..f95be1fcccb 100644 --- a/sql/table.h +++ b/sql/table.h @@ -20,6 +20,8 @@ class Item; /* Needed by ORDER */ class GRANT_TABLE; class st_select_lex_unit; +class st_select_lex; +class COND_EQUAL; /* Order clause list element */ @@ -27,9 +29,13 @@ typedef struct st_order { struct st_order *next; Item **item; /* Point at item in select fields */ Item *item_ptr; /* Storage for initial item */ + Item **item_copy; /* For SPs; the original item ptr */ + int counter; /* position in SELECT list, correct + only if counter_used is true*/ bool asc; /* true if ascending */ bool free_me; /* true if item isn't shared */ bool in_field_list; /* true if in select field list */ + bool counter_used; /* parameter was counter of columns */ Field *field; /* If tmp-table group */ char *buff; /* If tmp-table group */ table_map used,depend_map; @@ -45,6 +51,13 @@ typedef struct st_grant_info enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2}; +enum frm_type_enum +{ + FRMTYPE_ERROR= 0, + FRMTYPE_TABLE, + FRMTYPE_VIEW +}; + typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filebyte */ @@ -71,6 +84,7 @@ enum timestamp_auto_set_type class Field_timestamp; class Field_blob; +class Table_triggers_list; struct st_table { handler *file; @@ -79,7 +93,7 @@ struct st_table { /* hash of field names (contains pointers to elements of field array) */ HASH name_hash; byte *record[2]; /* Pointer to records */ - byte *default_values; /* Default values for INSERT */ + byte *default_values; /* Default values for INSERT */ byte *insert_values; /* used by INSERT ... 
UPDATE */ uint fields; /* field count */ uint reclength; /* Recordlength */ @@ -163,6 +177,8 @@ struct st_table { REGINFO reginfo; /* field connections */ MEM_ROOT mem_root; GRANT_INFO grant; + /* Table's triggers, 0 if there are no of them */ + Table_triggers_list *triggers; /* A pair "database_name\0table_name\0", widely used as simply a db name */ char *table_cache_key; @@ -179,12 +195,8 @@ struct st_table { key_part_map const_key_parts[MAX_KEY]; ulong query_id; uchar frm_version; - - union /* Temporary variables */ - { - uint temp_pool_slot; /* Used by intern temp tables */ - struct st_table_list *pos_in_table_list; - }; + uint temp_pool_slot; /* Used by intern temp tables */ + struct st_table_list *pos_in_table_list;/* Element referring to this table */ /* number of select if it is derived table */ uint derived_select_number; THD *in_use; /* Which thread uses this */ @@ -195,32 +207,166 @@ struct st_table { #define JOIN_TYPE_LEFT 1 #define JOIN_TYPE_RIGHT 2 +#define VIEW_ALGORITHM_UNDEFINED 0 +#define VIEW_ALGORITHM_TMPTABLE 1 +#define VIEW_ALGORITHM_MERGE 2 + +/* view WITH CHECK OPTION parameter options */ +#define VIEW_CHECK_NONE 0 +#define VIEW_CHECK_LOCAL 1 +#define VIEW_CHECK_CASCADED 2 + +/* result of view WITH CHECK OPTION parameter check */ +#define VIEW_CHECK_OK 0 +#define VIEW_CHECK_ERROR 1 +#define VIEW_CHECK_SKIP 2 + +struct st_lex; +struct select_union; + typedef struct st_table_list { - struct st_table_list *next; + /* link in a local table list (used by SQL_LIST) */ + struct st_table_list *next_local; + /* link in a global list of all queries tables */ + struct st_table_list *next_global, **prev_global; char *db, *alias, *real_name; - char *option; /* Used by cache index */ + char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ + COND_EQUAL *cond_equal; /* Used with outer join */ struct st_table_list *natural_join; /* natural join on this table*/ /* ... join ... USE INDEX ... 
IGNORE INDEX */ - List<String> *use_index, *ignore_index; - TABLE *table; /* opened table */ - st_table_list *table_list; /* pointer to node of list of all tables */ - class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ - GRANT_INFO grant; + List<String> *use_index, *ignore_index; + TABLE *table; /* opened table */ + /* + select_result for derived table to pass it from table creation to table + filling procedure + */ + select_union *derived_result; + /* + Reference from aux_tables to local list entry of main select of + multi-delete statement: + delete t1 from t2,t1 where t1.a<'B' and t2.b=t1.b; + here it will be reference of first occurrence of t1 to second (as you + can see this lists can't be merged) + */ + st_table_list *correspondent_table; + st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + /* link to select_lex where this table was used */ + st_select_lex *select_lex; + st_lex *view; /* link on VIEW lex for merging */ + Item **field_translation; /* array of VIEW fields */ + /* ancestor of this table (VIEW merge algorithm) */ + st_table_list *ancestor; + /* most upper view this table belongs to */ + st_table_list *belong_to_view; + Item *where; /* VIEW WHERE clause condition */ + Item *check_option; /* WITH CHECK OPTION condition */ + LEX_STRING query; /* text of (CRETE/SELECT) statement */ + LEX_STRING md5; /* md5 of query text */ + LEX_STRING source; /* source of CREATE VIEW */ + LEX_STRING view_db; /* saved view database */ + LEX_STRING view_name; /* saved view name */ + LEX_STRING timestamp; /* GMT time stamp of last operation */ + ulonglong file_version; /* version of file's field set */ + ulonglong updatable_view; /* VIEW can be updated */ + ulonglong revision; /* revision control number */ + ulonglong algorithm; /* 0 any, 1 tmp tables , 2 merging */ + ulonglong with_check; /* WITH CHECK OPTION */ + /* + effective value of WITH CHECK OPTION (differ for temporary table + algorithm) + */ + uint8 effective_with_check; + uint effective_algorithm; /* which algorithm was really used */ + GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ uint32 db_length, real_name_length; + bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ - bool force_index; /* Prefer index over table scan */ - bool ignore_leaves; /* Preload only non-leaf nodes */ + bool force_index; /* prefer index over table scan */ + bool ignore_leaves; /* preload only non-leaf nodes */ + bool no_where_clause; /* do not attach WHERE to SELECT */ + table_map dep_tables; /* tables the table depends on */ + table_map on_expr_dep_tables; /* tables on expression depends on */ + struct st_nested_join *nested_join; /* if the element is a nested join */ + st_table_list *embedding; /* nested join containing the table */ + List<struct st_table_list> *join_list;/* join list the table belongs to */ bool cacheable_table; /* stop PS caching */ - /* used in multi-upd privelege check */ - bool table_in_update_from_clause; + /* used in multi-upd/views privilege check */ + bool table_in_first_from_clause; + bool skip_temporary; /* this table shouldn't be temporary */ + bool setup_is_done; /* setup_tables() is done */ + /* TRUE if this merged view contain auto_increment field */ + bool contain_auto_increment; + /* FRMTYPE_ERROR if any type is acceptable */ + enum frm_type_enum required_type; + char timestamp_buffer[20]; /* buffer for 
timestamp (19+1) */ + + void calc_md5(char *buffer); + void set_ancestor(); + int view_check_option(THD *thd, bool ignore_failure); + bool setup_ancestor(THD *thd, Item **conds, uint8 check_option); + void cleanup_items(); + bool placeholder() {return derived || view; } + void print(THD *thd, String *str); } TABLE_LIST; +class Item; + +class Field_iterator: public Sql_alloc +{ +public: + virtual ~Field_iterator() {} + virtual void set(TABLE_LIST *)= 0; + virtual void next()= 0; + virtual bool end_of_fields()= 0; /* Return 1 at end of list */ + virtual const char *name()= 0; + virtual Item *item(THD *)= 0; + virtual Field *field()= 0; +}; + + +class Field_iterator_table: public Field_iterator +{ + Field **ptr; +public: + Field_iterator_table() :ptr(0) {} + void set(TABLE_LIST *table) { ptr= table->table->field; } + void set_table(TABLE *table) { ptr= table->field; } + void next() { ptr++; } + bool end_of_fields() { return *ptr == 0; } + const char *name(); + Item *item(THD *thd); + Field *field() { return *ptr; } +}; + + +class Field_iterator_view: public Field_iterator +{ + Item **ptr, **array_end; +public: + Field_iterator_view() :ptr(0), array_end(0) {} + void set(TABLE_LIST *table); + void next() { ptr++; } + bool end_of_fields() { return ptr == array_end; } + const char *name(); + Item *item(THD *thd) { return *ptr; } + Field *field() { return 0; } +}; + +typedef struct st_nested_join +{ + List<TABLE_LIST> join_list; /* list of elements in the nested join */ + table_map used_tables; /* bitmap of tables in the nested join */ + table_map not_null_tables; /* tables that rejects nulls */ + struct st_join_table *first_nested;/* the first nested table in the plan */ + uint counter; /* to count tables in the nested join */ +} NESTED_JOIN; + typedef struct st_changed_table_list { struct st_changed_table_list *next; @@ -228,8 +374,7 @@ typedef struct st_changed_table_list uint32 key_length; } CHANGED_TABLE_LIST; -typedef struct st_open_table_list -{ +typedef struct st_open_table_list{ struct st_open_table_list *next; char *db,*table; uint32 in_use,locked; diff --git a/sql/time.cc b/sql/time.cc index e76b169b336..562f9956ccc 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -33,15 +33,6 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week) DBUG_RETURN ((int) ((daynr + 5L + (sunday_first_day_of_week ? 1L : 0L)) % 7)); } - /* Calc days in one year. works with 0 <= year <= 99 */ - -uint calc_days_in_year(uint year) -{ - return (year & 3) == 0 && (year%100 || (year%400 == 0 && year)) ? 
- 366 : 365; -} - - /* The bits in week_format has the following meaning: WEEK_MONDAY_FIRST (0) If not set Sunday is first day of week @@ -200,9 +191,16 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, uint flags) { int was_cut; - timestamp_type ts_type= str_to_datetime(str, length, l_time, flags, &was_cut); + THD *thd= current_thd; + timestamp_type ts_type; + + ts_type= str_to_datetime(str, length, l_time, + (flags | (thd->variables.sql_mode & + (MODE_INVALID_DATES | + MODE_NO_ZERO_DATE))), + &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, str, length, ts_type); + make_truncated_value_warning(current_thd, str, length, ts_type, NullS); return ts_type; } @@ -258,7 +256,8 @@ str_to_time_with_warn(const char *str, uint length, TIME *l_time) int was_cut; bool ret_val= str_to_time(str, length, l_time, &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, str, length, MYSQL_TIMESTAMP_TIME); + make_truncated_value_warning(current_thd, str, length, + MYSQL_TIMESTAMP_TIME, NullS); return ret_val; } @@ -772,16 +771,15 @@ void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type) + uint str_length, timestamp_type time_type, + const char *field_name) { char warn_buff[MYSQL_ERRMSG_SIZE]; const char *type_str; - + CHARSET_INFO *cs= &my_charset_latin1; char buff[128]; String str(buff,(uint32) sizeof(buff), system_charset_info); - str.length(0); - str.append(str_val, str_length); - str.append('\0'); + str.copy(str_val, str_length, system_charset_info); switch (time_type) { case MYSQL_TIMESTAMP_DATE: @@ -795,8 +793,15 @@ void make_truncated_value_warning(THD *thd, const char *str_val, type_str= "datetime"; break; } - sprintf(warn_buff, ER(ER_TRUNCATED_WRONG_VALUE), - type_str, str.ptr()); + if (field_name) + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + type_str, str.c_ptr(), field_name, + (ulong) thd->row_count); + else + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE), + type_str, str.ptr()); push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, warn_buff); } diff --git a/sql/tztime.cc b/sql/tztime.cc index c2143b0d5dd..50c496577e0 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1395,44 +1395,66 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, /* - Prepare table list with time zone related tables from preallocated array. + Prepare table list with time zone related tables from preallocated array + and add to global table list. SYNOPSIS tz_init_table_list() - tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects. + tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). DESCRIPTION This function prepares list of TABLE_LIST objects which can be used - for opening of time zone tables from preallocated array. + for opening of time zone tables from preallocated array. It also links + this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). 
*/ -void -tz_init_table_list(TABLE_LIST *tz_tabs) +static void +tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) { bzero(tz_tabs, sizeof(TABLE_LIST) * 4); tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name"; tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone"; tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type"; tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition"; - tz_tabs[0].next= tz_tabs+1; - tz_tabs[1].next= tz_tabs+2; - tz_tabs[2].next= tz_tabs+3; + tz_tabs[0].next_global= tz_tabs[0].next_local= tz_tabs+1; + tz_tabs[1].next_global= tz_tabs[1].next_local= tz_tabs+2; + tz_tabs[2].next_global= tz_tabs[2].next_local= tz_tabs+3; tz_tabs[0].lock_type= tz_tabs[1].lock_type= tz_tabs[2].lock_type= tz_tabs[3].lock_type= TL_READ; tz_tabs[0].db= tz_tabs[1].db= tz_tabs[2].db= tz_tabs[3].db= (char *)"mysql"; + + /* Link into global list */ + tz_tabs[0].prev_global= *global_next_ptr; + tz_tabs[1].prev_global= &tz_tabs[0].next_global; + tz_tabs[2].prev_global= &tz_tabs[1].next_global; + tz_tabs[3].prev_global= &tz_tabs[2].next_global; + + **global_next_ptr= tz_tabs; + /* Update last-global-pointer to point to pointer in last table */ + *global_next_ptr= &tz_tabs[3].next_global; } /* - Create table list with time zone related tables. + Create table list with time zone related tables and add it to the end + of global table list. SYNOPSIS my_tz_get_table_list() - thd - current thread object + thd - current thread object + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). DESCRIPTION This function creates list of TABLE_LIST objects allocated in thd's - memroot, which can be used for opening of time zone tables. + memroot, which can be used for opening of time zone tables. It will also + link this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). NOTE my_tz_check_n_skip_implicit_tables() function depends on fact that @@ -1444,19 +1466,20 @@ tz_init_table_list(TABLE_LIST *tz_tabs) */ TABLE_LIST * -my_tz_get_table_list(THD *thd) +my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr) { TABLE_LIST *tz_tabs; + DBUG_ENTER("my_tz_get_table_list"); if (!time_zone_tables_exist) - return 0; + DBUG_RETURN(0); if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * 4))) - return &fake_time_zone_tables_list; + DBUG_RETURN(&fake_time_zone_tables_list); - tz_init_table_list(tz_tabs); + tz_init_table_list(tz_tabs, global_next_ptr); - return tz_tabs; + DBUG_RETURN(tz_tabs); } @@ -1490,7 +1513,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) { THD *thd; TABLE_LIST *tables= 0; - TABLE_LIST tables_buff[5]; + TABLE_LIST tables_buff[5], **last_global_next_ptr; TABLE *table; TZ_NAMES_ENTRY *tmp_tzname; my_bool return_val= 1; @@ -1557,9 +1580,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; tables_buff[0].db= thd->db; - tables_buff[0].next= tables_buff + 1; - /* Fill TABLE_LIST for rest of the time zone describing tables */ - tz_init_table_list(tables_buff + 1); + /* + Fill TABLE_LIST for the rest of the time zone describing tables + and link it to first one. 
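The tz_init_table_list() change above appends its four TABLE_LIST entries to the global table list through a pointer to the last element's next_global member, so every caller can keep extending the list in O(1). A minimal stand-alone sketch of that tail-pointer pattern, using a hypothetical Node type and link_block() helper rather than the real TABLE_LIST:

#include <cstdio>

/*
  Stand-alone sketch (hypothetical types, not MySQL code): the caller passes
  the address of the pointer that currently terminates the list, the callee
  hooks its elements in and moves that address forward, so repeated calls
  keep appending in O(1).
*/
struct Node
{
  const char *name;
  Node *next_global;
};

static void link_block(Node *blk, int n, Node ***tail)
{
  for (int i= 0; i < n; i++)
    blk[i].next_global= (i + 1 < n) ? &blk[i + 1] : 0;
  **tail= blk;                      /* attach block to the current list end */
  *tail= &blk[n - 1].next_global;   /* the new end is the last next pointer */
}

int main()
{
  Node *head= 0;
  Node **tail= &head;
  Node a[2]= {{"time_zone_name", 0}, {"time_zone", 0}};
  Node b[2]= {{"time_zone_transition_type", 0}, {"time_zone_transition", 0}};
  link_block(a, 2, &tail);
  link_block(b, 2, &tail);
  for (Node *p= head; p; p= p->next_global)
    printf("%s\n", p->name);
  return 0;
}

This is the same mechanism my_tz_get_table_list() now relies on to splice the time zone tables behind whatever tables the statement already lists.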
+ */ + last_global_next_ptr= &(tables_buff[0].next_global); + tz_init_table_list(tables_buff + 1, &last_global_next_ptr); if (open_tables(thd, tables_buff, &counter) || lock_tables(thd, tables_buff, counter)) @@ -1761,8 +1787,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) and it is specifically for this purpose). */ table= tz_tables->table; - tz_tables= tz_tables->next; - table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); + tz_tables= tz_tables->next_local; + table->field[0]->store(tz_name->ptr(), tz_name->length(), + &my_charset_latin1); /* It is OK to ignore ha_index_init()/ha_index_end() return values since mysql.time_zone* tables are MyISAM and these operations always succeed @@ -1773,7 +1800,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, 0, HA_READ_KEY_EXACT)) { +#ifdef EXTRA_DEBUG sql_print_error("Can't find description of time zone."); +#endif goto end; } @@ -1787,7 +1816,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) using the only index in this table). */ table= tz_tables->table; - tz_tables= tz_tables->next; + tz_tables= tz_tables->next_local; table->field[0]->store((longlong)tzid); (void)table->file->ha_index_init(0); @@ -1814,7 +1843,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) Right - using special index. */ table= tz_tables->table; - tz_tables= tz_tables->next; + tz_tables= tz_tables->next_local; table->field[0]->store((longlong)tzid); (void)table->file->ha_index_init(0); diff --git a/sql/tztime.h b/sql/tztime.h index 9f969639bd0..6d2388bb160 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -59,7 +59,7 @@ public: extern Time_zone * my_tz_UTC; extern Time_zone * my_tz_SYSTEM; -extern TABLE_LIST * my_tz_get_table_list(THD *thd); +extern TABLE_LIST * my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr); extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables); extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap); extern void my_tz_free(); diff --git a/sql/udf_example.cc b/sql/udf_example.cc index 7e2ee9113b2..f0f33ed6fd7 100644 --- a/sql/udf_example.cc +++ b/sql/udf_example.cc @@ -56,7 +56,9 @@ ** ** Function 'myfunc_int' returns summary length of all its arguments. ** -** Function 'sequence' returns an sequence starting from a certain number +** Function 'sequence' returns an sequence starting from a certain number. +** +** Function 'myfunc_argument_name' returns name of argument. ** ** On the end is a couple of functions that converts hostnames to ip and ** vice versa. @@ -82,6 +84,7 @@ ** CREATE FUNCTION lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE FUNCTION reverse_lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE AGGREGATE FUNCTION avgcost RETURNS REAL SONAME "udf_example.so"; +** CREATE FUNCTION myfunc_argument_name RETURNS STRING SONAME "udf_example.so"; ** ** After this the functions will work exactly like native MySQL functions. ** Functions should be created only once. @@ -94,6 +97,7 @@ ** DROP FUNCTION lookup; ** DROP FUNCTION reverse_lookup; ** DROP FUNCTION avgcost; +** DROP FUNCTION myfunc_argument_name; ** ** The CREATE FUNCTION and DROP FUNCTION update the func@mysql table. 
All ** Active function will be reloaded on every restart of server @@ -984,4 +988,43 @@ avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* error ) return data->totalprice/double(data->totalquantity); } +extern "C" { +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message); +char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error); +} + +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message) +{ + if (args->arg_count != 1) + { + strmov(message,"myfunc_argument_name_init accepts only one argument"); + return 1; + } + initid->max_length= args->attribute_lengths[0]; + initid->maybe_null= 1; + initid->const_item= 1; + return 0; +} + +char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error) +{ + if (!args->attributes[0]) + { + null_value= 0; + return 0; + } + (*length)--; // space for ending \0 (for debugging purposes) + if (*length > args->attribute_lengths[0]) + *length= args->attribute_lengths[0]; + memcpy(result, args->attributes[0], *length); + result[*length]= 0; + return result; +} + #endif /* HAVE_DLOPEN */ diff --git a/sql/uniques.cc b/sql/uniques.cc index d060965aa66..b08727705e4 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -63,12 +63,255 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, comp_func_fixed_arg); /* If the following fail's the next add will also fail */ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16); + /* + If you change the following, change it in get_max_elements function, too. + */ max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size); open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)); } +/* + Calculate log2(n!) + + NOTES + Stirling's approximate formula is used: + + n! ~= sqrt(2*M_PI*n) * (n/M_E)^n + + Derivation of formula used for calculations is as follows: + + log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) = + + = (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2). +*/ + +inline double log2_n_fact(double x) +{ + return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2; +} + + +/* + Calculate cost of merge_buffers function call for given sequence of + input stream lengths and store the number of rows in result stream in *last. + + SYNOPSIS + get_merge_buffers_cost() + buff_elems Array of #s of elements in buffers + elem_size Size of element stored in buffer + first Pointer to first merged element size + last Pointer to last merged element size + + RETURN + Cost of merge_buffers operation in disk seeks. + + NOTES + It is assumed that no rows are eliminated during merge. + The cost is calculated as + + cost(read_and_write) + cost(merge_comparisons). + + All bytes in the sequences is read and written back during merge so cost + of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write) + + For comparisons cost calculations we assume that all merged sequences have + the same length, so each of total_buf_size elements will be added to a sort + heap with (n_buffers-1) elements. 
This gives the comparison cost: + + total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID; +*/ + +static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, + uint *first, uint *last) +{ + uint total_buf_elems= 0; + for (uint *pbuf= first; pbuf <= last; pbuf++) + total_buf_elems+= *pbuf; + *last= total_buf_elems; + + int n_buffers= last - first + 1; + + /* Using log2(n)=log(n)/log(2) formula */ + return 2*((double)total_buf_elems*elem_size) / IO_SIZE + + total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2); +} + + +/* + Calculate cost of merging buffers into one in Unique::get, i.e. calculate + how long (in terms of disk seeks) the two calls + merge_many_buffs(...); + merge_buffers(...); + will take. + + SYNOPSIS + get_merge_many_buffs_cost() + buffer buffer space for temporary data, at least + Unique::get_cost_calc_buff_size bytes + maxbuffer # of full buffers + max_n_elems # of elements in first maxbuffer buffers + last_n_elems # of elements in last buffer + elem_size size of buffer element + + NOTES + maxbuffer+1 buffers are merged, where first maxbuffer buffers contain + max_n_elems elements each and last buffer contains last_n_elems elements. + + The current implementation does a dumb simulation of merge_many_buffs + function actions. + + RETURN + Cost of merge in disk seeks. +*/ + +static double get_merge_many_buffs_cost(uint *buffer, + uint maxbuffer, uint max_n_elems, + uint last_n_elems, int elem_size) +{ + register int i; + double total_cost= 0.0; + uint *buff_elems= buffer; /* #s of elements in each of merged sequences */ + + /* + Set initial state: first maxbuffer sequences contain max_n_elems elements + each, last sequence contains last_n_elems elements. + */ + for(i = 0; i < (int)maxbuffer; i++) + buff_elems[i]= max_n_elems; + buff_elems[maxbuffer]= last_n_elems; + + /* + Do it exactly as merge_many_buff function does, calling + get_merge_buffers_cost to get cost of merge_buffers. + */ + if (maxbuffer >= MERGEBUFF2) + { + while (maxbuffer >= MERGEBUFF2) + { + uint lastbuff= 0; + for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF) + { + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + i + MERGEBUFF-1); + lastbuff++; + } + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + maxbuffer); + maxbuffer= lastbuff; + } + } + + /* Simulate final merge_buff call. */ + total_cost += get_merge_buffers_cost(buff_elems, elem_size, + buff_elems, buff_elems + maxbuffer); + return total_cost; +} + + +/* + Calculate cost of using Unique for processing nkeys elements of size + key_size using max_in_memory_size memory. + + SYNOPSIS + Unique::get_use_cost() + buffer space for temporary data, use Unique::get_cost_calc_buff_size + to get # bytes needed. + nkeys #of elements in Unique + key_size size of each elements in bytes + max_in_memory_size amount of memory Unique will be allowed to use + + RETURN + Cost in disk seeks. + + NOTES + cost(using_unqiue) = + cost(create_trees) + (see #1) + cost(merge) + (see #2) + cost(read_result) (see #3) + + 1. Cost of trees creation + For each Unique::put operation there will be 2*log2(n+1) elements + comparisons, where n runs from 1 tree_size (we assume that all added + elements are different). Together this gives: + + n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!) 
+ + then cost(tree_creation) = n_compares*ROWID_COMPARE_COST; + + Total cost of creating trees: + (n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost. + + Approximate value of log2(N!) is calculated by log2_n_fact function. + + 2. Cost of merging. + If only one tree is created by Unique no merging will be necessary. + Otherwise, we model execution of merge_many_buff function and count + #of merges. (The reason behind this is that number of buffers is small, + while size of buffers is big and we don't want to loose precision with + O(x)-style formula) + + 3. If only one tree is created by Unique no disk io will happen. + Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume + these will be random seeks. +*/ + +double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulong max_in_memory_size) +{ + ulong max_elements_in_tree; + ulong last_tree_elems; + int n_full_trees; /* number of trees in unique - 1 */ + double result; + + max_elements_in_tree= + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size); + + n_full_trees= nkeys / max_elements_in_tree; + last_tree_elems= nkeys % max_elements_in_tree; + + /* Calculate cost of creating trees */ + result= 2*log2_n_fact(last_tree_elems + 1.0); + if (n_full_trees) + result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0); + result /= TIME_FOR_COMPARE_ROWID; + + DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys, + n_full_trees, n_full_trees?max_elements_in_tree:0, + last_tree_elems)); + + if (!n_full_trees) + return result; + + /* + There is more then one tree and merging is necessary. + First, add cost of writing all trees to disk, assuming that all disk + writes are sequential. + */ + result += DISK_SEEK_BASE_COST * n_full_trees * + ceil(((double) key_size)*max_elements_in_tree / IO_SIZE); + result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE); + + /* Cost of merge */ + double merge_cost= get_merge_many_buffs_cost(buffer, n_full_trees, + max_elements_in_tree, + last_tree_elems, key_size); + if (merge_cost < 0.0) + return merge_cost; + + result += merge_cost; + /* + Add cost of reading the resulting sequence, assuming there were no + duplicate elements. + */ + result += ceil((double)key_size*nkeys/IO_SIZE); + + return result; +} + Unique::~Unique() { close_cached_file(&file); @@ -84,6 +327,7 @@ bool Unique::flush() elements+= tree.elements_in_tree; file_ptr.count=tree.elements_in_tree; file_ptr.file_pos=my_b_tell(&file); + if (tree_walk(&tree, (tree_walk_action) unique_write_to_file, (void*) this, left_root_right) || insert_dynamic(&file_ptrs, (gptr) &file_ptr)) @@ -94,6 +338,237 @@ bool Unique::flush() /* + Clear the tree and the file. + You must call reset() if you want to reuse Unique after walk(). +*/ + +void +Unique::reset() +{ + reset_tree(&tree); + /* + If elements != 0, some trees were stored in the file (see how + flush() works). Note, that we can not count on my_b_tell(&file) == 0 + here, because it can return 0 right after walk(), and walk() does not + reset any Unique member. + */ + if (elements) + { + reset_dynamic(&file_ptrs); + reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1); + } + elements= 0; +} + +/* + The comparison function, passed to queue_init() in merge_walk() must + use comparison function of Uniques::tree, but compare members of struct + BUFFPEK. 
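Unique::get_use_cost() above leans on the Stirling-based log2_n_fact() approximation for the tree-creation term. A quick stand-alone sanity check, not part of the patch, comparing that closed form against the exact sum of log2(k):

#include <cmath>
#include <cstdio>

/*
  Compare the Stirling-based approximation of log2(n!) (same formula as the
  patch's log2_n_fact()) with the exact value, computed as the sum of
  log2(k) for k = 2..n.
*/
static double log2_n_fact(double x)
{
  return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2;
}

int main()
{
  for (int n= 10; n <= 100000; n*= 10)
  {
    double exact= 0.0;
    for (int k= 2; k <= n; k++)
      exact+= log((double) k) / M_LN2;
    double approx= log2_n_fact((double) n);
    printf("n=%-7d exact=%12.3f stirling=%12.3f rel_err=%.2e\n",
           n, exact, approx, fabs(exact - approx) / exact);
  }
  return 0;
}

Already at n=10 the relative error is below 0.1%, which is ample for a cost model measured in disk seeks.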
+*/ + +struct BUFFPEK_COMPARE_CONTEXT +{ + qsort_cmp2 key_compare; + void *key_compare_arg; +}; + +C_MODE_START + +static int buffpek_compare(void *arg, byte *key_ptr1, byte *key_ptr2) +{ + BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg; + return ctx->key_compare(ctx->key_compare_arg, + *((byte **) key_ptr1), *((byte **)key_ptr2)); +} + +C_MODE_END + + +/* + DESCRIPTION + Function is very similar to merge_buffers, but instead of writing sorted + unique keys to the output file, it invokes walk_action for each key. + This saves I/O if you need to pass through all unique keys only once. + SYNOPSIS + merge_walk() + All params are 'IN' (but see comment for begin, end): + merge_buffer buffer to perform cached piece-by-piece loading + of trees; initially the buffer is empty + merge_buffer_size size of merge_buffer. Must be aligned with + key_length + key_length size of tree element; key_length * (end - begin) + must be less or equal than merge_buffer_size. + begin pointer to BUFFPEK struct for the first tree. + end pointer to BUFFPEK struct for the last tree; + end > begin and [begin, end) form a consecutive + range. BUFFPEKs structs in that range are used and + overwritten in merge_walk(). + walk_action element visitor. Action is called for each unique + key. + walk_action_arg argument to walk action. Passed to it on each call. + compare elements comparison function + compare_arg comparison function argument + file file with all trees dumped. Trees in the file + must contain sorted unique values. Cache must be + initialized in read mode. + RETURN VALUE + 0 ok + <> 0 error +*/ + +static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size, + uint key_length, BUFFPEK *begin, BUFFPEK *end, + tree_walk_action walk_action, void *walk_action_arg, + qsort_cmp2 compare, void *compare_arg, + IO_CACHE *file) +{ + BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg }; + QUEUE queue; + if (end <= begin || + merge_buffer_size < key_length * (end - begin + 1) || + init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0, + buffpek_compare, &compare_context)) + return 1; + /* we need space for one key when a piece of merge buffer is re-read */ + merge_buffer_size-= key_length; + uchar *save_key_buff= merge_buffer + merge_buffer_size; + uint max_key_count_per_piece= merge_buffer_size/(end-begin)/key_length; + /* if piece_size is aligned reuse_freed_buffer will always hit */ + uint piece_size= max_key_count_per_piece * key_length; + uint bytes_read; /* to hold return value of read_to_buffer */ + BUFFPEK *top; + int res= 1; + /* + Invariant: queue must contain top element from each tree, until a tree + is not completely walked through. + Here we're forcing the invariant, inserting one element from each tree + to the queue. + */ + for (top= begin; top != end; ++top) + { + top->base= merge_buffer + (top - begin) * piece_size; + top->max_keys= max_key_count_per_piece; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + DBUG_ASSERT(bytes_read); + queue_insert(&queue, (byte *) top); + } + top= (BUFFPEK *) queue_top(&queue); + while (queue.elements > 1) + { + /* + Every iteration one element is removed from the queue, and one is + inserted by the rules of the invariant. If two adjacent elements on + the top of the queue are not equal, biggest one is unique, because all + elements in each tree are unique. Action is applied only to unique + elements. 
+ */ + void *old_key= top->key; + /* + read next key from the cache or from the file and push it to the + queue; this gives new top. + */ + top->key+= key_length; + if (--top->mem_count) + queue_replaced(&queue); + else /* next piece should be read */ + { + /* save old_key not to overwrite it in read_to_buffer */ + memcpy(save_key_buff, old_key, key_length); + old_key= save_key_buff; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + else if (bytes_read > 0) /* top->key, top->mem_count are reset */ + queue_replaced(&queue); /* in read_to_buffer */ + else + { + /* + Tree for old 'top' element is empty: remove it from the queue and + give all its memory to the nearest tree. + */ + queue_remove(&queue, 0); + reuse_freed_buff(&queue, top, key_length); + } + } + top= (BUFFPEK *) queue_top(&queue); + /* new top has been obtained; if old top is unique, apply the action */ + if (compare(compare_arg, old_key, top->key)) + { + if (walk_action(old_key, 1, walk_action_arg)) + goto end; + } + } + /* + Applying walk_action to the tail of the last tree: this is safe because + either we had only one tree in the beginning, either we work with the + last tree in the queue. + */ + do + { + do + { + if (walk_action(top->key, 1, walk_action_arg)) + goto end; + top->key+= key_length; + } + while (--top->mem_count); + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + } + while (bytes_read); + res= 0; +end: + delete_queue(&queue); + return res; +} + + +/* + DESCRIPTION + Walks consecutively through all unique elements: + if all elements are in memory, then it simply invokes 'tree_walk', else + all flushed trees are loaded to memory piece-by-piece, pieces are + sorted, and action is called for each unique value. + Note: so as merging resets file_ptrs state, this method can change + internal Unique state to undefined: if you want to reuse Unique after + walk() you must call reset() first! + SYNOPSIS + Unique:walk() + All params are 'IN': + action function-visitor, typed in include/my_tree.h + function is called for each unique element + arg argument for visitor, which is passed to it on each call + RETURN VALUE + 0 OK + <> 0 error + */ + +bool Unique::walk(tree_walk_action action, void *walk_action_arg) +{ + if (elements == 0) /* the whole tree is in memory */ + return tree_walk(&tree, action, walk_action_arg, left_root_right); + + /* flush current tree to the file to have some memory for merge buffer */ + if (flush()) + return 1; + if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0)) + return 1; + uchar *merge_buffer= (uchar *) my_malloc(max_in_memory_size, MYF(0)); + if (merge_buffer == 0) + return 1; + int res= merge_walk(merge_buffer, max_in_memory_size, size, + (BUFFPEK *) file_ptrs.buffer, + (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements, + action, walk_action_arg, + tree.compare, tree.custom_arg, &file); + x_free(merge_buffer); + return res; +} + +/* Modify the TABLE element so that when one calls init_records() the rows will be read in priority order. 
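merge_walk() above performs a priority-queue merge of several sorted, internally unique key streams and fires the visitor once per distinct key. A compact STL-based sketch of the same idea, with a hypothetical merge_walk_demo() standing in for the real BUFFPEK/IO_CACHE machinery:

#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

/* Toy sketch, not the MySQL implementation. */
struct Source
{
  const std::vector<int> *keys;
  size_t pos;
  int current() const { return (*keys)[pos]; }
};

static void merge_walk_demo(const std::vector<std::vector<int> > &streams,
                            const std::function<void(int)> &action)
{
  auto cmp= [](const Source &a, const Source &b) { return a.current() > b.current(); };
  std::priority_queue<Source, std::vector<Source>, decltype(cmp)> heap(cmp);
  for (const std::vector<int> &s : streams)
    if (!s.empty())
      heap.push(Source{&s, 0});

  bool have_last= false;
  int last= 0;
  while (!heap.empty())
  {
    Source top= heap.top();
    heap.pop();
    int key= top.current();
    if (!have_last || key != last)      /* action is applied to unique keys only */
    {
      action(key);
      last= key;
      have_last= true;
    }
    if (++top.pos < top.keys->size())   /* advance this stream and re-insert */
      heap.push(top);
  }
}

int main()
{
  std::vector<std::vector<int> > streams= {{1, 4, 7}, {2, 4, 9}, {1, 3, 7, 10}};
  merge_walk_demo(streams, [](int k) { printf("%d ", k); });
  printf("\n");                         /* prints: 1 2 3 4 7 9 10 */
  return 0;
}

As in the patch, nothing is materialized: the point of walking instead of writing a merged copy is that memory stays bounded by the per-stream read buffers plus the queue.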
*/ @@ -114,7 +589,7 @@ bool Unique::get(TABLE *table) return 0; } } - /* Not enough memory; Save the result to file */ + /* Not enough memory; Save the result to file && free memory used by tree */ if (flush()) return 1; diff --git a/sql/unireg.cc b/sql/unireg.cc index c82fcc4abef..decac2a0549 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -199,7 +199,7 @@ err3: keys number of keys to create key_info Keys to create db_file Handler to use. May be zero, in which case we use - create_info->db_type + create_info->db_type RETURN 0 ok 1 error @@ -213,11 +213,11 @@ int rea_create_table(THD *thd, my_string file_name, DBUG_ENTER("rea_create_table"); if (mysql_create_frm(thd, file_name, create_info, - create_fields, keys, key_info, NULL)) + create_fields, keys, key_info, NULL)) DBUG_RETURN(1); - if (ha_create_table(file_name,create_info,0)) + if (!create_info->frm_only && ha_create_table(file_name,create_info,0)) { - my_delete(file_name,MYF(0)); + my_delete(file_name,MYF(0)); DBUG_RETURN(1); } DBUG_RETURN(0); @@ -321,7 +321,7 @@ static uint pack_keys(uchar *keybuff,uint key_count,KEY *keyinfo) pos[6]=pos[7]=0; // For the future pos+=8; key_parts+=key->key_parts; - DBUG_PRINT("loop",("flags: %d key_parts: %d at %lx", + DBUG_PRINT("loop",("flags: %d key_parts: %d at 0x%lx", key->flags,key->key_parts, key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; diff --git a/sql/unireg.h b/sql/unireg.h index 4ab2ba26b15..2879e30d861 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -139,6 +139,7 @@ #define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */ #define READ_SCREENS 1024 /* Read screens, info and helpfile */ #define DELAYED_OPEN 4096 /* Open table later */ +#define NO_ERR_ON_NEW_FRM 8192 /* stop error sending on new format */ #define SC_INFO_LENGTH 4 /* Form format constant */ #define TE_INFO_LENGTH 3 |
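The new NO_ERR_ON_NEW_FRM flag and the openfrm() changes earlier in table.cc both depend on telling the old binary .frm format (leading bytes 0xFE 0x01, the check openfrm() keeps) apart from the new text-based format that starts with a "TYPE=" header (the check openfrm() adds). A simplified sniffing sketch, a hypothetical helper rather than the real mysql_frm_type(); classifying every "TYPE=" file as a view definition is an assumption made for this sketch only:

#include <cstdio>
#include <cstring>

enum frm_type_enum { FRMTYPE_ERROR= 0, FRMTYPE_TABLE, FRMTYPE_VIEW };

static frm_type_enum sniff_frm_type(const char *path)
{
  unsigned char head[8];
  FILE *f= fopen(path, "rb");
  if (!f)
    return FRMTYPE_ERROR;
  size_t got= fread(head, 1, sizeof(head), f);
  fclose(f);
  if (got < 5)
    return FRMTYPE_ERROR;
  if (memcmp(head, "TYPE=", 5) == 0)
    return FRMTYPE_VIEW;                  /* new, parser-based definition file */
  if (head[0] == 0xFE && head[1] == 1)
    return FRMTYPE_TABLE;                 /* old binary table definition */
  return FRMTYPE_ERROR;
}

int main(int argc, char **argv)
{
  for (int i= 1; i < argc; i++)
    printf("%s: %d\n", argv[i], (int) sniff_frm_type(argv[i]));
  return 0;
}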