author | Sergey Petrunya <psergey@askmonty.org> | 2011-03-01 13:21:48 +0300
committer | Sergey Petrunya <psergey@askmonty.org> | 2011-03-01 13:21:48 +0300
commit | cb147b39654d3afea938253ed1457c088a5e87c9 (patch)
tree | 854ca74d9bf60c0a120feacde8e3b6951f46bf61 /sql
parent | b8f00542e2ed66803d747cc5151279b9edf1392a (diff)
parent | c6ba9598026b06f5d64e7508abb652ac22d50e48 (diff)
download | mariadb-git-cb147b39654d3afea938253ed1457c088a5e87c9.tar.gz
Merge 5.3 -> 5.3-subqueries-mwl90
Diffstat (limited to 'sql')
44 files changed, 1003 insertions, 566 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index f81afa6b27b..c997846a4c6 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -149,8 +149,7 @@ ADD_CUSTOM_TARGET( ${PROJECT_SOURCE_DIR}/sql/message.rc ${PROJECT_SOURCE_DIR}/sql/lex_hash.h) -ADD_DEPENDENCIES(mysqld GenServerSource) - +ADD_DEPENDENCIES(sql GenServerSource) # Remove the auto-generated files as part of 'Clean Solution' SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "lex_hash.h;sql_yacc.h;sql_yacc.cc;mysqld.def") diff --git a/sql/Makefile.am b/sql/Makefile.am index afbef9ca197..05cb56bfab7 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -81,7 +81,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ event_data_objects.h event_scheduler.h \ sql_partition.h partition_info.h partition_element.h \ contributors.h sql_servers.h \ - multi_range_read.h \ + multi_range_read.h sql_handler.h \ sql_join_cache.h \ create_options.h \ sql_expression_cache.h diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index c9c580c6442..322db38adf2 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -1942,4 +1942,7 @@ bool debug_sync_set_action(THD *thd, const char *action_str, size_t len) } +#else /* defined(ENABLED_DEBUG_SYNC) */ +/* prevent linker/lib warning about file without public symbols */ +int debug_sync_dummy; #endif /* defined(ENABLED_DEBUG_SYNC) */ diff --git a/sql/filesort.cc b/sql/filesort.cc index 8f03ee26691..6e3bf27afcc 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1215,7 +1215,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, QUEUE queue; qsort2_cmp cmp; void *first_cmp_arg; - element_count dupl_count; + element_count dupl_count= 0; uchar *src; THD::killed_state not_killable; uchar *unique_buff= param->unique_buff; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1489ead8b5a..0cf1713ed13 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -253,7 +253,6 @@ public: DBUG_RETURN(0); } virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share); - bool check_if_supported_virtual_columns(void) { return TRUE;} virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info, uint table_changes); private: diff --git a/sql/handler.cc b/sql/handler.cc index 45de1aafc10..95dd4f71336 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2688,6 +2688,12 @@ void handler::print_error(int error, myf errflag) SET_FATAL_ERROR; textno=ER_KEY_NOT_FOUND; break; + case HA_ERR_ABORTED_BY_USER: + { + DBUG_ASSERT(table->in_use->killed); + table->in_use->send_kill_message(); + DBUG_VOID_RETURN; + } case HA_ERR_WRONG_MRG_TABLE_DEF: textno=ER_WRONG_MRG_TABLE; break; @@ -2737,7 +2743,10 @@ void handler::print_error(int error, myf errflag) textno=ER_DUP_UNIQUE; break; case HA_ERR_RECORD_CHANGED: - SET_FATAL_ERROR; + /* + This is not fatal error when using HANDLER interface + SET_FATAL_ERROR; + */ textno=ER_CHECKREAD; break; case HA_ERR_CRASHED: diff --git a/sql/handler.h b/sql/handler.h index 3beda9f7c88..23d64681d26 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -137,6 +137,7 @@ #define HA_BINLOG_STMT_CAPABLE (LL(1) << 35) /* Has automatic checksums and uses the new checksum format */ #define HA_HAS_NEW_CHECKSUM (LL(1) << 36) +#define HA_CAN_VIRTUAL_COLUMNS (LL(1) << 37) #define HA_MRR_CANT_SORT (LL(1) << 37) #define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (LL(1) << 38) @@ -1558,16 +1559,20 @@ public: DBUG_ENTER("ha_index_init"); DBUG_ASSERT(inited==NONE); if (!(result= index_init(idx, sorted))) - inited=INDEX; - end_range= NULL; + { + inited= INDEX; + 
active_index= idx; + end_range= NULL; + } DBUG_RETURN(result); } int ha_index_end() { DBUG_ENTER("ha_index_end"); DBUG_ASSERT(inited==INDEX); - inited=NONE; - end_range= NULL; + inited= NONE; + active_index= MAX_KEY; + end_range= NULL; DBUG_RETURN(index_end()); } /* This is called after index_init() if we need to do a index scan */ @@ -1750,7 +1755,12 @@ public: as there may be several calls to this routine. */ virtual void column_bitmaps_signal(); - uint get_index(void) const { return active_index; } + /* + We have to check for inited as some engines, like innodb, sets + active_index during table scan. + */ + uint get_index(void) const + { return inited == INDEX ? active_index : MAX_KEY; } virtual int close(void)=0; /** @@ -2003,6 +2013,7 @@ public: { return(NULL);} /* gets tablespace name from handler */ /** used in ALTER TABLE; 1 if changing storage engine is allowed */ virtual bool can_switch_engines() { return 1; } + virtual int can_continue_handler_scan() { return 0; } /** used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */ virtual int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) { return 0; } @@ -2259,8 +2270,8 @@ private: */ virtual int open(const char *name, int mode, uint test_if_locked)=0; - virtual int index_init(uint idx, bool sorted) { active_index= idx; return 0; } - virtual int index_end() { active_index= MAX_KEY; return 0; } + virtual int index_init(uint idx, bool sorted) { return 0; } + virtual int index_end() { return 0; } /** rnd_init() can be called two times without rnd_end() in between (it only makes sense if scan=1). diff --git a/sql/item.h b/sql/item.h index 08b283b6903..7c4b3c6e819 100644 --- a/sql/item.h +++ b/sql/item.h @@ -584,10 +584,17 @@ public: virtual void fix_after_pullout(st_select_lex *new_parent, Item **ref) {}; /* - should be used in case where we are sure that we do not need + This method should be used in case where we are sure that we do not need complete fix_fields() procedure. - */ - inline void quick_fix_field() { fixed= 1; } + Usually this method is used by the optimizer when it has to create a new + item out of other already fixed items. For example, if the optimizer has + to create a new Item_func for an inferred equality whose left and right + parts are already fixed items. In some cases the optimizer cannot use + directly fixed items as the arguments of the created functional item, + but rather uses intermediate type conversion items. Then the method is + supposed to be applied recursively. 
+ */ + virtual inline void quick_fix_field() { fixed= 1; } /* Function returns 1 on overflow and -1 on fatal errors */ int save_in_field_no_warnings(Field *field, bool no_conversions); virtual int save_in_field(Field *field, bool no_conversions); diff --git a/sql/item_func.cc b/sql/item_func.cc index 7404e99ba1c..dfad8a9af06 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -205,6 +205,21 @@ Item_func::fix_fields(THD *thd, Item **ref) return FALSE; } +void +Item_func::quick_fix_field() +{ + Item **arg,**arg_end; + if (arg_count) + { + for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) + { + if (!(*arg)->fixed) + (*arg)->quick_fix_field(); + } + } + fixed= 1; +} + void Item_func::fix_after_pullout(st_select_lex *new_parent, Item **ref) { diff --git a/sql/item_func.h b/sql/item_func.h index 0501b6f2299..9c0d827d582 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -118,6 +118,7 @@ public: Item_func(THD *thd, Item_func *item); bool fix_fields(THD *, Item **ref); void fix_after_pullout(st_select_lex *new_parent, Item **ref); + void quick_fix_field(); table_map used_tables() const; table_map not_null_tables() const; void update_used_tables(); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index c0651857101..c4289045b7e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -4404,8 +4404,8 @@ bool Ordered_key::alloc_keys_buffers() { DBUG_ASSERT(key_buff_elements > 0); - if (!(key_buff= (rownum_t*) my_malloc(key_buff_elements * sizeof(rownum_t), - MYF(MY_WME)))) + if (!(key_buff= (rownum_t*) my_malloc((size_t)(key_buff_elements * + sizeof(rownum_t)), MYF(MY_WME)))) return TRUE; /* @@ -4414,7 +4414,7 @@ bool Ordered_key::alloc_keys_buffers() lookup offset. */ /* Notice that max_null_row is max array index, we need count, so +1. */ - if (bitmap_init(&null_key, NULL, max_null_row + 1, FALSE)) + if (bitmap_init(&null_key, NULL, (uint)(max_null_row + 1), FALSE)) return TRUE; cur_key_idx= HA_POS_ERROR; @@ -4478,7 +4478,7 @@ Ordered_key::cmp_keys_by_row_data_and_rownum(Ordered_key *key, void Ordered_key::sort_keys() { - my_qsort2(key_buff, key_buff_elements, sizeof(rownum_t), + my_qsort2(key_buff, (size_t) key_buff_elements, sizeof(rownum_t), (qsort2_cmp) &cmp_keys_by_row_data_and_rownum, (void*) this); /* Invalidate the current row position. */ cur_key_idx= HA_POS_ERROR; @@ -4794,8 +4794,8 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, */ if (!(merge_keys= (Ordered_key**) thd->alloc(keys_count * sizeof(Ordered_key*))) || - !(row_num_to_rowid= (uchar*) my_malloc(row_count * rowid_length * - sizeof(uchar), MYF(MY_WME)))) + !(row_num_to_rowid= (uchar*) my_malloc((size_t)(row_count * rowid_length), + MYF(MY_WME)))) return TRUE; /* Create the only non-NULL key if there is any. 
*/ diff --git a/sql/item_subselect.h b/sql/item_subselect.h index a0db43fe9fe..772196a4ea3 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -1029,7 +1029,7 @@ public: void set_null(rownum_t row_num) { - bitmap_set_bit(&null_key, row_num); + bitmap_set_bit(&null_key, (uint)row_num); } bool is_null(rownum_t row_num) { @@ -1045,7 +1045,7 @@ public: } if (row_num > max_null_row || row_num < min_null_row) return FALSE; - return bitmap_is_set(&null_key, row_num); + return bitmap_is_set(&null_key, (uint)row_num); } void print(String *str); }; diff --git a/sql/lock.cc b/sql/lock.cc index 8f5b5ac233f..4cab521fada 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -84,41 +84,10 @@ extern HASH open_cache; -/* flags for get_lock_data */ -#define GET_LOCK_UNLOCK 1 -#define GET_LOCK_STORE_LOCKS 2 - -static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count, - uint flags, TABLE **write_locked); -static void reset_lock_data(MYSQL_LOCK *sql_lock); static int lock_external(THD *thd, TABLE **table,uint count); static int unlock_external(THD *thd, TABLE **table,uint count); static void print_lock_error(int error, const char *); -/* - Lock tables. - - SYNOPSIS - mysql_lock_tables() - thd The current thread. - tables An array of pointers to the tables to lock. - count The number of tables to lock. - flags Options: - MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK Ignore a global read lock - MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY Ignore SET GLOBAL READ_ONLY - MYSQL_LOCK_IGNORE_FLUSH Ignore a flush tables. - MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN Instead of reopening altered - or dropped tables by itself, - mysql_lock_tables() should - notify upper level and rely - on caller doing this. - need_reopen Out parameter, TRUE if some tables were altered - or deleted and should be reopened by caller. - - RETURN - A lock structure pointer on success. - NULL on error or if some tables should be reopen. -*/ /* Map the return value of thr_lock to an error from errmsg.txt */ static int thr_lock_errno_to_mysql[]= @@ -132,6 +101,7 @@ static int thr_lock_errno_to_mysql[]= @param flags Lock flags @return 0 if all the check passed, non zero if a check failed. */ + int mysql_lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) { bool log_table_write_query; @@ -194,81 +164,118 @@ int mysql_lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) DBUG_RETURN(0); } + +/* + Lock tables. + + SYNOPSIS + mysql_lock_tables() + thd The current thread. + tables An array of pointers to the tables to lock. + count The number of tables to lock. + flags Options: + MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK Ignore a global read lock + MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY Ignore SET GLOBAL READ_ONLY + MYSQL_LOCK_IGNORE_FLUSH Ignore a flush tables. + MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN Instead of reopening altered + or dropped tables by itself, + mysql_lock_tables() should + notify upper level and rely + on caller doing this. + need_reopen Out parameter, TRUE if some tables were altered + or deleted and should be reopened by caller. + + RETURN + A lock structure pointer on success. + NULL on error or if some tables should be reopen. 
+*/ + MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags, bool *need_reopen) { - MYSQL_LOCK *sql_lock; TABLE *write_lock_used; - int rc; - - DBUG_ENTER("mysql_lock_tables"); + MYSQL_LOCK *sql_lock; + DBUG_ENTER("mysql_lock_tables(tables)"); *need_reopen= FALSE; - if (mysql_lock_tables_check(thd, tables, count, flags)) - DBUG_RETURN (NULL); + DBUG_RETURN(NULL); - for (;;) + if (!(sql_lock= get_lock_data(thd, tables, count, GET_LOCK_STORE_LOCKS, + &write_lock_used)) || + ! sql_lock->table_count) + DBUG_RETURN(sql_lock); + + if (mysql_lock_tables(thd, sql_lock, write_lock_used != 0, flags, + need_reopen)) { - if (! (sql_lock= get_lock_data(thd, tables, count, GET_LOCK_STORE_LOCKS, - &write_lock_used)) || - ! sql_lock->table_count) - break; + /* Clear the lock type of all lock data to avoid reusage. */ + reset_lock_data(sql_lock, 1); + my_free(sql_lock, MYF(0)); + sql_lock= 0; + } + DBUG_RETURN(sql_lock); +} + + +/** + Lock a table based on a MYSQL_LOCK structure. - if (global_read_lock && write_lock_used && - ! (flags & MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK)) + mysql_lock_tables() + + @param thd The current thread. + @param sql_lock Tables that should be locked + @param write_lock_used 1 if any of the tables are write locked + @param flags See mysql_lock_tables() + @param need_reopen Out parameter, TRUE if some tables were altered + or deleted and should be reopened by caller. + + @return 0 ok + @return 1 error +*/ + +bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, + bool write_lock_used, + uint flags, bool *need_reopen) +{ + int rc; + bool error= 1; + DBUG_ENTER("mysql_lock_tables(sql_lock)"); + + *need_reopen= FALSE; + for (;;) + { + if (write_lock_used && !(flags & MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK)) { - /* - Someone has issued LOCK ALL TABLES FOR READ and we want a write lock - Wait until the lock is gone - */ - if (wait_if_global_read_lock(thd, 1, 1)) + if (global_read_lock) { - /* Clear the lock type of all lock data to avoid reusage. */ - reset_lock_data(sql_lock); - my_free((uchar*) sql_lock,MYF(0)); - sql_lock=0; - break; + /* + Someone has issued LOCK ALL TABLES FOR READ and we want a write lock + Wait until the lock is gone + */ + if (wait_if_global_read_lock(thd, 1, 1)) + break; + if (thd->version != refresh_version) + goto retry; } - if (thd->version != refresh_version) + + if (opt_readonly && + !(thd->security_ctx->master_access & SUPER_ACL) && + !thd->slave_thread) { - /* Clear the lock type of all lock data to avoid reusage. */ - reset_lock_data(sql_lock); - my_free((uchar*) sql_lock,MYF(0)); - goto retry; + /* + Someone has issued SET GLOBAL READ_ONLY=1 and we want a write lock. + We do not wait for READ_ONLY=0, and fail. + */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); + break; } } - if (!(flags & MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY) && - write_lock_used && - opt_readonly && - !(thd->security_ctx->master_access & SUPER_ACL) && - !thd->slave_thread) - { - /* - Someone has issued SET GLOBAL READ_ONLY=1 and we want a write lock. - We do not wait for READ_ONLY=0, and fail. - */ - reset_lock_data(sql_lock); - my_free((uchar*) sql_lock, MYF(0)); - sql_lock=0; - my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); - break; - } - thd_proc_info(thd, "System lock"); - DBUG_PRINT("info", ("thd->proc_info %s", thd->proc_info)); if (lock_external(thd, sql_lock->table, sql_lock->table_count)) - { - /* Clear the lock type of all lock data to avoid reusage. 
*/ - reset_lock_data(sql_lock); - my_free((uchar*) sql_lock,MYF(0)); - sql_lock=0; break; - } thd_proc_info(thd, "Table lock"); - DBUG_PRINT("info", ("thd->proc_info %s", thd->proc_info)); - thd->locked=1; /* Copy the lock data array. thr_multi_lock() reorders its contens. */ memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks, sql_lock->lock_count * sizeof(*sql_lock->locks)); @@ -277,70 +284,66 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, sql_lock->lock_count, sql_lock->lock_count, thd->lock_id)]; - if (rc > 1) /* a timeout or a deadlock */ + if (rc) /* Locking failed */ { VOID(unlock_external(thd, sql_lock->table, sql_lock->table_count)); - my_error(rc, MYF(0)); - my_free((uchar*) sql_lock,MYF(0)); - sql_lock= 0; - break; - } - else if (rc == 1) /* aborted */ - { - /* - reset_lock_data is required here. If thr_multi_lock fails it - resets lock type for tables, which were locked before (and - including) one that caused error. Lock type for other tables - preserved. - */ - reset_lock_data(sql_lock); - thd->some_tables_deleted=1; // Try again - sql_lock->lock_count= 0; // Locks are already freed + if (rc > 1) + { + /* a timeout or a deadlock */ + my_error(rc, MYF(0)); + break; + } + /* We where aborted and should try again from upper level*/ + thd->some_tables_deleted= 1; } - else if (!thd->some_tables_deleted || (flags & MYSQL_LOCK_IGNORE_FLUSH)) + else { /* - Thread was killed or lock aborted. Let upper level close all - used tables and retry or give error. + Lock worked. Now check that nothing happend while we where waiting + to get the lock that would require us to free it. */ - thd->locked=0; - break; - } - else if (!thd->open_tables) - { - // Only using temporary tables, no need to unlock - thd->some_tables_deleted=0; - thd->locked=0; - break; + error= 0; + if (!thd->some_tables_deleted || (flags & MYSQL_LOCK_IGNORE_FLUSH)) + { + /* + Table was not signaled for deletion or we don't care if it was. + Return with table as locked. + */ + break; + } + else if (!thd->open_tables && !(flags & MYSQL_LOCK_NOT_TEMPORARY)) + { + /* + Only using temporary tables, no need to unlock. + We need the flag as open_tables is not enough to distingush if + we are only using temporary tables for tables used trough + the HANDLER interface. + + We reset some_tables_deleted as it doesn't make sense to have this + one when we are only using temporary tables. + */ + thd->some_tables_deleted=0; + break; + } + /* some table was altered or deleted. reopen tables marked deleted */ + error= 1; + mysql_unlock_tables(thd, sql_lock, 0); } - thd_proc_info(thd, 0); - /* some table was altered or deleted. 
reopen tables marked deleted */ - mysql_unlock_tables(thd,sql_lock); - thd->locked=0; retry: - sql_lock=0; if (flags & MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN) { *need_reopen= TRUE; break; } if (wait_for_tables(thd)) - break; // Couldn't open tables - } - thd_proc_info(thd, 0); - if (thd->killed) - { - thd->send_kill_message(); - if (sql_lock) - { - mysql_unlock_tables(thd,sql_lock); - sql_lock=0; - } + break; // Couldn't open tables + reset_lock_data(sql_lock, 0); // Set org locks and retry } + thd_proc_info(thd, 0); thd->set_time_after_lock(); - DBUG_RETURN (sql_lock); + DBUG_RETURN(error); } @@ -380,15 +383,15 @@ static int lock_external(THD *thd, TABLE **tables, uint count) DBUG_RETURN(0); } - -void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock) +void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock) { DBUG_ENTER("mysql_unlock_tables"); if (sql_lock->table_count) VOID(unlock_external(thd,sql_lock->table,sql_lock->table_count)); if (sql_lock->lock_count) thr_multi_unlock(sql_lock->locks,sql_lock->lock_count, 0); - my_free((uchar*) sql_lock,MYF(0)); + if (free_lock) + my_free((uchar*) sql_lock,MYF(0)); DBUG_VOID_RETURN; } @@ -847,12 +850,12 @@ static int unlock_external(THD *thd, TABLE **table,uint count) @param write_lock_used Store pointer to last table with WRITE_ALLOW_WRITE */ -static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, - uint flags, TABLE **write_lock_used) +MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, + uint flags, TABLE **write_lock_used) { uint i,tables,lock_count; MYSQL_LOCK *sql_lock; - THR_LOCK_DATA **locks, **locks_buf, **locks_start; + THR_LOCK_DATA **locks, **locks_buf; TABLE **to, **table_buf; DBUG_ENTER("get_lock_data"); @@ -891,7 +894,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, { TABLE *table; enum thr_lock_type lock_type; - + THR_LOCK_DATA **locks_start; if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; lock_type= table->reginfo.lock_type; @@ -904,12 +907,11 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, my_error(ER_OPEN_AS_READONLY,MYF(0),table->alias.c_ptr()); /* Clear the lock type of the lock data that are stored already. */ sql_lock->lock_count= (uint) (locks - sql_lock->locks); - reset_lock_data(sql_lock); + reset_lock_data(sql_lock, 1); my_free((uchar*) sql_lock,MYF(0)); DBUG_RETURN(0); } } - THR_LOCK_DATA **org_locks = locks; locks_start= locks; locks= table->file->store_lock(thd, locks, (flags & GET_LOCK_UNLOCK) ? TL_IGNORE : @@ -922,8 +924,13 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, } *to++= table; if (locks) - for ( ; org_locks != locks ; org_locks++) - (*org_locks)->debug_print_param= (void *) table; + { + for ( ; locks_start != locks ; locks_start++) + { + (*locks_start)->debug_print_param= (void *) table; + (*locks_start)->org_type= (*locks_start)->type; + } + } } /* We do not use 'tables', because there are cases where store_lock() @@ -964,10 +971,13 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, Clear the lock type of all lock data. This ensures that the next lock request will set its lock type properly. - @param sql_lock The MySQL lock. + @param sql_lock The MySQL lock. + @param unlock If set, then set lock type to TL_UNLOCK, + otherwise set to original lock type from + get_store_lock(). 
*/ -static void reset_lock_data(MYSQL_LOCK *sql_lock) +void reset_lock_data(MYSQL_LOCK *sql_lock, bool unlock) { THR_LOCK_DATA **ldata; THR_LOCK_DATA **ldata_end; @@ -975,10 +985,7 @@ static void reset_lock_data(MYSQL_LOCK *sql_lock) for (ldata= sql_lock->locks, ldata_end= ldata + sql_lock->lock_count; ldata < ldata_end; ldata++) - { - /* Reset lock type. */ - (*ldata)->type= TL_UNLOCK; - } + (*ldata)->type= unlock ? TL_UNLOCK : (*ldata)->org_type; } diff --git a/sql/log.cc b/sql/log.cc index 7d837938ab4..e8858f0e3c4 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1899,6 +1899,7 @@ static int find_uniq_filename(char *name) size_t buf_length, length; char *start, *end; DBUG_ENTER("find_uniq_filename"); + LINT_INIT(number); length= dirname_part(buff, name, &buf_length); start= name + length; diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index d9dcd354b3e..0c8b60b72ed 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -316,6 +316,8 @@ int Mrr_simple_index_reader::get_next(char **range_info) !file->mrr_funcs.skip_index_tuple(file->mrr_iter, curr_range->ptr)) break; } + if (res && res != HA_ERR_END_OF_FILE && res != HA_ERR_KEY_NOT_FOUND) + file->print_error(res, MYF(0)); // Fatal error return res; } @@ -333,7 +335,7 @@ int Mrr_simple_index_reader::get_next(char **range_info) @retval 0 OK, next record was successfully read @retval HA_ERR_END_OF_FILE End of records - @retval Other Some other error + @retval Other Some other error; Error is printed */ int Mrr_ordered_index_reader::get_next(char **range_info) @@ -349,7 +351,7 @@ int Mrr_ordered_index_reader::get_next(char **range_info) { if ((res != HA_ERR_KEY_NOT_FOUND && res != HA_ERR_END_OF_FILE)) DBUG_RETURN(res); /* Some fatal error */ - + if (key_buffer->is_empty()) { DBUG_RETURN(HA_ERR_END_OF_FILE); @@ -902,7 +904,7 @@ error: close_second_handler(); /* Safety, not really needed but: */ strategy= NULL; - DBUG_RETURN(1); + DBUG_RETURN(res); use_default_impl: DBUG_ASSERT(primary_file->inited == handler::INDEX); @@ -1125,8 +1127,8 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf, if (bytes_for_rowids < (ptrdiff_t)rowid_buf_elem_size + 1) { - ptrdiff_t add= rowid_buf_elem_size + 1 - bytes_for_rowids; - bytes_for_rowids= rowid_buf_elem_size + 1; + ptrdiff_t add= (ptrdiff_t)(rowid_buf_elem_size + 1 - bytes_for_rowids); + bytes_for_rowids= (ptrdiff_t)rowid_buf_elem_size + 1; bytes_for_keys -= add; } @@ -1136,7 +1138,7 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf, key_buffer->set_buffer_space(rowid_buffer_end, full_buf_end); if (!key_buffer->have_space_for(key_buff_elem_size) || - !rowid_buffer.have_space_for(rowid_buf_elem_size)) + !rowid_buffer.have_space_for((size_t)rowid_buf_elem_size)) return TRUE; /* Failed to provide minimum space for one of the buffers */ return FALSE; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6ecbb1fe1df..08607fd41fc 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1493,15 +1493,6 @@ void mysqld_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); void reinit_stmt_before_use(THD *thd, LEX *lex); -/* sql_handler.cc */ -bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen); -bool mysql_ha_close(THD *thd, TABLE_LIST *tables); -bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, - List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); -void mysql_ha_flush(THD *thd); -void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables, bool is_locked); -void 
mysql_ha_cleanup(THD *thd); - /* sql_base.cc */ #define TMP_TABLE_KEY_EXTRA 8 void set_item_name(Item *item,char *pos,uint length); @@ -2201,6 +2192,10 @@ extern struct st_VioSSLFd * ssl_acceptor_fd; MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **table, uint count, uint flags, bool *need_reopen); +bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, + bool write_lock_used, + uint flags, bool *need_reopen); + /* mysql_lock_tables() and open_table() flags bits */ #define MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK 0x0001 #define MYSQL_LOCK_IGNORE_FLUSH 0x0002 @@ -2208,8 +2203,12 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **table, uint count, #define MYSQL_OPEN_TEMPORARY_ONLY 0x0008 #define MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY 0x0010 #define MYSQL_LOCK_PERF_SCHEMA 0x0020 +#define MYSQL_LOCK_NOT_TEMPORARY 0x0040 +/* flags for get_lock_data */ +#define GET_LOCK_UNLOCK 1 +#define GET_LOCK_STORE_LOCKS 2 -void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock); +void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock= 1); void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock); void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count); void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table, @@ -2221,6 +2220,7 @@ bool mysql_lock_abort_for_thread(THD *thd, TABLE *table); MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b); TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, TABLE_LIST *haystack); +void reset_lock_data(MYSQL_LOCK *sql_lock, bool unlock); bool lock_global_read_lock(THD *thd); void unlock_global_read_lock(THD *thd); bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, @@ -2230,6 +2230,8 @@ bool make_global_read_lock_block_commit(THD *thd); bool set_protect_against_global_read_lock(void); void unset_protect_against_global_read_lock(void); void broadcast_refresh(void); +MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, + uint flags, TABLE **write_lock_used); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 7bd16724d22..177e2dc9a9c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -460,7 +460,7 @@ static bool volatile ready_to_exit; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; static my_bool opt_short_log_format= 0; static my_bool opt_ignore_wrong_options= 0, opt_expect_abort= 0; -static my_bool opt_sync= 0; +static my_bool opt_sync= 0, opt_thread_alarm; static uint kill_cached_threads, wake_thread; ulong thread_created; uint thread_handling; @@ -6008,7 +6008,7 @@ enum options_mysqld OPT_RANGE_ALLOC_BLOCK_SIZE, OPT_ALLOW_SUSPICIOUS_UDFS, OPT_QUERY_ALLOC_BLOCK_SIZE, OPT_QUERY_PREALLOC_SIZE, OPT_TRANS_ALLOC_BLOCK_SIZE, OPT_TRANS_PREALLOC_SIZE, - OPT_SYNC_FRM, OPT_SYNC_BINLOG, OPT_SYNC, + OPT_SYNC_FRM, OPT_SYNC_BINLOG, OPT_SYNC, OPT_THREAD_ALARM, OPT_SYNC_REPLICATION, OPT_SYNC_REPLICATION_SLAVE_ID, OPT_SYNC_REPLICATION_TIMEOUT, @@ -6333,7 +6333,7 @@ struct my_option my_long_options[] = "Disable initialization of builtin InnoDB plugin.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"init-connect", OPT_INIT_CONNECT, - "Command(s) that are executed for each new connection.", + "Command(s) that are executed for each new connection (but not for SUPER users).", &opt_init_connect, &opt_init_connect, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DISABLE_GRANT_OPTIONS @@ -7413,7 +7413,8 @@ thread is in the relay logs.", "index_condition_pushdown, firstmatch, loosescan, 
materialization, " "semijoin, partial_match_rowid_merge, partial_match_table_scan, " "subquery_cache, outer_join_with_cache, semijoin_with_cache, " - "join_cache_incremental, join_cache_hashed, join_cache_bka" + "join_cache_incremental, join_cache_hashed, join_cache_bka, " + "optimize_join_buffer_size" #ifndef DBUG_OFF ", table_elimination" #endif @@ -7585,6 +7586,10 @@ thread is in the relay logs.", "error. Used only if the connection has active cursors.", &table_lock_wait_timeout, &table_lock_wait_timeout, 0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0}, + {"thread-alarm", OPT_THREAD_ALARM, + "Enable/disable system thread alarm calls. Should only be turned off when running tests or debugging!!", + &opt_thread_alarm, &opt_thread_alarm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, + 0}, {"thread_cache_size", OPT_THREAD_CACHE_SIZE, "How many threads we should keep in a cache for reuse.", &thread_cache_size, &thread_cache_size, 0, GET_ULONG, @@ -9364,6 +9369,7 @@ static int get_options(int *argc,char **argv) */ my_disable_locking= myisam_single_user= test(opt_external_locking == 0); my_disable_sync= opt_sync == 0; + my_disable_thr_alarm= opt_thread_alarm == 0; my_default_record_cache_size=global_system_variables.read_buff_size; myisam_max_temp_length= (my_off_t) global_system_variables.myisam_max_sort_file_size; diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 3441d1e120a..1b313da8ce1 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -697,7 +697,8 @@ net_real_write(NET *net,const uchar *packet, size_t len) { my_bool old_mode; thr_end_alarm(&alarmed); - vio_blocking(net->vio, net_blocking, &old_mode); + if (!net_blocking) + vio_blocking(net->vio, net_blocking, &old_mode); } net->reading_or_writing=0; DBUG_RETURN(((int) (pos != end))); @@ -988,7 +989,8 @@ end: { my_bool old_mode; thr_end_alarm(&alarmed); - vio_blocking(net->vio, net_blocking, &old_mode); + if (!net_blocking) + vio_blocking(net->vio, net_blocking, &old_mode); } net->reading_or_writing=0; #ifdef DEBUG_DATA_PACKETS diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7650f558da7..72e5daf91f4 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -4926,7 +4926,7 @@ ha_rows get_table_cardinality_for_index_intersect(TABLE *table) { ha_rows d; double q; - for (q= table->file->stats.records, d= 1 ; q >= 10; q/= 10, d*= 10 ) ; + for (q= (double)table->file->stats.records, d= 1 ; q >= 10; q/= 10, d*= 10 ) ; return (ha_rows) (floor(q+0.5) * d); } } @@ -5092,7 +5092,7 @@ bool prepare_search_best_index_intersect(PARAM *param, return TRUE; size_t calc_cost_buff_size= - Unique::get_cost_calc_buff_size(records_in_scans, + Unique::get_cost_calc_buff_size((size_t)records_in_scans, common->key_size, common->max_memory_size); if (!(common->buff_elems= (uint *) alloc_root(param->mem_root, @@ -5434,7 +5434,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr, ulonglong max_memory_size= common_info->max_memory_size; records_sent_to_unique+= ext_index_scan_records; - cost= Unique::get_use_cost(buff_elems, records_sent_to_unique, key_size, + cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size, max_memory_size, compare_factor, TRUE, &next->in_memory); if (records_filtered_out_by_cpk) @@ -5444,7 +5444,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr, double cost2; bool in_memory2; ha_rows records2= records_sent_to_unique-records_filtered_out_by_cpk; - cost2= Unique::get_use_cost(buff_elems, records2, key_size, + cost2= Unique::get_use_cost(buff_elems, 
(size_t) records2, key_size, max_memory_size, compare_factor, TRUE, &in_memory2); cost2+= get_cpk_filter_cost(ext_index_scan_records, common_info->cpk_scan, @@ -11859,13 +11859,14 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, Find the range tree for the current keypart. We assume that index_range_tree points to the leftmost keypart in the index. */ - for (cur_range= index_range_tree; cur_range; + for (cur_range= index_range_tree; + cur_range && cur_range->type == SEL_ARG::KEY_RANGE; cur_range= cur_range->next_key_part) { if (cur_range->field->eq(cur_part->field)) break; } - if (!cur_range) + if (!cur_range || cur_range->type != SEL_ARG::KEY_RANGE) { if (min_max_arg_part) return FALSE; /* The current keypart has no range predicates at all. */ diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc index 2047f6f250c..da6086d6cdc 100644 --- a/sql/opt_range_mrr.cc +++ b/sql/opt_range_mrr.cc @@ -225,7 +225,7 @@ walk_up_n_right: RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i]; uint min_key_length= cur->min_key - seq->param->min_key; - range->ptr= (char*)(int)(key_tree->part); + range->ptr= (char*)(intptr)(key_tree->part); if (cur->min_key_flag & GEOM_FLAG) { range->range_flag= cur->min_key_flag; diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 54061931d5f..a872fa1f1e9 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6017,7 +6017,7 @@ ER_ONLY_INTEGERS_ALLOWED eng "Only integers allowed as number here" ger "An dieser Stelle sind nur Ganzzahlen zulässig" ER_UNSUPORTED_LOG_ENGINE - eng "This storage engine cannot be used for log tables"" + eng "This storage engine cannot be used for log tables" ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden" ER_BAD_LOG_STATEMENT eng "You cannot '%s' a log table if logging is enabled" @@ -6236,13 +6236,15 @@ ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN eng "The value specified for computed column '%s' in table '%s' ignored" ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN - eng "'%s' is not yet supported for computed columns" + eng "This is not yet supported for computed columns" ER_CONST_EXPR_IN_VCOL eng "Constant expression in computed column function is not allowed" ER_ROW_EXPR_FOR_VCOL eng "Expression for computed column cannot return a row" +ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS + eng "%s storage engine does not support computed columns" ER_UNKNOWN_OPTION eng "Unknown option '%-.64s'" ER_BAD_OPTION_VALUE diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 34f5f5e9d5d..ed08e20340d 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -22,6 +22,7 @@ #include "sp_head.h" #include "sp.h" #include "sql_trigger.h" +#include "sql_handler.h" #include <m_ctype.h> #include <my_dir.h> #include <hash.h> diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 084e2c8d78c..11447389186 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -44,6 +44,7 @@ #include "sp_cache.h" #include "sql_select.h" /* declares create_tmp_table() */ #include "debug_sync.h" +#include "sql_handler.h" /* The following is used to initialise Table_ident with a internal @@ -702,7 +703,7 @@ THD::THD() catalog= (char*)"std"; // the only catalog we have for now main_security_ctx.init(); security_ctx= &main_security_ctx; - locked=some_tables_deleted=no_errors=password= 0; + some_tables_deleted=no_errors=password= 0; query_start_used= 0; count_cuted_fields= CHECK_FIELD_IGNORE; killed= NOT_KILLED; diff --git a/sql/sql_class.h b/sql/sql_class.h index c6bc3d0c649..7e6aa086f4c 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -768,7 
+768,7 @@ public: Server_side_cursor *cursor; inline char *query() { return query_string.str; } - inline uint32 query_length() { return query_string.length; } + inline uint32 query_length() { return (uint32)query_string.length; } void set_query_inner(char *query_arg, uint32 query_length_arg); /** @@ -1944,7 +1944,7 @@ public: bool slave_thread, one_shot_set; /* tells if current statement should binlog row-based(1) or stmt-based(0) */ bool current_stmt_binlog_row_based; - bool locked, some_tables_deleted; + bool some_tables_deleted; bool last_cuted_field; bool no_errors, password; bool extra_port; /* If extra connection */ @@ -3315,15 +3315,15 @@ public: bool get(TABLE *table); /* Cost of searching for an element in the tree */ - inline static double get_search_cost(uint tree_elems, uint compare_factor) + inline static double get_search_cost(ulonglong tree_elems, uint compare_factor) { return log((double) tree_elems) / (compare_factor * M_LN2); } - static double get_use_cost(uint *buffer, uint nkeys, uint key_size, + static double get_use_cost(uint *buffer, size_t nkeys, uint key_size, ulonglong max_in_memory_size, uint compare_factor, bool intersect_fl, bool *in_memory); - inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size, + inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size, ulonglong max_in_memory_size) { register ulonglong max_elems_in_tree= diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 7aa48524b20..b5478287fda 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -20,6 +20,7 @@ #include <mysys_err.h> #include "sp.h" #include "events.h" +#include "sql_handler.h" #include <my_dir.h> #include <m_ctype.h> #include "log.h" diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 5564d628594..e2cb17090a1 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -23,6 +23,7 @@ #include "sql_select.h" #include "sp_head.h" #include "sql_trigger.h" +#include "sql_handler.h" /** Implement DELETE SQL word. diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 69be8c8e9b4..7e651a2b4e2 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -56,9 +56,13 @@ second container. When the table is flushed, the pointer is cleared. */ +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + #include "mysql_priv.h" #include "sql_select.h" -#include <assert.h> +#include "sql_handler.h" #define HANDLER_TABLES_HASH_SIZE 120 @@ -66,6 +70,28 @@ static enum enum_ha_read_modes rkey_to_rnext[]= { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; /* + Set handler to state after create, but keep base information about + which table is used +*/ + +void SQL_HANDLER::reset() +{ + fields.empty(); + arena.free_items(); + free_root(&mem_root, MYF(0)); + my_free(lock, MYF(MY_ALLOW_ZERO_PTR)); + init(); +} + +/* Free all allocated data */ + +SQL_HANDLER::~SQL_HANDLER() +{ + reset(); + my_free(base_data, MYF(MY_ALLOW_ZERO_PTR)); +} + +/* Get hash key and hash key length. SYNOPSIS @@ -84,11 +110,11 @@ static enum enum_ha_read_modes rkey_to_rnext[]= Pointer to the TABLE_LIST struct. 
*/ -static char *mysql_ha_hash_get_key(TABLE_LIST *tables, size_t *key_len_p, +static char *mysql_ha_hash_get_key(SQL_HANDLER *table, size_t *key_len, my_bool first __attribute__((unused))) { - *key_len_p= strlen(tables->alias) + 1 ; /* include '\0' in comparisons */ - return tables->alias; + *key_len= table->handler_name.length + 1 ; /* include '\0' in comparisons */ + return table->handler_name.str; } @@ -106,9 +132,9 @@ static char *mysql_ha_hash_get_key(TABLE_LIST *tables, size_t *key_len_p, Nothing */ -static void mysql_ha_hash_free(TABLE_LIST *tables) +static void mysql_ha_hash_free(SQL_HANDLER *table) { - my_free((char*) tables, MYF(0)); + delete table; } /** @@ -120,14 +146,21 @@ static void mysql_ha_hash_free(TABLE_LIST *tables) @note Though this function takes a list of tables, only the first list entry will be closed. + @mote handler_object is not deleted! @note Broadcasts refresh if it closed a table with old version. */ -static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables, +static void mysql_ha_close_table(SQL_HANDLER *handler, bool is_locked) { + THD *thd= handler->thd; + TABLE *table= handler->table; TABLE **table_ptr; + /* check if table was already closed */ + if (!table) + return; + /* Though we could take the table pointer from hash_tables->table, we must follow the thd->handler_tables chain anyway, as we need the @@ -135,13 +168,18 @@ static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables, for close_thread_table(). */ for (table_ptr= &(thd->handler_tables); - *table_ptr && (*table_ptr != tables->table); + *table_ptr && (*table_ptr != table); table_ptr= &(*table_ptr)->next) ; if (*table_ptr) { - (*table_ptr)->file->ha_index_or_rnd_end(); + if (handler->lock) + { + // Mark it unlocked, like in reset_lock_data() + reset_lock_data(handler->lock, 1); + } + table->file->ha_index_or_rnd_end(); if (! is_locked) VOID(pthread_mutex_lock(&LOCK_open)); if (close_thread_table(thd, table_ptr)) @@ -152,17 +190,15 @@ static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables, if (! is_locked) VOID(pthread_mutex_unlock(&LOCK_open)); } - else if (tables->table) + else { /* Must be a temporary table */ - TABLE *table= tables->table; table->file->ha_index_or_rnd_end(); table->query_id= thd->query_id; table->open_by_handler= 0; } - - /* Mark table as closed, ready for re-open if necessary. */ - tables->table= NULL; + my_free(handler->lock, MYF(MY_ALLOW_ZERO_PTR)); + handler->init(); } /* @@ -178,7 +214,7 @@ static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables, Though this function takes a list of tables, only the first list entry will be opened. 'reopen' is set when a handler table is to be re-opened. In this case, - 'tables' is the pointer to the hashed TABLE_LIST object which has been + 'tables' is the pointer to the hashed SQL_HANDLER object which has been saved on the original open. 'reopen' is also used to suppress the sending of an 'ok' message. 
@@ -187,17 +223,17 @@ static void mysql_ha_close_table(THD *thd, TABLE_LIST *tables, TRUE Error */ -bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) { - TABLE_LIST *hash_tables = NULL; - char *db, *name, *alias; - uint dblen, namelen, aliaslen, counter; + SQL_HANDLER *sql_handler= 0; + uint counter; int error; - TABLE *backup_open_tables; + TABLE *table, *backup_open_tables, *write_lock_used; + Query_arena backup_arena; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", tables->db, tables->table_name, tables->alias, - (int) reopen)); + reopen != 0)); if (tables->schema_table) { @@ -210,7 +246,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) if (! hash_inited(&thd->handler_tables_hash)) { /* - HASH entries are of type TABLE_LIST. + HASH entries are of type SQL_HANDLER */ if (hash_init(&thd->handler_tables_hash, &my_charset_latin1, HANDLER_TABLES_HASH_SIZE, 0, 0, @@ -288,8 +324,10 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) if (error) goto err; + table= tables->table; + /* There can be only one table in '*tables'. */ - if (! (tables->table->file->ha_table_flags() & HA_CAN_SQL_HANDLER)) + if (! (table->file->ha_table_flags() & HA_CAN_SQL_HANDLER)) { my_error(ER_ILLEGAL_HA, MYF(0), tables->alias); goto err; @@ -297,36 +335,69 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) if (! reopen) { - /* copy the TABLE_LIST struct */ - dblen= strlen(tables->db) + 1; - namelen= strlen(tables->table_name) + 1; - aliaslen= strlen(tables->alias) + 1; - if (!(my_multi_malloc(MYF(MY_WME), - &hash_tables, (uint) sizeof(*hash_tables), - &db, (uint) dblen, - &name, (uint) namelen, - &alias, (uint) aliaslen, + /* copy data to sql_handler */ + if (!(sql_handler= new SQL_HANDLER(thd))) + goto err; + init_alloc_root(&sql_handler->mem_root, 1024, 0); + + sql_handler->table= table; + sql_handler->db.length= strlen(tables->db); + sql_handler->table_name.length= strlen(tables->table_name); + sql_handler->handler_name.length= strlen(tables->alias); + + if (!(my_multi_malloc(MY_WME, + &sql_handler->db.str, + (uint) sql_handler->db.length + 1, + &sql_handler->table_name.str, + (uint) sql_handler->table_name.length + 1, + &sql_handler->handler_name.str, + (uint) sql_handler->handler_name.length + 1, NullS))) goto err; - /* structure copy */ - *hash_tables= *tables; - hash_tables->db= db; - hash_tables->table_name= name; - hash_tables->alias= alias; - memcpy(hash_tables->db, tables->db, dblen); - memcpy(hash_tables->table_name, tables->table_name, namelen); - memcpy(hash_tables->alias, tables->alias, aliaslen); + sql_handler->base_data= sql_handler->db.str; // Free this + memcpy(sql_handler->db.str, tables->db, sql_handler->db.length +1); + memcpy(sql_handler->table_name.str, tables->table_name, + sql_handler->table_name.length+1); + memcpy(sql_handler->handler_name.str, tables->alias, + sql_handler->handler_name.length +1); /* add to hash */ - if (my_hash_insert(&thd->handler_tables_hash, (uchar*) hash_tables)) + if (my_hash_insert(&thd->handler_tables_hash, (uchar*) sql_handler)) goto err; } + else + { + sql_handler= reopen; + sql_handler->reset(); + } + sql_handler->table= table; + + if (!(sql_handler->lock= get_lock_data(thd, &sql_handler->table, 1, + GET_LOCK_STORE_LOCKS, + &write_lock_used))) + goto err; + + /* Get a list of all fields for send_fields */ + thd->set_n_backup_active_arena(&sql_handler->arena, &backup_arena); + error= 
table->fill_item_list(&sql_handler->fields); + thd->restore_active_arena(&sql_handler->arena, &backup_arena); + + if (error) + { + if (reopen) + sql_handler= 0; + goto err; + } + + /* Always read all columns */ + table->read_set= &table->s->all_set; + table->vcol_set= &table->s->all_set; /* If it's a temp table, don't reset table->query_id as the table is being used by this handler. Otherwise, no meaning at all. */ - tables->table->open_by_handler= 1; + table->open_by_handler= 1; if (! reopen) my_ok(thd); @@ -334,10 +405,13 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) DBUG_RETURN(FALSE); err: - if (hash_tables) - my_free((char*) hash_tables, MYF(0)); + delete sql_handler; if (tables->table) - mysql_ha_close_table(thd, tables, FALSE); + { + SQL_HANDLER tmp_sql_handler(thd); + tmp_sql_handler.table= tables->table; + mysql_ha_close_table(&tmp_sql_handler, FALSE); + } DBUG_PRINT("exit",("ERROR")); DBUG_RETURN(TRUE); } @@ -362,17 +436,17 @@ err: bool mysql_ha_close(THD *thd, TABLE_LIST *tables) { - TABLE_LIST *hash_tables; + SQL_HANDLER *handler; DBUG_ENTER("mysql_ha_close"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", tables->db, tables->table_name, tables->alias)); - if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, - (uchar*) tables->alias, - strlen(tables->alias) + 1))) + if ((handler= (SQL_HANDLER*) hash_search(&thd->handler_tables_hash, + (uchar*) tables->alias, + strlen(tables->alias) + 1))) { - mysql_ha_close_table(thd, hash_tables, FALSE); - hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables); + mysql_ha_close_table(handler, FALSE); + hash_delete(&thd->handler_tables_hash, (uchar*) handler); } else { @@ -387,6 +461,167 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables) } +/** + Finds an open HANDLER table. + + @params name Name of handler to open + + @return 0 failure + @return handler +*/ + +SQL_HANDLER *mysql_ha_find_handler(THD *thd, const char *name) +{ + SQL_HANDLER *handler; + if ((handler= (SQL_HANDLER*) hash_search(&thd->handler_tables_hash, + (uchar*) name, + strlen(name) + 1))) + { + DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: %p", + handler->db.str, + handler->table_name.str, + handler->handler_name.str, handler->table)); + if (!handler->table) + { + /* The handler table has been closed. Re-open it. */ + TABLE_LIST tmp; + tmp.init_one_table(handler->db.str, handler->table_name.str, + TL_READ); + tmp.alias= handler->handler_name.str; + + if (mysql_ha_open(thd, &tmp, handler)) + { + DBUG_PRINT("exit",("reopen failed")); + return 0; + } + } + } + else + { + my_error(ER_UNKNOWN_TABLE, MYF(0), name, "HANDLER"); + return 0; + } + return handler; +} + + +/** + Check that condition and key name are ok + + @param handler + @param mode Read mode (RFIRST, RNEXT etc...) + @param keyname Key to use. 
+ @param key_expr List of key column values + @param cond Where clause + @param in_prepare If we are in prepare phase (we can't evalute items yet) + + @return 0 ok + @return 1 error + + In ok, then values of used key and mode is stored in sql_handler +*/ + +static bool +mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, + Item *cond, bool in_prepare) +{ + THD *thd= handler->thd; + TABLE *table= handler->table; + if (cond) + { + /* This can only be true for temp tables */ + if (table->query_id != thd->query_id) + cond->cleanup(); // File was reopened + if ((!cond->fixed && + cond->fix_fields(thd, &cond)) || cond->check_cols(1)) + return 1; + } + + if (keyname) + { + /* Check if same as last keyname. If not, do a full lookup */ + if (handler->keyno < 0 || + my_strcasecmp(&my_charset_latin1, + keyname, + table->s->key_info[handler->keyno].name)) + { + if ((handler->keyno= find_type(keyname, &table->s->keynames, 1+2)-1)<0) + { + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, + handler->handler_name); + return 1; + } + } + + /* Check key parts */ + if (mode == RKEY) + { + TABLE *table= handler->table; + KEY *keyinfo= table->key_info + handler->keyno; + KEY_PART_INFO *key_part= keyinfo->key_part; + List_iterator<Item> it_ke(*key_expr); + Item *item; + key_part_map keypart_map; + uint key_len; + + if (key_expr->elements > keyinfo->key_parts) + { + my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts); + return 1; + } + for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) + { + my_bitmap_map *old_map; + /* note that 'item' can be changed by fix_fields() call */ + if ((!item->fixed && + item->fix_fields(thd, it_ke.ref())) || + (item= *it_ke.ref())->check_cols(1)) + return 1; + if (item->used_tables() & ~(RAND_TABLE_BIT | PARAM_TABLE_BIT)) + { + my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ"); + return 1; + } + if (!in_prepare) + { + old_map= dbug_tmp_use_all_columns(table, table->write_set); + (void) item->save_in_field(key_part->field, 1); + dbug_tmp_restore_column_map(table->write_set, old_map); + } + key_len+= key_part->store_length; + keypart_map= (keypart_map << 1) | 1; + } + handler->keypart_map= keypart_map; + handler->key_len= key_len; + } + else + { + /* + Check if the same index involved. + We need to always do this check because we may not have yet + called the handler since the last keyno change. + */ + if ((uint) handler->keyno != table->file->get_index()) + { + if (mode == RNEXT) + mode= RFIRST; + else if (mode == RPREV) + mode= RLAST; + } + } + } + else if (table->file->inited != handler::RND) + { + /* Convert RNEXT to RFIRST if we haven't started row scan */ + if (mode == RNEXT) + mode= RFIRST; + } + handler->mode= mode; // Store adjusted mode + return 0; +} + /* Read from a HANDLER table. 
@@ -413,147 +648,76 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, enum ha_rkey_function ha_rkey_mode, Item *cond, ha_rows select_limit_cnt, ha_rows offset_limit_cnt) { - TABLE_LIST *hash_tables; - TABLE *table, *backup_open_tables; - MYSQL_LOCK *lock; + SQL_HANDLER *handler; + TABLE *table; List<Item> list; Protocol *protocol= thd->protocol; char buff[MAX_FIELD_WIDTH]; String buffer(buff, sizeof(buff), system_charset_info); - int error, keyno= -1; + int error, keyno; uint num_rows; uchar *UNINIT_VAR(key); - uint UNINIT_VAR(key_len); bool need_reopen; + List_iterator<Item> it; DBUG_ENTER("mysql_ha_read"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", tables->db, tables->table_name, tables->alias)); - thd->lex->select_lex.context.resolve_in_table_list_only(tables); - list.push_front(new Item_field(&thd->lex->select_lex.context, - NULL, NULL, "*")); - List_iterator<Item> it(list); - it++; retry: - if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, - (uchar*) tables->alias, - strlen(tables->alias) + 1))) + if (!(handler= mysql_ha_find_handler(thd, tables->alias))) + goto err0; + + table= handler->table; + tables->table= table; // This is used by fix_fields + + /* save open_tables state */ + if (handler->lock->lock_count > 0) { - table= hash_tables->table; - DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx", - hash_tables->db, hash_tables->table_name, - hash_tables->alias, (long) table)); - if (!table) + bool lock_error; + + handler->lock->locks[0]->type= handler->lock->locks[0]->org_type; + lock_error= mysql_lock_tables(thd, handler->lock, 0, + (MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN | + (handler->table->s->tmp_table == + NO_TMP_TABLE ? + MYSQL_LOCK_NOT_TEMPORARY : 0)), + &need_reopen); + if (need_reopen) { - /* - The handler table has been closed. Re-open it. - */ - if (mysql_ha_open(thd, hash_tables, 1)) + mysql_ha_close_table(handler, FALSE); + if (thd->stmt_arena->is_stmt_execute()) { - DBUG_PRINT("exit",("reopen failed")); + /* + As we have already sent field list and types to the client, we can't + handle any changes in the table format for prepared statements. + Better to force a reprepare. + */ + my_error(ER_NEED_REPREPARE, MYF(0)); goto err0; } - table= hash_tables->table; - DBUG_PRINT("info",("re-opened '%s'.'%s' as '%s' tab %p", - hash_tables->db, hash_tables->table_name, - hash_tables->alias, table)); - } - -#if MYSQL_VERSION_ID < 40100 - if (*tables->db && strcmp(table->table_cache_key, tables->db)) - { - DBUG_PRINT("info",("wrong db")); - table= NULL; + /* + The lock might have been aborted, we need to manually reset + thd->some_tables_deleted because handler's tables are closed + in a non-standard way. Otherwise we might loop indefinitely. + */ + thd->some_tables_deleted= 0; + goto retry; } -#endif - } - else - table= NULL; - - if (!table) - { -#if MYSQL_VERSION_ID < 40100 - char buff[MAX_DBKEY_LENGTH]; - if (*tables->db) - strxnmov(buff, sizeof(buff)-1, tables->db, ".", tables->table_name, - NullS); - else - strncpy(buff, tables->alias, sizeof(buff)); - my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER"); -#else - my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); -#endif - goto err0; - } - tables->table=table; - - /* save open_tables state */ - backup_open_tables= thd->open_tables; - /* - mysql_lock_tables() needs thd->open_tables to be set correctly to - be able to handle aborts properly. When the abort happens, it's - safe to not protect thd->handler_tables because it won't close any - tables. 
- */ - thd->open_tables= thd->handler_tables; - - lock= mysql_lock_tables(thd, &tables->table, 1, - MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN, &need_reopen); - - /* restore previous context */ - thd->open_tables= backup_open_tables; - - if (need_reopen) - { - mysql_ha_close_table(thd, hash_tables, FALSE); - /* - The lock might have been aborted, we need to manually reset - thd->some_tables_deleted because handler's tables are closed - in a non-standard way. Otherwise we might loop indefinitely. - */ - thd->some_tables_deleted= 0; - goto retry; - } - - if (!lock) - goto err0; // mysql_lock_tables() printed error message already - - // Always read all columns - tables->table->read_set= &tables->table->s->all_set; - - if (cond) - { - if (table->query_id != thd->query_id) - cond->cleanup(); // File was reopened - if ((!cond->fixed && - cond->fix_fields(thd, &cond)) || cond->check_cols(1)) - goto err; - } - if (keyname) - { - if ((keyno=find_type(keyname, &table->s->keynames, 1+2)-1)<0) - { - my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, tables->alias); - goto err; - } - /* Check if the same index involved. */ - if ((uint) keyno != table->file->get_index()) - { - if (mode == RNEXT) - mode= RFIRST; - else if (mode == RPREV) - mode= RLAST; - } + if (lock_error) + goto err0; // mysql_lock_tables() printed error message already } - if (insert_fields(thd, &thd->lex->select_lex.context, - tables->db, tables->alias, &it, 0)) + if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 0)) goto err; + mode= handler->mode; + keyno= handler->keyno; - protocol->send_fields(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); + it.init(handler->fields); + protocol->send_fields(&handler->fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); /* In ::external_lock InnoDB resets the fields which tell it that @@ -569,6 +733,8 @@ retry: case RNEXT: if (table->file->inited != handler::NONE) { + if ((error= table->file->can_continue_handler_scan())) + break; if (keyname) { /* Check if we read from the same index. */ @@ -576,9 +742,7 @@ retry: error= table->file->ha_index_next(table->record[0]); } else - { error= table->file->ha_rnd_next(table->record[0]); - } break; } /* else fall through */ @@ -595,7 +759,7 @@ retry: if (!(error= table->file->ha_rnd_init(1))) error= table->file->ha_rnd_next(table->record[0]); } - mode=RNEXT; + mode= RNEXT; break; case RPREV: DBUG_ASSERT(keyname != 0); @@ -603,7 +767,9 @@ retry: DBUG_ASSERT((uint) keyno == table->file->get_index()); if (table->file->inited != handler::NONE) { - error=table->file->ha_index_prev(table->record[0]); + if ((error= table->file->can_continue_handler_scan())) + break; + error= table->file->ha_index_prev(table->record[0]); break; } /* else fall through */ @@ -612,54 +778,28 @@ retry: table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno, 1); error= table->file->ha_index_last(table->record[0]); - mode=RPREV; + mode= RPREV; break; case RNEXT_SAME: /* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) 
*/ DBUG_ASSERT(keyname != 0); - error= table->file->ha_index_next_same(table->record[0], key, key_len); + error= table->file->ha_index_next_same(table->record[0], key, + handler->key_len); break; case RKEY: { DBUG_ASSERT(keyname != 0); - KEY *keyinfo=table->key_info+keyno; - KEY_PART_INFO *key_part=keyinfo->key_part; - if (key_expr->elements > keyinfo->key_parts) - { - my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts); - goto err; - } - List_iterator<Item> it_ke(*key_expr); - Item *item; - key_part_map keypart_map; - for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) - { - my_bitmap_map *old_map; - // 'item' can be changed by fix_fields() call - if ((!item->fixed && - item->fix_fields(thd, it_ke.ref())) || - (item= *it_ke.ref())->check_cols(1)) - goto err; - if (item->used_tables() & ~RAND_TABLE_BIT) - { - my_error(ER_WRONG_ARGUMENTS,MYF(0),"HANDLER ... READ"); - goto err; - } - old_map= dbug_tmp_use_all_columns(table, table->write_set); - (void) item->save_in_field(key_part->field, 1); - dbug_tmp_restore_column_map(table->write_set, old_map); - key_len+=key_part->store_length; - keypart_map= (keypart_map << 1) | 1; - } - if (!(key= (uchar*) thd->calloc(ALIGN_SIZE(key_len)))) + if (!(key= (uchar*) thd->calloc(ALIGN_SIZE(handler->key_len)))) goto err; table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno, 1); - key_copy(key, table->record[0], table->key_info + keyno, key_len); + key_copy(key, table->record[0], table->key_info + keyno, + handler->key_len); error= table->file->ha_index_read_map(table->record[0], - key, keypart_map, ha_rkey_mode); - mode=rkey_to_rnext[(int)ha_rkey_mode]; + key, handler->keypart_map, + ha_rkey_mode); + mode= rkey_to_rnext[(int)ha_rkey_mode]; break; } default: @@ -673,9 +813,13 @@ retry: continue; if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { - sql_print_error("mysql_ha_read: Got error %d when reading table '%s'", - error, tables->table_name); + /* Don't give error in the log file for some expected problems */ + if (error != HA_ERR_RECORD_CHANGED && error != HA_ERR_WRONG_COMMAND) + sql_print_error("mysql_ha_read: Got error %d when reading " + "table '%s'", + error, tables->table_name); table->file->print_error(error,MYF(0)); + table->file->ha_index_or_rnd_end(); goto err; } goto ok; @@ -703,13 +847,13 @@ retry: num_rows++; } ok: - mysql_unlock_tables(thd,lock); + mysql_unlock_tables(thd, handler->lock, 0); my_eof(thd); DBUG_PRINT("exit",("OK")); DBUG_RETURN(FALSE); err: - mysql_unlock_tables(thd,lock); + mysql_unlock_tables(thd, handler->lock, 0); err0: DBUG_PRINT("exit",("ERROR")); DBUG_RETURN(TRUE); @@ -717,6 +861,28 @@ err0: /** + Prepare for handler read + + For parameters, see mysql_ha_read() +*/ + +SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, Item *cond) +{ + SQL_HANDLER *handler; + DBUG_ENTER("mysql_ha_read_prepare"); + if (!(handler= mysql_ha_find_handler(thd, tables->alias))) + DBUG_RETURN(0); + tables->table= handler->table; // This is used by fix_fields + if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 1)) + DBUG_RETURN(0); + DBUG_RETURN(handler); +} + + + +/** Scan the handler tables hash for matching tables. @param thd Thread identifier. @@ -727,30 +893,32 @@ err0: table was matched. 
*/ -static TABLE_LIST *mysql_ha_find(THD *thd, TABLE_LIST *tables) +static SQL_HANDLER *mysql_ha_find_match(THD *thd, TABLE_LIST *tables) { - TABLE_LIST *hash_tables, *head= NULL, *first= tables; - DBUG_ENTER("mysql_ha_find"); + SQL_HANDLER *hash_tables, *head= NULL; + TABLE_LIST *first= tables; + DBUG_ENTER("mysql_ha_find_match"); /* search for all handlers with matching table names */ for (uint i= 0; i < thd->handler_tables_hash.records; i++) { - hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i); + hash_tables= (SQL_HANDLER*) hash_element(&thd->handler_tables_hash, i); + for (tables= first; tables; tables= tables->next_local) { if ((! *tables->db || - ! my_strcasecmp(&my_charset_latin1, hash_tables->db, tables->db)) && - ! my_strcasecmp(&my_charset_latin1, hash_tables->table_name, + ! my_strcasecmp(&my_charset_latin1, hash_tables->db.str, + tables->db)) && + ! my_strcasecmp(&my_charset_latin1, hash_tables->table_name.str, tables->table_name)) + { + /* Link into hash_tables list */ + hash_tables->next= head; + head= hash_tables; break; - } - if (tables) - { - hash_tables->next_local= head; - head= hash_tables; + } } } - DBUG_RETURN(head); } @@ -767,18 +935,18 @@ static TABLE_LIST *mysql_ha_find(THD *thd, TABLE_LIST *tables) void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables, bool is_locked) { - TABLE_LIST *hash_tables, *next; + SQL_HANDLER *hash_tables, *next; DBUG_ENTER("mysql_ha_rm_tables"); DBUG_ASSERT(tables); - hash_tables= mysql_ha_find(thd, tables); + hash_tables= mysql_ha_find_match(thd, tables); while (hash_tables) { - next= hash_tables->next_local; + next= hash_tables->next; if (hash_tables->table) - mysql_ha_close_table(thd, hash_tables, is_locked); + mysql_ha_close_table(hash_tables, is_locked); hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables); hash_tables= next; } @@ -798,16 +966,16 @@ void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables, bool is_locked) void mysql_ha_flush(THD *thd) { - TABLE_LIST *hash_tables; + SQL_HANDLER *hash_tables; DBUG_ENTER("mysql_ha_flush"); safe_mutex_assert_owner(&LOCK_open); for (uint i= 0; i < thd->handler_tables_hash.records; i++) { - hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i); + hash_tables= (SQL_HANDLER*) hash_element(&thd->handler_tables_hash, i); if (hash_tables->table && hash_tables->table->needs_reopen_or_name_lock()) - mysql_ha_close_table(thd, hash_tables, TRUE); + mysql_ha_close_table(hash_tables, TRUE); } DBUG_VOID_RETURN; @@ -824,14 +992,14 @@ void mysql_ha_flush(THD *thd) void mysql_ha_cleanup(THD *thd) { - TABLE_LIST *hash_tables; + SQL_HANDLER *hash_tables; DBUG_ENTER("mysql_ha_cleanup"); for (uint i= 0; i < thd->handler_tables_hash.records; i++) { - hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i); + hash_tables= (SQL_HANDLER*) hash_element(&thd->handler_tables_hash, i); if (hash_tables->table) - mysql_ha_close_table(thd, hash_tables, FALSE); + mysql_ha_close_table(hash_tables, FALSE); } hash_free(&thd->handler_tables_hash); diff --git a/sql/sql_handler.h b/sql/sql_handler.h new file mode 100644 index 00000000000..54e72e9f50e --- /dev/null +++ b/sql/sql_handler.h @@ -0,0 +1,61 @@ +/* Copyright (C) 2010 Monty Program Ab + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* Open handlers are stored here */ + +class SQL_HANDLER { +public: + TABLE *table; + List<Item> fields; /* Fields, set on open */ + THD *thd; + LEX_STRING handler_name; + LEX_STRING db; + LEX_STRING table_name; + MEM_ROOT mem_root; + MYSQL_LOCK *lock; + + key_part_map keypart_map; + int keyno; /* Used key */ + uint key_len; + enum enum_ha_read_modes mode; + + /* This is only used when deleting many handler objects */ + SQL_HANDLER *next; + + Query_arena arena; + char *base_data; + SQL_HANDLER(THD *thd_arg) : + thd(thd_arg), arena(&mem_root, Query_arena::INITIALIZED) + { init(); clear_alloc_root(&mem_root); base_data= 0; } + void init() { keyno= -1; table= 0; lock= 0; } + void reset(); + + ~SQL_HANDLER(); +}; + +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen); +bool mysql_ha_close(THD *thd, TABLE_LIST *tables); +bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, + List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); +void mysql_ha_flush(THD *thd); +void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables, bool is_locked); +void mysql_ha_cleanup(THD *thd); + +SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, Item *cond); diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index ec8fd9997f6..7b26842eb9f 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -720,12 +720,12 @@ ulong JOIN_CACHE::get_min_join_buffer_size() { if (!min_buff_size) { - ulong len= 0; + size_t len= 0; for (JOIN_TAB *tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, FALSE)) len+= tab->get_max_used_fieldlength(); len+= get_record_max_affix_length() + get_max_key_addon_space_per_record(); - ulong min_sz= len*min_records; - ulong add_sz= 0; + size_t min_sz= len*min_records; + size_t add_sz= 0; for (uint i=0; i < min_records; i++) add_sz+= join_tab_scan->aux_buffer_incr(i+1); avg_aux_buffer_incr= add_sz/min_records; @@ -771,9 +771,9 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size) { if (!max_buff_size) { - ulong max_sz; - ulong min_sz= get_min_join_buffer_size(); - ulong len= 0; + size_t max_sz; + size_t min_sz= get_min_join_buffer_size(); + size_t len= 0; for (JOIN_TAB *tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, FALSE)) len+= tab->get_used_fieldlength(); len+= get_record_max_affix_length(); @@ -781,7 +781,7 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size) len+= get_max_key_addon_space_per_record() + avg_aux_buffer_incr; space_per_record= len; - ulong limit_sz= join->thd->variables.join_buff_size; + size_t limit_sz= join->thd->variables.join_buff_size; if (join_tab->join_buffer_size_limit) set_if_smaller(limit_sz, join_tab->join_buffer_size_limit); if (!optimize_buff_size) @@ -844,8 +844,8 @@ int JOIN_CACHE::alloc_buffer() min_buff_size= 0; max_buff_size= 0; min_records= 1; - max_records= partial_join_cardinality <= join_buff_space_limit ? 
- (ulonglong) partial_join_cardinality : join_buff_space_limit; + max_records= (size_t) (partial_join_cardinality <= join_buff_space_limit ? + (ulonglong) partial_join_cardinality : join_buff_space_limit); set_if_bigger(max_records, 10); min_buff_size= get_min_join_buffer_size(); buff_size= get_max_join_buffer_size(optimize_buff_size); @@ -916,10 +916,10 @@ fail: bool JOIN_CACHE::shrink_join_buffer_in_ratio(ulonglong n, ulonglong d) { - ulonglong next_buff_size; + size_t next_buff_size; if (n < d) return FALSE; - next_buff_size= (ulonglong) ((double) buff_size / n * d); + next_buff_size= (size_t) ((double) buff_size / n * d); set_if_bigger(next_buff_size, min_buff_size); buff_size= next_buff_size; return realloc_buffer(); @@ -1215,7 +1215,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full) /* Make an adjustment for the size of the auxiliary buffer if there is any */ uint incr= aux_buffer_incr(records); - ulong rem= rem_space(); + size_t rem= rem_space(); aux_buff_size+= len+incr < rem ? incr : rem; /* @@ -2395,7 +2395,7 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr) enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last) { - uint cnt; + ulonglong cnt; enum_nested_loop_state rc= NESTED_LOOP_OK; bool is_first_inner= join_tab == join_tab->first_unmatched; @@ -3881,7 +3881,8 @@ bool bka_skip_index_tuple(range_seq_t rseq, char *range_info) { DBUG_ENTER("bka_skip_index_tuple"); JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq; - DBUG_RETURN(cache->skip_index_tuple(range_info)); + bool res= cache->skip_index_tuple(range_info); + DBUG_RETURN(res); } diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h index 6aa15adabe3..3b05d534064 100644 --- a/sql/sql_join_cache.h +++ b/sql/sql_join_cache.h @@ -219,13 +219,13 @@ protected: The expected length of a record in the join buffer together with all prefixes and postfixes */ - ulong avg_record_length; + size_t avg_record_length; /* The expected size of the space per record in the auxiliary buffer */ - ulong avg_aux_buffer_incr; + size_t avg_aux_buffer_incr; /* Expected join buffer space used for one record */ - ulong space_per_record; + size_t space_per_record; /* Pointer to the beginning of the join buffer */ uchar *buff; @@ -233,26 +233,26 @@ protected: Size of the entire memory allocated for the join buffer. Part of this memory may be reserved for the auxiliary buffer. */ - ulong buff_size; + size_t buff_size; /* The minimal join buffer size when join buffer still makes sense to use */ - ulong min_buff_size; + size_t min_buff_size; /* The maximum expected size if the join buffer to be used */ - ulong max_buff_size; + size_t max_buff_size; /* Size of the auxiliary buffer */ - ulong aux_buff_size; + size_t aux_buff_size; /* The number of records put into the join buffer */ - ulong records; + size_t records; /* The number of records in the fully refilled join buffer of the minimal size equal to min_buff_size */ - ulong min_records; + size_t min_records; /* The maximum expected number of records to be put in the join buffer at one refill */ - ulong max_records; + size_t max_records; /* Pointer to the current position in the join buffer. 
@@ -401,7 +401,7 @@ protected: virtual uint aux_buffer_incr(ulong recno); /* Shall calculate how much space is remaining in the join buffer */ - virtual ulong rem_space() + virtual size_t rem_space() { return max(buff_size-(end_pos-buff)-aux_buff_size,0); } @@ -579,9 +579,9 @@ public: virtual int init(); /* Get the current size of the cache join buffer */ - ulong get_join_buffer_size() { return buff_size; } + size_t get_join_buffer_size() { return buff_size; } /* Set the size of the cache join buffer to a new value */ - void set_join_buffer_size(ulong sz) { buff_size= sz; } + void set_join_buffer_size(size_t sz) { buff_size= sz; } /* Get the minimum possible size of the cache join buffer */ virtual ulong get_min_join_buffer_size(); @@ -924,7 +924,7 @@ protected: Calculate how much space in the buffer would not be occupied by records, key entries and additional memory for the MMR buffer. */ - ulong rem_space() + size_t rem_space() { return max(last_key_entry-end_pos-aux_buff_size,0); } @@ -1262,7 +1262,7 @@ protected: Get the number of ranges in the cache buffer passed to the MRR interface. For each record its own range is passed. */ - uint get_number_of_ranges_for_mrr() { return records; } + uint get_number_of_ranges_for_mrr() { return (uint)records; } /* Setup the MRR buffer as the space between the last record put diff --git a/sql/sql_lifo_buffer.h b/sql/sql_lifo_buffer.h index af26f8b5652..34f9624436d 100644 --- a/sql/sql_lifo_buffer.h +++ b/sql/sql_lifo_buffer.h @@ -102,8 +102,8 @@ public: void sort(qsort2_cmp cmp_func, void *cmp_func_arg) { - uint elem_size= size1 + size2; - uint n_elements= used_size() / elem_size; + size_t elem_size= size1 + size2; + size_t n_elements= used_size() / elem_size; my_qsort2(used_area(), n_elements, elem_size, cmp_func, cmp_func_arg); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 9212017d955..2482775d5cd 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -28,6 +28,7 @@ #include "events.h" #include "sql_trigger.h" #include "debug_sync.h" +#include "sql_handler.h" #ifdef WITH_ARIA_STORAGE_ENGINE #include "../storage/maria/ha_maria.h" @@ -3186,12 +3187,17 @@ end_with_restore_list: DBUG_EXECUTE_IF("after_mysql_insert", { - const char act[]= + const char act1[]= "now " "wait_for signal.continue"; + const char act2[]= + "now " + "signal signal.continued"; DBUG_ASSERT(opt_debug_sync_timeout > 0); - DBUG_ASSERT(!debug_sync_set_action(current_thd, - STRING_WITH_LEN(act))); + DBUG_ASSERT(!debug_sync_set_action(thd, + STRING_WITH_LEN(act1))); + DBUG_ASSERT(!debug_sync_set_action(thd, + STRING_WITH_LEN(act2))); };); break; } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index a1243589ed1..9bfa8d61e0a 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -95,6 +95,7 @@ When one supplies long data for a placeholder: #else #include <mysql_com.h> #endif +#include "sql_handler.h" /** A result class used to send cursor rows using the binary protocol. @@ -243,6 +244,8 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) int error; THD *thd= stmt->thd; DBUG_ENTER("send_prep_stmt"); + DBUG_PRINT("enter",("stmt->id: %lu columns: %d param_count: %d", + stmt->id, columns, stmt->param_count)); buff[0]= 0; /* OK packet indicator */ int4store(buff+1, stmt->id); @@ -1835,6 +1838,56 @@ static bool mysql_test_insert_select(Prepared_statement *stmt, return res; } +/** + Validate SELECT statement. + + In case of success, if this query is not EXPLAIN, send column list info + back to the client. 
+ + @param stmt prepared statement + @param tables list of tables used in the query + + @retval 0 success + @retval 1 error, error message is set in THD + @retval 2 success, and statement metadata has been sent +*/ + +static int mysql_test_handler_read(Prepared_statement *stmt, + TABLE_LIST *tables) +{ + THD *thd= stmt->thd; + LEX *lex= stmt->lex; + SQL_HANDLER *ha_table; + DBUG_ENTER("mysql_test_select"); + + lex->select_lex.context.resolve_in_select_list= TRUE; + + /* + We don't have to test for permissions as this is already done during + HANDLER OPEN + */ + if (!(ha_table= mysql_ha_read_prepare(thd, tables, lex->ha_read_mode, + lex->ident.str, + lex->insert_list, + lex->select_lex.where))) + DBUG_RETURN(1); + + if (!stmt->is_sql_prepare()) + { + if (!lex->result && !(lex->result= new (stmt->mem_root) select_send)) + { + my_error(ER_OUTOFMEMORY, MYF(0), sizeof(select_send)); + DBUG_RETURN(1); + } + if (send_prep_stmt(stmt, ha_table->fields.elements) || + lex->result->send_fields(ha_table->fields, Protocol::SEND_EOF) || + thd->protocol->flush()) + DBUG_RETURN(1); + DBUG_RETURN(2); + } + DBUG_RETURN(0); +} + /** Perform semantic analysis of the parsed tree and send a response packet @@ -1949,6 +2002,11 @@ static bool check_prepared_statement(Prepared_statement *stmt) res= mysql_test_insert_select(stmt, tables); break; + case SQLCOM_HA_READ: + res= mysql_test_handler_read(stmt, tables); + /* Statement and field info has already been sent */ + DBUG_RETURN(res == 1 ? TRUE : FALSE); + /* Note that we don't need to have cases in this list if they are marked with CF_STATUS_COMMAND in sql_command_flags diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index e85e730db5b..5c8d1add04b 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -19,7 +19,7 @@ #include "mysql_priv.h" #include "sql_trigger.h" - +#include "sql_handler.h" static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ba19093ced2..35988ea32d7 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -42,8 +42,8 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", "MAYBE_REF","ALL","range","index","fulltext", "ref_or_null","unique_subquery","index_subquery", - "index_merge","hash" -}; + "index_merge", "hash_ALL", "hash_range", + "hash_index", "hash_index_merge" }; const char *copy_to_tmp_table= "Copying to tmp table"; @@ -1075,6 +1075,7 @@ JOIN::optimize() for (JOIN_TAB *tab= jt_range->start + first_tab_offs; tab < jt_range->end; tab++) { + uint key_copy_index=0; for (uint i=0; i < tab->ref.key_parts; i++) { @@ -1090,13 +1091,14 @@ JOIN::optimize() { *ref_item_ptr= ref_item; Item *item= ref_item->real_item(); - store_key *key_copy= tab->ref.key_copy[i]; + store_key *key_copy= tab->ref.key_copy[key_copy_index]; if (key_copy->type() == store_key::FIELD_STORE_KEY) { store_key_field *field_copy= ((store_key_field *)key_copy); field_copy->change_source_field((Item_field *) item); } } + key_copy_index++; } } first_tab_offs= 0; @@ -1735,7 +1737,7 @@ bool JOIN::shrink_join_buffers(JOIN_TAB *jt, cache= tab->cache; if (cache) { - ulong buff_size; + size_t buff_size; if (needed_space < cache->get_min_join_buffer_size()) return TRUE; if (cache->shrink_join_buffer_in_ratio(curr_space, needed_space)) @@ -1753,7 +1755,7 @@ bool JOIN::shrink_join_buffers(JOIN_TAB *jt, DBUG_ASSERT(cache); if (needed_space < cache->get_min_join_buffer_size()) return TRUE; - cache->set_join_buffer_size(needed_space); + 
cache->set_join_buffer_size((size_t)needed_space); return FALSE; } @@ -4085,6 +4087,7 @@ add_keyuse(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field, } keyuse.used_tables= key_field->val->used_tables(); keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; + keyuse.ref_table_rows= 0; keyuse.null_rejecting= key_field->null_rejecting; keyuse.cond_guard= key_field->cond_guard; keyuse.sj_pred_no= key_field->sj_pred_no; @@ -5110,19 +5113,20 @@ best_access_path(JOIN *join, /* Estimate the cost of the hash join access to the table */ ha_rows rnd_records= matching_candidates_in_table(s, found_constraint); - tmp= s->table->file->scan_time(); + tmp= s->quick ? s->quick->read_time : s->table->file->scan_time(); + tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + /* We read the table as many times as join buffer becomes full. */ tmp*= (1.0 + floor((double) cache_record_length(join,idx) * record_count / (double) thd->variables.join_buff_size)); - tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; best_time= tmp + (record_count*join_sel) / TIME_FOR_COMPARE * rnd_records; best= tmp; records= rows2double(rnd_records); best_key= hj_start_key; best_ref_depends_map= 0; - best_uses_jbuf= test(!disable_jbuf); + best_uses_jbuf= TRUE; } /* @@ -5596,7 +5600,7 @@ optimize_straight_join(JOIN *join, table_map join_tables) All other cases are in-between these two extremes. Thus the parameter 'search_depth' controlls the exhaustiveness of the search. The higher the - value, the longer the optimizaton time and possibly the better the + value, the longer the optimization time and possibly the better the resulting plan. The lower the value, the fewer alternative plans are estimated, but the more likely to get a bad QEP. @@ -6698,7 +6702,7 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab, keyinfo->key_length=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; keyinfo->flags= HA_GENERATED_KEY; - keyinfo->name= (char *) "hj_key"; + keyinfo->name= (char *) "$hj"; keyinfo->rec_per_key= (ulong*) thd->calloc(sizeof(ulong)*key_parts); if (!keyinfo->rec_per_key) DBUG_RETURN(TRUE); @@ -7400,6 +7404,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) for (tab= next_depth_first_tab(join, NULL); tab; tab= next_depth_first_tab(join, tab), i++) { + bool is_hj; /* first_inner is the X in queries like: SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X @@ -7476,9 +7481,20 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) add_cond_and_fix(&tmp, tab->select_cond); } + + is_hj= (tab->type == JT_REF || tab->type == JT_EQ_REF) && + (join->allowed_join_cache_types & JOIN_CACHE_HASHED_BIT) && + ((join->max_allowed_join_cache_level+1)/2 == 2 || + ((join->max_allowed_join_cache_level+1)/2 > 2 && + is_hash_join_key_no(tab->ref.key))) && + (!tab->emb_sj_nest || + join->allowed_semijoin_with_cache) && + (!(tab->table->map & join->outer_join) || + join->allowed_outer_join_with_cache); + if (cond && !tmp && tab->quick) { // Outer join - if (tab->type != JT_ALL) + if (tab->type != JT_ALL && !is_hj) { /* Don't use the quick method @@ -7559,9 +7575,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { /* Use quick key read if it's a constant and it's not used with key reading */ - if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF - && tab->type != JT_FT && (tab->type != JT_REF || - (uint) tab->ref.key == tab->quick->index)) + if ((tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF + && tab->type != JT_FT && + (tab->type != JT_REF || + (uint) tab->ref.key == 
tab->quick->index)) || is_hj) { sel->quick=tab->quick; // Use value from get_quick_... sel->quick_keys.clear_all(); @@ -8571,6 +8588,9 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) if (jcl) tab[-1].next_select=sub_select_cache; + + if (tab->cache && tab->cache->get_join_alg() == JOIN_CACHE::BNLH_JOIN_ALG) + tab->type= JT_HASH; switch (tab->type) { case JT_SYSTEM: // Only happens with left join @@ -8584,7 +8604,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - else if ((!jcl || jcl > 4) && !tab->is_ref_for_hash_join()) + else if (!jcl || jcl > 4) push_index_cond(tab, tab->ref.key); break; case JT_EQ_REF: @@ -8596,7 +8616,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - else if ((!jcl || jcl > 4) && !tab->is_ref_for_hash_join()) + else if (!jcl || jcl > 4) push_index_cond(tab, tab->ref.key); break; case JT_REF_OR_NULL: @@ -8611,10 +8631,11 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) table->enable_keyread(); - else if ((!jcl || jcl > 4) &&!tab->is_ref_for_hash_join()) + else if (!jcl || jcl > 4) push_index_cond(tab, tab->ref.key); break; case JT_ALL: + case JT_HASH: /* If previous table use cache If the incoming data set is already sorted don't use cache. @@ -8689,7 +8710,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) */ tab->index=find_shortest_key(table, & table->covering_keys); tab->read_first_record= join_read_first; - tab->type=JT_NEXT; // Read with index_first / index_next + /* Read with index_first / index_next */ + tab->type= tab->type == JT_ALL ? 
JT_NEXT : JT_HASH_NEXT; } } if (tab->select && tab->select->quick && @@ -8734,6 +8756,11 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) sort_by_tab->type= JT_ALL; sort_by_tab->read_first_record= join_init_read_record; } + else if (sort_by_tab->type == JT_HASH_NEXT) + { + sort_by_tab->type= JT_HASH; + sort_by_tab->read_first_record= join_init_read_record; + } } break; } @@ -13498,6 +13525,11 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table, DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;); if (write_err) goto err; + if (thd->killed) + { + thd->send_kill_message(); + goto err_killed; + } } if (!new_table.no_rows && new_table.file->ha_end_bulk_insert()) goto err; @@ -13530,6 +13562,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table, err: DBUG_PRINT("error",("Got error: %d",write_err)); table->file->print_error(write_err, MYF(0)); +err_killed: (void) table->file->ha_rnd_end(); (void) new_table.file->close(); err1: @@ -19340,7 +19373,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, TABLE *table=tab->table; TABLE_LIST *table_list= tab->table->pos_in_table_list; char buff[512]; - char buff1[512], buff2[512], buff3[512]; + char buff1[512], buff2[512], buff3[512], buff4[512]; char keylen_str_buf[64]; my_bool key_read; String extra(buff, sizeof(buff),cs); @@ -19348,10 +19381,17 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, String tmp1(buff1,sizeof(buff1),cs); String tmp2(buff2,sizeof(buff2),cs); String tmp3(buff3,sizeof(buff3),cs); + String tmp4(buff4,sizeof(buff4),cs); + char hash_key_prefix[]= "#hash#"; + KEY *key_info= 0; + uint key_len= 0; + bool is_hj= tab->type == JT_HASH || tab->type ==JT_HASH_NEXT; + extra.length(0); tmp1.length(0); tmp2.length(0); tmp3.length(0); + tmp4.length(0); quick_type= -1; /* Don't show eliminated tables */ @@ -19369,21 +19409,19 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, join->select_lex->type; item_list.push_back(new Item_string(stype, strlen(stype), cs)); - if (tab->type == JT_ALL && tab->select && tab->select->quick) + if ((tab->type == JT_ALL || tab->type == JT_HASH) && + tab->select && tab->select->quick) { quick_type= tab->select->quick->get_type(); if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) || (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) || (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) || (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION)) - tab->type = JT_INDEX_MERGE; + tab->type= tab->type == JT_ALL ? JT_INDEX_MERGE : JT_HASH_INDEX_MERGE; else - tab->type = JT_RANGE; + tab->type= tab->type == JT_ALL ? 
JT_RANGE : JT_HASH_RANGE; } - if (tab->cache && tab->cache->get_join_alg() == JOIN_CACHE::BNLH_JOIN_ALG) - tab->type= JT_HASH; - /* table */ if (table->derived_select_number) { @@ -19455,45 +19493,66 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(item_null); /* Build "key", "key_len", and "ref" values and add them to item_list */ - if (tab->ref.key_parts) + if (tab->type == JT_NEXT) + { + key_info= table->key_info+tab->index; + key_len= key_info->key_length; + } + else if (tab->ref.key_parts) + { + key_info= tab->get_keyinfo_by_key_no(tab->ref.key); + key_len= tab->ref.key_length; + } + if (key_info) { - KEY *key_info= tab->get_keyinfo_by_key_no(tab->ref.key); register uint length; - item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name), - system_charset_info)); - length= (longlong10_to_str(tab->ref.key_length, keylen_str_buf, 10) - + if (is_hj) + tmp2.append(hash_key_prefix, strlen(hash_key_prefix), cs); + tmp2.append(key_info->name, strlen(key_info->name), cs); + length= (longlong10_to_str(key_len, keylen_str_buf, 10) - keylen_str_buf); - item_list.push_back(new Item_string(keylen_str_buf, length, - system_charset_info)); - for (store_key **ref=tab->ref.key_copy ; *ref ; ref++) + tmp3.append(keylen_str_buf, length, cs); + if (tab->ref.key_parts) { - if (tmp2.length()) - tmp2.append(','); - tmp2.append((*ref)->name(), strlen((*ref)->name()), - system_charset_info); - } - item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); + for (store_key **ref=tab->ref.key_copy ; *ref ; ref++) + { + if (tmp4.length()) + tmp4.append(','); + tmp4.append((*ref)->name(), strlen((*ref)->name()), cs); + } + } } - else if (tab->type == JT_NEXT) + if (is_hj && tab->type != JT_HASH) { - KEY *key_info=table->key_info+ tab->index; - register uint length; - item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name),cs)); - length= (longlong10_to_str(key_info->key_length, keylen_str_buf, 10) - - keylen_str_buf); - item_list.push_back(new Item_string(keylen_str_buf, - length, - system_charset_info)); - item_list.push_back(item_null); + tmp2.append(':'); + tmp3.append(':'); } - else if (tab->select && tab->select->quick) + if (tab->type == JT_HASH_NEXT) { + register uint length; + key_info= table->key_info+tab->index; + key_len= key_info->key_length; + tmp2.append(key_info->name, strlen(key_info->name), cs); + length= (longlong10_to_str(key_len, keylen_str_buf, 10) - + keylen_str_buf); + tmp3.append(keylen_str_buf, length, cs); + } + if (tab->select && tab->select->quick) tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3); - item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); - item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs)); - item_list.push_back(item_null); + if (key_info || (tab->select && tab->select->quick)) + { + if (tmp2.length()) + item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); + else + item_list.push_back(item_null); + if (tmp3.length()) + item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs)); + else + item_list.push_back(item_null); + if (key_info && tab->type != JT_NEXT) + item_list.push_back(new Item_string(tmp4.ptr(),tmp4.length(),cs)); + else + item_list.push_back(item_null); } else { @@ -19543,8 +19602,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, ha_rows examined_rows; if (tab->select && tab->select->quick) examined_rows= tab->select->quick->records; - else if (tab->type == JT_NEXT || 
tab->type == JT_ALL || - tab->type == JT_HASH) + else if (tab->type == JT_NEXT || tab->type == JT_ALL || is_hj) { if (tab->limit) examined_rows= tab->limit; diff --git a/sql/sql_select.h b/sql/sql_select.h index 7d6a5d2b7f3..6c294b7af27 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -135,7 +135,7 @@ typedef struct st_table_ref enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF, JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL, JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY, JT_INDEX_MERGE, - JT_HASH}; + JT_HASH, JT_HASH_RANGE, JT_HASH_NEXT, JT_HASH_INDEX_MERGE}; class JOIN; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5732385950a..3f20e8afa36 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1942,8 +1942,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) pthread_mutex_lock(&mysys_var->mutex); thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0); #ifndef EMBEDDED_LIBRARY - thd_info->state_info= (char*) (tmp->locked ? "Locked" : - tmp->net.reading_or_writing ? + thd_info->state_info= (char*) (tmp->net.reading_or_writing ? (tmp->net.reading_or_writing == 2 ? "Writing to net" : thd_info->command == COM_SLEEP ? "" : @@ -2068,8 +2067,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) table->field[5]->store(utime / 1000000, TRUE); /* STATE */ #ifndef EMBEDDED_LIBRARY - val= (char*) (tmp->locked ? "Locked" : - tmp->net.reading_or_writing ? + val= (char*) (tmp->net.reading_or_writing ? (tmp->net.reading_or_writing == 2 ? "Writing to net" : tmp->command == COM_SLEEP ? "" : diff --git a/sql/sql_string.h b/sql/sql_string.h index 5da0d38f24d..c9eaf924e4d 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -363,7 +363,7 @@ public: void qs_append(const char *str) { - qs_append(str, strlen(str)); + qs_append(str, (uint32)strlen(str)); } void qs_append(const char *str, uint32 len); void qs_append(double d); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 05a9c66cf80..7138c82d891 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -24,6 +24,7 @@ #include "sql_trigger.h" #include "sql_show.h" #include "debug_sync.h" +#include "sql_handler.h" #ifdef __WIN__ #include <io.h> @@ -3929,12 +3930,16 @@ bool mysql_create_table_no_lock(THD *thd, } /* Give warnings for not supported table options */ - if (create_info->transactional && !file->ht->commit) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - file->engine_name()->str, - "TRANSACTIONAL=1"); +#if defined(WITH_ARIA_STORAGE_ENGINE) + extern handlerton *maria_hton; + if (file->ht != maria_hton) +#endif + if (create_info->transactional) + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ILLEGAL_HA_CREATE_OPTION, + ER(ER_ILLEGAL_HA_CREATE_OPTION), + file->engine_name()->str, + "TRANSACTIONAL=1"); VOID(pthread_mutex_lock(&LOCK_open)); if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) @@ -6276,9 +6281,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, def->field=field; if (field->stored_in_db != def->stored_in_db) { - my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, - MYF(0), - "Changing the STORED status"); + my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, MYF(0)); goto err; } if (!def->after) @@ -7226,6 +7229,16 @@ view_err: /* Non-primary unique key. 
*/ needed_online_flags|= HA_ONLINE_ADD_UNIQUE_INDEX; needed_fast_flags|= HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES; + if (ignore) + { + /* + If ignore is used, we have to remove all duplicate rows, + which require a full table copy. + */ + need_copy_table= ALTER_TABLE_DATA_CHANGED; + pk_changed= 2; // Don't change need_copy_table + break; + } } } else diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 29147e7bf2a..e704c4ca175 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -239,14 +239,18 @@ void print_keyuse(KEYUSE *keyuse) char buff[256]; char buf2[64]; const char *fieldname; + JOIN_TAB *join_tab= keyuse->table->reginfo.join_tab; + KEY *key_info= join_tab->get_keyinfo_by_key_no(keyuse->key); String str(buff,(uint32) sizeof(buff), system_charset_info); str.length(0); keyuse->val->print(&str, QT_ORDINARY); str.append('\0'); - if (keyuse->keypart == FT_KEYPART) + if (keyuse->is_for_hash_join()) + fieldname= keyuse->table->field[keyuse->keypart]->field_name; + else if (keyuse->keypart == FT_KEYPART) fieldname= "FT_KEYPART"; else - fieldname= keyuse->table->key_info[keyuse->key].key_part[keyuse->keypart].field->field_name; + fieldname= key_info->key_part[keyuse->keypart].field->field_name; longlong2str(keyuse->used_tables, buf2, 16, 0); DBUG_LOCK_FILE; fprintf(DBUG_FILE, "KEYUSE: %s.%s=%s optimize: %u used_tables: %s " diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 1c4add27e57..8b95a36bc35 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -19,6 +19,7 @@ #include "sp_head.h" #include "sql_trigger.h" #include "parse_file.h" +#include "sql_handler.h" /*************************************************************************/ diff --git a/sql/table.cc b/sql/table.cc index 410d15bce4e..cd2cf51e2e0 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2327,14 +2327,11 @@ partititon_err: /* Check virtual columns against table's storage engine. */ if (share->vfields && - ((outparam->file && - !outparam->file->check_if_supported_virtual_columns()) || - (!outparam->file && share->db_type() && - share->db_type()->db_type == DB_TYPE_CSV_DB))) // Workaround for CSV - { - my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, - MYF(0), - "Specified storage engine"); + !(outparam->file && + (outparam->file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))) + { + my_error(ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS, MYF(0), + plugin_name(share->db_plugin)->str); error_reported= TRUE; goto err; } diff --git a/sql/table.h b/sql/table.h index 4d72e94ee30..d4dbc075bc5 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1830,7 +1830,7 @@ typedef struct st_nested_join 2. All child join nest nodes are fully covered. */ - bool is_fully_covered() const { return join_list.elements == counter; } + bool is_fully_covered() const { return n_tables == counter; } } NESTED_JOIN; diff --git a/sql/uniques.cc b/sql/uniques.cc index e309caf9849..cfce53b70d7 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -290,17 +290,17 @@ static double get_merge_many_buffs_cost(uint *buffer, these will be random seeks. 
*/ -double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, +double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size, ulonglong max_in_memory_size, uint compare_factor, bool intersect_fl, bool *in_memory) { - ulong max_elements_in_tree; - ulong last_tree_elems; + size_t max_elements_in_tree; + size_t last_tree_elems; int n_full_trees; /* number of trees in unique - 1 */ double result; - max_elements_in_tree= ((ulong) max_in_memory_size / + max_elements_in_tree= ((size_t) max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)); n_full_trees= nkeys / max_elements_in_tree; @@ -312,9 +312,10 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0); result /= compare_factor; - DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys, - n_full_trees, n_full_trees?max_elements_in_tree:0, - last_tree_elems)); + DBUG_PRINT("info",("unique trees sizes: %u=%u*%u + %u", (uint)nkeys, + (uint)n_full_trees, + (uint)(n_full_trees?max_elements_in_tree:0), + (uint)last_tree_elems)); if (in_memory) *in_memory= !n_full_trees; |
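
A note on the widespread ulong -> size_t conversions in sql_join_cache.cc, sql_join_cache.h, sql_lifo_buffer.h, sql_select.cc and uniques.cc above: presumably the motivation is to avoid 32-bit truncation of buffer sizes and element counts on LLP64 platforms (64-bit Windows), where unsigned long remains 32 bits while size_t is 64 bits. The standalone sketch below is illustrative only and is not part of the patch; it merely shows the width difference that makes size_t the safer type for these quantities.

/* Illustrative only -- not MariaDB code. Prints the width of
   unsigned long vs. size_t on the current platform. */
#include <cstdio>
#include <cstddef>

int main()
{
  /* LP64 (e.g. Linux/x86_64): both print 8.
     LLP64 (e.g. 64-bit Windows): unsigned long prints 4, size_t prints 8,
     so a buffer size above 4GB would silently wrap if kept in a ulong. */
  std::printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
  std::printf("sizeof(size_t)        = %zu\n", sizeof(size_t));
  return 0;
}

On LP64 systems the change is behavior-neutral, which is consistent with the patch swapping the types without touching the surrounding arithmetic.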