author     monty@narttu.mysql.fi                      2003-11-04 09:40:36 +0200
committer  monty@narttu.mysql.fi                      2003-11-04 09:40:36 +0200
commit     4e4725377d27ff0101788fd7ed89670614ed8294 (patch)
tree       d387df22d5d3ffed50ecd8620f1eb18bc61ead7e /sql
parent     30af2d0f49a203ae4fb97194e899fa5e13a42c1d (diff)
parent     e1e82a4e00dd2cee2ff56accdd3fdbd5afeadb6a (diff)
Merge with 4.0
Diffstat (limited to 'sql')
 sql/item_func.cc   |  2
 sql/log_event.cc   | 64
 sql/log_event.h    |  8
 sql/mysqld.cc      | 25
 sql/slave.cc       | 27
 sql/slave.h        |  2
 sql/sql_base.cc    | 25
 sql/sql_cache.cc   | 26
 sql/sql_class.cc   |  5
 sql/sql_class.h    | 11
 sql/sql_parse.cc   | 87
 sql/sql_select.cc  | 80
 sql/sql_test.cc    |  4
 sql/unireg.cc      |  9
 sql/unireg.h       |  2
15 files changed, 297 insertions, 80 deletions
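The bulk of this merge is the fix for BUG#1686: queries that use temporary tables are binlogged with the original (master's) thread id via `slave_proxy_id`, and the server now keys temporary tables on the server id in addition to the pseudo thread id, so the slave SQL thread's temporary tables can no longer collide with those of a local client that happens to reuse the master's thread id. The sketch below is a minimal, standalone illustration of the cache-key layout that the sql_base.cc hunks build in find_temporary_table(), rename_temporary_table() and open_temporary_table() after this change; `int4store_le` and `make_tmp_table_key` are stand-ins invented for the example, not server functions.

```cpp
#include <cstring>
#include <cstdint>
#include <cstddef>

// Stand-in for MySQL's int4store(): store a 32-bit value in little-endian order.
static void int4store_le(unsigned char *p, uint32_t v)
{
  p[0]= (unsigned char) v;
  p[1]= (unsigned char) (v >> 8);
  p[2]= (unsigned char) (v >> 16);
  p[3]= (unsigned char) (v >> 24);
}

// Build the key used to look up a temporary table in THD::temporary_tables:
//   db '\0' table_name '\0' server_id(4) pseudo_thread_id(4)
// The 4-byte server id is what this merge adds in front of the existing
// pseudo_thread_id field, which is why MAX_DBKEY_LENGTH grows by 4 in unireg.h.
static size_t make_tmp_table_key(unsigned char *key, const char *db,
                                 const char *table_name,
                                 uint32_t server_id, uint32_t pseudo_thread_id)
{
  size_t len= std::strlen(db) + 1;
  std::memcpy(key, db, len);
  std::memcpy(key + len, table_name, std::strlen(table_name) + 1);
  len+= std::strlen(table_name) + 1;
  int4store_le(key + len, server_id);        len+= 4;   // new: originating server id
  int4store_le(key + len, pseudo_thread_id); len+= 4;   // slave_proxy_id / pseudo_thread_id
  return len;
}
```

With this layout, two sessions that share a pseudo thread id but come from different servers (the slave SQL thread replaying master events versus a local client) always produce distinct keys, which is what the temp-table hunks below rely on.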
diff --git a/sql/item_func.cc b/sql/item_func.cc index e6120f2e93c..3ed0396e373 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2571,7 +2571,7 @@ err: bool Item_func_get_user_var::const_item() const { - return var_entry && current_thd->query_id != var_entry->update_query_id; + return (!var_entry || current_thd->query_id != var_entry->update_query_id); } diff --git a/sql/log_event.cc b/sql/log_event.cc index 62dc56e7fd2..c0fd781cede 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -768,11 +768,49 @@ int Query_log_event::write(IO_CACHE* file) int Query_log_event::write_data(IO_CACHE* file) { + char buf[QUERY_HEADER_LEN]; + if (!query) return -1; - char buf[QUERY_HEADER_LEN]; - int4store(buf + Q_THREAD_ID_OFFSET, thread_id); + /* + We want to store the thread id: + (- as an information for the user when he reads the binlog) + - if the query uses temporary table: for the slave SQL thread to know to + which master connection the temp table belongs. + Now imagine we (write_data()) are called by the slave SQL thread (we are + logging a query executed by this thread; the slave runs with + --log-slave-updates). Then this query will be logged with + thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of + the same name were created simultaneously on the master (in the master + binlog you have + CREATE TEMPORARY TABLE t; (thread 1) + CREATE TEMPORARY TABLE t; (thread 2) + ...) + then in the slave's binlog there will be + CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) + CREATE TEMPORARY TABLE t; (thread_id_of_the_slave_SQL_thread) + which is bad (same thread id!). + + To avoid this, we log the thread's thread id EXCEPT for the SQL + slave thread for which we log the original (master's) thread id. + Now this moves the bug: what happens if the thread id on the + master was 10 and when the slave replicates the query, a + connection number 10 is opened by a normal client on the slave, + and updates a temp table of the same name? We get a problem + again. To avoid this, in the handling of temp tables (sql_base.cc) + we use thread_id AND server_id. TODO when this is merged into + 4.1: in 4.1, slave_proxy_id has been renamed to pseudo_thread_id + and is a session variable: that's to make mysqlbinlog work with + temp tables. We probably need to introduce + + SET PSEUDO_SERVER_ID + for mysqlbinlog in 4.1. mysqlbinlog would print: + SET PSEUDO_SERVER_ID= + SET PSEUDO_THREAD_ID= + for each query using temp tables. + */ + int4store(buf + Q_THREAD_ID_OFFSET, slave_proxy_id); int4store(buf + Q_EXEC_TIME_OFFSET, exec_time); buf[Q_DB_LEN_OFFSET] = (char) db_len; int2store(buf + Q_ERR_CODE_OFFSET, error_code); @@ -790,12 +828,14 @@ int Query_log_event::write_data(IO_CACHE* file) #ifndef MYSQL_CLIENT Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, bool using_trans) - :Log_event(thd_arg, !thd_arg->tmp_table_used ? - 0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans), + :Log_event(thd_arg, !thd_arg->tmp_table_used ? + 0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans), data_buf(0), query(query_arg), db(thd_arg->db), q_len((uint32) query_length), - error_code(thd_arg->killed ? ER_SERVER_SHUTDOWN: thd_arg->net.last_errno), - thread_id(thd_arg->thread_id) + error_code(thd_arg->killed ? 
ER_SERVER_SHUTDOWN: thd_arg->net.last_errno), + thread_id(thd_arg->thread_id), + /* save the original thread id; we already know the server id */ + slave_proxy_id(thd_arg->slave_proxy_id) { time_t end_time; time(&end_time); @@ -836,7 +876,7 @@ Query_log_event::Query_log_event(const char* buf, int event_len, return; memcpy(data_buf, buf + Q_DATA_OFFSET, data_len); - thread_id = uint4korr(buf + Q_THREAD_ID_OFFSET); + slave_proxy_id= thread_id= uint4korr(buf + Q_THREAD_ID_OFFSET); db = data_buf; db_len = (uint)buf[Q_DB_LEN_OFFSET]; query=data_buf + db_len + 1; @@ -907,8 +947,8 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli) { thd->set_time((time_t)when); thd->query_length= q_len; - VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query = (char*)query; + VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_id = query_id++; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->query_error= 0; // clear error @@ -1276,7 +1316,7 @@ void Load_log_event::pack_info(Protocol *protocol) int Load_log_event::write_data_header(IO_CACHE* file) { char buf[LOAD_HEADER_LEN]; - int4store(buf + L_THREAD_ID_OFFSET, thread_id); + int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id); int4store(buf + L_EXEC_TIME_OFFSET, exec_time); int4store(buf + L_SKIP_LINES_OFFSET, skip_lines); buf[L_TBL_LEN_OFFSET] = (char)table_name_len; @@ -1317,7 +1357,9 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, enum enum_duplicates handle_dup, bool using_trans) :Log_event(thd_arg, 0, using_trans), thread_id(thd_arg->thread_id), - num_fields(0), fields(0), field_lens(0),field_block_len(0), + slave_proxy_id(thd_arg->slave_proxy_id), + num_fields(0),fields(0), + field_lens(0),field_block_len(0), table_name(table_name_arg ? table_name_arg : ""), db(db_arg), fname(ex->file_name) { @@ -1422,7 +1464,7 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, char* buf_end = (char*)buf + event_len; uint header_len= old_format ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; const char* data_head = buf + header_len; - thread_id = uint4korr(data_head + L_THREAD_ID_OFFSET); + slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET); exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); table_name_len = (uint)data_head[L_TBL_LEN_OFFSET]; diff --git a/sql/log_event.h b/sql/log_event.h index 8ba5d0379a0..6cc8a7ca06d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -455,6 +455,13 @@ public: uint32 db_len; uint16 error_code; ulong thread_id; + /* + For events created by Query_log_event::exec_event (and + Load_log_event::exec_event()) we need the *original* thread id, to be able + to log the event with the original (=master's) thread id (fix for + BUG#1686). 
+ */ + ulong slave_proxy_id; #ifndef MYSQL_CLIENT Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, @@ -543,6 +550,7 @@ protected: public: ulong thread_id; + ulong slave_proxy_id; uint32 table_name_len; uint32 db_len; uint32 fname_len; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 1052eeaf11b..7c432a05786 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1985,7 +1985,7 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) #endif -const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0 }; +const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0,0}; bool open_log(MYSQL_LOG *log, const char *hostname, const char *opt_name, const char *extension, @@ -2733,7 +2733,7 @@ default_service_handling(char **argv, const char *extra_opt) { char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end; - end= path_and_service + sizeof(path_and_service)-1; + end= path_and_service + sizeof(path_and_service)-3; /* We have to quote filename if it contains spaces */ pos= add_quoted_string(path_and_service, file_path, end); @@ -2743,7 +2743,9 @@ default_service_handling(char **argv, *pos++= ' '; pos= add_quoted_string(pos, extra_opt, end); } - *pos= 0; // Ensure end null + /* We must have servicename last */ + *pos++= ' '; + strmake(pos, servicename, (uint) (end+2 - pos)); if (Service.got_service_option(argv, "install")) { @@ -2786,10 +2788,16 @@ int main(int argc, char **argv) if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, file_path, "")) return 0; - if (Service.IsService(argv[1])) + if (Service.IsService(argv[1])) /* Start an optional service */ { - /* start an optional service */ - load_default_groups[0]= argv[1]; + /* + Only add the service name to the groups read from the config file + if it's not "MySQL". (The default service name should be 'mysqld' + but we started a bad tradition by calling it MySQL from the start + and we are now stuck with it. + */ + if (my_strcasecmp(argv[1],"mysql")) + load_default_groups[3]= argv[1]; start_mode= 1; Service.Init(argv[1], mysql_service); return 0; @@ -2797,8 +2805,7 @@ int main(int argc, char **argv) } else if (argc == 3) /* install or remove any optional service */ { - if (!default_service_handling(argv, argv[2], argv[2], file_path, - argv[2])) + if (!default_service_handling(argv, argv[2], argv[2], file_path, "")) return 0; if (Service.IsService(argv[2])) { @@ -2810,6 +2817,8 @@ int main(int argc, char **argv) opt_argc= 2; // Skip service-name opt_argv=argv; start_mode= 1; + if (my_strcasecmp(argv[2],"mysql")) + load_default_groups[3]= argv[2]; Service.Init(argv[2], mysql_service); return 0; } diff --git a/sql/slave.cc b/sql/slave.cc index 415969a2955..561b32b668c 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -355,6 +355,22 @@ void init_slave_skip_errors(const char* arg) } } +void st_relay_log_info::close_temporary_tables() +{ + TABLE *table,*next; + + for (table=save_temporary_tables ; table ; table=next) + { + next=table->next; + /* + Don't ask for disk deletion. For now, anyway they will be deleted when + slave restarts, but it is a better intention to not delete them. + */ + close_temporary(table, 0); + } + save_temporary_tables= 0; + slave_open_temp_tables= 0; +} /* purge_relay_logs() @@ -847,6 +863,7 @@ static int end_slave_on_walk(MASTER_INFO* mi, gptr /*unused*/) void end_slave() { + /* This is called when the server terminates, in close_connections(). 
*/ if (active_mi) { /* @@ -1428,7 +1445,7 @@ file '%s', errno %d)", fname, my_errno); if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */, &msg)) { - sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4"); + sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; } rli->group_master_log_name[0]= 0; @@ -3166,8 +3183,6 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: - /* Free temporary tables etc */ - thd->cleanup(); VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query = thd->db = 0; // extra safety VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -3585,6 +3600,12 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) } rli->inited = 0; rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT); + /* + Delete the slave's temporary tables from memory. + In the future there will be other actions than this, to ensure persistance + of slave's temp tables after shutdown. + */ + rli->close_temporary_tables(); DBUG_VOID_RETURN; } diff --git a/sql/slave.h b/sql/slave.h index 618b04311b9..f8093826f58 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -320,10 +320,10 @@ typedef struct st_relay_log_info int wait_for_pos(THD* thd, String* log_name, longlong log_pos, longlong timeout); + void close_temporary_tables(); /* Check if UNTIL condition is satisfied. See slave.cc for more. */ bool is_until_satisfied(); - } RELAY_LOG_INFO; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 88a1d21354b..3976ebd81f4 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -575,6 +575,8 @@ TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) uint key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; TABLE *table,**prev; + int4store(key+key_length,thd->server_id); + key_length += 4; int4store(key+key_length,thd->variables.pseudo_thread_id); key_length += 4; @@ -603,18 +605,27 @@ bool close_temporary_table(THD *thd, const char *db, const char *table_name) return 0; } +/* + Used by ALTER TABLE when the table is a temporary one. It changes something + only if the ALTER contained a RENAME clause (otherwise, table_name is the old + name). + Prepares a table cache key, which is the concatenation of db, table_name and + thd->slave_proxy_id, separated by '\0'. 
+*/ bool rename_temporary_table(THD* thd, TABLE *table, const char *db, const char *table_name) { char *key; if (!(key=(char*) alloc_root(&table->mem_root, (uint) strlen(db)+ - (uint) strlen(table_name)+6))) + (uint) strlen(table_name)+6+4))) return 1; /* purecov: inspected */ table->key_length=(uint) (strmov((table->real_name=strmov(table->table_cache_key=key, db)+1), table_name) - table->table_cache_key)+1; + int4store(key+table->key_length,thd->server_id); + table->key_length += 4; int4store(key+table->key_length,thd->variables.pseudo_thread_id); table->key_length += 4; return 0; @@ -771,12 +782,13 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, if (thd->killed) DBUG_RETURN(0); key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; - int4store(key + key_length, thd->variables.pseudo_thread_id); + int4store(key + key_length, thd->server_id); + int4store(key + key_length + 4, thd->variables.pseudo_thread_id); for (table=thd->temporary_tables; table ; table=table->next) { - if (table->key_length == key_length+4 && - !memcmp(table->table_cache_key,key,key_length+4)) + if (table->key_length == key_length+8 && + !memcmp(table->table_cache_key,key,key_length+8)) { if (table->query_id == thd->query_id) { @@ -1671,7 +1683,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, total of 6 extra bytes in my_malloc in addition to table/db stuff */ if (!(tmp_table=(TABLE*) my_malloc(sizeof(*tmp_table)+(uint) strlen(db)+ - (uint) strlen(table_name)+6, + (uint) strlen(table_name)+6+4, MYF(MY_WME)))) DBUG_RETURN(0); /* purecov: inspected */ @@ -1694,6 +1706,9 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, +1), table_name) - tmp_table->table_cache_key)+1; int4store(tmp_table->table_cache_key + tmp_table->key_length, + thd->server_id); + tmp_table->key_length += 4; + int4store(tmp_table->table_cache_key + tmp_table->key_length, thd->variables.pseudo_thread_id); tmp_table->key_length += 4; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 0bae41da002..a4ae8fd5946 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2761,11 +2761,33 @@ my_bool Query_cache::move_by_type(byte **border, relink(block, new_block, next, prev, pnext, pprev); if (queries_blocks == block) queries_blocks = new_block; + Query_cache_block_table *beg_of_table_table= block->table(0), + *end_of_table_table= block->table(n_tables); + byte *beg_of_new_table_table= (byte*) new_block->table(0); + for (TABLE_COUNTER_TYPE j=0; j < n_tables; j++) { Query_cache_block_table *block_table = new_block->table(j); - block_table->next->prev = block_table; - block_table->prev->next = block_table; + + // use aligment from begining of table if 'next' is in same block + if ((beg_of_table_table <= block_table->next) && + (block_table->next < end_of_table_table)) + ((Query_cache_block_table *)(beg_of_new_table_table + + (((byte*)block_table->next) - + ((byte*)beg_of_table_table))))->prev= + block_table; + else + block_table->next->prev= block_table; + + // use aligment from begining of table if 'prev' is in same block + if ((beg_of_table_table <= block_table->prev) && + (block_table->prev < end_of_table_table)) + ((Query_cache_block_table *)(beg_of_new_table_table + + (((byte*)block_table->prev) - + ((byte*)beg_of_table_table))))->next= + block_table; + else + block_table->prev->next = block_table; } DBUG_PRINT("qcache", ("after circle tt")); *border += len; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ed01e1bb038..6b43c2f796e 100644 --- a/sql/sql_class.cc +++ 
b/sql/sql_class.cc @@ -396,6 +396,11 @@ bool THD::store_globals() return 1; mysys_var=my_thread_var; dbug_thread_id=my_thread_id(); + /* + By default 'slave_proxy_id' is 'thread_id'. They may later become different + if this is the slave SQL thread. + */ + slave_proxy_id= thread_id; return 0; } diff --git a/sql/sql_class.h b/sql/sql_class.h index c66ebb77020..7a8dc067256 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -481,7 +481,11 @@ public: char priv_host[MAX_HOSTNAME]; /* remote (peer) port */ uint16 peer_port; - /* Points to info-string that will show in SHOW PROCESSLIST */ + /* + Points to info-string that we show in SHOW PROCESSLIST + You are supposed to update thd->proc_info only if you have coded + a time-consuming piece that MySQL can get stuck in for a long time. + */ const char *proc_info; /* points to host if host is available, otherwise points to ip */ const char *host_or_ip; @@ -510,6 +514,11 @@ public: enum enum_server_command command; uint32 server_id; uint32 file_id; // for LOAD DATA INFILE + /* + Used in error messages to tell user in what part of MySQL we found an + error. E. g. when where= "having clause", if fix_fields() fails, user + will know that the error was in having clause. + */ const char *where; time_t start_time,time_after_lock,user_time; time_t connect_time,thr_create_time; // track down slow pthread_create diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d6f8e2f66c7..b3c17167a3a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -172,6 +172,7 @@ static int get_or_create_user_conn(THD *thd, const char *user, } } thd->user_connect=uc; + uc->connections++; end: (void) pthread_mutex_unlock(&LOCK_user_conn); return return_val; @@ -314,14 +315,15 @@ int check_user(THD *thd, enum enum_server_command command, thd->db_access=0; /* Don't allow user to connect if he has done too many queries */ - if ((ur.questions || ur.updates || - ur.connections || max_user_connections) && - get_or_create_user_conn(thd,thd->user,thd->host_or_ip,&ur)) - DBUG_RETURN(-1); - if (thd->user_connect && (thd->user_connect->user_resources.connections || - max_user_connections) && - check_for_max_user_connections(thd, thd->user_connect)) - DBUG_RETURN(-1); + if ((ur.questions || ur.updates || ur.connections || + max_user_connections) && + get_or_create_user_conn(thd,thd->user,thd->host_or_ip,&ur)) + DBUG_RETURN(-1); + if (thd->user_connect && + (thd->user_connect->user_resources.connections || + max_user_connections) && + check_for_max_user_connections(thd, thd->user_connect)) + DBUG_RETURN(-1); /* Change database if necessary: OK or FAIL is sent in mysql_change_db */ if (db && db[0]) @@ -386,42 +388,84 @@ void init_max_user_conn(void) } +/* + check if user has already too many connections + + SYNOPSIS + check_for_max_user_connections() + thd Thread handle + uc User connect object + + NOTES + If check fails, we decrease user connection count, which means one + shouldn't call decrease_user_connections() after this function. 
+ + RETURN + 0 ok + 1 error +*/ + static int check_for_max_user_connections(THD *thd, USER_CONN *uc) { int error=0; DBUG_ENTER("check_for_max_user_connections"); + (void) pthread_mutex_lock(&LOCK_user_conn); if (max_user_connections && - (max_user_connections < (uint) uc->connections)) + max_user_connections < (uint) uc->connections) { net_printf(thd,ER_TOO_MANY_USER_CONNECTIONS, uc->user); error=1; goto end; } - uc->connections++; if (uc->user_resources.connections && - uc->conn_per_hour++ >= uc->user_resources.connections) + uc->user_resources.connections <= uc->conn_per_hour) { net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_connections", (long) uc->user_resources.connections); error=1; + goto end; } -end: + uc->conn_per_hour++; + + end: + if (error) + uc->connections--; // no need for decrease_user_connections() here + (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_RETURN(error); } +/* + Decrease user connection count + + SYNOPSIS + decrease_user_connections() + uc User connection object + + NOTES + If there is a n user connection object for a connection + (which only happens if 'max_user_connections' is defined or + if someone has created a resource grant for a user), then + the connection count is always incremented on connect. + + The user connect object is not freed if some users has + 'max connections per hour' defined as we need to be able to hold + count over the lifetime of the connection. +*/ + static void decrease_user_connections(USER_CONN *uc) { DBUG_ENTER("decrease_user_connections"); - if ((uc->connections && !--uc->connections) && !mqh_used) + (void) pthread_mutex_lock(&LOCK_user_conn); + DBUG_ASSERT(uc->connections); + if (!--uc->connections && !mqh_used) { /* Last connection for user; Delete it */ - (void) pthread_mutex_lock(&LOCK_user_conn); (void) hash_delete(&hash_user_connections,(byte*) uc); - (void) pthread_mutex_unlock(&LOCK_user_conn); } + (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_VOID_RETURN; } @@ -1227,15 +1271,17 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *save_user= thd->user; char *save_priv_user= thd->priv_user; char *save_db= thd->db; - USER_CONN *save_uc= thd->user_connect; - thd->user= my_strdup(user, MYF(0)); - if (!thd->user) + USER_CONN *save_user_connect= thd->user_connect; + + if (!(thd->user= my_strdup(user, MYF(0)))) { thd->user= save_user; send_error(thd, ER_OUT_OF_RESOURCES); break; } + /* Clear variables that are allocated */ + thd->user_connect= 0; int res= check_user(thd, COM_CHANGE_USER, passwd, passwd_len, db, false); if (res) @@ -1246,6 +1292,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, x_free(thd->user); thd->user= save_user; thd->priv_user= save_priv_user; + thd->user_connect= save_user_connect; thd->master_access= save_master_access; thd->db_access= save_db_access; thd->db= save_db; @@ -1254,8 +1301,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, else { /* we've authenticated new user */ - if (max_connections && save_uc) - decrease_user_connections(save_uc); + if (save_user_connect) + decrease_user_connections(save_user_connect); x_free((gptr) save_db); x_free((gptr) save_user); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index bdaed894a52..3decbd62a2c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1453,10 +1453,21 @@ JOIN::exec() { DBUG_VOID_RETURN; } + /* + Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser + chose FILESORT to be faster than INDEX SCAN or there is no + suitable index present. 
+ Note, that create_sort_index calls test_if_skip_sort_order and may + finally replace sorting with index scan if there is a LIMIT clause in + the query. XXX: it's never shown in EXPLAIN! + OPTION_FOUND_ROWS supersedes LIMIT and is taken into account. + */ if (create_sort_index(thd, curr_join, curr_join->group_list ? curr_join->group_list : curr_join->order, - curr_join->select_limit, unit->select_limit_cnt)) + curr_join->select_limit, + (select_options & OPTION_FOUND_ROWS ? + HA_POS_ERROR : unit->select_limit_cnt))) DBUG_VOID_RETURN; } } @@ -2856,8 +2867,6 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, !(s->table->force_index && best_key)) { // Check full join ha_rows rnd_records= s->found_records; - /* Estimate cost of reading table. */ - tmp= s->table->file->scan_time(); /* If there is a restriction on the table, assume that 25% of the rows can be skipped on next part. @@ -2867,36 +2876,57 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, if (found_constraint) rnd_records-= rnd_records/4; - if (s->on_expr) // Can't use join cache + /* + Range optimizer never proposes a RANGE if it isn't better + than FULL: so if RANGE is present, it's always preferred to FULL. + Here we estimate its cost. + */ + if (s->quick) { + /* + For each record we: + - read record range through 'quick' + - skip rows which does not satisfy WHERE constraints + */ tmp= record_count * - /* We have to read the whole table for each record */ - (tmp + - /* - And we have to skip rows which does not satisfy join - condition for each record. - */ - (s->records - rnd_records)/(double) TIME_FOR_COMPARE); + (s->quick->read_time + + (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE); } else { - /* We read the table as many times as join buffer becomes full. */ - tmp*= (1.0 + floor((double) cache_record_length(join,idx) * - record_count / - (double) thd->variables.join_buff_size)); - /* - We don't make full cartesian product between rows in the scanned - table and existing records because we skip all rows from the - scanned table, which does not satisfy join condition when - we read the table (see flush_cached_records for details). Here we - take into account cost to read and skip these records. - */ - tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + /* Estimate cost of reading table. */ + tmp= s->table->file->scan_time(); + if (s->on_expr) // Can't use join cache + { + /* + For each record we have to: + - read the whole table record + - skip rows which does not satisfy join condition + */ + tmp= record_count * + (tmp + + (s->records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* We read the table as many times as join buffer becomes full. */ + tmp*= (1.0 + floor((double) cache_record_length(join,idx) * + record_count / + (double) thd->variables.join_buff_size)); + /* + We don't make full cartesian product between rows in the scanned + table and existing records because we skip all rows from the + scanned table, which does not satisfy join condition when + we read the table (see flush_cached_records for details). Here we + take into account cost to read and skip these records. + */ + tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + } } /* We estimate the cost of evaluating WHERE clause for found records - as record_count * rnd_records + TIME_FOR_COMPARE. This cost plus + as record_count * rnd_records / TIME_FOR_COMPARE. 
This cost plus tmp give us total cost of using TABLE SCAN */ if (best == DBL_MAX || @@ -3320,6 +3350,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) table_map used_tables; if (join->tables > 1) cond->update_used_tables(); // Tablenr may have changed + if (join->const_tables == join->tables) + join->const_table_map|=RAND_TABLE_BIT; { // Check const tables COND *const_cond= make_cond_for_table(cond,join->const_table_map,(table_map) 0); diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 47f0932b221..c14d30f8d34 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -22,7 +22,11 @@ #include "sql_select.h" #include <hash.h> #include <thr_alarm.h> +#if defined(HAVE_MALLINFO) && defined(HAVE_MALLOC_H) #include <malloc.h> +#elif defined(HAVE_MALLINFO) && defined(HAVE_SYS_MALLOC_H) +#include <sys/malloc.h> +#endif /* Intern key cache variables */ extern "C" pthread_mutex_t THR_LOCK_keycache; diff --git a/sql/unireg.cc b/sql/unireg.cc index 8606830e450..e93e290bd4c 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -150,16 +150,19 @@ int rea_create_table(THD *thd, my_string file_name, my_free((gptr) screen_buff,MYF(0)); my_free((gptr) keybuff, MYF(0)); - VOID(my_close(file,MYF(MY_WME))); - if (ha_create_table(file_name,create_info,0)) + if (my_sync(file, MYF(MY_WME))) goto err2; + if (my_close(file,MYF(MY_WME)) || + ha_create_table(file_name,create_info,0)) + goto err3; DBUG_RETURN(0); err: my_free((gptr) screen_buff,MYF(0)); my_free((gptr) keybuff, MYF(0)); +err2: VOID(my_close(file,MYF(MY_WME))); - err2: +err3: my_delete(file_name,MYF(0)); DBUG_RETURN(1); } /* rea_create_table */ diff --git a/sql/unireg.h b/sql/unireg.h index 66a274f3863..8d62959317d 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -43,7 +43,7 @@ #define ERRMAPP 1 /* Errormap f|r my_error */ #define LIBLEN FN_REFLEN-FN_LEN /* Max l{ngd p} dev */ -#define MAX_DBKEY_LENGTH (FN_LEN*2+6) /* extra 4 bytes for slave tmp +#define MAX_DBKEY_LENGTH (FN_LEN*2+1+1+4+4) /* extra 4+4 bytes for slave tmp * tables */ #define MAX_ALIAS_NAME 256 #define MAX_FIELD_NAME 34 /* Max colum name length +2 */ |