author: monty@narttu.mysql.fi <>  2003-03-19 22:25:44 +0200
committer: monty@narttu.mysql.fi <>  2003-03-19 22:25:44 +0200
commit: b883a9c01c9caba9e74bdc6f4f5b57b1a81a0fd4 (patch)
tree: abbee7d78d1399086c301a5bd02f3ba4a9e1ea38 /sql
parent: 48a9c1239c6b2b2ba27f8a1a9a0df98af204d53b (diff)
parent: 584729430a77280e753cc4bf67d2bb6d7f94a6b8 (diff)
download: mariadb-git-b883a9c01c9caba9e74bdc6f4f5b57b1a81a0fd4.tar.gz
Merge with 4.0.12
Diffstat (limited to 'sql')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sql/ha_innodb.h | 3 |
| -rw-r--r-- | sql/ha_myisam.cc | 30 |
| -rw-r--r-- | sql/handler.cc | 9 |
| -rw-r--r-- | sql/handler.h | 1 |
| -rw-r--r-- | sql/item_func.cc | 2 |
| -rw-r--r-- | sql/key.cc | 8 |
| -rw-r--r-- | sql/lex.h | 1 |
| -rw-r--r-- | sql/log.cc | 2 |
| -rw-r--r-- | sql/mysqld.cc | 2 |
| -rw-r--r-- | sql/password.c | 12 |
| -rw-r--r-- | sql/slave.cc | 60 |
| -rw-r--r-- | sql/slave.h | 7 |
| -rw-r--r-- | sql/sql_class.cc | 2 |
| -rw-r--r-- | sql/sql_crypt.cc | 6 |
| -rw-r--r-- | sql/sql_parse.cc | 3 |
| -rw-r--r-- | sql/sql_repl.cc | 8 |
| -rw-r--r-- | sql/sql_show.cc | 4 |
| -rw-r--r-- | sql/sql_update.cc | 147 |
| -rw-r--r-- | sql/sql_yacc.yy | 3 |
| -rw-r--r-- | sql/table.cc | 2 |
20 files changed, 238 insertions, 74 deletions
```diff
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 55832c0a079..56546e3e8d0 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -82,8 +82,7 @@ class ha_innobase: public handler
 	  HA_PRIMARY_KEY_IN_READ_INDEX |
 	  HA_DROP_BEFORE_CREATE |
 	  HA_NO_PREFIX_CHAR_KEYS |
-	  HA_TABLE_SCAN_ON_INDEX |
-	  HA_NOT_MULTI_UPDATE),
+	  HA_TABLE_SCAN_ON_INDEX),
 	last_dup_key((uint) -1),
 	start_of_scan(0)
   {
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 829ec5b0ade..126d0628f79 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1258,6 +1258,35 @@ longlong ha_myisam::get_auto_increment()
 }
 
 
+/*
+  Find out how many rows there is in the given range
+
+  SYNOPSIS
+    records_in_range()
+    inx			Index to use
+    start_key		Start of range. Null pointer if from first key
+    start_key_len	Length of start key
+    start_search_flag	Flag if start key should be included or not
+    end_key		End of range. Null pointer if to last key
+    end_key_len		Length of end key
+    end_search_flag	Flag if start key should be included or not
+
+  NOTES
+    start_search_flag can have one of the following values:
+      HA_READ_KEY_EXACT		Include the key in the range
+      HA_READ_AFTER_KEY		Don't include key in range
+
+    end_search_flag can have one of the following values:
+      HA_READ_BEFORE_KEY	Don't include key in range
+      HA_READ_AFTER_KEY		Include all 'end_key' values in the range
+
+  RETURN
+    HA_POS_ERROR	Something is wrong with the index tree.
+    0			There is no matching keys in the given range
+    number > 0		There is approximately 'number' matching rows in
+			the range.
+*/
+
 ha_rows ha_myisam::records_in_range(int inx,
 				    const byte *start_key,uint start_key_len,
 				    enum ha_rkey_function start_search_flag,
@@ -1272,6 +1301,7 @@ ha_rows ha_myisam::records_in_range(int inx,
 				  end_search_flag);
 }
 
+
 int ha_myisam::ft_read(byte * buf)
 {
   int error;
diff --git a/sql/handler.cc b/sql/handler.cc
index d7ae960382c..5353e78cd11 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -121,8 +121,15 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
 #endif
   case DB_TYPE_HEAP:
     return new ha_heap(table);
-  case DB_TYPE_MYISAM:
   default:					// should never happen
+  {
+    enum db_type def=(enum db_type) current_thd->variables.table_type;
+    /* Try first with 'default table type' */
+    if (db_type != def)
+      return get_new_handler(table, def);
+  }
+  /* Fall back to MyISAM */
+  case DB_TYPE_MYISAM:
     return new ha_myisam(table);
   case DB_TYPE_MRG_MYISAM:
     return new ha_myisammrg(table);
diff --git a/sql/handler.h b/sql/handler.h
index 824f02b5a3d..c6a4578a26c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -67,7 +67,6 @@
 #define HA_CAN_FULLTEXT		(HA_NO_PREFIX_CHAR_KEYS*2)
 #define HA_CAN_SQL_HANDLER	(HA_CAN_FULLTEXT*2)
 #define HA_NO_AUTO_INCREMENT	(HA_CAN_SQL_HANDLER*2)
-#define HA_NOT_MULTI_UPDATE	(HA_NO_AUTO_INCREMENT*2)
 
 /*
   Next record gives next record according last record read (even
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 81ea91c5c3d..9abbb0e290b 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -851,7 +851,7 @@ void Item_func_rand::fix_length_and_dec()
 
 double Item_func_rand::val()
 {
-  return rnd(rand);
+  return my_rnd(rand);
 }
 
 longlong Item_func_sign::val_int()
diff --git a/sql/key.cc b/sql/key.cc
index feda9e156b3..37ef6339f20 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -271,5 +271,13 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
       return 1;
     }
   }
+
+  /*
+    If table handler has primary key as part of the index, check that primary
+    key is not updated
+  */
+  if (idx != table->primary_key && table->primary_key < MAX_KEY &&
+      (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+    return check_if_key_used(table, table->primary_key, fields);
   return 0;
 }
diff --git a/sql/lex.h b/sql/lex.h
index 80f84628d27..5b1cbb58c1e 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -257,6 +257,7 @@ static SYMBOL symbols[] = {
   { "MEDIUMINT",	SYM(MEDIUMINT),0,0},
   { "MERGE",		SYM(MERGE_SYM),0,0},
   { "MEDIUM",		SYM(MEDIUM_SYM),0,0},
+  { "MEMORY",		SYM(MEMORY_SYM),0,0},
   { "MIDDLEINT",	SYM(MEDIUMINT),0,0},	/* For powerbuilder */
   { "MIN_ROWS",		SYM(MIN_ROWS),0,0},
   { "MINUTE",		SYM(MINUTE_SYM),0,0},
diff --git a/sql/log.cc b/sql/log.cc
index 02e34b773ef..00bd44274f9 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -654,6 +654,8 @@ int MYSQL_LOG::purge_first_log(struct st_relay_log_info* rli)
   */
   pthread_mutex_lock(&rli->log_space_lock);
   rli->log_space_total -= rli->relay_log_pos;
+  //tell the I/O thread to take the relay_log_space_limit into account
+  rli->ignore_log_space_limit= 0;
   pthread_mutex_unlock(&rli->log_space_lock);
   pthread_cond_broadcast(&rli->log_space_cond);
 
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index c0a3527bc75..e9026eb5b00 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2761,7 +2761,7 @@ static void create_new_thread(THD *thd)
     max_used_connections=thread_count-delayed_insert_threads;
   thd->thread_id=thread_id++;
   for (uint i=0; i < 8 ; i++)			// Generate password teststring
-    thd->scramble[i]= (char) (rnd(&sql_rand)*94+33);
+    thd->scramble[i]= (char) (my_rnd(&sql_rand)*94+33);
   thd->scramble[8]=0;
   // Back it up as old clients may need it
   memcpy(thd->old_scramble,thd->scramble,9);
diff --git a/sql/password.c b/sql/password.c
index 5ed05ae6c0e..9752bcc95eb 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -125,7 +125,7 @@ static void old_randominit(struct rand_struct *rand_st,ulong seed1)
     Generated pseudo random number
 */
 
-double rnd(struct rand_struct *rand_st)
+double my_rnd(struct rand_struct *rand_st)
 {
   rand_st->seed1=(rand_st->seed1*3+rand_st->seed2) % rand_st->max_value;
   rand_st->seed2=(rand_st->seed1+rand_st->seed2+33) % rand_st->max_value;
@@ -435,7 +435,7 @@ char get_password_version(const char *password)
 
 
 
-inline uint char_val(char X)
+static inline unsigned int char_val(char X)
 {
   return (uint) (X >= '0' && X <= '9' ? X-'0' :
 		 X >= 'A' && X <= 'Z' ? X-'A'+10 :
@@ -652,10 +652,10 @@ char *scramble(char *to,const char *message,const char *password,
   randominit(&rand_st,hash_pass[0] ^ hash_message[0],
 	     hash_pass[1] ^ hash_message[1]);
   while (*msg++)
-    *to++= (char) (floor(rnd(&rand_st)*31)+64);
+    *to++= (char) (floor(my_rnd(&rand_st)*31)+64);
   if (!old_ver)
   {						/* Make it harder to break */
-    char extra=(char) (floor(rnd(&rand_st)*31));
+    char extra=(char) (floor(my_rnd(&rand_st)*31));
     while (to_start != to)
       *(to_start++)^=extra;
   }
@@ -711,11 +711,11 @@ my_bool check_scramble(const char *scrambled, const char *message,
 	     hash_pass[1] ^ hash_message[1]);
   to=buff;
   for (pos=scrambled ; *pos ; pos++)
-    *to++=(char) (floor(rnd(&rand_st)*31)+64);
+    *to++=(char) (floor(my_rnd(&rand_st)*31)+64);
   if (old_ver)
     extra=0;
   else
-    extra=(char) (floor(rnd(&rand_st)*31));
+    extra=(char) (floor(my_rnd(&rand_st)*31));
   to=buff;
   while (*scrambled)
   {
diff --git a/sql/slave.cc b/sql/slave.cc
index 61348722d1e..c66f5c307d4 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -263,7 +263,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
   if (log)					// If not first log
   {
     if (strcmp(log, rli->linfo.log_file_name))
-      rli->skip_log_purge=1;			// Different name; Don't purge
+      rli->skip_log_purge= 1;			// Different name; Don't purge
     if (rli->relay_log.find_log_pos(&rli->linfo, log, 1))
     {
       *errmsg="Could not find target log during relay log initialization";
@@ -298,6 +298,12 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
     my_b_seek(rli->cur_log,(off_t)pos);
 
 err:
+  /*
+    If we don't purge, we can't honour relay_log_space_limit ;
+    silently discard it
+  */
+  if (rli->skip_log_purge)
+    rli->log_space_limit= 0;
   pthread_cond_broadcast(&rli->data_cond);
   if (need_data_lock)
     pthread_mutex_unlock(&rli->data_lock);
@@ -1386,7 +1392,8 @@ static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli)
   thd->proc_info = "Waiting for relay log space to free";
 
   while (rli->log_space_limit < rli->log_space_total &&
-	 !(slave_killed=io_slave_killed(thd,mi)))
+	 !(slave_killed=io_slave_killed(thd,mi)) &&
+	 !rli->ignore_log_space_limit)
   {
     pthread_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
   }
@@ -1667,7 +1674,7 @@ bool flush_master_info(MASTER_INFO* mi)
 
 st_relay_log_info::st_relay_log_info()
   :info_fd(-1), cur_log_fd(-1), master_log_pos(0), save_temporary_tables(0),
-   cur_log_old_open_count(0), log_space_total(0),
+   cur_log_old_open_count(0), log_space_total(0), ignore_log_space_limit(0),
    slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), sql_thd(0),
    last_slave_errno(0), inited(0), abort_slave(0), slave_running(0),
    skip_log_purge(0),
@@ -2378,7 +2385,8 @@ reconnect done to recover from failed read");
       }
       flush_master_info(mi);
       if (mi->rli.log_space_limit && mi->rli.log_space_limit <
-	  mi->rli.log_space_total)
+	  mi->rli.log_space_total &&
+	  !mi->rli.ignore_log_space_limit)
 	if (wait_for_relay_log_space(&mi->rli))
 	{
 	  sql_print_error("Slave I/O thread aborted while waiting for relay \
@@ -2491,6 +2499,10 @@ slave_begin:
   pthread_cond_broadcast(&rli->start_cond);
 
   // This should always be set to 0 when the slave thread is started
   rli->pending = 0;
+
+  //tell the I/O thread to take relay_log_space_limit into account from now on
+  rli->ignore_log_space_limit= 0;
+
   if (init_relay_log_pos(rli,
 			 rli->relay_log_name,
 			 rli->relay_log_pos,
@@ -3199,11 +3211,41 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
 	update. If we do not, show slave status will block
       */
       pthread_mutex_unlock(&rli->data_lock);
-      /* Note that wait_for_update unlocks lock_log ! */
-      rli->relay_log.wait_for_update(rli->sql_thd);
-
-      // re-acquire data lock since we released it earlier
-      pthread_mutex_lock(&rli->data_lock);
+
+      /*
+        Possible deadlock :
+        - the I/O thread has reached log_space_limit
+        - the SQL thread has read all relay logs, but cannot purge for some
+        reason:
+          * it has already purged all logs except the current one
+          * there are other logs than the current one but they're involved in
+          a transaction that finishes in the current one (or is not finished)
+        Solution :
+        Wake up the possibly waiting I/O thread, and set a boolean asking
+        the I/O thread to temporarily ignore the log_space_limit
+        constraint, because we do not want the I/O thread to block because of
+        space (it's ok if it blocks for any other reason (e.g. because the
+        master does not send anything). Then the I/O thread stops waiting
+        and reads more events.
+        The SQL thread decides when the I/O thread should take log_space_limit
+        into account again : ignore_log_space_limit is reset to 0
+        in purge_first_log (when the SQL thread purges the just-read relay
+        log), and also when the SQL thread starts. We should also reset
+        ignore_log_space_limit to 0 when the user does RESET SLAVE, but in
+        fact, no need as RESET SLAVE requires that the slave
+        be stopped, and when the SQL thread is later restarted
+        ignore_log_space_limit will be reset to 0.
+      */
+      pthread_mutex_lock(&rli->log_space_lock);
+      // prevent the I/O thread from blocking next times
+      rli->ignore_log_space_limit= 1;
+      // If the I/O thread is blocked, unblock it
+      pthread_cond_broadcast(&rli->log_space_cond);
+      pthread_mutex_unlock(&rli->log_space_lock);
+      // Note that wait_for_update unlocks lock_log !
+      rli->relay_log.wait_for_update(rli->sql_thd);
+      // re-acquire data lock since we released it earlier
+      pthread_mutex_lock(&rli->data_lock);
       continue;
     }
     /*
diff --git a/sql/slave.h b/sql/slave.h
index 0ebd07bf6d8..a4db7388be5 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -156,7 +156,14 @@ typedef struct st_relay_log_info
     extra offset to be added to the position.
   */
   ulonglong relay_log_pos, pending;
+
+  /*
+    Handling of the relay_log_space_limit optional constraint.
+    ignore_log_space_limit is used to resolve a deadlock between I/O and SQL
+    threads, it makes the I/O thread temporarily forget about the constraint
+  */
   ulonglong log_space_limit,log_space_total;
+  bool ignore_log_space_limit;
 
   /*
     InnoDB internally stores the master log position it has processed
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 022a31e7b5c..3a66e906837 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -186,7 +186,7 @@ THD::THD():user_time(0), is_fatal_error(0),
   */
   {
     pthread_mutex_lock(&LOCK_thread_count);
-    ulong tmp=(ulong) (rnd(&sql_rand) * 0xffffffff); /* make all bits random */
+    ulong tmp=(ulong) (my_rnd(&sql_rand) * 0xffffffff); /* make all bits random */
     pthread_mutex_unlock(&LOCK_thread_count);
     randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
   }
diff --git a/sql/sql_crypt.cc b/sql/sql_crypt.cc
index f2e4a8934be..930ecfffef7 100644
--- a/sql/sql_crypt.cc
+++ b/sql/sql_crypt.cc
@@ -46,7 +46,7 @@ void SQL_CRYPT::crypt_init(ulong *rand_nr)
 
   for (i=0 ; i<= 255 ; i++)
   {
-    int idx= (uint) (rnd(&rand)*255.0);
+    int idx= (uint) (my_rnd(&rand)*255.0);
     char a= decode_buff[idx];
     decode_buff[idx]= decode_buff[i];
     decode_buff[+i]=a;
@@ -62,7 +62,7 @@ void SQL_CRYPT::encode(char *str,uint length)
 {
   for (uint i=0; i < length; i++)
   {
-    shift^=(uint) (rnd(&rand)*255.0);
+    shift^=(uint) (my_rnd(&rand)*255.0);
     uint idx= (uint) (uchar) str[0];
     *str++ = (char) ((uchar) encode_buff[idx] ^ shift);
     shift^= idx;
@@ -74,7 +74,7 @@ void SQL_CRYPT::decode(char *str,uint length)
 {
   for (uint i=0; i < length; i++)
   {
-    shift^=(uint) (rnd(&rand)*255.0);
+    shift^=(uint) (my_rnd(&rand)*255.0);
     uint idx= (uint) ((unsigned char) str[0] ^ shift);
     *str = decode_buff[idx];
     shift^= (uint) (uchar) *str++;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 3b27a65f4e1..8ca5bf2f12d 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -563,7 +563,10 @@ check_connections(THD *thd)
       thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors);
     /* Cut very long hostnames to avoid possible overflows */
     if (thd->host)
+    {
       thd->host[min(strlen(thd->host), HOSTNAME_LENGTH)]= 0;
+      thd->host_or_ip= thd->host;
+    }
     if (connect_errors > max_connect_errors)
       return(ER_HOST_IS_BLOCKED);
   }
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 63ba519885f..98bbd8bbb98 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -900,22 +900,21 @@ int change_master(THD* thd, MASTER_INFO* mi)
 
   if (lex_mi->relay_log_name)
   {
-    need_relay_log_purge = 0;
-    mi->rli.skip_log_purge=1;
+    need_relay_log_purge= 0;
     strmake(mi->rli.relay_log_name,lex_mi->relay_log_name,
 	    sizeof(mi->rli.relay_log_name)-1);
   }
 
   if (lex_mi->relay_log_pos)
   {
-    need_relay_log_purge=0;
+    need_relay_log_purge= 0;
     mi->rli.relay_log_pos=lex_mi->relay_log_pos;
   }
 
   flush_master_info(mi);
   if (need_relay_log_purge)
   {
-    mi->rli.skip_log_purge=0;
+    mi->rli.skip_log_purge= 0;
     thd->proc_info="purging old relay logs";
     if (purge_relay_logs(&mi->rli, thd,
 			 0 /* not only reset, but also reinit */,
@@ -929,6 +928,7 @@ int change_master(THD* thd, MASTER_INFO* mi)
   else
   {
     const char* msg;
+    mi->rli.skip_log_purge= 1;
     /* Relay log is already initialized */
     if (init_relay_log_pos(&mi->rli,
 			   mi->rli.relay_log_name,
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index db4de2962b7..ba7d7024eaf 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1334,10 +1334,10 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
     {
       if ((thd_info->host= thd->alloc(LIST_PROCESS_HOST_LEN+1)))
 	my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
-		    "%s:%u", thd->host_or_ip, tmp->peer_port);
+		    "%s:%u", tmp->host_or_ip, tmp->peer_port);
     }
     else
-      thd_info->host= thd->strdup(thd->host_or_ip);
+      thd_info->host= thd->strdup(tmp->host_or_ip);
     if ((thd_info->db=tmp->db))			// Safe test
       thd_info->db=thd->strdup(thd_info->db);
     thd_info->command=(int) tmp->command;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d07b4f1a8d5..e5c9d160725 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -23,6 +23,8 @@
 #include "sql_acl.h"
 #include "sql_select.h"
 
+static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
+
 /* Return 0 if row hasn't changed */
 
 static bool compare_record(TABLE *table, ulong query_id)
@@ -547,11 +549,12 @@ int multi_update::prepare(List<Item> &not_used_values,
 			  SELECT_LEX_UNIT *unit)
 
 /*
-  Store first used table in main_table as this should be updated first
-  This is because we know that no row in this table will be read twice.
+  Initialize table for multi table
 
-  Create temporary tables to store changed values for all other tables
-  that are updated.
+  IMPLEMENTATION
+    - Update first table in join on the fly, if possible
+    - Create temporary tables to store changed values for all other tables
+      that are updated (and main_table if the above doesn't hold).
 */
 
 bool
@@ -565,53 +568,113 @@ multi_update::initialize_tables(JOIN *join)
   main_table=join->join_tab->table;
   trans_safe= transactional_tables= main_table->file->has_transactions();
   log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;
-  table_to_update= (main_table->file->table_flags() & HA_NOT_MULTI_UPDATE) ?
-                   (TABLE *) 0 : main_table;
-  /* Create a temporary table for all tables after except main table */
+  table_to_update= 0;
+
+  /* Create a temporary table for keys to all tables, except main table */
   for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
   {
     TABLE *table=table_ref->table;
-    if (table != table_to_update)
-    {
-      uint cnt= table_ref->shared;
-      ORDER group;
-      List<Item> temp_fields= *fields_for_table[cnt];
-      TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
-
-      /*
-	Create a temporary table to store all fields that are changed for this
-	table. The first field in the temporary table is a pointer to the
-	original row so that we can find and update it
-      */
-
-      /* ok to be on stack as this is not referenced outside of this func */
-      Field_string offset(table->file->ref_length, 0, "offset",
-			  table, &my_charset_bin);
-      if (temp_fields.push_front(new Item_field(((Field *) &offset))))
-	DBUG_RETURN(1);
+    uint cnt= table_ref->shared;
+    List<Item> temp_fields= *fields_for_table[cnt];
+    ORDER group;
 
-      /* Make an unique key over the first field to avoid duplicated updates */
-      bzero((char*) &group, sizeof(group));
-      group.asc= 1;
-      group.item= (Item**) temp_fields.head_ref();
-
-      tmp_param->quick_group=1;
-      tmp_param->field_count=temp_fields.elements;
-      tmp_param->group_parts=1;
-      tmp_param->group_length= table->file->ref_length;
-      if (!(tmp_tables[cnt]=create_tmp_table(thd,
-					     tmp_param,
-					     temp_fields,
-					     (ORDER*) &group, 0, 0,
-					     TMP_TABLE_ALL_COLUMNS,
-					     HA_POS_ERROR)))
-	DBUG_RETURN(1);
-      tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
+    if (table == main_table)			// First table in join
+    {
+      if (safe_update_on_fly(join->join_tab, &temp_fields))
+      {
+	table_to_update= main_table;		// Update table on the fly
+	continue;
+      }
     }
+
+    TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
+
+    /*
+      Create a temporary table to store all fields that are changed for this
+      table. The first field in the temporary table is a pointer to the
+      original row so that we can find and update it
+    */
+
+    /* ok to be on stack as this is not referenced outside of this func */
+    Field_string offset(table->file->ref_length, 0, "offset",
+			table, 1, &my_charset_bin);
+    if (temp_fields.push_front(new Item_field(((Field *) &offset))))
+      DBUG_RETURN(1);
+
+    /* Make an unique key over the first field to avoid duplicated updates */
+    bzero((char*) &group, sizeof(group));
+    group.asc= 1;
+    group.item= (Item**) temp_fields.head_ref();
+
+    tmp_param->quick_group=1;
+    tmp_param->field_count=temp_fields.elements;
+    tmp_param->group_parts=1;
+    tmp_param->group_length= table->file->ref_length;
+    if (!(tmp_tables[cnt]=create_tmp_table(thd,
+					   tmp_param,
+					   temp_fields,
+					   (ORDER*) &group, 0, 0,
+					   TMP_TABLE_ALL_COLUMNS,
+					   HA_POS_ERROR)))
+      DBUG_RETURN(1);
+    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
   }
   DBUG_RETURN(0);
 }
+
+/*
+  Check if table is safe to update on fly
+
+  SYNOPSIS
+    safe_update_on_fly
+    join_tab		How table is used in join
+    fields		Fields that are updated
+
+  NOTES
+    We can update the first table in join on the fly if we know that
+    a row in this tabel will never be read twice. This is true under
+    the folloing conditions:
+
+    - We are doing a table scan and the data is in a separate file (MyISAM) or
+      if we don't update a clustered key.
+
+    - We are doing a range scan and we don't update the scan key or
+      the primary key for a clustered table handler.
+
+  WARNING
+    This code is a bit dependent of how make_join_readinfo() works.
+
+  RETURN
+    0		Not safe to update
+    1		Safe to update
+*/
+
+static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
+{
+  TABLE *table= join_tab->table;
+  switch (join_tab->type) {
+  case JT_SYSTEM:
+  case JT_CONST:
+  case JT_EQ_REF:
+    return 1;					// At most one matching row
+  case JT_REF:
+    return !check_if_key_used(table, join_tab->ref.key, *fields);
+  case JT_ALL:
+    /* If range search on index */
+    if (join_tab->quick)
+      return !check_if_key_used(table, join_tab->quick->index,
+				*fields);
+    /* If scanning in clustered key */
+    if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+	table->primary_key < MAX_KEY)
+      return !check_if_key_used(table, table->primary_key, *fields);
+    return 1;
+  default:
+    break;					// Avoid compler warning
+  }
+  return 0;
+}
+
 
 multi_update::~multi_update()
 {
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fe420549a59..e66e896c590 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -283,6 +283,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token	MAX_UPDATES_PER_HOUR
 %token	MEDIUM_SYM
 %token	MERGE_SYM
+%token	MEMORY_SYM
 %token	MIN_ROWS
 %token	MYISAM_SYM
 %token	NAMES_SYM
@@ -1009,6 +1010,7 @@ table_types:
 	| MYISAM_SYM	{ $$= DB_TYPE_MYISAM; }
 	| MERGE_SYM	{ $$= DB_TYPE_MRG_MYISAM; }
 	| HEAP_SYM	{ $$= DB_TYPE_HEAP; }
+	| MEMORY_SYM	{ $$= DB_TYPE_HEAP; }
 	| BERKELEY_DB_SYM { $$= DB_TYPE_BERKELEY_DB; }
 	| INNOBASE_SYM	{ $$= DB_TYPE_INNODB; };
 
@@ -4088,6 +4090,7 @@ keyword:
 	| MAX_UPDATES_PER_HOUR	{}
 	| MEDIUM_SYM		{}
 	| MERGE_SYM		{}
+	| MEMORY_SYM		{}
 	| MINUTE_SYM		{}
 	| MIN_ROWS		{}
 	| MODIFY_SYM		{}
diff --git a/sql/table.cc b/sql/table.cc
index 58375ecdbeb..b36171cab93 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1245,7 +1245,7 @@ bool check_table_name(const char *name, uint length)
     }
   }
 #endif
-    if (*name == '/' || *name == FN_LIBCHAR || *name == FN_EXTCHAR)
+    if (*name == '/' || *name == '\\' || *name == FN_EXTCHAR)
       return 1;
     name++;
   }
```
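The handler.cc hunk changes how get_new_handler() reacts to an unknown storage engine: instead of silently treating it as MyISAM, it first retries with the session's default table type and only then falls back to MyISAM. The sketch below mirrors that fallback chain outside the server; the enum values and the `resolve()` function are illustrative stand-ins, not the server's real handler factory.

```cpp
// Sketch of the fallback logic the patch adds to get_new_handler()
// (sql/handler.cc): unknown engine -> session default engine -> MyISAM.
// Names here are stand-ins for illustration only.
#include <cstdio>

enum db_type_sketch { DB_HEAP, DB_MYISAM, DB_INNODB, DB_UNKNOWN };

static db_type_sketch resolve(db_type_sketch requested,
                              db_type_sketch session_default) {
  switch (requested) {
  case DB_HEAP:
  case DB_MYISAM:
  case DB_INNODB:
    return requested;                 // engine is available, use it
  default:                            // should never happen
    // Try the session's 'default table type' first, as the patch does...
    if (requested != session_default)
      return resolve(session_default, session_default);
    // ...and only then fall back to MyISAM.
    return DB_MYISAM;
  }
}

int main() {
  // Unknown engine with an InnoDB session default resolves to InnoDB;
  // if the default itself is unknown, we end up with MyISAM.
  std::printf("%d\n", resolve(DB_UNKNOWN, DB_INNODB));   // prints 2 (DB_INNODB)
  std::printf("%d\n", resolve(DB_UNKNOWN, DB_UNKNOWN));  // prints 1 (DB_MYISAM)
  return 0;
}
```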
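The password.c hunk renames the internal pseudo-random generator from rnd() to my_rnd(), and the callers in item_func.cc, mysqld.cc, sql_class.cc and sql_crypt.cc are updated to match. The patch only shows the update formula and the rename, so the following standalone C++ sketch re-expresses that formula plus the printable-character mapping used by scramble(); the 0x3FFFFFFF modulus, the seeding helper and the demo main() are assumptions added so the example compiles on its own.

```cpp
// Standalone sketch of the my_rnd() update step shown in the patch
// (sql/password.c). The modulus used in randominit_sketch() is an
// assumption for illustration; the patch itself only shows the
// two-seed update and the rename from rnd() to my_rnd().
#include <cmath>
#include <cstdio>

struct rand_struct_sketch {
  unsigned long seed1, seed2, max_value;
  double max_value_dbl;
};

// Hypothetical seeding helper matching the struct above.
static void randominit_sketch(rand_struct_sketch *r,
                              unsigned long seed1, unsigned long seed2) {
  r->max_value = 0x3FFFFFFFUL;                 // assumption for the sketch
  r->max_value_dbl = (double) r->max_value;
  r->seed1 = seed1 % r->max_value;
  r->seed2 = seed2 % r->max_value;
}

// The update step as it appears around the renamed function in the diff:
// a simple mix of two seeds, returning a double in [0, 1).
static double my_rnd_sketch(rand_struct_sketch *r) {
  r->seed1 = (r->seed1 * 3 + r->seed2) % r->max_value;
  r->seed2 = (r->seed1 + r->seed2 + 33) % r->max_value;
  return ((double) r->seed1) / r->max_value_dbl;
}

int main() {
  rand_struct_sketch r;
  randominit_sketch(&r, 123456789UL, 362436069UL);
  // scramble() in the patch maps each value to a printable character with
  // floor(my_rnd(&rand_st)*31)+64; reproduce that mapping for 8 characters.
  for (int i = 0; i < 8; i++)
    std::printf("%c", (char) (std::floor(my_rnd_sketch(&r) * 31) + 64));
  std::printf("\n");
  return 0;
}
```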
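The slave.cc/slave.h/log.cc changes add ignore_log_space_limit to break a deadlock: when the SQL thread has read everything but cannot purge the current relay log, it sets the flag and broadcasts log_space_cond so the I/O thread stops honouring relay_log_space_limit and fetches more events; the flag is cleared again in purge_first_log() and when the SQL thread starts. A minimal condition-variable sketch of that hand-off follows, using std::thread instead of the server's pthread primitives; the struct, sizes and sleep are illustrative only.

```cpp
// Sketch of the relay-log-space hand-off the patch adds between the
// replication I/O and SQL threads. std::mutex/condition_variable stand in
// for the server's pthread objects; values are illustrative.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct relay_info_sketch {
  std::mutex log_space_lock;
  std::condition_variable log_space_cond;
  unsigned long long log_space_limit = 1024;   // assumed limit
  unsigned long long log_space_total = 2048;   // already over the limit
  bool ignore_log_space_limit = false;
};

// I/O thread side, mirroring wait_for_relay_log_space(): wait while we are
// over the limit, unless asked to ignore the limit.
static void io_thread(relay_info_sketch *rli) {
  std::unique_lock<std::mutex> lock(rli->log_space_lock);
  rli->log_space_cond.wait(lock, [rli] {
    return rli->log_space_total < rli->log_space_limit ||
           rli->ignore_log_space_limit;
  });
  std::printf("I/O thread resumed (ignore_log_space_limit=%d)\n",
              (int) rli->ignore_log_space_limit);
}

// SQL thread side, mirroring next_event(): it cannot purge, so it asks the
// I/O thread to temporarily ignore the limit and wakes it up.
static void sql_thread(relay_info_sketch *rli) {
  std::lock_guard<std::mutex> lock(rli->log_space_lock);
  rli->ignore_log_space_limit = true;  // prevent the I/O thread from blocking
  rli->log_space_cond.notify_all();    // if it is blocked, unblock it
}

int main() {
  relay_info_sketch rli;
  std::thread io(io_thread, &rli);
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  std::thread sql(sql_thread, &rli);
  io.join();
  sql.join();
  return 0;
}
```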
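Finally, the sql_update.cc rewrite: the first table of a multi-table UPDATE is changed in place only when safe_update_on_fly() says no row can be read twice; every other table gets its changes buffered in a temporary table keyed by the row's position ("offset"), so a row matched more than once by the join is still updated only once. The sketch below models just that bookkeeping with a std::map standing in for the temporary table; RowId and Update are illustrative types, not server structures.

```cpp
// Sketch of the deferred multi-table UPDATE bookkeeping described in the
// patch (sql/sql_update.cc): buffer at most one pending change per row id
// during the join scan, then apply the buffered changes afterwards.
#include <cstdio>
#include <map>
#include <string>

using RowId = unsigned long;                 // stands in for handler::ref

struct Update { std::string new_value; };    // changed columns for one row

int main() {
  // Phase 1: while scanning the join, record at most one pending update per
  // row id -- this is what the unique key over the "offset" field achieves.
  std::map<RowId, Update> pending;
  pending.emplace(42, Update{"first"});
  pending.emplace(42, Update{"second"});     // duplicate join match: ignored
  pending.emplace(7,  Update{"other"});

  // Phase 2: after the scan, apply the buffered updates to the base table.
  for (const auto &p : pending)
    std::printf("update row %lu -> %s\n", p.first, p.second.new_value.c_str());
  return 0;
}
```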