author    | Alexey Kopytov <Alexey.Kopytov@Sun.com> | 2010-05-24 00:41:18 +0400
committer | Alexey Kopytov <Alexey.Kopytov@Sun.com> | 2010-05-24 00:41:18 +0400
commit    | 598cf5aec7c7cf4dbb158c87d80916b6e58e7ae6 (patch)
tree      | f4f37b22f4271c44aa03c55abe609a058828bedc /sql
parent    | dabd4a1c55cabbe73350c7c9b4c2bd6ab2db2b5c (diff)
parent    | d72a4710aaad2f1e76ac20d50793f13f941ef899 (diff)
download  | mariadb-git-598cf5aec7c7cf4dbb158c87d80916b6e58e7ae6.tar.gz
Manual merge of mysql-5.1-bugteam to mysql-trunk-merge.
Conflicts:
conflict Makefile.am
conflict mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result
conflict mysql-test/suite/rpl/t/rpl_tmp_table_and_DDL.test
conflict sql/opt_sum.cc
conflict sql/set_var.cc
conflict sql/sql_base.cc
conflict sql/sql_priv.h
conflict sql/sql_show.cc
Diffstat (limited to 'sql')
-rw-r--r-- | sql/log_event.cc     |  34
-rw-r--r-- | sql/log_event_old.cc |  36
-rw-r--r-- | sql/opt_sum.cc       | 298
-rw-r--r-- | sql/sql_class.h      |  11
-rw-r--r-- | sql/sql_delete.cc    |   4
-rw-r--r-- | sql/sql_show.cc      | 103
-rw-r--r-- | sql/sql_table.cc     |  23
-rw-r--r-- | sql/sql_table.h      |   1
-rw-r--r-- | sql/sql_update.cc    |   3
-rw-r--r-- | sql/table.cc         |  34
10 files changed, 289 insertions, 258 deletions
diff --git a/sql/log_event.cc b/sql/log_event.cc index 7d60328381d..c9a015e8fba 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8927,11 +8927,28 @@ static bool record_compare(TABLE *table) { for (int i = 0 ; i < 2 ; ++i) { - saved_x[i]= table->record[i][0]; - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][0]|= 1U; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); + /* + If we have an X bit then we need to take care of it. + */ + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + { + saved_x[i]= table->record[i][0]; + table->record[i][0]|= 1U; + } + + /* + If (last_null_bit_pos == 0 && null_bytes > 1), then: + + X bit (if any) + N nullable fields + M Field_bit fields = 8 bits + + Ie, the entire byte is used. + */ + if (table->s->last_null_bit_pos > 0) + { + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } } } @@ -8971,8 +8988,11 @@ record_compare_exit: { for (int i = 0 ; i < 2 ; ++i) { - table->record[i][0]= saved_x[i]; - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + table->record[i][0]= saved_x[i]; + + if (table->s->last_null_bit_pos) + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; } } diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index f6c5b5d1023..76eda43aa48 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -314,12 +314,29 @@ static bool record_compare(TABLE *table) if (table->s->null_bytes > 0) { for (int i = 0 ; i < 2 ; ++i) - { - saved_x[i]= table->record[i][0]; - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][0]|= 1U; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); + { + /* + If we have an X bit then we need to take care of it. + */ + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + { + saved_x[i]= table->record[i][0]; + table->record[i][0]|= 1U; + } + + /* + If (last_null_bit_pos == 0 && null_bytes > 1), then: + + X bit (if any) + N nullable fields + M Field_bit fields = 8 bits + + Ie, the entire byte is used. + */ + if (table->s->last_null_bit_pos > 0) + { + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } } } @@ -359,8 +376,11 @@ record_compare_exit: { for (int i = 0 ; i < 2 ; ++i) { - table->record[i][0]= saved_x[i]; - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + table->record[i][0]= saved_x[i]; + + if (table->s->last_null_bit_pos > 0) + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; } } diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index fd2040a4979..0c79c8dc797 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -90,6 +90,123 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables) /** + Use index to read MIN(field) value. + + @param table Table object + @param ref Reference to the structure where we store the key value + @item_field Field used in MIN() + @range_fl Whether range endpoint is strict less than + @prefix_len Length of common key part for the range + + @retval + 0 No errors + HA_ERR_... 
Otherwise +*/ + +static int get_index_min_value(TABLE *table, TABLE_REF *ref, + Item_field *item_field, uint range_fl, + uint prefix_len) +{ + int error; + + if (!ref->key_length) + error= table->file->index_first(table->record[0]); + else + { + /* + Use index to replace MIN/MAX functions with their values + according to the following rules: + + 1) Insert the minimum non-null values where the WHERE clause still + matches, or + 2) a NULL value if there are only NULL values for key_part_k. + 3) Fail, producing a row of nulls + + Implementation: Read the smallest value using the search key. If + the interval is open, read the next value after the search + key. If read fails, and we're looking for a MIN() value for a + nullable column, test if there is an exact match for the key. + */ + if (!(range_fl & NEAR_MIN)) + /* + Closed interval: Either The MIN argument is non-nullable, or + we have a >= predicate for the MIN argument. + */ + error= table->file->index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_KEY_OR_NEXT); + else + { + /* + Open interval: There are two cases: + 1) We have only MIN() and the argument column is nullable, or + 2) there is a > predicate on it, nullability is irrelevant. + We need to scan the next bigger record first. + */ + error= table->file->index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_AFTER_KEY); + /* + If the found record is outside the group formed by the search + prefix, or there is no such record at all, check if all + records in that group have NULL in the MIN argument + column. If that is the case return that NULL. + + Check if case 1 from above holds. If it does, we should read + the skipped tuple. + */ + if (item_field->field->real_maybe_null() && + ref->key_buff[prefix_len] == 1 && + /* + Last keypart (i.e. the argument to MIN) is set to NULL by + find_key_for_maxmin only if all other keyparts are bound + to constants in a conjunction of equalities. Hence, we + can detect this by checking only if the last keypart is + NULL. + */ + (error == HA_ERR_KEY_NOT_FOUND || + key_cmp_if_same(table, ref->key_buff, ref->key, prefix_len))) + { + DBUG_ASSERT(item_field->field->real_maybe_null()); + error= table->file->index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_KEY_EXACT); + } + } + } + return error; +} + + +/** + Use index to read MAX(field) value. + + @param table Table object + @param ref Reference to the structure where we store the key value + @range_fl Whether range endpoint is strict greater than + + @retval + 0 No errors + HA_ERR_... Otherwise +*/ + +static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) +{ + return (ref->key_length ? + table->file->index_read_map(table->record[0], ref->key_buff, + make_prev_keypart_map(ref->key_parts), + range_fl & NEAR_MAX ? + HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV) : + table->file->index_last(table->record[0])); +} + + + +/** Substitutes constants for some COUNT(), MIN() and MAX() functions. 
@param tables list of leaves of join table tree @@ -220,9 +337,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; break; case Item_sum::MIN_FUNC: + case Item_sum::MAX_FUNC: { + int is_max= test(item_sum->sum_func() == Item_sum::MAX_FUNC); /* - If MIN(expr) is the first part of a key or if all previous + If MIN/MAX(expr) is the first part of a key or if all previous parts of the key is found in the COND, then we can use indexes to find the key. */ @@ -241,89 +360,26 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Look for a partial key that can be used for optimization. If we succeed, ref.key_length will contain the length of this key, while prefix_len will contain the length of - the beginning of this key without field used in MIN(). + the beginning of this key without field used in MIN/MAX(). Type of range for the key part for this field will be returned in range_fl. */ if (table->file->inited || (outer_tables & table->map) || - !find_key_for_maxmin(0, &ref, item_field->field, conds, + !find_key_for_maxmin(is_max, &ref, item_field->field, conds, &range_fl, &prefix_len)) { const_result= 0; break; } - error= table->file->ha_index_init((uint) ref.key, 1); + table->file->ha_index_init((uint) ref.key, 1); + + error= is_max ? + get_index_max_value(table, &ref, range_fl) : + get_index_min_value(table, &ref, item_field, range_fl, + prefix_len); - if (!ref.key_length) - error= table->file->index_first(table->record[0]); - else - { - /* - Use index to replace MIN/MAX functions with their values - according to the following rules: - - 1) Insert the minimum non-null values where the WHERE clause still - matches, or - 2) a NULL value if there are only NULL values for key_part_k. - 3) Fail, producing a row of nulls - - Implementation: Read the smallest value using the search key. If - the interval is open, read the next value after the search - key. If read fails, and we're looking for a MIN() value for a - nullable column, test if there is an exact match for the key. - */ - if (!(range_fl & NEAR_MIN)) - /* - Closed interval: Either The MIN argument is non-nullable, or - we have a >= predicate for the MIN argument. - */ - error= table->file->index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_OR_NEXT); - else - { - /* - Open interval: There are two cases: - 1) We have only MIN() and the argument column is nullable, or - 2) there is a > predicate on it, nullability is irrelevant. - We need to scan the next bigger record first. - */ - error= table->file->index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_AFTER_KEY); - /* - If the found record is outside the group formed by the search - prefix, or there is no such record at all, check if all - records in that group have NULL in the MIN argument - column. If that is the case return that NULL. - - Check if case 1 from above holds. If it does, we should read - the skipped tuple. - */ - if (item_field->field->real_maybe_null() && - ref.key_buff[prefix_len] == 1 && - /* - Last keypart (i.e. the argument to MIN) is set to NULL by - find_key_for_maxmin only if all other keyparts are bound - to constants in a conjunction of equalities. Hence, we - can detect this by checking only if the last keypart is - NULL. 
- */ - (error == HA_ERR_KEY_NOT_FOUND || - key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len))) - { - DBUG_ASSERT(item_field->field->real_maybe_null()); - error= table->file->index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_EXACT); - } - } - } /* Verify that the read tuple indeed matches the search key */ - if (!error && reckey_in_range(0, &ref, item_field->field, + if (!error && reckey_in_range(is_max, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; table->set_keyread(FALSE); @@ -355,100 +411,18 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) item_sum->set_aggregator(item_sum->has_with_distinct() ? Aggregator::DISTINCT_AGGREGATOR : Aggregator::SIMPLE_AGGREGATOR); - if (!count) - { - /* If count == 0, then we know that is_exact_count == TRUE. */ - ((Item_sum_min*) item_sum)->aggregator_clear(); /* Set to NULL. */ - } - else - ((Item_sum_min*) item_sum)->reset(); /* Set to the constant value. */ - ((Item_sum_min*) item_sum)->make_const(); - recalc_const_item= 1; - break; - } - case Item_sum::MAX_FUNC: - { /* - If MAX(expr) is the first part of a key or if all previous - parts of the key is found in the COND, then we can use - indexes to find the key. + If count == 0 (so is_exact_count == TRUE) and + there're no outer joins, set to NULL, + otherwise set to the constant value. */ - Item *expr=item_sum->get_arg(0); - if (expr->real_item()->type() == Item::FIELD_ITEM) - { - uchar key_buff[MAX_KEY_LENGTH]; - TABLE_REF ref; - uint range_fl, prefix_len; - - ref.key_buff= key_buff; - Item_field *item_field= (Item_field*) (expr->real_item()); - TABLE *table= item_field->field->table; - - /* - Look for a partial key that can be used for optimization. - If we succeed, ref.key_length will contain the length of - this key, while prefix_len will contain the length of - the beginning of this key without field used in MAX(). - Type of range for the key part for this field will be - returned in range_fl. - */ - if (table->file->inited || (outer_tables & table->map) || - !find_key_for_maxmin(1, &ref, item_field->field, conds, - &range_fl, &prefix_len)) - { - const_result= 0; - break; - } - error= table->file->ha_index_init((uint) ref.key, 1); - - if (!ref.key_length) - error= table->file->index_last(table->record[0]); - else - error= table->file->index_read_map(table->record[0], key_buff, - make_prev_keypart_map(ref.key_parts), - range_fl & NEAR_MAX ? - HA_READ_BEFORE_KEY : - HA_READ_PREFIX_LAST_OR_PREV); - if (!error && reckey_in_range(1, &ref, item_field->field, - conds, range_fl, prefix_len)) - error= HA_ERR_KEY_NOT_FOUND; - table->set_keyread(FALSE); - table->file->ha_index_end(); - if (error) - { - if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE - /* HA_ERR_LOCK_DEADLOCK or some other error */ - table->file->print_error(error, MYF(ME_FATALERROR)); - return(error); - } - removed_tables|= table->map; - } - else if (!expr->const_item() || !is_exact_count) - { - /* - The optimization is not applicable in both cases: - (a) 'expr' is a non-constant expression. Then we can't - replace 'expr' by a constant. - (b) 'expr' is a costant. According to ANSI, MIN/MAX must return - NULL if the query does not return any rows. Thus, if we are not - able to determine if the query returns any rows, we can't apply - the optimization and replace MIN/MAX with a constant. 
- */ - const_result= 0; - break; - } - item_sum->set_aggregator(item_sum->has_with_distinct() ? - Aggregator::DISTINCT_AGGREGATOR : - Aggregator::SIMPLE_AGGREGATOR); - if (!count) + if (!count && !outer_tables) { - /* If count != 1, then we know that is_exact_count == TRUE. */ - ((Item_sum_max*) item_sum)->aggregator_clear(); /* Set to NULL. */ + item_sum->aggregator_clear(); } else - ((Item_sum_max*) item_sum)->reset(); /* Set to the constant value. */ - ((Item_sum_max*) item_sum)->make_const(); + item_sum->reset(); + item_sum->make_const(); recalc_const_item= 1; break; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 950ef83b237..a1a115cb53d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2051,8 +2051,15 @@ public: */ ha_rows sent_row_count; - /* - number of rows we read, sent or not, including in create_sort_index() + /** + Number of rows read and/or evaluated for a statement. Used for + slow log reporting. + + An examined row is defined as a row that is read and/or evaluated + according to a statement condition, including in + create_sort_index(). Rows may be counted more than once, e.g., a + statement including ORDER BY could possibly evaluate the row in + filesort() before reading it for e.g. update. */ ha_rows examined_row_count; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 10bdb8a22a6..25e470f56ea 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -262,6 +262,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(TRUE); } + thd->examined_row_count+= examined_rows; /* Filesort has already found and selected the rows we want to delete, so we don't need the where clause @@ -279,7 +280,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, free_underlaid_joins(thd, select_lex); DBUG_RETURN(TRUE); } - if (usable_index==MAX_KEY) + if (usable_index==MAX_KEY || (select && select->quick)) init_read_record(&info, thd, table, select, 1, 1, FALSE); else init_read_record_idx(&info, thd, table, 1, usable_index); @@ -318,6 +319,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, while (!(error=info.read_record(&info)) && !thd->killed && ! thd->is_error()) { + thd->examined_row_count++; // thd->is_error() is tested to disallow delete row on error if (!(select && select->skip_record())&& ! 
thd->is_error() ) { diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 412fcdbe9bd..8daf7a73a80 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -4348,6 +4348,15 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) } +static inline void copy_field_as_string(Field *to_field, Field *from_field) +{ + char buff[MAX_FIELD_WIDTH]; + String tmp_str(buff, sizeof(buff), system_charset_info); + from_field->val_str(&tmp_str); + to_field->store(tmp_str.ptr(), tmp_str.length(), system_charset_info); +} + + /** @brief Store record into I_S.PARAMETERS table @@ -4514,18 +4523,26 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table, bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, const char *wild, bool full_access, const char *sp_user) { - String tmp_string; - String sp_db, sp_name, definer; MYSQL_TIME time; LEX *lex= thd->lex; CHARSET_INFO *cs= system_charset_info; - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DB], &sp_db); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_NAME], &sp_name); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DEFINER],&definer); + char sp_db_buff[NAME_LEN + 1], sp_name_buff[NAME_LEN + 1], + definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 2], + returns_buff[MAX_FIELD_WIDTH]; + + String sp_db(sp_db_buff, sizeof(sp_db_buff), cs); + String sp_name(sp_name_buff, sizeof(sp_name_buff), cs); + String definer(definer_buff, sizeof(definer_buff), cs); + String returns(returns_buff, sizeof(returns_buff), cs); + + proc_table->field[MYSQL_PROC_FIELD_DB]->val_str(&sp_db); + proc_table->field[MYSQL_PROC_FIELD_NAME]->val_str(&sp_name); + proc_table->field[MYSQL_PROC_FIELD_DEFINER]->val_str(&definer); + if (!full_access) - full_access= !strcmp(sp_user, definer.ptr()); - if (!full_access && - check_some_routine_access(thd, sp_db.ptr(), sp_name.ptr(), + full_access= !strcmp(sp_user, definer.c_ptr_safe()); + if (!full_access && + check_some_routine_access(thd, sp_db.c_ptr_safe(), sp_name.c_ptr_safe(), proc_table->field[MYSQL_PROC_MYSQL_TYPE]-> val_int() == TYPE_ENUM_PROCEDURE)) return 0; @@ -4539,32 +4556,30 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0) { restore_record(table, s->default_values); - if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0)) + if (!wild || !wild[0] || !wild_compare(sp_name.c_ptr_safe(), wild, 0)) { int enum_idx= (int) proc_table->field[MYSQL_PROC_FIELD_ACCESS]->val_int(); table->field[3]->store(sp_name.ptr(), sp_name.length(), cs); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME], - &tmp_string); - table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs); + + copy_field_as_string(table->field[0], + proc_table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]); table->field[1]->store(STRING_WITH_LEN("def"), cs); table->field[2]->store(sp_db.ptr(), sp_db.length(), cs); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_MYSQL_TYPE], - &tmp_string); - table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[4], + proc_table->field[MYSQL_PROC_MYSQL_TYPE]); + if (proc_table->field[MYSQL_PROC_MYSQL_TYPE]->val_int() == TYPE_ENUM_FUNCTION) { sp_head *sp; bool free_sp_head; - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_RETURNS], - &tmp_string); - + proc_table->field[MYSQL_PROC_FIELD_RETURNS]->val_str(&returns); sp= sp_load_for_information_schema(thd, proc_table, &sp_db, &sp_name, 
(ulong) proc_table-> field[MYSQL_PROC_FIELD_SQL_MODE]-> val_int(), TYPE_ENUM_FUNCTION, - tmp_string.c_ptr_safe(), + returns.c_ptr_safe(), "", &free_sp_head); if (sp) @@ -4595,24 +4610,19 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, if (full_access) { - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_BODY_UTF8], - &tmp_string); - table->field[14]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[14], + proc_table->field[MYSQL_PROC_FIELD_BODY_UTF8]); table->field[14]->set_notnull(); } table->field[13]->store(STRING_WITH_LEN("SQL"), cs); table->field[17]->store(STRING_WITH_LEN("SQL"), cs); - - - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DETERMINISTIC], - &tmp_string); - table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[18], + proc_table->field[MYSQL_PROC_FIELD_DETERMINISTIC]); table->field[19]->store(sp_data_access_name[enum_idx].str, sp_data_access_name[enum_idx].length , cs); + copy_field_as_string(table->field[21], + proc_table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SECURITY_TYPE], - &tmp_string); - table->field[21]->store(tmp_string.ptr(), tmp_string.length(), cs); bzero((char *)&time, sizeof(time)); ((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_CREATED])-> get_time(&time); @@ -4621,29 +4631,20 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, ((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_MODIFIED])-> get_time(&time); table->field[23]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + copy_field_as_string(table->field[24], + proc_table->field[MYSQL_PROC_FIELD_SQL_MODE]); + copy_field_as_string(table->field[25], + proc_table->field[MYSQL_PROC_FIELD_COMMENT]); - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_SQL_MODE], - &tmp_string); - table->field[24]->store(tmp_string.ptr(), tmp_string.length(), cs); - - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_COMMENT], - &tmp_string); - table->field[25]->store(tmp_string.ptr(), tmp_string.length(), cs); table->field[26]->store(definer.ptr(), definer.length(), cs); - - get_field(thd->mem_root, - proc_table->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT], - &tmp_string); - table->field[27]->store(tmp_string.ptr(), tmp_string.length(), cs); - - get_field(thd->mem_root, - proc_table->field[ MYSQL_PROC_FIELD_COLLATION_CONNECTION], - &tmp_string); - table->field[28]->store(tmp_string.ptr(), tmp_string.length(), cs); - - get_field(thd->mem_root, proc_table->field[MYSQL_PROC_FIELD_DB_COLLATION], - &tmp_string); - table->field[29]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[27], + proc_table-> + field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT]); + copy_field_as_string(table->field[28], + proc_table-> + field[MYSQL_PROC_FIELD_COLLATION_CONNECTION]); + copy_field_as_string(table->field[29], + proc_table->field[MYSQL_PROC_FIELD_DB_COLLATION]); return schema_table_store_record(thd, table); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 8058ee20868..9177573971e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -426,6 +426,25 @@ uint filename_to_tablename(const char *from, char *to, uint to_length) /** + Check if given string begins with "#mysql50#" prefix + + @param name string to check cut + + @retval + FALSE no prefix found + @retval + TRUE prefix found +*/ + +bool check_mysql50_prefix(const char *name) +{ + return (name[0] == '#' && + 
!strncmp(name, MYSQL50_TABLE_NAME_PREFIX, + MYSQL50_TABLE_NAME_PREFIX_LENGTH)); +} + + +/** Check if given string begins with "#mysql50#" prefix, cut it if so. @param from string to check and cut @@ -440,9 +459,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length) uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length) { - if (from[0] == '#' && - !strncmp(from, MYSQL50_TABLE_NAME_PREFIX, - MYSQL50_TABLE_NAME_PREFIX_LENGTH)) + if (check_mysql50_prefix(from)) return (uint) (strmake(to, from + MYSQL50_TABLE_NAME_PREFIX_LENGTH, to_length - 1) - to); return 0; diff --git a/sql/sql_table.h b/sql/sql_table.h index 3114876f5ed..40b24605bd6 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -126,6 +126,7 @@ enum enum_explain_filename_mode uint filename_to_tablename(const char *from, char *to, uint to_length); uint tablename_to_filename(const char *from, char *to, uint to_length); uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length); +bool check_mysql50_prefix(const char *name); uint build_table_filename(char *buff, size_t bufflen, const char *db, const char *table, const char *ext, uint flags); uint build_table_shadow_filename(char *buff, size_t bufflen, diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 2abd307deba..2a017a4a64c 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -436,6 +436,7 @@ int mysql_update(THD *thd, { goto err; } + thd->examined_row_count+= examined_rows; /* Filesort has already found and selected the rows we want to update, so we don't need the where clause @@ -482,6 +483,7 @@ int mysql_update(THD *thd, while (!(error=info.read_record(&info)) && !thd->killed) { + thd->examined_row_count++; if (!(select && select->skip_record())) { if (table->file->was_semi_consistent_read()) @@ -588,6 +590,7 @@ int mysql_update(THD *thd, while (!(error=info.read_record(&info)) && !thd->killed) { + thd->examined_row_count++; if (!(select && select->skip_record())) { if (table->file->was_semi_consistent_read()) diff --git a/sql/table.cc b/sql/table.cc index 95a683d554b..5bd753d7ca9 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2747,44 +2747,30 @@ bool check_db_name(LEX_STRING *org_name) { char *name= org_name->str; uint name_length= org_name->length; + bool check_for_path_chars; if (!name_length || name_length > NAME_LEN) return 1; + if ((check_for_path_chars= check_mysql50_prefix(name))) + { + name+= MYSQL50_TABLE_NAME_PREFIX_LENGTH; + name_length-= MYSQL50_TABLE_NAME_PREFIX_LENGTH; + } + if (lower_case_table_names && name != any_db) my_casedn_str(files_charset_info, name); -#if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(system_charset_info)) - { - name_length= 0; - bool last_char_is_space= TRUE; - char *end= name + org_name->length; - while (name < end) - { - int len; - last_char_is_space= my_isspace(system_charset_info, *name); - len= my_ismbchar(system_charset_info, name, end); - if (!len) - len= 1; - name+= len; - name_length++; - } - return (last_char_is_space || name_length > NAME_CHAR_LEN); - } - else -#endif - return ((org_name->str[org_name->length - 1] != ' ') || - (name_length > NAME_CHAR_LEN)); /* purecov: inspected */ + return check_table_name(name, name_length, check_for_path_chars); } + /* Allow anything as a table name, as long as it doesn't contain an ' ' at the end returns 1 on error */ - bool check_table_name(const char *name, uint length, bool check_for_path_chars) { uint name_length= 0; // name length in symbols @@ -2812,10 +2798,10 @@ bool check_table_name(const char *name, 
uint length, bool check_for_path_chars) continue; } } +#endif if (check_for_path_chars && (*name == '/' || *name == '\\' || *name == '~' || *name == FN_EXTCHAR)) return 1; -#endif name++; name_length++; }
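
For readers of the record_compare() hunks in sql/log_event.cc and sql/log_event_old.cc above: the fix masks the X bit only when the record format actually reserves one (i.e. HA_OPTION_PACK_RECORD is not set), and pads the trailing null byte only when last_null_bit_pos leaves unused filler bits, so that a plain memcmp() of the two record buffers compares meaningful bits only. Below is a minimal standalone sketch of that masking step, using simplified stand-in types and names (TableShare, mask_unused_null_bits) rather than the server's real TABLE/TABLE_SHARE structures.

```cpp
#include <cstdint>

// Simplified stand-in for illustration only; not the server's TABLE_SHARE.
struct TableShare {
  bool has_x_bit;              // true unless HA_OPTION_PACK_RECORD is set
  unsigned null_bytes;         // size of the null-bits prefix in the record
  unsigned last_null_bit_pos;  // 0 means the last null byte is fully used
};

// Force the unused bits of the null-bits prefix to a fixed value so that
// memcmp() on two record buffers compares only meaningful bits.
// saved_x/saved_filler receive the original bytes so they can be restored.
static void mask_unused_null_bits(const TableShare &s, uint8_t *rec,
                                  uint8_t &saved_x, uint8_t &saved_filler)
{
  if (s.has_x_bit)              // only touch byte 0 if an X bit exists
  {
    saved_x = rec[0];
    rec[0] |= 1U;
  }
  if (s.last_null_bit_pos > 0)  // only pad if the last byte has filler bits
  {
    saved_filler = rec[s.null_bytes - 1];
    rec[s.null_bytes - 1] |= 256U - (1U << s.last_null_bit_pos);
  }
}
```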
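
Similarly, the sql/sql_table.cc and sql/table.cc hunks factor the "#mysql50#" prefix test into a reusable predicate so that check_db_name() can strip the prefix before validating the rest of the name. A rough sketch of that factoring follows, with illustrative names and constants (has_mysql50_prefix, cut_mysql50_prefix, PREFIX) that are not the server's actual identifiers.

```cpp
#include <cstring>

// Stand-in for MYSQL50_TABLE_NAME_PREFIX; illustrative only.
static const char PREFIX[] = "#mysql50#";
static const size_t PREFIX_LEN = sizeof(PREFIX) - 1;

// Shared predicate: does the name carry the pre-5.1 encoding marker?
bool has_mysql50_prefix(const char *name)
{
  return name[0] == '#' && strncmp(name, PREFIX, PREFIX_LEN) == 0;
}

// Cut the prefix if present, copying the remainder into `to`.
// Returns the copied length, or 0 if there was no prefix
// (mirroring check_n_cut_mysql50_prefix()'s contract).
size_t cut_mysql50_prefix(const char *from, char *to, size_t to_length)
{
  if (!has_mysql50_prefix(from))
    return 0;
  size_t len = strlen(from + PREFIX_LEN);
  if (len >= to_length)
    len = to_length - 1;
  memcpy(to, from + PREFIX_LEN, len);
  to[len] = '\0';
  return len;
}
```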