author | Sergei Golubchik <serg@mariadb.org> | 2015-01-31 21:48:47 +0100 |
---|---|---|
committer | Sergei Golubchik <serg@mariadb.org> | 2015-01-31 21:48:47 +0100 |
commit | 4b21cd21fef2763d757aa15681c9c9a7ed5db3c9 (patch) | |
tree | e9e233392b47f93de12cecce1f7f403ce26057b0 /sql | |
parent | 0b049b40124d72d77c008d4441e4db2e77f0f127 (diff) | |
parent | a06624d61f36c70edd63adcfe2803bb7a8564de5 (diff) | |
download | mariadb-git-4b21cd21fef2763d757aa15681c9c9a7ed5db3c9.tar.gz | |
Merge branch '10.0' into merge-wip
Diffstat (limited to 'sql')
43 files changed, 614 insertions, 248 deletions
diff --git a/sql/field.cc b/sql/field.cc index a6a2ad91c69..fe60a824568 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4899,7 +4899,7 @@ void Field_timestamp::set_explicit_default(Item *value) { if (((value->type() == Item::DEFAULT_VALUE_ITEM && !((Item_default_value*)value)->arg) || - (!maybe_null() && value->is_null()))) + (!maybe_null() && value->null_value))) return; set_has_explicit_value(); } diff --git a/sql/field.h b/sql/field.h index fec615ec4cf..ba4c6ff0302 100644 --- a/sql/field.h +++ b/sql/field.h @@ -670,23 +670,26 @@ public: functions but no GROUP BY clause) with no qualifying rows. If this is the case (in which TABLE::null_row is true), the field is considered to be NULL. + Note that if a table->null_row is set then also all null_bits are set for the row. - Otherwise, if the field is NULLable, it has a valid null_ptr - pointer, and its NULLity is recorded in the "null_bit" bit of - null_ptr[row_offset]. + In the case of the 'result_field' for GROUP BY, table->null_row might + refer to the *next* row in the table (when the algorithm is: read the + next row, see if any of group column values have changed, send the + result - grouped - row to the client if yes). So, table->null_row might + be wrong, but such a result_field is always nullable (that's defined by + original_field->maybe_null()) and we trust its null bit. */ - return (table->null_row ? TRUE : - null_ptr ? MY_TEST(null_ptr[row_offset] & null_bit) : 0); + return null_ptr ? null_ptr[row_offset] & null_bit : table->null_row; } inline bool is_real_null(my_ptrdiff_t row_offset= 0) const - { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : 0; } + { return null_ptr && (null_ptr[row_offset] & null_bit); } inline bool is_null_in_record(const uchar *record) const { if (!null_ptr) return 0; - return MY_TEST(record[(uint) (null_ptr - table->record[0])] & null_bit); + return record[(uint) (null_ptr - table->record[0])] & null_bit; } inline void set_null(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } diff --git a/sql/filesort.cc b/sql/filesort.cc index 569e1d37e0c..76f744df720 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -166,8 +166,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, TABLE_LIST *tab= table->pos_in_table_list; Item_subselect *subselect= tab ? 
tab->containing_subselect() : 0; - *found_rows= HA_POS_ERROR; - MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str); DEBUG_SYNC(thd, "filesort_start"); @@ -190,6 +188,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, my_b_clear(&buffpek_pointers); buffpek=0; error= 1; + *found_rows= HA_POS_ERROR; param.init_for_filesort(sortlength(thd, sortorder, s_length, &multi_byte_charset), @@ -690,8 +689,7 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select, ref_pos= ref_buff; quick_select=select && select->quick; record=0; - if (pq) // don't count unless pq is used - *found_rows= 0; + *found_rows= 0; flag= ((file->ha_table_flags() & HA_REC_NOT_IN_SEQ) || quick_select); if (flag) ref_pos= &file->ref[0]; @@ -814,14 +812,9 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select, if (write_record) { + ++(*found_rows); if (pq) { - /* - only count rows when pq is used - otherwise there might be - other filters *after* the filesort, we don't know the final row - count here - */ - (*found_rows)++; pq->push(ref_pos); idx= pq->num_elements(); } diff --git a/sql/handler.cc b/sql/handler.cc index eb2deeaffc1..cded7fb8cd8 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -86,9 +86,7 @@ static const LEX_STRING sys_table_aliases[]= }; const char *ha_row_type[] = { - "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", - "PAGE", - "?","?","?" + "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "PAGE" }; const char *tx_isolation_names[] = diff --git a/sql/handler.h b/sql/handler.h index f1e607ef58b..aff7bf42c27 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -440,8 +440,10 @@ enum legacy_db_type enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED, - ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, - ROW_TYPE_PAGE }; + ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGE }; + +/* not part of the enum, so that it shouldn't be in switch(row_type) */ +#define ROW_TYPE_MAX ((uint)ROW_TYPE_PAGE + 1) /* Specifies data storage format for individual columns */ enum column_format_type { @@ -1382,6 +1384,9 @@ handlerton *ha_default_tmp_handlerton(THD *thd); #define HTON_NO_BINLOG_ROW_OPT (1 << 9) #define HTON_SUPPORTS_EXTENDED_KEYS (1 <<10) //supports extended keys +// MySQL compatibility. Unused. +#define HTON_SUPPORTS_FOREIGN_KEYS (1 << 0) //Foreign key constraint supported. + class Ha_trx_info; struct THD_TRANS @@ -1572,7 +1577,7 @@ class partition_info; struct st_partition_iter; -enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES }; +enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES, HA_CHOICE_MAX }; enum enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT= 0, HA_STATS_AUTO_RECALC_ON, diff --git a/sql/item.cc b/sql/item.cc index e5c618c9e47..bff524880db 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -885,20 +885,20 @@ Item_ident::Item_ident(THD *thd, Item_ident *item) void Item_ident::cleanup() { DBUG_ENTER("Item_ident::cleanup"); -#ifdef CANT_BE_USED_AS_MEMORY_IS_FREED - db_name ? db_name : "(null)", - orig_db_name ? orig_db_name : "(null)", - table_name ? table_name : "(null)", - orig_table_name ? orig_table_name : "(null)", - field_name ? field_name : "(null)", - orig_field_name ? 
orig_field_name : "(null)")); -#endif + bool was_fixed= fixed; Item::cleanup(); db_name= orig_db_name; table_name= orig_table_name; field_name= orig_field_name; /* Store if this Item was depended */ - can_be_depended= MY_TEST(depended_from); + if (was_fixed) + { + /* + We can trust that depended_from set correctly only if this item + was fixed + */ + can_be_depended= MY_TEST(depended_from); + } DBUG_VOID_RETURN; } diff --git a/sql/item.h b/sql/item.h index e131aef8df5..ad16eab40f0 100644 --- a/sql/item.h +++ b/sql/item.h @@ -4512,7 +4512,7 @@ public: /** @todo Implement the is_null() method for this class. Currently calling is_null() - on any Item_cache object resolves to Item::is_null(), which reutns FALSE + on any Item_cache object resolves to Item::is_null(), which returns FALSE for any value. */ diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index f06747b6938..84ba2abf679 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -627,17 +627,6 @@ int Arg_comparator::set_compare_func(Item_result_field *item, Item_result type) } case STRING_RESULT: { - /* - We must set cmp_charset here as we may be called from for an automatic - generated item, like in natural join - */ - if (cmp_collation.set((*a)->collation, (*b)->collation) || - cmp_collation.derivation == DERIVATION_NONE) - { - my_coll_agg_error((*a)->collation, (*b)->collation, - owner->func_name()); - return 1; - } if (cmp_collation.collation == &my_charset_bin) { /* @@ -761,6 +750,37 @@ bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type, /** + Aggregate comparator argument charsets for comparison. + One of the arguments ("a" or "b") can be replaced, + typically by Item_string or Item_func_conv_charset. + + @return Aggregation result + @retval false - if no conversion is needed, + or if one of the arguments was converted + @retval true - on error, if arguments are not comparable. + + TODO: get rid of this method eventually and refactor the calling code. + Argument conversion should happen on the Item_func level. + Arg_comparator should get comparable arguments. +*/ +bool Arg_comparator::agg_arg_charsets_for_comparison() +{ + if (cmp_collation.set((*a)->collation, (*b)->collation, MY_COLL_CMP_CONV) || + cmp_collation.derivation == DERIVATION_NONE) + { + my_coll_agg_error((*a)->collation, (*b)->collation, owner->func_name()); + return true; + } + if (agg_item_set_converter(cmp_collation, owner->func_name(), + a, 1, MY_COLL_CMP_CONV, 1) || + agg_item_set_converter(cmp_collation, owner->func_name(), + b, 1, MY_COLL_CMP_CONV, 1)) + return true; + return false; +} + + +/** Prepare the comparator (set the comparison function) for comparing items *a1 and *a2 in the context of 'type'. @@ -787,10 +807,11 @@ int Arg_comparator::set_cmp_func(Item_result_field *owner_arg, (*a)->result_type() == STRING_RESULT && (*b)->result_type() == STRING_RESULT) { - DTCollation coll; - coll.set((*a)->collation.collation); - if (agg_item_set_converter(coll, owner->func_name(), - b, 1, MY_COLL_CMP_CONV, 1)) + /* + We must set cmp_collation here as we may be called from for an automatic + generated item, like in natural join + */ + if (agg_arg_charsets_for_comparison()) return 1; } if (type == INT_RESULT && diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index c789b217d5f..2f4cbf8788f 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -48,6 +48,14 @@ class Arg_comparator: public Sql_alloc THD *thd; Item *a_cache, *b_cache; // Cached values of a and b items // when one of arguments is NULL. 
+ int set_compare_func(Item_result_field *owner, Item_result type); + inline int set_compare_func(Item_result_field *owner_arg) + { + return set_compare_func(owner_arg, item_cmp_type((*a)->result_type(), + (*b)->result_type())); + } + bool agg_arg_charsets_for_comparison(); + public: DTCollation cmp_collation; /* Allow owner function to use string buffers. */ @@ -58,12 +66,6 @@ public: Arg_comparator(Item **a1, Item **a2): a(a1), b(a2), set_null(TRUE), comparators(0), thd(0), a_cache(0), b_cache(0) {}; - int set_compare_func(Item_result_field *owner, Item_result type); - inline int set_compare_func(Item_result_field *owner_arg) - { - return set_compare_func(owner_arg, item_cmp_type((*a)->result_type(), - (*b)->result_type())); - } int set_cmp_func(Item_result_field *owner_arg, Item **a1, Item **a2, Item_result type); diff --git a/sql/item_func.cc b/sql/item_func.cc index d36b9bb6c03..92b20fbb15f 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -4192,9 +4192,10 @@ void mysql_ull_set_explicit_lock_duration(THD *thd) When MDL detects a lock wait timeout, it pushes an error into the statement diagnostics area. For GET_LOCK(), lock wait timeout is not an error, - but a special return value (0). NULL is returned in - case of error. - Capture and suppress lock wait timeout. + but a special return value (0). + Similarly, killing get_lock wait is not an error either, + but a return value NULL. + Capture and suppress lock wait timeouts and kills. */ class Lock_wait_timeout_handler: public Internal_error_handler @@ -4213,7 +4214,7 @@ public: bool Lock_wait_timeout_handler:: -handle_condition(THD * /* thd */, uint sql_errno, +handle_condition(THD *thd, uint sql_errno, const char * /* sqlstate */, Sql_condition::enum_warning_level /* level */, const char *message, @@ -4224,6 +4225,9 @@ handle_condition(THD * /* thd */, uint sql_errno, m_lock_wait_timeout= true; return true; /* condition handled */ } + if (thd->is_killed()) + return true; + return false; } @@ -4629,6 +4633,11 @@ longlong Item_func_sleep::val_int() mysql_cond_destroy(&cond); + DBUG_EXECUTE_IF("sleep_inject_query_done_debug_sync", { + debug_sync_set_action + (thd, STRING_WITH_LEN("dispatch_command_end SIGNAL query_done")); + };); + return MY_TEST(!error); // Return 1 killed } diff --git a/sql/item_func.h b/sql/item_func.h index 6f3fb64ba9a..3c46d6549a0 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,7 +1,7 @@ #ifndef ITEM_FUNC_INCLUDED #define ITEM_FUNC_INCLUDED -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2014, SkySQL Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. 
+ Copyright (c) 2009, 2014, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -287,7 +287,8 @@ public: inline longlong check_integer_overflow(longlong value, bool val_unsigned) { if ((unsigned_flag && !val_unsigned && value < 0) || - (!unsigned_flag && val_unsigned && (ulonglong) value > LONGLONG_MAX)) + (!unsigned_flag && val_unsigned && + (ulonglong) value > (ulonglong) LONGLONG_MAX)) return raise_integer_overflow(); return value; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 6165cf8ce51..171750363ed 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1564,10 +1564,9 @@ String *Item_temporal_hybrid_func::val_str_ascii(String *str) bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) { longlong value=args[0]->val_int(); - if (args[0]->null_value) - return (null_value= 1); - if ((fuzzy_date & TIME_NO_ZERO_DATE) && value == 0) - return (null_value= 1); + if ((null_value= (args[0]->null_value || + ((fuzzy_date & TIME_NO_ZERO_DATE) && value == 0)))) + return true; bzero(ltime, sizeof(MYSQL_TIME)); if (get_date_from_daynr((long) value, &ltime->year, &ltime->month, &ltime->day)) diff --git a/sql/lock.cc b/sql/lock.cc index c9aa49a4057..c241e635e6b 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -303,15 +303,16 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags) int rc= 1; ulong timeout= (flags & MYSQL_LOCK_IGNORE_TIMEOUT) ? LONG_TIMEOUT : thd->variables.lock_wait_timeout; - + PSI_stage_info org_stage; DBUG_ENTER("mysql_lock_tables(sql_lock)"); - THD_STAGE_INFO(thd, stage_system_lock); + thd->enter_stage(&stage_system_lock, &org_stage, __func__, __FILE__, + __LINE__); if (sql_lock->table_count && lock_external(thd, sql_lock->table, sql_lock->table_count)) goto end; - thd_proc_info(thd, "Table lock"); + THD_STAGE_INFO(thd, stage_table_lock); /* Copy the lock data array. thr_multi_lock() reorders its contents. */ memmove(sql_lock->locks + sql_lock->lock_count, sql_lock->locks, @@ -326,7 +327,7 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags) (void) unlock_external(thd, sql_lock->table, sql_lock->table_count); end: - THD_STAGE_INFO(thd, stage_after_table_lock); + THD_STAGE_INFO(thd, org_stage); if (thd->killed) { diff --git a/sql/log.cc b/sql/log.cc index 8a247a50ee1..88f612e642b 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3069,7 +3069,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name, MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period) - :reset_master_pending(false), mark_xid_done_waiting(0), + :reset_master_pending(0), mark_xid_done_waiting(0), bytes_written(0), file_id(1), open_count(1), group_commit_queue(0), group_commit_queue_busy(FALSE), num_commits(0), num_group_commits(0), @@ -3919,12 +3919,13 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log, do this before we take the LOCK_log to not deadlock. */ mysql_mutex_lock(&LOCK_xid_list); - reset_master_pending= true; + reset_master_pending++; while (mark_xid_done_waiting > 0) mysql_cond_wait(&COND_xid_list, &LOCK_xid_list); mysql_mutex_unlock(&LOCK_xid_list); } + DEBUG_SYNC(thd, "reset_logs_after_set_reset_master_pending"); /* We need to get both locks to be sure that no one is trying to write to the index log file.
@@ -4108,7 +4109,7 @@ err: DBUG_ASSERT(b->xid_count == 0); my_free(binlog_xid_count_list.get()); } - reset_master_pending= false; + reset_master_pending--; mysql_mutex_unlock(&LOCK_xid_list); } diff --git a/sql/log.h b/sql/log.h index 762dc5f54f4..390b8c3a046 100644 --- a/sql/log.h +++ b/sql/log.h @@ -475,7 +475,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG anyway). Instead we should signal COND_xid_list whenever a new binlog checkpoint arrives - when all have arrived, RESET MASTER will complete. */ - bool reset_master_pending; + uint reset_master_pending; ulong mark_xid_done_waiting; /* LOCK_log and LOCK_index are inited by init_pthread_objects() */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 07182390de3..9bf9da8c9fa 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1658,7 +1658,7 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, ev = new Execute_load_log_event(buf, event_len, description_event); break; case START_EVENT_V3: /* this is sent only by MySQL <=4.x */ - ev = new Start_log_event_v3(buf, description_event); + ev = new Start_log_event_v3(buf, event_len, description_event); break; case STOP_EVENT: ev = new Stop_log_event(buf, description_event); @@ -4637,11 +4637,16 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info) Start_log_event_v3::Start_log_event_v3() */ -Start_log_event_v3::Start_log_event_v3(const char* buf, +Start_log_event_v3::Start_log_event_v3(const char* buf, uint event_len, const Format_description_log_event *description_event) - :Log_event(buf, description_event) + :Log_event(buf, description_event), binlog_version(BINLOG_VERSION) { + if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET) + { + server_version[0]= 0; + return; + } buf+= LOG_EVENT_MINIMAL_HEADER_LEN; binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET); memcpy(server_version, buf+ST_SERVER_VER_OFFSET, @@ -4946,9 +4951,12 @@ Format_description_log_event(const char* buf, const Format_description_log_event* description_event) - :Start_log_event_v3(buf, description_event), event_type_permutation(0) + :Start_log_event_v3(buf, event_len, description_event), + common_header_len(0), post_header_len(NULL), event_type_permutation(0) { DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)"); + if (!Start_log_event_v3::is_valid()) + DBUG_VOID_RETURN; /* sanity check */ buf+= LOG_EVENT_MINIMAL_HEADER_LEN; if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN) DBUG_VOID_RETURN; /* sanity check */ diff --git a/sql/log_event.h b/sql/log_event.h index 599fa4ba34f..1cc75e0bd9a 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -2521,7 +2521,7 @@ public: void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Start_log_event_v3(const char* buf, + Start_log_event_v3(const char* buf, uint event_len, const Format_description_log_event* description_event); ~Start_log_event_v3() {} Log_event_type get_type_code() { return START_EVENT_V3;} @@ -2530,7 +2530,7 @@ public: #ifdef MYSQL_SERVER bool write(IO_CACHE* file); #endif - bool is_valid() const { return 1; } + bool is_valid() const { return server_version[0] != 0; } int get_data_size() { return START_V3_HEADER_LEN; //no variable-sized part diff --git a/sql/mdl.cc b/sql/mdl.cc index ec2dea0676f..87e19e035ae 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -2497,6 +2497,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout) my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); break; case 
MDL_wait::KILLED: + get_thd()->send_kill_message(); break; default: DBUG_ASSERT(0); diff --git a/sql/mdl.h b/sql/mdl.h index 4cefaa8d6c3..003ba242aae 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -351,7 +351,7 @@ public: NAME_LEN) - m_ptr + 1); m_hash_value= my_hash_sort(&my_charset_bin, (uchar*) m_ptr + 1, m_length - 1); - DBUG_ASSERT(ok_for_lower_case_names(db)); + DBUG_ASSERT(mdl_namespace == USER_LOCK || ok_for_lower_case_names(db)); } void mdl_key_init(const MDL_key *rhs) { diff --git a/sql/mysqld.cc b/sql/mysqld.cc index ea53e475192..a3df9d7bded 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5772,6 +5772,7 @@ int mysqld_main(int argc, char **argv) (char*) "" : mysqld_unix_port), mysqld_port, MYSQL_COMPILATION_COMMENT); + fclose(stdin); #if defined(_WIN32) && !defined(EMBEDDED_LIBRARY) Service.SetRunning(); #endif @@ -9783,6 +9784,8 @@ PSI_stage_info stage_storing_result_in_query_cache= { 0, "storing result in quer PSI_stage_info stage_storing_row_into_queue= { 0, "storing row into queue", 0}; PSI_stage_info stage_system_lock= { 0, "System lock", 0}; PSI_stage_info stage_unlocking_tables= { 0, "Unlocking tables", 0}; +PSI_stage_info stage_table_lock= { 0, "Table lock", 0}; +PSI_stage_info stage_filling_schema_table= { 0, "Filling schema table", 0}; PSI_stage_info stage_update= { 0, "update", 0}; PSI_stage_info stage_updating= { 0, "updating", 0}; PSI_stage_info stage_updating_main_table= { 0, "updating main table", 0}; @@ -9919,6 +9922,8 @@ PSI_stage_info *all_server_stages[]= & stage_storing_row_into_queue, & stage_system_lock, & stage_unlocking_tables, + & stage_table_lock, + & stage_filling_schema_table, & stage_update, & stage_updating, & stage_updating_main_table, diff --git a/sql/mysqld.h b/sql/mysqld.h index 8bd2759b5d0..94fd3e1b704 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -416,6 +416,8 @@ extern PSI_stage_info stage_storing_result_in_query_cache; extern PSI_stage_info stage_storing_row_into_queue; extern PSI_stage_info stage_system_lock; extern PSI_stage_info stage_unlocking_tables; +extern PSI_stage_info stage_table_lock; +extern PSI_stage_info stage_filling_schema_table; extern PSI_stage_info stage_update; extern PSI_stage_info stage_updating; extern PSI_stage_info stage_updating_main_table; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 15edd847e52..3cac6e5139e 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -13240,12 +13240,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) SYNOPSIS check_group_min_max_predicates() - cond [in] the expression tree being analyzed - min_max_arg [in] the field referenced by the MIN/MAX function(s) - image_type [in] - has_min_max_arg [out] true if the subtree being analyzed references min_max_arg - has_other_arg [out] true if the subtree being analyzed references a column - other min_max_arg + cond [in] the expression tree being analyzed + min_max_arg [in] the field referenced by the MIN/MAX function(s) + image_type [in] + has_min_max_arg [out] true if the subtree being analyzed references + min_max_arg + has_other_arg [out] true if the subtree being analyzed references a + column other min_max_arg DESCRIPTION The function walks recursively over the cond tree representing a WHERE @@ -13289,7 +13290,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, (2) the subtree passes the test, but it is an OR and it references both the min/max argument and other columns. 
*/ - if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1 + if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1 image_type, &has_min_max, &has_other) || (func_type == Item_func::COND_OR_FUNC && has_min_max && has_other))//2 @@ -13305,7 +13306,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, a subquery in the WHERE clause. */ - if (cond_type == Item::SUBSELECT_ITEM) + if (unlikely(cond_type == Item::SUBSELECT_ITEM)) { Item_subselect *subs_cond= (Item_subselect*) cond; if (subs_cond->is_correlated) @@ -13322,7 +13323,14 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, } DBUG_RETURN(TRUE); } - + /* + Subquery with IS [NOT] NULL + TODO: Look into the cache_item and optimize it like we do for + subselect's above + */ + if (unlikely(cond_type == Item::CACHE_ITEM)) + DBUG_RETURN(cond->const_item()); + /* Condition of the form 'field' is equivalent to 'field <> 0' and thus satisfies the SA3 condition. @@ -13339,7 +13347,9 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, /* We presume that at this point there are no other Items than functions. */ DBUG_ASSERT(cond_type == Item::FUNC_ITEM); - + if (unlikely(cond_type != Item::FUNC_ITEM)) /* Safety */ + DBUG_RETURN(FALSE); + /* Test if cond references only group-by or non-group fields. */ Item_func *pred= (Item_func*) cond; Item_func::Functype pred_type= pred->functype(); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 9857a888340..b61b2bf3586 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -977,6 +977,7 @@ Master_info_index::Master_info_index() memcpy(index_file_name + dir_length, "multi-", 6); bzero((char*) &index_file, sizeof(index_file)); + index_file.file= -1; } Master_info_index::~Master_info_index() @@ -984,7 +985,7 @@ Master_info_index::~Master_info_index() /* This will close connection for all objects in the cache */ my_hash_free(&master_info_hash); end_io_cache(&index_file); - if (index_file.file > 0) + if (index_file.file >= 0) my_close(index_file.file, MYF(MY_WME)); } diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 62842810797..55cc699d078 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -105,9 +105,10 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) static void -finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry, - rpl_group_info *rgi) +finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, + rpl_parallel_entry *entry, rpl_group_info *rgi) { + THD *thd= rpt->thd; wait_for_commit *wfc= &rgi->commit_orderer; int err; @@ -138,25 +139,47 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry, signal_error_to_sql_driver_thread(thd, rgi, err); thd->wait_for_commit_ptr= NULL; + mysql_mutex_lock(&entry->LOCK_parallel_entry); /* - Record that this event group has finished (eg. transaction is - committed, if transactional), so other event groups will no longer - attempt to wait for us to commit. Once we have increased - entry->last_committed_sub_id, no other threads will execute - register_wait_for_prior_commit() against us. Thus, by doing one - extra (usually redundant) wakeup_subsequent_commits() we can ensure - that no register_wait_for_prior_commit() can ever happen without a - subsequent wakeup_subsequent_commits() to wake it up. - - We can race here with the next transactions, but that is fine, as - long as we check that we do not decrease last_committed_sub_id. 
If - this commit is done, then any prior commits will also have been - done and also no longer need waiting for. + We need to mark that this event group started its commit phase, in case we + missed it before (otherwise we would deadlock the next event group that is + waiting for this). In most cases (normal DML), it will be a no-op. */ - mysql_mutex_lock(&entry->LOCK_parallel_entry); + rgi->mark_start_commit_no_lock(); + if (entry->last_committed_sub_id < sub_id) + { + /* + Record that this event group has finished (eg. transaction is + committed, if transactional), so other event groups will no longer + attempt to wait for us to commit. Once we have increased + entry->last_committed_sub_id, no other threads will execute + register_wait_for_prior_commit() against us. Thus, by doing one + extra (usually redundant) wakeup_subsequent_commits() we can ensure + that no register_wait_for_prior_commit() can ever happen without a + subsequent wakeup_subsequent_commits() to wake it up. + + We can race here with the next transactions, but that is fine, as + long as we check that we do not decrease last_committed_sub_id. If + this commit is done, then any prior commits will also have been + done and also no longer need waiting for. + */ entry->last_committed_sub_id= sub_id; + /* Now free any GCOs in which all transactions have committed. */ + group_commit_orderer *tmp_gco= rgi->gco; + while (tmp_gco && + (!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id)) + tmp_gco= tmp_gco->prev_gco; + while (tmp_gco) + { + group_commit_orderer *prev_gco= tmp_gco->prev_gco; + tmp_gco->next_gco->prev_gco= NULL; + rpt->loc_free_gco(tmp_gco); + tmp_gco= prev_gco; + } + } + /* If this event group got error, then any following event groups that have not yet started should just skip their group, preparing for stop of the @@ -165,12 +188,6 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry, if (unlikely(rgi->worker_error) && entry->stop_on_error_sub_id == (uint64)ULONGLONG_MAX) entry->stop_on_error_sub_id= sub_id; - /* - We need to mark that this event group started its commit phase, in case we - missed it before (otherwise we would deadlock the next event group that is - waiting for this). In most cases (normal DML), it will be a no-op. - */ - rgi->mark_start_commit_no_lock(); mysql_mutex_unlock(&entry->LOCK_parallel_entry); thd->clear_error(); @@ -336,6 +353,7 @@ do_retry: until after the unmark. 
*/ rgi->unmark_start_commit(); + DEBUG_SYNC(thd, "rpl_parallel_retry_after_unmark"); /* We might get the deadlock error that causes the retry during commit, while @@ -558,7 +576,7 @@ handle_rpl_parallel_thread(void *arg) bool in_event_group= false; bool skip_event_group= false; rpl_group_info *group_rgi= NULL; - group_commit_orderer *gco, *tmp_gco; + group_commit_orderer *gco; uint64 event_gtid_sub_id= 0; rpl_sql_thread_info sql_info(NULL); int err; @@ -658,7 +676,7 @@ handle_rpl_parallel_thread(void *arg) */ group_rgi->cleanup_context(thd, 1); in_event_group= false; - finish_event_group(thd, group_rgi->gtid_sub_id, + finish_event_group(rpt, group_rgi->gtid_sub_id, qev->entry_for_queued, group_rgi); rpt->loc_free_rgi(group_rgi); @@ -713,8 +731,12 @@ handle_rpl_parallel_thread(void *arg) mysql_mutex_lock(&entry->LOCK_parallel_entry); if (!(gco->flags & group_commit_orderer::INSTALLED)) { - if (gco->prev_gco) - gco->prev_gco->next_gco= gco; + group_commit_orderer *prev_gco= gco->prev_gco; + if (prev_gco) + { + prev_gco->last_sub_id= gco->prior_sub_id; + prev_gco->next_gco= gco; + } gco->flags|= group_commit_orderer::INSTALLED; } wait_count= gco->wait_count; @@ -731,6 +753,8 @@ handle_rpl_parallel_thread(void *arg) if (thd->check_killed() && !rgi->worker_error) { DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed"); + thd->clear_error(); + thd->get_stmt_da()->reset_diagnostics_area(); thd->send_kill_message(); slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); @@ -747,18 +771,6 @@ handle_rpl_parallel_thread(void *arg) } while (wait_count > entry->count_committing_event_groups); } - if ((tmp_gco= gco->prev_gco)) - { - /* - Now all the event groups in the previous batch have entered their - commit phase, and will no longer access their gco. So we can free - it here. 
- */ - DBUG_ASSERT(!tmp_gco->prev_gco); - gco->prev_gco= NULL; - rpt->loc_free_gco(tmp_gco); - } - if (entry->force_abort && wait_count > entry->stop_count) { /* @@ -834,6 +846,7 @@ handle_rpl_parallel_thread(void *arg) { DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit"); rgi->mark_start_commit(); + DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit"); } /* @@ -854,6 +867,7 @@ handle_rpl_parallel_thread(void *arg) thd->get_stmt_da()->reset_diagnostics_area(); my_error(ER_LOCK_DEADLOCK, MYF(0)); err= 1; + DEBUG_SYNC(thd, "rpl_parallel_simulate_temp_err_xid"); }); if (!err) #endif @@ -893,7 +907,7 @@ handle_rpl_parallel_thread(void *arg) if (end_of_group) { in_event_group= false; - finish_event_group(thd, event_gtid_sub_id, entry, rgi); + finish_event_group(rpt, event_gtid_sub_id, entry, rgi); rpt->loc_free_rgi(rgi); thd->rgi_slave= group_rgi= rgi= NULL; skip_event_group= false; @@ -934,7 +948,7 @@ handle_rpl_parallel_thread(void *arg) */ mysql_mutex_unlock(&rpt->LOCK_rpl_thread); signal_error_to_sql_driver_thread(thd, group_rgi, 1); - finish_event_group(thd, group_rgi->gtid_sub_id, + finish_event_group(rpt, group_rgi->gtid_sub_id, group_rgi->parallel_entry, group_rgi); in_event_group= false; mysql_mutex_lock(&rpt->LOCK_rpl_thread); @@ -983,7 +997,6 @@ handle_rpl_parallel_thread(void *arg) static void dealloc_gco(group_commit_orderer *gco) { - DBUG_ASSERT(!gco->prev_gco /* Must only free after dealloc previous */); mysql_cond_destroy(&gco->COND_group_commit_orderer); my_free(gco); } @@ -1362,7 +1375,8 @@ rpl_parallel_thread::free_rgi(rpl_group_info *rgi) group_commit_orderer * -rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev) +rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, + uint64 prior_sub_id) { group_commit_orderer *gco; mysql_mutex_assert_owner(&LOCK_rpl_thread); @@ -1378,6 +1392,7 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev) gco->wait_count= wait_count; gco->prev_gco= prev; gco->next_gco= NULL; + gco->prior_sub_id= prior_sub_id; gco->flags= 0; return gco; } @@ -1386,7 +1401,6 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev) void rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco) { - DBUG_ASSERT(!gco->prev_gco /* Must not free until wait has completed. */); if (!loc_gco_list) loc_gco_last_ptr_ptr= &gco->next_gco; else @@ -1593,8 +1607,12 @@ static void free_rpl_parallel_entry(void *element) { rpl_parallel_entry *e= (rpl_parallel_entry *)element; - if (e->current_gco) + while (e->current_gco) + { + group_commit_orderer *prev_gco= e->current_gco->prev_gco; dealloc_gco(e->current_gco); + e->current_gco= prev_gco; + } mysql_cond_destroy(&e->COND_parallel_entry); mysql_mutex_destroy(&e->LOCK_parallel_entry); my_free(e); @@ -2142,7 +2160,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, */ uint64 count= e->count_queued_event_groups; - if (!(gco= cur_thread->get_gco(count, gco))) + if (!(gco= cur_thread->get_gco(count, gco, e->current_sub_id))) { cur_thread->free_rgi(rgi); cur_thread->free_qev(qev); diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index 2062c89e62e..f1145d4cc4e 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -39,9 +39,12 @@ struct inuse_relaylog; rpl_parallel_entry::count_committing_event_groups has reached gco->next_gco->wait_count. - - When gco->wait_count is reached for a worker and the wait completes, - the worker frees gco->prev_gco; at this point it is guaranteed not to - be needed any longer. 
+ - The gco lives until all its event groups have completed their commit. + This is detected by rpl_parallel_entry::last_committed_sub_id being + greater than or equal gco->last_sub_id. Once this happens, the gco is + freed. Note that since update of last_committed_sub_id can happen + out-of-order, the thread that frees a given gco can be for any later + event group, not necessarily an event group from the gco being freed. */ struct group_commit_orderer { /* Wakeup condition, used with rpl_parallel_entry::LOCK_parallel_entry. */ @@ -50,6 +53,16 @@ struct group_commit_orderer { group_commit_orderer *prev_gco; group_commit_orderer *next_gco; /* + The sub_id of last event group in this the previous GCO. + Only valid if prev_gco != NULL. + */ + uint64 prior_sub_id; + /* + The sub_id of the last event group in this GCO. Only valid when next_gco + is non-NULL. + */ + uint64 last_sub_id; + /* This flag is set when this GCO has been installed into the next_gco pointer of the previous GCO. */ @@ -190,7 +203,8 @@ struct rpl_parallel_thread { LOCK_rpl_thread mutex. */ void free_rgi(rpl_group_info *rgi); - group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev); + group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev, + uint64 first_sub_id); /* Put a gco on the local free list, to be later released to the global free list by batch_free(). diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index ad33541b5ab..eb37b5c6cbc 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -1849,11 +1849,20 @@ void rpl_group_info::slave_close_thread_tables(THD *thd) static void -mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco) +mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco, + rpl_group_info *rgi) { + group_commit_orderer *tmp; uint64 count= ++e->count_committing_event_groups; - if (gco->next_gco && gco->next_gco->wait_count == count) - mysql_cond_broadcast(&gco->next_gco->COND_group_commit_orderer); + /* Signal any following GCO whose wait_count has been reached now. */ + tmp= gco; + while ((tmp= tmp->next_gco)) + { + uint64 wait_count= tmp->wait_count; + if (wait_count > count) + break; + mysql_cond_broadcast(&tmp->COND_group_commit_orderer); + } } @@ -1862,7 +1871,7 @@ rpl_group_info::mark_start_commit_no_lock() { if (did_mark_start_commit) return; - mark_start_commit_inner(parallel_entry, gco); + mark_start_commit_inner(parallel_entry, gco, this); did_mark_start_commit= true; } @@ -1877,7 +1886,7 @@ rpl_group_info::mark_start_commit() e= this->parallel_entry; mysql_mutex_lock(&e->LOCK_parallel_entry); - mark_start_commit_inner(e, gco); + mark_start_commit_inner(e, gco, this); mysql_mutex_unlock(&e->LOCK_parallel_entry); did_mark_start_commit= true; } diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index d8a30e75724..96db33206d9 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -561,6 +561,10 @@ struct rpl_group_info (When we execute in parallel the transactions that group committed together on the master, we still need to wait for any prior transactions to have reached the commit stage). + + The pointed-to gco is only valid for as long as + gtid_sub_id < parallel_entry->last_committed_sub_id. After that, it can + be freed by another thread. 
*/ group_commit_orderer *gco; diff --git a/sql/sql_class.h b/sql/sql_class.h index 3b2b3d841a8..e6457f23a50 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3244,6 +3244,14 @@ public: mysql_mutex_unlock(&LOCK_thd_data); } } + inline void reset_kill_query() + { + if (killed < KILL_CONNECTION) + { + reset_killed(); + mysys_var->abort= 0; + } + } inline void send_kill_message() const { int err= killed_errno(); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 324812af8dd..5d896748d87 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -503,6 +503,15 @@ unconditional_materialization: bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived) { DBUG_ENTER("mysql_derived_merge_for_insert"); + DBUG_PRINT("enter", ("derived: %p", derived)); + DBUG_PRINT("info", ("merged_for_insert: %d is_materialized_derived: %d " + "is_multitable: %d single_table_updatable: %d " + "merge_underlying_list: %d", + derived->merged_for_insert, + derived->is_materialized_derived(), + derived->is_multitable(), + derived->single_table_updatable(), + derived->merge_underlying_list != 0)); if (derived->merged_for_insert) DBUG_RETURN(FALSE); if (derived->is_materialized_derived()) @@ -516,8 +525,9 @@ bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived) derived->table= derived->merge_underlying_list->table; derived->schema_table= derived->merge_underlying_list->schema_table; derived->merged_for_insert= TRUE; + DBUG_ASSERT(derived->table); } - } + } DBUG_RETURN(FALSE); } @@ -544,6 +554,7 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived) { SELECT_LEX_UNIT *unit= derived->get_unit(); DBUG_ENTER("mysql_derived_init"); + DBUG_PRINT("enter", ("derived: %p", derived)); // Skip already prepared views/DT if (!unit || unit->prepared) @@ -689,6 +700,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) thd->create_tmp_table_for_derived= FALSE; derived->table= derived->derived_result->table; + DBUG_ASSERT(derived->table); if (derived->is_derived() && derived->is_merged_derived()) first_select->mark_as_belong_to_derived(derived); @@ -956,8 +968,7 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_ENTER("mysql_derived_reinit"); st_select_lex_unit *unit= derived->get_unit(); - if (derived->table) - derived->merged_for_insert= FALSE; + derived->merged_for_insert= FALSE; unit->unclean(); unit->types.empty(); /* for derived tables & PS (which can't be reset by Item_subselect) */ @@ -965,4 +976,3 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived) unit->set_thd(thd); DBUG_RETURN(FALSE); } - diff --git a/sql/sql_derived.h b/sql/sql_derived.h index f232445879e..1dffef7235b 100644 --- a/sql/sql_derived.h +++ b/sql/sql_derived.h @@ -23,6 +23,7 @@ struct LEX; bool mysql_handle_derived(LEX *lex, uint phases); bool mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases); bool mysql_handle_list_of_derived(LEX *lex, TABLE_LIST *dt_list, uint phases); +bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived); /** Cleans up the SELECT_LEX_UNIT for the derived table (if any). 
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 2680019e3e2..9f4d5218b99 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -148,9 +148,11 @@ bool check_view_single_update(List<Item> &fields, List<Item> *values, if (view->check_single_table(&tbl, tables, view) || tbl == 0) goto error; + /* view->table should have been set in mysql_derived_merge_for_insert */ + DBUG_ASSERT(view->table); + /* - A buffer for the insert values was allocated for the merged view. - Use it. + Use buffer for the insert values that was allocated for the merged view. */ tbl->table->insert_values= view->table->insert_values; view->table= tbl->table; @@ -195,11 +197,12 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, table_map *map) { TABLE *table= table_list->table; + DBUG_ENTER("check_insert_fields"); if (!table_list->single_table_updatable()) { my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); - return -1; + DBUG_RETURN(-1); } if (fields.elements == 0 && values.elements != 0) @@ -208,18 +211,18 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, { my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0), table_list->view_db.str, table_list->view_name.str); - return -1; + DBUG_RETURN(-1); } if (values.elements != table->s->fields) { my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1L); - return -1; + DBUG_RETURN(-1); } #ifndef NO_EMBEDDED_ACCESS_CHECKS Field_iterator_table_ref field_it; field_it.set(table_list); if (check_grant_all_columns(thd, INSERT_ACL, &field_it)) - return -1; + DBUG_RETURN(-1); #endif /* No fields are provided so all fields must be provided in the values. @@ -237,7 +240,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, if (fields.elements != values.elements) { my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1L); - return -1; + DBUG_RETURN(-1); } thd->dup_field= 0; @@ -263,7 +266,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, thd->lex->select_lex.no_wrap_view_item= FALSE; if (res) - return -1; + DBUG_RETURN(-1); if (table_list->is_view() && table_list->is_merged_derived()) { @@ -271,14 +274,14 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, fields_and_values_from_different_maps ? 
(List<Item>*) 0 : &values, table_list, map, true)) - return -1; + DBUG_RETURN(-1); table= table_list->table; } if (check_unique && thd->dup_field) { my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dup_field->field_name); - return -1; + DBUG_RETURN(-1); } if (table->default_field) table->mark_default_fields_for_write(); @@ -296,10 +299,10 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, check_view_insertability(thd, table_list))) { my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); - return -1; + DBUG_RETURN(-1); } - return 0; + DBUG_RETURN(0); } @@ -1765,7 +1768,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int()); } - else + else if (prev_insert_id_for_cur_row) { table->file->restore_auto_increment(prev_insert_id_for_cur_row); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index daf87d7cf96..4f88f8d5c96 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1281,6 +1281,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->enable_slow_log= thd->variables.sql_log_slow; thd->query_plan_flags= QPLAN_INIT; thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */ + thd->reset_kill_query(); DEBUG_SYNC(thd,"dispatch_command_before_set_time"); @@ -5561,11 +5562,7 @@ finish: if (! thd->get_stmt_da()->is_set()) thd->send_kill_message(); } - if (thd->killed < KILL_CONNECTION) - { - thd->reset_killed(); - thd->mysys_var->abort= 0; - } + thd->reset_kill_query(); } if (thd->is_error() || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR)) trans_rollback_stmt(thd); @@ -6490,6 +6487,115 @@ bool check_global_access(THD *thd, ulong want_access, bool no_errors) #endif } + +/** + Checks foreign key's parent table access. + + @param thd [in] Thread handler + @param create_info [in] Create information (like MAX_ROWS, ENGINE or + temporary table flag) + @param alter_info [in] Initial list of columns and indexes for the + table to be created + + @retval + false ok. + @retval + true error or access denied. Error is sent to client in this case. +*/ +bool check_fk_parent_table_access(THD *thd, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) +{ + Key *key; + List_iterator<Key> key_iterator(alter_info->key_list); + + while ((key= key_iterator++)) + { + if (key->type == Key::FOREIGN_KEY) + { + TABLE_LIST parent_table; + bool is_qualified_table_name; + Foreign_key *fk_key= (Foreign_key *)key; + LEX_STRING db_name; + LEX_STRING table_name= { fk_key->ref_table.str, + fk_key->ref_table.length }; + const ulong privileges= (SELECT_ACL | INSERT_ACL | UPDATE_ACL | + DELETE_ACL | REFERENCES_ACL); + + // Check if tablename is valid or not. + DBUG_ASSERT(table_name.str != NULL); + if (check_table_name(table_name.str, table_name.length, false)) + { + my_error(ER_WRONG_TABLE_NAME, MYF(0), table_name.str); + return true; + } + + if (fk_key->ref_db.str) + { + is_qualified_table_name= true; + db_name.str= (char *) thd->memdup(fk_key->ref_db.str, + fk_key->ref_db.length+1); + db_name.length= fk_key->ref_db.length; + + // Check if database name is valid or not. + if (fk_key->ref_db.str && check_db_name(&db_name)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str); + return true; + } + } + else if (thd->lex->copy_db_to(&db_name.str, &db_name.length)) + return true; + else + is_qualified_table_name= false; + + // if lower_case_table_names is set then convert tablename to lower case. 
+ if (lower_case_table_names) + { + table_name.str= (char *) thd->memdup(fk_key->ref_table.str, + fk_key->ref_table.length+1); + table_name.length= my_casedn_str(files_charset_info, table_name.str); + } + + parent_table.init_one_table(db_name.str, db_name.length, + table_name.str, table_name.length, + table_name.str, TL_IGNORE); + + /* + Check if user has any of the "privileges" at table level on + "parent_table". + Having privilege on any of the parent_table column is not + enough so checking whether user has any of the "privileges" + at table level only here. + */ + if (check_some_access(thd, privileges, &parent_table) || + parent_table.grant.want_privilege) + { + if (is_qualified_table_name) + { + const size_t qualified_table_name_len= NAME_LEN + 1 + NAME_LEN + 1; + char *qualified_table_name= (char *) thd->alloc(qualified_table_name_len); + + my_snprintf(qualified_table_name, qualified_table_name_len, "%s.%s", + db_name.str, table_name.str); + table_name.str= qualified_table_name; + } + + my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), + "REFERENCES", + thd->security_ctx->priv_user, + thd->security_ctx->host_or_ip, + table_name.str); + + return true; + } + } + } + + return false; +} + + /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -7911,7 +8017,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user, host.str[0] == '%' means that host name was not given. See sql_yacc.yy */ if (((user->host.str[0] == '%' && !user->host.str[1]) || - !strcmp(tmp->security_ctx->host, user->host.str)) && + !strcmp(tmp->security_ctx->host_or_ip, user->host.str)) && !strcmp(tmp->security_ctx->user, user->user.str)) { if (!(thd->security_ctx->master_access & SUPER_ACL) && @@ -8566,7 +8672,9 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) goto err; } - error= FALSE; + + if (check_fk_parent_table_access(thd, &lex->create_info, &lex->alter_info)) + goto err; /* For CREATE TABLE we should not open the table even if it exists. @@ -8574,6 +8682,8 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, */ lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB; + error= FALSE; + err: DBUG_RETURN(error); } diff --git a/sql/sql_parse.h b/sql/sql_parse.h index 0620e278b79..5e1d867347c 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -46,6 +46,9 @@ bool delete_precheck(THD *thd, TABLE_LIST *tables); bool insert_precheck(THD *thd, TABLE_LIST *tables); bool create_table_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *create_table); +bool check_fk_parent_table_access(THD *thd, + HA_CREATE_INFO *create_info, + Alter_info *alter_info); bool parse_sql(THD *thd, Parser_state *parser_state, Object_creation_ctx *creation_ctx, bool do_pfs_digest=false); diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 46849e266b5..e9a0f138212 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -3989,7 +3989,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, we copy string values to a plugin's memroot. 
*/ if (mysqld_server_started && - (((*opt)->flags & (PLUGIN_VAR_STR | PLUGIN_VAR_NOCMDOPT | + (((*opt)->flags & (PLUGIN_VAR_TYPEMASK | PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_MEMALLOC)) == PLUGIN_VAR_STR)) { sysvar_str_t* str= (sysvar_str_t *)*opt; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index cbb5868623c..90c3110b63a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -291,18 +291,18 @@ static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s, void dbug_serve_apcs(THD *thd, int n_calls) { const char *save_proc_info= thd->proc_info; - /* This is so that mysqltest knows we're ready to serve requests: */ - thd_proc_info(thd, "show_explain_trap"); /* Busy-wait for n_calls APC requests to arrive and be processed */ int n_apcs= thd->apc_target.n_calls_processed + n_calls; while (thd->apc_target.n_calls_processed < n_apcs) { - my_sleep(300); + /* This is so that mysqltest knows we're ready to serve requests: */ + thd_proc_info(thd, "show_explain_trap"); + my_sleep(30000); + thd_proc_info(thd, save_proc_info); if (thd->check_killed()) break; } - thd_proc_info(thd, save_proc_info); } @@ -3043,6 +3043,7 @@ void JOIN::exec_inner() const ha_rows select_limit_arg= select_options & OPTION_FOUND_ROWS ? HA_POS_ERROR : unit->select_limit_cnt; + curr_join->filesort_found_rows= filesort_limit_arg != HA_POS_ERROR; DBUG_PRINT("info", ("has_group_by %d " "curr_join->table_count %d " @@ -3089,7 +3090,8 @@ void JOIN::exec_inner() *curr_fields_list), Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); error= do_select(curr_join, curr_fields_list, NULL, procedure); - if (curr_join->order && curr_join->filesort_found_rows) + if (curr_join->order && curr_join->sortorder && + curr_join->filesort_found_rows) { /* Use info provided by filesort. */ DBUG_ASSERT(curr_join->table_count > curr_join->const_tables); @@ -10066,7 +10068,7 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys) else { /* Mark keyuses for this key to be excluded */ - for (KEYUSE *curr=save_first_keyuse; curr < first_keyuse; curr++) + for (KEYUSE *curr=save_first_keyuse; curr < keyuse; curr++) { curr->key= MAX_KEY; } @@ -12295,8 +12297,8 @@ public: { TRASH(ptr, size); } Item *and_level; - Item_func *cmp_func; - COND_CMP(Item *a,Item_func *b) :and_level(a),cmp_func(b) {} + Item_bool_func2 *cmp_func; + COND_CMP(Item *a,Item_bool_func2 *b) :and_level(a),cmp_func(b) {} }; /** @@ -13685,6 +13687,75 @@ static void update_const_equal_items(COND *cond, JOIN_TAB *tab, bool const_key) } +/** + Check if + WHERE expr=value AND expr=const + can be rewritten as: + WHERE const=value AND expr=const + + @param target - the target operator whose "expr" argument will be + replaced to "const". + @param target_expr - the target's "expr" which will be replaced to "const". + @param target_value - the target's second argument, it will remain unchanged. + @param source - the equality expression ("=" or "<=>") that + can be used to rewrite the "target" part + (under certain conditions, see the code). + @param source_expr - the source's "expr". It should be exactly equal to + the target's "expr" to make condition rewrite possible. + @param source_const - the source's "const" argument, it will be inserted + into "target" instead of "expr". 
+*/ +static bool +can_change_cond_ref_to_const(Item_bool_func2 *target, + Item *target_expr, Item *target_value, + Item_bool_func2 *source, + Item *source_expr, Item *source_const) +{ + if (!target_expr->eq(source_expr,0) || + target_value == source_const || + target_expr->cmp_context != source_expr->cmp_context) + return false; + if (target_expr->cmp_context == STRING_RESULT) + { + /* + In this example: + SET NAMES utf8 COLLATE utf8_german2_ci; + DROP TABLE IF EXISTS t1; + CREATE TABLE t1 (a CHAR(10) CHARACTER SET utf8); + INSERT INTO t1 VALUES ('o-umlaut'),('oe'); + SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe'; + + the query should return only the row with 'oe'. + It should not return 'o-umlaut', because 'o-umlaut' does not match + the right part of the condition: a='oe' + ('o-umlaut' is not equal to 'oe' in utf8_general_ci, + which is the collation of the field "a"). + + If we change the right part from: + ... AND a='oe' + to + ... AND 'oe' COLLATE utf8_german2_ci='oe' + it will be evalulated to TRUE and removed from the condition, + so the overall query will be simplified to: + + SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci; + + which will erroneously start to return both 'oe' and 'o-umlaut'. + So changing "expr" to "const" is not possible if the effective + collations of "target" and "source" are not exactly the same. + + Note, the code before the fix for MDEV-7152 only checked that + collations of "source_const" and "target_value" are the same. + This was not enough, as the bug report demonstrated. + */ + return + target->compare_collation() == source->compare_collation() && + target_value->collation.collation == source_const->collation.collation; + } + return true; // Non-string comparison +} + + /* change field = field to field = const for each found field = const in the and_level @@ -13693,6 +13764,7 @@ static void update_const_equal_items(COND *cond, JOIN_TAB *tab, bool const_key) static void change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, Item *and_father, Item *cond, + Item_bool_func2 *field_value_owner, Item *field, Item *value) { if (cond->type() == Item::COND_ITEM) @@ -13703,7 +13775,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, Item *item; while ((item=li++)) change_cond_ref_to_const(thd, save_list,and_level ? 
cond : item, item, - field, value); + field_value_owner, field, value); return; } if (cond->eq_cmp_result() == Item::COND_OK) @@ -13715,11 +13787,8 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, Item *right_item= args[1]; Item_func::Functype functype= func->functype(); - if (right_item->eq(field,0) && left_item != value && - right_item->cmp_context == field->cmp_context && - (left_item->result_type() != STRING_RESULT || - value->result_type() != STRING_RESULT || - left_item->collation.collation == value->collation.collation)) + if (can_change_cond_ref_to_const(func, right_item, left_item, + field_value_owner, field, value)) { Item *tmp=value->clone_item(); if (tmp) @@ -13738,11 +13807,8 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, func->set_cmp_func(); } } - else if (left_item->eq(field,0) && right_item != value && - left_item->cmp_context == field->cmp_context && - (right_item->result_type() != STRING_RESULT || - value->result_type() != STRING_RESULT || - right_item->collation.collation == value->collation.collation)) + else if (can_change_cond_ref_to_const(func, left_item, right_item, + field_value_owner, field, value)) { Item *tmp= value->clone_item(); if (tmp) @@ -13791,7 +13857,8 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, Item **args= cond_cmp->cmp_func->arguments(); if (!args[0]->const_item()) change_cond_ref_to_const(thd, &save,cond_cmp->and_level, - cond_cmp->and_level, args[0], args[1]); + cond_cmp->and_level, + cond_cmp->cmp_func, args[0], args[1]); } } } @@ -13813,14 +13880,14 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, resolve_const_item(thd, &args[1], args[0]); func->update_used_tables(); change_cond_ref_to_const(thd, save_list, and_father, and_father, - args[0], args[1]); + func, args[0], args[1]); } else if (left_const) { resolve_const_item(thd, &args[0], args[1]); func->update_used_tables(); change_cond_ref_to_const(thd, save_list, and_father, and_father, - args[1], args[0]); + func, args[1], args[0]); } } } @@ -18931,7 +18998,8 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), records are read. Because of optimization in some cases it can provide only select_limit_cnt+1 records. 
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 044b60d7b12..a43eae0d441 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -4029,7 +4029,7 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
     Again we don't do this for SHOW COLUMNS/KEYS because of backward
     compatibility.
   */
-  if (!is_show_fields_or_keys && result && thd->is_error() &&
+  if (!is_show_fields_or_keys && result &&
      (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
       thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT))
  {
@@ -5005,12 +5005,11 @@ err:
       column with the error text, and clear the error so that the operation
       can continue.
     */
-    const char *error= thd->is_error() ? thd->get_stmt_da()->message() : "";
+    const char *error= thd->get_stmt_da()->message();
     table->field[20]->store(error, strlen(error), cs);
     push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
-                 thd->get_stmt_da()->sql_errno(),
-                 thd->get_stmt_da()->message());
+                 thd->get_stmt_da()->sql_errno(), error);
     thd->clear_error();
   }
@@ -7865,12 +7864,13 @@ bool get_schema_tables_result(JOIN *join,
   THD *thd= join->thd;
   LEX *lex= thd->lex;
   bool result= 0;
-  const char *old_proc_info;
+  PSI_stage_info org_stage;
   DBUG_ENTER("get_schema_tables_result");
 
   Warnings_only_error_handler err_handler;
   thd->push_internal_handler(&err_handler);
-  old_proc_info= thd_proc_info(thd, "Filling schema table");
+  thd->enter_stage(&stage_filling_schema_table, &org_stage, __func__, __FILE__,
+                   __LINE__);
 
   JOIN_TAB *tab;
   for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES);
@@ -7974,7 +7974,7 @@ bool get_schema_tables_result(JOIN *join,
   }
   else if (result)
     my_error(ER_UNKNOWN_ERROR, MYF(0));
-  thd_proc_info(thd, old_proc_info);
+  THD_STAGE_INFO(thd, org_stage);
   DBUG_RETURN(result);
 }
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index d368145ca73..4ce1f3ec22a 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -2355,9 +2355,15 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
   int rc= 0;
   KEY *key_info= &table->key_info[index];
   ha_rows rows= 0;
-  Index_prefix_calc index_prefix_calc(table, key_info);
+  DBUG_ENTER("collect_statistics_for_index");
+  /* No statistics for FULLTEXT indexes. */
+  if (key_info->flags & HA_FULLTEXT)
+    DBUG_RETURN(rc);
+
+  Index_prefix_calc index_prefix_calc(table, key_info);
+
   DEBUG_SYNC(table->in_use, "statistics_collection_start1");
   DEBUG_SYNC(table->in_use, "statistics_collection_start2");
@@ -2391,7 +2397,7 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
   if (!rc)
     index_prefix_calc.get_avg_frequency();
 
-  DBUG_RETURN(rc);
+  DBUG_RETURN(rc);
 }
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 729030ee064..e61d108731e 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -8360,9 +8360,21 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
   }
 
   /*
-    If this is an ALTER TABLE and no explicit row type specified reuse
-    the table's row type.
-    Note : this is the same as if the row type was specified explicitly.
+    If foreign key is added then check permission to access parent table.
+
+    In function "check_fk_parent_table_access", create_info->db_type is used
+    to identify whether engine supports FK constraint or not. Since
+    create_info->db_type is set here, check to parent table access is delayed
+    till this point for the alter operation.
+  */
+  if ((alter_info->flags & Alter_info::ADD_FOREIGN_KEY) &&
+      check_fk_parent_table_access(thd, create_info, alter_info))
+    DBUG_RETURN(true);
+
+  /*
+    If this is an ALTER TABLE and no explicit row type specified reuse
+    the table's row type.
+    Note: this is the same as if the row type was specified explicitly.
   */
   if (create_info->row_type == ROW_TYPE_NOT_USED)
   {
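The comment in the hunk above explains the ordering constraint: the parent-table privilege check for ADD FOREIGN KEY has to wait until create_info->db_type holds the resolved storage engine. A simplified, self-contained sketch of that sequencing; the enum, helpers and their behaviour are illustrative assumptions, not the mysql_alter_table() internals:

```cpp
// Sequencing sketch only; the enum values and helper behaviour are
// illustrative assumptions, not the real ALTER TABLE code path.
#include <iostream>

enum class Engine { UNKNOWN, INNODB, MYISAM };

struct CreateInfo { Engine db_type; };    // resolved late, like create_info->db_type

static bool engine_supports_fk(Engine e) { return e == Engine::INNODB; }

// Pretend privilege check on the referenced (parent) tables; always grants here.
static bool parent_access_denied(Engine) { return false; }

// Returns true on error. Meaningful only once db_type has been resolved,
// which is why the real check sits late in mysql_alter_table().
static bool check_add_foreign_key(const CreateInfo &ci, bool adds_fk)
{
  if (!adds_fk)
    return false;
  if (!engine_supports_fk(ci.db_type))
    return false;                         // nothing to verify for this engine
  return parent_access_denied(ci.db_type);
}

int main()
{
  CreateInfo ci{Engine::INNODB};          // known only after defaults are applied
  std::cout << (check_add_foreign_key(ci, true) ? "denied" : "ok") << "\n";
}
```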
@@ -9488,12 +9500,12 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
 
 /*
-  Recreates tables by calling mysql_alter_table().
+  Recreates one table by calling mysql_alter_table().
 
   SYNOPSIS
     mysql_recreate_table()
     thd                 Thread handler
-    tables              Tables to recreate
+    table_list          Table to recreate
     table_copy          Recreate the table by using ALTER TABLE COPY algorithm
 
@@ -9505,13 +9517,15 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
 {
   HA_CREATE_INFO create_info;
   Alter_info alter_info;
-  DBUG_ENTER("mysql_recreate_table");
-  DBUG_ASSERT(!table_list->next_global);
+  TABLE_LIST *next_table= table_list->next_global;
+  DBUG_ENTER("mysql_recreate_table");
 
   /* Set lock type which is appropriate for ALTER TABLE. */
   table_list->lock_type= TL_READ_NO_INSERT;
   /* Same applies to MDL request. */
   table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE);
+  /* hide following tables from open_tables() */
+  table_list->next_global= NULL;
 
   bzero((char*) &create_info, sizeof(create_info));
   create_info.row_type=ROW_TYPE_NOT_USED;
@@ -9523,9 +9537,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
   if (table_copy)
     alter_info.requested_algorithm= Alter_info::ALTER_TABLE_ALGORITHM_COPY;
 
-  DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
+  bool res= mysql_alter_table(thd, NullS, NullS, &create_info,
                                 table_list, &alter_info, 0,
-                                (ORDER *) 0, 0));
+                                (ORDER *) 0, 0);
+  table_list->next_global= next_table;
+  DBUG_RETURN(res);
 }
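mysql_recreate_table() now hides the rest of the global table list so that the nested mysql_alter_table() opens exactly one table, and restores the pointer before returning. The save/hide/restore pattern in isolation, with a hypothetical list node instead of the real TABLE_LIST:

```cpp
// Sketch of saving, hiding and restoring the next_global pointer.
#include <iostream>
#include <string>

struct Node                                 // hypothetical stand-in for TABLE_LIST
{
  std::string name;
  Node *next_global;
};

static int count_visible(const Node *head)  // stand-in for what open_tables() walks
{
  int n= 0;
  for (const Node *p= head; p; p= p->next_global)
    n++;
  return n;
}

static void recreate_one(Node *table)
{
  Node *next_table= table->next_global;     // remember the rest of the list
  table->next_global= nullptr;              // hide following tables
  std::cout << "opens " << count_visible(table) << " table(s)\n";   // 1
  table->next_global= next_table;           // restore before returning
}

int main()
{
  Node t2{"t2", nullptr};
  Node t1{"t1", &t2};
  recreate_one(&t1);
  std::cout << "list intact: " << count_visible(&t1) << " nodes\n"; // 2
}
```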
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index b37750bae71..a3d2b1bc4ac 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -34,6 +34,7 @@
 #include "sp.h"
 #include "sp_cache.h"
 #include "datadict.h"   // dd_frm_is_view()
+#include "sql_derived.h"
 
 #define MD5_BUFF_LENGTH 33
 
@@ -690,7 +691,7 @@ err:
 
 /* number of required parameters for making view */
-static const int required_view_parameters= 14;
+static const int required_view_parameters= 15;
 
 /*
   table of VIEW .frm field descriptors
@@ -741,6 +742,9 @@ static File_option view_parameters[]=
  {{(char*) STRING_WITH_LEN("view_body_utf8")},
   my_offsetof(TABLE_LIST, view_body_utf8),
   FILE_OPTIONS_ESTRING},
+ {{ C_STRING_WITH_LEN("mariadb-version")},
+  my_offsetof(TABLE_LIST, mariadb_version),
+  FILE_OPTIONS_ULONGLONG},
  {{NullS, 0}, 0,
   FILE_OPTIONS_STRING}
 };
@@ -841,6 +845,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
       version 2 - empty definer_host means a role
     */
     view->file_version= 2;
+    view->mariadb_version= MYSQL_VERSION_ID;
     view->calc_md5(md5);
     if (!(view->md5.str= (char*) thd->memdup(md5, 32)))
     {
@@ -1079,6 +1084,15 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
     DBUG_PRINT("info",
                ("VIEW %s.%s is already processed on previous PS/SP execution",
                 table->view_db.str, table->view_name.str));
+
+    /*
+      Clear old variables in the TABLE_LIST that could be left from an old view.
+      This is only needed if there was an error at last usage of view,
+      in which case the reinit call wasn't done.
+      See MDEV-6668 for details.
+    */
+    mysql_derived_reinit(thd, NULL, table);
+
     DBUG_RETURN(0);
   }
 
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 04be3f10a19..c3f8fdc2e23 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -3556,13 +3556,13 @@ static Sys_var_bit Sys_log_off(
 static bool fix_sql_log_bin_after_update(sys_var *self, THD *thd,
                                          enum_var_type type)
 {
-  if (type == OPT_SESSION)
-  {
-    if (thd->variables.sql_log_bin)
-      thd->variables.option_bits |= OPTION_BIN_LOG;
-    else
-      thd->variables.option_bits &= ~OPTION_BIN_LOG;
-  }
+  DBUG_ASSERT(type == OPT_SESSION);
+
+  if (thd->variables.sql_log_bin)
+    thd->variables.option_bits |= OPTION_BIN_LOG;
+  else
+    thd->variables.option_bits &= ~OPTION_BIN_LOG;
+
   return FALSE;
 }
 
@@ -3584,7 +3584,10 @@ static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var)
     return TRUE;
 
   if (var->type == OPT_GLOBAL)
-    return FALSE;
+  {
+    my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), self->name.str, "SESSION");
+    return TRUE;
+  }
 
   if (error_if_in_trans_or_substatement(thd,
          ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN,
@@ -3595,9 +3598,9 @@ static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var)
 }
 
 static Sys_var_mybool Sys_log_binlog(
-       "sql_log_bin", "sql_log_bin",
-       SESSION_VAR(sql_log_bin), NO_CMD_LINE,
-       DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_sql_log_bin),
+       "sql_log_bin", "Controls whether logging to the binary log is done",
+       SESSION_VAR(sql_log_bin), NO_CMD_LINE, DEFAULT(TRUE),
+       NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_sql_log_bin),
        ON_UPDATE(fix_sql_log_bin_after_update));
 
 static Sys_var_bit Sys_sql_warnings(
@@ -4878,7 +4881,7 @@ static Sys_var_ulong Sys_sp_cache_size(
       "The soft upper limit for number of cached stored routines for "
       "one connection.",
       GLOBAL_VAR(stored_program_cache_size), CMD_LINE(REQUIRED_ARG),
-       VALID_RANGE(256, 512 * 1024), DEFAULT(256), BLOCK_SIZE(1));
+       VALID_RANGE(0, 512 * 1024), DEFAULT(256), BLOCK_SIZE(1));
 
 export const char *plugin_maturity_names[]=
 { "unknown", "experimental", "alpha", "beta", "gamma", "stable", 0 };
diff --git a/sql/table.cc b/sql/table.cc
index d0cffc8e78e..14f61837489 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -827,6 +827,24 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
 }
 
 
+/** ensures that the enum value (read from frm) is within limits
+
+    if not - issues a warning and resets the value to 0
+    (that is, 0 is assumed to be a default value)
+*/
+
+static uint enum_value_with_check(THD *thd, TABLE_SHARE *share,
+                                  const char *name, uint value, uint limit)
+{
+  if (value < limit)
+    return value;
+
+  sql_print_warning("%s.frm: invalid value %d for the field %s",
+                    share->normalized_path.str, value, name);
+  return 0;
+}
+
+
 /**
   Check if a collation has changed number
 
@@ -836,8 +854,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
   @retval new collation number (same as current collation number if no change)
 */
 
-static uint
-upgrade_collation(ulong mysql_version, uint cs_number)
+static uint upgrade_collation(ulong mysql_version, uint cs_number)
 {
   if (mysql_version >= 50300 && mysql_version <= 50399)
   {
@@ -861,8 +878,6 @@ upgrade_collation(ulong mysql_version, uint cs_number)
 }
 
-
-
 /**
   Read data from a binary .frm file image into a TABLE_SHARE
 
@@ -1058,9 +1073,12 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       share->incompatible_version|= HA_CREATE_USED_CHARSET;
 
     share->avg_row_length= uint4korr(frm_image+34);
-    share->transactional= (ha_choice) (frm_image[39] & 3);
-    share->page_checksum= (ha_choice) ((frm_image[39] >> 2) & 3);
-    share->row_type= (enum row_type) frm_image[40];
+    share->transactional= (ha_choice)
+      enum_value_with_check(thd, share, "transactional", frm_image[39] & 3, HA_CHOICE_MAX);
+    share->page_checksum= (ha_choice)
+      enum_value_with_check(thd, share, "page_checksum", (frm_image[39] >> 2) & 3, HA_CHOICE_MAX);
+    share->row_type= (enum row_type)
+      enum_value_with_check(thd, share, "row_format", frm_image[40], ROW_TYPE_MAX);
     if (cs_new && !(share->table_charset= get_charset(cs_new, MYF(MY_WME))))
       goto err;
 
@@ -4719,23 +4737,26 @@ bool TABLE_LIST::check_single_table(TABLE_LIST **table_arg,
 
 bool TABLE_LIST::set_insert_values(MEM_ROOT *mem_root)
 {
+  DBUG_ENTER("set_insert_values");
   if (table)
   {
+    DBUG_PRINT("info", ("setting insert_value for table"));
     if (!table->insert_values &&
        !(table->insert_values= (uchar *)alloc_root(mem_root,
                                                    table->s->rec_buff_length)))
-      return TRUE;
+      DBUG_RETURN(TRUE);
   }
   else
   {
+    DBUG_PRINT("info", ("setting insert_value for view"));
    DBUG_ASSERT(is_view_or_derived() && is_merged_derived());
    for (TABLE_LIST *tbl= (TABLE_LIST*)view->select_lex.table_list.first;
         tbl;
         tbl= tbl->next_local)
      if (tbl->set_insert_values(mem_root))
-        return TRUE;
+        DBUG_RETURN(TRUE);
  }
-  return FALSE;
+  DBUG_RETURN(FALSE);
 }
 
 
@@ -6888,15 +6909,16 @@ void TABLE_LIST::reset_const_table()
 
 bool TABLE_LIST::handle_derived(LEX *lex, uint phases)
 {
-  SELECT_LEX_UNIT *unit= get_unit();
-  if (unit)
+  SELECT_LEX_UNIT *unit;
+  DBUG_ENTER("handle_derived");
+  if ((unit= get_unit()))
   {
     for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
       if (sl->handle_derived(lex, phases))
-        return TRUE;
-    return mysql_handle_single_derived(lex, this, phases);
+        DBUG_RETURN(TRUE);
+    DBUG_RETURN(mysql_handle_single_derived(lex, this, phases));
   }
-  return FALSE;
+  DBUG_RETURN(FALSE);
 }
 
 
diff --git a/sql/table.h b/sql/table.h
index 3720d2e6b56..b020223854b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1882,6 +1882,7 @@ struct TABLE_LIST
   LEX_STRING timestamp;         /* GMT time stamp of last operation */
   st_lex_user definer;          /* definer of view */
   ulonglong file_version;       /* version of file's field set */
+  ulonglong mariadb_version;    /* version of server on creation */
   ulonglong updatable_view;     /* VIEW can be updated */
   /**
      @brief The declared algorithm, if this is a view.