author     Michael Widenius <monty@askmonty.org>   2010-08-05 22:56:11 +0300
committer  Michael Widenius <monty@askmonty.org>   2010-08-05 22:56:11 +0300
commit     d042146e5b41b6222df3e6b4c16a44f3ef2939b7 (patch)
tree       e40f5b435f7d17ed1c24dfb787ab8cd5f4f7d5d8 /sql
parent     3e610bc58d0ba325fdca3b06dfa976ecc513dc1e (diff)
parent     f0f21036951f6fed2fddeb58375425f957751fd5 (diff)
Merge with MariaDB 5.1.49
Removed references to HA_END_SPACE_KEY (which has been 0 for a long time)
Diffstat (limited to 'sql')
65 files changed, 2300 insertions, 1522 deletions
diff --git a/sql/authors.h b/sql/authors.h index 4a321bebb7d..bab319e3dcb 100644 --- a/sql/authors.h +++ b/sql/authors.h @@ -86,6 +86,7 @@ struct show_table_authors_st show_table_authors[]= { { "Eric Herman", "Amsterdam, Netherlands", "Bug fixing - federated" }, { "Andrey Hristov", "Walldorf, Germany", "Event scheduler (5.1)" }, { "Alexander (Alexi) Ivanov", "St. Petersburg, Russia", "Replication" }, + { "Mattias Jonsson", "Uppsala, Sweden", "Partitioning" }, { "Alexander (Salle) Keremidarski", "Sofia, Bulgaria", "Bug fixing" }, { "Mats Kindahl", "Storvreta, Sweden", "Replication" }, diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 9b428ed6ec9..959f6ec06c0 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -833,8 +833,9 @@ bool get_next_time(const Time_zone *time_zone, my_time_t *next, } else { - long diff_months= (long) (local_now.year - local_start.year)*12 + - (local_now.month - local_start.month); + long diff_months= ((long) local_now.year - (long) local_start.year)*12 + + ((long) local_now.month - (long) local_start.month); + /* Unlike for seconds above, the formula below returns the interval that, when added to the local_start, will give the time in the @@ -1432,7 +1433,10 @@ Event_job_data::execute(THD *thd, bool drop) thd->set_query(sp_sql.c_ptr_safe(), sp_sql.length()); { - Parser_state parser_state(thd, thd->query(), thd->query_length()); + Parser_state parser_state; + if (parser_state.init(thd, thd->query(), thd->query_length())) + goto end; + lex_start(thd); if (parse_sql(thd, & parser_state, creation_ctx)) diff --git a/sql/field.cc b/sql/field.cc index f621dab7539..597ec4f68d0 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5313,7 +5313,6 @@ String *Field_time::val_str(String *val_buffer, bool Field_time::get_date(MYSQL_TIME *ltime, uint fuzzydate) { - long tmp; THD *thd= table ? 
table->in_use : current_thd; if (!(fuzzydate & TIME_FUZZY_DATE)) { @@ -5323,19 +5322,7 @@ bool Field_time::get_date(MYSQL_TIME *ltime, uint fuzzydate) thd->row_count); return 1; } - tmp=(long) sint3korr(ptr); - ltime->neg=0; - if (tmp < 0) - { - ltime->neg= 1; - tmp=-tmp; - } - ltime->hour=tmp/10000; - tmp-=ltime->hour*10000; - ltime->minute= tmp/100; - ltime->second= tmp % 100; - ltime->year= ltime->month= ltime->day= ltime->second_part= 0; - return 0; + return Field_time::get_time(ltime); } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 0d60832b085..2bd2eabb58b 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -59,6 +59,8 @@ #include <mysql/plugin.h> +#include "debug_sync.h" + static const char *ha_par_ext= ".par"; #ifdef NOT_USED static int free_share(PARTITION_SHARE * share); @@ -87,7 +89,9 @@ static int partition_initialize(void *p) partition_hton->create= partition_create_handler; partition_hton->partition_flags= partition_flags; partition_hton->alter_table_flags= alter_table_flags; - partition_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; + partition_hton->flags= HTON_NOT_USER_SELECTABLE | + HTON_HIDDEN | + HTON_TEMPORARY_NOT_SUPPORTED; return 0; } @@ -356,7 +360,7 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) } else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) { - mem_alloc_error(2); + my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); } /* @@ -691,6 +695,7 @@ int ha_partition::rename_partitions(const char *path) DBUG_ASSERT(!strcmp(path, get_canonical_filename(m_file[0], path, norm_name_buff))); + DEBUG_SYNC(ha_thd(), "before_rename_partitions"); if (temp_partitions) { /* @@ -1835,6 +1840,13 @@ uint ha_partition::del_ren_cre_table(const char *from, handler **file, **abort_file; DBUG_ENTER("del_ren_cre_table()"); + /* Not allowed to create temporary partitioned tables */ + if (create_info && create_info->options & HA_LEX_CREATE_TMP_TABLE) + { + my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); + DBUG_RETURN(TRUE); + } + if (get_from_handler_file(from, ha_thd()->mem_root)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); @@ -2610,6 +2622,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(0); err_handler: + DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); bitmap_free(&m_bulk_insert_started); diff --git a/sql/handler.cc b/sql/handler.cc index 2b931b6f5c1..10f14be26ae 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -59,6 +59,7 @@ static const LEX_STRING sys_table_aliases[]= { C_STRING_WITH_LEN("NDB") }, { C_STRING_WITH_LEN("NDBCLUSTER") }, { C_STRING_WITH_LEN("HEAP") }, { C_STRING_WITH_LEN("MEMORY") }, { C_STRING_WITH_LEN("MERGE") }, { C_STRING_WITH_LEN("MRG_MYISAM") }, + { C_STRING_WITH_LEN("Aria") }, { C_STRING_WITH_LEN("Maria") }, {NullS, 0} }; diff --git a/sql/handler.h b/sql/handler.h index 41af87eb47d..ad4743b78b1 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1032,7 +1032,7 @@ typedef struct st_ha_create_information ulong avg_row_length; ulong used_fields; ulong key_block_size; - SQL_LIST merge_list; + SQL_I_List<TABLE_LIST> merge_list; handlerton *db_type; /** Row type of the table definition. 
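
The diff_months hunk in event_data_objects.cc above fixes an unsigned-wraparound bug: year and month are unsigned in MYSQL_TIME, so subtracting them directly can never go negative. A minimal standalone sketch of why each field is cast to long before subtracting, assuming a stand-in struct rather than the server's real MYSQL_TIME:

// A minimal sketch: year and month are unsigned, so "now - start" wraps
// to a huge positive value when now is earlier than start. Casting each
// field to long first, as the patched code does, keeps the result signed.
#include <cstdio>

struct mini_time { unsigned int year, month; };  // stand-in for MYSQL_TIME

long diff_months(const mini_time &now, const mini_time &start)
{
  return ((long) now.year - (long) start.year) * 12 +
         ((long) now.month - (long) start.month);
}

int main()
{
  mini_time now= { 2010, 3 }, start= { 2010, 7 };
  // Prints -4; without the casts, the month term 3 - 7 would wrap to
  // 4294967292 on a 32-bit unsigned int before any conversion to long.
  printf("%ld\n", diff_months(now, start));
  return 0;
}
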
diff --git a/sql/item.cc b/sql/item.cc index c670898a91f..2edeb4c0ee0 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3865,7 +3865,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) { Item **group_by_ref= NULL; Item **select_ref= NULL; - ORDER *group_list= (ORDER*) select->group_list.first; + ORDER *group_list= select->group_list.first; bool ambiguous_fields= FALSE; uint counter; enum_resolution_type resolution; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 43958b31212..36a999c9e76 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2786,6 +2786,8 @@ Item *Item_func_case::find_item(String *str) /* Compare every WHEN argument with it and return the first match */ for (uint i=0 ; i < ncases ; i+=2) { + if (args[i]->real_item()->type() == NULL_ITEM) + continue; cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); DBUG_ASSERT(cmp_type != ROW_RESULT); DBUG_ASSERT(cmp_items[(uint)cmp_type]); @@ -4024,9 +4026,17 @@ longlong Item_func_in::val_int() return (longlong) (!null_value && tmp != negated); } + if ((null_value= args[0]->real_item()->type() == NULL_ITEM)) + return 0; + have_null= 0; for (uint i= 1 ; i < arg_count ; i++) { + if (args[i]->real_item()->type() == NULL_ITEM) + { + have_null= TRUE; + continue; + } Item_result cmp_type= item_cmp_type(left_result_type, args[i]->result_type()); in_item= cmp_items[(uint)cmp_type]; DBUG_ASSERT(in_item); @@ -4590,13 +4600,14 @@ Item_func::optimize_type Item_func_like::select_optimize() const if (args[1]->const_item()) { String* res2= args[1]->val_str((String *)&cmp.value2); + const char *ptr2; - if (!res2) + if (!res2 || !(ptr2= res2->ptr())) return OPTIMIZE_NONE; - if (*res2->ptr() != wild_many) + if (*ptr2 != wild_many) { - if (args[0]->result_type() != STRING_RESULT || *res2->ptr() != wild_one) + if (args[0]->result_type() != STRING_RESULT || *ptr2 != wild_one) return OPTIMIZE_OP; } } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index d1c1d12520e..afde48b56f7 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1313,8 +1313,8 @@ public: else { args[0]->update_used_tables(); - if ((const_item_cache= !(used_tables_cache= args[0]->used_tables())) && - !with_subselect) + if ((const_item_cache= !(used_tables_cache= args[0]->used_tables()) && + !with_subselect)) { /* Remember if the value is always NULL or never NULL */ cached_value= (longlong) args[0]->is_null(); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 82bb02f362e..a82c93304e6 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -268,12 +268,12 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery, if (item->walk(processor, walk_subquery, argument)) return 1; } - for (order= (ORDER*) lex->order_list.first ; order; order= order->next) + for (order= lex->order_list.first ; order; order= order->next) { if ((*order->item)->walk(processor, walk_subquery, argument)) return 1; } - for (order= (ORDER*) lex->group_list.first ; order; order= order->next) + for (order= lex->group_list.first ; order; order= order->next) { if ((*order->item)->walk(processor, walk_subquery, argument)) return 1; @@ -1810,15 +1810,15 @@ int subselect_single_select_engine::prepare() SELECT_LEX *save_select= thd->lex->current_select; thd->lex->current_select= select_lex; if (join->prepare(&select_lex->ref_pointer_array, - (TABLE_LIST*) select_lex->table_list.first, + select_lex->table_list.first, select_lex->with_wild, select_lex->where, select_lex->order_list.elements + 
select_lex->group_list.elements, - (ORDER*) select_lex->order_list.first, - (ORDER*) select_lex->group_list.first, + select_lex->order_list.first, + select_lex->group_list.first, select_lex->having, - (ORDER*) 0, select_lex, + NULL, select_lex, select_lex->master_unit())) return 1; thd->lex->current_select= save_select; @@ -2482,14 +2482,13 @@ table_map subselect_engine::calc_const_tables(TABLE_LIST *table) table_map subselect_single_select_engine::upper_select_const_tables() { - return calc_const_tables((TABLE_LIST *) select_lex->outer_select()-> - leaf_tables); + return calc_const_tables(select_lex->outer_select()->leaf_tables); } table_map subselect_union_engine::upper_select_const_tables() { - return calc_const_tables((TABLE_LIST *) unit->outer_select()->leaf_tables); + return calc_const_tables(unit->outer_select()->leaf_tables); } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index d87080a2fee..2f79fd65ff3 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -2964,7 +2964,7 @@ int dump_leaf_key(uchar* key, element_count count __attribute__((unused)), Item_func_group_concat:: Item_func_group_concat(Name_resolution_context *context_arg, bool distinct_arg, List<Item> *select_list, - SQL_LIST *order_list, String *separator_arg) + SQL_I_List<ORDER> *order_list, String *separator_arg) :tmp_table_param(0), warning(0), separator(separator_arg), tree(0), unique_filter(NULL), table(0), order(0), context(context_arg), @@ -3008,7 +3008,7 @@ Item_func_group_concat(Name_resolution_context *context_arg, if (arg_count_order) { ORDER **order_ptr= order; - for (ORDER *order_item= (ORDER*) order_list->first; + for (ORDER *order_item= order_list->first; order_item != NULL; order_item= order_item->next) { diff --git a/sql/item_sum.h b/sql/item_sum.h index 2922d6c9644..7720de2450f 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -1248,7 +1248,7 @@ class Item_func_group_concat : public Item_sum public: Item_func_group_concat(Name_resolution_context *context_arg, bool is_distinct, List<Item> *is_select, - SQL_LIST *is_order, String *is_separator); + SQL_I_List<ORDER> *is_order, String *is_separator); Item_func_group_concat(THD *thd, Item_func_group_concat *item); ~Item_func_group_concat(); diff --git a/sql/log.cc b/sql/log.cc index 6094836c031..570418d97dd 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1474,11 +1474,6 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, trx_data->has_incident()); trx_data->reset(); - /* - We need to step the table map version after writing the - transaction cache to disk. - */ - mysql_bin_log.update_table_map_version(); statistic_increment(binlog_cache_use, &LOCK_status); if (trans_log->disk_writes != 0) { @@ -1504,13 +1499,6 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, } else // ...statement trx_data->truncate(trx_data->before_stmt_pos); - - /* - We need to step the table map version on a rollback to ensure - that a new table map event is generated instead of the one that - was written to the thrown-away transaction cache. - */ - mysql_bin_log.update_table_map_version(); } DBUG_ASSERT(thd->binlog_get_pending_rows_event() == NULL); @@ -1556,28 +1544,23 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) } /* - We commit the transaction if: - - - We are not in a transaction and committing a statement, or + We flush the cache if: - - We are in a transaction and a full transaction is committed + - we are committing a transaction or; + - no statement was committed before and just non-transactional + tables were updated. 
- Otherwise, we accumulate the statement + Otherwise, we collect the changes. */ - ulonglong const in_transaction= - thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN); DBUG_PRINT("debug", - ("all: %d, empty: %s, in_transaction: %s, all.modified_non_trans_table: %s, stmt.modified_non_trans_table: %s", + ("all: %d, empty: %s, all.modified_non_trans_table: %s, stmt.modified_non_trans_table: %s", all, YESNO(trx_data->empty()), - YESNO(in_transaction), YESNO(thd->transaction.all.modified_non_trans_table), YESNO(thd->transaction.stmt.modified_non_trans_table))); - - if (!in_transaction || all || - (!all && !trx_data->at_least_one_stmt_committed && - !stmt_has_updated_trans_table(thd) && - thd->transaction.stmt.modified_non_trans_table)) + if (ending_trans(thd, all) || + (trans_has_no_stmt_committed(thd, all) && + !stmt_has_updated_trans_table(thd) && stmt_has_updated_non_trans_table(thd))) { Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, TRUE, 0); error= binlog_end_trans(thd, trx_data, &qev, all); @@ -1640,7 +1623,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) On the other hand, if a statement is transactional, we just safely roll it back. */ - if ((thd->transaction.stmt.modified_non_trans_table || + if ((stmt_has_updated_non_trans_table(thd) || (thd->options & OPTION_KEEP_LOG)) && mysql_bin_log.check_write_error(thd)) trx_data->set_incident(); @@ -1649,20 +1632,19 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) else { /* - We flush the cache with a rollback, wrapped in a beging/rollback if: - . aborting a transaction that modified a non-transactional table; + We flush the cache with a rollback, wrapped in a begin/rollback if: + . aborting a transaction that modified a non-transactional table or + the OPTION_KEEP_LOG is activate. . aborting a statement that modified both transactional and non-transactional tables but which is not in the boundaries of any transaction or there was no early change; - . the OPTION_KEEP_LOG is activate. */ - if ((all && thd->transaction.all.modified_non_trans_table) || - (!all && thd->transaction.stmt.modified_non_trans_table && - !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT))) || - (!all && thd->transaction.stmt.modified_non_trans_table && - !trx_data->at_least_one_stmt_committed && - thd->current_stmt_binlog_row_based) || - ((thd->options & OPTION_KEEP_LOG))) + if ((ending_trans(thd, all) && + (trans_has_updated_non_trans_table(thd) || + (thd->options & OPTION_KEEP_LOG))) || + (trans_has_no_stmt_committed(thd, all) && + stmt_has_updated_non_trans_table(thd) && + thd->current_stmt_binlog_row_based)) { Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, TRUE, 0); error= binlog_end_trans(thd, trx_data, &qev, all); @@ -1671,8 +1653,8 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) Otherwise, we simply truncate the cache as there is no change on non-transactional tables as follows. */ - else if ((all && !thd->transaction.all.modified_non_trans_table) || - (!all && !thd->transaction.stmt.modified_non_trans_table)) + else if (ending_trans(thd, all) || + (!(thd->options & OPTION_KEEP_LOG) && !stmt_has_updated_non_trans_table(thd))) error= binlog_end_trans(thd, trx_data, 0, all); } if (!all) @@ -1769,7 +1751,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) non-transactional table. Otherwise, truncate the binlog cache starting from the SAVEPOINT command. 
*/ - if (unlikely(thd->transaction.all.modified_non_trans_table || + if (unlikely(trans_has_updated_non_trans_table(thd) || (thd->options & OPTION_KEEP_LOG))) { String log_query; @@ -2493,7 +2475,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name, MYSQL_BIN_LOG::MYSQL_BIN_LOG() :bytes_written(0), prepared_xids(0), file_id(1), open_count(1), - need_start_event(TRUE), m_table_map_version(0), + need_start_event(TRUE), is_relay_log(0), description_event_for_exec(0), description_event_for_queue(0) { @@ -4007,6 +3989,67 @@ bool MYSQL_BIN_LOG::is_query_in_union(THD *thd, query_id_t query_id_param) query_id_param >= thd->binlog_evt_union.first_query_id); } +/** + This function checks if a transaction, either a multi-statement + or a single statement transaction is about to commit or not. + + @param thd The client thread that executed the current statement. + @param all Committing a transaction (i.e. TRUE) or a statement + (i.e. FALSE). + @return + @c true if committing a transaction, otherwise @c false. +*/ +bool ending_trans(const THD* thd, const bool all) +{ + return (all || (!all && !(thd->options & + (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))); +} + +/** + This function checks if a non-transactional table was updated by + the current transaction. + + @param thd The client thread that executed the current statement. + @return + @c true if a non-transactional table was updated, @c false + otherwise. +*/ +bool trans_has_updated_non_trans_table(const THD* thd) +{ + return (thd->transaction.all.modified_non_trans_table || + thd->transaction.stmt.modified_non_trans_table); +} + +/** + This function checks if any statement was committed and cached. + + @param thd The client thread that executed the current statement. + @param all Committing a transaction (i.e. TRUE) or a statement + (i.e. FALSE). + @return + @c true if at a statement was committed and cached, @c false + otherwise. +*/ +bool trans_has_no_stmt_committed(const THD* thd, bool all) +{ + binlog_trx_data *const trx_data= + (binlog_trx_data*) thd_get_ha_data(thd, binlog_hton); + + return (!all && !trx_data->at_least_one_stmt_committed); +} + +/** + This function checks if a non-transactional table was updated by the + current statement. + + @param thd The client thread that executed the current statement. + @return + @c true if a non-transactional table was updated, @c false otherwise. +*/ +bool stmt_has_updated_non_trans_table(const THD* thd) +{ + return (thd->transaction.stmt.modified_non_trans_table); +} /* These functions are placed in this file since they need access to @@ -4139,7 +4182,6 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans) DBUG_RETURN(error); binlog_table_maps++; - table->s->table_map_version= mysql_bin_log.table_map_version(); DBUG_RETURN(0); } @@ -4230,10 +4272,8 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, file= &trx_data->trans_log; /* - If we are writing to the log file directly, we could avoid - locking the log. This does not work since we need to step the - m_table_map_version below, and that change has to be protected - by the LOCK_log mutex. + If we are not writing to the log file directly, we could avoid + locking the log. */ pthread_mutex_lock(&LOCK_log); @@ -4247,24 +4287,6 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, DBUG_RETURN(1); } - /* - We step the table map version if we are writing an event - representing the end of a statement. We do this regardless of - wheather we write to the transaction cache or to directly to the - file. 
- - In an ideal world, we could avoid stepping the table map version - if we were writing to a transaction cache, since we could then - reuse the table map that was written earlier in the transaction - cache. This does not work since STMT_END_F implies closing all - table mappings on the slave side. - - TODO: Find a solution so that table maps does not have to be - written several times within a transaction. - */ - if (pending->get_flags(Rows_log_event::STMT_END_F)) - ++m_table_map_version; - delete pending; if (file == &log_file) @@ -4480,9 +4502,6 @@ err: set_write_error(thd); } - if (event_info->flags & LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F) - ++m_table_map_version; - pthread_mutex_unlock(&LOCK_log); DBUG_RETURN(error); } diff --git a/sql/log.h b/sql/log.h index c6cf8780c04..1cfd7ce9ae5 100644 --- a/sql/log.h +++ b/sql/log.h @@ -20,6 +20,11 @@ class Relay_log_info; class Format_description_log_event; +bool ending_trans(const THD* thd, const bool all); +bool trans_has_updated_non_trans_table(const THD* thd); +bool trans_has_no_stmt_committed(const THD* thd, const bool all); +bool stmt_has_updated_non_trans_table(const THD* thd); + /* Transaction Coordinator log - a base abstract class for two different implementations @@ -272,8 +277,6 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG */ bool no_auto_events; - ulonglong m_table_map_version; - int write_to_file(IO_CACHE *cache); /* This is used to start writing to a new log file. The difference from @@ -314,14 +317,6 @@ public: void unlog(ulong cookie, my_xid xid); int recover(IO_CACHE *log, Format_description_log_event *fdle); #if !defined(MYSQL_CLIENT) - bool is_table_mapped(TABLE *table) const - { - return table->s->table_map_version == table_map_version(); - } - - ulonglong table_map_version() const { return m_table_map_version; } - void update_table_map_version() { ++m_table_map_version; } - int flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event); int remove_pending_rows_event(THD *thd); diff --git a/sql/log_event.cc b/sql/log_event.cc index ee11ab79fd0..a1ecb79ff56 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -2405,13 +2405,29 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, charset_database_number= thd_arg->variables.collation_database->number; /* - If we don't use flags2 for anything else than options contained in - thd_arg->options, it would be more efficient to flags2=thd_arg->options - (OPTIONS_WRITTEN_TO_BIN_LOG would be used only at reading time). - But it's likely that we don't want to use 32 bits for 3 bits; in the future - we will probably want to reclaim the 29 bits. So we need the &. + We only replicate over the bits of flags2 that we need: the rest + are masked out by "& OPTIONS_WRITTEN_TO_BINLOG". + + We also force AUTOCOMMIT=1. Rationale (cf. BUG#29288): After + fixing BUG#26395, we always write BEGIN and COMMIT around all + transactions (even single statements in autocommit mode). This is + so that replication from non-transactional to transactional table + and error recovery from XA to non-XA table should work as + expected. The BEGIN/COMMIT are added in log.cc. However, there is + one exception: MyISAM bypasses log.cc and writes directly to the + binlog. So if autocommit is off, master has MyISAM, and slave has + a transactional engine, then the slave will just see one long + never-ending transaction. The only way to bypass explicit + BEGIN/COMMIT in the binlog is by using a non-transactional table. + So setting AUTOCOMMIT=1 will make this work as expected. 
+ + Note: explicitly replicate AUTOCOMMIT=1 from master. We do not + assume AUTOCOMMIT=1 on slave; the slave still reads the state of + the autocommit flag as written by the master to the binlog. This + behavior may change after WL#4162 has been implemented. */ - flags2= (uint32) (thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG); + flags2= (uint32) (thd_arg->options & + (OPTIONS_WRITTEN_TO_BIN_LOG & ~OPTION_NOT_AUTOCOMMIT)); DBUG_ASSERT(thd_arg->variables.character_set_client->number < 256*256); DBUG_ASSERT(thd_arg->variables.collation_connection->number < 256*256); DBUG_ASSERT(thd_arg->variables.collation_server->number < 256*256); @@ -7546,12 +7562,6 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); error= 0; } - - if (!cache_stmt) - { - DBUG_PRINT("info", ("Marked that we need to keep log")); - thd->options|= OPTION_KEEP_LOG; - } } // if (table) /* @@ -8865,11 +8875,28 @@ static bool record_compare(TABLE *table) { for (int i = 0 ; i < 2 ; ++i) { - saved_x[i]= table->record[i][0]; - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][0]|= 1U; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); + /* + If we have an X bit then we need to take care of it. + */ + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + { + saved_x[i]= table->record[i][0]; + table->record[i][0]|= 1U; + } + + /* + If (last_null_bit_pos == 0 && null_bytes > 1), then: + + X bit (if any) + N nullable fields + M Field_bit fields = 8 bits + + Ie, the entire byte is used. + */ + if (table->s->last_null_bit_pos > 0) + { + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } } } @@ -8909,8 +8936,11 @@ record_compare_exit: { for (int i = 0 ; i < 2 ; ++i) { - table->record[i][0]= saved_x[i]; - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + table->record[i][0]= saved_x[i]; + + if (table->s->last_null_bit_pos) + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; } } @@ -9087,8 +9117,35 @@ int Rows_log_event::find_row(const Relay_log_info *rli) */ if (table->key_info->flags & HA_NOSAME) { - table->file->ha_index_end(); - goto ok; + /* Unique does not have non nullable part */ + if (!(table->key_info->flags & (HA_NULL_PART_KEY))) + { + table->file->ha_index_end(); + goto ok; + } + else + { + KEY *keyinfo= table->key_info; + /* + Unique has nullable part. We need to check if there is any field in the + BI image that is null and part of UNNI. + */ + bool null_found= FALSE; + for (uint i=0; i < keyinfo->key_parts && !null_found; i++) + { + uint fieldnr= keyinfo->key_part[i].fieldnr - 1; + Field **f= table->field+fieldnr; + null_found= (*f)->is_null(); + } + + if (!null_found) + { + table->file->ha_index_end(); + goto ok; + } + + /* else fall through to index scan */ + } } /* diff --git a/sql/log_event.h b/sql/log_event.h index db3950e6c25..d82fc385d86 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -463,10 +463,10 @@ struct sql_ex_info #define LOG_EVENT_SUPPRESS_USE_F 0x8 /* - The table map version internal to the log should be increased after - the event has been written to the binary log. + Note: this is a place holder for the flag + LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F (0x10), which is not used any + more, please do not reused this value for other flags. 
*/ -#define LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F 0x10 /** @def LOG_EVENT_ARTIFICIAL_F diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 087d4c02d98..b96d1684d77 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -229,11 +229,6 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", const_cast<Relay_log_info*>(rli)->abort_slave= 1;); error= do_after_row_operations(table, error); - if (!ev->cache_stmt) - { - DBUG_PRINT("info", ("Marked that we need to keep log")); - ev_thd->options|= OPTION_KEEP_LOG; - } } /* @@ -342,12 +337,29 @@ static bool record_compare(TABLE *table) if (table->s->null_bytes > 0) { for (int i = 0 ; i < 2 ; ++i) - { - saved_x[i]= table->record[i][0]; - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][0]|= 1U; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); + { + /* + If we have an X bit then we need to take care of it. + */ + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + { + saved_x[i]= table->record[i][0]; + table->record[i][0]|= 1U; + } + + /* + If (last_null_bit_pos == 0 && null_bytes > 1), then: + + X bit (if any) + N nullable fields + M Field_bit fields = 8 bits + + Ie, the entire byte is used. + */ + if (table->s->last_null_bit_pos > 0) + { + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } } } @@ -387,8 +399,11 @@ record_compare_exit: { for (int i = 0 ; i < 2 ; ++i) { - table->record[i][0]= saved_x[i]; - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) + table->record[i][0]= saved_x[i]; + + if (table->s->last_null_bit_pos > 0) + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; } } @@ -1760,11 +1775,6 @@ int Old_rows_log_event::do_apply_event(Relay_log_info const *rli) DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", const_cast<Relay_log_info*>(rli)->abort_slave= 1;); error= do_after_row_operations(rli, error); - if (!cache_stmt) - { - DBUG_PRINT("info", ("Marked that we need to keep log")); - thd->options|= OPTION_KEEP_LOG; - } } // if (table) /* @@ -2413,8 +2423,35 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli) */ if (table->key_info->flags & HA_NOSAME) { - table->file->ha_index_end(); - DBUG_RETURN(0); + /* Unique does not have non nullable part */ + if (!(table->key_info->flags & (HA_NULL_PART_KEY))) + { + table->file->ha_index_end(); + DBUG_RETURN(0); + } + else + { + KEY *keyinfo= table->key_info; + /* + Unique has nullable part. We need to check if there is any field in the + BI image that is null and part of UNNI. + */ + bool null_found= FALSE; + for (uint i=0; i < keyinfo->key_parts && !null_found; i++) + { + uint fieldnr= keyinfo->key_part[i].fieldnr - 1; + Field **f= table->field+fieldnr; + null_found= (*f)->is_null(); + } + + if (!null_found) + { + table->file->ha_index_end(); + DBUG_RETURN(0); + } + + /* else fall through to index scan */ + } } /* diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 864f91df868..299597632dd 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -670,49 +670,6 @@ enum enum_check_fields CHECK_FIELD_ERROR_FOR_NULL }; - -/** Struct to handle simple linked lists. 
*/ -typedef struct st_sql_list { - uint elements; - uchar *first; - uchar **next; - - st_sql_list() {} /* Remove gcc warning */ - inline void empty() - { - elements=0; - first=0; - next= &first; - } - inline void link_in_list(uchar *element,uchar **next_ptr) - { - elements++; - (*next)=element; - next= next_ptr; - *next=0; - } - inline void save_and_clear(struct st_sql_list *save) - { - *save= *this; - empty(); - } - inline void push_front(struct st_sql_list *save) - { - *save->next= first; /* link current list last */ - first= save->first; - elements+= save->elements; - } - inline void push_back(struct st_sql_list *save) - { - if (save->first) - { - *next= save->first; - next= save->next; - elements+= save->elements; - } - } -} SQL_LIST; - #if defined(MYSQL_DYNAMIC_PLUGIN) && defined(_WIN32) extern "C" THD *_current_thd_noinline(); #define _current_thd() _current_thd_noinline() @@ -1305,7 +1262,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, void prepare_triggers_for_insert_stmt(TABLE *table); int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, - SQL_LIST *order, ha_rows rows, ulonglong options, + SQL_I_List<ORDER> *order, ha_rows rows, ulonglong options, bool reset_auto_increment); bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok); bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create); @@ -1331,7 +1288,8 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last, TABLE_LIST *new_child_list, TABLE_LIST **new_last); bool reopen_table(TABLE *table); bool reopen_tables(THD *thd,bool get_locks,bool in_refresh); -thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table); +thr_lock_type read_lock_type_for_table(THD *thd, LEX *lex, + TABLE_LIST *table_list); void close_data_files_and_morph_locks(THD *thd, const char *db, const char *table_name); void close_handle_and_leave_table_as_lock(TABLE *table); @@ -1508,7 +1466,7 @@ Create_field * new_create_field(THD *thd, char *field_name, enum_field_types typ uint uint_geom_type, Virtual_column_info *vcol_info); void store_position_for_column(const char *name); -bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc); +bool add_to_list(THD *thd, SQL_I_List<ORDER> &list, Item *group,bool asc); bool push_new_name_resolution_context(THD *thd, TABLE_LIST *left_op, TABLE_LIST *right_op); @@ -1783,7 +1741,7 @@ extern pthread_mutex_t LOCK_gdl; #define WFRM_PACK_FRM 4 #define WFRM_KEEP_SHARE 8 bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags); -int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt); +int abort_and_upgrade_lock_and_close_table(ALTER_PARTITION_PARAM_TYPE *lpt); void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt); void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table); @@ -2360,6 +2318,7 @@ uint explain_filename(THD* thd, const char *from, char *to, uint to_length, uint filename_to_tablename(const char *from, char *to, uint to_length); uint tablename_to_filename(const char *from, char *to, uint to_length); uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length); +bool check_mysql50_prefix(const char *name); #endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */ #ifdef MYSQL_SERVER uint build_table_filename(char *buff, size_t bufflen, const char *db, diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3d03c5efaa0..261589cfd34 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ 
-181,6 +181,21 @@ typedef fp_except fp_except_t; /* for IRIX to use set_fpc_csr() */ #include <sys/fpu.h> #endif +#ifdef HAVE_FPU_CONTROL_H +#include <fpu_control.h> +#endif +#if defined(__i386__) && !defined(HAVE_FPU_CONTROL_H) +# define fpu_control_t unsigned int +# define _FPU_EXTENDED 0x300 +# define _FPU_DOUBLE 0x200 +# if defined(__GNUC__) || defined(__SUNPRO_CC) +# define _FPU_GETCW(cw) asm volatile ("fnstcw %0" : "=m" (*&cw)) +# define _FPU_SETCW(cw) asm volatile ("fldcw %0" : : "m" (*&cw)) +# else +# define _FPU_GETCW(cw) (cw= 0) +# define _FPU_SETCW(cw) +# endif +#endif inline void setup_fpu() { @@ -202,7 +217,26 @@ inline void setup_fpu() /* Set FPU rounding mode to "round-to-nearest" */ fesetround(FE_TONEAREST); #endif /* HAVE_FESETROUND */ - + + /* + x86 (32-bit) requires FPU precision to be explicitly set to 64 bit + (double precision) for portable results of floating point operations. + However, there is no need to do so if compiler is using SSE2 for floating + point, double values will be stored and processed in 64 bits anyway. + */ +#if defined(__i386__) && !defined(__SSE2_MATH__) +#if defined(_WIN32) +#if !defined(_WIN64) + _control87(_PC_53, MCW_PC); +#endif /* !_WIN64 */ +#else /* !_WIN32 */ + fpu_control_t cw; + _FPU_GETCW(cw); + cw= (cw & ~_FPU_EXTENDED) | _FPU_DOUBLE; + _FPU_SETCW(cw); +#endif /* _WIN32 && */ +#endif /* __i386__ */ + #if defined(__sgi) && defined(HAVE_SYS_FPU_H) /* Enable denormalized DOUBLE values support for IRIX */ union fpc_csr n; @@ -1251,7 +1285,7 @@ extern "C" sig_handler print_signal_warning(int sig) { if (global_system_variables.log_warnings) sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id()); -#ifdef DONT_REMEMBER_SIGNAL +#ifdef SIGNAL_HANDLER_RESET_ON_DELIVERY my_sigset(sig,print_signal_warning); /* int. 
thread system calls */ #endif #if !defined(__WIN__) && !defined(__NETWARE__) @@ -3083,6 +3117,9 @@ int my_message_sql(uint error, const char *str, myf MyFlags) DBUG_RETURN(0); } + /* When simulating OOM, skip writing to error log to avoid mtr errors */ + DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_RETURN(0);); + if (!thd->no_warnings_for_error && !(MyFlags & ME_NO_WARNING_FOR_ERROR)) { @@ -3095,7 +3132,11 @@ int my_message_sql(uint error, const char *str, myf MyFlags) thd->no_warnings_for_error= FALSE; } } + to_error_log: + /* When simulating OOM, skip writing to error log to avoid mtr errors */ + DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_RETURN(0);); + if (!thd || (MyFlags & ME_NOREFRESH)) (*func)("%s: %s", my_progname_short, str); /* purecov: inspected */ DBUG_RETURN(0); @@ -3354,6 +3395,13 @@ static int init_common_variables(const char *conf_file_name, int argc, max_system_variables.pseudo_thread_id= (ulong)~0; server_start_time= flush_status_time= my_time(0); + /* TODO: remove this when my_time_t is 64 bit compatible */ + if (server_start_time >= (time_t) MY_TIME_T_MAX) + { + sql_print_error("This MySQL server doesn't support dates later then 2038"); + return 1; + } + rpl_filter= new Rpl_filter; binlog_filter= new Rpl_filter; if (!rpl_filter || !binlog_filter) @@ -5976,13 +6024,13 @@ enum options_mysqld struct my_option my_long_options[] = { - {"help", '?', "Display this help and exit.", - (uchar**) &opt_help, (uchar**) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + {"help", '?', "Display this help and exit.", + &opt_help, &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"abort-slave-event-count", OPT_ABORT_SLAVE_EVENT_COUNT, "Option used by mysql-test for debugging and testing of replication.", - (uchar**) &abort_slave_event_count, (uchar**) &abort_slave_event_count, + &abort_slave_event_count, &abort_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ {"allow-suspicious-udfs", OPT_ALLOW_SUSPICIOUS_UDFS, @@ -5990,33 +6038,34 @@ struct my_option my_long_options[] = "without corresponding xxx_init() or xxx_deinit(). That also means " "that one can load any function from any library, for example exit() " "from libc.so", - (uchar**) &opt_allow_suspicious_udfs, (uchar**) &opt_allow_suspicious_udfs, + &opt_allow_suspicious_udfs, &opt_allow_suspicious_udfs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'.", 0, 0, 0, + {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode " + "will also set transaction isolation level 'serializable'.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"auto-increment-increment", OPT_AUTO_INCREMENT, "Auto-increment columns are incremented by this.", - (uchar**) &global_system_variables.auto_increment_increment, - (uchar**) &max_system_variables.auto_increment_increment, 0, GET_ULONG, + &global_system_variables.auto_increment_increment, + &max_system_variables.auto_increment_increment, 0, GET_ULONG, OPT_ARG, 1, 1, 65535, 0, 1, 0 }, {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET, "Offset added to Auto-increment columns. 
Used when auto-increment-increment != 1.", - (uchar**) &global_system_variables.auto_increment_offset, - (uchar**) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG, + &global_system_variables.auto_increment_offset, + &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG, 1, 1, 65535, 0, 1, 0 }, {"automatic-sp-privileges", OPT_SP_AUTOMATIC_PRIVILEGES, "Creating and dropping stored procedures alters ACLs. Disable with --skip-automatic-sp-privileges.", - (uchar**) &sp_automatic_privileges, (uchar**) &sp_automatic_privileges, + &sp_automatic_privileges, &sp_automatic_privileges, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"basedir", 'b', "Path to installation directory. All paths are usually resolved relative to this.", - (uchar**) &mysql_home_ptr, (uchar**) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, + &mysql_home_ptr, &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"big-tables", OPT_BIG_TABLES, "Allow big result sets by saving all temporary sets on file (solves most 'table full' errors).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", - (uchar**) &my_bind_addr_str, (uchar**) &my_bind_addr_str, 0, GET_STR, + &my_bind_addr_str, &my_bind_addr_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog_format", OPT_BINLOG_FORMAT, "Does not have any effect without '--log-bin'. " @@ -6029,10 +6078,11 @@ struct my_option my_long_options[] = "If ndbcluster is enabled and binlog_format is `mixed', the format switches" " to 'row' and back implicitly per each query accessing a NDB table." #endif - ,(uchar**) &opt_binlog_format, (uchar**) &opt_binlog_format, + , &opt_binlog_format, &opt_binlog_format, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, - "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", + "Tells the master it should log updates for the specified database, " + "and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB, "Tells the master that updates to the given database should not be logged to the binary log.", @@ -6041,12 +6091,10 @@ struct my_option my_long_options[] = "The maximum size of a row-based binary log event in bytes. Rows will be " "grouped into events smaller than this size if possible. 
" "The value has to be a multiple of 256.", - (uchar**) &opt_binlog_rows_event_max_size, - (uchar**) &opt_binlog_rows_event_max_size, 0, - GET_ULONG, REQUIRED_ARG, - /* def_value */ 1024, /* min_value */ 256, - /* max_value */ (longlong) ULONG_MAX, - /* sub_size */ 0, /* block_size */ 256, + &opt_binlog_rows_event_max_size, &opt_binlog_rows_event_max_size, + 0, GET_ULONG, REQUIRED_ARG, + /* def_value */ 1024, /* min_value */ 256, /* max_value */ ULONG_MAX, + /* sub_size */ 0, /* block_size */ 256, /* app_type */ 0 }, #ifndef DISABLE_GRANT_OPTIONS @@ -6055,134 +6103,138 @@ struct my_option my_long_options[] = #endif {"character-set-client-handshake", OPT_CHARACTER_SET_CLIENT_HANDSHAKE, "Don't ignore client side character set value sent during handshake.", - (uchar**) &opt_character_set_client_handshake, - (uchar**) &opt_character_set_client_handshake, + &opt_character_set_client_handshake, + &opt_character_set_client_handshake, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"character-set-filesystem", OPT_CHARACTER_SET_FILESYSTEM, "Set the filesystem character set.", - (uchar**) &character_set_filesystem_name, - (uchar**) &character_set_filesystem_name, + &character_set_filesystem_name, + &character_set_filesystem_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"character-set-server", 'C', "Set the default character set.", - (uchar**) &default_character_set_name, (uchar**) &default_character_set_name, + &default_character_set_name, &default_character_set_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"character-sets-dir", OPT_CHARSETS_DIR, - "Directory where character sets are.", (uchar**) &charsets_dir, - (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Directory where character sets are.", &charsets_dir, + &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"chroot", 'r', "Chroot mysqld daemon during startup.", - (uchar**) &mysqld_chroot, (uchar**) &mysqld_chroot, 0, GET_STR, REQUIRED_ARG, + &mysqld_chroot, &mysqld_chroot, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.", - (uchar**) &default_collation_name, (uchar**) &default_collation_name, + &default_collation_name, &default_collation_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"completion-type", OPT_COMPLETION_TYPE, "Default completion type.", - (uchar**) &global_system_variables.completion_type, - (uchar**) &max_system_variables.completion_type, 0, GET_ULONG, + &global_system_variables.completion_type, + &max_system_variables.completion_type, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 2, 0, 1, 0}, {"concurrent-insert", OPT_CONCURRENT_INSERT, "Use concurrent insert with MyISAM. 
Disable with --concurrent-insert=0.", - (uchar**) &myisam_concurrent_insert, (uchar**) &myisam_concurrent_insert, + &myisam_concurrent_insert, &myisam_concurrent_insert, 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"console", OPT_CONSOLE, "Write error output on screen; don't remove the console window on windows.", - (uchar**) &opt_console, (uchar**) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, + &opt_console, &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"datadir", 'h', "Path to the database root.", (uchar**) &mysql_data_home, - (uchar**) &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"datadir", 'h', "Path to the database root.", &mysql_data_home, + &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"deadlock-search-depth-short", OPT_DEADLOCK_SEARCH_DEPTH_SHORT, "Short search depth for the two-step deadlock detection", - (uchar**) &global_system_variables.wt_deadlock_search_depth_short, - (uchar**) &max_system_variables.wt_deadlock_search_depth_short, + &global_system_variables.wt_deadlock_search_depth_short, + &max_system_variables.wt_deadlock_search_depth_short, 0, GET_ULONG, REQUIRED_ARG, 4, 0, 32, 0, 0, 0}, {"deadlock-search-depth-long", OPT_DEADLOCK_SEARCH_DEPTH_LONG, "Long search depth for the two-step deadlock detection", - (uchar**) &global_system_variables.wt_deadlock_search_depth_long, - (uchar**) &max_system_variables.wt_deadlock_search_depth_long, + &global_system_variables.wt_deadlock_search_depth_long, + &max_system_variables.wt_deadlock_search_depth_long, 0, GET_ULONG, REQUIRED_ARG, 15, 0, 33, 0, 0, 0}, {"deadlock-timeout-short", OPT_DEADLOCK_TIMEOUT_SHORT, "Short timeout for the two-step deadlock detection (in microseconds)", - (uchar**) &global_system_variables.wt_timeout_short, - (uchar**) &max_system_variables.wt_timeout_short, + &global_system_variables.wt_timeout_short, + &max_system_variables.wt_timeout_short, 0, GET_ULONG, REQUIRED_ARG, 10000, 0, ULONG_MAX, 0, 0, 0}, {"deadlock-timeout-long", OPT_DEADLOCK_TIMEOUT_LONG, "Long timeout for the two-step deadlock detection (in microseconds)", - (uchar**) &global_system_variables.wt_timeout_long, - (uchar**) &max_system_variables.wt_timeout_long, + &global_system_variables.wt_timeout_long, + &max_system_variables.wt_timeout_long, 0, GET_ULONG, REQUIRED_ARG, 50000000, 0, ULONG_MAX, 0, 0, 0}, #ifndef DBUG_OFF - {"debug", '#', "Debug log.", (uchar**) &default_dbug_option, - (uchar**) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"debug", '#', "Debug log.", &default_dbug_option, + &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"debug-crc-break", OPT_DEBUG_CRC, "Call my_debug_put_break_here() if crc matches this number (for debug).", - (uchar**) &opt_my_crc_dbug_check, (uchar**) &opt_my_crc_dbug_check, + &opt_my_crc_dbug_check, &opt_my_crc_dbug_check, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~(ulong) 0L, 0, 0, 0}, {"debug-flush", OPT_DEBUG_FLUSH, "Default debug log with flush after write", - (uchar**) 0, (uchar**) 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"default-character-set", OPT_DEFAULT_CHARACTER_SET_OLD, "Set the default character set (deprecated option, use --character-set-server instead).", - (uchar**) &default_character_set_name, (uchar**) &default_character_set_name, + &default_character_set_name, &default_character_set_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - 
{"default-collation", OPT_DEFAULT_COLLATION_OLD, "Set the default collation (deprecated option, use --collation-server instead).", - (uchar**) &default_collation_name, (uchar**) &default_collation_name, + {"default-collation", OPT_DEFAULT_COLLATION_OLD, "Set the default collation " + "(deprecated option, use --collation-server instead).", + &default_collation_name, &default_collation_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"default-storage-engine", OPT_STORAGE_ENGINE, "Set the default storage engine (table type) for tables.", - (uchar**)&default_storage_engine_str, (uchar**)&default_storage_engine_str, + &default_storage_engine_str, &default_storage_engine_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-table-type", OPT_STORAGE_ENGINE, "(deprecated) Use --default-storage-engine.", - (uchar**)&default_storage_engine_str, (uchar**)&default_storage_engine_str, + &default_storage_engine_str, &default_storage_engine_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.", - (uchar**) &default_tz_name, (uchar**) &default_tz_name, + &default_tz_name, &default_tz_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"delay-key-write", OPT_DELAY_KEY_WRITE, "Type of DELAY_KEY_WRITE.", 0,0,0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"delay-key-write-for-all-tables", OPT_DELAY_KEY_WRITE_ALL, - "Don't flush key buffers between writes for any MyISAM table. (Deprecated option, use --delay-key-write=all instead.)", + "Don't flush key buffers between writes for any MyISAM table. " + "(Deprecated option, use --delay-key-write=all instead.)", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_OPENSSL {"des-key-file", OPT_DES_KEY_FILE, "Load keys for des_encrypt() and des_encrypt from given file.", - (uchar**) &des_key_file, (uchar**) &des_key_file, 0, GET_STR, REQUIRED_ARG, + &des_key_file, &des_key_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_OPENSSL */ #ifdef HAVE_REPLICATION {"disconnect-slave-event-count", OPT_DISCONNECT_SLAVE_EVENT_COUNT, "Option used by mysql-test for debugging and testing of replication.", - (uchar**) &disconnect_slave_event_count, - (uchar**) &disconnect_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, - 0, 0, 0}, + &disconnect_slave_event_count, &disconnect_slave_event_count, + 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ {"enable-locking", OPT_ENABLE_LOCK, "Deprecated option, use --external-locking instead.", - (uchar**) &opt_external_locking, (uchar**) &opt_external_locking, + &opt_external_locking, &opt_external_locking, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef __NT__ {"enable-named-pipe", OPT_HAVE_NAMED_PIPE, "Enable the named pipe (NT).", - (uchar**) &opt_enable_named_pipe, (uchar**) &opt_enable_named_pipe, 0, GET_BOOL, + &opt_enable_named_pipe, &opt_enable_named_pipe, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif #ifdef HAVE_STACK_TRACE_ON_SEGV {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.", - (uchar**) &opt_do_pstack, (uchar**) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, + &opt_do_pstack, &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_STACK_TRACE_ON_SEGV */ {"engine-condition-pushdown", OPT_ENGINE_CONDITION_PUSHDOWN, "Push supported query conditions to the storage engine.", - (uchar**) &global_system_variables.engine_condition_pushdown, - (uchar**) &global_system_variables.engine_condition_pushdown, + &global_system_variables.engine_condition_pushdown, + 
&global_system_variables.engine_condition_pushdown, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, /* See how it's handled in get_one_option() */ {"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.", NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"exit-info", 'T', "Used for debugging. Use at your own risk.", 0, 0, 0, GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"external-locking", OPT_USE_LOCKING, "Use system (external) locking (disabled by default). With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running. Disable with --skip-external-locking.", - (uchar**) &opt_external_locking, (uchar**) &opt_external_locking, + {"external-locking", OPT_USE_LOCKING, "Use system (external) locking " + "(disabled by default). With this option enabled you can run myisamchk " + "to test (not repair) tables while the MySQL server is running. " + "Disable with --skip-external-locking.", + &opt_external_locking, &opt_external_locking, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -6190,73 +6242,70 @@ struct my_option my_long_options[] = easier to do */ {"extra-port", OPT_EXTRA_PORT, "Extra port number to use for tcp-connections in a one-thread-per-connection manner. 0 means don't use another port", - (uchar**) &mysqld_extra_port, - (uchar**) &mysqld_extra_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + &mysqld_extra_port, + &mysqld_extra_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"extra-max-connections", OPT_MAX_CONNECTIONS, "The number of connections on 'extra-port.", - (uchar**) &extra_max_connections, - (uchar**) &extra_max_connections, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 100000, + &extra_max_connections, + &extra_max_connections, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 100000, 0, 1, 0}, {"gdb", OPT_DEBUGGING, "Set up signals usable for debugging.", - (uchar**) &opt_debugging, (uchar**) &opt_debugging, + &opt_debugging, &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"general_log", OPT_GENERAL_LOG, - "Enable/disable general log.", (uchar**) &opt_log, - (uchar**) &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, + "Enable/disable general log.", &opt_log, + &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_LARGE_PAGES - {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \ -Disable with --skip-large-pages.", - (uchar**) &opt_large_pages, (uchar**) &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0, - 0, 0, 0}, + {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. 
" + "Disable with --skip-large-pages.", &opt_large_pages, &opt_large_pages, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"ignore-builtin-innodb", OPT_IGNORE_BUILTIN_INNODB , "Disable initialization of builtin InnoDB plugin.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection.", - (uchar**) &opt_init_connect, (uchar**) &opt_init_connect, 0, GET_STR_ALLOC, + &opt_init_connect, &opt_init_connect, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DISABLE_GRANT_OPTIONS {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.", - (uchar**) &opt_init_file, (uchar**) &opt_init_file, 0, GET_STR, REQUIRED_ARG, + &opt_init_file, &opt_init_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed by a slave server \ each time the SQL thread starts.", - (uchar**) &opt_init_slave, (uchar**) &opt_init_slave, 0, GET_STR_ALLOC, + &opt_init_slave, &opt_init_slave, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"language", 'L', "Client error messages in given language. May be given as a full path.", - (uchar**) &language_ptr, (uchar**) &language_ptr, 0, GET_STR, REQUIRED_ARG, + &language_ptr, &language_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"lc-time-names", OPT_LC_TIME_NAMES, "Set the language used for the month names and the days of the week.", - (uchar**) &lc_time_names_name, - (uchar**) &lc_time_names_name, + &lc_time_names_name, &lc_time_names_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"local-infile", OPT_LOCAL_INFILE, "Enable/disable LOAD DATA LOCAL INFILE (takes values 1 or 0).", - (uchar**) &opt_local_infile, - (uchar**) &opt_local_infile, 0, GET_BOOL, OPT_ARG, + &opt_local_infile, &opt_local_infile, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"log", 'l', "Log connections and queries to file (deprecated option, use " - "--general_log/--general_log_file instead).", (uchar**) &opt_logname, - (uchar**) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + "--general_log/--general_log_file instead).", &opt_logname, + &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"general_log_file", OPT_GENERAL_LOG_FILE, - "Log connections and queries to given file.", (uchar**) &opt_logname, - (uchar**) &opt_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Log connections and queries to given file.", &opt_logname, + &opt_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin", OPT_BIN_LOG, "Log update queries in binary format. 
Optional (but strongly recommended " "to avoid replication problems if server's hostname changes) argument " "should be the chosen location for the binary log files.", - (uchar**) &opt_bin_logname, (uchar**) &opt_bin_logname, 0, GET_STR_ALLOC, + &opt_bin_logname, &opt_bin_logname, 0, GET_STR_ALLOC, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin-index", OPT_BIN_LOG_INDEX, "File that holds the names for last binary log files.", - (uchar**) &opt_binlog_index_name, (uchar**) &opt_binlog_index_name, 0, GET_STR, + &opt_binlog_index_name, &opt_binlog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifndef TO_BE_REMOVED_IN_5_1_OR_6_0 /* @@ -6267,7 +6316,7 @@ each time the SQL thread starts.", */ {"log-bin-trust-routine-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS_OLD, "(deprecated) Use log-bin-trust-function-creators.", - (uchar**) &trust_function_creators, (uchar**) &trust_function_creators, 0, + &trust_function_creators, &trust_function_creators, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif /* @@ -6282,195 +6331,202 @@ each time the SQL thread starts.", "Note that if ALL connections to this server ALWAYS use row-based binary " "logging, the security issues do not exist and the binary logging cannot " "break, so you can safely set this to 1." - ,(uchar**) &trust_function_creators, (uchar**) &trust_function_creators, 0, + ,&trust_function_creators, &trust_function_creators, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-error", OPT_ERROR_LOG_FILE, "Error log file.", - (uchar**) &log_error_file_ptr, (uchar**) &log_error_file_ptr, 0, GET_STR, + &log_error_file_ptr, &log_error_file_ptr, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.", - (uchar**) &myisam_log_filename, (uchar**) &myisam_log_filename, 0, GET_STR, + &myisam_log_filename, &myisam_log_filename, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-long-format", '0', - "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.", + "Log some extra information to update log. Please note that this option " + "is deprecated; see --log-short-format option.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef WITH_CSV_STORAGE_ENGINE {"log-output", OPT_LOG_OUTPUT, "Syntax: log-output[=value[,value...]], where \"value\" could be TABLE, " "FILE or NONE.", - (uchar**) &log_output_str, (uchar**) &log_output_str, 0, + &log_output_str, &log_output_str, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES, "Log queries that are executed without benefit of any index to the slow log if it is open.", - (uchar**) &opt_log_queries_not_using_indexes, (uchar**) &opt_log_queries_not_using_indexes, + &opt_log_queries_not_using_indexes, &opt_log_queries_not_using_indexes, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-short-format", OPT_SHORT_LOG_FORMAT, "Don't log extra information to update and slow-query logs.", - (uchar**) &opt_short_log_format, (uchar**) &opt_short_log_format, + &opt_short_log_format, &opt_short_log_format, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slave-updates", OPT_LOG_SLAVE_UPDATES, - "Tells the slave to log the updates from the slave thread to the binary log. You will need to turn it on if you plan to daisy-chain the slaves.", - (uchar**) &opt_log_slave_updates, (uchar**) &opt_log_slave_updates, 0, GET_BOOL, + "Tells the slave to log the updates from the slave thread to the binary log. 
" + "You will need to turn it on if you plan to daisy-chain the slaves.", + &opt_log_slave_updates, &opt_log_slave_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slow-admin-statements", OPT_LOG_SLOW_ADMIN_STATEMENTS, - "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open. . Please note that this option is deprecated; see --log-slow-filter for filtering slow query log output", - (uchar**) &opt_log_slow_admin_statements, - (uchar**) &opt_log_slow_admin_statements, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + "Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements " + "to the slow log if it is open.", &opt_log_slow_admin_statements, + &opt_log_slow_admin_statements, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slow-slave-statements", OPT_LOG_SLOW_SLAVE_STATEMENTS, "Log slow statements executed by slave thread to the slow log if it is open.", - (uchar**) &opt_log_slow_slave_statements, - (uchar**) &opt_log_slow_slave_statements, + &opt_log_slow_slave_statements, + &opt_log_slow_slave_statements, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-slow-queries", OPT_SLOW_QUERY_LOG, "Log slow queries to a table or log file. Defaults logging to table " "mysql.slow_log or hostname-slow.log if --log-output=file is used. " "Must be enabled to activate other slow log options. " "(deprecated option, use --slow_query_log/--slow_query_log_file instead)", - (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, OPT_ARG, + &opt_slow_logname, &opt_slow_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"slow-query-log-file", OPT_SLOW_QUERY_LOG_FILE, - "Log slow queries to given log file. Defaults logging to hostname-slow.log.", - (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, + {"slow_query_log_file", OPT_SLOW_QUERY_LOG_FILE, + "Log slow queries to given log file. Defaults logging to hostname-slow.log. 
" + "Must be enabled to activate other slow log options.", + &opt_slow_logname, &opt_slow_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"log-tc", OPT_LOG_TC, "Path to transaction coordinator log (used for transactions that affect " "more than one storage engine, when binary log is disabled).", - (uchar**) &opt_tc_log_file, (uchar**) &opt_tc_log_file, 0, GET_STR, + &opt_tc_log_file, &opt_tc_log_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_MMAP {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.", - (uchar**) &opt_tc_log_size, (uchar**) &opt_tc_log_size, 0, GET_ULONG, + &opt_tc_log_size, &opt_tc_log_size, 0, GET_ULONG, REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, (longlong) ULONG_MAX, 0, TC_LOG_PAGE_SIZE, 0}, #endif {"log-update", OPT_UPDATE_LOG, "The update log is deprecated since version 5.0, is replaced by the binary " "log and this option just turns on --log-bin instead.", - (uchar**) &opt_update_logname, (uchar**) &opt_update_logname, 0, GET_STR, + &opt_update_logname, &opt_update_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-warnings", 'W', "Log some not critical warnings to the log file.", - (uchar**) &global_system_variables.log_warnings, - (uchar**) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, + &global_system_variables.log_warnings, + &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, "INSERT/DELETE/UPDATE has lower priority than selects.", - (uchar**) &global_system_variables.low_priority_updates, - (uchar**) &max_system_variables.low_priority_updates, + &global_system_variables.low_priority_updates, + &max_system_variables.low_priority_updates, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"master-connect-retry", OPT_MASTER_CONNECT_RETRY, "The number of seconds the slave thread will sleep before retrying to " "connect to the master, in case the master goes down or the connection " "is lost.", - (uchar**) &master_connect_retry, (uchar**) &master_connect_retry, 0, GET_UINT, + &master_connect_retry, &master_connect_retry, 0, GET_UINT, REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"master-host", OPT_MASTER_HOST, - "Master hostname or IP address for replication. If not set, the slave thread will not be started. Note that the setting of master-host will be ignored if there exists a valid master.info file.", - (uchar**) &master_host, (uchar**) &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0, + "Master hostname or IP address for replication. If not set, the slave " + "thread will not be started. Note that the setting of master-host will " + "be ignored if there exists a valid master.info file.", + &master_host, &master_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-info-file", OPT_MASTER_INFO_FILE, - "The location and name of the file that remembers the master and where the I/O replication \ -thread is in the master's binlogs.", - (uchar**) &master_info_file, (uchar**) &master_info_file, 0, GET_STR, + "The location and name of the file that remembers the master and where " + "the I/O replication thread is in the master's binlogs.", + &master_info_file, &master_info_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-password", OPT_MASTER_PASSWORD, "The password the slave thread will authenticate with when connecting to " "the master. If not set, an empty password is assumed. 
The value in " "master.info will take precedence if it can be read.", - (uchar**)&master_password, (uchar**)&master_password, 0, + &master_password, &master_password, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-port", OPT_MASTER_PORT, - "The port the master is listening on. If not set, the compiled setting of MYSQL_PORT is assumed. If you have not tinkered with configure options, this should be 3306. The value in master.info will take precedence if it can be read.", - (uchar**) &master_port, (uchar**) &master_port, 0, GET_UINT, REQUIRED_ARG, + "The port the master is listening on. If not set, the compiled setting of " + "MYSQL_PORT is assumed. If you have not tinkered with configure options, " + "this should be 3306. The value in master.info will take precedence if it " + "can be read.", &master_port, &master_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, {"master-retry-count", OPT_MASTER_RETRY_COUNT, "The number of tries the slave will make to connect to the master before giving up.", - (uchar**) &master_retry_count, (uchar**) &master_retry_count, 0, GET_ULONG, + &master_retry_count, &master_retry_count, 0, GET_ULONG, REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0}, {"master-ssl", OPT_MASTER_SSL, "Enable the slave to connect to the master using SSL.", - (uchar**) &master_ssl, (uchar**) &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, + &master_ssl, &master_ssl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-ca", OPT_MASTER_SSL_CA, "Master SSL CA file. Only applies if you have enabled master-ssl.", - (uchar**) &master_ssl_ca, (uchar**) &master_ssl_ca, 0, GET_STR, OPT_ARG, + &master_ssl_ca, &master_ssl_ca, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-capath", OPT_MASTER_SSL_CAPATH, "Master SSL CA path. Only applies if you have enabled master-ssl.", - (uchar**) &master_ssl_capath, (uchar**) &master_ssl_capath, 0, GET_STR, OPT_ARG, + &master_ssl_capath, &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-cert", OPT_MASTER_SSL_CERT, "Master SSL certificate file name. Only applies if you have enabled " "master-ssl.", - (uchar**) &master_ssl_cert, (uchar**) &master_ssl_cert, 0, GET_STR, OPT_ARG, + &master_ssl_cert, &master_ssl_cert, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-cipher", OPT_MASTER_SSL_CIPHER, "Master SSL cipher. Only applies if you have enabled master-ssl.", - (uchar**) &master_ssl_cipher, (uchar**) &master_ssl_capath, 0, GET_STR, OPT_ARG, + &master_ssl_cipher, &master_ssl_capath, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-ssl-key", OPT_MASTER_SSL_KEY, "Master SSL keyfile name. Only applies if you have enabled master-ssl.", - (uchar**) &master_ssl_key, (uchar**) &master_ssl_key, 0, GET_STR, OPT_ARG, + &master_ssl_key, &master_ssl_key, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"master-user", OPT_MASTER_USER, - "The username the slave thread will use for authentication when connecting to the master. The user must have FILE privilege. If the master user is not set, user test is assumed. The value in master.info will take precedence if it can be read.", - (uchar**) &master_user, (uchar**) &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, + "The username the slave thread will use for authentication when " + "connecting to the master. The user must have FILE privilege. " + "If the master user is not set, user test is assumed. 
The value " + "in master.info will take precedence if it can be read.", + &master_user, &master_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"max-binlog-dump-events", OPT_MAX_BINLOG_DUMP_EVENTS, "Option used by mysql-test for debugging and testing of replication.", - (uchar**) &max_binlog_dump_events, (uchar**) &max_binlog_dump_events, 0, + &max_binlog_dump_events, &max_binlog_dump_events, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ - {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", (uchar**) &locked_in_memory, - (uchar**) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"memlock", OPT_MEMLOCK, "Lock mysqld in memory.", &locked_in_memory, + &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef SAFE_MUTEX {"mutex-deadlock-detector", OPT_MUTEX_DEADLOCK_DETECTOR, "Enable checking of wrong mutex usage.", - (uchar**) &safe_mutex_deadlock_detector, - (uchar**) &safe_mutex_deadlock_detector, + &safe_mutex_deadlock_detector, + &safe_mutex_deadlock_detector, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, #endif {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", - (uchar**) &myisam_recover_options_str, (uchar**) &myisam_recover_options_str, 0, + &myisam_recover_options_str, &myisam_recover_options_str, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.", - (uchar**) &opt_ndb_connectstring, - (uchar**) &opt_ndb_connectstring, + &opt_ndb_connectstring, &opt_ndb_connectstring, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ndb-mgmd-host", OPT_NDB_MGMD, "Set host and port for ndb_mgmd. Syntax: hostname[:port]", - (uchar**) &opt_ndb_mgmd, - (uchar**) &opt_ndb_mgmd, + &opt_ndb_mgmd, &opt_ndb_mgmd, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ndb-nodeid", OPT_NDB_NODEID, "Nodeid for this mysqlserver in the cluster.", - (uchar**) &opt_ndb_nodeid, - (uchar**) &opt_ndb_nodeid, + &opt_ndb_nodeid, + &opt_ndb_nodeid, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, "Specify number of autoincrement values that are prefetched.", - (uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz, - (uchar**) &max_system_variables.ndb_autoincrement_prefetch_sz, + &global_system_variables.ndb_autoincrement_prefetch_sz, + &max_system_variables.ndb_autoincrement_prefetch_sz, 0, GET_ULONG, REQUIRED_ARG, 1, 1, 256, 0, 0, 0}, {"ndb-force-send", OPT_NDB_FORCE_SEND, "Force send of buffers to ndb immediately without waiting for " "other threads.", - (uchar**) &global_system_variables.ndb_force_send, - (uchar**) &global_system_variables.ndb_force_send, + &global_system_variables.ndb_force_send, + &global_system_variables.ndb_force_send, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb_force_send", OPT_NDB_FORCE_SEND, "same as --ndb-force-send.", - (uchar**) &global_system_variables.ndb_force_send, - (uchar**) &global_system_variables.ndb_force_send, + &global_system_variables.ndb_force_send, + &global_system_variables.ndb_force_send, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-extra-logging", OPT_NDB_EXTRA_LOGGING, "Turn on more logging in the error log.", - (uchar**) &ndb_extra_logging, - (uchar**) &ndb_extra_logging, + &ndb_extra_logging, + &ndb_extra_logging, 0, GET_INT, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_NDB_BINLOG {"ndb-report-thresh-binlog-epoch-slip", 
OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP, @@ -6478,86 +6534,87 @@ thread is in the master's binlogs.", "E.g., 3 means that if the difference between what epoch has been received " "from the storage nodes and what has been applied to the binlog is 3 or more, " "a status message will be sent to the cluster log.", - (uchar**) &ndb_report_thresh_binlog_epoch_slip, - (uchar**) &ndb_report_thresh_binlog_epoch_slip, + &ndb_report_thresh_binlog_epoch_slip, + &ndb_report_thresh_binlog_epoch_slip, 0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0}, {"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE, "Threshold on percentage of free memory before reporting binlog status. E.g., " "10 means that if amount of available memory for receiving binlog data from " "the storage nodes goes below 10%, " "a status message will be sent to the cluster log.", - (uchar**) &ndb_report_thresh_binlog_mem_usage, - (uchar**) &ndb_report_thresh_binlog_mem_usage, + &ndb_report_thresh_binlog_mem_usage, + &ndb_report_thresh_binlog_mem_usage, 0, GET_ULONG, REQUIRED_ARG, 10, 0, 100, 0, 0, 0}, #endif {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT, "Use exact records count during query planning and for fast " "select count(*), disable for faster queries.", - (uchar**) &global_system_variables.ndb_use_exact_count, - (uchar**) &global_system_variables.ndb_use_exact_count, + &global_system_variables.ndb_use_exact_count, + &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb_use_exact_count", OPT_NDB_USE_EXACT_COUNT, "Same as --ndb-use-exact-count.", - (uchar**) &global_system_variables.ndb_use_exact_count, - (uchar**) &global_system_variables.ndb_use_exact_count, + &global_system_variables.ndb_use_exact_count, + &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS, "Use transactions for large inserts, if enabled then large " "inserts will be split into several smaller transactions", - (uchar**) &global_system_variables.ndb_use_transactions, - (uchar**) &global_system_variables.ndb_use_transactions, + &global_system_variables.ndb_use_transactions, + &global_system_variables.ndb_use_transactions, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS, "Same as --ndb-use-transactions.", - (uchar**) &global_system_variables.ndb_use_transactions, - (uchar**) &global_system_variables.ndb_use_transactions, + &global_system_variables.ndb_use_transactions, + &global_system_variables.ndb_use_transactions, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-shm", OPT_NDB_SHM, "Use shared memory connections when available.", - (uchar**) &opt_ndb_shm, - (uchar**) &opt_ndb_shm, + &opt_ndb_shm, &opt_ndb_shm, 0, GET_BOOL, OPT_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0}, {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION, "Select nodes for transactions in a more optimal way.", - (uchar**) &opt_ndb_optimized_node_selection, - (uchar**) &opt_ndb_optimized_node_selection, + &opt_ndb_optimized_node_selection, + &opt_ndb_optimized_node_selection, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, { "ndb-cache-check-time", OPT_NDB_CACHE_CHECK_TIME, "A dedicated thread is created to, at the given milliseconds interval, " "invalidate the query cache if another MySQL server in the cluster has " "changed the data in the database.", - (uchar**) &opt_ndb_cache_check_time, (uchar**) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, + &opt_ndb_cache_check_time, 
&opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, 0, 0, LONG_TIMEOUT, 0, 1, 0}, {"ndb-index-stat-enable", OPT_NDB_INDEX_STAT_ENABLE, "Use ndb index statistics in query optimization.", - (uchar**) &global_system_variables.ndb_index_stat_enable, - (uchar**) &max_system_variables.ndb_index_stat_enable, + &global_system_variables.ndb_index_stat_enable, + &max_system_variables.ndb_index_stat_enable, 0, GET_BOOL, OPT_ARG, 0, 0, 1, 0, 0, 0}, #endif {"ndb-use-copying-alter-table", OPT_NDB_USE_COPYING_ALTER_TABLE, - "Force ndbcluster to always copy tables at alter table (should only be used if on-line alter table fails).", - (uchar**) &global_system_variables.ndb_use_copying_alter_table, - (uchar**) &global_system_variables.ndb_use_copying_alter_table, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + "Force ndbcluster to always copy tables at alter table " + "(should only be used if on-line alter table fails).", + &global_system_variables.ndb_use_copying_alter_table, + &global_system_variables.ndb_use_copying_alter_table, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"new", 'n', "Use very new, possibly 'unsafe', functions.", - (uchar**) &global_system_variables.new_mode, - (uchar**) &max_system_variables.new_mode, + &global_system_variables.new_mode, + &max_system_variables.new_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef NOT_YET {"no-mix-table-types", OPT_NO_MIX_TYPE, "Don't allow commands that use two different table types.", - (uchar**) &opt_no_mix_types, (uchar**) &opt_no_mix_types, 0, GET_BOOL, NO_ARG, + &opt_no_mix_types, &opt_no_mix_types, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"old-alter-table", OPT_OLD_ALTER_TABLE, "Use old, non-optimized alter table.", - (uchar**) &global_system_variables.old_alter_table, - (uchar**) &max_system_variables.old_alter_table, 0, GET_BOOL, NO_ARG, + &global_system_variables.old_alter_table, + &max_system_variables.old_alter_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"old-passwords", OPT_OLD_PASSWORDS, "Use old password encryption method (needed for 4.0 and older clients).", - (uchar**) &global_system_variables.old_passwords, - (uchar**) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG, + {"old-passwords", OPT_OLD_PASSWORDS, "Use old password " + "encryption method (needed for 4.0 and older clients).", + &global_system_variables.old_passwords, + &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"one-thread", OPT_ONE_THREAD, "(Deprecated): Only use one thread (for debugging under Linux). Use " @@ -6566,10 +6623,10 @@ thread is in the master's binlogs.", {"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS, "Enable old-style user limits (before 5.0.3, user resources were counted " "per each user+host vs. 
per account).", - (uchar**) &opt_old_style_user_limits, (uchar**) &opt_old_style_user_limits, + &opt_old_style_user_limits, &opt_old_style_user_limits, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.", - (uchar**) &pidfile_name_ptr, (uchar**) &pidfile_name_ptr, 0, GET_STR, + &pidfile_name_ptr, &pidfile_name_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " @@ -6577,64 +6634,85 @@ thread is in the master's binlogs.", "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", - (uchar**) &mysqld_port, - (uchar**) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + &mysqld_port, + &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port-open-timeout", OPT_PORT_OPEN_TIMEOUT, "Maximum time in seconds to wait for the port to become free. " - "(Default: No wait).", (uchar**) &mysqld_port_timeout, - (uchar**) &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "(Default: No wait).", &mysqld_port_timeout, + &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) {"profiling_history_size", OPT_PROFILING, "Limit of query profiling memory.", - (uchar**) &global_system_variables.profiling_history_size, - (uchar**) &max_system_variables.profiling_history_size, + &global_system_variables.profiling_history_size, + &max_system_variables.profiling_history_size, 0, GET_ULONG, REQUIRED_ARG, 15, 0, 100, 0, 0, 0}, #endif {"relay-log", OPT_RELAY_LOG, "The location and name to use for relay logs.", - (uchar**) &opt_relay_logname, (uchar**) &opt_relay_logname, 0, + &opt_relay_logname, &opt_relay_logname, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"relay-log-index", OPT_RELAY_LOG_INDEX, "The location and name to use for the file that keeps a list of the last \ relay logs.", - (uchar**) &opt_relaylog_index_name, (uchar**) &opt_relaylog_index_name, 0, + &opt_relaylog_index_name, &opt_relaylog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"relay-log-info-file", OPT_RELAY_LOG_INFO_FILE, "The location and name of the file that remembers where the SQL replication \ thread is in the relay logs.", - (uchar**) &relay_log_info_file, (uchar**) &relay_log_info_file, 0, GET_STR, + &relay_log_info_file, &relay_log_info_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-db", OPT_REPLICATE_DO_DB, - "Tells the slave thread to restrict replication to the specified database. To specify more than one database, use the directive multiple times, once for each database. Note that this will only work if you do not use cross-database queries such as UPDATE some_db.some_table SET foo='bar' while having selected a different or no database. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-do-table=db_name.%.", + "Tells the slave thread to restrict replication to the specified database. " + "To specify more than one database, use the directive multiple times, " + "once for each database. Note that this will only work if you do not use " + "cross-database queries such as UPDATE some_db.some_table SET foo='bar' " + "while having selected a different or no database. 
If you need cross " + "database updates to work, make sure you have 3.23.28 or later, and use " + "replicate-wild-do-table=db_name.%.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-do-table", OPT_REPLICATE_DO_TABLE, - "Tells the slave thread to restrict replication to the specified table. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates, in contrast to replicate-do-db.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Tells the slave thread to restrict replication to the specified table. " + "To specify more than one table, use the directive multiple times, once " + "for each table. This will work for cross-database updates, in contrast " + "to replicate-do-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB, - "Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Tells the slave thread to not replicate to the specified database. To " + "specify more than one database to ignore, use the directive multiple " + "times, once for each database. This option will not work if you use " + "cross database updates. If you need cross database updates to work, " + "make sure you have 3.23.28 or later, and use replicate-wild-ignore-" + "table=db_name.%. ", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE, "Tells the slave thread to not replicate to the specified table. To specify " "more than one table to ignore, use the directive multiple times, once for " "each table. This will work for cross-database updates, in contrast to " - "replicate-ignore-db.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "replicate-ignore-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB, - "Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.", + "Updates to a database with a different name than the original. Example: " + "replicate-rewrite-db=master_db_name->slave_db_name.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID, - "In replication, if set to 1, do not skip events having our server id. \ -Default value is 0 (to break infinite loops in circular replication). \ -Can't be set to 1 if --log-slave-updates is used.", - (uchar**) &replicate_same_server_id, - (uchar**) &replicate_same_server_id, + "In replication, if set to 1, do not skip events having our server id. " + "Default value is 0 (to break infinite loops in circular replication). " + "Can't be set to 1 if --log-slave-updates is used.", + &replicate_same_server_id, &replicate_same_server_id, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, - "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. 
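The reflowed replicate-* help texts rely on a C guarantee: adjacent string literals are concatenated at compile time, so splitting one long line into several short pieces changes only the source layout, and wherever the pieces concatenate to the original text the --help output is unchanged. For example:

const char *help=
  "Tells the slave thread to restrict replication to the specified database. "
  "To specify more than one database, use the directive multiple times.";
/* equivalent at runtime to a single long literal */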
Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", + "Tells the slave thread to restrict replication to the tables that match " + "the specified wildcard pattern. To specify more than one table, use the " + "directive multiple times, once for each table. This will work for cross-" + "database updates. Example: replicate-wild-do-table=foo%.bar% will " + "replicate only updates to tables in all databases that start with foo " + "and whose table names start with bar.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, - "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", + "Tells the slave thread to not replicate to the tables that match the " + "given wildcard pattern. To specify more than one table to ignore, use " + "the directive multiple times, once for each table. This will work for " + "cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% " + "will not do updates to tables in databases that start with foo and whose " + "table names start with bar.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, // In replication, we may need to tell the other servers how to connect {"report-host", OPT_REPORT_HOST, @@ -6645,19 +6723,22 @@ Can't be set to 1 if --log-slave-updates is used.", "from the socket once the slave connects. Due to NAT and other routing " "issues, that IP may not be valid for connecting to the slave from the " "master or other hosts.", - (uchar**) &report_host, (uchar**) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0, + &report_host, &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"report-password", OPT_REPORT_PASSWORD, "Undocumented.", - (uchar**) &report_password, (uchar**) &report_password, 0, GET_STR, + &report_password, &report_password, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"report-port", OPT_REPORT_PORT, - "Port for connecting to slave reported to the master during slave registration. Set it only if the slave is listening on a non-default port or if you have a special tunnel from the master or other clients to the slave. If not sure, leave this option unset.", - (uchar**) &report_port, (uchar**) &report_port, 0, GET_UINT, REQUIRED_ARG, + "Port for connecting to slave reported to the master during slave " + "registration. Set it only if the slave is listening on a non-default " + "port or if you have a special tunnel from the master or other clients " + "to the slave. 
If not sure, leave this option unset.", + &report_port, &report_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0}, - {"report-user", OPT_REPORT_USER, "Undocumented.", (uchar**) &report_user, - (uchar**) &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"report-user", OPT_REPORT_USER, "Undocumented.", &report_user, + &report_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"rpl-recovery-rank", OPT_RPL_RECOVERY_RANK, "Undocumented.", - (uchar**) &rpl_recovery_rank, (uchar**) &rpl_recovery_rank, 0, GET_ULONG, + &rpl_recovery_rank, &rpl_recovery_rank, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -6668,21 +6749,21 @@ Can't be set to 1 if --log-slave-updates is used.", #endif {"safe-user-create", OPT_SAFE_USER_CREATE, "Don't allow new user creation by the user who has no write privileges to the mysql.user table.", - (uchar**) &opt_safe_user_create, (uchar**) &opt_safe_user_create, 0, GET_BOOL, + &opt_safe_user_create, &opt_safe_user_create, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"safemalloc-mem-limit", OPT_SAFEMALLOC_MEM_LIMIT, "Simulate memory shortage when compiled with the --with-debug=full option.", 0, 0, 0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.", - (uchar**) &opt_secure_auth, (uchar**) &opt_secure_auth, 0, GET_BOOL, NO_ARG, + &opt_secure_auth, &opt_secure_auth, 0, GET_BOOL, NO_ARG, my_bool(0), 0, 0, 0, 0, 0}, {"secure-file-priv", OPT_SECURE_FILE_PRIV, "Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory.", - (uchar**) &opt_secure_file_priv, (uchar**) &opt_secure_file_priv, 0, + &opt_secure_file_priv, &opt_secure_file_priv, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"server-id", OPT_SERVER_ID, "Uniquely identifies the server instance in the community of replication partners.", - (uchar**) &server_id, (uchar**) &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, UINT_MAX32, + &server_id, &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, UINT_MAX32, 0, 0, 0}, {"set-variable", 'O', "Change the value of a variable. Please note that this option is deprecated; " @@ -6690,20 +6771,20 @@ Can't be set to 1 if --log-slave-updates is used.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_SMEM {"shared-memory", OPT_ENABLE_SHARED_MEMORY, - "Enable the shared memory.",(uchar**) &opt_enable_shared_memory, (uchar**) &opt_enable_shared_memory, + "Enable the shared memory.",&opt_enable_shared_memory, &opt_enable_shared_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"shared-memory-base-name",OPT_SHARED_MEMORY_BASE_NAME, - "Base name of shared memory.", (uchar**) &shared_memory_base_name, (uchar**) &shared_memory_base_name, + "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"show-slave-auth-info", OPT_SHOW_SLAVE_AUTH_INFO, "Show user and password in SHOW SLAVE HOSTS on this master.", - (uchar**) &opt_show_slave_auth_info, (uchar**) &opt_show_slave_auth_info, 0, + &opt_show_slave_auth_info, &opt_show_slave_auth_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifndef DISABLE_GRANT_OPTIONS {"skip-grant-tables", OPT_SKIP_GRANT, "Start without grant tables. 
This gives all users FULL ACCESS to all tables.", - (uchar**) &opt_noacl, (uchar**) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, + &opt_noacl, &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0, @@ -6730,49 +6811,55 @@ Can't be set to 1 if --log-slave-updates is used.", "Don't allow 'SHOW DATABASE' commands.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-slave-start", OPT_SKIP_SLAVE_START, - "If set, slave is not autostarted.", (uchar**) &opt_skip_slave_start, - (uchar**) &opt_skip_slave_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + "If set, slave is not autostarted.", &opt_skip_slave_start, + &opt_skip_slave_start, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-stack-trace", OPT_SKIP_STACK_TRACE, "Don't print a stack trace on failure.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead.", + {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. " + "Deprecated option. Use --skip-symbolic-links instead.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-thread-priority", OPT_SKIP_PRIOR, "Don't give threads different priorities. Deprecated option.", 0, 0, 0, GET_NO_ARG, NO_ARG, DEFAULT_SKIP_THREAD_PRIORITY, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR, - "The location where the slave should put its temporary files when \ -replicating a LOAD DATA INFILE command.", - (uchar**) &slave_load_tmpdir, (uchar**) &slave_load_tmpdir, 0, GET_STR_ALLOC, + "The location where the slave should put its temporary files when " + "replicating a LOAD DATA INFILE command.", + &slave_load_tmpdir, &slave_load_tmpdir, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"slave-skip-errors", OPT_SLAVE_SKIP_ERRORS, "Tells the slave thread to continue replication when a query event returns an error from the provided list.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"slave-exec-mode", OPT_SLAVE_EXEC_MODE, - "Modes for how replication events should be executed. Legal values are STRICT (default) and IDEMPOTENT. In IDEMPOTENT mode, replication will not stop for operations that are idempotent. In STRICT mode, replication will stop on any unexpected difference between the master and the slave.", - (uchar**) &slave_exec_mode_str, (uchar**) &slave_exec_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Modes for how replication events should be executed. Legal values are " + "STRICT (default) and IDEMPOTENT. In IDEMPOTENT mode, replication will " + "not stop for operations that are idempotent. 
In STRICT mode, replication " + "will stop on any unexpected difference between the master and the slave.", + &slave_exec_mode_str, &slave_exec_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"slow-query-log", OPT_SLOW_LOG, - "Enable/disable slow query log.", (uchar**) &opt_slow_log, - (uchar**) &opt_slow_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, + "Enable/disable slow query log.", &opt_slow_log, + &opt_slow_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"socket", OPT_SOCKET, "Socket file to use for connection.", - (uchar**) &mysqld_unix_port, (uchar**) &mysqld_unix_port, 0, GET_STR, + &mysqld_unix_port, &mysqld_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"sporadic-binlog-dump-fail", OPT_SPORADIC_BINLOG_DUMP_FAIL, "Option used by mysql-test for debugging and testing of replication.", - (uchar**) &opt_sporadic_binlog_dump_fail, - (uchar**) &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, + &opt_sporadic_binlog_dump_fail, + &opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, - "The update log is deprecated since version 5.0, is replaced by the binary \ -log and this option does nothing anymore.", + "The update log is deprecated since version 5.0, is replaced by the " + "binary log and this option does nothing anymore.", 0, 0, 0, GET_DISABLED, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sql-mode", OPT_SQL_MODE, - "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", - (uchar**) &sql_mode_str, (uchar**) &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, + "Syntax: sql-mode=option[,option[,option...]] where option can be one " + "of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, " + "ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", + &sql_mode_str, &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_OPENSSL #include "sslopt-longopts.h" @@ -6783,7 +6870,7 @@ log and this option does nothing anymore.", NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"symbolic-links", 's', "Enable symbolic link support.", - (uchar**) &my_use_symdir, (uchar**) &my_use_symdir, 0, GET_BOOL, NO_ARG, + &my_use_symdir, &my_use_symdir, 0, GET_BOOL, NO_ARG, /* The system call realpath() produces warnings under valgrind and purify. These are not suppressed: instead we disable symlinks @@ -6791,41 +6878,43 @@ log and this option does nothing anymore.", */ IF_VALGRIND(0,1), 0, 0, 0, 0, 0}, {"sysdate-is-now", OPT_SYSDATE_IS_NOW, - "Non-default option to alias SYSDATE() to NOW() to make it safe-replicable. Since 5.0, SYSDATE() returns a `dynamic' value different for different invocations, even within the same statement.", - (uchar**) &global_system_variables.sysdate_is_now, + "Non-default option to alias SYSDATE() to NOW() to make it safe-replicable. " + "Since 5.0, SYSDATE() returns a `dynamic' value different for different " + "invocations, even within the same statement.", + &global_system_variables.sysdate_is_now, 0, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, {"tc-heuristic-recover", OPT_TC_HEURISTIC_RECOVER, - "Decision to use in heuristic recover process. Possible values are COMMIT or ROLLBACK.", - (uchar**) &opt_tc_heuristic_recover, (uchar**) &opt_tc_heuristic_recover, + "Decision to use in heuristic recover process. 
Possible values are COMMIT " + "or ROLLBACK.", &opt_tc_heuristic_recover, &opt_tc_heuristic_recover, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #if defined(ENABLED_DEBUG_SYNC) {"debug-sync-timeout", OPT_DEBUG_SYNC_TIMEOUT, "Enable the debug sync facility " "and optionally specify a default wait timeout in seconds. " "A zero value keeps the facility disabled.", - (uchar**) &opt_debug_sync_timeout, 0, + &opt_debug_sync_timeout, 0, 0, GET_UINT, OPT_ARG, 0, 0, UINT_MAX, 0, 0, 0}, #endif /* defined(ENABLED_DEBUG_SYNC) */ {"temp-pool", OPT_TEMP_POOL, #if (ENABLE_TEMP_POOL) - "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", + "Using this option will cause most temporary files created to use a small " + "set of names, rather than a unique name for each new file.", #else "This option is ignored on this OS.", #endif - (uchar**) &use_temp_pool, (uchar**) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, + &use_temp_pool, &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, - {"test-ignore-wrong-options", OPT_TEST_IGNORE_WRONG_OPTIONS, "Ignore wrong enums values in command line arguments. Useful only for test scripts", - (uchar**) &opt_ignore_wrong_options, (uchar**) &opt_ignore_wrong_options, + &opt_ignore_wrong_options, &opt_ignore_wrong_options, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"test-expect-abort", OPT_TEST_RESTART, "Expect that server aborts with 'abort'; Don't write out server variables on 'abort'. Useful only for test scripts", - (uchar**) &opt_expect_abort, (uchar**) &opt_expect_abort, + &opt_expect_abort, &opt_expect_abort, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"timed_mutexes", OPT_TIMED_MUTEXES, "Specify whether to time mutexes (only InnoDB mutexes are currently supported).", - (uchar**) &timed_mutexes, (uchar**) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0, + &timed_mutexes, &timed_mutexes, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files. Several paths may be specified, separated by a " @@ -6835,133 +6924,140 @@ log and this option does nothing anymore.", "colon (:)" #endif ", in this case they are used in a round-robin fashion.", - (uchar**) &opt_mysql_tmpdir, - (uchar**) &opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + &opt_mysql_tmpdir, &opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"transaction-isolation", OPT_TX_ISOLATION, "Default transaction isolation level.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"use-symbolic-links", OPT_SYMBOLIC_LINKS, "Enable symbolic link support. Deprecated option; use --symbolic-links instead.", - (uchar**) &my_use_symdir, (uchar**) &my_use_symdir, 0, GET_BOOL, NO_ARG, + {"use-symbolic-links", OPT_SYMBOLIC_LINKS, + "Enable symbolic link support. 
" + "Deprecated option; use --symbolic-links instead.", + &my_use_symdir, &my_use_symdir, 0, GET_BOOL, NO_ARG, IF_VALGRIND(0,1), 0, 0, 0, 0, 0}, {"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Used with --help option for detailed help.", - (uchar**) &opt_verbose, (uchar**) &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, - 0, 0}, + &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"warnings", OPT_WARNINGS, "Deprecated; use --log-warnings instead.", - (uchar**) &global_system_variables.log_warnings, - (uchar**) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, - 1, 0, (longlong) ULONG_MAX, 0, 0, 0}, - { "back_log", OPT_BACK_LOG, - "The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.", - (uchar**) &back_log, (uchar**) &back_log, 0, GET_ULONG, - REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 }, + &global_system_variables.log_warnings, + &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, + 1, 0, ULONG_MAX, 0, 0, 0}, + {"back_log", OPT_BACK_LOG, + "The number of outstanding connection requests MySQL can have. This " + "comes into play when the main MySQL thread gets very many connection " + "requests in a very short time.", &back_log, &back_log, 0, GET_ULONG, + REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 }, {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, - "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", - (uchar**) &binlog_cache_size, (uchar**) &binlog_cache_size, 0, GET_ULONG, + "The size of the cache to hold the SQL statements for the binary log " + "during a transaction. If you often use big, multi-statement " + "transactions you can increase this to get more performance.", + &binlog_cache_size, &binlog_cache_size, 0, GET_ULONG, REQUIRED_ARG, 32*1024L, IO_SIZE, (longlong) ULONG_MAX, 0, IO_SIZE, 0}, {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, - "Size of tree cache used in bulk insert optimization. Note that this is a limit per thread.", - (uchar**) &global_system_variables.bulk_insert_buff_size, - (uchar**) &max_system_variables.bulk_insert_buff_size, + "Size of tree cache used in bulk insert optimization. 
Note that this " + "is a limit per thread.", &global_system_variables.bulk_insert_buff_size, + &max_system_variables.bulk_insert_buff_size, 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, (longlong) ULONG_MAX, 0, 1, 0}, {"connect_timeout", OPT_CONNECT_TIMEOUT, - "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.", - (uchar**) &connect_timeout, (uchar**) &connect_timeout, + "The number of seconds the mysqld server is waiting for a connect packet " + "before responding with 'Bad handshake'.", &connect_timeout, &connect_timeout, 0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 }, { "date_format", OPT_DATE_FORMAT, "The DATE format (for future).", - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], + &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], + &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "datetime_format", OPT_DATETIME_FORMAT, "The DATETIME/TIMESTAMP format (for future).", - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], + &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], + &opt_date_time_formats[MYSQL_TIMESTAMP_DATETIME], 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "default_week_format", OPT_DEFAULT_WEEK_FORMAT, "The default week format used by WEEK() functions.", - (uchar**) &global_system_variables.default_week_format, - (uchar**) &max_system_variables.default_week_format, + &global_system_variables.default_week_format, + &max_system_variables.default_week_format, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 7L, 0, 1, 0}, {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT, - "After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.", - (uchar**) &delayed_insert_limit, (uchar**) &delayed_insert_limit, 0, GET_ULONG, + "After inserting delayed_insert_limit rows, the INSERT DELAYED handler " + "will check if there are any SELECT statements pending. If so, it allows " + "these to execute before continuing.", + &delayed_insert_limit, &delayed_insert_limit, 0, GET_ULONG, REQUIRED_ARG, DELAYED_LIMIT, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", - (uchar**) &delayed_insert_timeout, (uchar**) &delayed_insert_timeout, 0, + &delayed_insert_timeout, &delayed_insert_timeout, 0, GET_ULONG, REQUIRED_ARG, DELAYED_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE, - "What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.", - (uchar**) &delayed_queue_size, (uchar**) &delayed_queue_size, 0, GET_ULONG, + "What size queue (in rows) should be allocated for handling INSERT DELAYED. 
" + "If the queue becomes full, any client that does INSERT DELAYED will wait " + "until there is room in the queue again.", + &delayed_queue_size, &delayed_queue_size, 0, GET_ULONG, REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"div_precision_increment", OPT_DIV_PRECINCREMENT, "Precision of the result of '/' operator will be increased on that value.", - (uchar**) &global_system_variables.div_precincrement, - (uchar**) &max_system_variables.div_precincrement, 0, GET_ULONG, + &global_system_variables.div_precincrement, + &max_system_variables.div_precincrement, 0, GET_ULONG, REQUIRED_ARG, 4, 0, DECIMAL_MAX_SCALE, 0, 0, 0}, {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS, "If non-zero, binary logs will be purged after expire_logs_days " "days; possible purges happen at startup and at binary log rotation.", - (uchar**) &expire_logs_days, - (uchar**) &expire_logs_days, 0, GET_ULONG, + &expire_logs_days, &expire_logs_days, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 99, 0, 1, 0}, { "flush_time", OPT_FLUSH_TIME, "A dedicated thread is created to flush all tables at the given interval.", - (uchar**) &flush_time, (uchar**) &flush_time, 0, GET_ULONG, REQUIRED_ARG, + &flush_time, &flush_time, 0, GET_ULONG, REQUIRED_ARG, FLUSH_TIME, 0, LONG_TIMEOUT, 0, 1, 0}, { "ft_boolean_syntax", OPT_FT_BOOLEAN_SYNTAX, "List of operators for MATCH ... AGAINST ( ... IN BOOLEAN MODE).", - 0, 0, 0, GET_STR, - REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, - "The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.", - (uchar**) &ft_max_word_len, (uchar**) &ft_max_word_len, 0, GET_ULONG, + "The maximum length of the word to be included in a FULLTEXT index. " + "Note: FULLTEXT indexes must be rebuilt after changing this variable.", + &ft_max_word_len, &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10, HA_FT_MAXCHARLEN, 0, 1, 0}, { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, - "The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.", - (uchar**) &ft_min_word_len, (uchar**) &ft_min_word_len, 0, GET_ULONG, + "The minimum length of the word to be included in a FULLTEXT index. 
" + "Note: FULLTEXT indexes must be rebuilt after changing this variable.", + &ft_min_word_len, &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN, 0, 1, 0}, { "ft_query_expansion_limit", OPT_FT_QUERY_EXPANSION_LIMIT, "Number of best matches to use for query expansion.", - (uchar**) &ft_query_expansion_limit, (uchar**) &ft_query_expansion_limit, 0, GET_ULONG, + &ft_query_expansion_limit, &ft_query_expansion_limit, 0, GET_ULONG, REQUIRED_ARG, 20, 0, 1000, 0, 1, 0}, { "ft_stopword_file", OPT_FT_STOPWORD_FILE, "Use stopwords from this file instead of built-in list.", - (uchar**) &ft_stopword_file, (uchar**) &ft_stopword_file, 0, GET_STR, + &ft_stopword_file, &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, "The maximum length of the result of function group_concat.", - (uchar**) &global_system_variables.group_concat_max_len, - (uchar**) &max_system_variables.group_concat_max_len, 0, GET_ULONG, + &global_system_variables.group_concat_max_len, + &max_system_variables.group_concat_max_len, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, (longlong) ULONG_MAX, 0, 1, 0}, {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, - "The number of seconds the server waits for activity on an interactive connection before closing it.", - (uchar**) &global_system_variables.net_interactive_timeout, - (uchar**) &max_system_variables.net_interactive_timeout, 0, + "The number of seconds the server waits for activity on an interactive " + "connection before closing it.", + &global_system_variables.net_interactive_timeout, + &max_system_variables.net_interactive_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, {"join_buffer_size", OPT_JOIN_BUFF_SIZE, "The size of the buffer that is used for full joins.", - (uchar**) &global_system_variables.join_buff_size, - (uchar**) &max_system_variables.join_buff_size, 0, GET_ULONG, + &global_system_variables.join_buff_size, + &max_system_variables.join_buff_size, 0, GET_ULONG, REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, (longlong) ULONG_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, {"keep_files_on_create", OPT_KEEP_FILES_ON_CREATE, "Don't overwrite stale .MYD and .MYI even if no directory is specified.", - (uchar**) &global_system_variables.keep_files_on_create, - (uchar**) &max_system_variables.keep_files_on_create, + &global_system_variables.keep_files_on_create, + &max_system_variables.keep_files_on_create, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"key_buffer_size", OPT_KEY_BUFFER_SIZE, "The size of the buffer used for index blocks for MyISAM tables. Increase " "this to get better index handling (for all reads and multiple writes) to " "as much as you can afford; 1GB on a 4GB machine that mainly runs MySQL is " "quite common.", - (uchar**) &dflt_key_cache_var.param_buff_size, - (uchar**) 0, - 0, (GET_ULL | GET_ASK_ADDR), + &dflt_key_cache_var.param_buff_size, NULL, NULL, (GET_ULL | GET_ASK_ADDR), REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, SIZE_T_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD, @@ -6969,60 +7065,58 @@ log and this option does nothing anymore.", "until it is considered aged enough to be downgraded to a warm block. 
" "This specifies the percentage ratio of that number of hits to the total " "number of blocks in key cache.", - (uchar**) &dflt_key_cache_var.param_age_threshold, - (uchar**) 0, - 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, - 300, 100, (longlong) ULONG_MAX, 0, 100, 0}, + &dflt_key_cache_var.param_age_threshold, 0, 0, + (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, 300, 100, (longlong) ULONG_MAX, + 0, 100, 0}, {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "The default size of key cache blocks.", - (uchar**) &dflt_key_cache_var.param_block_size, - (uchar**) 0, - 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, - KEY_CACHE_BLOCK_SIZE, 512, 1024 * 16, 0, 512, 0}, + &dflt_key_cache_var.param_block_size, NULL, NULL, (GET_ULONG | GET_ASK_ADDR), + REQUIRED_ARG, KEY_CACHE_BLOCK_SIZE, 512, 1024 * 16, 0, 512, 0}, {"key_cache_division_limit", OPT_KEY_CACHE_DIVISION_LIMIT, - "The minimum percentage of warm blocks in a key cache.", - (uchar**) &dflt_key_cache_var.param_division_limit, - (uchar**) 0, - 0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100, - 1, 100, 0, 1, 0}, + "The minimum percentage of warm blocks in key cache.", + &dflt_key_cache_var.param_division_limit, 0, 0, + (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100, 1, 100, 0, 1, 0}, {"key_cache_segments", OPT_KEY_CACHE_PARTITIONS, "The number of segments in a key cache", - (uchar**) &dflt_key_cache_var.param_partitions, - (uchar**) 0, + &dflt_key_cache_var.param_partitions, 0, 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, DEFAULT_KEY_CACHE_PARTITIONS, 0, MAX_KEY_CACHE_PARTITIONS, 0, 1, 0}, {"log-slow-filter", OPT_LOG_SLOW_FILTER, - "Log only the queries that followed certain execution plan. Multiple flags allowed in a comma-separated string. [admin, filesort, filesort_on_disk, full_join, full_scan, query_cache, query_cache_miss, tmp_table, tmp_table_on_disk]. Sets log-slow-admin-command to ON", + "Log only the queries that followed certain execution plan. Multiple flags " + "allowed in a comma-separated string. [admin, filesort, filesort_on_disk, " + "full_join, full_scan, query_cache, query_cache_miss, tmp_table, " + "tmp_table_on_disk]. Sets log-slow-admin-command to ON", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, QPLAN_ALWAYS_SET, 0, 0}, {"log-slow-rate_limit", OPT_LOG_SLOW_RATE_LIMIT, - "If set, only write to slow log every 'log_slow_rate_limit' query (use this to reduce output on slow query log)", - (uchar**) &global_system_variables.log_slow_rate_limit, - (uchar**) &max_system_variables.log_slow_rate_limit, 0, GET_ULONG, + "If set, only write to slow log every 'log_slow_rate_limit' query (use " + "this to reduce output on slow query log)", + &global_system_variables.log_slow_rate_limit, + &max_system_variables.log_slow_rate_limit, 0, GET_ULONG, REQUIRED_ARG, 1, 1, ~0L, 0, 1L, 0}, {"log-slow-verbosity", OPT_LOG_SLOW_VERBOSITY, - "Choose how verbose the messages to your slow log will be. Multiple flags allowed in a comma-separated string. [query_plan, innodb]", + "Choose how verbose the messages to your slow log will be. Multiple flags " + "allowed in a comma-separated string. [query_plan, innodb]", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"log-slow-file", OPT_SLOW_QUERY_LOG_FILE, "Log slow queries to given log file. Defaults logging to hostname-slow.log", - (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, + &opt_slow_logname, &opt_slow_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"long_query_time", OPT_LONG_QUERY_TIME, "Log all queries that have taken more than long_query_time seconds to " "execute. 
The argument will be treated as a decimal value with " "microsecond precision.", - (uchar**) &long_query_time, (uchar**) &long_query_time, 0, GET_DOUBLE, + &long_query_time, &long_query_time, 0, GET_DOUBLE, REQUIRED_ARG, 10, 0, LONG_TIMEOUT, 0, 0, 0}, {"log-slow-time", OPT_LONG_QUERY_TIME, "Log all queries that have taken more than long_query_time seconds to execute to file. " "The argument will be treated as a decimal value with microsecond precision.", - (uchar**) &long_query_time, (uchar**) &long_query_time, 0, GET_DOUBLE, + &long_query_time, &long_query_time, 0, GET_DOUBLE, REQUIRED_ARG, 10, 0, LONG_TIMEOUT, 0, 0, 0}, {"lower_case_table_names", OPT_LOWER_CASE_TABLE_NAMES, "If set to 1, table names are stored in lowercase on disk and table names " "will be case-insensitive. Should be set to 2 if you are using a case-" "insensitive file system.", - (uchar**) &lower_case_table_names, - (uchar**) &lower_case_table_names, 0, GET_UINT, OPT_ARG, + &lower_case_table_names, &lower_case_table_names, 0, GET_UINT, OPT_ARG, #ifdef FN_NO_CASE_SENCE 1 #else @@ -7031,184 +7125,207 @@ log and this option does nothing anymore.", , 0, 2, 0, 1, 0}, {"max_allowed_packet", OPT_MAX_ALLOWED_PACKET, "The maximum packet length to send to or receive from server.", - (uchar**) &global_system_variables.max_allowed_packet, - (uchar**) &max_system_variables.max_allowed_packet, 0, GET_ULONG, - REQUIRED_ARG, 1024*1024L, 1024, 1024L*1024L*1024L, 0, 1024, 0}, + &global_system_variables.max_allowed_packet, + &max_system_variables.max_allowed_packet, 0, GET_ULONG, + REQUIRED_ARG, 1024*1024L, 1024, 1024L*1024L*1024L, MALLOC_OVERHEAD, 1024, 0}, {"max_binlog_cache_size", OPT_MAX_BINLOG_CACHE_SIZE, "Can be used to restrict the total size used to cache a multi-transaction query.", - (uchar**) &max_binlog_cache_size, (uchar**) &max_binlog_cache_size, 0, + &max_binlog_cache_size, &max_binlog_cache_size, 0, GET_ULL, REQUIRED_ARG, ULONG_MAX, IO_SIZE, ULONGLONG_MAX, 0, IO_SIZE, 0}, {"max_binlog_size", OPT_MAX_BINLOG_SIZE, - "Binary log will be rotated automatically when the size exceeds this \ -value. Will also apply to relay logs if max_relay_log_size is 0. \ -The minimum value for this variable is 4096.", - (uchar**) &max_binlog_size, (uchar**) &max_binlog_size, 0, GET_ULONG, + "Binary log will be rotated automatically when the size exceeds this " "value. Will also apply to relay logs if max_relay_log_size is 0. 
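Buried in the reflow above is one genuine behavioral tweak: max_allowed_packet's sub_size column changes from 0 to MALLOC_OVERHEAD, so the stored value is reduced by the allocator's bookkeeping overhead before the usual 1024-byte rounding. Under that reading (illustrative arithmetic only):

/* e.g. --max_allowed_packet=16M with the columns above */
ulong requested= 16UL*1024*1024;
ulong effective= (requested - MALLOC_OVERHEAD) / 1024 * 1024;
/* leaves room for malloc() bookkeeping within the requested budget */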
" + "The minimum value for this variable is 4096.", + &max_binlog_size, &max_binlog_size, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L*1024L, IO_SIZE, 1024*1024L*1024L, 0, IO_SIZE, 0}, {"max_connect_errors", OPT_MAX_CONNECT_ERRORS, - "If there is more than this number of interrupted connections from a host this host will be blocked from further connections.", - (uchar**) &max_connect_errors, (uchar**) &max_connect_errors, 0, GET_ULONG, - REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, (longlong) ULONG_MAX, 0, 1, 0}, + "If there is more than this number of interrupted connections from a host " + "this host will be blocked from further connections.", + &max_connect_errors, &max_connect_errors, 0, GET_ULONG, + REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, (longlong) ULONG_MAX, 0, 1, 0}, // Default max_connections of 151 is larger than Apache's default max // children, to avoid "too many connections" error in a common setup {"max_connections", OPT_MAX_CONNECTIONS, - "The number of simultaneous clients allowed.", (uchar**) &max_connections, - (uchar**) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 100000, 0, 1, - 0}, + "The number of simultaneous clients allowed.", &max_connections, + &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 100000, 0, 1, 0}, {"max_delayed_threads", OPT_MAX_DELAYED_THREADS, - "Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.", - (uchar**) &global_system_variables.max_insert_delayed_threads, - (uchar**) &max_system_variables.max_insert_delayed_threads, + "Don't start more than this number of threads to handle INSERT DELAYED " + "statements. If set to zero, which means INSERT DELAYED is not used.", + &global_system_variables.max_insert_delayed_threads, + &max_system_variables.max_insert_delayed_threads, 0, GET_ULONG, REQUIRED_ARG, 20, 0, 16384, 0, 1, 0}, {"max_error_count", OPT_MAX_ERROR_COUNT, "Max number of errors/warnings to store for a statement.", - (uchar**) &global_system_variables.max_error_count, - (uchar**) &max_system_variables.max_error_count, + &global_system_variables.max_error_count, + &max_system_variables.max_error_count, 0, GET_ULONG, REQUIRED_ARG, DEFAULT_ERROR_COUNT, 0, 65535, 0, 1, 0}, {"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE, "Don't allow creation of heap tables bigger than this.", - (uchar**) &global_system_variables.max_heap_table_size, - (uchar**) &max_system_variables.max_heap_table_size, 0, GET_ULL, + &global_system_variables.max_heap_table_size, + &max_system_variables.max_heap_table_size, 0, GET_ULL, REQUIRED_ARG, 16*1024*1024L, 16384, MAX_MEM_TABLE_SIZE, MALLOC_OVERHEAD, 1024, 0}, {"max_join_size", OPT_MAX_JOIN_SIZE, "Joins that are probably going to read more than max_join_size records return an error.", - (uchar**) &global_system_variables.max_join_size, - (uchar**) &max_system_variables.max_join_size, 0, GET_HA_ROWS, REQUIRED_ARG, + &global_system_variables.max_join_size, + &max_system_variables.max_join_size, 0, GET_HA_ROWS, REQUIRED_ARG, HA_POS_ERROR, 1, HA_POS_ERROR, 0, 1, 0}, {"max_length_for_sort_data", OPT_MAX_LENGTH_FOR_SORT_DATA, "Max number of bytes in sorted records.", - (uchar**) &global_system_variables.max_length_for_sort_data, - (uchar**) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, + &global_system_variables.max_length_for_sort_data, + &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT, "Maximum number of prepared 
statements in the server.", - (uchar**) &max_prepared_stmt_count, (uchar**) &max_prepared_stmt_count, + &max_prepared_stmt_count, &max_prepared_stmt_count, 0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0}, {"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE, - "If non-zero: relay log will be rotated automatically when the size exceeds this value; if zero (the default): when the size exceeds max_binlog_size. 0 excepted, the minimum value for this variable is 4096.", - (uchar**) &max_relay_log_size, (uchar**) &max_relay_log_size, 0, GET_ULONG, + "If non-zero: relay log will be rotated automatically when the size " + "exceeds this value; if zero (the default): when the size exceeds " + "max_binlog_size. 0 excepted, the minimum value for this variable is 4096.", + &max_relay_log_size, &max_relay_log_size, 0, GET_ULONG, REQUIRED_ARG, 0L, 0L, 1024*1024L*1024L, 0, IO_SIZE, 0}, { "max_seeks_for_key", OPT_MAX_SEEKS_FOR_KEY, "Limit assumed max number of seeks when looking up rows based on a key.", - (uchar**) &global_system_variables.max_seeks_for_key, - (uchar**) &max_system_variables.max_seeks_for_key, 0, GET_ULONG, + &global_system_variables.max_seeks_for_key, + &max_system_variables.max_seeks_for_key, 0, GET_ULONG, REQUIRED_ARG, (longlong) ULONG_MAX, 1, (longlong) ULONG_MAX, 0, 1, 0 }, {"max_sort_length", OPT_MAX_SORT_LENGTH, - "The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored).", - (uchar**) &global_system_variables.max_sort_length, - (uchar**) &max_system_variables.max_sort_length, 0, GET_ULONG, + "The number of bytes to use when sorting BLOB or TEXT values (only the " + "first max_sort_length bytes of each value are used; the rest are ignored).", + &global_system_variables.max_sort_length, + &max_system_variables.max_sort_length, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, {"max_sp_recursion_depth", OPT_MAX_SP_RECURSION_DEPTH, "Maximum stored procedure recursion depth. 
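For readers decoding these entries: the numeric tail of each option is, in order, the default, minimum, maximum, a value subtracted from user input (e.g. MALLOC_OVERHEAD), and a block size. A sketch of the adjustment this presumably implies, modelled on my_getopt-style handling (clamp into [min, max], round down to a block multiple; exact order of operations is an assumption):

    #include <cstdio>

    unsigned long long adjust_option(unsigned long long requested,
                                     unsigned long long min_value,
                                     unsigned long long max_value,
                                     unsigned long long block_size) {
      // sub_size handling omitted; assumed subtracted before this step.
      if (requested < min_value) requested= min_value;
      if (requested > max_value) requested= max_value;
      if (block_size > 1) requested-= requested % block_size;
      return requested;
    }

    int main() {
      // key_cache_block_size above: min 512, max 16K, block 512
      std::printf("%llu\n", adjust_option(1000, 512, 16 * 1024, 512));
      return 0;
    }
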
(discussed with docs).", - (uchar**) &global_system_variables.max_sp_recursion_depth, - (uchar**) &max_system_variables.max_sp_recursion_depth, 0, GET_ULONG, + &global_system_variables.max_sp_recursion_depth, + &max_system_variables.max_sp_recursion_depth, 0, GET_ULONG, OPT_ARG, 0, 0, 255, 0, 1, 0 }, {"max_tmp_tables", OPT_MAX_TMP_TABLES, "Maximum number of temporary tables a client can keep open at a time.", - (uchar**) &global_system_variables.max_tmp_tables, - (uchar**) &max_system_variables.max_tmp_tables, 0, GET_ULONG, + &global_system_variables.max_tmp_tables, + &max_system_variables.max_tmp_tables, 0, GET_ULONG, REQUIRED_ARG, 32, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"max_user_connections", OPT_MAX_USER_CONNECTIONS, "The maximum number of active connections for a single user (0 = no limit).", - (uchar**) &max_user_connections, (uchar**) &max_user_connections, 0, GET_UINT, + &max_user_connections, &max_user_connections, 0, GET_UINT, REQUIRED_ARG, 0, 0, UINT_MAX, 0, 1, 0}, {"max_write_lock_count", OPT_MAX_WRITE_LOCK_COUNT, "After this many write locks, allow some read locks to run in between.", - (uchar**) &max_write_lock_count, (uchar**) &max_write_lock_count, 0, GET_ULONG, + &max_write_lock_count, &max_write_lock_count, 0, GET_ULONG, REQUIRED_ARG, (longlong) ULONG_MAX, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"min_examined_row_limit", OPT_MIN_EXAMINED_ROW_LIMIT, "Don't log queries which examine less than min_examined_row_limit rows to file.", - (uchar**) &global_system_variables.min_examined_row_limit, - (uchar**) &max_system_variables.min_examined_row_limit, 0, GET_ULONG, + &global_system_variables.min_examined_row_limit, + &max_system_variables.min_examined_row_limit, 0, GET_ULONG, REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1L, 0}, {"multi_range_count", OPT_MULTI_RANGE_COUNT, "Number of key ranges to request at once.", - (uchar**) &global_system_variables.multi_range_count, - (uchar**) &max_system_variables.multi_range_count, 0, + &global_system_variables.multi_range_count, + &max_system_variables.multi_range_count, 0, GET_ULONG, REQUIRED_ARG, 256, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", - (uchar**) &opt_myisam_block_size, - (uchar**) &opt_myisam_block_size, 0, GET_ULONG, REQUIRED_ARG, + &opt_myisam_block_size, &opt_myisam_block_size, 0, GET_ULONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0}, {"myisam_data_pointer_size", OPT_MYISAM_DATA_POINTER_SIZE, "Default pointer size to be used for MyISAM tables.", - (uchar**) &myisam_data_pointer_size, - (uchar**) &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG, + &myisam_data_pointer_size, + &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG, 6, 2, 7, 0, 1, 0}, + {"myisam_max_extra_sort_file_size", OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, + "This is a deprecated option that does nothing anymore. 
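The entry above shows the usual way an option is retired without breaking existing configuration files: it stays in the table and still parses, but its value is never consulted. A sketch of that pattern (assumed handling; the server's actual deprecation warning goes through its warning machinery, not stderr):

    #include <cstdio>

    // Accept and store the value so old my.cnf files keep working,
    // but only emit a warning; nothing ever reads the setting.
    void warn_deprecated(const char *name) {
      std::fprintf(stderr,
                   "Warning: '%s' is deprecated and has no effect\n", name);
    }

    int main() {
      warn_deprecated("myisam_max_extra_sort_file_size");
      return 0;
    }
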
" + "It will be removed in MySQL " VER_CELOSIA, + &global_system_variables.myisam_max_extra_sort_file_size, + &max_system_variables.myisam_max_extra_sort_file_size, + 0, GET_ULL, REQUIRED_ARG, (ulonglong) INT_MAX32, + 0, (ulonglong) MAX_FILE_SIZE, 0, 1, 0}, {"myisam_max_sort_file_size", OPT_MYISAM_MAX_SORT_FILE_SIZE, - "Don't use the fast sort index method to created index if the temporary file would get bigger than this.", - (uchar**) &global_system_variables.myisam_max_sort_file_size, - (uchar**) &max_system_variables.myisam_max_sort_file_size, 0, + "Don't use the fast sort index method to created index if the temporary " + "file would get bigger than this.", + &global_system_variables.myisam_max_sort_file_size, + &max_system_variables.myisam_max_sort_file_size, 0, GET_ULL, REQUIRED_ARG, (longlong) LONG_MAX, 0, (ulonglong) MAX_FILE_SIZE, 0, 1024*1024, 0}, {"myisam_mmap_size", OPT_MYISAM_MMAP_SIZE, "Can be used to restrict the total memory used for memory mmaping of myisam files", - (uchar**) &myisam_mmap_size, (uchar**) &myisam_mmap_size, 0, + &myisam_mmap_size, &myisam_mmap_size, 0, GET_ULL, REQUIRED_ARG, SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 0, 1, 0}, {"myisam_repair_threads", OPT_MYISAM_REPAIR_THREADS, "Specifies whether several threads should be used when repairing MyISAM " "tables. For values > 1, one thread is used per index. The value of 1 " "disables parallel repair.", - (uchar**) &global_system_variables.myisam_repair_threads, - (uchar**) &max_system_variables.myisam_repair_threads, 0, + &global_system_variables.myisam_repair_threads, + &max_system_variables.myisam_repair_threads, 0, GET_ULONG, REQUIRED_ARG, 1, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"myisam_sort_buffer_size", OPT_MYISAM_SORT_BUFFER_SIZE, - "The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE.", - (uchar**) &global_system_variables.myisam_sort_buff_size, - (uchar**) &max_system_variables.myisam_sort_buff_size, 0, + "The buffer that is allocated when sorting the index when doing a REPAIR " + "or when creating indexes with CREATE INDEX or ALTER TABLE.", + &global_system_variables.myisam_sort_buff_size, + &max_system_variables.myisam_sort_buff_size, 0, GET_ULONG, REQUIRED_ARG, 8192 * 1024, 4096, (longlong) ULONG_MAX, 0, 1, 0}, {"myisam_use_mmap", OPT_MYISAM_USE_MMAP, "Use memory mapping for reading and writing MyISAM tables.", - (uchar**) &opt_myisam_use_mmap, - (uchar**) &opt_myisam_use_mmap, 0, GET_BOOL, NO_ARG, 0, - 0, 0, 0, 0, 0}, + &opt_myisam_use_mmap, &opt_myisam_use_mmap, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, {"myisam_stats_method", OPT_MYISAM_STATS_METHOD, "Specifies how MyISAM index statistics collection code should threat NULLs. 
" "Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), " "\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".", - (uchar**) &myisam_stats_method_str, (uchar**) &myisam_stats_method_str, 0, + &myisam_stats_method_str, &myisam_stats_method_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"net_buffer_length", OPT_NET_BUFFER_LENGTH, "Buffer length for TCP/IP and socket communication.", - (uchar**) &global_system_variables.net_buffer_length, - (uchar**) &max_system_variables.net_buffer_length, 0, GET_ULONG, + &global_system_variables.net_buffer_length, + &max_system_variables.net_buffer_length, 0, GET_ULONG, REQUIRED_ARG, 16384, 1024, 1024*1024L, 0, 1024, 0}, {"net_read_timeout", OPT_NET_READ_TIMEOUT, "Number of seconds to wait for more data from a connection before aborting the read.", - (uchar**) &global_system_variables.net_read_timeout, - (uchar**) &max_system_variables.net_read_timeout, 0, GET_ULONG, + &global_system_variables.net_read_timeout, + &max_system_variables.net_read_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_READ_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, {"net_retry_count", OPT_NET_RETRY_COUNT, "If a read on a communication port is interrupted, retry this many times before giving up.", - (uchar**) &global_system_variables.net_retry_count, - (uchar**) &max_system_variables.net_retry_count,0, + &global_system_variables.net_retry_count, + &max_system_variables.net_retry_count,0, GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, (longlong) ULONG_MAX, 0, 1, 0}, {"net_write_timeout", OPT_NET_WRITE_TIMEOUT, "Number of seconds to wait for a block to be written to a connection before " "aborting the write.", - (uchar**) &global_system_variables.net_write_timeout, - (uchar**) &max_system_variables.net_write_timeout, 0, GET_ULONG, + &global_system_variables.net_write_timeout, + &max_system_variables.net_write_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, - {"old", OPT_OLD_MODE, "Use compatible behavior.", - (uchar**) &global_system_variables.old_mode, - (uchar**) &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, + { "old", OPT_OLD_MODE, "Use compatible behavior.", + &global_system_variables.old_mode, + &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"open_files_limit", OPT_OPEN_FILES_LIMIT, - "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", - (uchar**) &open_files_limit, (uchar**) &open_files_limit, 0, GET_ULONG, + "If this is not 0, then mysqld will use this value to reserve file " + "descriptors to use with setrlimit(). If this value is 0 then mysqld " + "will reserve max_connections*5 or max_connections + table_cache*2 " + "(whichever is larger) number of files.", + &open_files_limit, &open_files_limit, 0, GET_ULONG, REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0}, {"optimizer_prune_level", OPT_OPTIMIZER_PRUNE_LEVEL, - "Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. 
Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows.", - (uchar**) &global_system_variables.optimizer_prune_level, - (uchar**) &max_system_variables.optimizer_prune_level, + "Controls the heuristic(s) applied during query optimization to prune " + "less-promising partial plans from the optimizer search space. Meaning: " + "0 - do not apply any heuristic, thus perform exhaustive search; 1 - " + "prune plans based on number of retrieved rows.", + &global_system_variables.optimizer_prune_level, + &max_system_variables.optimizer_prune_level, 0, GET_ULONG, OPT_ARG, 1, 0, 1, 0, 1, 0}, {"optimizer_search_depth", OPT_OPTIMIZER_SEARCH_DEPTH, - "Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Smaller values than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value; if set to MAX_TABLES+2, the optimizer will switch to the original find_best (used for testing/comparison).", - (uchar**) &global_system_variables.optimizer_search_depth, - (uchar**) &max_system_variables.optimizer_search_depth, + "Maximum depth of search performed by the query optimizer. Values larger " + "than the number of relations in a query result in better query plans, " + "but take longer to compile a query. Smaller values than the number of " + "tables in a relation result in faster optimization, but may produce " + "very bad query plans. If set to 0, the system will automatically pick " + "a reasonable value; if set to MAX_TABLES+2, the optimizer will switch " + "to the original find_best (used for testing/comparison).", + &global_system_variables.optimizer_search_depth, + &max_system_variables.optimizer_search_depth, 0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0}, {"optimizer_switch", OPT_OPTIMIZER_SWITCH, "optimizer_switch=option=val[,option=val...], where option={index_merge, " @@ -7217,18 +7334,17 @@ The minimum value for this variable is 4096.", ", table_elimination" #endif "} and val={on, off, default}.", - (uchar**) &optimizer_switch_str, (uchar**) &optimizer_switch_str, 0, GET_STR, REQUIRED_ARG, - /*OPTIMIZER_SWITCH_DEFAULT*/0, - 0, 0, 0, 0, 0}, - {"plugin-dir", OPT_PLUGIN_DIR, + &optimizer_switch_str, &optimizer_switch_str, 0, GET_STR, REQUIRED_ARG, + /*OPTIMIZER_SWITCH_DEFAULT*/0, 0, 0, 0, 0, 0}, + {"plugin_dir", OPT_PLUGIN_DIR, "Directory for plugins.", - (uchar**) &opt_plugin_dir_ptr, (uchar**) &opt_plugin_dir_ptr, 0, + &opt_plugin_dir_ptr, &opt_plugin_dir_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"plugin-load", OPT_PLUGIN_LOAD, "Optional semicolon-separated list of plugins to load, where each plugin is " "identified as name=library, where name is the plugin name and library " "is the plugin library in plugin_dir.", - (uchar**) &opt_plugin_load, (uchar**) &opt_plugin_load, 0, + &opt_plugin_load, &opt_plugin_load, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"plugin-maturity", OPT_PLUGIN_MATURITY, "The lowest desirable plugin maturity. 
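Returning to optimizer_search_depth a few entries up: the trade-off its help text describes is easy to quantify, since the number of join orders considered grows factorially with the table count and the depth limit caps the per-step look-ahead. Illustration only:

    #include <cstdio>

    // Upper bound on partial join orders examined in one search step:
    // tables * (tables-1) * ... over `depth` positions.
    unsigned long long partial_orders(unsigned tables, unsigned depth) {
      unsigned long long n= 1;
      for (unsigned i= 0; i < depth && i < tables; i++)
        n*= tables - i;
      return n;
    }

    int main() {
      // 12 tables: full depth examines 12! (about 4.8e8) orders,
      // depth 4 only about 12 thousand per step.
      std::printf("%llu vs %llu\n",
                  partial_orders(12, 12), partial_orders(12, 4));
      return 0;
    }
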
Plugins less mature than that will not be installed or loaded.", @@ -7236,199 +7352,207 @@ The minimum value for this variable is 4096.", GET_ENUM, REQUIRED_ARG, server_maturity, 0, 0, 0, 0, 0}, {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE, "The size of the buffer that is allocated when preloading indexes.", - (uchar**) &global_system_variables.preload_buff_size, - (uchar**) &max_system_variables.preload_buff_size, 0, GET_ULONG, + &global_system_variables.preload_buff_size, + &max_system_variables.preload_buff_size, 0, GET_ULONG, REQUIRED_ARG, 32*1024L, 1024, 1024*1024*1024L, 0, 1, 0}, {"query_alloc_block_size", OPT_QUERY_ALLOC_BLOCK_SIZE, "Allocation block size for query parsing and execution.", - (uchar**) &global_system_variables.query_alloc_block_size, - (uchar**) &max_system_variables.query_alloc_block_size, 0, GET_ULONG, + &global_system_variables.query_alloc_block_size, + &max_system_variables.query_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024, 0}, #ifdef HAVE_QUERY_CACHE {"query_cache_limit", OPT_QUERY_CACHE_LIMIT, "Don't cache results that are bigger than this.", - (uchar**) &query_cache_limit, (uchar**) &query_cache_limit, 0, GET_ULONG, + &query_cache_limit, &query_cache_limit, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L, 0, (longlong) ULONG_MAX, 0, 1, 0}, {"query_cache_min_res_unit", OPT_QUERY_CACHE_MIN_RES_UNIT, "Minimal size of unit in which space for results is allocated (last unit " "will be trimmed after writing all result data).", - (uchar**) &query_cache_min_res_unit, (uchar**) &query_cache_min_res_unit, + &query_cache_min_res_unit, &query_cache_min_res_unit, 0, GET_ULONG, REQUIRED_ARG, QUERY_CACHE_MIN_RESULT_DATA_SIZE, 0, (longlong) ULONG_MAX, 0, 1, 0}, #endif /*HAVE_QUERY_CACHE*/ {"query_cache_size", OPT_QUERY_CACHE_SIZE, "The memory allocated to store results from old queries.", - (uchar**) &query_cache_size, (uchar**) &query_cache_size, 0, GET_ULONG, + &query_cache_size, &query_cache_size, 0, GET_ULONG, REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1024, 0}, #ifdef HAVE_QUERY_CACHE {"query_cache_type", OPT_QUERY_CACHE_TYPE, - "0 = OFF = Don't cache or retrieve results. 1 = ON = Cache all results except SELECT SQL_NO_CACHE ... queries. 2 = DEMAND = Cache only SELECT SQL_CACHE ... queries.", - (uchar**) &global_system_variables.query_cache_type, - (uchar**) &max_system_variables.query_cache_type, + "0 = OFF = Don't cache or retrieve results. 1 = ON = Cache all results " + "except SELECT SQL_NO_CACHE ... queries. 2 = DEMAND = Cache only SELECT " + "SQL_CACHE ... 
queries.", &global_system_variables.query_cache_type, + &max_system_variables.query_cache_type, 0, GET_ULONG, REQUIRED_ARG, 1, 0, 2, 0, 1, 0}, {"query_cache_wlock_invalidate", OPT_QUERY_CACHE_WLOCK_INVALIDATE, "Invalidate queries in query cache on LOCK for write.", - (uchar**) &global_system_variables.query_cache_wlock_invalidate, - (uchar**) &max_system_variables.query_cache_wlock_invalidate, + &global_system_variables.query_cache_wlock_invalidate, + &max_system_variables.query_cache_wlock_invalidate, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, #endif /*HAVE_QUERY_CACHE*/ {"query_prealloc_size", OPT_QUERY_PREALLOC_SIZE, "Persistent buffer for query parsing and execution.", - (uchar**) &global_system_variables.query_prealloc_size, - (uchar**) &max_system_variables.query_prealloc_size, 0, GET_ULONG, + &global_system_variables.query_prealloc_size, + &max_system_variables.query_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE, (longlong) ULONG_MAX, 0, 1024, 0}, {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, "Allocation block size for storing ranges during optimization.", - (uchar**) &global_system_variables.range_alloc_block_size, - (uchar**) &max_system_variables.range_alloc_block_size, 0, GET_ULONG, + &global_system_variables.range_alloc_block_size, + &max_system_variables.range_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE, (longlong) ULONG_MAX, 0, 1024, 0}, {"read_buffer_size", OPT_RECORD_BUFFER, - "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.", - (uchar**) &global_system_variables.read_buff_size, - (uchar**) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, + "Each thread that does a sequential scan allocates a buffer of this size " + "for each table it scans. If you do many sequential scans, you may want " + "to increase this value.", &global_system_variables.read_buff_size, + &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0}, {"read_only", OPT_READONLY, "Make all non-temporary tables read-only, with the exception of replication " "(slave) threads and users with the SUPER privilege.", - (uchar**) &opt_readonly, - (uchar**) &opt_readonly, + &opt_readonly, + &opt_readonly, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, {"read_rnd_buffer_size", OPT_RECORD_RND_BUFFER, "When reading rows in sorted order after a sort, the rows are read through " "this buffer to avoid disk seeks. If not set, then it's set to the value of " "record_buffer.", - (uchar**) &global_system_variables.read_rnd_buff_size, - (uchar**) &max_system_variables.read_rnd_buff_size, 0, + &global_system_variables.read_rnd_buff_size, + &max_system_variables.read_rnd_buff_size, 0, GET_ULONG, REQUIRED_ARG, 256*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0}, {"record_buffer", OPT_RECORD_BUFFER_OLD, "Alias for read_buffer_size. 
This variable is deprecated and will be removed in a future release.", - (uchar**) &global_system_variables.read_buff_size, - (uchar**) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, + &global_system_variables.read_buff_size, + &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, INT_MAX32, MALLOC_OVERHEAD, IO_SIZE, 0}, #ifdef HAVE_REPLICATION {"relay_log_purge", OPT_RELAY_LOG_PURGE, "0 = do not purge relay logs. 1 = purge them as soon as they are no more needed.", - (uchar**) &relay_log_purge, - (uchar**) &relay_log_purge, 0, GET_BOOL, NO_ARG, + &relay_log_purge, + &relay_log_purge, 0, GET_BOOL, NO_ARG, 1, 0, 1, 0, 1, 0}, {"relay_log_space_limit", OPT_RELAY_LOG_SPACE_LIMIT, "Maximum space to use for all relay logs.", - (uchar**) &relay_log_space_limit, - (uchar**) &relay_log_space_limit, 0, GET_ULL, REQUIRED_ARG, 0L, 0L, + &relay_log_space_limit, + &relay_log_space_limit, 0, GET_ULL, REQUIRED_ARG, 0L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, {"slave_compressed_protocol", OPT_SLAVE_COMPRESSED_PROTOCOL, "Use compression on master/slave protocol.", - (uchar**) &opt_slave_compressed_protocol, - (uchar**) &opt_slave_compressed_protocol, + &opt_slave_compressed_protocol, + &opt_slave_compressed_protocol, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, {"slave_net_timeout", OPT_SLAVE_NET_TIMEOUT, "Number of seconds to wait for more data from a master/slave connection before aborting the read.", - (uchar**) &slave_net_timeout, (uchar**) &slave_net_timeout, 0, + &slave_net_timeout, &slave_net_timeout, 0, GET_ULONG, REQUIRED_ARG, SLAVE_NET_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, {"slave_transaction_retries", OPT_SLAVE_TRANS_RETRIES, "Number of times the slave SQL thread will retry a transaction in case " "it failed with a deadlock or elapsed lock wait timeout, " "before giving up and stopping.", - (uchar**) &slave_trans_retries, (uchar**) &slave_trans_retries, 0, + &slave_trans_retries, &slave_trans_retries, 0, GET_ULONG, REQUIRED_ARG, 10L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, #endif /* HAVE_REPLICATION */ {"slow_launch_time", OPT_SLOW_LAUNCH_TIME, - "If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.", - (uchar**) &slow_launch_time, (uchar**) &slow_launch_time, 0, GET_ULONG, + "If creating the thread takes longer than this value (in seconds), " + "the Slow_launch_threads counter will be incremented.", + &slow_launch_time, &slow_launch_time, 0, GET_ULONG, REQUIRED_ARG, 2L, 0L, LONG_TIMEOUT, 0, 1, 0}, {"sort_buffer_size", OPT_SORT_BUFFER, "Each thread that needs to do a sort allocates a buffer of this size.", - (uchar**) &global_system_variables.sortbuff_size, - (uchar**) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, + &global_system_variables.sortbuff_size, + &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, (longlong) ULONG_MAX, MALLOC_OVERHEAD, 1, 0}, {"sync-binlog", OPT_SYNC_BINLOG, "Synchronously flush binary log to disk after every #th event. " "Use 0 (default) to disable synchronous flushing.", - (uchar**) &sync_binlog_period, (uchar**) &sync_binlog_period, 0, GET_ULONG, + &sync_binlog_period, &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1, 0}, {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. 
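The sync-binlog behaviour described above, flushing after every N-th event with 0 meaning never, amounts to a counter like the following sketch (assumed logic; the real code sits in the binary log class under its lock):

    struct binlog_syncer {
      unsigned long period;          // sync_binlog_period
      unsigned long unsynced;        // writes since the last fsync
      bool record_write() {          // returns true when an fsync is due
        if (!period)
          return false;              // 0 disables synchronous flushing
        if (++unsynced >= period) {
          unsynced= 0;
          return true;
        }
        return false;
      }
    };

    int main() {
      binlog_syncer s= {3, 0};       // sync every 3rd event
      int syncs= 0;
      for (int i= 0; i < 9; i++)
        syncs+= s.record_write();
      return syncs == 3 ? 0 : 1;
    }
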
Enabled by default.", - (uchar**) &opt_sync_frm, (uchar**) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, + &opt_sync_frm, &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"table_cache", OPT_TABLE_OPEN_CACHE, "Deprecated; use --table_open_cache instead.", - (uchar**) &table_cache_size, (uchar**) &table_cache_size, 0, GET_ULONG, + &table_cache_size, &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0}, {"table_definition_cache", OPT_TABLE_DEF_CACHE, "The number of cached table definitions.", - (uchar**) &table_def_size, (uchar**) &table_def_size, + &table_def_size, &table_def_size, 0, GET_ULONG, REQUIRED_ARG, TABLE_DEF_CACHE_DEFAULT, TABLE_DEF_CACHE_MIN, 512*1024L, 0, 1, 0}, {"table_open_cache", OPT_TABLE_OPEN_CACHE, "The number of cached open tables.", - (uchar**) &table_cache_size, (uchar**) &table_cache_size, 0, GET_ULONG, + &table_cache_size, &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0}, {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in seconds to wait for a table level lock before returning an " "error. Used only if the connection has active cursors.", - (uchar**) &table_lock_wait_timeout, (uchar**) &table_lock_wait_timeout, + &table_lock_wait_timeout, &table_lock_wait_timeout, 0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0}, {"thread_cache_size", OPT_THREAD_CACHE_SIZE, "How many threads we should keep in a cache for reuse.", - (uchar**) &thread_cache_size, (uchar**) &thread_cache_size, 0, GET_ULONG, + &thread_cache_size, &thread_cache_size, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 16384, 0, 1, 0}, {"thread_concurrency", OPT_THREAD_CONCURRENCY, - "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.", - (uchar**) &concurrency, (uchar**) &concurrency, 0, GET_ULONG, REQUIRED_ARG, + "Permits the application to give the threads system a hint for the " + "desired number of threads that should be run at the same time.", + &concurrency, &concurrency, 0, GET_ULONG, REQUIRED_ARG, DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0}, #if HAVE_POOL_OF_THREADS == 1 {"thread_pool_size", OPT_THREAD_CACHE_SIZE, "How many threads we should create to handle query requests in case of " "'thread_handling=pool-of-threads'.", - (uchar**) &thread_pool_size, (uchar**) &thread_pool_size, 0, GET_ULONG, + &thread_pool_size, &thread_pool_size, 0, GET_ULONG, REQUIRED_ARG, 20, 1, 16384, 0, 1, 0}, #endif {"thread_stack", OPT_THREAD_STACK, - "The stack size for each thread.", (uchar**) &my_thread_stack_size, - (uchar**) &my_thread_stack_size, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, + "The stack size for each thread.", &my_thread_stack_size, + &my_thread_stack_size, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, (sizeof(void*)<=4)?1024L*128L: ((256-16)*1024L), (longlong) ULONG_MAX, 0, 1024, 0}, { "time_format", OPT_TIME_FORMAT, "The TIME format (for future).", - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], - (uchar**) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], + &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], + &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tmp_table_size", OPT_TMP_TABLE_SIZE, - "If an internal in-memory temporary table exceeds this size, MySQL will" - " automatically convert it to an on-disk MyISAM/Maria table.", - (uchar**) &global_system_variables.tmp_table_size, - (uchar**) &max_system_variables.tmp_table_size, 0, GET_ULL, - REQUIRED_ARG, 
16*1024*1024L, 0, MAX_MEM_TABLE_SIZE, 0, 1, 0}, + "If an internal in-memory temporary table exceeds this size, MySQL will " + "automatically convert it to an on-disk MyISAM/Maria table.", + &global_system_variables.tmp_table_size, + &max_system_variables.tmp_table_size, 0, GET_ULL, + REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, "Allocation block size for transactions to be stored in binary log.", - (uchar**) &global_system_variables.trans_alloc_block_size, - (uchar**) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, + &global_system_variables.trans_alloc_block_size, + &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, "Persistent buffer for transactions to be stored in binary log.", - (uchar**) &global_system_variables.trans_prealloc_size, - (uchar**) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, + &global_system_variables.trans_prealloc_size, + &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024, 0}, {"thread_handling", OPT_THREAD_HANDLING, "Define threads usage for handling queries: " "one-thread-per-connection or no-threads.", - (uchar**) &opt_thread_handling, (uchar**) &opt_thread_handling, + &opt_thread_handling, &opt_thread_handling, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, - "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", - (uchar**) &global_system_variables.updatable_views_with_limit, - (uchar**) &max_system_variables.updatable_views_with_limit, + "1 = YES = Don't issue an error message (warning only) if a VIEW without " + "presence of a key of the underlying table is used in queries with a " + "LIMIT clause for updating. 
0 = NO = Prohibit update of a VIEW, which " + "does not contain a key of the underlying table and the query uses a " + "LIMIT clause (usually get from GUI tools).", + &global_system_variables.updatable_views_with_limit, + &max_system_variables.updatable_views_with_limit, 0, GET_ULONG, REQUIRED_ARG, 1, 0, 1, 0, 1, 0}, {"wait_timeout", OPT_WAIT_TIMEOUT, "The number of seconds the server waits for activity on a connection before closing it.", - (uchar**) &global_system_variables.net_wait_timeout, - (uchar**) &max_system_variables.net_wait_timeout, 0, GET_ULONG, + &global_system_variables.net_wait_timeout, + &max_system_variables.net_wait_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT), 0, 1, 0}, {"userstat", OPT_USERSTAT, @@ -7441,8 +7565,9 @@ The minimum value for this variable is 4096.", "there are no dependencies between transactional and non-transactional " "tables such as in the statement INSERT INTO t_myisam SELECT * FROM " "t_innodb; otherwise, slaves may diverge from the master.", - (uchar**) &global_system_variables.binlog_direct_non_trans_update, (uchar**) &max_system_variables.binlog_direct_non_trans_update, 0, GET_BOOL, NO_ARG, 0, - 0, 0, 0, 0, 0}, + &global_system_variables.binlog_direct_non_trans_update, + &max_system_variables.binlog_direct_non_trans_update, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -8978,13 +9103,12 @@ mysqld_get_one_option(int optid, /** Handle arguments for multiple key caches. */ +C_MODE_START +static void* mysql_getopt_value(const char *, uint, + const struct my_option *, int *); +C_MODE_END -extern "C" int mysql_getopt_value(uchar **value, - const char *keyname, uint key_length, - const struct my_option *option, - int *error); - -static uchar* * +static void* mysql_getopt_value(const char *keyname, uint key_length, const struct my_option *option, int *error) { @@ -9006,13 +9130,13 @@ mysql_getopt_value(const char *keyname, uint key_length, } switch (option->id) { case OPT_KEY_BUFFER_SIZE: - return (uchar**) &key_cache->param_buff_size; + return &key_cache->param_buff_size; case OPT_KEY_CACHE_BLOCK_SIZE: - return (uchar**) &key_cache->param_block_size; + return &key_cache->param_block_size; case OPT_KEY_CACHE_DIVISION_LIMIT: - return (uchar**) &key_cache->param_division_limit; + return &key_cache->param_division_limit; case OPT_KEY_CACHE_AGE_THRESHOLD: - return (uchar**) &key_cache->param_age_threshold; + return &key_cache->param_age_threshold; case OPT_KEY_CACHE_PARTITIONS: return (uchar**) &key_cache->param_partitions; } diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 49199ccb6e4..4796a5601bf 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -16,11 +16,7 @@ /** @file - This file is the net layer API for the MySQL client/server protocol, - which is a tightly coupled, proprietary protocol owned by MySQL AB. - @note - Any re-implementations of this protocol must also be under GPL - unless one has got an license from MySQL AB stating otherwise. + This file is the net layer API for the MySQL client/server protocol. Write and read of logical packets to/from socket. @@ -914,7 +910,13 @@ my_real_read(NET *net, size_t *complen) ("Packets out of order (Found: %d, expected %u)", (int) net->buff[net->where_b + 3], net->pkt_nr)); -#ifdef EXTRA_DEBUG + /* + We don't make noise server side, since the client is expected + to break the protocol for e.g. --send LOAD DATA .. 
LOCAL where + the server expects the client to send a file, but the client + may reply with a new command instead. + */ +#if defined (EXTRA_DEBUG) && !defined (MYSQL_SERVER) fflush(stdout); fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n", (int) net->buff[net->where_b + 3], diff --git a/sql/opt_range.cc b/sql/opt_range.cc index ad404a100e3..2f88841d6a2 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1192,7 +1192,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param, TABLE *table) - :pk_quick_select(NULL), thd(thd_param) + :unique(NULL), pk_quick_select(NULL), thd(thd_param) { DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT"); index= MAX_KEY; @@ -1234,6 +1234,7 @@ QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT() List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); QUICK_RANGE_SELECT* quick; DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT"); + delete unique; quick_it.rewind(); while ((quick= quick_it++)) quick->file= NULL; @@ -7593,8 +7594,8 @@ check_quick_keys(PARAM *param, uint idx, SEL_ARG *key_tree, param->range_count++; if (!tmp_min_flag && ! tmp_max_flag && (uint) key_tree->part+1 == param->table->key_info[keynr].key_parts && - (param->table->key_info[keynr].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == - HA_NOSAME && min_key_length == max_key_length && + (param->table->key_info[keynr].flags & HA_NOSAME) && + min_key_length == max_key_length && !memcmp(param->min_key, param->max_key, min_key_length) && !param->first_null_comp) { @@ -7883,7 +7884,7 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { KEY *table_key=quick->head->key_info+quick->index; flag=EQ_RANGE; - if ((table_key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && + if ((table_key->flags & HA_NOSAME) && key->part == table_key->key_parts-1) { if (!(table_key->flags & HA_NULL_PART_KEY) || @@ -7933,7 +7934,7 @@ bool QUICK_RANGE_SELECT::unique_key_range() if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) { KEY *key=head->key_info+index; - return ((key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && + return ((key->flags & HA_NOSAME) && key->key_length == tmp->min_length); } } @@ -8052,8 +8053,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, range->min_length= range->max_length= ref->key_length; range->min_keypart_map= range->max_keypart_map= make_prev_keypart_map(ref->key_parts); - range->flag= ((ref->key_length == key_info->key_length && - (key_info->flags & HA_END_SPACE_KEY) == 0) ? EQ_RANGE : 0); + range->flag= (ref->key_length == key_info->key_length ? 
EQ_RANGE : 0); if (!(quick->key_parts=key_part=(KEY_PART *) alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts))) @@ -8123,7 +8123,6 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects); QUICK_RANGE_SELECT* cur_quick; int result; - Unique *unique= 0; handler *file= head->file; DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge"); @@ -8145,11 +8144,24 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() if (cur_quick->init() || cur_quick->reset()) goto err; - unique= new Unique(refpos_order_cmp, (void *)file, - file->ref_length, - thd->variables.sortbuff_size); - if (!unique) - goto err; + if (unique == NULL) + { + DBUG_EXECUTE_IF("index_merge_may_not_create_a_Unique", abort(); ); + DBUG_EXECUTE_IF("only_one_Unique_may_be_created", + DBUG_SET("+d,index_merge_may_not_create_a_Unique"); ); + + unique= new Unique(refpos_order_cmp, (void *)file, + file->ref_length, + thd->variables.sortbuff_size); + if (!unique) + goto err; + } + else + unique->reset(); + + DBUG_ASSERT(file->ref_length == unique->get_size()); + DBUG_ASSERT(thd->variables.sortbuff_size == unique->get_max_in_memory_size()); + for (;;) { while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE) @@ -8193,7 +8205,6 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() sequence. */ result= unique->get(head); - delete unique; doing_pk_scan= FALSE; /* index_merge currently doesn't support "using index" at all @@ -8204,7 +8215,6 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() DBUG_RETURN(result); err: - delete unique; head->disable_keyread(); DBUG_RETURN(1); } @@ -8319,6 +8329,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next() if ((error= quick->get_next())) DBUG_RETURN(error); } + quick->file->position(quick->record); } memcpy(last_rowid, quick->file->ref, head->file->ref_length); last_rowid_count= 1; @@ -8502,8 +8513,6 @@ int QUICK_RANGE_SELECT::get_next() { int result; KEY_MULTI_RANGE *mrange; - key_range *start_key; - key_range *end_key; DBUG_ENTER("QUICK_RANGE_SELECT::get_next"); DBUG_ASSERT(multi_range_length && multi_range && (cur_range >= (QUICK_RANGE**) ranges.buffer) && @@ -8543,26 +8552,9 @@ int QUICK_RANGE_SELECT::get_next() mrange_slot < mrange_end; mrange_slot++) { - start_key= &mrange_slot->start_key; - end_key= &mrange_slot->end_key; last_range= *(cur_range++); - - start_key->key= (const uchar*) last_range->min_key; - start_key->length= last_range->min_length; - start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : - (last_range->flag & EQ_RANGE) ? - HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); - start_key->keypart_map= last_range->min_keypart_map; - end_key->key= (const uchar*) last_range->max_key; - end_key->length= last_range->max_length; - /* - We use HA_READ_AFTER_KEY here because if we are reading on a key - prefix. We want to find all keys with this prefix. - */ - end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : - HA_READ_AFTER_KEY); - end_key->keypart_map= last_range->max_keypart_map; - + last_range->make_min_endpoint(&mrange_slot->start_key); + last_range->make_max_endpoint(&mrange_slot->end_key); mrange_slot->range_flag= last_range->flag; } @@ -8586,51 +8578,54 @@ end: /* Get the next record with a different prefix. 
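The read_keys_and_merge() hunk above is worth a second look: the Unique object moves from a local that was created and deleted on every call (including the error path) to a member that is allocated on first use, reset on reuse, and freed exactly once in the destructor. The pattern, with a generic container standing in for Unique:

    #include <vector>

    class index_merge_sketch {
      std::vector<unsigned long> *rowids;            // stand-in for Unique*
    public:
      index_merge_sketch() : rowids(0) {}
      int read_keys_and_merge() {
        if (rowids == 0)
          rowids= new std::vector<unsigned long>();  // first call: allocate
        else
          rowids->clear();                           // reuse: just reset
        // ... collect row ids from each quick select, deduplicate ...
        return 0;
      }
      ~index_merge_sketch() { delete rowids; }       // freed once, here
    };

    int main() {
      index_merge_sketch m;
      m.read_keys_and_merge();
      m.read_keys_and_merge();     // a second scan reuses the buffer
      return 0;
    }
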
- SYNOPSIS - QUICK_RANGE_SELECT::get_next_prefix() - prefix_length length of cur_prefix - cur_prefix prefix of a key to be searched for + @param prefix_length length of cur_prefix + @param group_key_parts The number of key parts in the group prefix + @param cur_prefix prefix of a key to be searched for - DESCRIPTION - Each subsequent call to the method retrieves the first record that has a - prefix with length prefix_length different from cur_prefix, such that the - record with the new prefix is within the ranges described by - this->ranges. The record found is stored into the buffer pointed by - this->record. - The method is useful for GROUP-BY queries with range conditions to - discover the prefix of the next group that satisfies the range conditions. + Each subsequent call to the method retrieves the first record that has a + prefix with length prefix_length and which is different from cur_prefix, + such that the record with the new prefix is within the ranges described by + this->ranges. The record found is stored into the buffer pointed by + this->record. The method is useful for GROUP-BY queries with range + conditions to discover the prefix of the next group that satisfies the range + conditions. + + @todo - TODO This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both methods should be unified into a more general one to reduce code duplication. - RETURN - 0 on success - HA_ERR_END_OF_FILE if returned all keys - other if some error occurred + @retval 0 on success + @retval HA_ERR_END_OF_FILE if returned all keys + @retval other if some error occurred */ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, - key_part_map keypart_map, + uint group_key_parts, uchar *cur_prefix) { DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); + const key_part_map keypart_map= make_prev_keypart_map(group_key_parts); for (;;) { int result; - key_range start_key, end_key; if (last_range) { /* Read the next record in the same range with prefix after cur_prefix. - */ - DBUG_ASSERT(cur_prefix != 0); + */ + DBUG_ASSERT(cur_prefix != NULL); result= file->ha_index_read_map(record, cur_prefix, keypart_map, HA_READ_AFTER_KEY); - if (result || (file->compare_key(file->end_range) <= 0)) + if (result || last_range->max_keypart_map == 0) DBUG_RETURN(result); + + key_range previous_endpoint; + last_range->make_max_endpoint(&previous_endpoint, prefix_length, keypart_map); + if (file->compare_key(&previous_endpoint) <= 0) + DBUG_RETURN(0); } uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer); @@ -8642,21 +8637,9 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, } last_range= *(cur_range++); - start_key.key= (const uchar*) last_range->min_key; - start_key.length= min(last_range->min_length, prefix_length); - start_key.keypart_map= last_range->min_keypart_map & keypart_map; - start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : - (last_range->flag & EQ_RANGE) ? - HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); - end_key.key= (const uchar*) last_range->max_key; - end_key.length= min(last_range->max_length, prefix_length); - end_key.keypart_map= last_range->max_keypart_map & keypart_map; - /* - We use READ_AFTER_KEY here because if we are reading on a key - prefix we want to find all keys with this prefix - */ - end_key.flag= (last_range->flag & NEAR_MAX ? 
HA_READ_BEFORE_KEY : - HA_READ_AFTER_KEY); + key_range start_key, end_key; + last_range->make_min_endpoint(&start_key, prefix_length, keypart_map); + last_range->make_max_endpoint(&end_key, prefix_length, keypart_map); result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0, last_range->max_keypart_map ? &end_key : 0, @@ -8751,9 +8734,9 @@ bool QUICK_RANGE_SELECT::row_in_ranges() } /* - This is a hack: we inherit from QUICK_SELECT so that we can use the + This is a hack: we inherit from QUICK_RANGE_SELECT so that we can use the get_next() interface, but we have to hold a pointer to the original - QUICK_SELECT because its data are used all over the place. What + QUICK_RANGE_SELECT because its data are used all over the place. What should be done is to factor out the data that is needed into a base class (QUICK_SELECT), and then have two subclasses (_ASC and _DESC) which handle the ranges and implement the get_next() function. But @@ -10276,7 +10259,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, uint use_index, double read_cost_arg, ha_rows records_arg, uint key_infix_len_arg, uchar *key_infix_arg, MEM_ROOT *parent_alloc) - :join(join_arg), index_info(index_info_arg), + :file(table->file), join(join_arg), index_info(index_info_arg), group_prefix_len(group_prefix_len_arg), group_key_parts(group_key_parts_arg), have_min(have_min_arg), have_max(have_max_arg), seen_first_key(FALSE), doing_key_read(FALSE), @@ -10285,7 +10268,6 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, max_functions_it(NULL) { head= table; - file= head->file; index= use_index; record= head->record[0]; tmp_record= head->record[1]; @@ -10885,7 +10867,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() { uchar *cur_prefix= seen_first_key ? group_prefix : NULL; if ((result= quick_prefix_select->get_next_prefix(group_prefix_len, - make_prev_keypart_map(group_key_parts), cur_prefix))) + group_key_parts, + cur_prefix))) DBUG_RETURN(result); seen_first_key= TRUE; } diff --git a/sql/opt_range.h b/sql/opt_range.h index ae02e19974d..758dffe7a52 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -71,6 +71,85 @@ class QUICK_RANGE :public Sql_alloc { dummy=0; #endif } + + /** + Initalizes a key_range object for communication with storage engine. + + This function facilitates communication with the Storage Engine API by + translating the minimum endpoint of the interval represented by this + QUICK_RANGE into an index range endpoint specifier for the engine. + + @param Pointer to an uninitialized key_range C struct. + + @param prefix_length The length of the search key prefix to be used for + lookup. + + @param keypart_map A set (bitmap) of keyparts to be used. + */ + void make_min_endpoint(key_range *kr, uint prefix_length, + key_part_map keypart_map) { + make_min_endpoint(kr); + kr->length= min(kr->length, prefix_length); + kr->keypart_map&= keypart_map; + } + + /** + Initalizes a key_range object for communication with storage engine. + + This function facilitates communication with the Storage Engine API by + translating the minimum endpoint of the interval represented by this + QUICK_RANGE into an index range endpoint specifier for the engine. + + @param Pointer to an uninitialized key_range C struct. + */ + void make_min_endpoint(key_range *kr) { + kr->key= (const uchar*)min_key; + kr->length= min_length; + kr->keypart_map= min_keypart_map; + kr->flag= ((flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (flag & EQ_RANGE) ? 
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + } + + /** + Initalizes a key_range object for communication with storage engine. + + This function facilitates communication with the Storage Engine API by + translating the maximum endpoint of the interval represented by this + QUICK_RANGE into an index range endpoint specifier for the engine. + + @param Pointer to an uninitialized key_range C struct. + + @param prefix_length The length of the search key prefix to be used for + lookup. + + @param keypart_map A set (bitmap) of keyparts to be used. + */ + void make_max_endpoint(key_range *kr, uint prefix_length, + key_part_map keypart_map) { + make_max_endpoint(kr); + kr->length= min(kr->length, prefix_length); + kr->keypart_map&= keypart_map; + } + + /** + Initalizes a key_range object for communication with storage engine. + + This function facilitates communication with the Storage Engine API by + translating the maximum endpoint of the interval represented by this + QUICK_RANGE into an index range endpoint specifier for the engine. + + @param Pointer to an uninitialized key_range C struct. + */ + void make_max_endpoint(key_range *kr) { + kr->key= (const uchar*)max_key; + kr->length= max_length; + kr->keypart_map= max_keypart_map; + /* + We use READ_AFTER_KEY here because if we are reading on a key + prefix we want to find all keys with this prefix + */ + kr->flag= (flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + } }; @@ -339,7 +418,7 @@ public: int reset(void); int get_next(); void range_end(); - int get_next_prefix(uint prefix_length, key_part_map keypart_map, + int get_next_prefix(uint prefix_length, uint group_key_parts, uchar *cur_prefix); bool reverse_sorted() { return 0; } bool unique_key_range(); @@ -429,6 +508,7 @@ public: class QUICK_INDEX_MERGE_SELECT : public QUICK_SELECT_I { + Unique *unique; public: QUICK_INDEX_MERGE_SELECT(THD *thd, TABLE *table); ~QUICK_INDEX_MERGE_SELECT(); @@ -613,13 +693,13 @@ private: class QUICK_GROUP_MIN_MAX_SELECT : public QUICK_SELECT_I { private: - handler *file; /* The handler used to get data. */ + handler * const file; /* The handler used to get data. */ JOIN *join; /* Descriptor of the current query */ KEY *index_info; /* The index chosen for data access */ uchar *record; /* Buffer where the next record is returned. */ uchar *tmp_record; /* Temporary storage for next_min(), next_max(). */ uchar *group_prefix; /* Key prefix consisting of the GROUP fields. */ - uint group_prefix_len; /* Length of the group prefix. */ + const uint group_prefix_len; /* Length of the group prefix. */ uint group_key_parts; /* A number of keyparts in the group prefix */ uchar *last_prefix; /* Prefix of the last group for detecting EOF. */ bool have_min; /* Specify whether we are computing */ diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 974eea08871..450ded89653 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -89,6 +89,126 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables) /** + Use index to read MIN(field) value. + + @param table Table object + @param ref Reference to the structure where we store the key value + @item_field Field used in MIN() + @range_fl Whether range endpoint is strict less than + @prefix_len Length of common key part for the range + + @retval + 0 No errors + HA_ERR_... 
Otherwise +*/ + +static int get_index_min_value(TABLE *table, TABLE_REF *ref, + Item_field *item_field, uint range_fl, + uint prefix_len) +{ + int error; + + if (!ref->key_length) + error= table->file->ha_index_first(table->record[0]); + else + { + /* + Use index to replace MIN/MAX functions with their values + according to the following rules: + + 1) Insert the minimum non-null values where the WHERE clause still + matches, or + 2) a NULL value if there are only NULL values for key_part_k. + 3) Fail, producing a row of nulls + + Implementation: Read the smallest value using the search key. If + the interval is open, read the next value after the search + key. If read fails, and we're looking for a MIN() value for a + nullable column, test if there is an exact match for the key. + */ + if (!(range_fl & NEAR_MIN)) + /* + Closed interval: Either The MIN argument is non-nullable, or + we have a >= predicate for the MIN argument. + */ + error= table->file->ha_index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_KEY_OR_NEXT); + else + { + /* + Open interval: There are two cases: + 1) We have only MIN() and the argument column is nullable, or + 2) there is a > predicate on it, nullability is irrelevant. + We need to scan the next bigger record first. + Open interval is not used if the search key involves the last keypart, + and it would not work. + */ + DBUG_ASSERT(prefix_len < ref->key_length); + error= table->file->ha_index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_AFTER_KEY); + /* + If the found record is outside the group formed by the search + prefix, or there is no such record at all, check if all + records in that group have NULL in the MIN argument + column. If that is the case return that NULL. + + Check if case 1 from above holds. If it does, we should read + the skipped tuple. + */ + if (item_field->field->real_maybe_null() && + ref->key_buff[prefix_len] == 1 && + /* + Last keypart (i.e. the argument to MIN) is set to NULL by + find_key_for_maxmin only if all other keyparts are bound + to constants in a conjunction of equalities. Hence, we + can detect this by checking only if the last keypart is + NULL. + */ + (error == HA_ERR_KEY_NOT_FOUND || + key_cmp_if_same(table, ref->key_buff, ref->key, prefix_len))) + { + DBUG_ASSERT(item_field->field->real_maybe_null()); + error= table->file->ha_index_read_map(table->record[0], + ref->key_buff, + make_prev_keypart_map(ref->key_parts), + HA_READ_KEY_EXACT); + } + } + } + return error; +} + + +/** + Use index to read MAX(field) value. + + @param table Table object + @param ref Reference to the structure where we store the key value + @range_fl Whether range endpoint is strict greater than + + @retval + 0 No errors + HA_ERR_... Otherwise +*/ + +static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) +{ + return (ref->key_length ? + table->file->ha_index_read_map(table->record[0], ref->key_buff, + make_prev_keypart_map(ref->key_parts), + range_fl & NEAR_MAX ? + HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV) : + table->file->ha_index_last(table->record[0])); +} + + + +/** Substitutes constants for some COUNT(), MIN() and MAX() functions. 
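The closed-versus-open interval logic in get_index_min_value() above maps directly onto ordered-container searches; modelling the index with std::set (illustration only, but a B-tree index gives the same semantics):

    #include <cstdio>
    #include <set>

    int main() {
      std::set<int> index_col= {3, 5, 8};
      // Closed interval (col >= 5): first entry not less than the key,
      // i.e. HA_READ_KEY_OR_NEXT.
      std::set<int>::iterator ge= index_col.lower_bound(5);   // -> 5
      // Open interval (col > 5): first entry strictly greater,
      // i.e. HA_READ_AFTER_KEY.
      std::set<int>::iterator gt= index_col.upper_bound(5);   // -> 8
      std::printf("MIN(col) for col >= 5: %d, for col > 5: %d\n", *ge, *gt);
      return 0;
    }
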
@param tables list of leaves of join table tree @@ -220,9 +340,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; break; case Item_sum::MIN_FUNC: + case Item_sum::MAX_FUNC: { + int is_max= test(item_sum->sum_func() == Item_sum::MAX_FUNC); /* - If MIN(expr) is the first part of a key or if all previous + If MIN/MAX(expr) is the first part of a key or if all previous parts of the key is found in the COND, then we can use indexes to find the key. */ @@ -241,89 +363,25 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Look for a partial key that can be used for optimization. If we succeed, ref.key_length will contain the length of this key, while prefix_len will contain the length of - the beginning of this key without field used in MIN(). + the beginning of this key without field used in MIN/MAX(). Type of range for the key part for this field will be returned in range_fl. */ if (table->file->inited || (outer_tables & table->map) || - !find_key_for_maxmin(0, &ref, item_field->field, conds, + !find_key_for_maxmin(is_max, &ref, item_field->field, conds, &range_fl, &prefix_len)) { const_result= 0; break; } - error= table->file->ha_index_init((uint) ref.key, 1); + if (!(error= table->file->ha_index_init((uint) ref.key, 1))) + error= (is_max ? + get_index_max_value(table, &ref, range_fl) : + get_index_min_value(table, &ref, item_field, range_fl, + prefix_len)); - if (!ref.key_length) - error= table->file->ha_index_first(table->record[0]); - else - { - /* - Use index to replace MIN/MAX functions with their values - according to the following rules: - - 1) Insert the minimum non-null values where the WHERE clause still - matches, or - 2) a NULL value if there are only NULL values for key_part_k. - 3) Fail, producing a row of nulls - - Implementation: Read the smallest value using the search key. If - the interval is open, read the next value after the search - key. If read fails, and we're looking for a MIN() value for a - nullable column, test if there is an exact match for the key. - */ - if (!(range_fl & NEAR_MIN)) - /* - Closed interval: Either The MIN argument is non-nullable, or - we have a >= predicate for the MIN argument. - */ - error= table->file->ha_index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_OR_NEXT); - else - { - /* - Open interval: There are two cases: - 1) We have only MIN() and the argument column is nullable, or - 2) there is a > predicate on it, nullability is irrelevant. - We need to scan the next bigger record first. - */ - error= table->file->ha_index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_AFTER_KEY); - /* - If the found record is outside the group formed by the search - prefix, or there is no such record at all, check if all - records in that group have NULL in the MIN argument - column. If that is the case return that NULL. - - Check if case 1 from above holds. If it does, we should read - the skipped tuple. - */ - if (item_field->field->real_maybe_null() && - ref.key_buff[prefix_len] == 1 && - /* - Last keypart (i.e. the argument to MIN) is set to NULL by - find_key_for_maxmin only if all other keyparts are bound - to constants in a conjunction of equalities. Hence, we - can detect this by checking only if the last keypart is - NULL. 
- */ - (error == HA_ERR_KEY_NOT_FOUND || - key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len))) - { - DBUG_ASSERT(item_field->field->real_maybe_null()); - error= table->file->ha_index_read_map(table->record[0], - ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_EXACT); - } - } - } /* Verify that the read tuple indeed matches the search key */ - if (!error && reckey_in_range(0, &ref, item_field->field, + if (!error && reckey_in_range(is_max, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; table->disable_keyread(); @@ -352,98 +410,16 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; break; } - if (!count) - { - /* If count == 0, then we know that is_exact_count == TRUE. */ - ((Item_sum_min*) item_sum)->clear(); /* Set to NULL. */ - } - else - ((Item_sum_min*) item_sum)->reset(); /* Set to the constant value. */ - ((Item_sum_min*) item_sum)->make_const(); - recalc_const_item= 1; - break; - } - case Item_sum::MAX_FUNC: - { /* - If MAX(expr) is the first part of a key or if all previous - parts of the key is found in the COND, then we can use - indexes to find the key. + If count == 0 (so is_exact_count == TRUE) and + there're no outer joins, set to NULL, + otherwise set to the constant value. */ - Item *expr=item_sum->get_arg(0); - if (expr->real_item()->type() == Item::FIELD_ITEM) - { - uchar key_buff[MAX_KEY_LENGTH]; - TABLE_REF ref; - uint range_fl, prefix_len; - - ref.key_buff= key_buff; - Item_field *item_field= (Item_field*) (expr->real_item()); - TABLE *table= item_field->field->table; - - /* - Look for a partial key that can be used for optimization. - If we succeed, ref.key_length will contain the length of - this key, while prefix_len will contain the length of - the beginning of this key without field used in MAX(). - Type of range for the key part for this field will be - returned in range_fl. - */ - if (table->file->inited || (outer_tables & table->map) || - !find_key_for_maxmin(1, &ref, item_field->field, conds, - &range_fl, &prefix_len)) - { - const_result= 0; - break; - } - error= table->file->ha_index_init((uint) ref.key, 1); - - if (!ref.key_length) - error= table->file->ha_index_last(table->record[0]); - else - error= table->file->ha_index_read_map(table->record[0], key_buff, - make_prev_keypart_map(ref.key_parts), - range_fl & NEAR_MAX ? - HA_READ_BEFORE_KEY : - HA_READ_PREFIX_LAST_OR_PREV); - if (!error && reckey_in_range(1, &ref, item_field->field, - conds, range_fl, prefix_len)) - error= HA_ERR_KEY_NOT_FOUND; - table->disable_keyread(); - table->file->ha_index_end(); - if (error) - { - if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE - /* HA_ERR_LOCK_DEADLOCK or some other error */ - table->file->print_error(error, MYF(0)); - table->in_use->fatal_error(); - return(error); - } - removed_tables|= table->map; - } - else if (!expr->const_item() || !is_exact_count) - { - /* - The optimization is not applicable in both cases: - (a) 'expr' is a non-constant expression. Then we can't - replace 'expr' by a constant. - (b) 'expr' is a costant. According to ANSI, MIN/MAX must return - NULL if the query does not return any rows. Thus, if we are not - able to determine if the query returns any rows, we can't apply - the optimization and replace MIN/MAX with a constant. - */ - const_result= 0; - break; - } - if (!count) - { - /* If count != 1, then we know that is_exact_count == TRUE. 
*/ - ((Item_sum_max*) item_sum)->clear(); /* Set to NULL. */ - } + if (!count && !outer_tables) + item_sum->clear(); else - ((Item_sum_max*) item_sum)->reset(); /* Set to the constant value. */ - ((Item_sum_max*) item_sum)->make_const(); + item_sum->reset(); + item_sum->make_const(); recalc_const_item= 1; break; } @@ -617,18 +593,19 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, key_part_map *key_part_used, uint *range_fl, uint *prefix_len) { + DBUG_ENTER("matching_cond"); if (!cond) - return 1; + DBUG_RETURN(TRUE); Field *field= field_part->field; if (!(cond->used_tables() & field->table->map)) { /* Condition doesn't restrict the used table */ - return 1; + DBUG_RETURN(TRUE); } if (cond->type() == Item::COND_ITEM) { if (((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC) - return 0; + DBUG_RETURN(FALSE); /* AND */ List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); @@ -637,13 +614,13 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, { if (!matching_cond(max_fl, ref, keyinfo, field_part, item, key_part_used, range_fl, prefix_len)) - return 0; + DBUG_RETURN(FALSE); } - return 1; + DBUG_RETURN(TRUE); } if (cond->type() != Item::FUNC_ITEM) - return 0; // Not operator, can't optimize + DBUG_RETURN(FALSE); // Not operator, can't optimize bool eq_type= 0; // =, <=> or IS NULL bool is_null_safe_eq= FALSE; // The operator is NULL safe, e.g. <=> @@ -677,7 +654,7 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, eq_type= 1; break; default: - return 0; // Can't optimize function + DBUG_RETURN(FALSE); // Can't optimize function } Item *args[3]; @@ -685,11 +662,11 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, /* Test if this is a comparison of a field and constant */ if (!simple_pred((Item_func*) cond, args, &inv)) - return 0; + DBUG_RETURN(FALSE); if (!is_null_safe_eq && !is_null && (args[1]->is_null() || (between && args[2]->is_null()))) - return FALSE; + DBUG_RETURN(FALSE); if (inv && !eq_type) less_fl= 1-less_fl; // Convert '<' -> '>' (etc) @@ -701,14 +678,14 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, { if (part > field_part) - return 0; // Field is beyond the tested parts + DBUG_RETURN(FALSE); // Field is beyond the tested parts if (part->field->eq(((Item_field*) args[0])->field)) break; // Found a part of the key for the field } bool is_field_part= part == field_part; if (!(is_field_part || eq_type)) - return 0; + DBUG_RETURN(FALSE); key_part_map org_key_part_used= *key_part_used; if (eq_type || between || max_fl == less_fl) @@ -728,6 +705,17 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, *key_part_used|= (key_part_map) 1 << (part - keyinfo->key_part); } + if (org_key_part_used == *key_part_used && + /* + The current search key is not being extended with a new key part. This + means that the a condition is added a key part for which there was a + previous condition. We can only overwrite such key parts in some special + cases, e.g. a > 2 AND a > 1 (here range_fl must be set to something). In + all other cases the WHERE condition is always false anyway. 
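The guard added in this matching_cond() hunk deserves an illustration: when a condition lands on a keypart that is already bound, it may only tighten an existing range (a > 2 AND a > 1, where range_fl is set); two equalities on the same part (a = 2 AND a = 1) make the WHERE always false, so the key is rejected. A toy version of the test, with the bitmap type simplified to a plain integer (names hypothetical):

typedef unsigned long key_part_map_sketch;   /* simplified stand-in */

/* Mirrors the new early DBUG_RETURN(FALSE): no new keypart was added,
   and we are not in the one legal overwrite case (an inequality with
   range_fl already set). */
static bool may_extend_key(key_part_map_sketch before,
                           key_part_map_sketch after,
                           bool eq_type, unsigned range_fl)
{
  if (before == after && (eq_type || range_fl == 0))
    return false;
  return true;
}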
+ */ + (eq_type || *range_fl == 0)) + DBUG_RETURN(FALSE); + if (org_key_part_used != *key_part_used || (is_field_part && (between || eq_type || max_fl == less_fl) && !cond->val_int())) @@ -773,11 +761,11 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, { if ((!is_null && !cond->val_int()) || (is_null && !test(part->field->is_null()))) - return 0; // Impossible test + DBUG_RETURN(FALSE); // Impossible test } else if (is_field_part) *range_fl&= ~(max_fl ? NO_MIN_RANGE : NO_MAX_RANGE); - return 1; + DBUG_RETURN(TRUE); } diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 5d952021365..174dcbf9c13 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -1539,7 +1539,7 @@ Dep_value_table *Dep_analysis_context::create_table_value(TABLE *table) for (uint i=0; i < table->s->keys; i++) { KEY *key= table->key_info + i; - if ((key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME) + if (key->flags & HA_NOSAME) { Dep_module_key *key_dep; if (!(key_dep= new Dep_module_key(tbl_dep, i, key->key_parts))) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 0fa13949aba..9b1b1f70784 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -976,7 +976,7 @@ bool load_master_data(THD* thd) host was specified; there could have been a problem when replication started, which led to relay log's IO_CACHE to not be inited. */ - if (flush_master_info(active_mi, 0)) + if (flush_master_info(active_mi, FALSE, FALSE)) sql_print_error("Failed to flush master info file"); } mysql_free_result(master_status_res); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index b8af53849f1..47fc88c9a8a 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -322,7 +322,7 @@ file '%s')", fname); mi->inited = 1; // now change cache READ -> WRITE - must do this before flush_master_info reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1); - if ((error=test(flush_master_info(mi, 1)))) + if ((error=test(flush_master_info(mi, TRUE, TRUE)))) sql_print_error("Failed to flush master info file"); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); @@ -348,10 +348,13 @@ err: 1 - flush master info failed 0 - all ok */ -int flush_master_info(Master_info* mi, bool flush_relay_log_cache) +int flush_master_info(Master_info* mi, + bool flush_relay_log_cache, + bool need_lock_relay_log) { IO_CACHE* file = &mi->file; char lbuf[22]; + int err= 0; DBUG_ENTER("flush_master_info"); DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos)); @@ -368,9 +371,23 @@ int flush_master_info(Master_info* mi, bool flush_relay_log_cache) When we come to this place in code, relay log may or not be initialized; the caller is responsible for setting 'flush_relay_log_cache' accordingly. 
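The rpl_mi.cc hunk just below replaces the bare flush_io_cache() call with a lock-protected flush, adding a need_lock_relay_log flag so that callers already holding the relay log lock do not deadlock against themselves. The idiom in isolation, as a sketch (only the pthread_* calls are real API; the wrapper and its parameters are placeholders):

#include <pthread.h>

/* Sketch of the conditional-locking flush idiom from the hunk below. */
static int flush_under_optional_lock(pthread_mutex_t *log_lock,
                                     int (*do_flush)(void *), void *cache,
                                     bool need_lock)
{
  int err;
  if (need_lock)
    pthread_mutex_lock(log_lock);
  /* Real code asserts ownership here: safe_mutex_assert_owner(log_lock) */
  err= do_flush(cache);
  if (need_lock)
    pthread_mutex_unlock(log_lock);
  return err ? 2 : 0;   /* 2 matches the DBUG_RETURN(2) failure path */
}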
*/ - if (flush_relay_log_cache && - flush_io_cache(mi->rli.relay_log.get_log_file())) - DBUG_RETURN(2); + if (flush_relay_log_cache) + { + pthread_mutex_t *log_lock= mi->rli.relay_log.get_log_lock(); + IO_CACHE *log_file= mi->rli.relay_log.get_log_file(); + + if (need_lock_relay_log) + pthread_mutex_lock(log_lock); + + safe_mutex_assert_owner(log_lock); + err= flush_io_cache(log_file); + + if (need_lock_relay_log) + pthread_mutex_unlock(log_lock); + + if (err) + DBUG_RETURN(2); + } /* We flushed the relay log BEFORE the master.info file, because if we crash diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index 93fb0a98198..023879f84fa 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -108,7 +108,8 @@ int init_master_info(Master_info* mi, const char* master_info_fname, bool abort_if_no_master_info_file, int thread_mask); void end_master_info(Master_info* mi); -int flush_master_info(Master_info* mi, bool flush_relay_log_cache); - +int flush_master_info(Master_info* mi, + bool flush_relay_log_cache, + bool need_lock_relay_log); #endif /* HAVE_REPLICATION */ #endif /* RPL_MI_H */ diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 26daac1b629..46ae057f97e 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -115,7 +115,7 @@ int init_relay_log_info(Relay_log_info* rli, /* The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE. Note that the I/O thread flushes it to disk after writing every - event, in flush_master_info(mi, 1). + event, in flush_master_info(mi, 1, ?). */ /* diff --git a/sql/set_var.cc b/sql/set_var.cc index 5a5e498c785..3e2d27a012d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2829,10 +2829,26 @@ int set_var_collation_client::update(THD *thd) /****************************************************************************/ +bool sys_var_timestamp::check(THD *thd, set_var *var) +{ + time_t val; + var->save_result.ulonglong_value= var->value->val_int(); + val= (time_t) var->save_result.ulonglong_value; + if (val < (time_t) MY_TIME_T_MIN || val > (time_t) MY_TIME_T_MAX) + { + my_message(ER_UNKNOWN_ERROR, + "This version of MySQL doesn't support dates later than 2038", + MYF(0)); + return TRUE; + } + return FALSE; +} + + bool sys_var_timestamp::update(THD *thd, set_var *var) { thd->set_time((time_t) var->save_result.ulonglong_value); - return 0; + return FALSE; } @@ -4308,8 +4324,14 @@ bool sys_var_thd_dbug::check(THD *thd, set_var *var) bool sys_var_thd_dbug::update(THD *thd, set_var *var) { -#ifndef DBUG_OFF - const char *command= var ? var->value->str_value.c_ptr() : ""; + char buf[256]; + String str(buf, sizeof(buf), system_charset_info), *res; + const char *command; + + res= var->value->val_str(&str); + command= res ? 
res->c_ptr(): 0; + if (!command) + command= ""; if (var->type == OPT_GLOBAL) DBUG_SET_INITIAL(command); @@ -4329,7 +4351,6 @@ bool sys_var_thd_dbug::update(THD *thd, set_var *var) DBUG_PUSH(command); } } -#endif return 0; } diff --git a/sql/set_var.h b/sql/set_var.h index 4f0ef390cce..54e4ac7e2f1 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -668,6 +668,7 @@ public: Binlog_status_enum binlog_status_arg= NOT_IN_BINLOG) :sys_var(name_arg, NULL, binlog_status_arg) { chain_sys_var(chain); } + bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } diff --git a/sql/slave.cc b/sql/slave.cc index e92070ebcc1..3df5c0df0b2 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1439,7 +1439,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) " to the relay log, SHOW SLAVE STATUS may be" " inaccurate"); rli->relay_log.harvest_bytes_written(&rli->log_space_total); - if (flush_master_info(mi, 1)) + if (flush_master_info(mi, TRUE, TRUE)) sql_print_error("Failed to flush master info file"); delete ev; } @@ -2692,7 +2692,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); "could not queue event from master"); goto err; } - if (flush_master_info(mi, 1)) + if (flush_master_info(mi, TRUE, TRUE)) { sql_print_error("Failed to flush master info file"); goto err; diff --git a/sql/sp.cc b/sql/sp.cc index ac509a3bb2d..e2e7583b901 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -784,7 +784,12 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, thd->spcont= NULL; { - Parser_state parser_state(thd, defstr.c_ptr(), defstr.length()); + Parser_state parser_state; + if (parser_state.init(thd, defstr.c_ptr(), defstr.length())) + { + ret= SP_INTERNAL_ERROR; + goto end; + } lex_start(thd); @@ -1634,8 +1639,7 @@ extern "C" uchar* sp_sroutine_key(const uchar *ptr, size_t *plen, void sp_get_prelocking_info(THD *thd, bool *need_prelocking, bool *first_no_prelocking) { - Sroutine_hash_entry *routine; - routine= (Sroutine_hash_entry*)thd->lex->sroutines_list.first; + Sroutine_hash_entry *routine= thd->lex->sroutines_list.first; DBUG_ASSERT(routine); bool first_is_procedure= (routine->key.str[0] == TYPE_ENUM_PROCEDURE); @@ -1698,7 +1702,7 @@ static bool add_used_routine(LEX *lex, Query_arena *arena, memcpy(rn->key.str, key->str, key->length + 1); if (my_hash_insert(&lex->sroutines, (uchar *)rn)) return FALSE; - lex->sroutines_list.link_in_list((uchar *)rn, (uchar **)&rn->next); + lex->sroutines_list.link_in_list(rn, &rn->next); rn->belong_to_view= belong_to_view; return TRUE; } @@ -1744,7 +1748,7 @@ void sp_add_used_routine(LEX *lex, Query_arena *arena, void sp_remove_not_own_routines(LEX *lex) { Sroutine_hash_entry *not_own_rt, *next_rt; - for (not_own_rt= *(Sroutine_hash_entry **)lex->sroutines_list_own_last; + for (not_own_rt= *lex->sroutines_list_own_last; not_own_rt; not_own_rt= next_rt) { /* @@ -1755,7 +1759,7 @@ void sp_remove_not_own_routines(LEX *lex) hash_delete(&lex->sroutines, (uchar *)not_own_rt); } - *(Sroutine_hash_entry **)lex->sroutines_list_own_last= NULL; + *lex->sroutines_list_own_last= NULL; lex->sroutines_list.next= lex->sroutines_list_own_last; lex->sroutines_list.elements= lex->sroutines_list_own_elements; } @@ -1836,11 +1840,11 @@ sp_update_stmt_used_routines(THD *thd, LEX *lex, HASH *src, It will also add elements to end of 'LEX::sroutines_list' list. 
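These sp.cc hunks are all the same mechanical change: once sroutines_list is a typed SQL_I_List<Sroutine_hash_entry>, every (Sroutine_hash_entry*) and (uchar**) cast disappears. A self-contained toy showing why the iteration becomes cast-free (toy types, not the real sql/ classes):

/* Toy model: the list now knows its element type. */
struct Node { int key; Node *next; };

template <typename T>
struct TypedList { T *first; T **next; };  /* shape of SQL_I_List */

static int count_nodes(const TypedList<Node> &src)
{
  int n= 0;
  for (Node *rt= src.first; rt; rt= rt->next) /* no cast, unlike SQL_LIST */
    n++;
  return n;
}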
*/ -static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src, +static void sp_update_stmt_used_routines(THD *thd, LEX *lex, + SQL_I_List<Sroutine_hash_entry> *src, TABLE_LIST *belong_to_view) { - for (Sroutine_hash_entry *rt= (Sroutine_hash_entry *)src->first; - rt; rt= rt->next) + for (Sroutine_hash_entry *rt= src->first; rt; rt= rt->next) (void)add_used_routine(lex, thd->stmt_arena, &rt->key, belong_to_view); } @@ -1975,8 +1979,7 @@ int sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock) { return sp_cache_routines_and_add_tables_aux(thd, lex, - (Sroutine_hash_entry *)lex->sroutines_list.first, - first_no_prelock); + lex->sroutines_list.first, first_no_prelock); } @@ -2000,8 +2003,7 @@ sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock) int sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, TABLE_LIST *view) { - Sroutine_hash_entry **last_cached_routine_ptr= - (Sroutine_hash_entry **)lex->sroutines_list.next; + Sroutine_hash_entry **last_cached_routine_ptr= lex->sroutines_list.next; sp_update_stmt_used_routines(thd, lex, &view->view->sroutines_list, view->top_table()); return sp_cache_routines_and_add_tables_aux(thd, lex, @@ -2030,8 +2032,7 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, { int ret= 0; - Sroutine_hash_entry **last_cached_routine_ptr= - (Sroutine_hash_entry **)lex->sroutines_list.next; + Sroutine_hash_entry **last_cached_routine_ptr= lex->sroutines_list.next; if (static_cast<int>(table->lock_type) >= static_cast<int>(TL_WRITE_ALLOW_WRITE)) diff --git a/sql/sp_head.cc b/sql/sp_head.cc index f7dc2c83641..9f9219566d7 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -34,6 +34,36 @@ extern "C" uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first); +/** + Helper function which operates on a THD object to set the query start_time to + the current time. + + @param[in, out] thd The session object + +*/ + +static void reset_start_time_for_sp(THD *thd) +{ + /* + Do nothing if the context is a trigger or function because time should be + constant during the execution of those. + */ + if (!thd->in_sub_stmt) + { + /* + First investigate if there is a cached time stamp + */ + if (thd->user_time) + { + thd->start_time= thd->user_time; + } + else + { + my_micro_time_and_time(&thd->start_time); + } + } +} + Item_result sp_map_result_type(enum enum_field_types type) { @@ -1227,10 +1257,13 @@ sp_head::execute(THD *thd) DBUG_PRINT("execute", ("Instruction %u", ip)); - /* Don't change NOW() in FUNCTION or TRIGGER */ - if (!thd->in_sub_stmt) - thd->set_time(); // Make current_time() et al work - + /* + We need to reset start_time to allow for time to flow inside a stored + procedure. This is only done for SP since time is suppose to be constant + during execution of triggers and functions. + */ + reset_start_time_for_sp(thd); + /* We have to set thd->stmt_arena before executing the instruction to store in the instruction free_list all new items, created @@ -1842,8 +1875,6 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) { bool err_status= FALSE; uint params = m_pcont->context_var_count(); - /* Query start time may be reset in a multi-stmt SP; keep this for later. 
*/ - ulonglong utime_before_sp_exec= thd->utime_after_lock; sp_rcontext *save_spcont, *octx; sp_rcontext *nctx = NULL; bool save_enable_slow_log; @@ -2036,8 +2067,6 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) delete nctx; thd->spcont= save_spcont; - thd->utime_after_lock= utime_before_sp_exec; - DBUG_RETURN(err_status); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 80134d7c33b..66a4f7694f9 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1248,6 +1248,12 @@ void close_thread_tables(THD *thd) table->s->table_name.str, (long) table)); #endif +#if defined(ENABLED_DEBUG_SYNC) + /* debug_sync may not be initialized for some slave threads */ + if (thd->debug_sync_control) + DEBUG_SYNC(thd, "before_close_thread_tables"); +#endif + /* We are assuming here that thd->derived_tables contains ONLY derived tables for this substatement. i.e. instead of approach which uses @@ -2522,7 +2528,7 @@ bool check_if_table_exists(THD *thd, TABLE_LIST *table, bool *exists) put in the thread-open-list. flags Bitmap of flags to modify how open works: MYSQL_LOCK_IGNORE_FLUSH - Open table even if - someone has done a flush or namelock on it. + someone has done a flush on it. No version number checking is done. MYSQL_OPEN_TEMPORARY_ONLY - Open only temporary table not the base table or view. @@ -2812,8 +2818,10 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, ("Found table '%s.%s' with different refresh version", table_list->db, table_list->table_name)); - if (flags & MYSQL_LOCK_IGNORE_FLUSH) + /* Ignore FLUSH, but not name locks! */ + if (flags & MYSQL_LOCK_IGNORE_FLUSH && !table->open_placeholder) { + DBUG_ASSERT(table->db_stat); /* Force close at once after usage */ thd->version= table->s->version; continue; @@ -4445,7 +4453,8 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last, Return a appropriate read lock type given a table object. @param thd Thread context - @param table TABLE object for table to be locked + @param lex LEX for the current statement. + @param table_list Table list element for table to be locked. @remark Due to a statement-based replication limitation, statements such as INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need @@ -4454,19 +4463,32 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last, source table. If such a statement gets applied on the slave before the INSERT .. SELECT statement finishes, data on the master could differ from data on the slave and end-up with a discrepancy between - the binary log and table state. Furthermore, this does not apply to - I_S and log tables as it's always unsafe to replicate such tables - under statement-based replication as the table on the slave might - contain other data (ie: general_log is enabled on the slave). The - statement will be marked as unsafe for SBR in decide_logging_format(). + the binary log and table state. + This also applies to SELECT/SET/DO statements which use stored + functions. Calls to such functions are going to be logged as a + whole and thus should be serialized against concurrent changes + to tables used by those functions. This can be avoided if functions + only read data but doing so requires more complex analysis than it + is done now (unfortunately, due to bug #53921 "Wrong locks for + SELECTs used stored functions may lead to broken SBR" this rule + is not followed in cases when stored function or trigger use + simple SELECT and not a subselect in their body). 
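Condensed, the rule the rewritten read_lock_type_for_table() below implements is: a plain TL_READ is safe whenever nothing statement-based can observe the read. A sketch of the decision with the inputs flattened to booleans (signature hypothetical, lock-type names real):

/* Returns true when TL_READ suffices, false for TL_READ_NO_INSERT. */
static bool plain_read_lock_ok(bool binlog_open, bool row_format,
                               bool perf_schema_table,
                               bool top_level_select,
                               bool prelocking_placeholder)
{
  return !binlog_open                    /* nothing to serialize against */
      || row_format                      /* row events are order-safe    */
      || perf_schema_table               /* never replicated safely      */
      || (top_level_select && !prelocking_placeholder);
}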
+ Furthermore, this does not apply to I_S and log tables as it's + always unsafe to replicate such tables under statement-based + replication as the table on the slave might contain other data + (ie: general_log is enabled on the slave). The statement will + be marked as unsafe for SBR in decide_logging_format(). */ -thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table) +thr_lock_type read_lock_type_for_table(THD *thd, LEX *lex, + TABLE_LIST *table_list) { bool log_on= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG); ulong binlog_format= thd->variables.binlog_format; if ((log_on == FALSE) || (binlog_format == BINLOG_FORMAT_ROW) || - (table->s->table_category == TABLE_CATEGORY_PERFORMANCE)) + (table_list->table->s->table_category == TABLE_CATEGORY_PERFORMANCE) || + (lex->sql_command == SQLCOM_SELECT && + ! table_list->prelocking_placeholder)) return TL_READ; else return TL_READ_NO_INSERT; @@ -4483,7 +4505,7 @@ thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table) counter - number of opened tables will be return using this parameter flags - bitmap of flags to modify how the tables will be open: MYSQL_LOCK_IGNORE_FLUSH - open table even if someone has - done a flush or namelock on it. + done a flush on it. NOTE Unless we are already in prelocked mode, this function will also precache @@ -4762,7 +4784,7 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) tables->table->reginfo.lock_type= thd->update_lock_default; else if (tables->lock_type == TL_READ_DEFAULT) tables->table->reginfo.lock_type= - read_lock_type_for_table(thd, tables->table); + read_lock_type_for_table(thd, thd->lex, tables); else tables->table->reginfo.lock_type= tables->lock_type; } @@ -5082,7 +5104,7 @@ int open_and_lock_tables_derived(THD *thd, TABLE_LIST *tables, bool derived) tables - list of tables for open flags - bitmap of flags to modify how the tables will be open: MYSQL_LOCK_IGNORE_FLUSH - open table even if someone has - done a flush or namelock on it. + done a flush on it. RETURN FALSE - ok @@ -5181,53 +5203,75 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables) set with all the capabilities bits set and one with no capabilities bits set. */ - handler::Table_flags flags_some_set= 0; - handler::Table_flags flags_all_set= + handler::Table_flags flags_write_some_set= 0; + handler::Table_flags flags_access_some_set= 0; + handler::Table_flags flags_write_all_set= HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE; - my_bool multi_engine= FALSE; - void* prev_ht= NULL; + /* + If different types of engines are about to be updated. + For example: Innodb and Falcon; Innodb and MyIsam. + */ + my_bool multi_write_engine= FALSE; + void* prev_write_ht= NULL; + + /* + If different types of engines are about to be accessed + and any of them is about to be updated. For example: + Innodb and Falcon; Innodb and MyIsam. 
+ */ + my_bool multi_access_engine= FALSE; + void* prev_access_ht= NULL; for (TABLE_LIST *table= tables; table; table= table->next_global) { if (table->placeholder()) continue; if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE) thd->lex->set_stmt_unsafe(); + ulonglong const flags= table->table->file->ha_table_flags(); if (table->lock_type >= TL_WRITE_ALLOW_WRITE) { - ulonglong const flags= table->table->file->ha_table_flags(); DBUG_PRINT("info", ("table: %s; ha_table_flags: %s%s", table->table_name, FLAGSTR(flags, HA_BINLOG_STMT_CAPABLE), FLAGSTR(flags, HA_BINLOG_ROW_CAPABLE))); - if (prev_ht && prev_ht != table->table->file->ht) - multi_engine= TRUE; - prev_ht= table->table->file->ht; - flags_all_set &= flags; - flags_some_set |= flags; + if (prev_write_ht && prev_write_ht != table->table->file->ht) + multi_write_engine= TRUE; + prev_write_ht= table->table->file->ht; + flags_write_all_set &= flags; + flags_write_some_set |= flags; } - } - - DBUG_PRINT("info", ("flags_all_set: %s%s", - FLAGSTR(flags_all_set, HA_BINLOG_STMT_CAPABLE), - FLAGSTR(flags_all_set, HA_BINLOG_ROW_CAPABLE))); - DBUG_PRINT("info", ("flags_some_set: %s%s", - FLAGSTR(flags_some_set, HA_BINLOG_STMT_CAPABLE), - FLAGSTR(flags_some_set, HA_BINLOG_ROW_CAPABLE))); + if (prev_access_ht && prev_access_ht != table->table->file->ht) + multi_access_engine= TRUE; + prev_access_ht= table->table->file->ht; + flags_access_some_set |= flags; + } + + DBUG_PRINT("info", ("flags_write_all_set: %s%s", + FLAGSTR(flags_write_all_set, HA_BINLOG_STMT_CAPABLE), + FLAGSTR(flags_write_all_set, HA_BINLOG_ROW_CAPABLE))); + DBUG_PRINT("info", ("flags_write_some_set: %s%s", + FLAGSTR(flags_write_some_set, HA_BINLOG_STMT_CAPABLE), + FLAGSTR(flags_write_some_set, HA_BINLOG_ROW_CAPABLE))); + DBUG_PRINT("info", ("flags_access_some_set: %s%s", + FLAGSTR(flags_access_some_set, HA_BINLOG_STMT_CAPABLE), + FLAGSTR(flags_access_some_set, HA_BINLOG_ROW_CAPABLE))); + DBUG_PRINT("info", ("multi_write_engine: %s", + multi_write_engine ? "TRUE" : "FALSE")); + DBUG_PRINT("info", ("multi_access_engine: %s", + multi_access_engine ? "TRUE" : "FALSE")); DBUG_PRINT("info", ("thd->variables.binlog_format: %ld", thd->variables.binlog_format)); - DBUG_PRINT("info", ("multi_engine: %s", - multi_engine ? "TRUE" : "FALSE")); int error= 0; - if (flags_all_set == 0) + if (flags_write_all_set == 0) { my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0), "Statement cannot be logged to the binary log in" " row-based nor statement-based format"); } else if (thd->variables.binlog_format == BINLOG_FORMAT_STMT && - (flags_all_set & HA_BINLOG_STMT_CAPABLE) == 0) + (flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0) { my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0), "Statement-based format required for this statement," @@ -5235,7 +5279,7 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables) } else if ((thd->variables.binlog_format == BINLOG_FORMAT_ROW || thd->lex->is_stmt_unsafe()) && - (flags_all_set & HA_BINLOG_ROW_CAPABLE) == 0) + (flags_write_all_set & HA_BINLOG_ROW_CAPABLE) == 0) { my_error((error= ER_BINLOG_LOGGING_IMPOSSIBLE), MYF(0), "Row-based format required for this statement," @@ -5248,8 +5292,8 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables) statement cannot be logged atomically, so we generate an error rather than allowing the binlog to become corrupt. 
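The renamed flag sets make the aggregation explicit: capabilities are AND-ed across every written table and OR-ed into a "some" set, and the write intersection decides which binlog formats remain possible. A self-contained sketch of that step (flag bit values hypothetical, logic as in decide_logging_format):

typedef unsigned long long Table_flags_sketch;
static const Table_flags_sketch ROW_CAPABLE=  1ULL << 0;  /* stand-ins for */
static const Table_flags_sketch STMT_CAPABLE= 1ULL << 1;  /* the HA_* bits */

/* Intersect write capabilities; a zero result means the statement can be
   logged in neither format and must raise ER_BINLOG_LOGGING_IMPOSSIBLE. */
static Table_flags_sketch intersect_write_flags(const Table_flags_sketch *t,
                                                int n)
{
  Table_flags_sketch all= ROW_CAPABLE | STMT_CAPABLE;
  for (int i= 0; i < n; i++)
    all&= t[i];
  return all;
}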
*/ - if (multi_engine && - (flags_some_set & HA_HAS_OWN_BINLOGGING)) + if (multi_write_engine && + (flags_write_some_set & HA_HAS_OWN_BINLOGGING)) { error= ER_BINLOG_LOGGING_IMPOSSIBLE; my_error(error, MYF(0), @@ -5257,6 +5301,16 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables) " than one engine involved and at least one engine" " is self-logging"); } + /* + Reading from a self-logging engine and updating another engine + generates changes that are written to the binary log in the + statement format and may make slaves to diverge. In the mixed + mode, such changes should be written to the binary log in the + row format. + */ + else if (multi_access_engine && + (flags_access_some_set & HA_HAS_OWN_BINLOGGING)) + thd->lex->set_stmt_unsafe(); DBUG_PRINT("info", ("error: %d", error)); @@ -5276,7 +5330,7 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables) here. */ if (thd->lex->is_stmt_unsafe() || - (flags_all_set & HA_BINLOG_STMT_CAPABLE) == 0) + (flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0) { thd->set_current_stmt_binlog_row_based_if_mixed(); } @@ -5384,6 +5438,8 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) DBUG_RETURN(-1); } + DEBUG_SYNC(thd, "after_lock_tables_takes_lock"); + if (thd->lex->requires_prelocking() && thd->lex->sql_command != SQLCOM_LOCK_TABLES) { @@ -8889,7 +8945,7 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b) /* SYNOPSIS - abort_and_upgrade_lock() + abort_and_upgrade_lock_and_close_table() lpt Parameter passing struct All parameters passed through the ALTER_PARTITION_PARAM_TYPE object RETURN VALUE @@ -8898,7 +8954,7 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b) Remember old lock level (for possible downgrade later on), abort all waiting threads and ensure that all keeping locks currently are completed such that we own the lock exclusively and no other interaction - is ongoing. + is ongoing. Close the table and hold the name lock. thd Thread object table Table object @@ -8907,18 +8963,26 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b) old_lock_level Old lock level */ -int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt) +int abort_and_upgrade_lock_and_close_table(ALTER_PARTITION_PARAM_TYPE *lpt) { uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG; - DBUG_ENTER("abort_and_upgrade_locks"); + const char *db= lpt->db; + const char *table_name= lpt->table_name; + THD *thd= lpt->thd; + DBUG_ENTER("abort_and_upgrade_lock_and_close_table"); lpt->old_lock_type= lpt->table->reginfo.lock_type; + safe_mutex_assert_not_owner(&LOCK_open); VOID(pthread_mutex_lock(&LOCK_open)); /* If MERGE child, forward lock handling to parent. */ - mysql_lock_abort(lpt->thd, lpt->table->parent ? lpt->table->parent : - lpt->table, TRUE); - VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags, - FALSE)); + mysql_lock_abort(thd, lpt->table->parent ? 
lpt->table->parent : lpt->table, + TRUE); + if (remove_table_from_cache(thd, db, table_name, flags, FALSE)) + { + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(1); + } + close_data_files_and_morph_locks(thd, db, table_name); VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(0); } diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 07972d9b3e4..da582c37ae9 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -42,9 +42,13 @@ void mysql_client_binlog_statement(THD* thd) if (check_global_access(thd, SUPER_ACL)) DBUG_VOID_RETURN; - size_t coded_len= thd->lex->comment.length + 1; + size_t coded_len= thd->lex->comment.length; + if (!coded_len) + { + my_error(ER_SYNTAX_ERROR, MYF(0)); + DBUG_VOID_RETURN; + } size_t decoded_len= base64_needed_decoded_length(coded_len); - DBUG_ASSERT(coded_len > 0); /* Allocation @@ -145,14 +149,16 @@ void mysql_client_binlog_statement(THD* thd) /* Checking that the first event in the buffer is not truncated. */ - ulong event_len= uint4korr(bufptr + EVENT_LEN_OFFSET); - DBUG_PRINT("info", ("event_len=%lu, bytes_decoded=%d", - event_len, bytes_decoded)); - if (bytes_decoded < EVENT_LEN_OFFSET || (uint) bytes_decoded < event_len) + ulong event_len; + if (bytes_decoded < EVENT_LEN_OFFSET + 4 || + (event_len= uint4korr(bufptr + EVENT_LEN_OFFSET)) > + (uint) bytes_decoded) { my_error(ER_SYNTAX_ERROR, MYF(0)); goto end; } + DBUG_PRINT("info", ("event_len=%lu, bytes_decoded=%d", + event_len, bytes_decoded)); /* If we have not seen any Format_description_event, then we must @@ -190,17 +196,6 @@ void mysql_client_binlog_statement(THD* thd) bufptr += event_len; DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code())); -#ifndef HAVE_valgrind - /* - This debug printout should not be used for valgrind builds - since it will read from unassigned memory. - */ - DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx", - (long) (bufptr+EVENT_TYPE_OFFSET))); - DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu", - bytes_decoded, (long) bufptr, - (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET))); -#endif ev->thd= thd; /* We go directly to the application phase, since we don't need diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a6d783bf6f4..b4e34fc1890 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2191,9 +2191,21 @@ bool select_export::send_data(List<Item> &items) const char *from_end_pos; const char *error_pos; uint32 bytes; - bytes= well_formed_copy_nchars(write_cs, cvt_buff, sizeof(cvt_buff), + uint64 estimated_bytes= + ((uint64) res->length() / res->charset()->mbminlen + 1) * + write_cs->mbmaxlen + 1; + set_if_smaller(estimated_bytes, UINT_MAX32); + if (cvt_str.realloc((uint32) estimated_bytes)) + { + my_error(ER_OUTOFMEMORY, MYF(0), (uint32) estimated_bytes); + goto err; + } + + bytes= well_formed_copy_nchars(write_cs, (char *) cvt_str.ptr(), + cvt_str.alloced_length(), res->charset(), res->ptr(), res->length(), - sizeof(cvt_buff), + UINT_MAX32, // copy all input chars, + // i.e. 
ignore nchars parameter &well_formed_error_pos, &cannot_convert_error_pos, &from_end_pos); @@ -2211,6 +2223,15 @@ bool select_export::send_data(List<Item> &items) "string", printable_buff, item->name, (ulong) row_count); } + else if (from_end_pos < res->ptr() + res->length()) + { + /* + result is longer than UINT_MAX32 and doesn't fit into String + */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED), + item->full_name(), row_count); + } cvt_str.length(bytes); res= &cvt_str; } @@ -3928,7 +3949,6 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end) if (stmt_end) { pending->set_flags(Rows_log_event::STMT_END_F); - pending->flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F; binlog_table_maps= 0; } @@ -4056,7 +4076,6 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg, { Query_log_event qinfo(this, query_arg, query_len, is_trans, suppress_use, errcode); - qinfo.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F; /* Binlog table maps will be irrelevant after a Query_log_event (they are just removed on the slave side) so after the query diff --git a/sql/sql_class.h b/sql/sql_class.h index 3680e28246e..2240236a0e5 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1745,8 +1745,15 @@ public: */ ha_rows sent_row_count; - /* - number of rows we read, sent or not, including in create_sort_index() + /** + Number of rows read and/or evaluated for a statement. Used for + slow log reporting. + + An examined row is defined as a row that is read and/or evaluated + according to a statement condition, including in + create_sort_index(). Rows may be counted more than once, e.g., a + statement including ORDER BY could possibly evaluate the row in + filesort() before reading it for e.g. update. */ ha_rows examined_row_count; @@ -2073,6 +2080,11 @@ public: start_time= user_time= t; start_utime= utime_after_lock= my_micro_time(); } + /*TODO: this will be obsolete when we have support for 64 bit my_time_t */ + inline bool is_valid_time() + { + return (start_time < (time_t) MY_TIME_T_MAX); + } void set_time_after_lock() { utime_after_lock= my_micro_time(); } ulonglong current_utime() { return my_micro_time(); } inline ulonglong found_rows(void) @@ -3001,6 +3013,9 @@ public: void reset(); bool walk(tree_walk_action action, void *walk_action_arg); + uint get_size() const { return size; } + ulonglong get_max_in_memory_size() const { return max_in_memory_size; } + friend int unique_write_to_file(uchar* key, element_count count, Unique *unique); friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique); }; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 30c6c4fc653..1c2ae915259 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -257,6 +257,7 @@ end: #endif /* NO_EMBEDDED_ACCESS_CHECKS */ + /* Check for maximum allowable user connections, if the mysqld server is started with corresponding variable that is greater then 0. diff --git a/sql/sql_db.cc b/sql/sql_db.cc index b4a0e5c4c7d..1dd659283ac 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -642,6 +642,18 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, goto exit2; } + /* + Close and mark for re-open all HANDLER tables which are marked for flush + or which there are pending conflicing locks against. This is needed to + prevent deadlocks. 
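This exact guarded flush is inserted three times in sql_db.cc (create, alter and drop database), as the hunks just below show. Factored into one sketch for reference; LOCK_open, mysql_ha_flush and the THD fields are the real names, the wrapper function itself is hypothetical and compiles only inside the server tree:

/* Sketch: flush HANDLER tables before taking LOCK_mysql_create_db, so a
   pending FLUSH/name lock cannot deadlock against this session. */
static void flush_handler_tables_if_any(THD *thd)
{
  if (thd->handler_tables_hash.records)  /* cheap check outside the lock */
  {
    pthread_mutex_lock(&LOCK_open);
    mysql_ha_flush(thd);
    pthread_mutex_unlock(&LOCK_open);
  }
}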
+ */ + if (thd->handler_tables_hash.records) + { + pthread_mutex_lock(&LOCK_open); + mysql_ha_flush(thd); + pthread_mutex_unlock(&LOCK_open); + } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); /* Check directory */ @@ -788,6 +800,18 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) if ((error=wait_if_global_read_lock(thd,0,1))) goto exit2; + /* + Close and mark for re-open all HANDLER tables which are marked for flush + or which there are pending conflicing locks against. This is needed to + prevent deadlocks. + */ + if (thd->handler_tables_hash.records) + { + pthread_mutex_lock(&LOCK_open); + mysql_ha_flush(thd); + pthread_mutex_unlock(&LOCK_open); + } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); /* @@ -886,6 +910,18 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) goto exit2; } + /* + Close and mark for re-open all HANDLER tables which are marked for flush + or which there are pending conflicing locks against. This is needed to + prevent deadlocks. + */ + if (thd->handler_tables_hash.records) + { + pthread_mutex_lock(&LOCK_open); + mysql_ha_flush(thd); + pthread_mutex_unlock(&LOCK_open); + } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); length= build_table_filename(path, sizeof(path) - 1, db, "", "", 0); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index acffb9f067f..361a5db1f2c 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -33,7 +33,7 @@ */ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, - SQL_LIST *order, ha_rows limit, ulonglong options, + SQL_I_List<ORDER> *order, ha_rows limit, ulonglong options, bool reset_auto_increment) { bool will_batch; @@ -84,7 +84,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (select_lex->setup_ref_array(thd, order->elements) || setup_order(thd, select_lex->ref_pointer_array, &tables, - fields, all_fields, (ORDER*) order->first)) + fields, all_fields, order->first)) { delete select; free_underlaid_joins(thd, &thd->lex->select_lex); @@ -230,14 +230,14 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ha_rows examined_rows; if ((!select || table->quick_keys.is_clear_all()) && limit != HA_POS_ERROR) - usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); + usable_index= get_index_for_order(table, order->first, limit); if (usable_index == MAX_KEY) { table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); - if (!(sortorder= make_unireg_sortorder((ORDER*) order->first, + if (!(sortorder= make_unireg_sortorder(order->first, &length, NULL)) || (table->sort.found_records = filesort(thd, table, sortorder, length, select, HA_POS_ERROR, 1, @@ -248,6 +248,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, free_underlaid_joins(thd, &thd->lex->select_lex); DBUG_RETURN(TRUE); } + thd->examined_row_count+= examined_rows; /* Filesort has already found and selected the rows we want to delete, so we don't need the where clause @@ -265,7 +266,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, free_underlaid_joins(thd, select_lex); DBUG_RETURN(TRUE); } - if (usable_index==MAX_KEY) + if (usable_index == MAX_KEY || (select && select->quick)) { if (init_read_record(&info, thd, table, select, 1, 1, FALSE)) { @@ -312,6 +313,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ! 
thd->is_error()) { update_virtual_fields(thd, table); + thd->examined_row_count++; // thd->is_error() is tested to disallow delete row on error if (!select || select->skip_record(thd) > 0) { @@ -555,7 +557,7 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b) int mysql_multi_delete_prepare(THD *thd) { LEX *lex= thd->lex; - TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxiliary_table_list.first; + TABLE_LIST *aux_tables= lex->auxiliary_table_list.first; TABLE_LIST *target_tbl; DBUG_ENTER("mysql_multi_delete_prepare"); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 37adf5c403a..782589f7d0f 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -281,13 +281,13 @@ bool mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) lex->current_select= first_select; res= mysql_select(thd, &first_select->ref_pointer_array, - (TABLE_LIST*) first_select->table_list.first, + first_select->table_list.first, first_select->with_wild, first_select->item_list, first_select->where, (first_select->order_list.elements+ first_select->group_list.elements), - (ORDER *) first_select->order_list.first, - (ORDER *) first_select->group_list.first, + first_select->order_list.first, + first_select->group_list.first, first_select->having, (ORDER*) NULL, (first_select->options | thd->options | SELECT_NO_UNLOCK), diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 61f29886da8..69be8c8e9b4 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -539,6 +539,14 @@ retry: my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, tables->alias); goto err; } + /* Check if the same index involved. */ + if ((uint) keyno != table->file->get_index()) + { + if (mode == RNEXT) + mode= RFIRST; + else if (mode == RPREV) + mode= RLAST; + } } if (insert_fields(thd, &thd->lex->select_lex.context, @@ -561,9 +569,16 @@ retry: case RNEXT: if (table->file->inited != handler::NONE) { - error=keyname ? - table->file->ha_index_next(table->record[0]) : - table->file->ha_rnd_next(table->record[0]); + if (keyname) + { + /* Check if we read from the same index. */ + DBUG_ASSERT((uint) keyno == table->file->get_index()); + error= table->file->ha_index_next(table->record[0]); + } + else + { + error= table->file->ha_rnd_next(table->record[0]); + } break; } /* else fall through */ @@ -584,6 +599,8 @@ retry: break; case RPREV: DBUG_ASSERT(keyname != 0); + /* Check if we read from the same index. 
*/ + DBUG_ASSERT((uint) keyno == table->file->get_index()); if (table->file->inited != handler::NONE) { error=table->file->ha_index_prev(table->record[0]); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index a91c9f1eb54..ea138807122 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1515,7 +1515,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) table->file->adjust_next_insert_id_after_explicit_value( table->next_number_field->val_int()); info->touched++; - if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ && + if (((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) && !bitmap_is_subset(table->write_set, table->read_set)) || compare_record(table)) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index e380c94b58a..33121e5301d 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -110,39 +110,31 @@ st_parsing_options::reset() allows_derived= TRUE; } -Lex_input_stream::Lex_input_stream(THD *thd, - const char* buffer, - unsigned int length) -: m_thd(thd), - yylineno(1), - yytoklen(0), - yylval(NULL), - m_ptr(buffer), - m_tok_start(NULL), - m_tok_end(NULL), - m_end_of_query(buffer + length), - m_tok_start_prev(NULL), - m_buf(buffer), - m_buf_length(length), - m_echo(TRUE), - m_cpp_tok_start(NULL), - m_cpp_tok_start_prev(NULL), - m_cpp_tok_end(NULL), - m_body_utf8(NULL), - m_cpp_utf8_processed_ptr(NULL), - next_state(MY_LEX_START), - found_semicolon(NULL), - ignore_space(test(thd->variables.sql_mode & MODE_IGNORE_SPACE)), - stmt_prepare_mode(FALSE), - in_comment(NO_COMMENT), - m_underscore_cs(NULL) + +bool Lex_input_stream::init(THD *thd, const char *buff, unsigned int length) { + DBUG_EXECUTE_IF("bug42064_simulate_oom", + DBUG_SET("+d,simulate_out_of_memory");); + m_cpp_buf= (char*) thd->alloc(length + 1); + + DBUG_EXECUTE_IF("bug42064_simulate_oom", + DBUG_SET("-d,bug42064_simulate_oom");); + + if (m_cpp_buf == NULL) + return TRUE; + m_cpp_ptr= m_cpp_buf; + m_thd= thd; + m_ptr= buff; + m_end_of_query= buff + length; + m_buf= buff; + m_buf_length= length; + ignore_space= test(thd->variables.sql_mode & MODE_IGNORE_SPACE); + + return FALSE; } -Lex_input_stream::~Lex_input_stream() -{} /** The operation is called from the parser in order to @@ -1653,7 +1645,7 @@ void st_select_lex::init_select() linkage= UNSPECIFIED_TYPE; order_list.elements= 0; order_list.first= 0; - order_list.next= (uchar**) &order_list.first; + order_list.next= &order_list.first; /* Set limit and offset to default values */ select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ offset_limit= 0; /* denotes the default offset = 0 */ @@ -1984,7 +1976,7 @@ uint st_select_lex::get_in_sum_expr() TABLE_LIST* st_select_lex::get_table_list() { - return (TABLE_LIST*) table_list.first; + return table_list.first; } List<Item>* st_select_lex::get_item_list() @@ -2041,9 +2033,8 @@ void st_select_lex_unit::print(String *str, enum_query_type query_type) if (fake_select_lex->order_list.elements) { str->append(STRING_WITH_LEN(" order by ")); - fake_select_lex->print_order( - str, - (ORDER *) fake_select_lex->order_list.first, + fake_select_lex->print_order(str, + fake_select_lex->order_list.first, query_type); } fake_select_lex->print_limit(thd, str, query_type); @@ -2688,7 +2679,7 @@ TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local) { select_lex.context.table_list= select_lex.context.first_name_resolution_table= first->next_local; - select_lex.table_list.first= (uchar*) (first->next_local); + select_lex.table_list.first= first->next_local; select_lex.table_list.elements--; //safety 
first->next_local= 0; /* @@ -2720,7 +2711,7 @@ TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local) void st_lex::first_lists_tables_same() { - TABLE_LIST *first_table= (TABLE_LIST*) select_lex.table_list.first; + TABLE_LIST *first_table= select_lex.table_list.first; if (query_tables != first_table && first_table != 0) { TABLE_LIST *next; @@ -2767,9 +2758,9 @@ void st_lex::link_first_table_back(TABLE_LIST *first, if (link_to_local) { - first->next_local= (TABLE_LIST*) select_lex.table_list.first; + first->next_local= select_lex.table_list.first; select_lex.context.table_list= first; - select_lex.table_list.first= (uchar*) first; + select_lex.table_list.first= first; select_lex.table_list.elements++; //safety } } @@ -2935,7 +2926,7 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds, prep_having= *having_conds; *having_conds= having= prep_having->copy_andor_structure(thd); } - fix_prepare_info_in_table_list(thd, (TABLE_LIST *)table_list.first); + fix_prepare_info_in_table_list(thd, table_list.first); } } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index b571d7d707a..2f14f3eb575 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -596,8 +596,8 @@ public: st_lex *parent_lex; enum olap_type olap; /* FROM clause - points to the beginning of the TABLE_LIST::next_local list. */ - SQL_LIST table_list; - SQL_LIST group_list; /* GROUP BY clause. */ + SQL_I_List<TABLE_LIST> table_list; + SQL_I_List<ORDER> group_list; /* GROUP BY clause. */ List<Item> item_list; /* list of fields & expressions */ List<String> interval_list; bool is_item_list_lookup; @@ -619,8 +619,8 @@ public: TABLE_LIST *leaf_tables; const char *type; /* type of select for EXPLAIN */ - SQL_LIST order_list; /* ORDER clause */ - SQL_LIST *gorder_list; + SQL_I_List<ORDER> order_list; /* ORDER clause */ + SQL_I_List<ORDER> *gorder_list; Item *select_limit, *offset_limit; /* LIMIT clause parameters */ // Arrays of pointers to top elements of all_fields list Item **ref_pointer_array; @@ -782,7 +782,7 @@ public: { order_list.elements= 0; order_list.first= 0; - order_list.next= (uchar**) &order_list.first; + order_list.next= &order_list.first; } /* This method created for reiniting LEX in mysql_admin_table() and can be @@ -963,6 +963,8 @@ enum xa_option_words {XA_NONE, XA_JOIN, XA_RESUME, XA_ONE_PHASE, extern const LEX_STRING null_lex_str; extern const LEX_STRING empty_lex_str; +struct Sroutine_hash_entry; + /* Class representing list of all tables used by statement. It also contains information about stored functions used by statement @@ -1003,9 +1005,9 @@ public: We use these two members for restoring of 'sroutines_list' to the state in which it was right after query parsing. 
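The sql_list.h hunk further below defines the list these declarations rely on. Its core trick is the indirect "next" pointer, which gives O(1) append without a tail scan; a self-contained toy model of link_in_list() (element count omitted):

/* Toy model of SQL_I_List::link_in_list(), not the real class. */
struct Elem { int v; Elem *next; };

struct ToyList
{
  Elem *first;
  Elem **next;                       /* address of the last 'next' slot */
  ToyList() : first(0), next(&first) {}
  void link_in_list(Elem *e, Elem **next_ptr)
  {
    *next= e;                        /* hook element into the last slot */
    next= next_ptr;                  /* its 'next' becomes the new slot */
    *next= 0;                        /* keep the list terminated        */
  }
};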
*/ - SQL_LIST sroutines_list; - uchar **sroutines_list_own_last; - uint sroutines_list_own_elements; + SQL_I_List<Sroutine_hash_entry> sroutines_list; + Sroutine_hash_entry **sroutines_list_own_last; + uint sroutines_list_own_elements; /* These constructor and destructor serve for creation/destruction @@ -1157,10 +1159,39 @@ enum enum_comment_state class Lex_input_stream { public: - Lex_input_stream(THD *thd, const char* buff, unsigned int length); - ~Lex_input_stream(); + Lex_input_stream() : + yylineno(1), + yytoklen(0), + yylval(NULL), + m_tok_start(NULL), + m_tok_end(NULL), + m_tok_start_prev(NULL), + m_echo(TRUE), + m_cpp_tok_start(NULL), + m_cpp_tok_start_prev(NULL), + m_cpp_tok_end(NULL), + m_body_utf8(NULL), + m_cpp_utf8_processed_ptr(NULL), + next_state(MY_LEX_START), + found_semicolon(NULL), + stmt_prepare_mode(FALSE), + in_comment(NO_COMMENT), + m_underscore_cs(NULL) + { + } + + ~Lex_input_stream() + { + } /** + Object initializer. Must be called before usage. + + @retval FALSE OK + @retval TRUE Error + */ + bool init(THD *thd, const char *buff, unsigned int length); + /** Set the echo mode. When echo is true, characters parsed from the raw input stream are @@ -1610,7 +1641,8 @@ typedef struct st_lex : public Query_tables_list */ List<Name_resolution_context> context_stack; - SQL_LIST proc_list, auxiliary_table_list, save_list; + SQL_I_List<ORDER> proc_list; + SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list; Create_field *last_field; Item_sum *in_sum_func; udf_func udf; @@ -1740,7 +1772,7 @@ typedef struct st_lex : public Query_tables_list fields to TABLE object at table open (altough for latter pointer to table being opened is probably enough). */ - SQL_LIST trg_table_fields; + SQL_I_List<Item_trigger_field> trg_table_fields; /* stmt_definition_begin is intended to point to the next word after @@ -1954,10 +1986,21 @@ public: class Parser_state { public: - Parser_state(THD *thd, const char* buff, unsigned int length) - : m_lip(thd, buff, length), m_yacc() + Parser_state() + : m_yacc() {} + /** + Object initializer. Must be called before usage. + + @retval FALSE OK + @retval TRUE Error + */ + bool init(THD *thd, const char *buff, unsigned int length) + { + return m_lip.init(thd, buff, length); + } + ~Parser_state() {} diff --git a/sql/sql_list.h b/sql/sql_list.h index 93cdd20c299..15eb85ab52c 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -55,6 +55,73 @@ public: }; +/** + Simple intrusive linked list. + + @remark Similar in nature to base_list, but intrusive. It keeps a + a pointer to the first element in the list and a indirect + reference to the last element. +*/ +template <typename T> +class SQL_I_List :public Sql_alloc +{ +public: + uint elements; + /** The first element in the list. */ + T *first; + /** A reference to the next element in the list. */ + T **next; + + SQL_I_List() { empty(); } + + SQL_I_List(const SQL_I_List &tmp) :Sql_alloc() + { + elements= tmp.elements; + first= tmp.first; + next= elements ? 
tmp.next : &first; + } + + inline void empty() + { + elements= 0; + first= NULL; + next= &first; + } + + inline void link_in_list(T *element, T **next_ptr) + { + elements++; + (*next)= element; + next= next_ptr; + *next= NULL; + } + + inline void save_and_clear(SQL_I_List<T> *save) + { + *save= *this; + empty(); + } + + inline void push_front(SQL_I_List<T> *save) + { + /* link current list last */ + *save->next= first; + first= save->first; + elements+= save->elements; + } + + inline void push_back(SQL_I_List<T> *save) + { + if (save->first) + { + *next= save->first; + next= save->next; + elements+= save->elements; + } + } +}; + + /* Basic single linked list Used for item and item_buffs. diff --git a/sql/sql_load.cc b/sql/sql_load.cc index e5ef7bed5e0..d0506d75925 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -511,7 +511,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, else { Delete_file_log_event d(thd, db, transactional_table); - d.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F; (void) mysql_bin_log.write(&d); } } @@ -691,7 +690,6 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex, (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE : (ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR), transactional_table, FALSE, errcode); - e.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F; return mysql_bin_log.write(&e); } @@ -929,6 +927,10 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, DBUG_RETURN(1); } } + + if (thd->is_error()) + read_info.error= 1; + if (read_info.error) break; if (skip_lines) @@ -1052,7 +1054,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),escape_char(escape) + :file(file_par),buffer(0),escape_char(escape) { read_charset= cs; field_term_ptr=(char*) field_term.ptr(); @@ -1101,6 +1103,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, MYF(MY_WME))) { my_free((uchar*) buffer,MYF(0)); /* purecov: inspected */ + buffer= 0; error=1; } else @@ -1131,9 +1134,8 @@ READ_INFO::~READ_INFO() { if (need_end_io_cache) ::end_io_cache(&cache); - my_free((uchar*) buffer,MYF(0)); - error=1; } + my_free((uchar*) buffer,MYF(MY_ALLOW_ZERO_PTR)); } diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc index dccfcbaf8ac..21deef8c664 100644 --- a/sql/sql_olap.cc +++ b/sql/sql_olap.cc @@ -146,14 +146,14 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) lex->last_selects=select_lex; - for (ORDER *order=(ORDER *)select_lex->group_list.first ; order ; order=order->next) + for (ORDER *order= select_lex->group_list.first ; order ; order=order->next) item_list_copy.push_back(*(order->item)); List<Item> all_fields(select_lex->item_list); if (setup_tables(lex->thd, &select_lex->context, &select_lex->top_join_list, - (TABLE_LIST *)select_lex->table_list.first + select_lex->table_list.first &select_lex->leaf_tables, FALSE) || setup_fields(lex->thd, 0, select_lex->item_list, MARK_COLUMNS_READ, &all_fields,1) || diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 0986a165c7a..af66c9447b0 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -658,7 +658,7 @@ void cleanup_items(Item *item) */ static -int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name) +int mysql_table_dump(THD *thd, LEX_STRING *db, LEX_STRING *table_name) { TABLE* table; TABLE_LIST* table_list; @@ -672,7 +672,7 @@ int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name) if 
(!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(1); // out of memory table_list->db= db->str; - table_list->table_name= table_list->alias= tbl_name; + table_list->table_name= table_list->alias= table_name->str; table_list->lock_type= TL_READ_NO_INSERT; table_list->prev_global= &table_list; // can be removed after merge with 4.1 @@ -683,8 +683,16 @@ int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name) goto err; /* purecov: end */ } + if (!table_name->length || + check_table_name(table_name->str, table_name->length, TRUE)) + { + my_error(ER_WRONG_TABLE_NAME, MYF(0), + table_name->str ? table_name->str : "NULL"); + error= 1; + goto err; + } if (lower_case_table_names) - my_casedn_str(files_charset_info, tbl_name); + my_casedn_str(files_charset_info, table_name->str); if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT, 0))) DBUG_RETURN(1); @@ -692,7 +700,7 @@ int mysql_table_dump(THD *thd, LEX_STRING *db, char *tbl_name) if (check_one_table_access(thd, SELECT_ACL, table_list)) goto err; thd->free_list = 0; - thd->set_query(tbl_name, (uint) strlen(tbl_name)); + thd->set_query(table_name->str, table_name->length); if ((error = mysqld_dump_create_info(thd, table_list, -1))) { my_error(ER_GET_ERRNO, MYF(0), my_errno); @@ -1003,6 +1011,19 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->query_plan_flags= QPLAN_INIT; thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */ thd->set_time(); + if (!thd->is_valid_time()) + { + /* + If the time has got past 2038 we need to shut this server down + We do this by making sure every command is a shutdown and we + have enough privileges to shut the server down + + TODO: remove this when we have full 64 bit my_time_t support + */ + thd->security_ctx->master_access|= SHUTDOWN_ACL; + command= COM_SHUTDOWN; + } + VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_id= global_query_id; @@ -1056,8 +1077,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_TABLE_DUMP: { - char *tbl_name; - LEX_STRING db; + LEX_STRING db, table; /* Safe because there is always a trailing \0 at the end of the packet */ uint db_len= *(uchar*) packet; if (db_len + 1 > packet_length || db_len > NAME_LEN) @@ -1083,9 +1103,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } db.length= db_len; - tbl_name= strmake(db.str, packet + 1, db_len)+1; - strmake(tbl_name, packet + db_len + 2, tbl_len); - if (mysql_table_dump(thd, &db, tbl_name) == 0) + table.length= tbl_len; + table.str= strmake(db.str, packet + 1, db_len) + 1; + strmake(table.str, packet + db_len + 2, tbl_len); + if (mysql_table_dump(thd, &db, &table) == 0) thd->main_da.disable_status(); break; } @@ -1304,8 +1325,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, mysql_reset_thd_for_next_command(thd, opt_userstat_running); thd->lex-> - select_lex.table_list.link_in_list((uchar*) &table_list, - (uchar**) &table_list.next_local); + select_lex.table_list.link_in_list(&table_list, + &table_list.next_local); thd->lex->add_to_query_tables(&table_list); /* switch on VIEW optimisation: do not fill temporary tables */ @@ -1447,8 +1468,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, SHUTDOWN_DEFAULT is 0. If client is >= 4.1.3, the shutdown level is in packet[0]. 
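The dispatch_command() hunk above adds the year-2038 guard: once time_t can no longer be represented in the 32-bit my_time_t, every command is turned into an authorized COM_SHUTDOWN, and the COM_SHUTDOWN handler below also consults the same check. THD::is_valid_time() (added in the sql_class.h hunk earlier) reduces to a sketch like this, with the boundary constant hypothetical:

#include <ctime>

static const time_t MY_TIME_T_MAX_SKETCH= 0x7FFFFFFFL; /* ~2038-01-19;
                                  stand-in for the real MY_TIME_T_MAX */

/* Sketch of THD::is_valid_time(): false once start_time overflows the
   32-bit my_time_t range, triggering the forced-shutdown path above. */
static bool is_valid_time_sketch(time_t start_time)
{
  return start_time < MY_TIME_T_MAX_SKETCH;
}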
*/ - enum mysql_enum_shutdown_level level= - (enum mysql_enum_shutdown_level) (uchar) packet[0]; + enum mysql_enum_shutdown_level level; + if (!thd->is_valid_time()) + level= SHUTDOWN_DEFAULT; + else + level= (enum mysql_enum_shutdown_level) (uchar) packet[0]; if (level == SHUTDOWN_DEFAULT) level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable else if (level != SHUTDOWN_WAIT_ALL_BUFFERS) @@ -1818,7 +1842,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, { DBUG_RETURN(1); } - TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first; + TABLE_LIST *table_list= select_lex->table_list.first; table_list->schema_select_lex= schema_select_lex; table_list->schema_table_reformed= 1; DBUG_RETURN(0); @@ -2014,7 +2038,7 @@ mysql_execute_command(THD *thd) /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ SELECT_LEX *select_lex= &lex->select_lex; /* first table of first SELECT_LEX */ - TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first; + TABLE_LIST *first_table= select_lex->table_list.first; /* list of all tables in query */ TABLE_LIST *all_tables; /* most outer SELECT_LEX_UNIT of query */ @@ -2049,7 +2073,7 @@ mysql_execute_command(THD *thd) all_tables= lex->query_tables; /* set context for commands which do not use setup_tables */ select_lex-> - context.resolve_in_table_list_only((TABLE_LIST*)select_lex-> + context.resolve_in_table_list_only(select_lex-> table_list.first); /* @@ -2395,7 +2419,7 @@ mysql_execute_command(THD *thd) thd->enable_slow_log= opt_log_slow_admin_statements; thd->query_plan_flags|= QPLAN_ADMIN; res = mysql_backup_table(thd, first_table); - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -2408,7 +2432,7 @@ mysql_execute_command(THD *thd) thd->enable_slow_log= opt_log_slow_admin_statements; thd->query_plan_flags|= QPLAN_ADMIN; res = mysql_restore_table(thd, first_table); - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -2702,7 +2726,7 @@ mysql_execute_command(THD *thd) if (create_info.used_fields & HA_CREATE_USED_UNION) { TABLE_LIST *tab; - for (tab= (TABLE_LIST*) create_info.merge_list.first; + for (tab= create_info.merge_list.first; tab; tab= tab->next_local) { @@ -2716,6 +2740,10 @@ mysql_execute_command(THD *thd) } } + /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ + if (create_info.options & HA_LEX_CREATE_TMP_TABLE) + thd->options|= OPTION_KEEP_LOG; + /* select_create is currently not re-execution friendly and needs to be created for every execution of a PS/SP. 
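A recurring change on this page is the removal of (TABLE_LIST*), (ORDER*) and (uchar*)/(uchar**) casts around table_list, order_list, group_list and proc_list. They all follow from one substitution: the untyped SQL_LIST, which linked elements through uchar* slots, is replaced by the SQL_I_List<T> template whose methods (empty, link_in_list, save_and_clear, push_front, push_back) appear at the top of this section, so first and the link slots are now properly typed. A self-contained miniature of the same intrusive-list pattern (the names below are illustrative, not the server's):

  #include <cstdio>

  /* Intrusive singly-linked list in the SQL_I_List<T> style: each element
     embeds its own link (as TABLE_LIST::next_local does) and the list keeps
     a pointer to the last link slot, so link_in_list() appends in O(1) and
     traversal needs no casts. */
  struct Node { int value; Node *next_local; };

  template <class T> struct IList
  {
    unsigned elements;
    T *first;
    T **next;
    IList() : elements(0), first(0), next(&first) {}
    void link_in_list(T *element, T **next_ptr)
    {
      elements++;
      *next= element;   /* hook the element into the current tail slot */
      next= next_ptr;   /* its own link becomes the new tail slot */
      *next= 0;
    }
  };

  int main()
  {
    IList<Node> list;
    Node a= { 1, 0 }, b= { 2, 0 };
    list.link_in_list(&a, &a.next_local);
    list.link_in_list(&b, &b.next_local);
    for (Node *n= list.first; n; n= n->next_local)
      printf("%d\n", n->value);   /* prints 1, then 2 */
    return 0;
  }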
@@ -2875,7 +2903,6 @@ end_with_restore_list: check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0, is_schema_db(select_lex->db))|| check_merge_table_access(thd, first_table->db, - (TABLE_LIST *) create_info.merge_list.first)) goto error; /* purecov: inspected */ if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0)) @@ -3012,7 +3039,7 @@ end_with_restore_list: */ res= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -3025,7 +3052,7 @@ end_with_restore_list: thd->enable_slow_log= opt_log_slow_admin_statements; thd->query_plan_flags|= QPLAN_ADMIN; res = mysql_check_table(thd, first_table, &lex->check_opt); - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -3046,7 +3073,7 @@ end_with_restore_list: */ res= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -3070,7 +3097,7 @@ end_with_restore_list: */ res= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); } - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; lex->query_tables=all_tables; break; } @@ -3088,7 +3115,7 @@ end_with_restore_list: lex->value_list, select_lex->where, select_lex->order_list.elements, - (ORDER *) select_lex->order_list.first, + select_lex->order_list.first, unit->select_limit_cnt, lex->duplicates, lex->ignore)); /* mysql_update return 2 if we need to switch to multi-update */ @@ -3248,7 +3275,7 @@ end_with_restore_list: { /* Skip first table, which is the table we are inserting in */ TABLE_LIST *second_table= first_table->next_local; - select_lex->table_list.first= (uchar*) second_table; + select_lex->table_list.first= second_table; select_lex->context.table_list= select_lex->context.first_name_resolution_table= second_table; res= mysql_insert_select_prepare(thd); @@ -3279,7 +3306,7 @@ end_with_restore_list: delete sel_result; } /* revert changes for SP */ - select_lex->table_list.first= (uchar*) first_table; + select_lex->table_list.first= first_table; } /* @@ -3341,8 +3368,7 @@ end_with_restore_list: case SQLCOM_DELETE_MULTI: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - TABLE_LIST *aux_tables= - (TABLE_LIST *)thd->lex->auxiliary_table_list.first; + TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first; multi_delete *del_result; if (!thd->locked_tables && @@ -5362,7 +5388,7 @@ static bool check_show_access(THD *thd, TABLE_LIST *table) case SCH_STATISTICS: { TABLE_LIST *dst_table; - dst_table= (TABLE_LIST *) table->schema_select_lex->table_list.first; + dst_table= table->schema_select_lex->table_list.first; DBUG_ASSERT(dst_table); @@ -5982,10 +6008,15 @@ void mysql_parse(THD *thd, const char *inBuf, uint length, sp_cache_flush_obsolete(&thd->sp_proc_cache); sp_cache_flush_obsolete(&thd->sp_func_cache); - Parser_state parser_state(thd, inBuf, length); - - bool err= parse_sql(thd, & parser_state, NULL); - *found_semicolon= parser_state.m_lip.found_semicolon; + Parser_state parser_state; + bool err; + if (!(err= parser_state.init(thd, inBuf, length))) + { + err= parse_sql(thd, & parser_state, NULL); + *found_semicolon= parser_state.m_lip.found_semicolon; + } + else + *found_semicolon= NULL; if (!err) { @@ -6072,14 +6103,17 @@ 
bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) bool error= 0; DBUG_ENTER("mysql_test_parse_for_slave"); - Parser_state parser_state(thd, inBuf, length); - lex_start(thd); - mysql_reset_thd_for_next_command(thd, 0); + Parser_state parser_state; + if (!(error= parser_state.init(thd, inBuf, length))) + { + lex_start(thd); + mysql_reset_thd_for_next_command(thd, opt_userstat_running); - if (!parse_sql(thd, & parser_state, NULL) && - all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) - error= 1; /* Ignore question */ - thd->end_statement(); + if (!parse_sql(thd, & parser_state, NULL) && + all_tables_not_ok(thd, lex->select_lex.table_list.first)) + error= 1; /* Ignore question */ + thd->end_statement(); + } thd->cleanup_after_query(); DBUG_RETURN(error); } @@ -6218,7 +6252,7 @@ add_proc_to_list(THD* thd, Item *item) *item_ptr= item; order->item=item_ptr; order->free_me=0; - thd->lex->proc_list.link_in_list((uchar*) order,(uchar**) &order->next); + thd->lex->proc_list.link_in_list(order, &order->next); return 0; } @@ -6227,7 +6261,7 @@ add_proc_to_list(THD* thd, Item *item) save order by and tables in own lists. */ -bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) +bool add_to_list(THD *thd, SQL_I_List<ORDER> &list, Item *item,bool asc) { ORDER *order; DBUG_ENTER("add_to_list"); @@ -6239,7 +6273,7 @@ bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) order->free_me=0; order->used=0; order->counter_used= 0; - list.link_in_list((uchar*) order,(uchar**) &order->next); + list.link_in_list(order, &order->next); DBUG_RETURN(0); } @@ -6353,7 +6387,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, /* check that used name is unique */ if (lock_type != TL_IGNORE) { - TABLE_LIST *first_table= (TABLE_LIST*) table_list.first; + TABLE_LIST *first_table= table_list.first; if (lex->sql_command == SQLCOM_CREATE_VIEW) first_table= first_table ? first_table->next_local : NULL; for (TABLE_LIST *tables= first_table ; @@ -6395,7 +6429,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, previous table reference to 'ptr'. Here we also add one element to the list 'table_list'. 
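mysql_parse() and mysql_test_parse_for_slave() above show the conversion this merge applies at every parse entry point (events, triggers, views, partitioning and prepared statements get the same treatment elsewhere on this page): Parser_state is no longer constructed in one step but default-constructed and then init()-ed, so that a failure inside init(), presumably an out-of-memory while setting up the input stream, can be reported to the caller rather than lost in a constructor. Schematically, every converted call site now reads:

  /* Schematic only: the two-phase Parser_state pattern. Types and
     functions are the server's own; "return true" stands for the
     site-specific error path (goto end, DBUG_RETURN(TRUE), ...). */
  Parser_state parser_state;                  /* cheap default constructor */
  if (parser_state.init(thd, query, length))  /* may fail, e.g. OOM */
    return true;                              /* propagate instead of crashing */
  lex_start(thd);
  bool err= parse_sql(thd, &parser_state, NULL);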
*/ - table_list.link_in_list((uchar*) ptr, (uchar**) &ptr->next_local); + table_list.link_in_list(ptr, &ptr->next_local); ptr->next_name_resolution_table= NULL; /* Link table in global list (all used tables) */ lex->add_to_query_tables(ptr); @@ -6628,7 +6662,7 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type) DBUG_ENTER("set_lock_for_tables"); DBUG_PRINT("enter", ("lock_type: %d for_update: %d", lock_type, for_update)); - for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first; + for (TABLE_LIST *tables= table_list.first; tables; tables= tables->next_local) { @@ -7355,8 +7389,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables) bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) { SELECT_LEX *select_lex= &thd->lex->select_lex; - TABLE_LIST *aux_tables= - (TABLE_LIST *)thd->lex->auxiliary_table_list.first; + TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first; TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last; DBUG_ENTER("multi_delete_precheck"); @@ -7402,13 +7435,13 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) bool multi_delete_set_locks_and_link_aux_tables(LEX *lex) { - TABLE_LIST *tables= (TABLE_LIST*)lex->select_lex.table_list.first; + TABLE_LIST *tables= lex->select_lex.table_list.first; TABLE_LIST *target_tbl; DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables"); lex->table_count= 0; - for (target_tbl= (TABLE_LIST *)lex->auxiliary_table_list.first; + for (target_tbl= lex->auxiliary_table_list.first; target_tbl; target_tbl= target_tbl->next_local) { lex->table_count++; @@ -7578,8 +7611,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, &create_table->grant.privilege, 0, 0, test(create_table->schema_table)) || check_merge_table_access(thd, create_table->db, - (TABLE_LIST *) - lex->create_info.merge_list.first)) + lex->create_info.merge_list.first)) goto err; if (want_priv != CREATE_TMP_ACL && check_grant(thd, want_priv, create_table, 0, 1, 0)) diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 65a58ba2f5a..3dae2d1e8b4 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -3893,7 +3893,9 @@ bool mysql_unpack_partition(THD *thd, thd->lex= &lex; thd->variables.character_set_client= system_charset_info; - Parser_state parser_state(thd, part_buf, part_info_len); + Parser_state parser_state; + if (parser_state.init(thd, part_buf, part_info_len)) + goto end; lex_start(thd); *work_part_info_used= false; @@ -5956,32 +5958,6 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt) } } -/* - Unlock and close table before renaming and dropping partitions - SYNOPSIS - alter_close_tables() - lpt Struct carrying parameters - RETURN VALUES - 0 -*/ - -static int alter_close_tables(ALTER_PARTITION_PARAM_TYPE *lpt) -{ - THD *thd= lpt->thd; - const char *db= lpt->db; - const char *table_name= lpt->table_name; - DBUG_ENTER("alter_close_tables"); - /* - We need to also unlock tables and close all handlers. - We set lock to zero to ensure we don't do this twice - and we set db_stat to zero to ensure we don't close twice. 
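The alter_close_tables() helper deleted here (its doc comment is above; the body, which takes LOCK_open around close_data_files_and_morph_locks(), follows) is folded together with abort_and_upgrade_lock() into abort_and_upgrade_lock_and_close_table(), called at three places in fast_alter_partition_table() further down. The combined helper's body is not part of this diff; a hypothetical reconstruction from the two pieces it replaces would be:

  /* Hypothetical reconstruction, assembled only from the removed code
     visible on this page; not the committed body. */
  static int abort_and_upgrade_lock_and_close_table(ALTER_PARTITION_PARAM_TYPE *lpt)
  {
    if (abort_and_upgrade_lock(lpt))   /* documented as always returning 0 */
      return 1;
    pthread_mutex_lock(&LOCK_open);
    close_data_files_and_morph_locks(lpt->thd, lpt->db, lpt->table_name);
    pthread_mutex_unlock(&LOCK_open);
    return 0;
  }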
- */ - pthread_mutex_lock(&LOCK_open); - close_data_files_and_morph_locks(thd, db, table_name); - pthread_mutex_unlock(&LOCK_open); - DBUG_RETURN(0); -} - /* Handle errors for ALTER TABLE for partitioning @@ -6279,9 +6255,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, write_log_drop_partition(lpt) || ERROR_INJECT_CRASH("crash_drop_partition_3") || (not_completed= FALSE) || - abort_and_upgrade_lock(lpt) || /* Always returns 0 */ - ERROR_INJECT_CRASH("crash_drop_partition_4") || - alter_close_tables(lpt) || + abort_and_upgrade_lock_and_close_table(lpt) || ERROR_INJECT_CRASH("crash_drop_partition_5") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, @@ -6346,9 +6320,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_CRASH("crash_add_partition_2") || mysql_change_partitions(lpt) || ERROR_INJECT_CRASH("crash_add_partition_3") || - abort_and_upgrade_lock(lpt) || /* Always returns 0 */ - ERROR_INJECT_CRASH("crash_add_partition_4") || - alter_close_tables(lpt) || + abort_and_upgrade_lock_and_close_table(lpt) || ERROR_INJECT_CRASH("crash_add_partition_5") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, @@ -6436,9 +6408,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, write_log_final_change_partition(lpt) || ERROR_INJECT_CRASH("crash_change_partition_4") || (not_completed= FALSE) || - abort_and_upgrade_lock(lpt) || /* Always returns 0 */ - ERROR_INJECT_CRASH("crash_change_partition_5") || - alter_close_tables(lpt) || + abort_and_upgrade_lock_and_close_table(lpt) || ERROR_INJECT_CRASH("crash_change_partition_6") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 7c5bbf60b53..c80cc55df4c 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2140,10 +2140,6 @@ typedef DECLARE_MYSQL_THDVAR_SIMPLE(thdvar_uint_t, uint); typedef DECLARE_MYSQL_THDVAR_SIMPLE(thdvar_ulong_t, ulong); typedef DECLARE_MYSQL_THDVAR_SIMPLE(thdvar_ulonglong_t, ulonglong); -#define SET_PLUGIN_VAR_RESOLVE(opt)\ - *(mysql_sys_var_ptr_p*)&((opt)->resolve)= mysql_sys_var_ptr -typedef uchar *(*mysql_sys_var_ptr_p)(void* a_thd, int offset); - /**************************************************************************** default variable data check and update functions @@ -2672,11 +2668,49 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock) return (uchar*)thd->variables.dynamic_variables_ptr + offset; } -static uchar *mysql_sys_var_ptr(void* a_thd, int offset) + +/** + For correctness and simplicity's sake, a pointer to a function + must be compatible with pointed-to type, that is, the return and + parameters types must be the same. Thus, a callback function is + defined for each scalar type. The functions are assigned in + construct_options to their respective types. 
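The sql_plugin.cc comment above explains the retirement of SET_PLUGIN_VAR_RESOLVE: storing one mysql_sys_var_ptr() through pointers of unrelated function types invoked undefined behaviour, so each scalar kind now gets a resolver whose signature matches its resolve member exactly, as the construct_options() switch below assigns them. A toy, self-contained rendition of that pattern with two kinds (names are ours, not the server's):

  #include <cstdio>

  /* Each variable kind stores a callback whose pointer type matches
     exactly; no function-pointer casts anywhere. */
  struct ThdStore { int int_slot; char bool_slot; };

  static int  *resolve_int (ThdStore *t, int /*offset*/) { return &t->int_slot;  }
  static char *resolve_bool(ThdStore *t, int /*offset*/) { return &t->bool_slot; }

  struct thdvar_int_like  { int  *(*resolve)(ThdStore*, int); };
  struct thdvar_bool_like { char *(*resolve)(ThdStore*, int); };

  int main()
  {
    thdvar_int_like  iv= { resolve_int  };
    thdvar_bool_like bv= { resolve_bool };
    ThdStore t= { 42, 1 };
    printf("%d %d\n", *iv.resolve(&t, 0), (int) *bv.resolve(&t, 0));
    return 0;
  }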
+*/ + +static char *mysql_sys_var_char(THD* thd, int offset) +{ + return (char *) intern_sys_var_ptr(thd, offset, true); +} + +static int *mysql_sys_var_int(THD* thd, int offset) +{ + return (int *) intern_sys_var_ptr(thd, offset, true); +} + +static long *mysql_sys_var_long(THD* thd, int offset) { - return intern_sys_var_ptr((THD *)a_thd, offset, true); + return (long *) intern_sys_var_ptr(thd, offset, true); } +static unsigned long *mysql_sys_var_ulong(THD* thd, int offset) +{ + return (unsigned long *) intern_sys_var_ptr(thd, offset, true); +} + +static long long *mysql_sys_var_longlong(THD* thd, int offset) +{ + return (long long *) intern_sys_var_ptr(thd, offset, true); +} + +static unsigned long long *mysql_sys_var_ulonglong(THD* thd, int offset) +{ + return (unsigned long long *) intern_sys_var_ptr(thd, offset, true); +} + +static char **mysql_sys_var_str(THD* thd, int offset) +{ + return (char **) intern_sys_var_ptr(thd, offset, true); +} void plugin_thdvar_init(THD *thd) { @@ -3247,25 +3281,25 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp, continue; switch (opt->flags & PLUGIN_VAR_TYPEMASK) { case PLUGIN_VAR_BOOL: - SET_PLUGIN_VAR_RESOLVE((thdvar_bool_t *) opt); + ((thdvar_bool_t *) opt)->resolve= mysql_sys_var_char; break; case PLUGIN_VAR_INT: - SET_PLUGIN_VAR_RESOLVE((thdvar_int_t *) opt); + ((thdvar_int_t *) opt)->resolve= mysql_sys_var_int; break; case PLUGIN_VAR_LONG: - SET_PLUGIN_VAR_RESOLVE((thdvar_long_t *) opt); + ((thdvar_long_t *) opt)->resolve= mysql_sys_var_long; break; case PLUGIN_VAR_LONGLONG: - SET_PLUGIN_VAR_RESOLVE((thdvar_longlong_t *) opt); + ((thdvar_longlong_t *) opt)->resolve= mysql_sys_var_longlong; break; case PLUGIN_VAR_STR: - SET_PLUGIN_VAR_RESOLVE((thdvar_str_t *) opt); + ((thdvar_str_t *) opt)->resolve= mysql_sys_var_str; break; case PLUGIN_VAR_ENUM: - SET_PLUGIN_VAR_RESOLVE((thdvar_enum_t *) opt); + ((thdvar_enum_t *) opt)->resolve= mysql_sys_var_ulong; break; case PLUGIN_VAR_SET: - SET_PLUGIN_VAR_RESOLVE((thdvar_set_t *) opt); + ((thdvar_set_t *) opt)->resolve= mysql_sys_var_ulonglong; break; default: sql_print_error("Unknown variable type code 0x%x in plugin '%s'.", diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index e4940204386..600c140c776 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -702,6 +702,19 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, } #ifndef EMBEDDED_LIBRARY + +/** + Check whether this parameter data type is compatible with long data. + Used to detect whether a long data stream has been supplied to a + incompatible data type. +*/ +inline bool is_param_long_data_type(Item_param *param) +{ + return ((param->param_type >= MYSQL_TYPE_TINY_BLOB) && + (param->param_type <= MYSQL_TYPE_STRING)); +} + + /** Routines to assign parameters from data supplied by the client. @@ -771,6 +784,14 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array, DBUG_RETURN(1); } } + /* + A long data stream was supplied for this parameter marker. + This was done after prepare, prior to providing a placeholder + type (the types are supplied at execute). Check that the + supplied type of placeholder can accept a data stream. 
+ */ + else if (!is_param_long_data_type(param)) + DBUG_RETURN(1); res= param->query_val_str(&str); if (param->convert_str_value(thd)) DBUG_RETURN(1); /* out of memory */ @@ -809,6 +830,14 @@ static bool insert_params(Prepared_statement *stmt, uchar *null_array, DBUG_RETURN(1); } } + /* + A long data stream was supplied for this parameter marker. + This was done after prepare, prior to providing a placeholder + type (the types are supplied at execute). Check that the + supplied type of placeholder can accept a data stream. + */ + else if (is_param_long_data_type(param)) + DBUG_RETURN(1); if (param->convert_str_value(stmt->thd)) DBUG_RETURN(1); /* out of memory */ } @@ -1242,7 +1271,7 @@ static int mysql_test_update(Prepared_statement *stmt, if (mysql_prepare_update(thd, table_list, &select->where, select->order_list.elements, - (ORDER *) select->order_list.first)) + select->order_list.first)) goto error; #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1750,11 +1779,10 @@ error: static int mysql_insert_select_prepare_tester(THD *thd) { SELECT_LEX *first_select= &thd->lex->select_lex; - TABLE_LIST *second_table= ((TABLE_LIST*)first_select->table_list.first)-> - next_local; + TABLE_LIST *second_table= first_select->table_list.first->next_local; /* Skip first table, which is the table we are inserting in */ - first_select->table_list.first= (uchar *) second_table; + first_select->table_list.first= second_table; thd->lex->select_lex.context.table_list= thd->lex->select_lex.context.first_name_resolution_table= second_table; @@ -1791,7 +1819,7 @@ static bool mysql_test_insert_select(Prepared_statement *stmt, return 1; /* store it, because mysql_insert_select_prepare_tester change it */ - first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; + first_local_table= lex->select_lex.table_list.first; DBUG_ASSERT(first_local_table != 0); res= @@ -1799,7 +1827,7 @@ static bool mysql_test_insert_select(Prepared_statement *stmt, &mysql_insert_select_prepare_tester, OPTION_SETUP_TABLES_DONE); /* revert changes made by mysql_insert_select_prepare_tester */ - lex->select_lex.table_list.first= (uchar*) first_local_table; + lex->select_lex.table_list.first= first_local_table; return res; } @@ -2343,10 +2371,10 @@ void reinit_stmt_before_use(THD *thd, LEX *lex) DBUG_ASSERT(sl->join == 0); ORDER *order; /* Fix GROUP list */ - for (order= (ORDER *)sl->group_list.first; order; order= order->next) + for (order= sl->group_list.first; order; order= order->next) order->item= &order->item_ptr; /* Fix ORDER list */ - for (order= (ORDER *)sl->order_list.first; order; order= order->next) + for (order= sl->order_list.first; order; order= order->next) order->item= &order->item_ptr; /* clear the no_error flag for INSERT/UPDATE IGNORE */ @@ -2383,7 +2411,7 @@ void reinit_stmt_before_use(THD *thd, LEX *lex) (multi-delete). We do a full clean up, although at the moment all we need to clean in the tables of MULTI-DELETE list is 'table' member. 
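Both insert_params_with_log() and insert_params() above gain the same guard: a parameter filled via COM_STMT_SEND_LONG_DATA is rejected at execute time when its placeholder type is not long-data-capable, i.e. when !is_param_long_data_type(param) holds. In the protocol's enum_field_types, the range MYSQL_TYPE_TINY_BLOB..MYSQL_TYPE_STRING tested by that helper covers exactly the blob and string codes; a standalone illustration:

  #include <cassert>

  /* The numeric values are the protocol's enum_field_types codes:
     TINY_BLOB=249, MEDIUM_BLOB=250, LONG_BLOB=251, BLOB=252,
     VAR_STRING=253, STRING=254; LONG (the INT type) is 3. */
  enum { TYPE_LONG= 3, TYPE_TINY_BLOB= 249, TYPE_BLOB= 252, TYPE_STRING= 254 };

  static bool accepts_long_data(int t)
  {
    return t >= TYPE_TINY_BLOB && t <= TYPE_STRING;
  }

  int main()
  {
    assert(accepts_long_data(TYPE_BLOB));    /* streaming into a BLOB: fine */
    assert(!accepts_long_data(TYPE_LONG));   /* streaming into an INT: error */
    return 0;
  }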
*/ - for (TABLE_LIST *tables= (TABLE_LIST*) lex->auxiliary_table_list.first; + for (TABLE_LIST *tables= lex->auxiliary_table_list.first; tables; tables= tables->next_global) { @@ -3038,13 +3066,21 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) old_stmt_arena= thd->stmt_arena; thd->stmt_arena= this; - Parser_state parser_state(thd, thd->query(), thd->query_length()); + Parser_state parser_state; + if (parser_state.init(thd, thd->query(), thd->query_length())) + { + thd->restore_backup_statement(this, &stmt_backup); + thd->restore_active_arena(this, &stmt_backup); + thd->stmt_arena= old_stmt_arena; + DBUG_RETURN(TRUE); + } + parser_state.m_lip.stmt_prepare_mode= TRUE; lex_start(thd); error= parse_sql(thd, & parser_state, NULL) || - thd->is_error() || - init_param_array(this); + thd->is_error() || + init_param_array(this); lex->set_trg_event_type_for_tables(); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 7772b4d1ed2..1970287b06e 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1279,7 +1279,7 @@ bool change_master(THD* thd, Master_info* mi) Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never a slave before). */ - if (flush_master_info(mi, 0)) + if (flush_master_info(mi, FALSE, FALSE)) { my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file"); unlock_slave_threads(mi); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 042dc7e0730..59d9d72e778 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -266,15 +266,15 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, setup_tables_done_option changed for next rexecution */ res= mysql_select(thd, &select_lex->ref_pointer_array, - (TABLE_LIST*) select_lex->table_list.first, + select_lex->table_list.first, select_lex->with_wild, select_lex->item_list, select_lex->where, select_lex->order_list.elements + select_lex->group_list.elements, - (ORDER*) select_lex->order_list.first, - (ORDER*) select_lex->group_list.first, + select_lex->order_list.first, + select_lex->group_list.first, select_lex->having, - (ORDER*) lex->proc_list.first, + lex->proc_list.first, select_lex->options | thd->options | setup_tables_done_option, result, unit, select_lex); @@ -574,13 +574,21 @@ JOIN::prepare(Item ***rref_pointer_array, { Item *item= *ord->item; /* - Disregard sort order if there's only "{VAR}CHAR(0) NOT NULL" fields - there. Such fields don't contain any data to sort. + Disregard sort order if there's only + zero length NOT NULL fields (e.g. {VAR}CHAR(0) NOT NULL") or + zero length NOT NULL string functions there. + Such tuples don't contain any data to sort. */ if (!real_order && - (item->type() != Item::FIELD_ITEM || - ((Item_field *) item)->field->maybe_null() || - ((Item_field *) item)->field->sort_length())) + /* Not a zero length NOT NULL field */ + ((item->type() != Item::FIELD_ITEM || + ((Item_field *) item)->field->maybe_null() || + ((Item_field *) item)->field->sort_length()) && + /* AND not a zero length NOT NULL string function. */ + (item->type() != Item::FUNC_ITEM || + item->maybe_null || + item->result_type() != STRING_RESULT || + item->max_length))) real_order= TRUE; if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) @@ -2717,15 +2725,29 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds, as well as allow us to catch illegal cross references/ Warshall's algorithm is used to build the transitive closure. As we use bitmaps to represent the relation the complexity - of the algorithm is O((number of tables)^2). 
+ of the algorithm is O((number of tables)^2). + + The classic form of the Warshall's algorithm would look like: + for (i= 0; i < table_count; i++) + { + for (j= 0; j < table_count; j++) + { + for (k= 0; k < table_count; k++) + { + if (bitmap_is_set(stat[j].dependent, i) && + bitmap_is_set(stat[i].dependent, k)) + bitmap_set_bit(stat[j].dependent, k); + } + } */ - for (i= 0, s= stat ; i < table_count ; i++, s++) + + for (s= stat ; s < stat_end ; s++) { - for (uint j= 0 ; j < table_count ; j++) + table= s->table; + for (JOIN_TAB *t= stat ; t < stat_end ; t++) { - table= stat[j].table; - if (s->dependent & table->map) - s->dependent |= table->reginfo.join_tab->dependent; + if (t->dependent & table->map) + t->dependent |= table->reginfo.join_tab->dependent; } if (outer_join & s->table->map) s->table->maybe_null= 1; @@ -2881,8 +2903,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds, !table->fulltext_searched && !table->pos_in_table_list->embedding) { - if ((table->key_info[key].flags & (HA_NOSAME | HA_END_SPACE_KEY)) - == HA_NOSAME) + if (table->key_info[key].flags & HA_NOSAME) { if (const_ref == eq_part) { // Found everything for ref. @@ -5836,8 +5857,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, DBUG_RETURN(0); if (j->type == JT_CONST) j->table->const_table= 1; - else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | - HA_END_SPACE_KEY)) != HA_NOSAME) || + else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) != HA_NOSAME) || keyparts != keyinfo->key_parts || null_ref_key) { /* Must read with repeat */ @@ -7279,7 +7299,8 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, *simple_order=0; // Must do a temp table to sort else if (!(order_tables & not_const_tables)) { - if (order->item[0]->with_subselect) + if (order->item[0]->with_subselect && + !(join->select_lex->options & SELECT_DESCRIBE)) order->item[0]->val_str(&order->item[0]->str_value); DBUG_PRINT("info",("removing: %s", order->item[0]->full_name())); continue; // skip const item @@ -8784,6 +8805,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) NESTED_JOIN *nested_join; TABLE_LIST *prev_table= 0; List_iterator<TABLE_LIST> li(*join_list); + bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN); DBUG_ENTER("simplify_joins"); /* @@ -8896,7 +8918,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) if (prev_table) { /* The order of tables is reverse: prev_table follows table */ - if (prev_table->straight) + if (prev_table->straight || straight_join) prev_table->dep_tables|= used_tables; if (prev_table->on_expr) { @@ -9178,6 +9200,46 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab) /** Nested joins perspective: Remove the last table from the join order. + The algorithm is the reciprocal of check_interleaving_with_nj(), hence + parent join nest nodes are updated only when the last table in its child + node is removed. The ASCII graphic below will clarify. + + %A table nesting such as <tt> t1 x [ ( t2 x t3 ) x ( t4 x t5 ) ] </tt>is + represented by the below join nest tree. + + @verbatim + NJ1 + _/ / \ + _/ / NJ2 + _/ / / \ + / / / \ + t1 x [ (t2 x t3) x (t4 x t5) ] + @endverbatim + + At the point in time when check_interleaving_with_nj() adds the table t5 to + the query execution plan, QEP, it also directs the node named NJ2 to mark + the table as covered. NJ2 does so by incrementing its @c counter + member. 
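The rewritten make_join_statistics() loop above is the bitmap form of the closure that the comment describes: each JOIN_TAB::dependent is a table_map, so the textbook inner k-loop collapses into one OR per pair, i.e. whenever j depends on i, j inherits all of i's dependencies at once. The same computation over plain machine words, self-contained:

  #include <cstdint>
  #include <cstdio>

  /* Transitive closure over bitmap adjacency, shaped like the server's
     loop: dep[j] |= dep[i] whenever bit i is set in dep[j]. */
  int main()
  {
    const int n= 4;
    uint32_t dep[4]= { 0, 1u << 0, 1u << 1, 0 };   /* 1 -> 0, 2 -> 1 */
    for (int i= 0; i < n; i++)                     /* i: intermediate node */
      for (int j= 0; j < n; j++)
        if (dep[j] & (1u << i))
          dep[j]|= dep[i];                         /* j inherits i's set */
    printf("0x%x\n", (unsigned) dep[2]);           /* 0x3: 2 reaches 1 and 0 */
    return 0;
  }

The restore_prev_nj_state() rewrite further down relies on a NESTED_JOIN::is_fully_covered() predicate whose definition is not in this diff; from the counter bookkeeping around it, it presumably tests counter == n_tables.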
Since all of NJ2's tables are now covered by the QEP, the algorithm + proceeds up the tree to NJ1, incrementing its counter as well. All join + nests are now completely covered by the QEP. + + restore_prev_nj_state() does the above in reverse. As seen above, the node + NJ1 contains the nodes t2, t3, and NJ2. Its counter being equal to 3 means + that the plan covers t2, t3, and NJ2, @e and that the sub-plan (t4 x t5) + completely covers NJ2. The removal of t5 from the partial plan will first + decrement NJ2's counter to 1. It will then detect that NJ2 went from being + completely to partially covered, and hence the algorithm must continue + upwards to NJ1 and decrement its counter to 2. %A subsequent removal of t4 + will however not influence NJ1 since it did not un-cover the last table in + NJ2. + + SYNOPSIS + restore_prev_nj_state() + last join table to remove, it is assumed to be the last in current + partial join order. + + DESCRIPTION + Remove the last table from the partial join order and update the nested joins counters and join->cur_embedding_map. It is ok to call this function for the first table in join order (for which @@ -9191,19 +9253,20 @@ static void restore_prev_nj_state(JOIN_TAB *last) { TABLE_LIST *last_emb= last->table->pos_in_table_list->embedding; JOIN *join= last->join; - while (last_emb) + for (;last_emb != NULL; last_emb= last_emb->embedding) { - if (!(--last_emb->nested_join->counter)) - join->cur_embedding_map&= ~last_emb->nested_join->nj_map; - else if (last_emb->nested_join->n_tables-1 == - last_emb->nested_join->counter) - { - join->cur_embedding_map|= last_emb->nested_join->nj_map; - break; - } - else + NESTED_JOIN *nest= last_emb->nested_join; + DBUG_ASSERT(nest->counter > 0); + + bool was_fully_covered= nest->is_fully_covered(); + + if (--nest->counter == 0) + join->cur_embedding_map&= ~nest->nj_map; + + if (!was_fully_covered) break; - last_emb= last_emb->embedding; + + join->cur_embedding_map|= nest->nj_map; } } @@ -10337,7 +10400,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->null_flags= (uchar*) table->record[0]; share->null_fields= null_count+ hidden_null_count; - share->null_bytes= null_pack_length; + share->null_bytes= share->null_bytes_for_compare= null_pack_length; } null_count= (blob_count == 0) ? 1 : 0; hidden_field_count=param->hidden_field_count; @@ -10703,7 +10766,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list) { table->null_flags= (uchar*) table->record[0]; share->null_fields= null_count; - share->null_bytes= null_pack_length; + share->null_bytes= share->null_bytes_for_compare= null_pack_length; } table->in_use= thd; /* field->reset() may access table->in_use */ @@ -13633,6 +13696,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, uint nr; key_map keys; uint best_key_parts; + uint saved_best_key_parts= 0; int best_key_direction; ha_rows best_records; double read_time; @@ -13834,6 +13898,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, { best_key= nr; best_key_parts= keyinfo->key_parts; + saved_best_key_parts= used_key_parts; best_records= quick_records; is_best_covering= is_covering; best_key_direction= direction; @@ -13920,8 +13985,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ } } - used_key_parts= best_key_parts; order_direction= best_key_direction; + /* + saved_best_key_parts is actual number of used keyparts found by the + test_if_order_by_key function. 
It could differ from keyinfo->key_parts, + thus we have to restore it in case of desc order as it affects + QUICK_SELECT_DESC behaviour. + */ + used_key_parts= (order_direction == -1) ? + saved_best_key_parts : best_key_parts; } else DBUG_RETURN(0); @@ -17074,15 +17146,15 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) thd->lex->current_select= first; unit->set_limit(unit->global_parameters); res= mysql_select(thd, &first->ref_pointer_array, - (TABLE_LIST*) first->table_list.first, + first->table_list.first, first->with_wild, first->item_list, first->where, first->order_list.elements + first->group_list.elements, - (ORDER*) first->order_list.first, - (ORDER*) first->group_list.first, + first->order_list.first, + first->group_list.first, first->having, - (ORDER*) thd->lex->proc_list.first, + thd->lex->proc_list.first, first->options | thd->options | SELECT_DESCRIBE, result, unit, first); } @@ -17392,7 +17464,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if (group_list.elements) { str->append(STRING_WITH_LEN(" group by ")); - print_order(str, (ORDER *) group_list.first, query_type); + print_order(str, group_list.first, query_type); switch (olap) { case CUBE_TYPE: @@ -17423,7 +17495,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if (order_list.elements) { str->append(STRING_WITH_LEN(" order by ")); - print_order(str, (ORDER *) order_list.first, query_type); + print_order(str, order_list.first, query_type); } // limit diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 4dca54b6290..e1d223eadad 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -31,6 +31,7 @@ #include "event_data_objects.h" #endif #include <my_dir.h> +#include "debug_sync.h" #define STR_OR_NIL(S) ((S) ? 
(S) : "<nil>") @@ -3354,7 +3355,9 @@ make_table_name_list(THD *thd, List<LEX_STRING> *table_names, LEX *lex, { if (with_i_schema) { - if (find_schema_table(thd, lookup_field_vals->table_value.str)) + ST_SCHEMA_TABLE *schema_table= + find_schema_table(thd, lookup_field_vals->table_value.str); + if (schema_table && !schema_table->hidden) { if (table_names->push_back(&lookup_field_vals->table_value)) return 1; @@ -3431,8 +3434,7 @@ fill_schema_show_cols_or_idxs(THD *thd, TABLE_LIST *tables, bool res; LEX_STRING tmp_lex_string, tmp_lex_string1, *db_name, *table_name; enum_sql_command save_sql_command= lex->sql_command; - TABLE_LIST *show_table_list= (TABLE_LIST*) tables->schema_select_lex-> - table_list.first; + TABLE_LIST *show_table_list= tables->schema_select_lex->table_list.first; TABLE *table= tables->table; int error= 1; DBUG_ENTER("fill_schema_show"); @@ -3878,12 +3880,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) goto err; if (make_table_list(thd, &sel, db_name, table_name)) goto err; - TABLE_LIST *show_table_list= (TABLE_LIST*) sel.table_list.first; + TABLE_LIST *show_table_list= sel.table_list.first; lex->all_selects_list= &sel; lex->derived_tables= 0; lex->sql_command= SQLCOM_SHOW_FIELDS; show_table_list->i_s_requested_object= schema_table->i_s_requested_object; + DEBUG_SYNC(thd, "before_open_in_get_all_tables"); res= open_normal_and_derived_tables(thd, show_table_list, MYSQL_LOCK_IGNORE_FLUSH); lex->sql_command= save_sql_command; @@ -4622,24 +4625,37 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) } +static inline void copy_field_as_string(Field *to_field, Field *from_field) +{ + char buff[MAX_FIELD_WIDTH]; + String tmp_str(buff, sizeof(buff), system_charset_info); + from_field->val_str(&tmp_str); + to_field->store(tmp_str.ptr(), tmp_str.length(), system_charset_info); +} + + bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, const char *wild, bool full_access, const char *sp_user) { - String tmp_string; - String sp_db, sp_name, definer; MYSQL_TIME time; LEX *lex= thd->lex; CHARSET_INFO *cs= system_charset_info; - get_field(thd->mem_root, proc_table->field[0], &sp_db); - get_field(thd->mem_root, proc_table->field[1], &sp_name); - get_field(thd->mem_root, proc_table->field[11], &definer); + char sp_db_buff[NAME_LEN + 1], sp_name_buff[NAME_LEN + 1], + definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 2]; + String sp_db(sp_db_buff, sizeof(sp_db_buff), cs); + String sp_name(sp_name_buff, sizeof(sp_name_buff), cs); + String definer(definer_buff, sizeof(definer_buff), cs); + + proc_table->field[0]->val_str(&sp_db); + proc_table->field[1]->val_str(&sp_name); + proc_table->field[11]->val_str(&definer); + if (!full_access) - full_access= !strcmp(sp_user, definer.ptr()); - if (!full_access && check_some_routine_access(thd, sp_db.ptr(), - sp_name.ptr(), - proc_table->field[2]-> - val_int() == - TYPE_ENUM_PROCEDURE)) + full_access= !strcmp(sp_user, definer.c_ptr_safe()); + if (!full_access && + check_some_routine_access(thd, sp_db.c_ptr_safe(), sp_name.c_ptr_safe(), + proc_table->field[2]->val_int() == + TYPE_ENUM_PROCEDURE)) return 0; if ((lex->sql_command == SQLCOM_SHOW_STATUS_PROC && @@ -4649,55 +4665,42 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0) { restore_record(table, s->default_values); - if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0)) + if (!wild || !wild[0] || !wild_compare(sp_name.c_ptr_safe(), wild, 0)) { int 
enum_idx= (int) proc_table->field[5]->val_int(); table->field[3]->store(sp_name.ptr(), sp_name.length(), cs); - get_field(thd->mem_root, proc_table->field[3], &tmp_string); - table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[0], proc_table->field[3]); table->field[2]->store(sp_db.ptr(), sp_db.length(), cs); - get_field(thd->mem_root, proc_table->field[2], &tmp_string); - table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[4], proc_table->field[2]); if (proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION) { - get_field(thd->mem_root, proc_table->field[9], &tmp_string); - table->field[5]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[5], proc_table->field[9]); table->field[5]->set_notnull(); } if (full_access) { - get_field(thd->mem_root, proc_table->field[19], &tmp_string); - table->field[7]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[7], proc_table->field[19]); table->field[7]->set_notnull(); } table->field[6]->store(STRING_WITH_LEN("SQL"), cs); table->field[10]->store(STRING_WITH_LEN("SQL"), cs); - get_field(thd->mem_root, proc_table->field[6], &tmp_string); - table->field[11]->store(tmp_string.ptr(), tmp_string.length(), cs); - table->field[12]->store(sp_data_access_name[enum_idx].str, + copy_field_as_string(table->field[11], proc_table->field[6]); + table->field[12]->store(sp_data_access_name[enum_idx].str, sp_data_access_name[enum_idx].length , cs); - get_field(thd->mem_root, proc_table->field[7], &tmp_string); - table->field[14]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[14], proc_table->field[7]); + bzero((char *)&time, sizeof(time)); ((Field_timestamp *) proc_table->field[12])->get_time(&time); table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); bzero((char *)&time, sizeof(time)); ((Field_timestamp *) proc_table->field[13])->get_time(&time); table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); - get_field(thd->mem_root, proc_table->field[14], &tmp_string); - table->field[17]->store(tmp_string.ptr(), tmp_string.length(), cs); - get_field(thd->mem_root, proc_table->field[15], &tmp_string); - table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[17], proc_table->field[14]); + copy_field_as_string(table->field[18], proc_table->field[15]); table->field[19]->store(definer.ptr(), definer.length(), cs); - - get_field(thd->mem_root, proc_table->field[16], &tmp_string); - table->field[20]->store(tmp_string.ptr(), tmp_string.length(), cs); - - get_field(thd->mem_root, proc_table->field[17], &tmp_string); - table->field[21]->store(tmp_string.ptr(), tmp_string.length(), cs); - - get_field(thd->mem_root, proc_table->field[18], &tmp_string); - table->field[22]->store(tmp_string.ptr(), tmp_string.length(), cs); + copy_field_as_string(table->field[20], proc_table->field[16]); + copy_field_as_string(table->field[21], proc_table->field[17]); + copy_field_as_string(table->field[22], proc_table->field[18]); return schema_table_store_record(thd, table); } @@ -6906,7 +6909,7 @@ ST_FIELD_INFO engines_fields_info[]= { {"ENGINE", 64, MYSQL_TYPE_STRING, 0, 0, "Engine", SKIP_OPEN_TABLE}, {"SUPPORT", 8, MYSQL_TYPE_STRING, 0, 0, "Support", SKIP_OPEN_TABLE}, - {"COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment", SKIP_OPEN_TABLE}, + {"COMMENT", 160, MYSQL_TYPE_STRING, 0, 0, "Comment", SKIP_OPEN_TABLE}, 
{"TRANSACTIONS", 3, MYSQL_TYPE_STRING, 0, 1, "Transactions", SKIP_OPEN_TABLE}, {"XA", 3, MYSQL_TYPE_STRING, 0, 1, "XA", SKIP_OPEN_TABLE}, {"SAVEPOINTS", 3 ,MYSQL_TYPE_STRING, 0, 1, "Savepoints", SKIP_OPEN_TABLE}, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 2190353a9cb..b9b5ff47cf6 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -386,6 +386,25 @@ uint filename_to_tablename(const char *from, char *to, uint to_length) /** + Check if given string begins with "#mysql50#" prefix + + @param name string to check cut + + @retval + FALSE no prefix found + @retval + TRUE prefix found +*/ + +bool check_mysql50_prefix(const char *name) +{ + return (name[0] == '#' && + !strncmp(name, MYSQL50_TABLE_NAME_PREFIX, + MYSQL50_TABLE_NAME_PREFIX_LENGTH)); +} + + +/** Check if given string begins with "#mysql50#" prefix, cut it if so. @param from string to check and cut @@ -400,9 +419,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length) uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length) { - if (from[0] == '#' && - !strncmp(from, MYSQL50_TABLE_NAME_PREFIX, - MYSQL50_TABLE_NAME_PREFIX_LENGTH)) + if (check_mysql50_prefix(from)) return (uint) (strmake(to, from + MYSQL50_TABLE_NAME_PREFIX_LENGTH, to_length - 1) - to); return 0; @@ -2232,10 +2249,10 @@ static int sort_keys(KEY *a, KEY *b) { if (!(b_flags & HA_NOSAME)) return -1; - if ((a_flags ^ b_flags) & (HA_NULL_PART_KEY | HA_END_SPACE_KEY)) + if ((a_flags ^ b_flags) & HA_NULL_PART_KEY) { /* Sort NOT NULL keys before other keys */ - return (a_flags & (HA_NULL_PART_KEY | HA_END_SPACE_KEY)) ? 1 : -1; + return (a_flags & HA_NULL_PART_KEY) ? 1 : -1; } if (a->name == primary_key_name) return -1; @@ -4633,7 +4650,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, table->next_global= 0; save_next_local= table->next_local; table->next_local= 0; - select->table_list.first= (uchar*)table; + select->table_list.first= table; /* Time zone tables and SP tables can be add to lex->query_tables list, so it have to be prepared. @@ -5347,6 +5364,11 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, */ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { + if (src_table->table->file->ht == partition_hton) + { + my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); + goto err; + } if (find_temporary_table(thd, db, table_name)) goto table_exists; dst_path_length= build_tmptable_filename(thd, dst_path, sizeof(dst_path)); @@ -5411,14 +5433,15 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, /* For partitioned tables we need to copy the .par file as well since it is used in open_table_def to even be able to create a new handler. - There is no way to find out here if the original table is a - partitioned table so we copy the file and ignore any errors. 
*/ - fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); - strmov(dst_path, tmp_path); - fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); - strmov(src_path, tmp_path); - my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)); + if (src_table->table->file->ht == partition_hton) + { + fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); + strmov(dst_path, tmp_path); + fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); + strmov(src_path, tmp_path); + my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)); + } #endif DBUG_EXECUTE_IF("sleep_create_like_before_ha_create", my_sleep(6000000);); @@ -6604,6 +6627,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, uint *index_add_buffer= NULL; uint candidate_key_count= 0; bool no_pk; + ulong explicit_used_fields= 0; DBUG_ENTER("mysql_alter_table"); /* @@ -6874,6 +6898,7 @@ view_err: change the row format in update_create_info(). */ create_info->used_fields|= HA_CREATE_USED_ROW_FORMAT; + explicit_used_fields|= HA_CREATE_USED_ROW_FORMAT; } DBUG_PRINT("info", ("old type: %s new type: %s", @@ -7034,6 +7059,9 @@ view_err: if (mysql_prepare_alter_table(thd, table, create_info, alter_info)) goto err; + /* Remove markers set for update_create_info */ + create_info->used_fields&= ~explicit_used_fields; + if (need_copy_table == ALTER_TABLE_METADATA_ONLY) need_copy_table= alter_info->change_level; diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index aafb25013f6..bf4a46a4c67 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -653,7 +653,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, */ old_field= new_field= table->field; - for (trg_field= (Item_trigger_field *)(lex->trg_table_fields.first); + for (trg_field= lex->trg_table_fields.first; trg_field; trg_field= trg_field->next_trg_field) { /* @@ -1297,9 +1297,9 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, thd->variables.sql_mode= (ulong)*trg_sql_mode; - Parser_state parser_state(thd, - trg_create_str->str, - trg_create_str->length); + Parser_state parser_state; + if (parser_state.init(thd, trg_create_str->str, trg_create_str->length)) + goto err_with_lex_cleanup; Trigger_creation_ctx *creation_ctx= Trigger_creation_ctx::create(thd, @@ -1413,7 +1413,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, */ triggers->trigger_fields[lex.trg_chistics.event] [lex.trg_chistics.action_time]= - (Item_trigger_field *)(lex.trg_table_fields.first); + lex.trg_table_fields.first; /* Also let us bind these objects to Field objects in table being opened. @@ -1423,8 +1423,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, SELECT)... Anyway some things can be checked only during trigger execution. 
*/ - for (Item_trigger_field *trg_field= - (Item_trigger_field *)(lex.trg_table_fields.first); + for (Item_trigger_field *trg_field= lex.trg_table_fields.first; trg_field; trg_field= trg_field->next_trg_field) { diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 61f47bee799..3b2ef92ab48 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -144,20 +144,19 @@ void st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg) { thd_arg->lex->current_select= fake_select_lex; - fake_select_lex->table_list.link_in_list((uchar *)&result_table_list, - (uchar **) - &result_table_list.next_local); + fake_select_lex->table_list.link_in_list(&result_table_list, + &result_table_list.next_local); fake_select_lex->context.table_list= fake_select_lex->context.first_name_resolution_table= fake_select_lex->get_table_list(); if (!fake_select_lex->first_execution) { - for (ORDER *order= (ORDER *) global_parameters->order_list.first; + for (ORDER *order= global_parameters->order_list.first; order; order= order->next) order->item= &order->item_ptr; } - for (ORDER *order= (ORDER *)global_parameters->order_list.first; + for (ORDER *order= global_parameters->order_list.first; order; order=order->next) { @@ -249,18 +248,18 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, can_skip_order_by= is_union_select && !(sl->braces && sl->explicit_limit); saved_error= join->prepare(&sl->ref_pointer_array, - (TABLE_LIST*) sl->table_list.first, + sl->table_list.first, sl->with_wild, sl->where, (can_skip_order_by ? 0 : sl->order_list.elements) + sl->group_list.elements, can_skip_order_by ? - (ORDER*) 0 : (ORDER *)sl->order_list.first, - (ORDER*) sl->group_list.first, + NULL : sl->order_list.first, + sl->group_list.first, sl->having, - (is_union_select ? (ORDER*) 0 : - (ORDER*) thd_arg->lex->proc_list.first), + (is_union_select ? 
NULL : + thd_arg->lex->proc_list.first), sl, this); /* There are no * in the statement anymore (for PS) */ sl->with_wild= 0; @@ -354,7 +353,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { ORDER *ord; Item_func::Functype ft= Item_func::FT_FUNC; - for (ord= (ORDER*)global_parameters->order_list.first; ord; ord= ord->next) + for (ord= global_parameters->order_list.first; ord; ord= ord->next) if ((*ord->item)->walk (&Item::find_function_processor, FALSE, (uchar *) &ft)) { @@ -416,12 +415,11 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, thd_arg->lex->current_select= fake_select_lex; saved_error= fake_select_lex->join-> prepare(&fake_select_lex->ref_pointer_array, - (TABLE_LIST*) fake_select_lex->table_list.first, + fake_select_lex->table_list.first, 0, 0, fake_select_lex->order_list.elements, - (ORDER*) fake_select_lex->order_list.first, - (ORDER*) NULL, NULL, - (ORDER*) NULL, + fake_select_lex->order_list.first, + NULL, NULL, NULL, fake_select_lex, this); fake_select_lex->table_list.empty(); } @@ -597,8 +595,8 @@ bool st_select_lex_unit::exec() &result_table_list, 0, item_list, NULL, global_parameters->order_list.elements, - (ORDER*)global_parameters->order_list.first, - (ORDER*) NULL, NULL, (ORDER*) NULL, + global_parameters->order_list.first, + NULL, NULL, NULL, fake_select_lex->options | SELECT_NO_UNLOCK, result, this, fake_select_lex); } @@ -620,8 +618,8 @@ bool st_select_lex_unit::exec() &result_table_list, 0, item_list, NULL, global_parameters->order_list.elements, - (ORDER*)global_parameters->order_list.first, - (ORDER*) NULL, NULL, (ORDER*) NULL, + global_parameters->order_list.first, + NULL, NULL, NULL, fake_select_lex->options | SELECT_NO_UNLOCK, result, this, fake_select_lex); } @@ -697,7 +695,7 @@ bool st_select_lex_unit::cleanup() if (global_parameters->order_list.elements) { ORDER *ord; - for (ord= (ORDER*)global_parameters->order_list.first; ord; ord= ord->next) + for (ord= global_parameters->order_list.first; ord; ord= ord->next) (*ord->item)->walk (&Item::cleanup_processor, 0, 0); } } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index c231fd7afc6..15251e956be 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -29,12 +29,12 @@ bool compare_record(TABLE *table) { - if (table->s->blob_fields + table->s->varchar_fields == 0) + if (table->s->can_cmp_whole_record) return cmp_record(table,record[1]); /* Compare null bits */ if (memcmp(table->null_flags, table->null_flags+table->s->rec_buff_length, - table->s->null_bytes)) + table->s->null_bytes_for_compare)) return TRUE; // Diff in NULL value /* Compare updated fields */ for (Field **ptr= table->field ; *ptr ; ptr++) @@ -394,7 +394,7 @@ int mysql_update(THD *thd, matching rows before updating the table! 
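compare_record() above now consults two values precomputed when the table definition is opened (see the table.cc hunk near the end of this page): can_cmp_whole_record, true only when the table has neither BLOB nor VARCHAR columns, whose unused trailing bytes would make the two row images memcmp-incomparable, and null_bytes_for_compare, which is zero when the only null-bitmap bit is the pack-record filler, so tables without real NULL bits skip that memcmp entirely. The resulting shape (schematic; the field loop's body is cut off above and reconstructed here from context):

  /* Schematic compare_record(): whole-record compare when the images are
     byte-comparable, otherwise null bits first, then only written fields. */
  bool record_changed(TABLE *table)
  {
    if (table->s->can_cmp_whole_record)             /* no BLOB/VARCHAR */
      return cmp_record(table, record[1]);
    if (memcmp(table->null_flags,
               table->null_flags + table->s->rec_buff_length,
               table->s->null_bytes_for_compare))   /* length 0 => equal */
      return true;
    for (Field **ptr= table->field; *ptr; ptr++)
      if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
          (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
        return true;
    return false;
  }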
*/ if (used_index < MAX_KEY && old_covering_keys.is_set(used_index)) - table->mark_columns_used_by_index(used_index); + table->add_read_columns_used_by_index(used_index); else { table->use_all_columns(); @@ -422,6 +422,7 @@ int mysql_update(THD *thd, { goto err; } + thd->examined_row_count+= examined_rows; /* Filesort has already found and selected the rows we want to update, so we don't need the where clause @@ -473,6 +474,7 @@ int mysql_update(THD *thd, !thd->killed && !thd->is_error()) { update_virtual_fields(thd, table); + thd->examined_row_count++; if (!select || select->skip_record(thd) > 0) { if (table->file->was_semi_consistent_read()) @@ -581,6 +583,7 @@ int mysql_update(THD *thd, while (!(error=info.read_record(&info)) && !thd->killed) { update_virtual_fields(thd, table); + thd->examined_row_count++; if (!select || select->skip_record(thd) > 0) { if (table->file->was_semi_consistent_read()) @@ -1054,7 +1057,7 @@ reopen_tables: correct order of statements. Otherwise, we use a TL_READ lock to improve performance. */ - tl->lock_type= read_lock_type_for_table(thd, table); + tl->lock_type= read_lock_type_for_table(thd, lex, tl); tl->updating= 0; /* Update TABLE::lock_type accordingly. */ if (!tl->placeholder() && !using_lock_tables) @@ -1332,7 +1335,7 @@ int multi_update::prepare(List<Item> &not_used_values, SELECT_LEX_UNIT *lex_unit) { TABLE_LIST *table_ref; - SQL_LIST update; + SQL_I_List<TABLE_LIST> update; table_map tables_to_update; Item_field *item; List_iterator_fast<Item> field_it(*fields); @@ -1412,11 +1415,11 @@ int multi_update::prepare(List<Item> &not_used_values, leaf_table_count++; if (tables_to_update & table->map) { - TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref, + TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref, sizeof(*tl)); if (!tl) DBUG_RETURN(1); - update.link_in_list((uchar*) tl, (uchar**) &tl->next_local); + update.link_in_list(tl, &tl->next_local); tl->shared= table_count++; table->no_keyread=1; table->covering_keys.clear_all(); @@ -1437,7 +1440,7 @@ int multi_update::prepare(List<Item> &not_used_values, table_count= update.elements; - update_tables= (TABLE_LIST*) update.first; + update_tables= update.first; tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count); tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) * @@ -1998,9 +2001,11 @@ int multi_update::do_updates() Setup copy functions to copy fields from temporary table */ List_iterator_fast<Item> field_it(*fields_for_table[offset]); - Field **field= tmp_table->field + - 1 + unupdated_check_opt_tables.elements; // Skip row pointers + Field **field; Copy_field *copy_field_ptr= copy_field, *copy_field_end; + + /* Skip row pointers */ + field= tmp_table->field + 1 + unupdated_check_opt_tables.elements; for ( ; *field ; field++) { Item_field *item= (Item_field* ) field_it++; diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 5381ce94411..2112d9b1e1c 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -887,7 +887,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, view->algorithm != VIEW_ALGORITHM_TMPTABLE))) { /* TODO: change here when we will support UNIONs */ - for (TABLE_LIST *tbl= (TABLE_LIST *)lex->select_lex.table_list.first; + for (TABLE_LIST *tbl= lex->select_lex.table_list.first; tbl; tbl= tbl->next_local) { @@ -1006,7 +1006,7 @@ loop_out: */ if (view->updatable_view && !lex->select_lex.master_unit()->is_union() && - !((TABLE_LIST*)lex->select_lex.table_list.first)->next_local && + !(lex->select_lex.table_list.first)->next_local &&
find_table_in_global_list(lex->query_tables->next_global, lex->query_tables->db, lex->query_tables->table_name)) @@ -1192,9 +1192,10 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; - Parser_state parser_state(thd, - table->select_stmt.str, - table->select_stmt.length); + Parser_state parser_state; + if (parser_state.init(thd, table->select_stmt.str, + table->select_stmt.length)) + goto err; /* Use view db name as thread default database, in order to ensure @@ -1351,8 +1352,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, This may change in future, for example if we enable merging of views with subqueries in select list. */ - view_main_select_tables= - (TABLE_LIST*)lex->select_lex.table_list.first; + view_main_select_tables= lex->select_lex.table_list.first; /* Let us set proper lock type for tables of the view's main diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b63fc0eae4a..e01289ad087 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -516,8 +516,7 @@ set_trigger_new_row(THD *thd, LEX_STRING *name, Item *val) Let us add this item to list of all Item_trigger_field objects in trigger. */ - lex->trg_table_fields.link_in_list((uchar *) trg_fld, - (uchar **) &trg_fld->next_trg_field); + lex->trg_table_fields.link_in_list(trg_fld, &trg_fld->next_trg_field); return lex->sphead->add_instr(sp_fld); } @@ -4710,11 +4709,9 @@ create_table_option: TABLE_LIST *table_list= lex->select_lex.get_table_list(); lex->create_info.merge_list= lex->select_lex.table_list; lex->create_info.merge_list.elements--; - lex->create_info.merge_list.first= - (uchar*) (table_list->next_local); + lex->create_info.merge_list.first= table_list->next_local; lex->select_lex.table_list.elements=1; - lex->select_lex.table_list.next= - (uchar**) &(table_list->next_local); + lex->select_lex.table_list.next= &(table_list->next_local); table_list->next_local= 0; lex->create_info.used_fields|= HA_CREATE_USED_UNION; } @@ -5847,8 +5844,7 @@ alter: lex->alter_info.reset(); lex->col_list.empty(); lex->select_lex.init_order(); - lex->select_lex.db= - ((TABLE_LIST*) lex->select_lex.table_list.first)->db; + lex->select_lex.db= (lex->select_lex.table_list.first)->db; bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= 0; lex->create_info.default_table_charset= NULL; @@ -8560,9 +8556,8 @@ opt_gorder_clause: | order_clause { SELECT_LEX *select= Select; - select->gorder_list= - (SQL_LIST*) sql_memdup((char*) &select->order_list, - sizeof(st_sql_list)); + select->gorder_list= new (YYTHD->mem_root) + SQL_I_List<ORDER>(select->order_list); if (select->gorder_list == NULL) MYSQL_YYABORT; select->order_list.empty(); @@ -9512,7 +9507,7 @@ procedure_clause: } lex->proc_list.elements=0; lex->proc_list.first=0; - lex->proc_list.next= (uchar**) &lex->proc_list.first; + lex->proc_list.next= &lex->proc_list.first; Item_field *item= new (YYTHD->mem_root) Item_field(&lex->current_select->context, NULL, NULL, $2.str); @@ -11500,8 +11495,8 @@ simple_ident_q: Let us add this item to list of all Item_trigger_field objects in trigger. */ - lex->trg_table_fields.link_in_list((uchar*) trg_fld, - (uchar**) &trg_fld->next_trg_field); + lex->trg_table_fields.link_in_list(trg_fld, + &trg_fld->next_trg_field); $$= trg_fld; } @@ -11587,7 +11582,7 @@ field_ident: ident { $$=$1;} | ident '.' ident '.' 
ident { - TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; + TABLE_LIST *table= Select->table_list.first; if (my_strcasecmp(table_alias_charset, $1.str, table->db)) { my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); @@ -11603,7 +11598,7 @@ field_ident: } | ident '.' ident { - TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; + TABLE_LIST *table= Select->table_list.first; if (my_strcasecmp(table_alias_charset, $1.str, table->alias)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $1.str); diff --git a/sql/table.cc b/sql/table.cc index 7db7e510ea0..988e082addf 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -52,6 +52,8 @@ static uint find_field(Field **fields, uchar *record, uint start, uint length); inline bool is_system_table_name(const char *name, uint length); +static ulong get_form_pos(File file, uchar *head); + /************************************************************************** Object_creation_ctx implementation. **************************************************************************/ @@ -304,13 +306,6 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key, share->version= refresh_version; /* - This constant is used to mark that no table map version has been - assigned. No arithmetic is done on the value: it will be - overwritten with a value taken from MYSQL_BIN_LOG. - */ - share->table_map_version= ~(ulonglong)0; - - /* Since alloc_table_share() can be called without any locking (for example, ha_create_table... functions), we do not assign a table map id here. Instead we assign a value that is not used @@ -373,11 +368,6 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key, share->path.length= share->normalized_path.length= strlen(path); share->frm_version= FRM_VER_TRUE_VARCHAR; - /* - Temporary tables are not replicated, but we set up these fields - anyway to be able to catch errors. - */ - share->table_map_version= ~(ulonglong)0; share->cached_row_logging_check= -1; /* @@ -706,6 +696,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, const char **interval_array; enum legacy_db_type legacy_db_type; my_bitmap_map *bitmaps; + bool null_bits_are_used; DBUG_ENTER("open_binary_frm"); new_field_pack_flag= head[27]; @@ -714,7 +705,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, disk_buff= 0; error= 3; - if (!(pos=get_form_pos(file,head,(TYPELIB*) 0))) + /* Position of the form in the form file. */ + if (!(pos= get_form_pos(file, head))) goto err; /* purecov: inspected */ share->frm_version= head[2]; @@ -1152,6 +1144,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, goto free_and_err; record= share->default_values-1; /* Fieldstart = 1 */ + null_bits_are_used= share->null_fields != 0; if (share->null_field_first) { null_flags= null_pos= (uchar*) record+1; @@ -1370,6 +1363,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, reg_field->stored_in_db= fld_stored_in_db; if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag)) { + null_bits_are_used= 1; if ((null_bit_pos+= field_length & 7) > 7) { null_pos++; @@ -1503,12 +1497,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, keyinfo->extra_length+=HA_KEY_BLOB_LENGTH; key_part->store_length+=HA_KEY_BLOB_LENGTH; keyinfo->key_length+= HA_KEY_BLOB_LENGTH; - /* - Mark that there may be many matching values for one key - combination ('a', 'a ', 'a '...) 
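The slimmed-down get_form_pos(), whose new prototype and call site appear in the table.cc hunk above and whose body is rewritten in the hunks that follow, reads the .frm header with uint2korr() and uint4korr(). Those are the server's little-endian decoders; the real definitions are platform-dependent macros, and the portable fallback behaves like this sketch:

#include <stdint.h>

/* Illustrative equivalents of the uint2korr()/uint4korr() macros:
   read a little-endian integer from an unaligned byte buffer. */
static inline uint16_t uint2korr_sketch(const unsigned char *p)
{
  return (uint16_t) (p[0] | ((uint16_t) p[1] << 8));
}

static inline uint32_t uint4korr_sketch(const unsigned char *p)
{
  return (uint32_t) p[0]         | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

In the .frm header read below, uint2korr(head+8) yields the number of forms and uint2korr(head+4) the length word.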
- */ - if (!(field->flags & BINARY_FLAG)) - keyinfo->flags|= HA_END_SPACE_KEY; } if (field->type() == MYSQL_TYPE_BIT) key_part->key_part_flag|= HA_BIT_PART; @@ -1702,6 +1690,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, share->null_bytes= (null_pos - (uchar*) null_flags + (null_bit_pos + 7) / 8); share->last_null_bit_pos= null_bit_pos; + share->null_bytes_for_compare= null_bits_are_used ? share->null_bytes : 0; + share->can_cmp_whole_record= (share->blob_fields == 0 && + share->varchar_fields == 0); share->db_low_byte_first= handler_file->low_byte_first(); share->column_bitmap_size= bitmap_buffer_size(share->fields); @@ -1946,7 +1937,14 @@ bool unpack_vcol_info_from_frm(THD *thd, LEX_STRING *vcol_expr, bool *error_reported) { - bool rc= FALSE; + bool rc; + char *vcol_expr_str; + int str_len; + CHARSET_INFO *old_character_set_client; + Query_arena *backup_stmt_arena_ptr; + Query_arena backup_arena; + Query_arena *vcol_arena; + Parser_state parser_state; DBUG_ENTER("unpack_vcol_info_from_frm"); DBUG_ASSERT(vcol_expr); @@ -1955,9 +1953,6 @@ bool unpack_vcol_info_from_frm(THD *thd, The string to be parsed has to be of the following format: "PARSE_VCOL_EXPR (<expr_string_from_frm>)". */ - char *vcol_expr_str; - int str_len= 0; - CHARSET_INFO *old_character_set_client; if (!(vcol_expr_str= (char*) alloc_root(&table->mem_root, vcol_expr->length + @@ -1979,14 +1974,15 @@ bool unpack_vcol_info_from_frm(THD *thd, str_len++; memcpy(vcol_expr_str + str_len, "\0", 1); str_len++; - Parser_state parser_state(thd, vcol_expr_str, str_len); + + if (parser_state.init(thd, vcol_expr_str, str_len)) + goto err; /* Step 2: Setup thd for parsing. */ - Query_arena *backup_stmt_arena_ptr= thd->stmt_arena; - Query_arena backup_arena; - Query_arena *vcol_arena= table->expr_arena; + backup_stmt_arena_ptr= thd->stmt_arena; + vcol_arena= table->expr_arena; if (!vcol_arena) { Query_arena expr_arena(&table->mem_root, Query_arena::INITIALIZED); @@ -2019,6 +2015,7 @@ bool unpack_vcol_info_from_frm(THD *thd, field->vcol_info= 0; goto err; } + rc= FALSE; goto end; err: @@ -2517,55 +2514,46 @@ void free_field_buffers_larger_than(TABLE *table, uint32 size) } } - /* Find where a form starts */ - /* if formname is NullS then only formnames is read */ +/** + Find where a form starts. + + @param head The start of the form file. + + @remark If formname is NULL then only formnames is read. + + @retval The form position. +*/ -ulong get_form_pos(File file, uchar *head, TYPELIB *save_names) +static ulong get_form_pos(File file, uchar *head) { - uint a_length,names,length; - uchar *pos,*buf; + uchar *pos, *buf; + uint names, length; ulong ret_value=0; DBUG_ENTER("get_form_pos"); - LINT_INIT(buf); + names= uint2korr(head+8); - names=uint2korr(head+8); - a_length=(names+2)*sizeof(char *); /* Room for two extra */ + if (!(names= uint2korr(head+8))) + DBUG_RETURN(0); - if (!save_names) - a_length=0; - else - save_names->type_names=0; /* Clear if error */ + length= uint2korr(head+4); - if (names) - { - length=uint2korr(head+4); - VOID(my_seek(file,64L,MY_SEEK_SET,MYF(0))); - if (!(buf= (uchar*) my_malloc((size_t) length+a_length+names*4, - MYF(MY_WME))) || - my_read(file, buf+a_length, (size_t) (length+names*4), - MYF(MY_NABP))) - { /* purecov: inspected */ - x_free((uchar*) buf); /* purecov: inspected */ - DBUG_RETURN(0L); /* purecov: inspected */ - } - pos= buf+a_length+length; - ret_value=uint4korr(pos); - } - if (! 
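The unpack_vcol_info_from_frm() reshuffle above hoists every local declaration ahead of the first goto err. That is required, not cosmetic: the new early goto after parser_state.init() would otherwise jump across initializations, which C++ forbids. A self-contained illustration of the rule and of the declare-first, assign-later layout the patch adopts (all names invented):

/* Ill-formed C++: the jump bypasses an initialization.

   int broken(bool fail)
   {
     if (fail)
       goto err;
     int str_len= 0;    // error: jump to 'err' crosses this
     return str_len;
   err:
     return -1;
   }
*/

static int fixed_sketch(bool fail)
{
  int str_len;           /* no initializer, legal to jump across */
  if (fail)
    goto err;
  str_len= 0;            /* assignment, not initialization */
  return str_len;
err:
  return -1;
}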
save_names) - { - if (names) - my_free((uchar*) buf,MYF(0)); - } - else if (!names) - bzero((char*) save_names,sizeof(save_names)); - else + my_seek(file, 64L, MY_SEEK_SET, MYF(0)); + + if (!(buf= (uchar*) my_malloc(length+names*4, MYF(MY_WME)))) + DBUG_RETURN(0); + + if (my_read(file, buf, length+names*4, MYF(MY_NABP))) { - char *str; - const char **tmp = (const char**) buf; - str=(char *) (buf+a_length); - fix_type_pointers(&tmp, save_names, 1, &str); + x_free(buf); + DBUG_RETURN(0); } + + pos= buf+length; + ret_value= uint4korr(pos); + + my_free(buf, MYF(0)); + DBUG_RETURN(ret_value); } @@ -3163,44 +3151,30 @@ bool check_db_name(LEX_STRING *org_name) { char *name= org_name->str; uint name_length= org_name->length; + bool check_for_path_chars; if (!name_length || name_length > NAME_LEN) return 1; + if ((check_for_path_chars= check_mysql50_prefix(name))) + { + name+= MYSQL50_TABLE_NAME_PREFIX_LENGTH; + name_length-= MYSQL50_TABLE_NAME_PREFIX_LENGTH; + } + if (lower_case_table_names && name != any_db) my_casedn_str(files_charset_info, name); -#if defined(USE_MB) && defined(USE_MB_IDENT) - if (use_mb(system_charset_info)) - { - name_length= 0; - bool last_char_is_space= TRUE; - char *end= name + org_name->length; - while (name < end) - { - int len; - last_char_is_space= my_isspace(system_charset_info, *name); - len= my_ismbchar(system_charset_info, name, end); - if (!len) - len= 1; - name+= len; - name_length++; - } - return (last_char_is_space || name_length > NAME_CHAR_LEN); - } - else -#endif - return ((org_name->str[org_name->length - 1] != ' ') || - (name_length > NAME_CHAR_LEN)); /* purecov: inspected */ + return check_table_name(name, name_length, check_for_path_chars); } + /* Allow anything as a table name, as long as it doesn't contain an ' ' at the end returns 1 on error */ - bool check_table_name(const char *name, uint length, bool check_for_path_chars) { uint name_length= 0; // name length in symbols @@ -3228,10 +3202,10 @@ bool check_table_name(const char *name, uint length, bool check_for_path_chars) continue; } } +#endif if (check_for_path_chars && (*name == '/' || *name == '\\' || *name == '~' || *name == FN_EXTCHAR)) return 1; -#endif name++; name_length++; } @@ -4864,6 +4838,27 @@ void st_table::mark_columns_used_by_index(uint index) /* + Add fields used by a specified index to the table's read_set. + + NOTE: + The original state can be restored with + restore_column_maps_after_mark_index(). +*/ + +void st_table::add_read_columns_used_by_index(uint index) +{ + MY_BITMAP *bitmap= &tmp_set; + DBUG_ENTER("st_table::add_read_columns_used_by_index"); + + enable_keyread(); + bitmap_copy(bitmap, read_set); + mark_columns_used_by_index_no_reset(index, bitmap); + column_bitmaps_set(bitmap, write_set); + DBUG_VOID_RETURN; +} + + +/* Restore to use normal column maps after key read NOTES diff --git a/sql/table.h b/sql/table.h index d2fe7ba3261..9f6006c9adc 100644 --- a/sql/table.h +++ b/sql/table.h @@ -408,6 +408,11 @@ typedef struct st_table_share uint blob_ptr_size; /* 4 or 8 */ uint key_block_size; /* create key_block_size, if used */ uint null_bytes, last_null_bit_pos; + /* + Same as null_bytes, except that if there is only a 'delete-marker' in + the record then this value is 0. 
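check_db_name() above now recognizes the #mysql50# prefix that marks pre-5.1 file names which have not been re-encoded, strips it, and delegates validation to check_table_name() with path characters rejected. A sketch of the prefix test; the real helper is check_mysql50_prefix() in the server source, and the constants shown are assumed to match the public headers:

#include <string.h>
#include <stdbool.h>

#define MYSQL50_TABLE_NAME_PREFIX        "#mysql50#"
#define MYSQL50_TABLE_NAME_PREFIX_LENGTH 9

/* Illustrative equivalent of check_mysql50_prefix(). */
static bool check_mysql50_prefix_sketch(const char *name)
{
  return strncmp(name, MYSQL50_TABLE_NAME_PREFIX,
                 MYSQL50_TABLE_NAME_PREFIX_LENGTH) == 0;
}

When the prefix is present, the hunk above advances name and shrinks name_length by MYSQL50_TABLE_NAME_PREFIX_LENGTH before validating the remainder.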
+  */
+  uint null_bytes_for_compare;
   uint fields;                          /* Number of fields */
   /* Number of stored fields, generated-only virtual fields are not included */
   uint stored_fields;
@@ -442,8 +447,8 @@ typedef struct st_table_share
   bool name_lock, replace_with_name_lock;
   bool waiting_on_cond;                 /* Protection against free */
   bool deleting;                        /* going to delete this table */
+  bool can_cmp_whole_record;
   ulong table_map_id;                   /* for row-based replication */
-  ulonglong table_map_version;
 
   /*
     Cache for row-based replication table share checks that does not
@@ -888,6 +893,7 @@ struct st_table {
   void prepare_for_position(void);
   void mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *map);
   void mark_columns_used_by_index(uint index);
+  void add_read_columns_used_by_index(uint index);
   void restore_column_maps_after_mark_index();
   void mark_auto_increment_column(void);
   void mark_columns_needed_for_update(void);
@@ -1217,7 +1223,7 @@ struct TABLE_LIST
   }
 
   /*
-    List of tables local to a subquery (used by SQL_LIST). Considers
+    List of tables local to a subquery (used by SQL_I_List). Considers
     views as leaves (unlike 'next_leaf' below). Created at parse time
     in st_select_lex::add_table_to_list() -> table_list.link_in_list().
   */
@@ -1748,7 +1754,11 @@ typedef struct st_nested_join
   */
   table_map used_tables;
   table_map not_null_tables; /* tables that rejects nulls */
-  struct st_join_table *first_nested;/* the first nested table in the plan */
+  /**
+    Used for pointing out the first table in the plan being covered by this
+    join nest. It is used exclusively within make_outerjoin_info().
+  */
+  struct st_join_table *first_nested;
   /*
     Used to count tables in the nested join in 2 isolated places:
     1. In make_outerjoin_info().
@@ -1763,6 +1773,15 @@ typedef struct st_nested_join
   */
   uint n_tables;
   nested_join_map nj_map;               /* Bit used to identify this nested join*/
+  /**
+    True if this join nest node is completely covered by the query execution
+    plan. This means two things.
+
+    1. All tables on its @c join_list are covered by the plan.
+
+    2. All child join nest nodes are fully covered.
+  */
+  bool is_fully_covered() const { return join_list.elements == counter; }
 } NESTED_JOIN;
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index 83c4a8ee2a0..da0c25b54dd 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -21,8 +21,6 @@ extern "C" {
   void sql_alloc_error_handler(void)
   {
-    sql_print_error("%s", ER(ER_OUT_OF_RESOURCES));
-
     THD *thd= current_thd;
     if (thd)
     {
@@ -49,6 +47,12 @@ extern "C" {
                           ER(ER_OUT_OF_RESOURCES));
       }
     }
+
+    /* Skip writing to the error log to avoid mtr complaints */
+    DBUG_EXECUTE_IF("simulate_out_of_memory", return;);
+
+    sql_print_error("%s", ER(ER_OUT_OF_RESOURCES));
+
   }
 }
diff --git a/sql/unireg.cc b/sql/unireg.cc
index a048adbef72..9859d27dd3b 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -113,7 +113,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
   ulong filepos, data_offset;
   uint options_len;
   uchar fileinfo[64],forminfo[288],*keybuff;
-  TYPELIB formnames;
   uchar *screen_buff;
   char buff[128];
 #ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -124,7 +123,7 @@ bool mysql_create_frm(THD *thd, const char *file_name,
   DBUG_ENTER("mysql_create_frm");
   DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension
-  formnames.type_names=0;
+
   if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
     DBUG_RETURN(1);
   DBUG_ASSERT(db_file != NULL);
@@ -210,8 +209,15 @@ bool mysql_create_frm(THD *thd, const char *file_name,
   key_buff_length= uint4korr(fileinfo+47);
   keybuff=(uchar*) my_malloc(key_buff_length, MYF(0));
   key_info_length= pack_keys(keybuff, keys, key_info, data_offset);
-  VOID(get_form_pos(file,fileinfo,&formnames));
-  if (!(filepos=make_new_entry(file,fileinfo,&formnames,"")))
+
+  /*
+    Ensure that there are no forms in this newly created form file.
+    Even if the form file exists, create_frm must truncate it to
+    ensure one form per form file.
+  */
+  DBUG_ASSERT(uint2korr(fileinfo+8) == 0);
+
+  if (!(filepos= make_new_entry(file, fileinfo, NULL, "")))
     goto err;
   maxlength=(uint) next_io_size((ulong) (uint2korr(forminfo)+1000));
   int2store(forminfo+2,maxlength);
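The new st_table_share members in the table.h hunk above, null_bytes_for_compare and can_cmp_whole_record, feed a row-comparison fast path: when a table has no BLOB or VARCHAR fields its records are fixed-length and can be compared bytewise, and a null byte holding nothing but the delete marker can be ignored. A hedged sketch of the fast path these flags enable; everything except the two flag names is invented:

#include <string.h>
#include <stdbool.h>

/* Illustrative record comparison in the spirit of can_cmp_whole_record. */
static bool records_equal_sketch(const unsigned char *rec_a,
                                 const unsigned char *rec_b,
                                 size_t reclength,
                                 bool can_cmp_whole_record)
{
  if (can_cmp_whole_record)
    return memcmp(rec_a, rec_b, reclength) == 0;  /* fixed-length fast path */
  /* Otherwise a field-by-field comparison is needed (not shown):
     VARCHAR and BLOB leave unused bytes in the record buffer. */
  return false;
}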
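The reordered sql_alloc_error_handler() in the thr_malloc.cc hunk above relies on DBUG_EXECUTE_IF, the dbug facility that executes a statement only while a named keyword is active in the session's debug settings; here it suppresses the error-log write under the simulate_out_of_memory test so mtr does not flag the log line. A sketch of the pattern, assuming a debug build inside the server tree; the surrounding function is invented, the macro is the real one:

#include <my_dbug.h>   /* in-tree dbug header */

/* Hypothetical handler: skip logging while the test keyword is set. */
void report_oom_sketch(void)
{
  DBUG_EXECUTE_IF("simulate_out_of_memory", return;);
  /* normal path: write the out-of-memory message to the error log */
}

A test enables the keyword with SET SESSION debug="+d,simulate_out_of_memory"; in non-debug builds DBUG_EXECUTE_IF expands to nothing.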