Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc           |   8
-rw-r--r--  sql/ha_partition.cc    |  22
-rw-r--r--  sql/item_cmpfunc.cc    |  28
-rw-r--r--  sql/item_subselect.cc  |   3
-rw-r--r--  sql/item_sum.cc        |  15
-rw-r--r--  sql/item_xmlfunc.cc    |   6
-rw-r--r--  sql/log_event.cc       |  18
-rw-r--r--  sql/log_event.h        |  18
-rw-r--r--  sql/mdl.h              |  10
-rw-r--r--  sql/mysqld.cc          |  37
-rw-r--r--  sql/opt_range.cc       |   2
-rw-r--r--  sql/rpl_utility.cc     |   9
-rw-r--r--  sql/sql_admin.cc       |  28
-rw-r--r--  sql/sql_base.cc        |  49
-rw-r--r--  sql/sql_base.h         |  27
-rw-r--r--  sql/sql_cache.cc       |  30
-rw-r--r--  sql/sql_class.cc       |  24
-rw-r--r--  sql/sql_class.h        |   3
-rw-r--r--  sql/sql_repl.cc        |   4
-rw-r--r--  sql/sql_select.cc      |  63
-rw-r--r--  sql/sql_select.h       |   4
-rw-r--r--  sql/sql_table.cc       | 227
-rw-r--r--  sql/strfunc.cc         |   2
-rw-r--r--  sql/sys_vars.cc        |  58
-rw-r--r--  sql/transaction.cc     |  20
-rw-r--r--  sql/tztime.cc          |  21
26 files changed, 493 insertions(+), 243 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc index 8ed3f787d79..7da46d6b383 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -204,7 +204,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -235,7 +235,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24, + MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -266,7 +266,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24, + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -297,7 +297,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 4cf66daf315..b10de0362f2 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3826,6 +3826,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt) part, sub_elem->partition_name)); if ((error= m_file[part]->ha_truncate())) break; + sub_elem->part_state= PART_NORMAL; } while (++j < num_subparts); } else @@ -4423,6 +4424,7 @@ bool ha_partition::init_record_priority_queue() { if (bitmap_is_set(&m_part_info->used_partitions, i)) { + DBUG_PRINT("info", ("init rec-buf for part %u", i)); int2store(ptr, i); ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } @@ -5317,11 +5319,27 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) m_top_entry= NO_CURRENT_PART_ID; queue_remove_all(&m_queue); - DBUG_PRINT("info", ("m_part_spec.start_part %d", m_part_spec.start_part)); - for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) + /* + Position part_rec_buf_ptr to point to the first used partition >= + start_part. There may be partitions marked by used_partitions, + but is before start_part. These partitions has allocated record buffers + but is dynamically pruned, so those buffers must be skipped. 
+ */ + uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions); + for (; first_used_part < m_part_spec.start_part; first_used_part++) + { + if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part)) + part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; + } + DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u", + m_part_spec.start_part, first_used_part)); + for (i= first_used_part; i <= m_part_spec.end_part; i++) { if (!(bitmap_is_set(&(m_part_info->used_partitions), i))) continue; + DBUG_PRINT("info", ("reading from part %u (scan_type: %u)", + i, m_index_scan_type)); + DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS; int error; handler *file= m_file[i]; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 3e64e3969e4..fec6998d0ce 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -241,15 +241,15 @@ static uint collect_cmp_types(Item **items, uint nitems, bool skip_nulls= FALSE) items[i]->cmp_type() == ROW_RESULT) && cmp_row_type(items[0], items[i])) return 0; - found_types|= 1<< (uint)item_cmp_type(left_result, - items[i]->cmp_type()); + found_types|= 1U << (uint)item_cmp_type(left_result, + items[i]->cmp_type()); } /* Even if all right-hand items are NULLs and we are skipping them all, we need at least one type bit in the found_type bitmask. */ if (skip_nulls && !found_types) - found_types= 1 << (uint)left_result; + found_types= 1U << (uint)left_result; return found_types; } @@ -2824,12 +2824,12 @@ Item *Item_func_case::find_item(String *str) cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type()); DBUG_ASSERT(cmp_type != ROW_RESULT); DBUG_ASSERT(cmp_items[(uint)cmp_type]); - if (!(value_added_map & (1<<(uint)cmp_type))) + if (!(value_added_map & (1U << (uint)cmp_type))) { cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]); if ((null_value=args[first_expr_num]->null_value)) return else_expr_num != -1 ? 
args[else_expr_num] : 0; - value_added_map|= 1<<(uint)cmp_type; + value_added_map|= 1U << (uint)cmp_type; } if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value) return args[i + 1]; @@ -3036,10 +3036,10 @@ void Item_func_case::fix_length_and_dec() return; Item *date_arg= 0; - if (found_types & (1 << TIME_RESULT)) + if (found_types & (1U << TIME_RESULT)) date_arg= find_date_time_item(args, arg_count, 0); - if (found_types & (1 << STRING_RESULT)) + if (found_types & (1U << STRING_RESULT)) { /* If we'll do string comparison, we also need to aggregate @@ -3080,7 +3080,7 @@ void Item_func_case::fix_length_and_dec() for (i= 0; i <= (uint)TIME_RESULT; i++) { - if (found_types & (1 << i) && !cmp_items[i]) + if (found_types & (1U << i) && !cmp_items[i]) { DBUG_ASSERT((Item_result)i != ROW_RESULT); @@ -3936,7 +3936,7 @@ void Item_func_in::fix_length_and_dec() } for (i= 0; i <= (uint)TIME_RESULT; i++) { - if (found_types & 1 << i) + if (found_types & (1U << i)) { (type_cnt)++; cmp_type= (Item_result) i; @@ -4063,14 +4063,14 @@ void Item_func_in::fix_length_and_dec() } else { - if (found_types & (1 << TIME_RESULT)) + if (found_types & (1U << TIME_RESULT)) date_arg= find_date_time_item(args, arg_count, 0); - if (found_types & (1 << STRING_RESULT) && + if (found_types & (1U << STRING_RESULT) && agg_arg_charsets_for_comparison(cmp_collation, args, arg_count)) return; for (i= 0; i <= (uint) TIME_RESULT; i++) { - if (found_types & (1 << i) && !cmp_items[i]) + if (found_types & (1U << i) && !cmp_items[i]) { if (!cmp_items[i] && !(cmp_items[i]= cmp_item::get_comparator((Item_result)i, date_arg, @@ -4156,12 +4156,12 @@ longlong Item_func_in::val_int() Item_result cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type()); in_item= cmp_items[(uint)cmp_type]; DBUG_ASSERT(in_item); - if (!(value_added_map & (1 << (uint)cmp_type))) + if (!(value_added_map & (1U << (uint)cmp_type))) { in_item->store_value(args[0]); if ((null_value= args[0]->null_value)) return 0; - value_added_map|= 1 << (uint)cmp_type; + value_added_map|= 1U << (uint)cmp_type; } if (!in_item->cmp(args[i]) && !args[i]->null_value) return (longlong) (!negated); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index dbc4f9818d5..152b50da616 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1852,7 +1852,8 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join) print_where(item, "rewrite with MIN/MAX", QT_ORDINARY);); save_allow_sum_func= thd->lex->allow_sum_func; - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= + (nesting_map)1 << thd->lex->current_select->nest_level; /* Item_sum_(max|min) can't substitute other item => we can use 0 as reference, also Item_sum_(max|min) can't be fixed after creation, so diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 04358ac8c45..72e0e637d38 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -152,9 +152,10 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) If it is there under a construct where it is not allowed we report an error. */ - invalid= !(allow_sum_func & (1 << max_arg_level)); + invalid= !(allow_sum_func & ((nesting_map)1 << max_arg_level)); } - else if (max_arg_level >= 0 || !(allow_sum_func & (1 << nest_level))) + else if (max_arg_level >= 0 || + !(allow_sum_func & ((nesting_map)1 << nest_level))) { /* The set function can be aggregated only in outer subqueries. 
@@ -163,7 +164,8 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) */ if (register_sum_func(thd, ref)) return TRUE; - invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + invalid= aggr_level < 0 && + !(allow_sum_func & ((nesting_map)1 << nest_level)); if (!invalid && thd->variables.sql_mode & MODE_ANSI) invalid= aggr_level < 0 && max_arg_level < nest_level; } @@ -311,14 +313,15 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl && sl->nest_level > max_arg_level; sl= sl->master_unit()->outer_select() ) { - if (aggr_level < 0 && (allow_sum_func & (1 << sl->nest_level))) + if (aggr_level < 0 && + (allow_sum_func & ((nesting_map)1 << sl->nest_level))) { /* Found the most nested subquery where the function can be aggregated */ aggr_level= sl->nest_level; aggr_sel= sl; } } - if (sl && (allow_sum_func & (1 << sl->nest_level))) + if (sl && (allow_sum_func & ((nesting_map)1 << sl->nest_level))) { /* We reached the subquery of level max_arg_level and checked @@ -559,7 +562,7 @@ void Item_sum::update_used_tables () used_tables_cache&= PSEUDO_TABLE_BITS; // the aggregate function is aggregated into its local context - used_tables_cache |= (1 << aggr_sel->join->table_count) - 1; + used_tables_cache|= ((table_map)1 << aggr_sel->join->tables) - 1; } because if we do it, table elimination will assume that - constructs like "COUNT(*)" use columns from all tables diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 5a824e48b7b..723429f107a 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -2704,8 +2704,12 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len) node.parent= data->parent; // Set parent for the new node to old parent data->parent= numnodes; // Remember current node as new parent + DBUG_ASSERT(data->level <= MAX_LEVEL); data->pos[data->level]= numnodes; - node.level= data->level++; + if (data->level < MAX_LEVEL) + node.level= data->level++; + else + return MY_XML_ERROR; node.type= st->current_node_type; // TAG or ATTR node.beg= attr; node.end= attr + len; diff --git a/sql/log_event.cc b/sql/log_event.cc index d5cfa9367c5..44ff00f9b25 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -4713,10 +4713,21 @@ do_server_version_split(char* version, for (uint i= 0; i<=2; i++) { number= strtoul(p, &r, 10); - split_versions->ver[i]= (uchar) number; - DBUG_ASSERT(number < 256); // fit in uchar + /* + It is an invalid version if any version number greater than 255 or + first number is not followed by '.'. + */ + if (number < 256 && (*r == '.' || i != 0)) + split_versions->ver[i]= (uchar) number; + else + { + split_versions->ver[0]= 0; + split_versions->ver[1]= 0; + split_versions->ver[2]= 0; + break; + } + p= r; - DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice if (*r == '.') p++; // skip the dot } @@ -4734,7 +4745,6 @@ do_server_version_split(char* version, into 'server_version_split': X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z} X.Yabc -> {X,Y,0} - Xabc -> {X,0,0} 'server_version_split' is then used for lookups to find if the server which created this event has some known bug. */ diff --git a/sql/log_event.h b/sql/log_event.h index 2c2e1dcd8b9..99c9a0fb1d7 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1081,7 +1081,7 @@ public: return thd ? 
thd->db : 0; } #else - Log_event() : temp_buf(0) {} + Log_event() : temp_buf(0), flags(0) {} /* avoid having to link mysqlbinlog against libpthread */ static Log_event* read_log_event(IO_CACHE* file, const Format_description_log_event @@ -2417,12 +2417,26 @@ public: #ifdef MYSQL_SERVER bool write(IO_CACHE* file); #endif - bool is_valid() const + bool header_is_valid() const { return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN : LOG_EVENT_MINIMAL_HEADER_LEN)) && (post_header_len != NULL)); } + + bool version_is_valid() const + { + /* It is invalid only when all version numbers are 0 */ + return !(server_version_split.ver[0] == 0 && + server_version_split.ver[1] == 0 && + server_version_split.ver[2] == 0); + } + + bool is_valid() const + { + return header_is_valid() && version_is_valid(); + } + int get_data_size() { /* diff --git a/sql/mdl.h b/sql/mdl.h index af7d75c1297..68f24a7a0e8 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -242,8 +242,14 @@ public: const char *db, const char *name) { m_ptr[0]= (char) mdl_namespace; - m_db_name_length= (uint16) (strmov(m_ptr + 1, db) - m_ptr - 1); - m_length= (uint16) (strmov(m_ptr + m_db_name_length + 2, name) - m_ptr + 1); + /* + It is responsibility of caller to ensure that db and object names + are not longer than NAME_LEN. Still we play safe and try to avoid + buffer overruns. + */ + m_db_name_length= (uint16) (strmake(m_ptr + 1, db, NAME_LEN) - m_ptr - 1); + m_length= (uint16) (strmake(m_ptr + m_db_name_length + 2, name, NAME_LEN) - + m_ptr + 1); } void mdl_key_init(const MDL_key *rhs) { diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 14737cd69bb..872ab8a54ca 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1296,6 +1296,7 @@ static void clean_up(bool print_message); static int test_if_case_insensitive(const char *dir_name); #ifndef EMBEDDED_LIBRARY +static bool pid_file_created= false; static void usage(void); static void start_signal_handler(void); static void close_server_sock(); @@ -1304,6 +1305,7 @@ static void wait_for_signal_thread_to_end(void); static void create_pid_file(); static void mysqld_exit(int exit_code) __attribute__((noreturn)); #endif +static void delete_pid_file(myf flags); static void end_ssl(); @@ -1808,7 +1810,6 @@ void clean_up(bool print_message) item_user_lock_free(); lex_free(); /* Free some memory */ item_create_cleanup(); - free_charsets(); if (!opt_noacl) { #ifdef HAVE_DLOPEN @@ -1855,10 +1856,8 @@ void clean_up(bool print_message) debug_sync_end(); #endif /* defined(ENABLED_DEBUG_SYNC) */ -#if !defined(EMBEDDED_LIBRARY) - if (!opt_bootstrap) - mysql_file_delete(key_file_pid, pidfile_name, MYF(0)); // This may not always exist -#endif + delete_pid_file(MYF(0)); + if (print_message && my_default_lc_messages && server_start_time) sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname); cleanup_errmsgs(); @@ -1872,6 +1871,7 @@ void clean_up(bool print_message) sys_var_end(); my_atomic_rwlock_destroy(&global_query_id_lock); my_atomic_rwlock_destroy(&thread_running_lock); + free_charsets(); mysql_mutex_lock(&LOCK_thread_count); DBUG_PRINT("quit", ("got thread count lock")); ready_to_exit=1; @@ -4885,9 +4885,7 @@ int mysqld_main(int argc, char **argv) (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL); - - if (!opt_bootstrap) - mysql_file_delete(key_file_pid, pidfile_name, MYF(MY_WME)); // Not needed anymore + delete_pid_file(MYF(MY_WME)); if (unix_sock != INVALID_SOCKET) unlink(mysqld_unix_port); @@ -8296,13 +8294,14 @@ static void create_pid_file() if ((file= mysql_file_create(key_file_pid, 
pidfile_name, 0664, O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0) { - char buff[21], *end; + char buff[MAX_BIGINT_WIDTH + 1], *end; end= int10_to_str((long) getpid(), buff, 10); *end++= '\n'; if (!mysql_file_write(file, (uchar*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP))) { mysql_file_close(file, MYF(0)); + pid_file_created= true; return; } mysql_file_close(file, MYF(0)); @@ -8312,6 +8311,26 @@ static void create_pid_file() } #endif /* EMBEDDED_LIBRARY */ + +/** + Remove the process' pid file. + + @param flags file operation flags +*/ + +static void delete_pid_file(myf flags) +{ +#ifndef EMBEDDED_LIBRARY + if (pid_file_created) + { + mysql_file_delete(key_file_pid, pidfile_name, flags); + pid_file_created= false; + } +#endif /* EMBEDDED_LIBRARY */ + return; +} + + /** Clear most status variables. */ void refresh_status(THD *thd) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index cb25ca7760e..e8753b7870c 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -12078,7 +12078,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) cur_parts have bits set for only used keyparts. */ ulonglong all_parts, cur_parts; - all_parts= (1<<max_key_part) - 1; + all_parts= (1ULL << max_key_part) - 1; cur_parts= used_key_parts_map.to_ulonglong() >> 1; if (all_parts != cur_parts) goto next_index; diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 069fac1c3ec..c83e52fe26b 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -878,8 +878,13 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE * DBUG_ENTER("table_def::create_conversion_table"); List<Create_field> field_list; - - for (uint col= 0 ; col < size() ; ++col) + /* + At slave, columns may differ. So we should create + min(columns@master, columns@slave) columns in the + conversion table. + */ + uint const cols_to_create= min(target_table->s->fields, size()); + for (uint col= 0 ; col < cols_to_create; ++col) { Create_field *field_def= (Create_field*) alloc_root(thd->mem_root, sizeof(Create_field)); diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index c65e56edbe0..631c4271eb5 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -739,6 +739,11 @@ send_result_message: case HA_ADMIN_TRY_ALTER: { + uint save_flags; + Alter_info *alter_info= &lex->alter_info; + + /* Store the original value of alter_info->flags */ + save_flags= alter_info->flags; /* This is currently used only by InnoDB. ha_innobase::optimize() answers "try with alter", so here we close the table, do an ALTER TABLE, @@ -746,10 +751,19 @@ send_result_message: We have to end the row, so analyze could return more rows. */ protocol->store(STRING_WITH_LEN("note"), system_charset_info); - protocol->store(STRING_WITH_LEN( - "Table does not support optimize, doing recreate + analyze instead"), - system_charset_info); - if (protocol->write()) + if(alter_info->flags & ALTER_ADMIN_PARTITION) + { + protocol->store(STRING_WITH_LEN( + "Table does not support optimize on partitions. 
All partitions " + "will be rebuilt and analyzed."),system_charset_info); + } + else + { + protocol->store(STRING_WITH_LEN( + "Table does not support optimize, doing recreate + analyze instead"), + system_charset_info); + } + if (protocol->write()) goto err; DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze...")); TABLE_LIST *save_next_local= table->next_local, @@ -768,6 +782,11 @@ send_result_message: table->mdl_request.ticket= NULL; DEBUG_SYNC(thd, "ha_admin_open_ltable"); table->mdl_request.set_type(MDL_SHARED_WRITE); + /* + Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags + to force analyze on all partitions. + */ + alter_info->flags &= ~(ALTER_ADMIN_PARTITION); if ((table->table= open_ltable(thd, table, lock_type, 0))) { result_code= table->table->file->ha_analyze(thd, check_opt); @@ -778,6 +797,7 @@ send_result_message: } else result_code= -1; // open failed + alter_info->flags= save_flags; } /* Start a new row for the final status row */ protocol->prepare_for_resend(); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index c76f2d43279..277e07b9c49 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -326,12 +326,9 @@ uint create_table_def_key(THD *thd, char *key, const TABLE_LIST *table_list, bool tmp_table) { - char *db_end= strnmov(key, table_list->db, MAX_DBKEY_LENGTH - 2); - *db_end++= '\0'; - char *table_end= strnmov(db_end, table_list->table_name, - key + MAX_DBKEY_LENGTH - 1 - db_end); - *table_end++= '\0'; - uint key_length= (uint) (table_end-key); + uint key_length= create_table_def_key(key, table_list->db, + table_list->table_name); + if (tmp_table) { int4store(key + key_length, thd->server_id); @@ -832,13 +829,10 @@ void release_table_share(TABLE_SHARE *share) TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name) { char key[SAFE_NAME_LEN*2+2]; - TABLE_LIST table_list; uint key_length; mysql_mutex_assert_owner(&LOCK_open); - table_list.db= (char*) db; - table_list.table_name= (char*) table_name; - key_length= create_table_def_key((THD*) 0, key, &table_list, 0); + key_length= create_table_def_key(key, db, table_name); return (TABLE_SHARE*) my_hash_search(&table_def_cache, (uchar*) key, key_length); } @@ -3237,7 +3231,7 @@ err_unlock: TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; - uint key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + uint key_length= create_table_def_key(key, db, table_name); for (TABLE *table= list; table ; table=table->next) { @@ -6122,17 +6116,27 @@ TABLE *open_table_uncached(THD *thd, const char *path, const char *db, } -bool rm_temporary_table(handlerton *base, char *path) +/** + Delete a temporary table. + + @param base Handlerton for table to be deleted. + @param path Path to the table to be deleted (i.e. path + to its .frm without an extension). + + @retval false - success. + @retval true - failure. 
+*/ + +bool rm_temporary_table(handlerton *base, const char *path) { bool error=0; handler *file; - char *ext; + char frm_path[FN_REFLEN + 1]; DBUG_ENTER("rm_temporary_table"); - strmov(ext= strend(path), reg_ext); - if (mysql_file_delete(key_file_frm, path, MYF(0))) + strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS); + if (mysql_file_delete(key_file_frm, frm_path, MYF(0))) error=1; /* purecov: inspected */ - *ext= 0; // remove extension file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base); if (file && file->ha_delete_table(path)) { @@ -8078,7 +8082,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, thd->mark_used_columns= mark_used_columns; DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); if (allow_sum_func) - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= + (nesting_map)1 << thd->lex->current_select->nest_level; thd->where= THD::DEFAULT_WHERE; save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; thd->lex->current_select->is_item_list_lookup= 0; @@ -9383,7 +9388,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name, MDL_EXCLUSIVE)); - key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + key_length= create_table_def_key(key, db, table_name); if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key, key_length))) @@ -9497,12 +9502,14 @@ open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias, { LEX_STRING pathstr; File_parser *parser; - char path[FN_REFLEN]; + char path[FN_REFLEN+1]; DBUG_ENTER("open_new_frm"); /* Create path with extension */ - pathstr.length= (uint) (strxmov(path, share->normalized_path.str, reg_ext, - NullS)- path); + pathstr.length= (uint) (strxnmov(path, sizeof(path) - 1, + share->normalized_path.str, + reg_ext, + NullS) - path); pathstr.str= path; if ((parser= sql_parse_prepare(&pathstr, mem_root, 1))) diff --git a/sql/sql_base.h b/sql/sql_base.h index 45d41777fea..21a2ff322f0 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -81,6 +81,31 @@ uint cached_table_definitions(void); uint create_table_def_key(THD *thd, char *key, const TABLE_LIST *table_list, bool tmp_table); + +/** + Create a table cache key for non-temporary table. + + @param key Buffer for key (must be at least NAME_LEN*2+2 bytes). + @param db Database name. + @param table_name Table name. + + @return Length of key. + + @sa create_table_def_key(thd, char *, table_list, bool) +*/ + +inline uint +create_table_def_key(char *key, const char *db, const char *table_name) +{ + /* + In theory caller should ensure that both db and table_name are + not longer than NAME_LEN bytes. In practice we play safe to avoid + buffer overruns. 
+ */ + return (uint)(strmake(strmake(key, db, NAME_LEN) + 1, table_name, + NAME_LEN) - key + 1); +} + TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key, uint key_length, uint db_flags, int *error, my_hash_value_type hash_value); @@ -158,7 +183,7 @@ thr_lock_type read_lock_type_for_table(THD *thd, TABLE_LIST *table_list); my_bool mysql_rm_tmp_tables(void); -bool rm_temporary_table(handlerton *base, char *path); +bool rm_temporary_table(handlerton *base, const char *path); void close_tables_for_reopen(THD *thd, TABLE_LIST **tables, const MDL_savepoint &start_of_statement_svp); TABLE_LIST *find_table_in_list(TABLE_LIST *table, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 191cc4e01c8..0d28e9adb6f 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -3123,8 +3123,8 @@ void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list) char key[MAX_DBKEY_LENGTH]; uint key_length; - key_length=(uint) (strmov(strmov(key,table_list->db)+1, - table_list->table_name) -key)+ 1; + key_length= create_table_def_key(key, table_list->db, + table_list->table_name); // We don't store temporary tables => no key_length+=4 ... invalidate_table(thd, (uchar *)key, key_length); @@ -3242,8 +3242,8 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, DBUG_PRINT("qcache", ("view: %s db: %s", tables_used->view_name.str, tables_used->view_db.str)); - key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1, - tables_used->view_name.str) - key) + 1; + key_length= create_table_def_key(key, tables_used->view_db.str, + tables_used->view_name.str); /* There are not callback function for for VIEWs */ @@ -4287,15 +4287,14 @@ my_bool Query_cache::move_by_type(uchar **border, case Query_cache_block::RES_CONT: case Query_cache_block::RESULT: { - DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block, - (int) block->type)); - if (*border == 0) - break; - Query_cache_block *query_block = block->result()->parent(), - *next = block->next, - *prev = block->prev; - Query_cache_block::block_type type = block->type; - BLOCK_LOCK_WR(query_block); + DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block,
+ (int) block->type));
+ if (*border == 0)
+ break;
+ Query_cache_block *query_block= block->result()->parent();
+ BLOCK_LOCK_WR(query_block);
+ Query_cache_block *next= block->next, *prev= block->prev;
+ Query_cache_block::block_type type= block->type; ulong len = block->length, used = block->used; Query_cache_block *pprev = block->pprev, *pnext = block->pnext, @@ -4457,8 +4456,9 @@ uint Query_cache::filename_2_table_key (char *key, const char *path, *db_length= (filename - dbname) - 1; DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename)); - DBUG_RETURN((uint) (strmov(strmake(key, dbname, *db_length) + 1, - filename) -key) + 1); + DBUG_RETURN((uint) (strmake(strmake(key, dbname, + min(*db_length, NAME_LEN)) + 1, + filename, NAME_LEN) - key) + 1); } /**************************************************************************** diff --git a/sql/sql_class.cc b/sql/sql_class.cc index df71b78ada0..c5005c8ad5c 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1850,6 +1850,19 @@ void THD::cleanup_after_query() stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; auto_inc_intervals_in_cur_stmt_for_binlog.empty(); rand_used= 0; +#ifndef EMBEDDED_LIBRARY + /* + Clean possible unused INSERT_ID events by current statement. + is_update_query() is needed to ignore SET statements: + Statements that don't update anything directly and don't + used stored functions. This is mostly necessary to ignore + statements in binlog between SET INSERT_ID and DML statement + which is intended to consume its event (there can be other + SET statements between them). + */ + if ((rli_slave || rli_fake) && is_update_query(lex->sql_command)) + auto_inc_intervals_forced.empty(); +#endif } if (first_successful_insert_id_in_cur_stmt > 0) { @@ -4467,9 +4480,14 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state) bool xid_cache_insert(XID_STATE *xid_state) { mysql_mutex_lock(&LOCK_xid_cache); - DBUG_ASSERT(my_hash_search(&xid_cache, xid_state->xid.key(), - xid_state->xid.key_length())==0); - my_bool res=my_hash_insert(&xid_cache, (uchar*)xid_state); + if (my_hash_search(&xid_cache, xid_state->xid.key(), + xid_state->xid.key_length())) + { + mysql_mutex_unlock(&LOCK_xid_cache); + my_error(ER_XAER_DUPID, MYF(0)); + return true; + } + bool res= my_hash_insert(&xid_cache, (uchar*)xid_state); mysql_mutex_unlock(&LOCK_xid_cache); return res; } diff --git a/sql/sql_class.h b/sql/sql_class.h index cff6acbb9c9..3ce49c52a86 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -574,6 +574,9 @@ typedef struct system_variables ulong wt_timeout_long, wt_deadlock_search_depth_long; double long_query_time_double; + + my_bool pseudo_slave_mode; + } SV; /** diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 631825f0527..57820784b8d 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1262,6 +1262,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) if (thd->lex->mi.pos) { + if (thd->lex->mi.relay_log_pos) + slave_errno=ER_BAD_SLAVE_UNTIL_COND; mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS; mi->rli.until_log_pos= thd->lex->mi.pos; /* @@ -1273,6 +1275,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) } else if (thd->lex->mi.relay_log_pos) { + if (thd->lex->mi.pos) + slave_errno=ER_BAD_SLAVE_UNTIL_COND; mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS; mi->rli.until_log_pos= thd->lex->mi.relay_log_pos; strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 4fb05fe71c8..38b9b643d10 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2012 Oracle and/or its affiliates. +/* Copyright (c) 2000, 2013 Oracle and/or its affiliates. 
Copyright (c) 2009, 2013 Monty Program Ab. This program is free software; you can redistribute it and/or modify @@ -553,25 +553,25 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, ORDER *group, bool *hidden_group_fields) { int res; - nesting_map save_allow_sum_func=thd->lex->allow_sum_func ; + st_select_lex *const select= thd->lex->current_select; + nesting_map save_allow_sum_func= thd->lex->allow_sum_func; /* Need to save the value, so we can turn off only any new non_agg_field_used additions coming from the WHERE */ - const bool saved_non_agg_field_used= - thd->lex->current_select->non_agg_field_used(); + const bool saved_non_agg_field_used= select->non_agg_field_used(); DBUG_ENTER("setup_without_group"); - thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); res= setup_conds(thd, tables, leaves, conds); /* it's not wrong to have non-aggregated columns in a WHERE */ - thd->lex->current_select->set_non_agg_field_used(saved_non_agg_field_used); + select->set_non_agg_field_used(saved_non_agg_field_used); - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, order); - thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields, group, hidden_group_fields); thd->lex->allow_sum_func= save_allow_sum_func; @@ -720,7 +720,7 @@ JOIN::prepare(Item ***rref_pointer_array, { nesting_map save_allow_sum_func= thd->lex->allow_sum_func; thd->where="having clause"; - thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level; + thd->lex->allow_sum_func|= (nesting_map)1 << select_lex_arg->nest_level; select_lex->having_fix_field= 1; /* Wrap alone field in HAVING clause in case it will be outer field of subquery @@ -2187,6 +2187,7 @@ JOIN::exec() { List<Item> *columns_list= &fields_list; int tmp_error; + DBUG_ENTER("JOIN::exec"); thd_proc_info(thd, "executing"); @@ -5438,7 +5439,8 @@ best_access_path(JOIN *join, in ReuseRangeEstimateForRef-3. 
*/ if (table->quick_keys.is_set(key) && - (const_part & ((1 << table->quick_key_parts[key])-1)) == + (const_part & + (((key_part_map)1 << table->quick_key_parts[key])-1)) == (((key_part_map)1 << table->quick_key_parts[key])-1) && table->quick_n_ranges[key] == 1 && records > (double) table->quick_rows[key]) @@ -5602,7 +5604,8 @@ best_access_path(JOIN *join, */ if (table->quick_keys.is_set(key) && table->quick_key_parts[key] <= max_key_part && - const_part & (1 << table->quick_key_parts[key]) && + const_part & + ((key_part_map)1 << table->quick_key_parts[key]) && table->quick_n_ranges[key] == 1 + test(ref_or_null_part & const_part) && records > (double) table->quick_rows[key]) @@ -7933,7 +7936,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, j->ref.items[i]=keyuse->val; // Save for cond removal j->ref.cond_guards[i]= keyuse->cond_guard; if (keyuse->null_rejecting) - j->ref.null_rejecting |= 1 << i; + j->ref.null_rejecting|= (key_part_map)1 << i; keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables; if (!keyuse->val->used_tables() && !thd->lex->describe) { // Compare against constant @@ -8209,7 +8212,7 @@ static void add_not_null_conds(JOIN *join) { for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++) { - if (tab->ref.null_rejecting & (1 << keypart)) + if (tab->ref.null_rejecting & ((key_part_map)1 << keypart)) { Item *item= tab->ref.items[keypart]; Item *notnull; @@ -14604,11 +14607,11 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, } else { - recinfo->null_bit= 1 << (null_count & 7); + recinfo->null_bit= (uint8)1 << (null_count & 7); recinfo->null_pos= null_count/8; } field->move_field(pos,null_flags+null_count/8, - 1 << (null_count & 7)); + (uint8)1 << (null_count & 7)); null_count++; } else @@ -15056,7 +15059,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list) { cur_field->move_field(field_pos, (uchar*) null_pos, null_bit); null_bit<<= 1; - if (null_bit == (1 << 8)) + if (null_bit == (uint)1 << 8) { ++null_pos; null_bit= 1; @@ -19103,17 +19106,29 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, if (!keep_quick) { select->cleanup(); - /* - The select object should now be ready for the next use. If it - is re-used then there exists a backup copy of this join tab - which has the pointer to it. The join tab will be restored in - JOIN::reset(). So here we just delete the pointer to it. - */ - tab->select= NULL; - // If we deleted the quick select object we need to clear quick_keys + + // If we deleted the quick object we need to clear quick_keys table->quick_keys.clear_all(); table->intersect_keys.clear_all(); } + else + { + // Need to close the index scan in order to re-use the handler + tab->select->quick->range_end(); + } + + /* + The select object is now ready for the next use. To avoid that + the select object is used when reading the records in sorted + order we set the pointer to it to NULL. The select pointer will + be restored from the saved_select pointer when this select + operation is completed (@see JOIN::exec). This ensures that it + will be re-used when filesort is used by subqueries that are + executed multiple times. 
+ */ + tab->saved_select= tab->select; + tab->select= NULL; + // Restore the output resultset table->sort.io_cache= tablesort_result_cache; } diff --git a/sql/sql_select.h b/sql/sql_select.h index bac29b96c5a..f52e9c45613 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1,8 +1,8 @@ #ifndef SQL_SELECT_INCLUDED #define SQL_SELECT_INCLUDED -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. - Copyright (c) 2008-2011 Monty Program Ab +/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. + Copyright (c) 2008, 2013, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 330c28ebbd8..f75d1bafdd3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -648,13 +648,6 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) struct st_global_ddl_log { - /* - We need to adjust buffer size to be able to handle downgrades/upgrades - where IO_SIZE has changed. We'll set the buffer size such that we can - handle that the buffer size was upto 4 times bigger in the version - that wrote the DDL log. - */ - char file_entry_buf[4*IO_SIZE]; char file_name_str[FN_REFLEN]; char *file_name; DDL_LOG_MEMORY_ENTRY *first_free; @@ -682,51 +675,60 @@ mysql_mutex_t LOCK_gdl; #define DDL_LOG_NUM_ENTRY_POS 0 #define DDL_LOG_NAME_LEN_POS 4 #define DDL_LOG_IO_SIZE_POS 8 +#define DDL_LOG_HEADER_SIZE 12 -/* - Read one entry from ddl log file - SYNOPSIS - read_ddl_log_file_entry() - entry_no Entry number to read - RETURN VALUES - TRUE Error - FALSE Success +/** + Read one entry from ddl log file. + @param[out] file_entry_buf Buffer to read into + @param entry_no Entry number to read + @param size Number of bytes of the entry to read + + @return Operation status + @retval true Error + @retval false Success */ -static bool read_ddl_log_file_entry(uint entry_no) +static bool read_ddl_log_file_entry(uchar *file_entry_buf, + uint entry_no, + uint size) { bool error= FALSE; File file_id= global_ddl_log.file_id; - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; uint io_size= global_ddl_log.io_size; DBUG_ENTER("read_ddl_log_file_entry"); + DBUG_ASSERT(io_size >= size); - if (mysql_file_pread(file_id, file_entry_buf, io_size, io_size * entry_no, - MYF(MY_WME)) != io_size) + if (mysql_file_pread(file_id, file_entry_buf, size, io_size * entry_no, + MYF(MY_WME)) != size) error= TRUE; DBUG_RETURN(error); } -/* - Write one entry from ddl log file - SYNOPSIS - write_ddl_log_file_entry() - entry_no Entry number to write - RETURN VALUES - TRUE Error - FALSE Success +/** + Write one entry to ddl log file. 
+ + @param file_entry_buf Buffer to write + @param entry_no Entry number to write + @param size Number of bytes of the entry to write + + @return Operation status + @retval true Error + @retval false Success */ -static bool write_ddl_log_file_entry(uint entry_no) +static bool write_ddl_log_file_entry(uchar *file_entry_buf, + uint entry_no, + uint size) { bool error= FALSE; File file_id= global_ddl_log.file_id; - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + uint io_size= global_ddl_log.io_size; DBUG_ENTER("write_ddl_log_file_entry"); + DBUG_ASSERT(io_size >= size); - if (mysql_file_pwrite(file_id, (uchar*)file_entry_buf, - IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE) + if (mysql_file_pwrite(file_id, file_entry_buf, size, + io_size * entry_no, MYF(MY_WME)) != size) error= TRUE; DBUG_RETURN(error); } @@ -745,17 +747,20 @@ static bool write_ddl_log_header() { uint16 const_var; bool error= FALSE; + uchar file_entry_buf[DDL_LOG_HEADER_SIZE]; DBUG_ENTER("write_ddl_log_header"); + DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len) + <= global_ddl_log.io_size); - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS], + int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS], global_ddl_log.num_entries); - const_var= FN_LEN; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS], + const_var= global_ddl_log.name_len; + int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS], (ulong) const_var); - const_var= IO_SIZE; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS], + const_var= global_ddl_log.io_size; + int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS], (ulong) const_var); - if (write_ddl_log_file_entry(0UL)) + if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE)) { sql_print_error("Error writing ddl log header"); DBUG_RETURN(TRUE); @@ -795,18 +800,20 @@ static inline void create_ddl_log_file_name(char *file_name) static uint read_ddl_log_header() { - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + char file_entry_buf[DDL_LOG_HEADER_SIZE]; char file_name[FN_REFLEN]; uint entry_no; bool successful_open= FALSE; DBUG_ENTER("read_ddl_log_header"); + DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE); create_ddl_log_file_name(file_name); if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log, file_name, O_RDWR | O_BINARY, MYF(0))) >= 0) { - if (read_ddl_log_file_entry(0UL)) + if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL, + DDL_LOG_HEADER_SIZE)) { /* Write message into error log */ sql_print_error("Failed to read ddl log file in recovery"); @@ -819,8 +826,6 @@ static uint read_ddl_log_header() entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]); global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]); global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]); - DBUG_ASSERT(global_ddl_log.io_size <= - sizeof(global_ddl_log.file_entry_buf)); } else { @@ -835,30 +840,22 @@ static uint read_ddl_log_header() } -/* - Read a ddl log entry - SYNOPSIS - read_ddl_log_entry() - read_entry Number of entry to read - out:entry_info Information from entry - RETURN VALUES - TRUE Error - FALSE Success - DESCRIPTION - Read a specified entry in the ddl log +/** + Set ddl log entry struct from buffer + @param read_entry Entry number + @param file_entry_buf Buffer to use + @param ddl_log_entry Entry to be set + + @note Pointers in ddl_log_entry will point into file_entry_buf! 
*/ -bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) +static void set_ddl_log_entry_from_buf(uint read_entry, + uchar *file_entry_buf, + DDL_LOG_ENTRY *ddl_log_entry) { - char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf; uint inx; uchar single_char; - DBUG_ENTER("read_ddl_log_entry"); - - if (read_ddl_log_file_entry(read_entry)) - { - DBUG_RETURN(TRUE); - } + DBUG_ENTER("set_ddl_log_entry_from_buf"); ddl_log_entry->entry_pos= read_entry; single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]; ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char; @@ -866,14 +863,14 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) ddl_log_entry->action_type= (enum ddl_log_action_code)single_char; ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS]; ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]); - ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS]; + ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS]; inx= DDL_LOG_NAME_POS + global_ddl_log.name_len; - ddl_log_entry->from_name= &file_entry_buf[inx]; + ddl_log_entry->from_name= (char*) &file_entry_buf[inx]; inx+= global_ddl_log.name_len; - ddl_log_entry->handler_name= &file_entry_buf[inx]; - DBUG_RETURN(FALSE); + ddl_log_entry->handler_name= (char*) &file_entry_buf[inx]; + DBUG_VOID_RETURN; } - + /* Initialise ddl log @@ -1076,6 +1073,7 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used; DBUG_ENTER("get_free_ddl_log_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); if (global_ddl_log.first_free == NULL) { if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc( @@ -1133,34 +1131,36 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool error, write_header; + char file_entry_buf[IO_SIZE]; DBUG_ENTER("write_ddl_log_entry"); if (init_ddl_log()) { DBUG_RETURN(TRUE); } - global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= + memset(file_entry_buf, 0, sizeof(file_entry_buf)); + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_ENTRY_CODE; - global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= + file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= (char)ddl_log_entry->action_type; - global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], + file_entry_buf[DDL_LOG_PHASE_POS]= 0; + int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], ddl_log_entry->next_entry); - DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - ddl_log_entry->name, FN_LEN - 1); + DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name, + global_ddl_log.name_len - 1); if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION || ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION) { - DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN], - ddl_log_entry->from_name, FN_LEN - 1); + DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len], + ddl_log_entry->from_name, global_ddl_log.name_len - 1); } else - global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0; - DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)], - 
ddl_log_entry->handler_name, FN_LEN - 1); + file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0; + DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)], + ddl_log_entry->handler_name, global_ddl_log.name_len - 1); if (get_free_ddl_log_entry(active_entry, &write_header)) { DBUG_RETURN(TRUE); @@ -1168,14 +1168,15 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, error= FALSE; DBUG_PRINT("ddl_log", ("write type %c next %u name '%s' from_name '%s' handler '%s'", - (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS], + (char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS], ddl_log_entry->next_entry, - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + FN_LEN], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + (2*FN_LEN)])); - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + (char*) &file_entry_buf[DDL_LOG_NAME_POS], + (char*) &file_entry_buf[DDL_LOG_NAME_POS + + global_ddl_log.name_len], + (char*) &file_entry_buf[DDL_LOG_NAME_POS + + (2*global_ddl_log.name_len)])); + if (write_ddl_log_file_entry((uchar*) file_entry_buf, + (*active_entry)->entry_pos, IO_SIZE)) { error= TRUE; sql_print_error("Failed to write entry_no = %u", @@ -1225,13 +1226,14 @@ bool write_execute_ddl_log_entry(uint first_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool write_header= FALSE; - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + char file_entry_buf[IO_SIZE]; DBUG_ENTER("write_execute_ddl_log_entry"); if (init_ddl_log()) { DBUG_RETURN(TRUE); } + memset(file_entry_buf, 0, sizeof(file_entry_buf)); if (!complete) { /* @@ -1245,12 +1247,7 @@ bool write_execute_ddl_log_entry(uint first_entry, } else file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE; - file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */ - file_entry_buf[DDL_LOG_PHASE_POS]= 0; int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry); - file_entry_buf[DDL_LOG_NAME_POS]= 0; - file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0; - file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0; if (!(*active_entry)) { if (get_free_ddl_log_entry(active_entry, &write_header)) @@ -1258,7 +1255,9 @@ bool write_execute_ddl_log_entry(uint first_entry, DBUG_RETURN(TRUE); } } - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + if (write_ddl_log_file_entry((uchar*) file_entry_buf, + (*active_entry)->entry_pos, + IO_SIZE)) { sql_print_error("Error writing execute entry in ddl log"); release_ddl_log_memory_entry(*active_entry); @@ -1303,10 +1302,16 @@ bool write_execute_ddl_log_entry(uint first_entry, bool deactivate_ddl_log_entry(uint entry_no) { - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + uchar file_entry_buf[DDL_LOG_NAME_POS]; DBUG_ENTER("deactivate_ddl_log_entry"); - if (!read_ddl_log_file_entry(entry_no)) + + /* + Only need to read and write the first bytes of the entry, where + ENTRY_TYPE, ACTION_TYPE and PHASE reside. Using DDL_LOG_NAME_POS + to include all info except for the names. + */ + if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) { if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE) { @@ -1324,7 +1329,7 @@ bool deactivate_ddl_log_entry(uint entry_no) { DBUG_ASSERT(0); } - if (write_ddl_log_file_entry(entry_no)) + if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) { sql_print_error("Error in deactivating log entry. 
Position = %u", entry_no); @@ -1385,6 +1390,7 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry; DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry; DBUG_ENTER("release_ddl_log_memory_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); global_ddl_log.first_free= log_entry; log_entry->next_log_entry= first_free; @@ -1414,24 +1420,26 @@ bool execute_ddl_log_entry(THD *thd, uint first_entry) { DDL_LOG_ENTRY ddl_log_entry; uint read_entry= first_entry; + uchar file_entry_buf[IO_SIZE]; DBUG_ENTER("execute_ddl_log_entry"); mysql_mutex_lock(&LOCK_gdl); do { - if (read_ddl_log_entry(read_entry, &ddl_log_entry)) + if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE)) { - /* Write to error log and continue with next log entry */ + /* Print the error to the log and continue with next log entry */ sql_print_error("Failed to read entry = %u from ddl log", read_entry); break; } + set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry); DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE); if (execute_ddl_log_action(thd, &ddl_log_entry)) { - /* Write to error log and continue with next log entry */ + /* Print the error to the log and continue with next log entry */ sql_print_error("Failed to execute action for entry = %u from ddl log", read_entry); break; @@ -1476,13 +1484,14 @@ void execute_ddl_log_recovery() uint num_entries, i; THD *thd; DDL_LOG_ENTRY ddl_log_entry; + uchar *file_entry_buf; + uint io_size; char file_name[FN_REFLEN]; DBUG_ENTER("execute_ddl_log_recovery"); /* Initialise global_ddl_log struct */ - bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf)); global_ddl_log.inited= FALSE; global_ddl_log.recovery_phase= TRUE; global_ddl_log.io_size= IO_SIZE; @@ -1497,14 +1506,23 @@ void execute_ddl_log_recovery() thd->store_globals(); num_entries= read_ddl_log_header(); + io_size= global_ddl_log.io_size; + file_entry_buf= (uchar*) my_malloc(io_size, MYF(0)); + if (!file_entry_buf) + { + sql_print_error("Failed to allocate buffer for recover ddl log"); + DBUG_VOID_RETURN; + } for (i= 1; i < num_entries + 1; i++) { - if (read_ddl_log_entry(i, &ddl_log_entry)) + if (read_ddl_log_file_entry(file_entry_buf, i, io_size)) { sql_print_error("Failed to read entry no = %u from ddl log", i); continue; } + + set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry); if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE) { if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry)) @@ -1519,6 +1537,7 @@ void execute_ddl_log_recovery() (void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0)); global_ddl_log.recovery_phase= FALSE; delete thd; + my_free(file_entry_buf); /* Remember that we don't have a THD */ my_pthread_setspecific_ptr(THR_THD, 0); DBUG_VOID_RETURN; @@ -1535,14 +1554,16 @@ void execute_ddl_log_recovery() void release_ddl_log() { - DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free; - DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used; + DDL_LOG_MEMORY_ENTRY *free_list; + DDL_LOG_MEMORY_ENTRY *used_list; DBUG_ENTER("release_ddl_log"); if (!global_ddl_log.do_release) DBUG_VOID_RETURN; mysql_mutex_lock(&LOCK_gdl); + free_list= global_ddl_log.first_free; + used_list= global_ddl_log.first_used; while (used_list) { DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry; diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 0c0742b3805..f39901a6ca5 100644 --- a/sql/strfunc.cc 
+++ b/sql/strfunc.cc @@ -85,7 +85,7 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs, *set_warning= 1; } else - found|= ((longlong) 1 << (find - 1)); + found|= 1ULL << (find - 1); if (pos >= end) break; start= pos + mblen; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c1c6e142706..76486dcca71 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3823,9 +3823,67 @@ static Sys_var_ulong Sys_debug_binlog_fsync_sleep( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1)); #endif + static Sys_var_harows Sys_expensive_subquery_limit( "expensive_subquery_limit", "The maximum number of rows a subquery may examine in order to be " "executed during optimization and used for constant optimization", SESSION_VAR(expensive_subquery_limit), CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, HA_POS_ERROR), DEFAULT(100), BLOCK_SIZE(1)); + +static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) +{ + longlong previous_val= thd->variables.pseudo_slave_mode; + longlong val= (longlong) var->save_result.ulonglong_value; + bool rli_fake= false; + +#ifndef EMBEDDED_LIBRARY + rli_fake= thd->rli_fake ? true : false; +#endif + + if (rli_fake) + { + if (!val) + { +#ifndef EMBEDDED_LIBRARY + delete thd->rli_fake; + thd->rli_fake= NULL; +#endif + } + else if (previous_val && val) + goto ineffective; + else if (!previous_val && val) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "'pseudo_slave_mode' is already ON."); + } + else + { + if (!previous_val && !val) + goto ineffective; + else if (previous_val && !val) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "Slave applier execution mode not active, " + "statement ineffective."); + } + goto end; + +ineffective: + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "'pseudo_slave_mode' change was ineffective."); + +end: + return FALSE; +} +static Sys_var_mybool Sys_pseudo_slave_mode( + "pseudo_slave_mode", + "SET pseudo_slave_mode= 0,1 are commands that mysqlbinlog " + "adds to beginning and end of binary log dumps. 
While zero " + "value indeed disables, the actual enabling of the slave " + "applier execution mode is done implicitly when a " + "Format_description_event is sent through the session.", + SESSION_ONLY(pseudo_slave_mode), NO_CMD_LINE, DEFAULT(FALSE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_pseudo_slave_mode)); + diff --git a/sql/transaction.cc b/sql/transaction.cc index 3359decbcd5..1623cd57d77 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -567,15 +567,19 @@ bool trans_xa_start(THD *thd) my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]); else if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction()) my_error(ER_XAER_OUTSIDE, MYF(0)); - else if (xid_cache_search(thd->lex->xid)) - my_error(ER_XAER_DUPID, MYF(0)); else if (!trans_begin(thd)) { DBUG_ASSERT(thd->transaction.xid_state.xid.is_null()); thd->transaction.xid_state.xa_state= XA_ACTIVE; thd->transaction.xid_state.rm_error= 0; thd->transaction.xid_state.xid.set(thd->lex->xid); - xid_cache_insert(&thd->transaction.xid_state); + if (xid_cache_insert(&thd->transaction.xid_state)) + { + thd->transaction.xid_state.xa_state= XA_NOTR; + thd->transaction.xid_state.xid.null(); + trans_rollback(thd); + DBUG_RETURN(true); + } DBUG_RETURN(FALSE); } @@ -661,6 +665,16 @@ bool trans_xa_commit(THD *thd) if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) { + /* + xid_state.in_thd is always true beside of xa recovery procedure. + Note, that there is no race condition here between xid_cache_search + and xid_cache_delete, since we always delete our own XID + (thd->lex->xid == thd->transaction.xid_state.xid). + The only case when thd->lex->xid != thd->transaction.xid_state.xid + and xid_state->in_thd == 0 is in the function + xa_cache_insert(XID, xa_states), which is called before starting + client connections, and thus is always single-threaded. + */ XID_STATE *xs= xid_cache_search(thd->lex->xid); res= !xs || xs->in_thd; if (res) diff --git a/sql/tztime.cc b/sql/tztime.cc index 8194367a7f9..0e793290bba 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1856,7 +1856,7 @@ static Time_zone* tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) { TABLE *table= 0; - TIME_ZONE_INFO *tz_info; + TIME_ZONE_INFO *tz_info= NULL; Tz_names_entry *tmp_tzname; Time_zone *return_val= 0; int res; @@ -1866,7 +1866,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) uchar keybuff[32]; Field *field; String abbr(buff, sizeof(buff), &my_charset_latin1); - char *alloc_buff, *tz_name_buff; + char *alloc_buff= NULL; + char *tz_name_buff= NULL; /* Temporary arrays that are used for loading of data for filling TIME_ZONE_INFO structure @@ -1886,22 +1887,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) DBUG_ENTER("tz_load_from_open_tables"); - /* Prepare tz_info for loading also let us make copy of time zone name */ - if (!(alloc_buff= (char*) alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) + - tz_name->length() + 1))) - { - sql_print_error("Out of memory while loading time zone description"); - return 0; - } - tz_info= (TIME_ZONE_INFO *)alloc_buff; - bzero(tz_info, sizeof(TIME_ZONE_INFO)); - tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO); - /* - By writing zero to the end we guarantee that we can call ptr() - instead of c_ptr() for time zone name. - */ - strmake(tz_name_buff, tz_name->ptr(), tz_name->length()); - /* Let us find out time zone id by its name (there is only one index and it is specifically for this purpose). |