| author | Sergei Golubchik <sergii@pisem.net> | 2013-03-27 23:41:02 +0100 |
|---|---|---|
| committer | Sergei Golubchik <sergii@pisem.net> | 2013-03-27 23:41:02 +0100 |
| commit | 993ea79f2df42292eceeee394e8ece9f4a3f6cf2 (patch) | |
| tree | d105c8288a89a25d412e9006b740c756db6326d6 /sql | |
| parent | 1827d9591e24ee469527021771088d842ab18374 (diff) | |
| parent | 6599fd3e9c23a8957a63146cbe6a0ffc4c292a3d (diff) | |
| download | mariadb-git-993ea79f2df42292eceeee394e8ece9f4a3f6cf2.tar.gz | |
5.5 merge
Diffstat (limited to 'sql')
67 files changed, 1438 insertions, 645 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 4aa7b8f882e..5d61df2fa9b 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -188,9 +188,12 @@ INSTALL_DEBUG_TARGET(mysqld PDB_DESTINATION ${INSTALL_SBINDIR}/debug RENAME mysqld-debug) +INCLUDE(${CMAKE_SOURCE_DIR}/cmake/bison.cmake) + # Handle out-of-source build from source package with possibly broken # bison. Copy bison output to from source to build directory, if not already # there +IF (NOT BISON_USABLE) IF (NOT ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR}) IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.cc) IF(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc) @@ -201,9 +204,8 @@ IF (NOT ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR}) ENDIF() ENDIF() ENDIF() +ENDIF() - -INCLUDE(${CMAKE_SOURCE_DIR}/cmake/bison.cmake) RUN_BISON( ${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.yy ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc diff --git a/sql/field.cc b/sql/field.cc index d4468ba3c5b..a0f46778092 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -199,7 +199,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -230,7 +230,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24, + MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -261,7 +261,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24, + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -292,7 +292,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR @@ -7619,6 +7619,19 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) if (wkb_type < (uint32) Geometry::wkb_point || wkb_type > (uint32) Geometry::wkb_last) goto err; + + if (geom_type != Field::GEOM_GEOMETRY && + geom_type != Field::GEOM_GEOMETRYCOLLECTION && + (uint32) geom_type != wkb_type) + { + my_printf_error(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), MYF(0), + Geometry::ci_collection[geom_type]->m_name.str, + Geometry::ci_collection[wkb_type]->m_name.str, field_name, + (ulong) table->in_use->warning_info->current_row_for_warning()); + goto err_exit; + } + Field_blob::store_length(length); if (table->copy_blobs || length <= MAX_FIELD_WIDTH) { // Must make a copy @@ -7630,9 +7643,10 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) return 0; err: - bzero(ptr, 
Field_blob::pack_length()); my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); +err_exit: + bzero(ptr, Field_blob::pack_length()); return -1; } diff --git a/sql/field.h b/sql/field.h index e832928b114..92a01b9421c 100644 --- a/sql/field.h +++ b/sql/field.h @@ -2151,6 +2151,7 @@ public: bool has_charset(void) const { return TRUE; } /* enum and set are sorted as integers */ CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; } + uint decimals() const { return 0; } virtual uchar *pack(uchar *to, const uchar *from, uint max_length); virtual const uchar *unpack(uchar *to, const uchar *from, diff --git a/sql/filesort.cc b/sql/filesort.cc index f31afc226cc..0efdf90c936 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -953,7 +953,10 @@ static void make_sortkey(register Sort_param *param, { MYSQL_TIME buf; if (item->get_date_result(&buf, TIME_FUZZY_DATE | TIME_INVALID_DATES)) - DBUG_ASSERT(maybe_null && item->null_value); + { + DBUG_ASSERT(maybe_null); + DBUG_ASSERT(item->null_value); + } else value= pack_time(&buf); } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index ce3f17aeb92..5f67a89cb9c 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3801,6 +3801,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt) part, sub_elem->partition_name)); if ((error= m_file[part]->ha_truncate())) break; + sub_elem->part_state= PART_NORMAL; } while (++j < num_subparts); } else @@ -4399,6 +4400,7 @@ bool ha_partition::init_record_priority_queue() { if (bitmap_is_set(&m_part_info->used_partitions, i)) { + DBUG_PRINT("info", ("init rec-buf for part %u", i)); int2store(ptr, i); ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } @@ -5293,11 +5295,27 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) m_top_entry= NO_CURRENT_PART_ID; queue_remove_all(&m_queue); - DBUG_PRINT("info", ("m_part_spec.start_part %d", m_part_spec.start_part)); - for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) + /* + Position part_rec_buf_ptr to point to the first used partition >= + start_part. There may be partitions marked by used_partitions, + but is before start_part. These partitions has allocated record buffers + but is dynamically pruned, so those buffers must be skipped. 
+ */ + uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions); + for (; first_used_part < m_part_spec.start_part; first_used_part++) + { + if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part)) + part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; + } + DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u", + m_part_spec.start_part, first_used_part)); + for (i= first_used_part; i <= m_part_spec.end_part; i++) { if (!(bitmap_is_set(&(m_part_info->used_partitions), i))) continue; + DBUG_PRINT("info", ("reading from part %u (scan_type: %u)", + i, m_index_scan_type)); + DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS; int error; handler *file= m_file[i]; diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h index 5406c292b18..24ee848bed1 100644 --- a/sql/innodb_priv.h +++ b/sql/innodb_priv.h @@ -25,7 +25,7 @@ class THD; int get_quote_char_for_identifier(THD *thd, const char *name, uint length); bool schema_table_store_record(THD *thd, TABLE *table); void localtime_to_TIME(MYSQL_TIME *to, struct tm *from); -bool check_global_access(THD *thd, ulong want_access); +bool check_global_access(THD *thd, ulong want_access, bool no_errors=false); uint strconvert(CHARSET_INFO *from_cs, const char *from, CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors); diff --git a/sql/item.cc b/sql/item.cc index 9ef1593b0bf..1d8e466b2fd 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1277,11 +1277,15 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) DBUG_ASSERT(0); } - return 0; + return null_value= 0; err: + /* + if the item was not null and convertion failed, we return a zero date + if allowed, otherwise - null. + */ bzero((char*) ltime,sizeof(*ltime)); - return 1; + return null_value|= (fuzzydate & (TIME_NO_ZERO_DATE|TIME_NO_ZERO_IN_DATE)); } bool Item::get_seconds(ulonglong *sec, ulong *sec_part) @@ -8655,7 +8659,7 @@ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) Item_cache* Item_cache::get_cache(const Item *item) { - return get_cache(item, item->result_type()); + return get_cache(item, item->cmp_type()); } diff --git a/sql/item.h b/sql/item.h index db17b723071..d72d2f1f80b 100644 --- a/sql/item.h +++ b/sql/item.h @@ -4053,6 +4053,7 @@ public: bool cache_value(); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); int save_in_field(Field *field, bool no_conversions); + Item_result cmp_type() const { return TIME_RESULT; } void store_packed(longlong val_arg, Item *example); /* Having a clone_item method tells optimizer that this object diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index b1208243e6b..6f10e84a5f5 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -241,15 +241,15 @@ static uint collect_cmp_types(Item **items, uint nitems, bool skip_nulls= FALSE) items[i]->cmp_type() == ROW_RESULT) && cmp_row_type(items[0], items[i])) return 0; - found_types|= 1<< (uint)item_cmp_type(left_result, - items[i]->cmp_type()); + found_types|= 1U << (uint)item_cmp_type(left_result, + items[i]->cmp_type()); } /* Even if all right-hand items are NULLs and we are skipping them all, we need at least one type bit in the found_type bitmask. 
*/ if (skip_nulls && !found_types) - found_types= 1 << (uint)left_result; + found_types= 1U << (uint)left_result; return found_types; } @@ -906,7 +906,8 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg, } if ((*is_null= item->null_value)) return ~(ulonglong) 0; - if (cache_arg && item->const_item() && item->type() != Item::CACHE_ITEM) + if (cache_arg && item->const_item() && + !(item->type() == Item::CACHE_ITEM && item->cmp_type() == TIME_RESULT)) { Query_arena backup; Query_arena *save_arena= thd->switch_to_arena_for_cached_items(&backup); @@ -2864,12 +2865,12 @@ Item *Item_func_case::find_item(String *str) cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type()); DBUG_ASSERT(cmp_type != ROW_RESULT); DBUG_ASSERT(cmp_items[(uint)cmp_type]); - if (!(value_added_map & (1<<(uint)cmp_type))) + if (!(value_added_map & (1U << (uint)cmp_type))) { cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]); if ((null_value=args[first_expr_num]->null_value)) return else_expr_num != -1 ? args[else_expr_num] : 0; - value_added_map|= 1<<(uint)cmp_type; + value_added_map|= 1U << (uint)cmp_type; } if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value) return args[i + 1]; @@ -3076,10 +3077,10 @@ void Item_func_case::fix_length_and_dec() return; Item *date_arg= 0; - if (found_types & (1 << TIME_RESULT)) + if (found_types & (1U << TIME_RESULT)) date_arg= find_date_time_item(args, arg_count, 0); - if (found_types & (1 << STRING_RESULT)) + if (found_types & (1U << STRING_RESULT)) { /* If we'll do string comparison, we also need to aggregate @@ -3120,7 +3121,7 @@ void Item_func_case::fix_length_and_dec() for (i= 0; i <= (uint)TIME_RESULT; i++) { - if (found_types & (1 << i) && !cmp_items[i]) + if (found_types & (1U << i) && !cmp_items[i]) { DBUG_ASSERT((Item_result)i != ROW_RESULT); @@ -3976,7 +3977,7 @@ void Item_func_in::fix_length_and_dec() } for (i= 0; i <= (uint)TIME_RESULT; i++) { - if (found_types & 1 << i) + if (found_types & (1U << i)) { (type_cnt)++; cmp_type= (Item_result) i; @@ -4103,14 +4104,14 @@ void Item_func_in::fix_length_and_dec() } else { - if (found_types & (1 << TIME_RESULT)) + if (found_types & (1U << TIME_RESULT)) date_arg= find_date_time_item(args, arg_count, 0); - if (found_types & (1 << STRING_RESULT) && + if (found_types & (1U << STRING_RESULT) && agg_arg_charsets_for_comparison(cmp_collation, args, arg_count)) return; for (i= 0; i <= (uint) TIME_RESULT; i++) { - if (found_types & (1 << i) && !cmp_items[i]) + if (found_types & (1U << i) && !cmp_items[i]) { if (!cmp_items[i] && !(cmp_items[i]= cmp_item::get_comparator((Item_result)i, date_arg, @@ -4196,12 +4197,12 @@ longlong Item_func_in::val_int() Item_result cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type()); in_item= cmp_items[(uint)cmp_type]; DBUG_ASSERT(in_item); - if (!(value_added_map & (1 << (uint)cmp_type))) + if (!(value_added_map & (1U << (uint)cmp_type))) { in_item->store_value(args[0]); if ((null_value= args[0]->null_value)) return 0; - value_added_map|= 1 << (uint)cmp_type; + value_added_map|= 1U << (uint)cmp_type; } if (!in_item->cmp(args[i]) && !args[i]->null_value) return (longlong) (!negated); @@ -5574,6 +5575,7 @@ Item_equal::Item_equal(Item *f1, Item *f2, bool with_const_item) equal_items.push_back(f1); equal_items.push_back(f2); compare_as_dates= with_const_item && f2->cmp_type() == TIME_RESULT; + upper_levels= NULL; } @@ -5602,10 +5604,11 @@ Item_equal::Item_equal(Item_equal *item_equal) with_const= item_equal->with_const; compare_as_dates= 
item_equal->compare_as_dates; cond_false= item_equal->cond_false; + upper_levels= item_equal->upper_levels; } -/* +/** @brief Add a constant item to the Item_equal object @@ -5659,6 +5662,7 @@ void Item_equal::add_const(Item *c, Item *f) const_item_cache= 1; } + /** @brief Check whether a field is referred to in the multiple equality @@ -5727,6 +5731,87 @@ void Item_equal::merge(Item_equal *item) /** @brief + Merge members of another Item_equal object into this one + + @param item multiple equality whose members are to be merged + + @details + If the Item_equal 'item' happened to have some elements of the list + of equal items belonging to 'this' object then the function merges + the equal items from 'item' into this list. + If both lists contains constants and they are different then + the value of the cond_false flag is set to TRUE. + + @retval + 1 the lists of equal items in 'item' and 'this' contain common elements + @retval + 0 otherwise + + @notes + The method 'merge' just joins the list of equal items belonging to 'item' + to the list of equal items belonging to this object assuming that the lists + are disjoint. It would be more correct to call the method 'join'. + The method 'merge_with_check' really merges two lists of equal items if they + have common members. +*/ + +bool Item_equal::merge_with_check(Item_equal *item) +{ + bool intersected= FALSE; + Item_equal_fields_iterator_slow fi(*this); + while (fi++) + { + if (item->contains(fi.get_curr_field())) + { + fi.remove(); + intersected= TRUE; + } + } + if (intersected) + item->merge(this); + return intersected; +} + + +/** + @brief + Merge this object into a list of Item_equal objects + + @param list the list of Item_equal objects to merge into + + @details + If the list of equal items from 'this' object contains common members + with the lists of equal items belonging to Item_equal objects from 'list' + then all involved Item_equal objects e1,...,ek are merged into one + Item equal that replaces e1,...,ek in the 'list'. Otherwise this + Item_equal is joined to the 'list'. +*/ + +void Item_equal::merge_into_list(List<Item_equal> *list) +{ + Item_equal *item; + List_iterator<Item_equal> it(*list); + Item_equal *merge_into= NULL; + while((item= it++)) + { + if (!merge_into) + { + if (merge_with_check(item)) + merge_into= item; + } + else + { + if (item->merge_with_check(merge_into)) + it.remove(); + } + } + if (!merge_into) + list->push_back(this); +} + + +/** + @brief Order equal items of the multiple equality according to a sorting criteria @param compare function to compare items from the equal_items list diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 5a5f9ad60fd..4ca31e01df9 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1578,6 +1578,11 @@ public: DBUG_ASSERT(nlist->elements); list.prepand(nlist); } + void add_at_end(List<Item> *nlist) + { + DBUG_ASSERT(nlist->elements); + list.concat(nlist); + } bool fix_fields(THD *, Item **ref); void fix_after_pullout(st_select_lex *new_parent, Item **ref); @@ -1603,6 +1608,7 @@ public: bool eval_not_null_tables(uchar *opt_arg); }; +template <template<class> class LI, class T> class Item_equal_iterator; /* The class Item_equal is used to represent conjunctions of equality @@ -1730,7 +1736,11 @@ class Item_equal: public Item_bool_func used in the original equality. 
*/ Item_field *context_field; + public: + + COND_EQUAL *upper_levels; /* multiple equalities of upper and levels */ + inline Item_equal() : Item_bool_func(), with_const(FALSE), eval_item(0), cond_false(0), context_field(NULL) @@ -1747,6 +1757,8 @@ public: /** Get number of field items / references to field items in this object */ uint n_field_items() { return equal_items.elements-test(with_const); } void merge(Item_equal *item); + bool merge_with_check(Item_equal *equal_item); + void merge_into_list(List<Item_equal> *list); void update_const(); enum Functype functype() const { return MULT_EQUAL_FUNC; } longlong val_int(); @@ -1762,7 +1774,8 @@ public: CHARSET_INFO *compare_collation(); void set_context_field(Item_field *ctx_field) { context_field= ctx_field; } - friend class Item_equal_fields_iterator; + friend class Item_equal_iterator<List_iterator_fast,Item>; + friend class Item_equal_iterator<List_iterator,Item>; friend Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, Item_equal *item_equal); friend bool setup_sj_materialization_part1(struct st_join_table *tab); @@ -1781,43 +1794,55 @@ public: { upper_levels= 0; } + void copy(COND_EQUAL &cond_equal) + { + max_members= cond_equal.max_members; + upper_levels= cond_equal.upper_levels; + if (cond_equal.current_level.is_empty()) + current_level.empty(); + else + current_level= cond_equal.current_level; + } }; /* - The class Item_equal_fields_iterator is used to iterate over references - to table/view columns from a list of equal items. + The template Item_equal_iterator is used to define classes + Item_equal_fields_iterator and Item_equal_fields_iterator_slow. + These are helper classes for the class Item equal + Both classes are used to iterate over references to table/view columns + from the list of equal items that included in an Item_equal object. + The second class supports the operation of removal of the current member + from the list when performing an iteration. 
*/ -class Item_equal_fields_iterator : public List_iterator_fast<Item> +template <template<class> class LI, typename T> class Item_equal_iterator + : public LI<T> { +protected: Item_equal *item_equal; Item *curr_item; public: - Item_equal_fields_iterator(Item_equal &item_eq) - :List_iterator_fast<Item> (item_eq.equal_items) + Item_equal_iterator<LI,T>(Item_equal &item_eq) + :LI<T> (item_eq.equal_items) { curr_item= NULL; item_equal= &item_eq; if (item_eq.with_const) { - List_iterator_fast<Item> *list_it= this; + LI<T> *list_it= this; curr_item= (*list_it)++; } } Item* operator++(int) { - List_iterator_fast<Item> *list_it= this; + LI<T> *list_it= this; curr_item= (*list_it)++; return curr_item; } - Item ** ref() - { - return List_iterator_fast<Item>::ref(); - } void rewind(void) { - List_iterator_fast<Item> *list_it= this; + LI<T> *list_it= this; list_it->rewind(); if (item_equal->with_const) curr_item= (*list_it)++; @@ -1829,6 +1854,36 @@ public: } }; +typedef Item_equal_iterator<List_iterator_fast,Item > Item_equal_iterator_fast; + +class Item_equal_fields_iterator + :public Item_equal_iterator_fast +{ +public: + Item_equal_fields_iterator(Item_equal &item_eq) + :Item_equal_iterator_fast(item_eq) + { } + Item ** ref() + { + return List_iterator_fast<Item>::ref(); + } +}; + +typedef Item_equal_iterator<List_iterator,Item > Item_equal_iterator_iterator_slow; + +class Item_equal_fields_iterator_slow + :public Item_equal_iterator_iterator_slow +{ +public: + Item_equal_fields_iterator_slow(Item_equal &item_eq) + :Item_equal_iterator_iterator_slow(item_eq) + { } + void remove() + { + List_iterator<Item>::remove(); + } +}; + class Item_cond_and :public Item_cond { diff --git a/sql/item_create.cc b/sql/item_create.cc index fc31b074055..ba6eb7ff603 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -4453,8 +4453,7 @@ Create_func_make_set::create_native(THD *thd, LEX_STRING name, return NULL; } - Item *param_1= item_list->pop(); - return new (thd->mem_root) Item_func_make_set(param_1, *item_list); + return new (thd->mem_root) Item_func_make_set(*item_list); } diff --git a/sql/item_func.cc b/sql/item_func.cc index aca76eac82a..b4c825b6222 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -769,13 +769,14 @@ void Item_num_op::find_num_type(void) { hybrid_type= DECIMAL_RESULT; result_precision(); + fix_decimals(); } else { DBUG_ASSERT(r0 == INT_RESULT && r1 == INT_RESULT); - decimals= 0; hybrid_type=INT_RESULT; result_precision(); + decimals= 0; } DBUG_PRINT("info", ("Type: %s", (hybrid_type == REAL_RESULT ? "REAL_RESULT" : @@ -1708,6 +1709,7 @@ void Item_func_div::fix_length_and_dec() break; case DECIMAL_RESULT: result_precision(); + fix_decimals(); break; case STRING_RESULT: case ROW_RESULT: @@ -1907,6 +1909,16 @@ longlong Item_func_neg::int_op() if (args[0]->unsigned_flag && (ulonglong) value > (ulonglong) LONGLONG_MAX + 1) return raise_integer_overflow(); + + if (value == LONGLONG_MIN) + { + if (args[0]->unsigned_flag != unsigned_flag) + /* negation of LONGLONG_MIN is LONGLONG_MIN. 
*/ + return LONGLONG_MIN; + else + return raise_integer_overflow(); + } + return check_integer_overflow(-value, !args[0]->unsigned_flag && value < 0); } @@ -4819,7 +4831,7 @@ void Item_func_set_user_var::save_item_result(Item *item) { DBUG_ENTER("Item_func_set_user_var::save_item_result"); - switch (cached_result_type) { + switch (args[0]->result_type()) { case REAL_RESULT: save_result.vreal= item->val_result(); break; diff --git a/sql/item_func.h b/sql/item_func.h index f562c87fe1c..653641c9f72 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -431,6 +431,13 @@ public: void fix_num_length_and_dec(); virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */ + inline void fix_decimals() + { + DBUG_ASSERT(result_type() == DECIMAL_RESULT); + if (decimals == NOT_FIXED_DEC) + set_if_smaller(decimals, max_length - 1); + } + double val_real(); longlong val_int(); my_decimal *val_decimal(my_decimal *); diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index e3e80bdf59f..0a7f18e6546 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -561,8 +561,8 @@ longlong Item_func_spatial_mbr_rel::val_int() args[1]->null_value || !(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) || !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) || - g1->get_mbr(&mbr1, &dummy) || - g2->get_mbr(&mbr2, &dummy)))) + g1->get_mbr(&mbr1, &dummy) || !mbr1.valid() || + g2->get_mbr(&mbr2, &dummy) || !mbr2.valid()))) return 0; switch (spatial_rel) { @@ -687,12 +687,11 @@ longlong Item_func_spatial_rel::val_int() if ((null_value= (args[0]->null_value || args[1]->null_value || !(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) || - !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length()))))) + !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) || + g1->get_mbr(&mbr1, &c_end) || !mbr1.valid() || + g2->get_mbr(&mbr2, &c_end) || !mbr2.valid()))) goto exit; - g1->get_mbr(&mbr1, &c_end); - g2->get_mbr(&mbr2, &c_end); - umbr= mbr1; umbr.add_mbr(&mbr2); collector.set_extent(umbr.xmin, umbr.xmax, umbr.ymin, umbr.ymax); @@ -826,14 +825,14 @@ String *Item_func_spatial_operation::val_str(String *str_value) if ((null_value= (args[0]->null_value || args[1]->null_value || !(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) || - !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length()))))) + !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) || + g1->get_mbr(&mbr1, &c_end) || !mbr1.valid() || + g2->get_mbr(&mbr2, &c_end) || !mbr2.valid()))) { str_value= 0; goto exit; } - g1->get_mbr(&mbr1, &c_end); - g2->get_mbr(&mbr2, &c_end); mbr1.add_mbr(&mbr2); collector.set_extent(mbr1.xmin, mbr1.xmax, mbr1.ymin, mbr1.ymax); @@ -1358,11 +1357,11 @@ longlong Item_func_issimple::val_int() DBUG_ENTER("Item_func_issimple::val_int"); DBUG_ASSERT(fixed == 1); - if ((null_value= args[0]->null_value) || - !(g= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))) + if ((null_value= (args[0]->null_value || + !(g= Geometry::construct(&buffer, swkb->ptr(), swkb->length())) || + g->get_mbr(&mbr, &c_end)))) DBUG_RETURN(0); - g->get_mbr(&mbr, &c_end); collector.set_extent(mbr.xmin, mbr.xmax, mbr.ymin, mbr.ymax); if (g->get_class_info()->m_type_id == Geometry::wkb_point) @@ -1598,11 +1597,11 @@ double Item_func_distance::val_real() if ((null_value= (args[0]->null_value || args[1]->null_value || !(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) || - !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length()))))) 
+ !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) || + g1->get_mbr(&mbr1, &c_end) || + g2->get_mbr(&mbr2, &c_end)))) goto mem_error; - g1->get_mbr(&mbr1, &c_end); - g2->get_mbr(&mbr2, &c_end); mbr1.add_mbr(&mbr2); collector.set_extent(mbr1.xmin, mbr1.xmax, mbr1.ymin, mbr1.ymax); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 48de0c4bc5c..462db3c24fd 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -2461,40 +2461,16 @@ String *Item_func_elt::val_str(String *str) } -void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array, - List<Item> &fields) -{ - item->split_sum_func2(thd, ref_pointer_array, fields, &item, TRUE); - Item_str_func::split_sum_func(thd, ref_pointer_array, fields); -} - - void Item_func_make_set::fix_length_and_dec() { - uint32 char_length= arg_count - 1; /* Separators */ + uint32 char_length= arg_count - 2; /* Separators */ - if (agg_arg_charsets_for_string_result(collation, args, arg_count)) + if (agg_arg_charsets_for_string_result(collation, args + 1, arg_count - 1)) return; - for (uint i=0 ; i < arg_count ; i++) + for (uint i=1 ; i < arg_count ; i++) char_length+= args[i]->max_char_length(); fix_char_length(char_length); - used_tables_cache|= item->used_tables(); - not_null_tables_cache&= item->not_null_tables(); - const_item_cache&= item->const_item(); - with_sum_func= with_sum_func || item->with_sum_func; - with_field= with_field || item->with_field; -} - - -void Item_func_make_set::update_used_tables() -{ - DBUG_ENTER("Item_func_make_set::update_used_tables"); - Item_func::update_used_tables(); - item->update_used_tables(); - used_tables_cache|=item->used_tables(); - const_item_cache&=item->const_item(); - DBUG_VOID_RETURN; } @@ -2503,15 +2479,15 @@ String *Item_func_make_set::val_str(String *str) DBUG_ASSERT(fixed == 1); ulonglong bits; bool first_found=0; - Item **ptr=args; + Item **ptr=args+1; String *result=&my_empty_string; - bits=item->val_int(); - if ((null_value=item->null_value)) + bits=args[0]->val_int(); + if ((null_value=args[0]->null_value)) return NULL; - if (arg_count < 64) - bits &= ((ulonglong) 1 << arg_count)-1; + if (arg_count < 65) + bits &= ((ulonglong) 1 << (arg_count-1))-1; for (; bits; bits >>= 1, ptr++) { @@ -2551,39 +2527,6 @@ String *Item_func_make_set::val_str(String *str) } -Item *Item_func_make_set::transform(Item_transformer transformer, uchar *arg) -{ - DBUG_ASSERT(!current_thd->stmt_arena->is_stmt_prepare()); - - Item *new_item= item->transform(transformer, arg); - if (!new_item) - return 0; - - /* - THD::change_item_tree() should be called only if the tree was - really transformed, i.e. when a new item has been created. - Otherwise we'll be allocating a lot of unnecessary memory for - change records at each execution. - */ - if (item != new_item) - current_thd->change_item_tree(&item, new_item); - return Item_str_func::transform(transformer, arg); -} - - -void Item_func_make_set::print(String *str, enum_query_type query_type) -{ - str->append(STRING_WITH_LEN("make_set(")); - item->print(str, query_type); - if (arg_count) - { - str->append(','); - print_args(str, 0, query_type); - } - str->append(')'); -} - - String *Item_func_char::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -2990,7 +2933,7 @@ String *Item_func_conv_charset::val_str(String *str) return null_value ? 
0 : &str_value; String *arg= args[0]->val_str(str); uint dummy_errors; - if (!arg) + if (args[0]->null_value) { null_value=1; return 0; @@ -4677,11 +4620,16 @@ null: void Item_dyncol_get::print(String *str, enum_query_type query_type) { + /* see create_func_dyncol_get */ + DBUG_ASSERT(str->length() >= 5); + DBUG_ASSERT(strncmp(str->ptr() + str->length() - 5, "cast(", 5) == 0); + + str->length(str->length() - 5); // removing "cast(" str->append(STRING_WITH_LEN("column_get(")); args[0]->print(str, query_type); str->append(','); args[1]->print(str, query_type); - str->append(')'); + /* let the parent cast item add " as <type>)" */ } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 486b7cf36ef..00863e9af2b 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -544,31 +544,13 @@ public: class Item_func_make_set :public Item_str_func { - Item *item; String tmp_str; public: - Item_func_make_set(Item *a,List<Item> &list) :Item_str_func(list),item(a) {} + Item_func_make_set(List<Item> &list) :Item_str_func(list) {} String *val_str(String *str); - bool fix_fields(THD *thd, Item **ref) - { - DBUG_ASSERT(fixed == 0); - return ((!item->fixed && item->fix_fields(thd, &item)) || - item->check_cols(1) || - Item_func::fix_fields(thd, ref)); - } - void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); void fix_length_and_dec(); - void update_used_tables(); const char *func_name() const { return "make_set"; } - - bool walk(Item_processor processor, bool walk_subquery, uchar *arg) - { - return item->walk(processor, walk_subquery, arg) || - Item_str_func::walk(processor, walk_subquery, arg); - } - Item *transform(Item_transformer transformer, uchar *arg); - virtual void print(String *str, enum_query_type query_type); }; @@ -859,25 +841,37 @@ public: { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_int(); - return args[0]->val_int(); + longlong res= args[0]->val_int(); + if ((null_value= args[0]->null_value)) + return 0; + return res; } double val_real() { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_real(); - return args[0]->val_real(); + double res= args[0]->val_real(); + if ((null_value= args[0]->null_value)) + return 0; + return res; } my_decimal *val_decimal(my_decimal *d) { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_decimal(d); - return args[0]->val_decimal(d); + my_decimal *res= args[0]->val_decimal(d); + if ((null_value= args[0]->null_value)) + return NULL; + return res; } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::get_date(ltime, fuzzydate); - return args[0]->get_date(ltime, fuzzydate); + bool res= args[0]->get_date(ltime, fuzzydate); + if ((null_value= args[0]->null_value)) + return 1; + return res; } void fix_length_and_dec(); const char *func_name() const { return "convert"; } diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index c2273721805..f2ef955af05 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1102,6 +1102,11 @@ enum Item_result Item_singlerow_subselect::result_type() const return engine->type(); } +enum Item_result Item_singlerow_subselect::cmp_type() const +{ + return engine->cmptype(); +} + /* Don't rely on the result type to calculate field type. Ask the engine instead. 
@@ -1866,7 +1871,8 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join) print_where(item, "rewrite with MIN/MAX", QT_ORDINARY);); save_allow_sum_func= thd->lex->allow_sum_func; - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= + (nesting_map)1 << thd->lex->current_select->nest_level; /* Item_sum_(max|min) can't substitute other item => we can use 0 as reference, also Item_sum_(max|min) can't be fixed after creation, so @@ -3503,12 +3509,13 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row) { Item *sel_item; List_iterator_fast<Item> li(item_list); - res_type= STRING_RESULT; + cmp_type= res_type= STRING_RESULT; res_field_type= MYSQL_TYPE_VAR_STRING; for (uint i= 0; (sel_item= li++); i++) { item->max_length= sel_item->max_length; res_type= sel_item->result_type(); + cmp_type= sel_item->cmp_type(); res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; item->unsigned_flag= sel_item->unsigned_flag; @@ -3519,7 +3526,7 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row) //psergey-backport-timours: row[i]->store(sel_item); } if (item_list.elements > 1) - res_type= ROW_RESULT; + cmp_type= res_type= ROW_RESULT; } void subselect_single_select_engine::fix_length_and_dec(Item_cache **row) diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 4d9555f0366..e806f45041a 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -287,6 +287,7 @@ public: bool val_bool(); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); enum Item_result result_type() const; + enum Item_result cmp_type() const; enum_field_types field_type() const; void fix_length_and_dec(); @@ -733,6 +734,7 @@ protected: THD *thd; /* pointer to current THD */ Item_subselect *item; /* item, that use this engine */ enum Item_result res_type; /* type of results */ + enum Item_result cmp_type; /* how to compare the results */ enum_field_types res_field_type; /* column type of the results */ bool maybe_null; /* may be null (first item in select) */ public: @@ -747,7 +749,7 @@ public: { result= res; item= si; - res_type= STRING_RESULT; + cmp_type= res_type= STRING_RESULT; res_field_type= MYSQL_TYPE_VAR_STRING; maybe_null= 0; set_thd(thd_arg); @@ -787,6 +789,7 @@ public: virtual uint cols()= 0; /* return number of columns in select */ virtual uint8 uncacheable()= 0; /* query is uncacheable */ enum Item_result type() { return res_type; } + enum Item_result cmptype() { return cmp_type; } enum_field_types field_type() { return res_field_type; } virtual void exclude()= 0; virtual bool may_be_null() { return maybe_null; }; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index ab4827f16aa..9410a12f21b 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -152,9 +152,10 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) If it is there under a construct where it is not allowed we report an error. */ - invalid= !(allow_sum_func & (1 << max_arg_level)); + invalid= !(allow_sum_func & ((nesting_map)1 << max_arg_level)); } - else if (max_arg_level >= 0 || !(allow_sum_func & (1 << nest_level))) + else if (max_arg_level >= 0 || + !(allow_sum_func & ((nesting_map)1 << nest_level))) { /* The set function can be aggregated only in outer subqueries. 
@@ -163,7 +164,8 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) */ if (register_sum_func(thd, ref)) return TRUE; - invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + invalid= aggr_level < 0 && + !(allow_sum_func & ((nesting_map)1 << nest_level)); if (!invalid && thd->variables.sql_mode & MODE_ANSI) invalid= aggr_level < 0 && max_arg_level < nest_level; } @@ -311,14 +313,15 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl && sl->nest_level > max_arg_level; sl= sl->master_unit()->outer_select() ) { - if (aggr_level < 0 && (allow_sum_func & (1 << sl->nest_level))) + if (aggr_level < 0 && + (allow_sum_func & ((nesting_map)1 << sl->nest_level))) { /* Found the most nested subquery where the function can be aggregated */ aggr_level= sl->nest_level; aggr_sel= sl; } } - if (sl && (allow_sum_func & (1 << sl->nest_level))) + if (sl && (allow_sum_func & ((nesting_map)1 << sl->nest_level))) { /* We reached the subquery of level max_arg_level and checked @@ -564,7 +567,7 @@ void Item_sum::update_used_tables () used_tables_cache&= PSEUDO_TABLE_BITS; // the aggregate function is aggregated into its local context - used_tables_cache |= (1 << aggr_sel->join->table_count) - 1; + used_tables_cache|= ((table_map)1 << aggr_sel->join->tables) - 1; } because if we do it, table elimination will assume that - constructs like "COUNT(*)" use columns from all tables @@ -1594,9 +1597,10 @@ void Item_sum_avg::fix_length_and_dec() f_scale= args[0]->decimals; dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); } - else { + else + { decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); - max_length= args[0]->max_length + prec_increment; + max_length= min(args[0]->max_length + prec_increment, float_length(decimals)); } } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 02a7b8511af..69a6dac5381 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -923,16 +923,14 @@ longlong Item_func_dayofmonth::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_date(<ime, TIME_FUZZY_DATE); - return (longlong) ltime.day; + return get_arg0_date(<ime, TIME_FUZZY_DATE) ? 0 : (longlong) ltime.day; } longlong Item_func_month::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_date(<ime, TIME_FUZZY_DATE); - return (longlong) ltime.month; + return get_arg0_date(<ime, TIME_FUZZY_DATE) ? 0 : (longlong) ltime.month; } @@ -983,16 +981,14 @@ longlong Item_func_hour::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_time(<ime); - return ltime.hour; + return get_arg0_time(<ime) ? 0 : ltime.hour; } longlong Item_func_minute::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_time(<ime); - return ltime.minute; + return get_arg0_time(<ime) ? 0 : ltime.minute; } /** @@ -1002,8 +998,7 @@ longlong Item_func_second::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_time(<ime); - return ltime.second; + return get_arg0_time(<ime) ? 0 : ltime.second; } @@ -1120,8 +1115,7 @@ longlong Item_func_year::val_int() { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - (void) get_arg0_date(<ime, TIME_FUZZY_DATE); - return (longlong) ltime.year; + return get_arg0_date(<ime, TIME_FUZZY_DATE) ? 
0 : (longlong) ltime.year; } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 3e3cd698efc..9b2db9e816e 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -491,6 +491,7 @@ public: enum Item_result result_type () const { return STRING_RESULT; } CHARSET_INFO *charset_for_protocol(void) const { return &my_charset_bin; } enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } + Item_result cmp_type() const { return TIME_RESULT; } String *val_str(String *str); longlong val_int(); double val_real(); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 5a824e48b7b..723429f107a 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -2704,8 +2704,12 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len) node.parent= data->parent; // Set parent for the new node to old parent data->parent= numnodes; // Remember current node as new parent + DBUG_ASSERT(data->level <= MAX_LEVEL); data->pos[data->level]= numnodes; - node.level= data->level++; + if (data->level < MAX_LEVEL) + node.level= data->level++; + else + return MY_XML_ERROR; node.type= st->current_node_type; // TAG or ATTR node.beg= attr; node.end= attr + len; diff --git a/sql/log_event.cc b/sql/log_event.cc index 10f0fe1e931..55b95996515 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1808,6 +1808,7 @@ void Log_event::print_header(IO_CACHE* file, /** Prints a quoted string to io cache. Control characters are displayed as hex sequence, e.g. \x00 + Single-quote and backslash characters are escaped with a \ @param[in] file IO cache @param[in] prt Pointer to string @@ -1823,6 +1824,10 @@ my_b_write_quoted(IO_CACHE *file, const uchar *ptr, uint length) { if (*s > 0x1F) my_b_write(file, s, 1); + else if (*s == '\'') + my_b_write(file, "\\'", 2); + else if (*s == '\\') + my_b_write(file, "\\\\", 2); else { uchar hex[10]; @@ -4828,10 +4833,21 @@ do_server_version_split(char* version, for (uint i= 0; i<=2; i++) { number= strtoul(p, &r, 10); - split_versions->ver[i]= (uchar) number; - DBUG_ASSERT(number < 256); // fit in uchar + /* + It is an invalid version if any version number greater than 255 or + first number is not followed by '.'. + */ + if (number < 256 && (*r == '.' || i != 0)) + split_versions->ver[i]= (uchar) number; + else + { + split_versions->ver[0]= 0; + split_versions->ver[1]= 0; + split_versions->ver[2]= 0; + break; + } + p= r; - DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice if (*r == '.') p++; // skip the dot } @@ -4849,7 +4865,6 @@ do_server_version_split(char* version, into 'server_version_split': X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z} X.Yabc -> {X,Y,0} - Xabc -> {X,0,0} 'server_version_split' is then used for lookups to find if the server which created this event has some known bug. */ diff --git a/sql/log_event.h b/sql/log_event.h index ff13cab9cd5..24a08ae107c 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1127,7 +1127,7 @@ public: return thd ? thd->db : 0; } #else - Log_event() : temp_buf(0) {} + Log_event() : temp_buf(0), flags(0) {} /* avoid having to link mysqlbinlog against libpthread */ static Log_event* read_log_event(IO_CACHE* file, const Format_description_log_event @@ -2464,12 +2464,26 @@ public: #ifdef MYSQL_SERVER bool write(IO_CACHE* file); #endif - bool is_valid() const + bool header_is_valid() const { return ((common_header_len >= ((binlog_version==1) ? 
OLD_HEADER_LEN : LOG_EVENT_MINIMAL_HEADER_LEN)) && (post_header_len != NULL)); } + + bool version_is_valid() const + { + /* It is invalid only when all version numbers are 0 */ + return !(server_version_split.ver[0] == 0 && + server_version_split.ver[1] == 0 && + server_version_split.ver[2] == 0); + } + + bool is_valid() const + { + return header_is_valid() && version_is_valid(); + } + int get_data_size() { /* diff --git a/sql/mdl.h b/sql/mdl.h index af7d75c1297..68f24a7a0e8 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -242,8 +242,14 @@ public: const char *db, const char *name) { m_ptr[0]= (char) mdl_namespace; - m_db_name_length= (uint16) (strmov(m_ptr + 1, db) - m_ptr - 1); - m_length= (uint16) (strmov(m_ptr + m_db_name_length + 2, name) - m_ptr + 1); + /* + It is responsibility of caller to ensure that db and object names + are not longer than NAME_LEN. Still we play safe and try to avoid + buffer overruns. + */ + m_db_name_length= (uint16) (strmake(m_ptr + 1, db, NAME_LEN) - m_ptr - 1); + m_length= (uint16) (strmake(m_ptr + m_db_name_length + 2, name, NAME_LEN) - + m_ptr + 1); } void mdl_key_init(const MDL_key *rhs) { diff --git a/sql/mysqld.cc b/sql/mysqld.cc index fa8b855d91c..19d01e61771 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1310,6 +1310,7 @@ static void clean_up(bool print_message); static int test_if_case_insensitive(const char *dir_name); #ifndef EMBEDDED_LIBRARY +static bool pid_file_created= false; static void usage(void); static void start_signal_handler(void); static void close_server_sock(); @@ -1318,6 +1319,7 @@ static void wait_for_signal_thread_to_end(void); static void create_pid_file(); static void mysqld_exit(int exit_code) __attribute__((noreturn)); #endif +static void delete_pid_file(myf flags); static void end_ssl(); @@ -1825,7 +1827,6 @@ void clean_up(bool print_message) item_user_lock_free(); lex_free(); /* Free some memory */ item_create_cleanup(); - free_charsets(); if (!opt_noacl) { #ifdef HAVE_DLOPEN @@ -1872,10 +1873,8 @@ void clean_up(bool print_message) debug_sync_end(); #endif /* defined(ENABLED_DEBUG_SYNC) */ -#if !defined(EMBEDDED_LIBRARY) - if (!opt_bootstrap) - mysql_file_delete(key_file_pid, pidfile_name, MYF(0)); // This may not always exist -#endif + delete_pid_file(MYF(0)); + if (print_message && my_default_lc_messages && server_start_time) sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname); cleanup_errmsgs(); @@ -1891,6 +1890,7 @@ void clean_up(bool print_message) my_atomic_rwlock_destroy(&thread_running_lock); my_atomic_rwlock_destroy(&thread_count_lock); my_atomic_rwlock_destroy(&statistics_lock); + free_charsets(); mysql_mutex_lock(&LOCK_thread_count); DBUG_PRINT("quit", ("got thread count lock")); ready_to_exit=1; @@ -3261,14 +3261,7 @@ pthread_handler_t handle_shutdown(void *arg) } #endif -const char *load_default_groups[]= { -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -"mysql_cluster", -#endif -"mysqld", "server", MYSQL_BASE_VERSION, -"mariadb", MARIADB_BASE_VERSION, -"client-server", -0, 0}; +#include <mysqld_default_groups.h> #if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) static const int load_default_groups_sz= @@ -5013,9 +5006,7 @@ int mysqld_main(int argc, char **argv) (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL); - - if (!opt_bootstrap) - mysql_file_delete(key_file_pid, pidfile_name, MYF(MY_WME)); // Not needed anymore + delete_pid_file(MYF(MY_WME)); if (unix_sock != INVALID_SOCKET) unlink(mysqld_unix_port); @@ -7434,7 +7425,7 @@ static int mysql_init_variables(void) log_error_file_ptr= 
log_error_file; protocol_version= PROTOCOL_VERSION; what_to_log= ~ (1L << (uint) COM_TIME); - refresh_version= 1L; /* Increments on each reload */ + refresh_version= 2L; /* Increments on each reload. 0 and 1 are reserved */ denied_connections= 0; executed_events= 0; global_query_id= thread_id= 1L; @@ -8461,13 +8452,14 @@ static void create_pid_file() if ((file= mysql_file_create(key_file_pid, pidfile_name, 0664, O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0) { - char buff[21], *end; + char buff[MAX_BIGINT_WIDTH + 1], *end; end= int10_to_str((long) getpid(), buff, 10); *end++= '\n'; if (!mysql_file_write(file, (uchar*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP))) { mysql_file_close(file, MYF(0)); + pid_file_created= true; return; } mysql_file_close(file, MYF(0)); @@ -8477,6 +8469,26 @@ static void create_pid_file() } #endif /* EMBEDDED_LIBRARY */ + +/** + Remove the process' pid file. + + @param flags file operation flags +*/ + +static void delete_pid_file(myf flags) +{ +#ifndef EMBEDDED_LIBRARY + if (pid_file_created) + { + mysql_file_delete(key_file_pid, pidfile_name, flags); + pid_file_created= false; + } +#endif /* EMBEDDED_LIBRARY */ + return; +} + + /** Clear most status variables. */ void refresh_status(THD *thd) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7a3a0c94a18..8b07e022ad3 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -11691,7 +11691,8 @@ static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO **first_non_infix_part); static bool check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, - Field::imagetype image_type); + Field::imagetype image_type, + bool *has_min_max_fld, bool *has_other_fld); static void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, @@ -12031,6 +12032,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) else goto next_index; } + /* + This function is called on the precondition that the index is covering. + Therefore if the GROUP BY list contains more elements than the index, + these are duplicates. The GROUP BY list cannot be a prefix of the index. + */ + if (cur_part == end_part && tmp_group) + goto next_index; } /* Check (GA2) if this is a DISTINCT query. @@ -12084,7 +12092,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) cur_parts have bits set for only used keyparts. */ ulonglong all_parts, cur_parts; - all_parts= (1<<max_key_part) - 1; + all_parts= (1ULL << max_key_part) - 1; cur_parts= used_key_parts_map.to_ulonglong() >> 1; if (all_parts != cur_parts) goto next_index; @@ -12241,10 +12249,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) DBUG_RETURN(NULL); /* Check (SA3) for the where clause. */ + bool has_min_max_fld= false, has_other_fld= false; if (join->conds && min_max_arg_item && !check_group_min_max_predicates(join->conds, min_max_arg_item, (index_info->flags & HA_SPATIAL) ? 
- Field::itMBR : Field::itRAW)) + Field::itMBR : Field::itRAW, + &has_min_max_fld, &has_other_fld)) DBUG_RETURN(NULL); /* @@ -12292,16 +12302,24 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) SYNOPSIS check_group_min_max_predicates() - cond tree (or subtree) describing all or part of the WHERE - clause being analyzed - min_max_arg_item the field referenced by the MIN/MAX function(s) - min_max_arg_part the keypart of the MIN/MAX argument if any + cond [in] the expression tree being analyzed + min_max_arg [in] the field referenced by the MIN/MAX function(s) + image_type [in] + has_min_max_arg [out] true if the subtree being analyzed references min_max_arg + has_other_arg [out] true if the subtree being analyzed references a column + other min_max_arg DESCRIPTION The function walks recursively over the cond tree representing a WHERE clause, and checks condition (SA3) - if a field is referenced by a MIN/MAX aggregate function, it is referenced only by one of the following - predicates: {=, !=, <, <=, >, >=, between, is null, is not null}. + predicates $FUNC$: + {=, !=, <, <=, >, >=, between, is [not] null, multiple equal}. + In addition the function checks that the WHERE condition is equivalent to + "cond1 AND cond2" where : + cond1 - does not use min_max_column at all. + cond2 - is an AND/OR tree with leaves in form + "$FUNC$(min_max_column[, const])". RETURN TRUE if cond passes the test @@ -12310,7 +12328,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) static bool check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, - Field::imagetype image_type) + Field::imagetype image_type, + bool *has_min_max_arg, bool *has_other_arg) { DBUG_ENTER("check_group_min_max_predicates"); DBUG_ASSERT(cond && min_max_arg_item); @@ -12322,12 +12341,24 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, DBUG_PRINT("info", ("Analyzing: %s", ((Item_func*) cond)->func_name())); List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); Item *and_or_arg; + Item_func::Functype func_type= ((Item_cond*) cond)->functype(); + bool has_min_max= false, has_other= false; while ((and_or_arg= li++)) { - if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, - image_type)) + /* + The WHERE clause doesn't pass the condition if: + (1) any subtree doesn't pass the condition or + (2) the subtree passes the test, but it is an OR and it references both + the min/max argument and other columns. + */ + if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1 + image_type, + &has_min_max, &has_other) || + (func_type == Item_func::COND_OR_FUNC && has_min_max && has_other))//2 DBUG_RETURN(FALSE); } + *has_min_max_arg= has_min_max || *has_min_max_arg; + *has_other_arg= has_other || *has_other_arg; DBUG_RETURN(TRUE); } @@ -12361,6 +12392,10 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, if (cond_type == Item::FIELD_ITEM) { DBUG_PRINT("info", ("Analyzing: %s", cond->full_name())); + if (min_max_arg_item->eq((Item_field*)cond, 1)) + *has_min_max_arg= true; + else + *has_other_arg= true; DBUG_RETURN(TRUE); } @@ -12369,9 +12404,33 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, /* Test if cond references only group-by or non-group fields. 
*/ Item_func *pred= (Item_func*) cond; + Item_func::Functype pred_type= pred->functype(); + DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); + if (pred_type == Item_func::MULT_EQUAL_FUNC) + { + /* + Check that each field in a multiple equality is either a constant or + it is a reference to the min/max argument, or it doesn't contain the + min/max argument at all. + */ + Item_equal_fields_iterator eq_it(*((Item_equal*)pred)); + Item *eq_item; + bool has_min_max= false, has_other= false; + while ((eq_item= eq_it++)) + { + if (min_max_arg_item->eq(eq_item->real_item(), 1)) + has_min_max= true; + else + has_other= true; + } + *has_min_max_arg= has_min_max || *has_min_max_arg; + *has_other_arg= has_other || *has_other_arg; + DBUG_RETURN(!(has_min_max && has_other)); + } + Item **arguments= pred->arguments(); Item *cur_arg; - DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); + bool has_min_max= false, has_other= false; for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) { cur_arg= arguments[arg_idx]->real_item(); @@ -12380,11 +12439,11 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, { if (min_max_arg_item->eq(cur_arg, 1)) { - /* - If pred references the MIN/MAX argument, check whether pred is a range - condition that compares the MIN/MAX argument with a constant. - */ - Item_func::Functype pred_type= pred->functype(); + has_min_max= true; + /* + If pred references the MIN/MAX argument, check whether pred is a range + condition that compares the MIN/MAX argument with a constant. + */ if (pred_type != Item_func::EQUAL_FUNC && pred_type != Item_func::LT_FUNC && pred_type != Item_func::LE_FUNC && @@ -12424,14 +12483,16 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, min_max_arg_item->field->cmp_type() != args[1]->result_type()))) DBUG_RETURN(FALSE); } + else + has_other= true; } else if (cur_arg->type() == Item::FUNC_ITEM) { - if (!check_group_min_max_predicates(cur_arg, min_max_arg_item, - image_type)) + if (!check_group_min_max_predicates(cur_arg, min_max_arg_item, image_type, + &has_min_max, &has_other)) DBUG_RETURN(FALSE); } - else if (cur_arg->const_item()) + else if (cur_arg->const_item() && !cur_arg->is_expensive()) { /* For predicates of the form "const OP expr" we also have to check 'expr' @@ -12441,7 +12502,11 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, } else DBUG_RETURN(FALSE); + if(has_min_max && has_other) + DBUG_RETURN(FALSE); } + *has_min_max_arg= has_min_max || *has_min_max_arg; + *has_other_arg= has_other || *has_other_arg; DBUG_RETURN(TRUE); } diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 6698d4af28d..710c04ba063 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -2549,6 +2549,10 @@ void advance_sj_state(JOIN *join, table_map remaining_tables, uint idx, /* Mark strategy as used */ (*strategy)->mark_used(); pos->sj_strategy= sj_strategy; + if (sj_strategy == SJ_OPT_MATERIALIZE) + join->sjm_lookup_tables |= handled_fanout; + else + join->sjm_lookup_tables &= ~handled_fanout; *current_read_time= read_time; *current_record_count= rec_count; join->cur_dups_producing_tables &= ~handled_fanout; @@ -3073,6 +3077,13 @@ void restore_prev_sj_state(const table_map remaining_tables, const JOIN_TAB *tab, uint idx) { TABLE_LIST *emb_sj_nest; + + if (tab->emb_sj_nest) + { + table_map subq_tables= tab->emb_sj_nest->sj_inner_tables; + tab->join->sjm_lookup_tables &= ~subq_tables; + } + if ((emb_sj_nest= tab->emb_sj_nest)) { /* If we're removing the 
last SJ-inner table, remove the sj-nest */ @@ -3250,6 +3261,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join) uint tablenr; table_map remaining_tables= 0; table_map handled_tabs= 0; + join->sjm_lookup_tables= 0; for (tablenr= table_count - 1 ; tablenr != join->const_tables - 1; tablenr--) { POSITION *pos= join->best_positions + tablenr; @@ -3275,6 +3287,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join) first= tablenr - sjm->tables + 1; join->best_positions[first].n_sj_tables= sjm->tables; join->best_positions[first].sj_strategy= SJ_OPT_MATERIALIZE; + join->sjm_lookup_tables|= s->table->map; } else if (pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN) { diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 1b9e744bcc1..8a1170cf4fa 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -878,8 +878,13 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE * DBUG_ENTER("table_def::create_conversion_table"); List<Create_field> field_list; - - for (uint col= 0 ; col < size() ; ++col) + /* + At slave, columns may differ. So we should create + min(columns@master, columns@slave) columns in the + conversion table. + */ + uint const cols_to_create= min(target_table->s->fields, size()); + for (uint col= 0 ; col < cols_to_create; ++col) { Create_field *field_def= (Create_field*) alloc_root(thd->mem_root, sizeof(Create_field)); diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index a6ebebeae39..8c7a8000695 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -4989,13 +4989,13 @@ ER_WRONG_NAME_FOR_CATALOG 42000 spa "Nombre de catalog incorrecto '%-.100s'" swe "Felaktigt katalog namn '%-.100s'" ER_WARN_QC_RESIZE - eng "Query cache failed to set size %lu; new query cache size is %lu" - ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu" - por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu" - rus "Кеш запросов не может установить размер %lu, новый размер кеша зпросов - %lu" - spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu" - swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu" - ukr "Кеш запитів неспроможен встановити розмір %lu, новий розмір кеша запитів - %lu" + eng "Query cache failed to set size %llu; new query cache size is %lu" + ger "Änderung der Query-Cache-Größe auf %llu fehlgeschlagen; neue Query-Cache-Größe ist %lu" + por "Falha em Query cache para configurar tamanho %llu, novo tamanho de query cache é %lu" + rus "Кеш запросов не может установить размер %llu, новый размер кеша зпросов - %lu" + spa "Query cache fallada para configurar tamaño %llu, nuevo tamaño de query cache es %lu" + swe "Storleken av "Query cache" kunde inte sättas till %llu, ny storlek är %lu" + ukr "Кеш запитів неспроможен встановити розмір %llu, новий розмір кеша запитів - %lu" ER_BAD_FT_COLUMN eng "Column '%-.192s' cannot be part of FULLTEXT index" ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein" diff --git a/sql/slave.cc b/sql/slave.cc index da10169d97b..e2a4d229ab9 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1337,6 +1337,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) unavailable (very old master not supporting UNIX_TIMESTAMP()?). 
*/ +#ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("dbug.before_get_UNIX_TIMESTAMP", { const char act[]= @@ -1346,6 +1347,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) DBUG_ASSERT(!debug_sync_set_action(current_thd, STRING_WITH_LEN(act))); };); +#endif master_res= NULL; if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) && @@ -1387,6 +1389,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) Note: we could have put a @@SERVER_ID in the previous SELECT UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters. */ +#ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("dbug.before_get_SERVER_ID", { const char act[]= @@ -1396,6 +1399,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) DBUG_ASSERT(!debug_sync_set_action(current_thd, STRING_WITH_LEN(act))); };); +#endif master_res= NULL; master_row= NULL; if (!mysql_real_query(mysql, @@ -3241,6 +3245,7 @@ pthread_handler_t handle_slave_io(void *arg) connected: +#ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("dbug.before_get_running_status_yes", { const char act[]= @@ -3250,6 +3255,7 @@ connected: DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act))); };); +#endif // TODO: the assignment below should be under mutex (5.0) mi->slave_running= MYSQL_SLAVE_RUN_CONNECT; diff --git a/sql/spatial.cc b/sql/spatial.cc index de0b563eaf4..2359f4fa271 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -312,6 +312,9 @@ bool Geometry::envelope(String *result) const const char *end; if (get_mbr(&mbr, &end)) + return 1; + + if (!mbr.valid()) { /* Empty geometry */ if (result->reserve(1 + 4*2)) @@ -444,18 +447,19 @@ const char *Geometry::append_points(String *txt, uint32 n_points, const char *Geometry::get_mbr_for_points(MBR *mbr, const char *data, uint offset) const { - uint32 points; + uint32 n_points; /* read number of points */ if (no_data(data, 4)) return 0; - points= uint4korr(data); + n_points= uint4korr(data); data+= 4; - if (no_data(data, (POINT_DATA_SIZE + offset) * points)) + if (n_points > max_n_points || + no_data(data, (POINT_DATA_SIZE + offset) * n_points)) return 0; /* Calculate MBR for points */ - while (points--) + while (n_points--) { data+= offset; mbr->add_xy(data, data + SIZEOF_STORED_DOUBLE); @@ -559,9 +563,12 @@ const Geometry::Class_info *Gis_point::get_class_info() const uint32 Gis_line_string::get_data_size() const { - if (no_data(m_data, 4)) + uint32 n_points, size; + if (no_data(m_data, 4) || + (n_points= uint4korr(m_data)) > max_n_points || + no_data(m_data, (size= 4 + n_points * POINT_DATA_SIZE))) return GET_SIZE_ERROR; - return 4 + uint4korr(m_data) * POINT_DATA_SIZE; + return size; } @@ -631,7 +638,7 @@ bool Gis_line_string::get_data_as_wkt(String *txt, const char **end) const n_points= uint4korr(data); data += 4; - if (n_points < 1 || + if (n_points < 1 || n_points > max_n_points || no_data(data, POINT_DATA_SIZE * n_points) || txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1)*2 + 1) * n_points)) return 1; @@ -669,7 +676,8 @@ int Gis_line_string::geom_length(double *len, const char **end) const return 1; n_points= uint4korr(data); data+= 4; - if (n_points < 1 || no_data(data, POINT_DATA_SIZE * n_points)) + if (n_points < 1 || n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points)) return 1; get_point(&prev_x, &prev_y, data); @@ -717,7 +725,7 @@ int Gis_line_string::is_closed(int *closed) const return 0; } data+= 4; - if (n_points == 0 || + if (n_points == 0 || n_points > max_n_points || no_data(data, POINT_DATA_SIZE * 
n_points)) return 1; @@ -753,6 +761,9 @@ int Gis_line_string::end_point(String *result) const if (no_data(m_data, 4)) return 1; n_points= uint4korr(m_data); + if (n_points == 0 || n_points > max_n_points || + no_data(m_data, POINT_DATA_SIZE * n_points)) + return 1; return create_point(result, m_data + 4 + (n_points - 1) * POINT_DATA_SIZE); } @@ -762,11 +773,14 @@ int Gis_line_string::point_n(uint32 num, String *result) const uint32 n_points; if (no_data(m_data, 4)) return 1; + num--; n_points= uint4korr(m_data); - if ((uint32) (num - 1) >= n_points) // means (num > n_points || num < 1) + if (num >= n_points || + num > max_n_points || // means (num > n_points || num < 1) + no_data(m_data, num * POINT_DATA_SIZE)) return 1; - return create_point(result, m_data + 4 + (num - 1) * POINT_DATA_SIZE); + return create_point(result, m_data + 4 + num*POINT_DATA_SIZE); } @@ -782,7 +796,8 @@ int Gis_line_string::store_shapes(Gcalc_shape_transporter *trn) const return 1; n_points= uint4korr(data); data+= 4; - if (n_points < 1 || no_data(data, POINT_DATA_SIZE * n_points)) + if (n_points < 1 || n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points)) return 1; trn->start_line(); @@ -815,6 +830,7 @@ const Geometry::Class_info *Gis_line_string::get_class_info() const uint32 Gis_polygon::get_data_size() const { uint32 n_linear_rings; + uint32 n_points; const char *data= m_data; if (no_data(data, 4)) @@ -824,10 +840,13 @@ uint32 Gis_polygon::get_data_size() const while (n_linear_rings--) { - if (no_data(data, 4)) + if (no_data(data, 4) || + (n_points= uint4korr(data)) > max_n_points) return GET_SIZE_ERROR; - data+= 4 + uint4korr(data)*POINT_DATA_SIZE; + data+= 4 + n_points*POINT_DATA_SIZE; } + if (no_data(data, 0)) + return GET_SIZE_ERROR; return (uint32) (data - m_data); } @@ -966,7 +985,7 @@ bool Gis_polygon::get_data_as_wkt(String *txt, const char **end) const return 1; n_points= uint4korr(data); data+= 4; - if (no_data(data, POINT_DATA_SIZE * n_points) || + if (n_points > max_n_points || no_data(data, POINT_DATA_SIZE * n_points) || txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) return 1; txt->qs_append('('); @@ -1020,7 +1039,8 @@ int Gis_polygon::area(double *ar, const char **end_of_data) const if (no_data(data, 4)) return 1; n_points= uint4korr(data); - if (no_data(data, POINT_DATA_SIZE * n_points)) + if (n_points == 0 || n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points)) return 1; get_point(&prev_x, &prev_y, data+4); data+= (4+POINT_DATA_SIZE); @@ -1056,7 +1076,8 @@ int Gis_polygon::exterior_ring(String *result) const n_points= uint4korr(data); data+= 4; length= n_points * POINT_DATA_SIZE; - if (no_data(data, length) || result->reserve(1 + 4 + 4 + length)) + if (n_points > max_n_points || + no_data(data, length) || result->reserve(1 + 4 + 4 + length)) return 1; result->q_append((char) wkb_ndr); @@ -1102,7 +1123,8 @@ int Gis_polygon::interior_ring_n(uint32 num, String *result) const n_points= uint4korr(data); points_size= n_points * POINT_DATA_SIZE; data+= 4; - if (no_data(data, points_size) || result->reserve(1 + 4 + 4 + points_size)) + if (n_points > max_n_points || + no_data(data, points_size) || result->reserve(1 + 4 + 4 + points_size)) return 1; result->q_append((char) wkb_ndr); @@ -1122,13 +1144,11 @@ int Gis_polygon::centroid_xy(double *x, double *y) const const char *data= m_data; bool first_loop= 1; - if (no_data(data, 4)) + if (no_data(data, 4) || + (n_linear_rings= uint4korr(data)) == 0) return 1; - n_linear_rings= uint4korr(data); data+= 
4; - DBUG_ASSERT(n_linear_rings > 0); - while (n_linear_rings--) { uint32 n_points, org_n_points; @@ -1141,7 +1161,8 @@ int Gis_polygon::centroid_xy(double *x, double *y) const return 1; org_n_points= n_points= uint4korr(data); data+= 4; - if (no_data(data, POINT_DATA_SIZE * n_points)) + if (n_points == 0 || n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points)) return 1; get_point(&prev_x, &prev_y, data); data+= POINT_DATA_SIZE; @@ -1215,7 +1236,8 @@ int Gis_polygon::store_shapes(Gcalc_shape_transporter *trn) const return 1; n_points= uint4korr(data); data+= 4; - if (!n_points || no_data(data, POINT_DATA_SIZE * n_points)) + if (!n_points || n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points)) return 1; trn->start_ring(); @@ -1268,9 +1290,14 @@ const Geometry::Class_info *Gis_polygon::get_class_info() const uint32 Gis_multi_point::get_data_size() const { - if (no_data(m_data, 4)) - return GET_SIZE_ERROR; - return 4 + uint4korr(m_data)*(POINT_DATA_SIZE + WKB_HEADER_SIZE); + uint32 n_points; + uint32 size; + + if (no_data(m_data, 4) || + (n_points= uint4korr(m_data)) > max_n_points || + no_data(m_data, (size= 4 + n_points*(POINT_DATA_SIZE + WKB_HEADER_SIZE)))) + return GET_SIZE_ERROR; + return size; } @@ -1364,8 +1391,8 @@ bool Gis_multi_point::get_data_as_wkt(String *txt, const char **end) const return 1; n_points= uint4korr(m_data); - if (no_data(m_data+4, - n_points * (POINT_DATA_SIZE + WKB_HEADER_SIZE)) || + if (n_points > max_n_points || + no_data(m_data+4, n_points * (POINT_DATA_SIZE + WKB_HEADER_SIZE)) || txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) return 1; *end= append_points(txt, n_points, m_data+4, WKB_HEADER_SIZE); @@ -1446,6 +1473,7 @@ const Geometry::Class_info *Gis_multi_point::get_class_info() const uint32 Gis_multi_line_string::get_data_size() const { uint32 n_line_strings; + uint32 n_points; const char *data= m_data; if (no_data(data, 4)) @@ -1455,11 +1483,13 @@ uint32 Gis_multi_line_string::get_data_size() const while (n_line_strings--) { - if (no_data(data, WKB_HEADER_SIZE + 4)) + if (no_data(data, WKB_HEADER_SIZE + 4) || + (n_points= uint4korr(data + WKB_HEADER_SIZE)) > max_n_points) return GET_SIZE_ERROR; - data+= (WKB_HEADER_SIZE + 4 + uint4korr(data + WKB_HEADER_SIZE) * - POINT_DATA_SIZE); + data+= (WKB_HEADER_SIZE + 4 + n_points*POINT_DATA_SIZE); } + if (no_data(data, 0)) + return GET_SIZE_ERROR; return (uint32) (data - m_data); } @@ -1583,7 +1613,7 @@ bool Gis_multi_line_string::get_data_as_wkt(String *txt, return 1; n_points= uint4korr(data + WKB_HEADER_SIZE); data+= WKB_HEADER_SIZE + 4; - if (no_data(data, n_points * POINT_DATA_SIZE) || + if (n_points > max_n_points || no_data(data, n_points * POINT_DATA_SIZE) || txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points)) return 1; txt->qs_append('('); @@ -1644,7 +1674,7 @@ int Gis_multi_line_string::geometry_n(uint32 num, String *result) const return 1; n_points= uint4korr(data + WKB_HEADER_SIZE); length= WKB_HEADER_SIZE + 4+ POINT_DATA_SIZE * n_points; - if (no_data(data, length)) + if (n_points > max_n_points || no_data(data, length)) return 1; if (!--num) break; @@ -1755,6 +1785,7 @@ const Geometry::Class_info *Gis_multi_line_string::get_class_info() const uint32 Gis_multi_polygon::get_data_size() const { uint32 n_polygons; + uint32 n_points; const char *data= m_data; if (no_data(data, 4)) @@ -1773,11 +1804,14 @@ uint32 Gis_multi_polygon::get_data_size() const while (n_linear_rings--) { - if (no_data(data, 4)) + if (no_data(data, 4) || + (n_points= 
uint4korr(data)) > max_n_points) return GET_SIZE_ERROR; - data+= 4 + uint4korr(data) * POINT_DATA_SIZE; + data+= 4 + n_points * POINT_DATA_SIZE; } } + if (no_data(data, 0)) + return GET_SIZE_ERROR; return (uint32) (data - m_data); } @@ -1905,7 +1939,8 @@ bool Gis_multi_polygon::get_data_as_wkt(String *txt, const char **end) const return 1; uint32 n_points= uint4korr(data); data+= 4; - if (no_data(data, POINT_DATA_SIZE * n_points) || + if (n_points > max_n_points || + no_data(data, POINT_DATA_SIZE * n_points) || txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points, 512)) return 1; @@ -1988,6 +2023,8 @@ int Gis_multi_polygon::geometry_n(uint32 num, String *result) const if (no_data(data, 4)) return 1; n_points= uint4korr(data); + if (n_points > max_n_points) + return 1; data+= 4 + POINT_DATA_SIZE * n_points; } } while (--num); @@ -2317,7 +2354,7 @@ bool Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const n_objects= uint4korr(data); data+= 4; if (n_objects == 0) - return 1; + goto exit; while (n_objects--) { @@ -2334,6 +2371,7 @@ bool Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const if (geom->get_mbr(mbr, &data)) return 1; } +exit: *end= data; return 0; } @@ -2351,10 +2389,11 @@ int Gis_geometry_collection::area(double *ar, const char **end) const return 1; n_objects= uint4korr(data); data+= 4; - if (n_objects == 0) - return 1; result= 0.0; + if (n_objects == 0) + goto exit; + while (n_objects--) { uint32 wkb_type; @@ -2371,6 +2410,7 @@ int Gis_geometry_collection::area(double *ar, const char **end) const return 1; result+= *ar; } +exit: *end= data; *ar= result; return 0; @@ -2389,10 +2429,11 @@ int Gis_geometry_collection::geom_length(double *len, const char **end) const return 1; n_objects= uint4korr(data); data+= 4; + result= 0.0; + if (n_objects == 0) - return 1; + goto exit; - result= 0.0; while (n_objects--) { uint32 wkb_type; @@ -2409,6 +2450,8 @@ int Gis_geometry_collection::geom_length(double *len, const char **end) const return 1; result+= *len; } + +exit: *end= data; *len= result; return 0; diff --git a/sql/spatial.h b/sql/spatial.h index 1277e7bc01c..9aaedfe8a20 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -200,6 +200,9 @@ struct MBR return (d == intersection.dimension()); } + + int valid() const + { return xmin <= xmax && ymin <= ymax; } }; @@ -210,6 +213,11 @@ struct Geometry_buffer; class Geometry { public: + // Maximum number of points in feature that can fit into String + static const uint32 max_n_points= + (uint32) (INT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) / + POINT_DATA_SIZE; + Geometry() {} /* Remove gcc warning */ virtual ~Geometry() {} /* Remove gcc warning */ static void *operator new(size_t size, void *buffer) @@ -391,10 +399,6 @@ public: class Gis_line_string: public Geometry { - // Maximum number of points in LineString that can fit into String - static const uint32 max_n_points= - (uint32) (UINT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) / - POINT_DATA_SIZE; public: Gis_line_string() {} /* Remove gcc warning */ virtual ~Gis_line_string() {} /* Remove gcc warning */ diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 25b54abf983..f6b3a7adeb7 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -52,6 +52,8 @@ #include "sql_db.h" #include "sql_array.h" +#include "sql_plugin_compat.h" + bool mysql_user_table_is_in_short_password_format= false; static const @@ -8876,7 +8878,20 @@ static int do_auth_once(THD *thd, const LEX_STRING *auth_plugin_name, if (plugin) { st_mysql_auth *auth= (st_mysql_auth *) 
plugin_decl(plugin)->info; - res= auth->authenticate_user(mpvio, &mpvio->auth_info); + switch (auth->interface_version) { + case 0x0200: + res= auth->authenticate_user(mpvio, &mpvio->auth_info); + break; + case 0x0100: + { + MYSQL_SERVER_AUTH_INFO_0x0100 compat; + compat.downgrade(&mpvio->auth_info); + res= auth->authenticate_user(mpvio, (MYSQL_SERVER_AUTH_INFO *)&compat); + compat.upgrade(&mpvio->auth_info); + } + break; + default: DBUG_ASSERT(0); + } if (unlock_plugin) plugin_unlock(thd, plugin); @@ -8927,8 +8942,6 @@ bool acl_authenticate(THD *thd, uint connect_errors, : COM_CONNECT; DBUG_ENTER("acl_authenticate"); - compile_time_assert(MYSQL_USERNAME_LENGTH == USERNAME_LENGTH); - bzero(&mpvio, sizeof(mpvio)); mpvio.read_packet= server_mpvio_read_packet; mpvio.write_packet= server_mpvio_write_packet; diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index e6bbef482a7..10636fd7296 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -89,6 +89,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, const char **ext; MY_STAT stat_info; Open_table_context ot_ctx(thd, (MYSQL_OPEN_IGNORE_FLUSH | + MYSQL_OPEN_FOR_REPAIR | MYSQL_OPEN_HAS_MDL_LOCK | MYSQL_LOCK_IGNORE_TIMEOUT)); DBUG_ENTER("prepare_for_repair"); @@ -199,7 +200,8 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, */ pos_in_locked_tables= table->pos_in_locked_tables; if (wait_while_table_is_used(thd, table, - HA_EXTRA_PREPARE_FOR_FORCED_CLOSE)) + HA_EXTRA_PREPARE_FOR_FORCED_CLOSE, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto end; /* Close table but don't remove from locked list */ close_all_tables_for_name(thd, table_list->table->s, @@ -593,8 +595,10 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, */ if (lock_type == TL_WRITE && !table->table->s->tmp_table) { + table->table->s->protect_against_usage(); if (wait_while_table_is_used(thd, table->table, - HA_EXTRA_PREPARE_FOR_RENAME)) + HA_EXTRA_PREPARE_FOR_RENAME, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto err; DEBUG_SYNC(thd, "after_admin_flush"); /* Flush entries in the query cache involving this table. */ @@ -826,6 +830,11 @@ send_result_message: case HA_ADMIN_TRY_ALTER: { + uint save_flags; + Alter_info *alter_info= &lex->alter_info; + + /* Store the original value of alter_info->flags */ + save_flags= alter_info->flags; /* This is currently used only by InnoDB. ha_innobase::optimize() answers "try with alter", so here we close the table, do an ALTER TABLE, @@ -833,10 +842,19 @@ send_result_message: We have to end the row, so analyze could return more rows. */ protocol->store(STRING_WITH_LEN("note"), system_charset_info); - protocol->store(STRING_WITH_LEN( - "Table does not support optimize, doing recreate + analyze instead"), - system_charset_info); - if (protocol->write()) + if(alter_info->flags & ALTER_ADMIN_PARTITION) + { + protocol->store(STRING_WITH_LEN( + "Table does not support optimize on partitions. 
All partitions " + "will be rebuilt and analyzed."),system_charset_info); + } + else + { + protocol->store(STRING_WITH_LEN( + "Table does not support optimize, doing recreate + analyze instead"), + system_charset_info); + } + if (protocol->write()) goto err; DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze...")); TABLE_LIST *save_next_local= table->next_local, @@ -855,6 +873,11 @@ send_result_message: table->mdl_request.ticket= NULL; DEBUG_SYNC(thd, "ha_admin_open_ltable"); table->mdl_request.set_type(MDL_SHARED_WRITE); + /* + Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags + to force analyze on all partitions. + */ + alter_info->flags &= ~(ALTER_ADMIN_PARTITION); if ((table->table= open_ltable(thd, table, lock_type, 0))) { result_code= table->table->file->ha_analyze(thd, check_opt); @@ -865,6 +888,7 @@ send_result_message: } else result_code= -1; // open failed + alter_info->flags= save_flags; } /* Start a new row for the final status row */ protocol->prepare_for_resend(); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index b3321bd2771..d32f39cae2f 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -327,12 +327,9 @@ uint create_table_def_key(THD *thd, char *key, const TABLE_LIST *table_list, bool tmp_table) { - char *db_end= strnmov(key, table_list->db, MAX_DBKEY_LENGTH - 2); - *db_end++= '\0'; - char *table_end= strnmov(db_end, table_list->table_name, - key + MAX_DBKEY_LENGTH - 1 - db_end); - *table_end++= '\0'; - uint key_length= (uint) (table_end-key); + uint key_length= create_table_def_key(key, table_list->db, + table_list->table_name); + if (tmp_table) { int4store(key + key_length, thd->server_id); @@ -835,13 +832,10 @@ void release_table_share(TABLE_SHARE *share) TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name) { char key[SAFE_NAME_LEN*2+2]; - TABLE_LIST table_list; uint key_length; mysql_mutex_assert_owner(&LOCK_open); - table_list.db= (char*) db; - table_list.table_name= (char*) table_name; - key_length= create_table_def_key((THD*) 0, key, &table_list, 0); + key_length= create_table_def_key(key, db, table_name); return (TABLE_SHARE*) my_hash_search(&table_def_cache, (uchar*) key, key_length); } @@ -1077,7 +1071,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, if (share) { kill_delayed_threads_for_table(share); - /* tdc_remove_table() also sets TABLE_SHARE::version to 0. 
*/ + /* tdc_remove_table() calls share->remove_from_cache_at_close() */ tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED, table->db, table->table_name, TRUE); found=1; @@ -2339,7 +2333,8 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *db, */ bool wait_while_table_is_used(THD *thd, TABLE *table, - enum ha_extra_function function) + enum ha_extra_function function, + enum_tdc_remove_table_type remove_type) { DBUG_ENTER("wait_while_table_is_used"); DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu", @@ -2350,7 +2345,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table, table->mdl_ticket, thd->variables.lock_wait_timeout)) DBUG_RETURN(TRUE); - tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN, + tdc_remove_table(thd, remove_type, table->s->db.str, table->s->table_name.str, FALSE); /* extra() call must come only after all instances above are closed */ @@ -3096,7 +3091,9 @@ retry_share: goto err_unlock; } - if (!(flags & MYSQL_OPEN_IGNORE_FLUSH)) + if (!(flags & MYSQL_OPEN_IGNORE_FLUSH) || + (share->protected_against_usage() && + !(flags & MYSQL_OPEN_FOR_REPAIR))) { if (share->has_old_version()) { @@ -3243,7 +3240,7 @@ err_unlock: TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; - uint key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + uint key_length= create_table_def_key(key, db, table_name); for (TABLE *table= list; table ; table=table->next) { @@ -6156,17 +6153,27 @@ TABLE *open_table_uncached(THD *thd, const char *path, const char *db, } -bool rm_temporary_table(handlerton *base, char *path) +/** + Delete a temporary table. + + @param base Handlerton for table to be deleted. + @param path Path to the table to be deleted (i.e. path + to its .frm without an extension). + + @retval false - success. + @retval true - failure. +*/ + +bool rm_temporary_table(handlerton *base, const char *path) { bool error=0; handler *file; - char *ext; + char frm_path[FN_REFLEN + 1]; DBUG_ENTER("rm_temporary_table"); - strmov(ext= strend(path), reg_ext); - if (mysql_file_delete(key_file_frm, path, MYF(0))) + strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS); + if (mysql_file_delete(key_file_frm, frm_path, MYF(0))) error=1; /* purecov: inspected */ - *ext= 0; // remove extension file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base); if (file && file->ha_delete_table(path)) { @@ -8112,7 +8119,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, thd->mark_used_columns= mark_used_columns; DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); if (allow_sum_func) - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= + (nesting_map)1 << thd->lex->current_select->nest_level; thd->where= THD::DEFAULT_WHERE; save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; thd->lex->current_select->is_item_list_lookup= 0; @@ -9401,6 +9409,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, TABLE *table; TABLE_SHARE *share; DBUG_ENTER("tdc_remove_table"); + DBUG_PRINT("enter",("name: %s remove_type: %d", table_name, remove_type)); if (! 
has_lock) mysql_mutex_lock(&LOCK_open); @@ -9413,7 +9422,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name, MDL_EXCLUSIVE)); - key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + key_length= create_table_def_key(key, db, table_name); if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key, key_length))) @@ -9426,7 +9435,8 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, { DBUG_ASSERT(share->used_tables.is_empty()); } - else if (remove_type == TDC_RT_REMOVE_NOT_OWN) + else if (remove_type == TDC_RT_REMOVE_NOT_OWN || + remove_type == TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE) { I_P_List_iterator<TABLE, TABLE_share> it2(share->used_tables); while ((table= it2++)) @@ -9437,8 +9447,8 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, } #endif /* - Set share's version to zero in order to ensure that it gets - automatically deleted once it is no longer referenced. + Mark share to ensure that it gets automatically deleted once + it is no longer referenced. Note that code in TABLE_SHARE::wait_for_old_version() assumes that marking share as old and removal of its unused tables @@ -9447,7 +9457,13 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, TDC does not contain old shares which don't have any tables used. */ - share->version= 0; + if (remove_type == TDC_RT_REMOVE_NOT_OWN) + share->remove_from_cache_at_close(); + else + { + /* Ensure that no can open the table while it's used */ + share->protect_against_usage(); + } while ((table= it++)) free_cache_entry(table); @@ -9527,12 +9543,14 @@ open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias, { LEX_STRING pathstr; File_parser *parser; - char path[FN_REFLEN]; + char path[FN_REFLEN+1]; DBUG_ENTER("open_new_frm"); /* Create path with extension */ - pathstr.length= (uint) (strxmov(path, share->normalized_path.str, reg_ext, - NullS)- path); + pathstr.length= (uint) (strxnmov(path, sizeof(path) - 1, + share->normalized_path.str, + reg_ext, + NullS) - path); pathstr.str= path; if ((parser= sql_parse_prepare(&pathstr, mem_root, 1))) diff --git a/sql/sql_base.h b/sql/sql_base.h index 78ab8c7df24..7d1797e2883 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -60,7 +60,8 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, IGNORE_EXCEPT_NON_UNIQUE}; enum enum_tdc_remove_table_type {TDC_RT_REMOVE_ALL, TDC_RT_REMOVE_NOT_OWN, - TDC_RT_REMOVE_UNUSED}; + TDC_RT_REMOVE_UNUSED, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE}; /* bits for last argument to remove_table_from_cache() */ #define RTFC_NO_FLAG 0x0000 @@ -81,6 +82,31 @@ uint cached_table_definitions(void); uint create_table_def_key(THD *thd, char *key, const TABLE_LIST *table_list, bool tmp_table); + +/** + Create a table cache key for non-temporary table. + + @param key Buffer for key (must be at least NAME_LEN*2+2 bytes). + @param db Database name. + @param table_name Table name. + + @return Length of key. + + @sa create_table_def_key(thd, char *, table_list, bool) +*/ + +inline uint +create_table_def_key(char *key, const char *db, const char *table_name) +{ + /* + In theory caller should ensure that both db and table_name are + not longer than NAME_LEN bytes. In practice we play safe to avoid + buffer overruns. 
+ */ + return (uint)(strmake(strmake(key, db, NAME_LEN) + 1, table_name, + NAME_LEN) - key + 1); +} + TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key, uint key_length, uint db_flags, int *error, my_hash_value_type hash_value); @@ -128,6 +154,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, */ #define MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK 0x1000 #define MYSQL_LOCK_NOT_TEMPORARY 0x2000 +#define MYSQL_OPEN_FOR_REPAIR 0x4000 /** Please refer to the internals manual. */ #define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\ @@ -158,7 +185,7 @@ thr_lock_type read_lock_type_for_table(THD *thd, TABLE_LIST *table_list); my_bool mysql_rm_tmp_tables(void); -bool rm_temporary_table(handlerton *base, char *path); +bool rm_temporary_table(handlerton *base, const char *path); void close_tables_for_reopen(THD *thd, TABLE_LIST **tables, const MDL_savepoint &start_of_statement_svp); TABLE_LIST *find_table_in_list(TABLE_LIST *table, @@ -229,7 +256,9 @@ bool setup_tables_and_check_access(THD *thd, ulong want_access, bool full_table_list); bool wait_while_table_is_used(THD *thd, TABLE *table, - enum ha_extra_function function); + enum ha_extra_function function, + enum_tdc_remove_table_type remove_type= + TDC_RT_REMOVE_NOT_OWN); void drop_open_table(THD *thd, TABLE *table, const char *db_name, const char *table_name); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index ddc75254c9e..33b938225c0 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -828,18 +828,18 @@ void Query_cache_block::destroy() DBUG_VOID_RETURN; } -inline uint Query_cache_block::headers_len() +uint Query_cache_block::headers_len() { return (ALIGN_SIZE(sizeof(Query_cache_block_table)*n_tables) + ALIGN_SIZE(sizeof(Query_cache_block))); } -inline uchar* Query_cache_block::data(void) +uchar* Query_cache_block::data(void) { return (uchar*)( ((uchar*)this) + headers_len() ); } -inline Query_cache_query * Query_cache_block::query() +Query_cache_query * Query_cache_block::query() { #ifndef DBUG_OFF if (type != QUERY) @@ -848,7 +848,7 @@ inline Query_cache_query * Query_cache_block::query() return (Query_cache_query *) data(); } -inline Query_cache_table * Query_cache_block::table() +Query_cache_table * Query_cache_block::table() { #ifndef DBUG_OFF if (type != TABLE) @@ -857,7 +857,7 @@ inline Query_cache_table * Query_cache_block::table() return (Query_cache_table *) data(); } -inline Query_cache_result * Query_cache_block::result() +Query_cache_result * Query_cache_block::result() { #ifndef DBUG_OFF if (type != RESULT && type != RES_CONT && type != RES_BEG && @@ -867,7 +867,7 @@ inline Query_cache_result * Query_cache_block::result() return (Query_cache_result *) data(); } -inline Query_cache_block_table * Query_cache_block::table(TABLE_COUNTER_TYPE n) +Query_cache_block_table * Query_cache_block::table(TABLE_COUNTER_TYPE n) { return ((Query_cache_block_table *) (((uchar*)this)+ALIGN_SIZE(sizeof(Query_cache_block)) + @@ -3123,8 +3123,8 @@ void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list) char key[MAX_DBKEY_LENGTH]; uint key_length; - key_length=(uint) (strmov(strmov(key,table_list->db)+1, - table_list->table_name) -key)+ 1; + key_length= create_table_def_key(key, table_list->db, + table_list->table_name); // We don't store temporary tables => no key_length+=4 ... 
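/*
  Editorial aside, not part of the patch: the new inline create_table_def_key()
  above lays the table definition cache key out as "<db>\0<table_name>\0" and
  returns a length that counts both terminating zeros, clamping each name to
  NAME_LEN via strmake(). The same helper now replaces the ad-hoc
  strmov()/strnmov() chains in find_locked_table(), tdc_remove_table() and the
  query cache key builders. A minimal stand-alone sketch of just the key
  layout follows; every name in it is a placeholder, not the server's code.
*/
#include <cstdio>

static const unsigned NAME_LEN_SKETCH= 64;   /* stand-in for NAME_LEN */

/* strmake()-like helper: copy at most n bytes of src, always zero-terminate,
   and return a pointer to the terminating zero */
static char *copy_bounded(char *dst, const char *src, unsigned n)
{
  unsigned i= 0;
  while (i < n && src[i]) { dst[i]= src[i]; i++; }
  dst[i]= '\0';
  return dst + i;
}

static unsigned make_def_key(char *key, const char *db, const char *table)
{
  char *end= copy_bounded(copy_bounded(key, db, NAME_LEN_SKETCH) + 1,
                          table, NAME_LEN_SKETCH);
  return (unsigned) (end - key) + 1;           /* include the final '\0' */
}

int main()
{
  char key[NAME_LEN_SKETCH * 2 + 2];
  unsigned len= make_def_key(key, "test", "t1");
  printf("key layout \"test\\0t1\\0\", length %u\n", len);  /* prints 8 */
  return 0;
}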
invalidate_table(thd, (uchar *)key, key_length); @@ -3242,8 +3242,8 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, DBUG_PRINT("qcache", ("view: %s db: %s", tables_used->view_name.str, tables_used->view_db.str)); - key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1, - tables_used->view_name.str) - key) + 1; + key_length= create_table_def_key(key, tables_used->view_db.str, + tables_used->view_name.str); /* There are not callback function for for VIEWs */ @@ -4288,14 +4288,13 @@ my_bool Query_cache::move_by_type(uchar **border, case Query_cache_block::RESULT: { DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block, - (int) block->type)); + (int) block->type)); if (*border == 0) break; - Query_cache_block *query_block = block->result()->parent(), - *next = block->next, - *prev = block->prev; - Query_cache_block::block_type type = block->type; + Query_cache_block *query_block= block->result()->parent(); BLOCK_LOCK_WR(query_block); + Query_cache_block *next= block->next, *prev= block->prev; + Query_cache_block::block_type type= block->type; ulong len = block->length, used = block->used; Query_cache_block *pprev = block->pprev, *pnext = block->pnext, @@ -4457,8 +4456,9 @@ uint Query_cache::filename_2_table_key (char *key, const char *path, *db_length= (filename - dbname) - 1; DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename)); - DBUG_RETURN((uint) (strmov(strmake(key, dbname, *db_length) + 1, - filename) -key) + 1); + DBUG_RETURN((uint) (strmake(strmake(key, dbname, + min(*db_length, NAME_LEN)) + 1, + filename, NAME_LEN) - key) + 1); } /**************************************************************************** diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 7444d444cf9..f35ac889b23 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -141,12 +141,12 @@ struct Query_cache_block inline bool is_free(void) { return type == FREE; } void init(ulong length); void destroy(); - inline uint headers_len(); - inline uchar* data(void); - inline Query_cache_query *query(); - inline Query_cache_table *table(); - inline Query_cache_result *result(); - inline Query_cache_block_table *table(TABLE_COUNTER_TYPE n); + uint headers_len(); + uchar* data(void); + Query_cache_query *query(); + Query_cache_table *table(); + Query_cache_result *result(); + Query_cache_block_table *table(TABLE_COUNTER_TYPE n); }; struct Query_cache_query diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 9fd3827ab39..6218ca60311 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1377,11 +1377,12 @@ void THD::cleanup(void) #error xid_state in the cache should be replaced by the allocated value } #endif - { - transaction.xid_state.xa_state= XA_NOTR; - trans_rollback(this); - xid_cache_delete(&transaction.xid_state); - } + + close_temporary_tables(this); + + transaction.xid_state.xa_state= XA_NOTR; + trans_rollback(this); + xid_cache_delete(&transaction.xid_state); locked_tables_list.unlock_locked_tables(this); mysql_ha_cleanup(this); @@ -1415,7 +1416,6 @@ void THD::cleanup(void) delete_dynamic(&user_var_events); my_hash_free(&user_vars); - close_temporary_tables(this); sp_cache_clear(&sp_proc_cache); sp_cache_clear(&sp_func_cache); @@ -1860,6 +1860,19 @@ void THD::cleanup_after_query() stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; auto_inc_intervals_in_cur_stmt_for_binlog.empty(); rand_used= 0; +#ifndef EMBEDDED_LIBRARY + /* + Clean possible unused INSERT_ID events by current statement. 
+ is_update_query() is needed to ignore SET statements: + Statements that don't update anything directly and don't + used stored functions. This is mostly necessary to ignore + statements in binlog between SET INSERT_ID and DML statement + which is intended to consume its event (there can be other + SET statements between them). + */ + if ((rli_slave || rli_fake) && is_update_query(lex->sql_command)) + auto_inc_intervals_forced.empty(); +#endif } if (first_successful_insert_id_in_cur_stmt > 0) { @@ -3121,42 +3134,13 @@ int select_exists_subselect::send_data(List<Item> &items) int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) { unit= u; - List_iterator_fast<my_var> var_li(var_list); - List_iterator_fast<Item> it(list); - Item *item; - my_var *mv; - Item_func_set_user_var **suv; if (var_list.elements != list.elements) { my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0)); return 1; - } - - /* - Iterate over the destination variables and mark them as being - updated in this query. - We need to do this at JOIN::prepare time to ensure proper - const detection of Item_func_get_user_var that is determined - by the presence of Item_func_set_user_vars - */ - - suv= set_var_items= (Item_func_set_user_var **) - sql_alloc(sizeof(Item_func_set_user_var *) * list.elements); - - while ((mv= var_li++) && (item= it++)) - { - if (!mv->local) - { - *suv= new Item_func_set_user_var(mv->s, item); - (*suv)->fix_fields(thd, 0); - } - else - *suv= NULL; - suv++; - } - + } return 0; } @@ -3486,7 +3470,6 @@ int select_dumpvar::send_data(List<Item> &items) List_iterator<Item> it(items); Item *item; my_var *mv; - Item_func_set_user_var **suv; DBUG_ENTER("select_dumpvar::send_data"); if (unit->offset_limit_cnt) @@ -3499,19 +3482,20 @@ int select_dumpvar::send_data(List<Item> &items) my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0)); DBUG_RETURN(1); } - for (suv= set_var_items; ((mv= var_li++) && (item= it++)); suv++) + while ((mv= var_li++) && (item= it++)) { if (mv->local) { - DBUG_ASSERT(!*suv); if (thd->spcont->set_variable(thd, mv->offset, &item)) DBUG_RETURN(1); } else { - DBUG_ASSERT(*suv); - (*suv)->save_item_result(item); - if ((*suv)->update()) + Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item); + suv->save_item_result(item); + if (suv->fix_fields(thd, 0)) + DBUG_RETURN (1); + if (suv->update()) DBUG_RETURN (1); } } @@ -4497,9 +4481,14 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state) bool xid_cache_insert(XID_STATE *xid_state) { mysql_mutex_lock(&LOCK_xid_cache); - DBUG_ASSERT(my_hash_search(&xid_cache, xid_state->xid.key(), - xid_state->xid.key_length())==0); - my_bool res=my_hash_insert(&xid_cache, (uchar*)xid_state); + if (my_hash_search(&xid_cache, xid_state->xid.key(), + xid_state->xid.key_length())) + { + mysql_mutex_unlock(&LOCK_xid_cache); + my_error(ER_XAER_DUPID, MYF(0)); + return true; + } + bool res= my_hash_insert(&xid_cache, (uchar*)xid_state); mysql_mutex_unlock(&LOCK_xid_cache); return res; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 01558603f68..a5bb12ae370 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -581,6 +581,9 @@ typedef struct system_variables ulong wt_timeout_long, wt_deadlock_search_depth_long; double long_query_time_double; + + my_bool pseudo_slave_mode; + } SV; /** @@ -3366,6 +3369,7 @@ public: #else void begin_dataset() {} #endif + virtual void update_used_tables() {} }; @@ -4133,6 +4137,7 @@ public: return updated; } virtual void abort_result_set(); + void 
update_used_tables(); }; class my_var : public Sql_alloc { @@ -4156,7 +4161,6 @@ public: class select_dumpvar :public select_result_interceptor { ha_rows row_count; - Item_func_set_user_var **set_var_items; public: List<my_var> var_list; select_dumpvar() { var_list.empty(); row_count= 0;} diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index fde9f70fa79..6b0882bda80 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -775,7 +775,7 @@ ulong JOIN_CACHE::get_min_join_buffer_size() tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS)) { len+= tab->get_max_used_fieldlength(); - len_last=+ tab->get_used_fieldlength(); + len_last+= tab->get_used_fieldlength(); } size_t len_addon= get_record_max_affix_length() + get_max_key_addon_space_per_record(); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 2f81172dc7f..a6038031936 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -3871,7 +3871,8 @@ void SELECT_LEX::update_used_tables() { for (ORDER *order= order_list.first; order; order= order->next) (*order->item)->update_used_tables(); - } + } + join->result->update_used_tables(); } diff --git a/sql/sql_list.h b/sql/sql_list.h index b4e0ab84aab..aef2f8d5f25 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -287,13 +287,15 @@ public: if (node == &end_of_list) return; } - *prev= *last; + *prev= &end_of_list; last= prev; } inline void prepand(base_list *list) { if (!list->is_empty()) { + if (is_empty()) + last= list->last; *list->last= first; first= list->first; elements+= list->elements; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index bdf2bd17589..ed4a68c8534 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1658,7 +1658,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, break; case SCH_USER_STATS: case SCH_CLIENT_STATS: - if (check_global_access(thd, SUPER_ACL | PROCESS_ACL)) + if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true)) DBUG_RETURN(1); case SCH_TABLE_STATS: case SCH_INDEX_STATS: @@ -1833,7 +1833,7 @@ bool sp_process_definer(THD *thd) if ((strcmp(lex->definer->user.str, thd->security_ctx->priv_user) || my_strcasecmp(system_charset_info, lex->definer->host.str, thd->security_ctx->priv_host)) && - check_global_access(thd, SUPER_ACL)) + check_global_access(thd, SUPER_ACL, true)) { my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); DBUG_RETURN(TRUE); @@ -3095,6 +3095,7 @@ end_with_restore_list: thd->first_successful_insert_id_in_cur_stmt= thd->first_successful_insert_id_in_prev_stmt; +#ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("after_mysql_insert", { const char act1[]= @@ -3110,6 +3111,7 @@ end_with_restore_list: STRING_WITH_LEN(act2))); };); DEBUG_SYNC(thd, "after_mysql_insert"); +#endif break; } case SQLCOM_REPLACE_SELECT: @@ -5475,14 +5477,17 @@ bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table) 1 Access denied. 
In this case an error is sent to the client */ -bool check_global_access(THD *thd, ulong want_access) +bool check_global_access(THD *thd, ulong want_access, bool no_errors) { #ifndef NO_EMBEDDED_ACCESS_CHECKS char command[128]; if ((thd->security_ctx->master_access & want_access)) return 0; - get_privilege_desc(command, sizeof(command), want_access); - my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); + if (!no_errors) + { + get_privilege_desc(command, sizeof(command), want_access); + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); + } status_var_increment(thd->status_var.access_denied_errors); return 1; #else diff --git a/sql/sql_parse.h b/sql/sql_parse.h index 4510ebe94e2..6d47e648be2 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -196,7 +196,7 @@ check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables, /* These were under the INNODB_COMPATIBILITY_HOOKS */ -bool check_global_access(THD *thd, ulong want_access); +bool check_global_access(THD *thd, ulong want_access, bool no_errors= false); inline bool is_supported_parser_charset(CHARSET_INFO *cs) { diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 1c47991f474..07b93818bb7 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -35,6 +35,8 @@ #include <mysql/plugin_auth.h> #include "lock.h" // MYSQL_LOCK_IGNORE_TIMEOUT #include <mysql/plugin_auth.h> +#include "sql_plugin_compat.h" + #define REPORT_TO_LOG 1 #define REPORT_TO_USER 2 @@ -135,7 +137,7 @@ static int min_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]= MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION, MYSQL_AUDIT_INTERFACE_VERSION, MYSQL_REPLICATION_INTERFACE_VERSION, - MYSQL_AUTHENTICATION_INTERFACE_VERSION + MIN_AUTHENTICATION_INTERFACE_VERSION }; static int cur_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]= { diff --git a/sql/sql_plugin_compat.h b/sql/sql_plugin_compat.h new file mode 100644 index 00000000000..8c6014f8dc6 --- /dev/null +++ b/sql/sql_plugin_compat.h @@ -0,0 +1,65 @@ +/* Copyright (C) 2013 Sergei Golubchik and Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +/* old plugin api structures, used for backward compatibility */ + +#define upgrade_var(X) latest->X= X +#define upgrade_str(X) strmake(latest->X, X, sizeof(X)) +#define downgrade_var(X) X= latest->X +#define downgrade_str(X) strmake(X, latest->X, sizeof(X)-1) + +/**************************************************************/ +/* Authentication API, version 0x0100 *************************/ +#define MIN_AUTHENTICATION_INTERFACE_VERSION 0x0100 + +struct MYSQL_SERVER_AUTH_INFO_0x0100 { + char *user_name; + unsigned int user_name_length; + const char *auth_string; + unsigned long auth_string_length; + char authenticated_as[49]; + char external_user[512]; + int password_used; + const char *host_or_ip; + unsigned int host_or_ip_length; + + void upgrade(MYSQL_SERVER_AUTH_INFO *latest) + { + upgrade_var(user_name); + upgrade_var(user_name_length); + upgrade_var(auth_string); + upgrade_var(auth_string_length); + upgrade_str(authenticated_as); + upgrade_str(external_user); + upgrade_var(password_used); + upgrade_var(host_or_ip); + upgrade_var(host_or_ip_length); + } + void downgrade(MYSQL_SERVER_AUTH_INFO *latest) + { + downgrade_var(user_name); + downgrade_var(user_name_length); + downgrade_var(auth_string); + downgrade_var(auth_string_length); + downgrade_str(authenticated_as); + downgrade_str(external_user); + downgrade_var(password_used); + downgrade_var(host_or_ip); + downgrade_var(host_or_ip_length); + } +}; + +/**************************************************************/ + diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 6b8d0f5153f..631255e0d69 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -966,6 +966,7 @@ impossible position"; event_type= (Log_event_type)((uchar)(*packet)[LOG_EVENT_OFFSET+ev_offset]); +#ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", { if (event_type == XID_EVENT) @@ -984,6 +985,7 @@ impossible position"; STRING_WITH_LEN(act2))); } }); +#endif if (event_type == FORMAT_DESCRIPTION_EVENT) { current_checksum_alg= get_checksum_alg(packet->ptr() + ev_offset, @@ -1348,6 +1350,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) if (thd->lex->mi.pos) { + if (thd->lex->mi.relay_log_pos) + slave_errno=ER_BAD_SLAVE_UNTIL_COND; mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS; mi->rli.until_log_pos= thd->lex->mi.pos; /* @@ -1359,6 +1363,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) } else if (thd->lex->mi.relay_log_pos) { + if (thd->lex->mi.pos) + slave_errno=ER_BAD_SLAVE_UNTIL_COND; mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS; mi->rli.until_log_pos= thd->lex->mi.relay_log_pos; strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 81f510a4563..0abac42eb73 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2012 Oracle and/or its affiliates. +/* Copyright (c) 2000, 2013 Oracle and/or its affiliates. Copyright (c) 2009, 2013 Monty Program Ab. 
This program is free software; you can redistribute it and/or modify @@ -130,9 +130,10 @@ static int return_zero_rows(JOIN *join, select_result *res, List<Item> &fields, bool send_row, ulonglong select_options, const char *info, Item *having, List<Item> &all_fields); -static COND *build_equal_items(THD *thd, COND *cond, +static COND *build_equal_items(JOIN *join, COND *cond, COND_EQUAL *inherited, List<TABLE_LIST> *join_list, + bool ignore_on_conds, COND_EQUAL **cond_equal_ref); static COND* substitute_for_best_equal_field(JOIN_TAB *context_tab, COND *cond, @@ -148,7 +149,8 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list, static COND *optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, - Item::cond_result *cond_value, + bool ignore_on_conds, + Item::cond_result *cond_value, COND_EQUAL **cond_equal); bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); static bool create_internal_tmp_table_from_heap2(THD *, TABLE *, @@ -607,16 +609,16 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, uint *reserved) { int res; - nesting_map save_allow_sum_func=thd->lex->allow_sum_func ; + st_select_lex *const select= thd->lex->current_select; + nesting_map save_allow_sum_func= thd->lex->allow_sum_func; /* Need to save the value, so we can turn off only any new non_agg_field_used additions coming from the WHERE */ - const bool saved_non_agg_field_used= - thd->lex->current_select->non_agg_field_used(); + const bool saved_non_agg_field_used= select->non_agg_field_used(); DBUG_ENTER("setup_without_group"); - thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); res= setup_conds(thd, tables, leaves, conds); if (thd->lex->current_select->first_cond_optimization) { @@ -627,12 +629,12 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, } /* it's not wrong to have non-aggregated columns in a WHERE */ - thd->lex->current_select->set_non_agg_field_used(saved_non_agg_field_used); + select->set_non_agg_field_used(saved_non_agg_field_used); - thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, order); - thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields, group, hidden_group_fields); thd->lex->allow_sum_func= save_allow_sum_func; @@ -781,7 +783,7 @@ JOIN::prepare(Item ***rref_pointer_array, { nesting_map save_allow_sum_func= thd->lex->allow_sum_func; thd->where="having clause"; - thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level; + thd->lex->allow_sum_func|= (nesting_map)1 << select_lex_arg->nest_level; select_lex->having_fix_field= 1; /* Wrap alone field in HAVING clause in case it will be outer field of subquery @@ -1155,7 +1157,8 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S if (setup_jtbm_semi_joins(this, join_list, &conds)) DBUG_RETURN(1); - conds= optimize_cond(this, conds, join_list, &cond_value, &cond_equal); + conds= optimize_cond(this, conds, join_list, FALSE, + &cond_value, &cond_equal); if (thd->is_error()) { @@ -1165,7 +1168,9 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S } { - having= optimize_cond(this, having, 
join_list, &having_value, &having_equal); + having= optimize_cond(this, having, join_list, TRUE, + &having_value, &having_equal); + if (thd->is_error()) { error= 1; @@ -2312,6 +2317,7 @@ void JOIN::exec_inner() { List<Item> *columns_list= &fields_list; int tmp_error; + DBUG_ENTER("JOIN::exec"); const bool has_group_by= this->group; @@ -4183,7 +4189,7 @@ add_key_field(JOIN *join, !(field->table->pos_in_table_list->is_materialized_derived() && field->table->created)) || (field->table->pos_in_table_list->is_materialized_derived() && - !field->table->created))) + !field->table->created && !(field->flags & BLOB_FLAG)))) { optimize= KEY_OPTIMIZE_EQ; } @@ -5160,6 +5166,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) } + /** Check for the presence of AGGFN(DISTINCT a) queries that may be subject to loose index scan. @@ -5486,6 +5493,8 @@ best_access_path(JOIN *join, 2. we won't get two ref-or-null's */ if (!(remaining_tables & keyuse->used_tables) && + s->access_from_tables_is_allowed(keyuse->used_tables, + join->sjm_lookup_tables) && !(ref_or_null_part && (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL))) { @@ -5606,7 +5615,8 @@ best_access_path(JOIN *join, in ReuseRangeEstimateForRef-3. */ if (table->quick_keys.is_set(key) && - (const_part & ((1 << table->quick_key_parts[key])-1)) == + (const_part & + (((key_part_map)1 << table->quick_key_parts[key])-1)) == (((key_part_map)1 << table->quick_key_parts[key])-1) && table->quick_n_ranges[key] == 1 && records > (double) table->quick_rows[key]) @@ -5770,7 +5780,8 @@ best_access_path(JOIN *join, */ if (table->quick_keys.is_set(key) && table->quick_key_parts[key] <= max_key_part && - const_part & (1 << table->quick_key_parts[key]) && + const_part & + ((key_part_map)1 << table->quick_key_parts[key]) && table->quick_n_ranges[key] == 1 + test(ref_or_null_part & const_part) && records > (double) table->quick_rows[key]) @@ -8024,7 +8035,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, */ do { - if (!(~used_tables & keyuse->used_tables)) + if (!(~used_tables & keyuse->used_tables) && + j->access_from_tables_is_allowed(keyuse->used_tables, + join->sjm_lookup_tables)) { if (are_tables_local(j, keyuse->val->used_tables())) { @@ -8093,7 +8106,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, uint i; for (i=0 ; i < keyparts ; keyuse++,i++) { - while (((~used_tables) & keyuse->used_tables) || + while (((~used_tables) & keyuse->used_tables) || + !j->access_from_tables_is_allowed(keyuse->used_tables, + join->sjm_lookup_tables) || keyuse->keypart == NO_KEYPART || (keyuse->keypart != (is_hash_join_key_no(key) ? 
@@ -8105,7 +8120,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, j->ref.items[i]=keyuse->val; // Save for cond removal j->ref.cond_guards[i]= keyuse->cond_guard; if (keyuse->null_rejecting) - j->ref.null_rejecting |= 1 << i; + j->ref.null_rejecting|= (key_part_map)1 << i; keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables; /* Todo: we should remove this check for thd->lex->describe on the next @@ -8152,20 +8167,17 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, ulong key_flags= j->table->actual_key_flags(keyinfo); if (j->type == JT_CONST) j->table->const_table= 1; - else if (((key_flags & (HA_NOSAME | HA_NULL_PART_KEY))!= HA_NOSAME) || - keyparts != j->table->actual_n_key_parts(keyinfo) || - null_ref_key) - { - if (test(key_flags & HA_EXT_NOSAME) && keyparts == keyinfo->ext_key_parts && - !null_ref_key) - j->type= JT_EQ_REF; - else - { - /* Must read with repeat */ - j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF; - j->ref.null_ref_key= null_ref_key; - j->ref.null_ref_part= null_ref_part; - } + else if (!((keyparts == keyinfo->key_parts && + ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)) || + (keyparts > keyinfo->key_parts && // true only for extended keys + test(key_flags & HA_EXT_NOSAME) && + keyparts == keyinfo->ext_key_parts)) || + null_ref_key) + { + /* Must read with repeat */ + j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF; + j->ref.null_ref_key= null_ref_key; + j->ref.null_ref_part= null_ref_part; } else if (keyuse_uses_no_tables) { @@ -8316,9 +8328,9 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2) Item *res; if ((res= new Item_cond_and(*e1, e2))) { - *e1= res; res->fix_fields(thd, 0); res->update_used_tables(); + *e1= res; } } else @@ -8390,7 +8402,7 @@ static void add_not_null_conds(JOIN *join) { for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++) { - if (tab->ref.null_rejecting & (1 << keypart)) + if (tab->ref.null_rejecting & ((key_part_map)1 << keypart)) { Item *item= tab->ref.items[keypart]; Item *notnull; @@ -10580,7 +10592,6 @@ bool JOIN_TAB::preread_init() } - /** Build a TABLE_REF structure for index lookup in the temporary table @@ -11832,7 +11843,9 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond, item_equal->n_field_items()); } - ((Item_cond_and*)cond)->cond_equal= cond_equal; + ((Item_cond_and*)cond)->cond_equal.copy(cond_equal); + cond_equal.current_level= + ((Item_cond_and*)cond)->cond_equal.current_level; inherited= &(((Item_cond_and*)cond)->cond_equal); } /* @@ -11887,6 +11900,7 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond, item_equal->update_used_tables(); set_if_bigger(thd->lex->current_select->max_equal_elems, item_equal->n_field_items()); + item_equal->upper_levels= inherited; return item_equal; } @@ -11909,7 +11923,8 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond, set_if_bigger(thd->lex->current_select->max_equal_elems, item_equal->n_field_items()); } - and_cond->cond_equal= cond_equal; + and_cond->cond_equal.copy(cond_equal); + cond_equal.current_level= and_cond->cond_equal.current_level; args->concat((List<Item> *)&cond_equal.current_level); return and_cond; @@ -11991,6 +12006,8 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond, @param inherited path to all inherited multiple equality items @param join_list list of join tables to which the condition refers to + @ignore_on_conds TRUE <-> do not build multiple equalities + for on expressions @param[out] cond_equal_ref pointer to the structure to place built 
equalities in @@ -11998,10 +12015,13 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond, pointer to the transformed condition containing multiple equalities */ -static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited, +static COND *build_equal_items(JOIN *join, COND *cond, + COND_EQUAL *inherited, List<TABLE_LIST> *join_list, + bool ignore_on_conds, COND_EQUAL **cond_equal_ref) { + THD *thd= join->thd; COND_EQUAL *cond_equal= 0; if (cond) @@ -12026,7 +12046,7 @@ static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited, } *cond_equal_ref= cond_equal; - if (join_list) + if (join_list && !ignore_on_conds) { TABLE_LIST *table; List_iterator<TABLE_LIST> li(*join_list); @@ -12041,8 +12061,8 @@ static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited, We can modify table->on_expr because its old value will be restored before re-execution of PS/SP. */ - table->on_expr= build_equal_items(thd, table->on_expr, inherited, - nested_join_list, + table->on_expr= build_equal_items(join, table->on_expr, inherited, + nested_join_list, ignore_on_conds, &table->cond_equal); } } @@ -12239,11 +12259,16 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, Item *item_const= item_equal->get_const(); Item_equal_fields_iterator it(*item_equal); Item *head; - DBUG_ASSERT(!cond || cond->type() == Item::COND_ITEM); - TABLE_LIST *current_sjm= NULL; Item *current_sjm_head= NULL; + DBUG_ASSERT(!cond || + cond->type() == Item::INT_ITEM || + (cond->type() == Item::FUNC_ITEM && + ((Item_func *) cond)->functype() == Item_func::EQ_FUNC) || + (cond->type() == Item::COND_ITEM && + ((Item_func *) cond)->functype() == Item_func::COND_AND_FUNC)); + /* Pick the "head" item: the constant one or the first in the join order (if the first in the join order happends to be inside an SJM nest, that's @@ -12318,8 +12343,8 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, if (produce_equality) { - if (eq_item) - eq_list.push_back(eq_item); + if (eq_item && eq_list.push_back(eq_item)) + return 0; /* If we're inside an SJM-nest (current_sjm!=NULL), and the multi-equality @@ -12343,31 +12368,61 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, current_sjm= field_sjm; } - if (!cond) + /* + We have produced zero, one, or more pair-wise equalities eq_i. We want to + return an expression in form: + + cond AND eq_1 AND eq_2 AND eq_3 AND ... + + 'cond' is a parameter for this function, which may be NULL, an Item_int(1), + or an Item_func_eq or an Item_cond_and. + + We want to return a well-formed condition: no nested Item_cond_and objects, + or Item_cond_and with a single child: + - if 'cond' is an Item_cond_and, we add eq_i as its tail + - if 'cond' is Item_int(1), we return eq_i + - otherwise, we create our own Item_cond_and and put 'cond' at the front of + it. + - if we have only one condition to return, we don't create an Item_cond_and + */ + + if (eq_item && eq_list.push_back(eq_item)) + return 0; + COND *res= 0; + switch (eq_list.elements) + { + case 0: + res= cond ? 
cond : new Item_int((longlong) 1, 1); + break; + case 1: + if (!cond || cond->type() == Item::INT_ITEM) + res= eq_item; + break; + default: + break; + } + if (!res) { - if (eq_list.is_empty()) + if (cond) { - if (eq_item) - return eq_item; - return new Item_int((longlong) 1, 1); + if (cond->type() == Item::COND_ITEM) + { + res= cond; + ((Item_cond *) res)->add_at_end(&eq_list); + } + else if (eq_list.push_front(cond)) + return 0; } - /* eq_item is always set if list is not empty */ - DBUG_ASSERT(eq_item); - eq_list.push_back(eq_item); - if (!(cond= new Item_cond_and(eq_list))) - return 0; // Error - } - else + } + if (!res) + res= new Item_cond_and(eq_list); + if (res) { - if (eq_item) - eq_list.push_back(eq_item); - if (!eq_list.is_empty()) - ((Item_cond *) cond)->add_at_head(&eq_list); + res->quick_fix_field(); + res->update_used_tables(); } - cond->quick_fix_field(); - cond->update_used_tables(); - - return cond; + + return res; } @@ -12470,31 +12525,68 @@ static COND* substitute_for_best_equal_field(JOIN_TAB *context_tab, if (and_level) { + COND *eq_cond= 0; List_iterator_fast<Item_equal> it(cond_equal->current_level); + bool false_eq_cond= FALSE; while ((item_equal= it++)) { - cond= eliminate_item_equal(cond, cond_equal->upper_levels, item_equal); - // This occurs when eliminate_item_equal() founds that cond is - // always false and substitutes it with Item_int 0. - // Due to this, value of item_equal will be 0, so just return it. - if (!cond) - return org_cond; // Error - if (cond->type() != Item::COND_ITEM) + eq_cond= eliminate_item_equal(eq_cond, cond_equal->upper_levels, + item_equal); + if (!eq_cond) + { + eq_cond= 0; + break; + } + else if (eq_cond->type() == Item::INT_ITEM && !eq_cond->val_bool()) + { + /* + This occurs when eliminate_item_equal() founds that cond is + always false and substitutes it with Item_int 0. + Due to this, value of item_equal will be 0, so just return it. + */ + cond= eq_cond; + false_eq_cond= TRUE; break; + } } - } - if (cond->type() == Item::COND_ITEM && - !((Item_cond*)cond)->argument_list()->elements) - cond= new Item_int((int32)cond->val_bool()); - + if (eq_cond && !false_eq_cond) + { + /* Insert the generated equalities before all other conditions */ + if (eq_cond->type() == Item::COND_ITEM) + ((Item_cond *) cond)->add_at_head( + ((Item_cond *) eq_cond)->argument_list()); + else + { + if (cond_list->is_empty()) + cond= eq_cond; + else + { + /* Do not add an equality condition if it's always true */ + if (eq_cond->type() != Item::INT_ITEM && + cond_list->push_front(eq_cond)) + eq_cond= 0; + } + } + } + if (!eq_cond) + { + /* + We are out of memory doing the transformation. + This is a fatal error now. However we bail out by returning the + original condition that we had before we started the transformation. + */ + cond_list->concat((List<Item> *) &cond_equal->current_level); + } + } } else if (cond->type() == Item::FUNC_ITEM && ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) { item_equal= (Item_equal *) cond; item_equal->sort(&compare_fields_by_table_order, table_join_idx); + cond_equal= item_equal->upper_levels; if (cond_equal && cond_equal->current_level.head() == item_equal) - cond_equal= 0; + cond_equal= cond_equal->upper_levels; cond= eliminate_item_equal(0, cond_equal, item_equal); return cond ? 
cond : org_cond; } @@ -13453,7 +13545,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab, static COND * -optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, +optimize_cond(JOIN *join, COND *conds, + List<TABLE_LIST> *join_list, bool ignore_on_conds, Item::cond_result *cond_value, COND_EQUAL **cond_equal) { THD *thd= join->thd; @@ -13462,7 +13555,9 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, if (!conds) { *cond_value= Item::COND_TRUE; - build_equal_items(join->thd, NULL, NULL, join_list, cond_equal); + if (!ignore_on_conds) + build_equal_items(join, NULL, NULL, join_list, ignore_on_conds, + cond_equal); } else { @@ -13475,7 +13570,8 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, multiple equality contains a constant. */ DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY);); - conds= build_equal_items(join->thd, conds, NULL, join_list, cond_equal); + conds= build_equal_items(join, conds, NULL, join_list, ignore_on_conds, + cond_equal); DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY);); /* change field = field to field = const for each found field = const */ @@ -13531,7 +13627,61 @@ internal_remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) li.remove(); else if (item != new_item) { - (void) li.replace(new_item); + if (and_level) + { + /* + Take a special care of multiple equality predicates + that may be part of 'cond' and 'new_item'. + Those multiple equalities that have common members + must be merged. + */ + Item_cond_and *cond_and= (Item_cond_and *) cond; + List<Item_equal> *cond_equal_items= + &cond_and->cond_equal.current_level; + List<Item> *cond_and_list= cond_and->argument_list(); + + if (new_item->type() == Item::COND_ITEM && + ((Item_cond*) new_item)->functype() == Item_func::COND_AND_FUNC) + { + Item_cond_and *new_item_and= (Item_cond_and *) new_item; + List<Item_equal> *new_item_equal_items= + &new_item_and->cond_equal.current_level; + List<Item> *new_item_and_list= new_item_and->argument_list(); + cond_and_list->disjoin((List<Item>*) cond_equal_items); + new_item_and_list->disjoin((List<Item>*) new_item_equal_items); + Item_equal *equal_item; + List_iterator<Item_equal> it(*new_item_equal_items); + while ((equal_item= it++)) + { + equal_item->merge_into_list(cond_equal_items); + } + if (new_item_and_list->is_empty()) + li.remove(); + else + li.replace(*new_item_and_list); + cond_and_list->concat((List<Item>*) cond_equal_items); + } + else if (new_item->type() == Item::FUNC_ITEM && + ((Item_cond*) new_item)->functype() == + Item_func::MULT_EQUAL_FUNC) + { + cond_and_list->disjoin((List<Item>*) cond_equal_items); + ((Item_equal *) new_item)->merge_into_list(cond_equal_items); + li.remove(); + cond_and_list->concat((List<Item>*) cond_equal_items); + } + else + li.replace(new_item); + } + else + { + if (new_item->type() == Item::COND_ITEM && + ((Item_cond*) new_item)->functype() == + ((Item_cond*) cond)->functype()) + li.replace(*((Item_cond*) new_item)->argument_list()); + else + li.replace(new_item); + } should_fix_fields=1; } if (*cond_value == Item::COND_UNDEF) @@ -14110,6 +14260,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, } case Item::FIELD_ITEM: case Item::DEFAULT_VALUE_ITEM: + case Item::INSERT_VALUE_ITEM: { Item_field *field= (Item_field*) item; bool orig_modify= modify_item; @@ -14788,11 +14939,11 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, } else { - 
recinfo->null_bit= 1 << (null_count & 7); + recinfo->null_bit= (uint8)1 << (null_count & 7); recinfo->null_pos= null_count/8; } field->move_field(pos,null_flags+null_count/8, - 1 << (null_count & 7)); + (uint8)1 << (null_count & 7)); null_count++; } else @@ -15246,7 +15397,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list) { cur_field->move_field(field_pos, (uchar*) null_pos, null_bit); null_bit<<= 1; - if (null_bit == (1 << 8)) + if (null_bit == (uint)1 << 8) { ++null_pos; null_bit= 1; @@ -19306,7 +19457,6 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, *(join->pre_sort_join_tab)= *tab; - tab->select=NULL; tab->set_select_cond(NULL, __LINE__); tab->type=JT_ALL; // Read with normal read_record diff --git a/sql/sql_select.h b/sql/sql_select.h index 638de926d75..7e6f81cc65b 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1,8 +1,8 @@ #ifndef SQL_SELECT_INCLUDED #define SQL_SELECT_INCLUDED -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. - Copyright (c) 2008-2011 Monty Program Ab +/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. + Copyright (c) 2008, 2013, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -523,6 +523,16 @@ typedef struct st_join_table { bool preread_init(); bool is_sjm_nest() { return test(bush_children); } + + bool access_from_tables_is_allowed(table_map used_tables, + table_map sjm_lookup_tables) + { + table_map used_sjm_lookup_tables= used_tables & sjm_lookup_tables; + return !used_sjm_lookup_tables || + (emb_sj_nest && + !(used_sjm_lookup_tables & ~emb_sj_nest->sj_inner_tables)); + } + } JOIN_TAB; @@ -973,6 +983,11 @@ public: bool hash_join; bool do_send_rows; table_map const_table_map; + /** + Bitmap of semijoin tables that the current partial plan decided + to materialize and access by lookups + */ + table_map sjm_lookup_tables; /* Constant tables for which we have found a row (as opposed to those for which we didn't). @@ -1304,8 +1319,9 @@ public: outer_ref_cond= pseudo_bits_cond= NULL; in_to_exists_where= NULL; in_to_exists_having= NULL; - pre_sort_join_tab= NULL; + emb_sjm_nest= NULL; + sjm_lookup_tables= 0; } int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num, diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 095ad409a94..269a99faadd 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3152,8 +3152,8 @@ int fill_schema_user_stats(THD* thd, TABLE_LIST* tables, COND* cond) int result; DBUG_ENTER("fill_schema_user_stats"); - if (check_global_access(thd, SUPER_ACL | PROCESS_ACL)) - DBUG_RETURN(1); + if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true)) + DBUG_RETURN(0); /* Iterates through all the global stats and sends them to the client. @@ -3187,8 +3187,8 @@ int fill_schema_client_stats(THD* thd, TABLE_LIST* tables, COND* cond) int result; DBUG_ENTER("fill_schema_client_stats"); - if (check_global_access(thd, SUPER_ACL | PROCESS_ACL)) - DBUG_RETURN(1); + if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true)) + DBUG_RETURN(0); /* Iterates through all the global stats and sends them to the client. 
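
The fill_schema_user_stats / fill_schema_client_stats hunks above switch a failed privilege check from returning an error (DBUG_RETURN(1)) to returning success with no rows (DBUG_RETURN(0)), and pass an extra flag to check_global_access, presumably so the check does not report an error to the client. Below is a minimal, self-contained sketch of that "empty result instead of error" pattern; the names (check_global_access_stub, fill_user_stats, Row) are stand-ins for illustration, not the server's API.

#include <iostream>
#include <string>
#include <vector>

struct Row { std::string user; unsigned long long connections; };

static bool have_super_or_process = false;   // pretend privilege state

// Returns true when access is denied; with a "no errors" flag the caller
// is expected to degrade gracefully instead of raising an error.
static bool check_global_access_stub(bool /*no_errors*/)
{
  return !have_super_or_process;
}

// I_S-style fill callback: on missing privileges return success with
// zero rows, mirroring the change from DBUG_RETURN(1) to DBUG_RETURN(0).
static int fill_user_stats(std::vector<Row> &out)
{
  if (check_global_access_stub(/* no_errors= */ true))
    return 0;                                // empty result, no error
  out.push_back({"example_user", 42});       // real code iterates a hash
  return 0;
}

int main()
{
  std::vector<Row> rows;
  fill_user_stats(rows);
  std::cout << "rows: " << rows.size() << "\n";  // prints 0 without privileges
}

The effect is that an unprivileged SELECT from the statistics tables simply sees an empty table instead of failing the whole statement.
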
diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 9d11677666f..095c557c531 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -796,7 +796,7 @@ copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, uint32 length= min(to_length, from_length), length2= length; -#if defined(__i386__) +#if defined(__i386__) || defined(__x86_64__) /* Special loop for i386, it allows to refer to a non-aligned memory block as UINT32, which makes diff --git a/sql/sql_table.cc b/sql/sql_table.cc index d0a4aedfa4f..a379b581539 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2012, Oracle and/or its affiliates. - Copyright (c) 2010, 2011, Monty Program Ab + Copyright (c) 2010, 2013, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -649,13 +649,6 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) struct st_global_ddl_log { - /* - We need to adjust buffer size to be able to handle downgrades/upgrades - where IO_SIZE has changed. We'll set the buffer size such that we can - handle that the buffer size was upto 4 times bigger in the version - that wrote the DDL log. - */ - char file_entry_buf[4*IO_SIZE]; char file_name_str[FN_REFLEN]; char *file_name; DDL_LOG_MEMORY_ENTRY *first_free; @@ -683,51 +676,60 @@ mysql_mutex_t LOCK_gdl; #define DDL_LOG_NUM_ENTRY_POS 0 #define DDL_LOG_NAME_LEN_POS 4 #define DDL_LOG_IO_SIZE_POS 8 +#define DDL_LOG_HEADER_SIZE 12 -/* - Read one entry from ddl log file - SYNOPSIS - read_ddl_log_file_entry() - entry_no Entry number to read - RETURN VALUES - TRUE Error - FALSE Success +/** + Read one entry from ddl log file. + @param[out] file_entry_buf Buffer to read into + @param entry_no Entry number to read + @param size Number of bytes of the entry to read + + @return Operation status + @retval true Error + @retval false Success */ -static bool read_ddl_log_file_entry(uint entry_no) +static bool read_ddl_log_file_entry(uchar *file_entry_buf, + uint entry_no, + uint size) { bool error= FALSE; File file_id= global_ddl_log.file_id; - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; uint io_size= global_ddl_log.io_size; DBUG_ENTER("read_ddl_log_file_entry"); + DBUG_ASSERT(io_size >= size); - if (mysql_file_pread(file_id, file_entry_buf, io_size, io_size * entry_no, - MYF(MY_WME)) != io_size) + if (mysql_file_pread(file_id, file_entry_buf, size, io_size * entry_no, + MYF(MY_WME)) != size) error= TRUE; DBUG_RETURN(error); } -/* - Write one entry from ddl log file - SYNOPSIS - write_ddl_log_file_entry() - entry_no Entry number to write - RETURN VALUES - TRUE Error - FALSE Success +/** + Write one entry to ddl log file. 
+ + @param file_entry_buf Buffer to write + @param entry_no Entry number to write + @param size Number of bytes of the entry to write + + @return Operation status + @retval true Error + @retval false Success */ -static bool write_ddl_log_file_entry(uint entry_no) +static bool write_ddl_log_file_entry(uchar *file_entry_buf, + uint entry_no, + uint size) { bool error= FALSE; File file_id= global_ddl_log.file_id; - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + uint io_size= global_ddl_log.io_size; DBUG_ENTER("write_ddl_log_file_entry"); + DBUG_ASSERT(io_size >= size); - if (mysql_file_pwrite(file_id, (uchar*)file_entry_buf, - IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE) + if (mysql_file_pwrite(file_id, file_entry_buf, size, + io_size * entry_no, MYF(MY_WME)) != size) error= TRUE; DBUG_RETURN(error); } @@ -746,17 +748,20 @@ static bool write_ddl_log_header() { uint16 const_var; bool error= FALSE; + uchar file_entry_buf[DDL_LOG_HEADER_SIZE]; DBUG_ENTER("write_ddl_log_header"); + DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len) + <= global_ddl_log.io_size); - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS], + int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS], global_ddl_log.num_entries); - const_var= FN_LEN; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS], + const_var= global_ddl_log.name_len; + int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS], (ulong) const_var); - const_var= IO_SIZE; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS], + const_var= global_ddl_log.io_size; + int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS], (ulong) const_var); - if (write_ddl_log_file_entry(0UL)) + if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE)) { sql_print_error("Error writing ddl log header"); DBUG_RETURN(TRUE); @@ -796,18 +801,20 @@ static inline void create_ddl_log_file_name(char *file_name) static uint read_ddl_log_header() { - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + char file_entry_buf[DDL_LOG_HEADER_SIZE]; char file_name[FN_REFLEN]; uint entry_no; bool successful_open= FALSE; DBUG_ENTER("read_ddl_log_header"); + DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE); create_ddl_log_file_name(file_name); if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log, file_name, O_RDWR | O_BINARY, MYF(0))) >= 0) { - if (read_ddl_log_file_entry(0UL)) + if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL, + DDL_LOG_HEADER_SIZE)) { /* Write message into error log */ sql_print_error("Failed to read ddl log file in recovery"); @@ -820,8 +827,6 @@ static uint read_ddl_log_header() entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]); global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]); global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]); - DBUG_ASSERT(global_ddl_log.io_size <= - sizeof(global_ddl_log.file_entry_buf)); } else { @@ -836,30 +841,22 @@ static uint read_ddl_log_header() } -/* - Read a ddl log entry - SYNOPSIS - read_ddl_log_entry() - read_entry Number of entry to read - out:entry_info Information from entry - RETURN VALUES - TRUE Error - FALSE Success - DESCRIPTION - Read a specified entry in the ddl log +/** + Set ddl log entry struct from buffer + @param read_entry Entry number + @param file_entry_buf Buffer to use + @param ddl_log_entry Entry to be set + + @note Pointers in ddl_log_entry will point into file_entry_buf! 
*/ -bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) +static void set_ddl_log_entry_from_buf(uint read_entry, + uchar *file_entry_buf, + DDL_LOG_ENTRY *ddl_log_entry) { - char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf; uint inx; uchar single_char; - DBUG_ENTER("read_ddl_log_entry"); - - if (read_ddl_log_file_entry(read_entry)) - { - DBUG_RETURN(TRUE); - } + DBUG_ENTER("set_ddl_log_entry_from_buf"); ddl_log_entry->entry_pos= read_entry; single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]; ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char; @@ -867,14 +864,14 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) ddl_log_entry->action_type= (enum ddl_log_action_code)single_char; ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS]; ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]); - ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS]; + ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS]; inx= DDL_LOG_NAME_POS + global_ddl_log.name_len; - ddl_log_entry->from_name= &file_entry_buf[inx]; + ddl_log_entry->from_name= (char*) &file_entry_buf[inx]; inx+= global_ddl_log.name_len; - ddl_log_entry->handler_name= &file_entry_buf[inx]; - DBUG_RETURN(FALSE); + ddl_log_entry->handler_name= (char*) &file_entry_buf[inx]; + DBUG_VOID_RETURN; } - + /* Initialise ddl log @@ -1077,6 +1074,7 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used; DBUG_ENTER("get_free_ddl_log_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); if (global_ddl_log.first_free == NULL) { if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc( @@ -1134,34 +1132,36 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool error, write_header; + char file_entry_buf[IO_SIZE]; DBUG_ENTER("write_ddl_log_entry"); if (init_ddl_log()) { DBUG_RETURN(TRUE); } - global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= + memset(file_entry_buf, 0, sizeof(file_entry_buf)); + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_ENTRY_CODE; - global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= + file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= (char)ddl_log_entry->action_type; - global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], + file_entry_buf[DDL_LOG_PHASE_POS]= 0; + int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], ddl_log_entry->next_entry); - DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - ddl_log_entry->name, FN_LEN - 1); + DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name, + global_ddl_log.name_len - 1); if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION || ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION) { - DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN], - ddl_log_entry->from_name, FN_LEN - 1); + DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len], + ddl_log_entry->from_name, global_ddl_log.name_len - 1); } else - global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0; - DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)], - 
ddl_log_entry->handler_name, FN_LEN - 1); + file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0; + DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len); + strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)], + ddl_log_entry->handler_name, global_ddl_log.name_len - 1); if (get_free_ddl_log_entry(active_entry, &write_header)) { DBUG_RETURN(TRUE); @@ -1169,14 +1169,15 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, error= FALSE; DBUG_PRINT("ddl_log", ("write type %c next %u name '%s' from_name '%s' handler '%s'", - (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS], + (char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS], ddl_log_entry->next_entry, - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + FN_LEN], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + (2*FN_LEN)])); - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + (char*) &file_entry_buf[DDL_LOG_NAME_POS], + (char*) &file_entry_buf[DDL_LOG_NAME_POS + + global_ddl_log.name_len], + (char*) &file_entry_buf[DDL_LOG_NAME_POS + + (2*global_ddl_log.name_len)])); + if (write_ddl_log_file_entry((uchar*) file_entry_buf, + (*active_entry)->entry_pos, IO_SIZE)) { error= TRUE; sql_print_error("Failed to write entry_no = %u", @@ -1226,13 +1227,14 @@ bool write_execute_ddl_log_entry(uint first_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool write_header= FALSE; - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + char file_entry_buf[IO_SIZE]; DBUG_ENTER("write_execute_ddl_log_entry"); if (init_ddl_log()) { DBUG_RETURN(TRUE); } + memset(file_entry_buf, 0, sizeof(file_entry_buf)); if (!complete) { /* @@ -1246,12 +1248,7 @@ bool write_execute_ddl_log_entry(uint first_entry, } else file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE; - file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */ - file_entry_buf[DDL_LOG_PHASE_POS]= 0; int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry); - file_entry_buf[DDL_LOG_NAME_POS]= 0; - file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0; - file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0; if (!(*active_entry)) { if (get_free_ddl_log_entry(active_entry, &write_header)) @@ -1259,7 +1256,9 @@ bool write_execute_ddl_log_entry(uint first_entry, DBUG_RETURN(TRUE); } } - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + if (write_ddl_log_file_entry((uchar*) file_entry_buf, + (*active_entry)->entry_pos, + IO_SIZE)) { sql_print_error("Error writing execute entry in ddl log"); release_ddl_log_memory_entry(*active_entry); @@ -1304,10 +1303,16 @@ bool write_execute_ddl_log_entry(uint first_entry, bool deactivate_ddl_log_entry(uint entry_no) { - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; + uchar file_entry_buf[DDL_LOG_NAME_POS]; DBUG_ENTER("deactivate_ddl_log_entry"); - if (!read_ddl_log_file_entry(entry_no)) + + /* + Only need to read and write the first bytes of the entry, where + ENTRY_TYPE, ACTION_TYPE and PHASE reside. Using DDL_LOG_NAME_POS + to include all info except for the names. + */ + if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) { if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE) { @@ -1325,7 +1330,7 @@ bool deactivate_ddl_log_entry(uint entry_no) { DBUG_ASSERT(0); } - if (write_ddl_log_file_entry(entry_no)) + if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) { sql_print_error("Error in deactivating log entry. 
Position = %u", entry_no); @@ -1386,6 +1391,7 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry; DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry; DBUG_ENTER("release_ddl_log_memory_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); global_ddl_log.first_free= log_entry; log_entry->next_log_entry= first_free; @@ -1415,24 +1421,26 @@ bool execute_ddl_log_entry(THD *thd, uint first_entry) { DDL_LOG_ENTRY ddl_log_entry; uint read_entry= first_entry; + uchar file_entry_buf[IO_SIZE]; DBUG_ENTER("execute_ddl_log_entry"); mysql_mutex_lock(&LOCK_gdl); do { - if (read_ddl_log_entry(read_entry, &ddl_log_entry)) + if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE)) { - /* Write to error log and continue with next log entry */ + /* Print the error to the log and continue with next log entry */ sql_print_error("Failed to read entry = %u from ddl log", read_entry); break; } + set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry); DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE); if (execute_ddl_log_action(thd, &ddl_log_entry)) { - /* Write to error log and continue with next log entry */ + /* Print the error to the log and continue with next log entry */ sql_print_error("Failed to execute action for entry = %u from ddl log", read_entry); break; @@ -1477,13 +1485,14 @@ void execute_ddl_log_recovery() uint num_entries, i; THD *thd; DDL_LOG_ENTRY ddl_log_entry; + uchar *file_entry_buf; + uint io_size; char file_name[FN_REFLEN]; DBUG_ENTER("execute_ddl_log_recovery"); /* Initialise global_ddl_log struct */ - bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf)); global_ddl_log.inited= FALSE; global_ddl_log.recovery_phase= TRUE; global_ddl_log.io_size= IO_SIZE; @@ -1498,14 +1507,23 @@ void execute_ddl_log_recovery() thd->store_globals(); num_entries= read_ddl_log_header(); + io_size= global_ddl_log.io_size; + file_entry_buf= (uchar*) my_malloc(io_size, MYF(0)); + if (!file_entry_buf) + { + sql_print_error("Failed to allocate buffer for recover ddl log"); + DBUG_VOID_RETURN; + } for (i= 1; i < num_entries + 1; i++) { - if (read_ddl_log_entry(i, &ddl_log_entry)) + if (read_ddl_log_file_entry(file_entry_buf, i, io_size)) { sql_print_error("Failed to read entry no = %u from ddl log", i); continue; } + + set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry); if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE) { if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry)) @@ -1520,6 +1538,7 @@ void execute_ddl_log_recovery() (void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0)); global_ddl_log.recovery_phase= FALSE; delete thd; + my_free(file_entry_buf); /* Remember that we don't have a THD */ set_current_thd(0); DBUG_VOID_RETURN; @@ -1536,14 +1555,16 @@ void execute_ddl_log_recovery() void release_ddl_log() { - DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free; - DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used; + DDL_LOG_MEMORY_ENTRY *free_list; + DDL_LOG_MEMORY_ENTRY *used_list; DBUG_ENTER("release_ddl_log"); if (!global_ddl_log.do_release) DBUG_VOID_RETURN; mysql_mutex_lock(&LOCK_gdl); + free_list= global_ddl_log.first_free; + used_list= global_ddl_log.first_used; while (used_list) { DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry; @@ -2203,7 +2224,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, if 
(thd->locked_tables_mode) { - if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED)) + if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) { error= -1; goto err; @@ -6350,15 +6372,19 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, case LEAVE_AS_IS: break; case ENABLE: - if (wait_while_table_is_used(thd, table, extra_func)) + if (wait_while_table_is_used(thd, table, extra_func, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto err; DEBUG_SYNC(thd,"alter_table_enable_indexes"); error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + table->s->allow_access_to_protected_table(); break; case DISABLE: - if (wait_while_table_is_used(thd, table, extra_func)) + if (wait_while_table_is_used(thd, table, extra_func, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto err; error=table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + table->s->allow_access_to_protected_table(); break; default: DBUG_ASSERT(FALSE); @@ -6384,7 +6410,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, simple rename did nothing and therefore we can safely return without additional clean-up. */ - if (wait_while_table_is_used(thd, table, extra_func)) + if (wait_while_table_is_used(thd, table, extra_func, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto err; close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME); /* @@ -6824,6 +6851,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (table->s->tmp_table) { Open_table_context ot_ctx(thd, (MYSQL_OPEN_IGNORE_FLUSH | + MYSQL_OPEN_FOR_REPAIR | MYSQL_LOCK_IGNORE_TIMEOUT)); TABLE_LIST tbl; bzero((void*) &tbl, sizeof(tbl)); @@ -6894,7 +6922,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, table->file->indexes_are_disabled()) need_lock_for_indexes= true; if (!table->s->tmp_table && need_lock_for_indexes && - wait_while_table_is_used(thd, table, extra_func)) + wait_while_table_is_used(thd, table, extra_func, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) goto err_new_table_cleanup; thd_proc_info(thd, "manage keys"); DEBUG_SYNC(thd, "alter_table_manage_keys"); @@ -6903,6 +6932,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, error= trans_commit_stmt(thd); if (trans_commit_implicit(thd)) error= 1; + /* + If the table was locked, allow one to still run SHOW commands against it + */ + if (table->s->protected_against_usage()) + table->s->allow_access_to_protected_table(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -7114,7 +7148,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (lower_case_table_names) my_casedn_str(files_charset_info, old_name); - if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME)) + if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) { if (pending_inplace_add_index) { diff --git a/sql/sql_time.cc b/sql/sql_time.cc index 57dbd979933..dadf579b2e7 100644 --- a/sql/sql_time.cc +++ b/sql/sql_time.cc @@ -302,6 +302,9 @@ str_to_datetime_with_warn(CHARSET_INFO *cs, make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, str, length, flags & TIME_TIME_ONLY ? 
MYSQL_TIMESTAMP_TIME : ts_type, NullS); + DBUG_EXECUTE_IF("str_to_datetime_warn", + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_YES, str);); return ts_type; } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index d7d902bc6b0..882f7fcca8c 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -701,10 +701,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, thd->security_ctx->priv_host))) { if (check_global_access(thd, SUPER_ACL)) - { - my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); return TRUE; - } } /* diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc index 4b77344c042..d47fb24eaba 100644 --- a/sql/sql_truncate.cc +++ b/sql/sql_truncate.cc @@ -364,7 +364,8 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref, { DEBUG_SYNC(thd, "upgrade_lock_for_truncate"); /* To remove the table from the cache we need an exclusive lock. */ - if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_DROP)) + if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_DROP, + TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) DBUG_RETURN(TRUE); m_ticket_downgrade= table->mdl_ticket; /* Close if table is going to be recreated. */ diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 28b9fe7eacd..cf30d1342c0 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1583,6 +1583,15 @@ int multi_update::prepare(List<Item> ¬_used_values, DBUG_RETURN(thd->is_fatal_error != 0); } +void multi_update::update_used_tables() +{ + Item *item; + List_iterator_fast<Item> it(*values); + while ((item= it++)) + { + item->update_used_tables(); + } +} /* Check if table is safe to update on fly diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 3cfd997b2b8..f4e03ac3d14 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -789,10 +789,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %pure_parser /* We have threads */ /* - Currently there are 173 shift/reduce conflicts. + Currently there are 174 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 173 +%expect 174 /* Comments for TOKENS. @@ -13132,6 +13132,7 @@ keyword: | LANGUAGE_SYM {} | NO_SYM {} | OPEN_SYM {} + | OPTION {} | OPTIONS_SYM {} | OWNER_SYM {} | PARSER_SYM {} diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 9603ca30cfa..aa6d2535b0d 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -86,7 +86,7 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs, *set_warning= 1; } else - found|= ((longlong) 1 << (find - 1)); + found|= 1ULL << (find - 1); if (pos >= end) break; start= pos + mblen; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index fcd2df7b338..6a7466d6cf1 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3783,9 +3783,67 @@ static Sys_var_ulong Sys_debug_binlog_fsync_sleep( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1)); #endif + static Sys_var_harows Sys_expensive_subquery_limit( "expensive_subquery_limit", "The maximum number of rows a subquery may examine in order to be " "executed during optimization and used for constant optimization", SESSION_VAR(expensive_subquery_limit), CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, HA_POS_ERROR), DEFAULT(100), BLOCK_SIZE(1)); + +static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) +{ + longlong previous_val= thd->variables.pseudo_slave_mode; + longlong val= (longlong) var->save_result.ulonglong_value; + bool rli_fake= false; + +#ifndef EMBEDDED_LIBRARY + rli_fake= thd->rli_fake ? 
true : false; +#endif + + if (rli_fake) + { + if (!val) + { +#ifndef EMBEDDED_LIBRARY + delete thd->rli_fake; + thd->rli_fake= NULL; +#endif + } + else if (previous_val && val) + goto ineffective; + else if (!previous_val && val) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "'pseudo_slave_mode' is already ON."); + } + else + { + if (!previous_val && !val) + goto ineffective; + else if (previous_val && !val) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "Slave applier execution mode not active, " + "statement ineffective."); + } + goto end; + +ineffective: + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "'pseudo_slave_mode' change was ineffective."); + +end: + return FALSE; +} +static Sys_var_mybool Sys_pseudo_slave_mode( + "pseudo_slave_mode", + "SET pseudo_slave_mode= 0,1 are commands that mysqlbinlog " + "adds to beginning and end of binary log dumps. While zero " + "value indeed disables, the actual enabling of the slave " + "applier execution mode is done implicitly when a " + "Format_description_event is sent through the session.", + SESSION_ONLY(pseudo_slave_mode), NO_CMD_LINE, DEFAULT(FALSE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_pseudo_slave_mode)); + diff --git a/sql/sys_vars.h b/sql/sys_vars.h index 1729dcefd63..31764aa82a4 100644 --- a/sql/sys_vars.h +++ b/sql/sys_vars.h @@ -547,8 +547,7 @@ public: protected: virtual uchar *session_value_ptr(THD *thd, LEX_STRING *base) { - return thd->security_ctx->proxy_user[0] ? - (uchar *) &(thd->security_ctx->proxy_user[0]) : NULL; + return (uchar*)thd->security_ctx->external_user; } }; diff --git a/sql/table.cc b/sql/table.cc index a011fa845a7..06e424b2cfb 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -324,7 +324,7 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key, share->normalized_path.str= share->path.str; share->normalized_path.length= path_length; - share->version= refresh_version; + share->set_refresh_version(); /* Since alloc_table_share() can be called without any locking (for @@ -1026,7 +1026,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, } } if (j == first_key_parts) - keyinfo->ext_key_flags= keyinfo->flags | HA_NOSAME | HA_EXT_NOSAME; + keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME; } share->ext_key_parts+= keyinfo->ext_key_parts; } diff --git a/sql/table.h b/sql/table.h index 1a567ae75d1..9aa68b05cf2 100644 --- a/sql/table.h +++ b/sql/table.h @@ -825,12 +825,42 @@ struct TABLE_SHARE return table_map_id; } - /** Is this table share being expelled from the table definition cache? */ inline bool has_old_version() const { return version != refresh_version; } + inline bool protected_against_usage() const + { + return version == 0; + } + inline void protect_against_usage() + { + version= 0; + } + /* + This is used only for the case of locked tables, as we want to + allow one to do SHOW commands on them even after ALTER or REPAIR + */ + inline void allow_access_to_protected_table() + { + DBUG_ASSERT(version == 0); + version= 1; + } + /* + Remove from table definition cache at close. + Table can still be opened by SHOW + */ + inline void remove_from_cache_at_close() + { + if (version != 0) /* Don't remove protection */ + version= 1; + } + inline void set_refresh_version() + { + version= refresh_version; + } + /** Convert unrelated members of TABLE_SHARE to one enum representing its type. 
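
The TABLE_SHARE helpers added in the table.h hunk above overload the share's version field with sentinel values: 0 marks the share as protected against any use (e.g. while ALTER or REPAIR holds it under LOCK TABLES), 1 keeps it visible to SHOW while forcing eviction from the definition cache at close, and set_refresh_version() restores the normal value. The following is a compilable sketch of that scheme under the assumption of a heavily simplified class (ShareSketch) that models only the version field.

#include <cassert>
#include <cstdio>

static unsigned long refresh_version = 2;    // global counter, always > 1

class ShareSketch
{
  unsigned long version;
public:
  ShareSketch() : version(refresh_version) {}

  bool has_old_version() const { return version != refresh_version; }

  // version == 0: nobody may use the table (ALTER/REPAIR in progress)
  bool protected_against_usage() const { return version == 0; }
  void protect_against_usage()          { version = 0; }

  // version == 1: still openable by SHOW, but dropped from cache at close
  void allow_access_to_protected_table()
  {
    assert(version == 0);
    version = 1;
  }
  void remove_from_cache_at_close()
  {
    if (version != 0)                    // never override the protection
      version = 1;
  }

  void set_refresh_version() { version = refresh_version; }
};

int main()
{
  ShareSketch s;
  s.protect_against_usage();
  std::printf("protected=%d\n", (int) s.protected_against_usage());   // 1
  s.allow_access_to_protected_table();
  std::printf("evicted at close=%d\n", (int) s.has_old_version());    // 1
}
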
diff --git a/sql/threadpool_unix.cc b/sql/threadpool_unix.cc index da38d64fa4d..94251660e37 100644 --- a/sql/threadpool_unix.cc +++ b/sql/threadpool_unix.cc @@ -29,14 +29,14 @@ #ifdef __linux__ #include <sys/epoll.h> typedef struct epoll_event native_event; -#endif -#if defined (__FreeBSD__) || defined (__APPLE__) +#elif defined(HAVE_KQUEUE) #include <sys/event.h> typedef struct kevent native_event; -#endif -#if defined (__sun) +#elif defined (__sun) #include <port.h> typedef port_event_t native_event; +#else +#error threadpool is not available on this platform #endif /** Maximum number of native events a listener can read in one go */ @@ -285,7 +285,7 @@ static void *native_event_get_userdata(native_event *event) return event->data.ptr; } -#elif defined (__FreeBSD__) || defined (__APPLE__) +#elif defined(HAVE_KQUEUE) int io_poll_create() { return kqueue(); @@ -386,8 +386,6 @@ static void* native_event_get_userdata(native_event *event) { return event->portev_user; } -#else -#error not ported yet to this OS #endif @@ -1247,11 +1245,12 @@ static void connection_abort(connection_t *connection) DBUG_ENTER("connection_abort"); thread_group_t *group= connection->thread_group; + threadpool_remove_connection(connection->thd); + mysql_mutex_lock(&group->mutex); group->connection_count--; mysql_mutex_unlock(&group->mutex); - - threadpool_remove_connection(connection->thd); + my_free(connection); DBUG_VOID_RETURN; } diff --git a/sql/transaction.cc b/sql/transaction.cc index 3359decbcd5..1623cd57d77 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -567,15 +567,19 @@ bool trans_xa_start(THD *thd) my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]); else if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction()) my_error(ER_XAER_OUTSIDE, MYF(0)); - else if (xid_cache_search(thd->lex->xid)) - my_error(ER_XAER_DUPID, MYF(0)); else if (!trans_begin(thd)) { DBUG_ASSERT(thd->transaction.xid_state.xid.is_null()); thd->transaction.xid_state.xa_state= XA_ACTIVE; thd->transaction.xid_state.rm_error= 0; thd->transaction.xid_state.xid.set(thd->lex->xid); - xid_cache_insert(&thd->transaction.xid_state); + if (xid_cache_insert(&thd->transaction.xid_state)) + { + thd->transaction.xid_state.xa_state= XA_NOTR; + thd->transaction.xid_state.xid.null(); + trans_rollback(thd); + DBUG_RETURN(true); + } DBUG_RETURN(FALSE); } @@ -661,6 +665,16 @@ bool trans_xa_commit(THD *thd) if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) { + /* + xid_state.in_thd is always true beside of xa recovery procedure. + Note, that there is no race condition here between xid_cache_search + and xid_cache_delete, since we always delete our own XID + (thd->lex->xid == thd->transaction.xid_state.xid). + The only case when thd->lex->xid != thd->transaction.xid_state.xid + and xid_state->in_thd == 0 is in the function + xa_cache_insert(XID, xa_states), which is called before starting + client connections, and thus is always single-threaded. 
+ */ XID_STATE *xs= xid_cache_search(thd->lex->xid); res= !xs || xs->in_thd; if (res) diff --git a/sql/tztime.cc b/sql/tztime.cc index b16cc65d6bb..000e3ac0518 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1856,7 +1856,7 @@ static Time_zone* tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) { TABLE *table= 0; - TIME_ZONE_INFO *tz_info; + TIME_ZONE_INFO *tz_info= NULL; Tz_names_entry *tmp_tzname; Time_zone *return_val= 0; int res; @@ -1866,7 +1866,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) uchar keybuff[32]; Field *field; String abbr(buff, sizeof(buff), &my_charset_latin1); - char *alloc_buff, *tz_name_buff; + char *alloc_buff= NULL; + char *tz_name_buff= NULL; /* Temporary arrays that are used for loading of data for filling TIME_ZONE_INFO structure @@ -1886,22 +1887,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) DBUG_ENTER("tz_load_from_open_tables"); - /* Prepare tz_info for loading also let us make copy of time zone name */ - if (!(alloc_buff= (char*) alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) + - tz_name->length() + 1))) - { - sql_print_error("Out of memory while loading time zone description"); - return 0; - } - tz_info= (TIME_ZONE_INFO *)alloc_buff; - bzero(tz_info, sizeof(TIME_ZONE_INFO)); - tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO); - /* - By writing zero to the end we guarantee that we can call ptr() - instead of c_ptr() for time zone name. - */ - strmake(tz_name_buff, tz_name->ptr(), tz_name->length()); - /* Let us find out time zone id by its name (there is only one index and it is specifically for this purpose). |
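
The trans_xa_start() hunk above replaces the earlier "search the XID cache, then insert" sequence with a single xid_cache_insert() call whose failure is treated as a duplicate XID and triggers a rollback and state reset. A rough sketch of that insert-and-check pattern follows, using a std::set guarded by a mutex as a stand-in for the server's XID cache; the _sketch names are hypothetical and only illustrate the shape of the change.

#include <iostream>
#include <mutex>
#include <set>
#include <string>

static std::mutex cache_mutex;
static std::set<std::string> xid_cache;      // stand-in for the XID hash

// Returns false on success, true when the XID is already registered.
// Insertion and duplicate detection happen under one lock acquisition,
// so there is no window between a separate search and the insert.
static bool xid_cache_insert_sketch(const std::string &xid)
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  return !xid_cache.insert(xid).second;
}

static bool xa_start_sketch(const std::string &xid)
{
  if (xid_cache_insert_sketch(xid))
  {
    // Duplicate XID: undo whatever state was set up and report the error,
    // analogous to resetting xa_state and rolling back in the patch.
    std::cout << "duplicate XID: " << xid << "\n";
    return true;                             // error
  }
  return false;                              // success: transaction active
}

int main()
{
  xa_start_sketch("trx-1");                  // ok
  xa_start_sketch("trx-1");                  // second call reports the duplicate
}
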