From 357a008ad32704df620411bcb8a7cb26f15662de Mon Sep 17 00:00:00 2001 From: Annamalai Gurusami Date: Thu, 12 Jul 2012 16:42:07 +0530 Subject: Bug #11765218 58157: INNODB LOCKS AN UNMATCHED ROW EVEN THOUGH USING RBR AND RC Description: When scanning and locking rows with < or <=, InnoDB locks the next row even though row based binary logging and read committed is used. Solution: In the handler, when the row is identified to fall outside of the range (as specified in the query predicates), then request the storage engine to unlock the row (if possible). This is done in handler::read_range_first() and handler::read_range_next(). --- sql/handler.cc | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/handler.cc b/sql/handler.cc index 9e43d5aba93..4f5c613a6a4 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -4287,7 +4287,19 @@ int handler::read_range_first(const key_range *start_key, ? HA_ERR_END_OF_FILE : result); - DBUG_RETURN (compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); + if (compare_key(end_range) <= 0) + { + DBUG_RETURN(0); + } + else + { + /* + The last read row does not fall in the range. So request + storage engine to release row lock if possible. + */ + unlock_row(); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } } @@ -4319,7 +4331,20 @@ int handler::read_range_next() result= index_next(table->record[0]); if (result) DBUG_RETURN(result); - DBUG_RETURN(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE); + + if (compare_key(end_range) <= 0) + { + DBUG_RETURN(0); + } + else + { + /* + The last read row does not fall in the range. So request + storage engine to release row lock if possible. 
+ */ + unlock_row(); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } } -- cgit v1.2.1 From ddcd6867e925613c90e699dcf3e51ab765cf07ba Mon Sep 17 00:00:00 2001 From: Chaithra Gopalareddy Date: Wed, 18 Jul 2012 14:36:08 +0530 Subject: Bug#11762052: 54599: BUG IN QUERY PLANNER ON QUERIES WITH "ORDER BY" AND "LIMIT BY" CLAUSE PROBLEM: When a 'limit' clause is specified in a query along with group by and order by, optimizer chooses wrong index there by examining more number of rows than required. However without the 'limit' clause, optimizer chooses the right index. ANALYSIS: With respect to the query specified, range optimizer chooses the first index as there is a range present ( on 'a'). Optimizer then checks for an index which would give records in sorted order for the 'group by' clause. While checking chooses the second index (on 'c,b,a') based on the 'limit' specified and the selectivity of 'quick_condition_rows' (number of rows present in the range) in 'test_if_skip_sort_order' function. But, it fails to consider that an order by clause on a different column will result in scanning the entire index and hence the estimated number of rows calculated above are wrong (which results in choosing the second index). FIX: Do not enforce the 'limit' clause in the call to 'test_if_skip_sort_order' if we are creating a temporary table. Creation of temporary table indicates that there would be more post-processing and hence will need all the rows. This fix is backported from 5.6. This problem is fixed in 5.6 as part of changes for work log #5558 mysql-test/r/subselect.result: Changes for Bug#11762052 results in the correct number of rows. sql/sql_select.cc: Do not pass the actual 'limit' value if 'need_tmp' is true. 
--- sql/sql_select.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index f2007f609e0..c097c4d16ef 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1482,12 +1482,19 @@ JOIN::optimize() DBUG_RETURN(1); } } - + /* + Calculate a possible 'limit' of table rows for 'GROUP BY': 'need_tmp' + implies that there will be more postprocessing so the specified + 'limit' should not be enforced yet in the call to + 'test_if_skip_sort_order'. + */ + const ha_rows limit = need_tmp ? HA_POS_ERROR : unit->select_limit_cnt; + if (!(select_options & SELECT_BIG_RESULT) && ((group_list && (!simple_group || !test_if_skip_sort_order(&join_tab[const_tables], group_list, - unit->select_limit_cnt, 0, + limit, 0, &join_tab[const_tables].table-> keys_in_use_for_group_by))) || select_distinct) && -- cgit v1.2.1 From 1cb513ba6b199c5bd4a2ae159839adb5dd7561a4 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 24 Jul 2012 09:27:00 +0400 Subject: Fixing wrong copyright. Index.xml was modified in 2005, while the copyright notice still mentioned 2003. --- sql/share/charsets/Index.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml index 80b844e2f19..07e7e37b798 100644 --- a/sql/share/charsets/Index.xml +++ b/sql/share/charsets/Index.xml @@ -3,7 +3,7 @@ - Copyright (C) 2003 MySQL AB + Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by -- cgit v1.2.1 From 03993d03a75049d2df659b46f8ec5e263b7ff1bd Mon Sep 17 00:00:00 2001 From: Sujatha Sivakumar Date: Tue, 24 Jul 2012 16:26:16 +0530 Subject: Bug#13961678:MULTI-STATEMENT TRANSACTION REQUIRED MORE THAN 'MAX_BINLOG_CACHE_SIZE' ERROR Problem: ======= MySQL returns following error in win64. 
"ERROR 1197 (HY000): Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again" when user tries to load >4G file even if max_binlog_cache_size set to maximum value. On Linux everything works fine. Analysis: ======== The `max_binlog_cache_size' variable is of type `ulonglong'. This value is set to `ULONGLONG_MAX' at the time of server start up. The above value is stored in an intermediate variable named `saved_max_binlog_cache_size' which is of type `ulong'. In visual c++ complier the `ulong' type is of 4bytes in size and hence the value is getting truncated to '4GB' and the cache is not able to grow beyond 4GB size. The same limitation is observed with "max_binlog_stmt_cache_size" as well. Similar fix has been applied. Fix: === As part of fix the type "ulong" is replaced with "my_off_t" which is of type "ulonglong". mysys/mf_iocache.c: Added debug statement to simulate a scenario where the cache file's current position is set to >4GB sql/log.cc: Replaced the type of `saved_max_binlog_cache_size' from "ulong" to "my_off_t", which is a type def for "ulonglong". --- sql/log.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index fc11dcc77ce..1cc94d2fb02 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -300,7 +300,7 @@ public: before_stmt_pos= MY_OFF_T_UNDEF; } - void set_binlog_cache_info(ulong param_max_binlog_cache_size, + void set_binlog_cache_info(my_off_t param_max_binlog_cache_size, ulong *param_ptr_binlog_cache_use, ulong *param_ptr_binlog_cache_disk_use) { @@ -377,7 +377,7 @@ private: is configured. This corresponds to either . max_binlog_cache_size or max_binlog_stmt_cache_size. 
*/ - ulong saved_max_binlog_cache_size; + my_off_t saved_max_binlog_cache_size; /* Stores a pointer to the status variable that keeps track of the in-memory @@ -415,8 +415,8 @@ private: class binlog_cache_mngr { public: - binlog_cache_mngr(ulong param_max_binlog_stmt_cache_size, - ulong param_max_binlog_cache_size, + binlog_cache_mngr(my_off_t param_max_binlog_stmt_cache_size, + my_off_t param_max_binlog_cache_size, ulong *param_ptr_binlog_stmt_cache_use, ulong *param_ptr_binlog_stmt_cache_disk_use, ulong *param_ptr_binlog_cache_use, -- cgit v1.2.1 From fa0d2df5ff6915427b907450939e6ef3cef9408a Mon Sep 17 00:00:00 2001 From: Thayumanavar Date: Wed, 25 Jul 2012 16:24:18 +0530 Subject: Bug#13699303 - THREAD POOL PLUGIN IGNORES TIMEOUT. PROBLEM: mysql provides a feature where in a session which is idle for a period specified by the wait_timeout variable (whose value is in seconds), the session is closed This feature is not present when we use thread pool. FIX: This patch implements the interface functions which is required to implement the wait_timeout functionality in the thread pool plugin. --- sql/sql_class.cc | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 8931d67dd25..b0d7cac1864 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -254,6 +254,18 @@ PSI_thread *thd_get_psi(THD *thd) return thd->scheduler.m_psi; } +/** + Get net_wait_timeout for THD object + + @param thd THD object + + @retval net_wait_timeout value for thread on THD +*/ +ulong thd_get_net_wait_timeout(THD* thd) +{ + return thd->variables.net_wait_timeout; +} + /** Set reference to Performance Schema object for THD object @@ -422,6 +434,17 @@ void thd_set_net_read_write(THD *thd, uint val) thd->net.reading_or_writing= val; } +/** + Get reading/writing on socket from THD object + @param thd THD object + + @retval net.reading_or_writing value for thread on THD. 
+*/ +uint thd_get_net_read_write(THD *thd) +{ + return thd->net.reading_or_writing; +} + /** Set reference to mysys variable in THD object -- cgit v1.2.1 From d24a78d1ea2b07cd1b3863190b371c7c8ea39a3c Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Thu, 26 Jul 2012 15:05:24 +0200 Subject: Backport of Bug#14171740 65562: STRING::SHRINK SHOULD BE A NO-OP WHEN ALLOCED=0 --- sql/sql_string.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/sql_string.h b/sql/sql_string.h index e5a41352992..6d5e8c46c55 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -225,8 +225,12 @@ public: } bool real_alloc(uint32 arg_length); // Empties old string bool realloc(uint32 arg_length); - inline void shrink(uint32 arg_length) // Shrink buffer + + // Shrink the buffer, but only if it is allocated on the heap. + inline void shrink(uint32 arg_length) { + if (!is_alloced()) + return; if (arg_length < Alloced_length) { char *new_ptr; @@ -242,7 +246,7 @@ public: } } } - bool is_alloced() { return alloced; } + bool is_alloced() const { return alloced; } inline String& operator = (const String &s) { if (&s != this) -- cgit v1.2.1 From 44cd81da86e41c6ac7114ef8dbd31c738eba095d Mon Sep 17 00:00:00 2001 From: Praveenkumar Hulakund Date: Thu, 26 Jul 2012 23:44:43 +0530 Subject: BUG#13868860 - LIMIT '5' IS EXECUTED WITHOUT ERROR WHEN '5' IS PLACE HOLDER AND USE SERVER-SIDE Analysis: LIMIT always takes nonnegative integer constant values. http://dev.mysql.com/doc/refman/5.6/en/select.html So parsing of value '5' for LIMIT in SELECT fails. But, within prepared statement, LIMIT parameters can be specified using '?' markers. 
Value for the parameter can be supplied while executing the prepared statement. Passing string values, float or double value for LIMIT works well from CLI. Because, while setting the value for the parameters from the variable list (added using SET), if the value is for parameter LIMIT then its converted to integer value. But, when prepared statement is executed from the other interfaces as J connectors, or C applications etc. The value for the parameters are sent to the server with execute command. Each item in log has value and the data TYPE. So, While setting parameter value from this log, value is set to all the parameters with the same data type as passed. But here logic to convert value to integer type if its for LIMIT parameter is missing. Because of this,string '5' is set to LIMIT. And the same is logged into the binlog file too. Fix: When executing prepared statement having parameter for CLI it worked fine, as the value set for the parameter is converted to integer. And this failed in other interfaces as J connector,C Applications etc as this conversion is missing. So, as a fix added check while setting value for the parameters. If the parameter is for LIMIT value then its converted to integer value. 
--- sql/sql_prepare.cc | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'sql') diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 27e70aaf843..2afd4085c51 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -785,6 +785,14 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array, param->set_param_func(param, &read_pos, (uint) (data_end - read_pos)); if (param->state == Item_param::NO_VALUE) DBUG_RETURN(1); + + if (param->limit_clause_param && param->item_type != Item::INT_ITEM) + { + param->set_int(param->val_int(), MY_INT64_NUM_DECIMAL_DIGITS); + param->item_type= Item::INT_ITEM; + if (!param->unsigned_flag && param->value.integer < 0) + DBUG_RETURN(1); + } } } /* -- cgit v1.2.1 From 5f2f37cd4164a9e883d3b385e9ba6842c49a1d29 Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Fri, 27 Jul 2012 09:13:10 +0200 Subject: Bug#14111180 HANDLE_FATAL_SIGNAL IN PTR_COMPARE_1 / QUEUE_INSERT Space available for merging was calculated incorrectly. --- sql/filesort.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/filesort.cc b/sql/filesort.cc index e6842cec4a2..3a2102fa62b 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1,5 +1,5 @@ /* - Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -295,8 +295,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, Use also the space previously used by string pointers in sort_buffer for temporary key storage. 
*/ - param.keys=((param.keys*(param.rec_length+sizeof(char*))) / - param.rec_length-1); + param.keys= table_sort.sort_keys_size / param.rec_length; maxbuffer--; // Offset from 0 if (merge_many_buff(¶m,(uchar*) sort_keys,buffpek,&maxbuffer, &tempfile)) -- cgit v1.2.1 From c61abdadcfd977b624c6623f395521369aabb63a Mon Sep 17 00:00:00 2001 From: Chaithra Gopalareddy Date: Sun, 5 Aug 2012 16:29:28 +0530 Subject: Bug #14099846: EXPORT_SET CRASHES DUE TO OVERALLOCATION OF MEMORY Backport the fix from 5.6 to 5.1 Base bug number : 11765562 sql/item_strfunc.cc: In Item_func_export_set::val_str, verify that the size of the end result is within reasonable bounds. --- sql/item_strfunc.cc | 51 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'sql') diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ed14f7f01e9..27483ede032 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -3047,23 +3047,21 @@ err: String* Item_func_export_set::val_str(String* str) { DBUG_ASSERT(fixed == 1); - ulonglong the_set = (ulonglong) args[0]->val_int(); - String yes_buf, *yes; - yes = args[1]->val_str(&yes_buf); - String no_buf, *no; - no = args[2]->val_str(&no_buf); - String *sep = NULL, sep_buf ; + String yes_buf, no_buf, sep_buf; + const ulonglong the_set = (ulonglong) args[0]->val_int(); + const String *yes= args[1]->val_str(&yes_buf); + const String *no= args[2]->val_str(&no_buf); + const String *sep= NULL; uint num_set_values = 64; - ulonglong mask = 0x1; str->length(0); str->set_charset(collation.collation); /* Check if some argument is a NULL value */ if (args[0]->null_value || args[1]->null_value || args[2]->null_value) { - null_value=1; - return 0; + null_value= true; + return NULL; } /* Arg count can only be 3, 4 or 5 here. 
This is guaranteed from the @@ -3076,37 +3074,56 @@ String* Item_func_export_set::val_str(String* str) num_set_values=64; if (args[4]->null_value) { - null_value=1; - return 0; + null_value= true; + return NULL; } /* Fall through */ case 4: if (!(sep = args[3]->val_str(&sep_buf))) // Only true if NULL { - null_value=1; - return 0; + null_value= true; + return NULL; } break; case 3: { /* errors is not checked - assume "," can always be converted */ uint errors; - sep_buf.copy(STRING_WITH_LEN(","), &my_charset_bin, collation.collation, &errors); + sep_buf.copy(STRING_WITH_LEN(","), &my_charset_bin, + collation.collation, &errors); sep = &sep_buf; } break; default: DBUG_ASSERT(0); // cannot happen } - null_value=0; + null_value= false; + + const ulong max_allowed_packet= current_thd->variables.max_allowed_packet; + const uint num_separators= num_set_values > 0 ? num_set_values - 1 : 0; + const ulonglong max_total_length= + num_set_values * max(yes->length(), no->length()) + + num_separators * sep->length(); + + if (unlikely(max_total_length > max_allowed_packet)) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), max_allowed_packet); + null_value= true; + return NULL; + } - for (uint i = 0; i < num_set_values; i++, mask = (mask << 1)) + uint ix; + ulonglong mask; + for (ix= 0, mask=0x1; ix < num_set_values; ++ix, mask = (mask << 1)) { if (the_set & mask) str->append(*yes); else str->append(*no); - if (i != num_set_values - 1) + if (ix != num_separators) str->append(*sep); } return str; -- cgit v1.2.1 From d0766534bd67554d6be36bf00a8dbd5fdf385042 Mon Sep 17 00:00:00 2001 From: Praveenkumar Hulakund Date: Tue, 7 Aug 2012 11:48:36 +0530 Subject: Bug#13058122 - DML, LOCK/UNLOCK TABLES AND SELECT LEAD TO FOREVER MDL LOCK Analysis: ---------- While granting MDL lock for the lock requests in wait queue, first the lock is granted to the high priority lock types and 
then to the low priority lock types. MDL Priority Matrix, +-------------+----+---+---+---+----+-----+ | Locks | | | | | | | | has Priority| | | | | | | | over ---> | S | SR| SW| SU| SNW| SNRW| +-------------+----+---+---+---+----+-----+ | X | + | + | + | + | + | + | +-------------|----|---|---|---|----|-----| | SNRW | - | + | + | - | - | - | +-------------|----|---|---|---|----|-----| | SNW | - | - | + | - | - | - | +-------------+----+---+---+---+----+-----+ Here '+' means, Lock priority is higher. '-' means, Has same priority In the scenario where, *. Lock wait queue has requests of type S/SR/SW/SU. *. And locks of high priority X/SNRW/SNW are requested continuously. In this case, while granting lock, always first high priority lock requests(X/SNRW/SNW) are considered. Low priority locks(S/SR/SW/SU) will not get chance and they will wait forever. In the scenario for which this bug is reported, application executed many LOCK TABLES ... WRITE statements concurrently. These statements request SNRW lock. Also there were some connections trying to execute DML statements requesting SR lock. Since SNRW lock request has higher priority (and as they were too many waiting SNRW requests) lock is always granted to it. So, lock request SR will wait forever, resulting in DML starvation. How is this handled in 5.1? --------------------------- Even in 5.1 we have low priority lock starvation issue. But, in 5.1 thread locking, system variable "max_write_lock_count" can be configured to grant some pending read lock requests. After "max_write_lock_count" of write lock grants all the low priority locks are granted. Why this issue is seen in 5.5/trunk? --------------------------------- In 5.5/trunk MDL locking, "max_write_lock_count" system variable exists but not used in MDL, only thread lock uses it. So no effect of "max_write_lock_count" in MDL locking. This means that starvation of metadata locks is possible even if max_write_lock_count is used. 
Looks like, customer was using "max_write_lock_count" in 5.1 and when upgraded to 5.5, starvation is seen because of not having effect of "max_write_lock_count" in MDL. Fix: ---------- As a fix, support for max_write_lock_count is added to MDL. To maintain write lock counter per MDL_lock object, new member "m_hog_lock_count" is added in MDL_lock. And following logic is added to increment the counter in function reschedule_waiters, (reschedule_waiters function is called while thread is releasing the lock) - After granting lock request from the wait queue. - Check if there are any S/SR/SU/SW exists in the wait queue - If yes then increment the "m_hog_lock_count" And following logic is added in the same function to handle pending S/SU/SR/SW locks - Before granting locks - Check if max_write_lock_count <= m_hog_lock_count - If Yes, then try to grant S/SR/SW/SU locks. (Since all of these has same priority, all locks are granted together. But some lock grant may fail because of grant incompatibility) - Reset m_hog_lock_count if there no low priority lock requests in wait queue. - return Note: -------------------------- In the lock priority matrix explained above, though X has priority over the SNW and SNRW. X locks is taken mostly for RENAME, TRUNCATE, CREATE ... operations. So lock type X may not be requested in loop continuously in real world applications, as compared to other lock request types. So, lock request of type SNW and SNRW are not starved. So, we can grant all S/SR/SU/SW in one shot, without considering SNW & SNRW lock request starvation. ALTER table operations take SU lock first and then upgrade to SNW if required. All S, SR, SW, SU have same lock priority. So while granting SU, request of types SR, SW, S are also granted in one shot. So, lock request of type SU->SNW in loop will not make other low priority lock request to starve. But, when there is request for lock of type SNRW, lock requests of lower priority types are not granted. 
And if SNRW is requested in loop continuously then all S, SR, SW, SU are starved. This patch addresses the latter scenario. When we have S/SR/SW/SU in wait queue and if there are - Continuous SNRW lock requests - OR one or more X and Continuous SNRW lock requests. - OR one SNW and Continuous SNRW lock requests. - OR one SNW, one or more X and continuous SNRW lock requests. in wait queue then, S/SR/SW/SU lock request are starved. --- sql/mdl.cc | 120 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++----- sql/mdl.h | 6 ++++ 2 files changed, 118 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/mdl.cc b/sql/mdl.cc index 81d6c69dca4..1a968f27841 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -378,7 +378,8 @@ public: bool has_pending_conflicting_lock(enum_mdl_type type); - bool can_grant_lock(enum_mdl_type type, MDL_context *requstor_ctx) const; + bool can_grant_lock(enum_mdl_type type, MDL_context *requstor_ctx, + bool ignore_lock_priority) const; inline static MDL_lock *create(const MDL_key *key); @@ -392,14 +393,24 @@ public: virtual bool needs_notification(const MDL_ticket *ticket) const = 0; virtual void notify_conflicting_locks(MDL_context *ctx) = 0; + virtual bitmap_t hog_lock_types_bitmap() const = 0; + /** List of granted tickets for this lock. */ Ticket_list m_granted; /** Tickets for contexts waiting to acquire a lock. */ Ticket_list m_waiting; + + /** + Number of times high priority lock requests have been granted while + low priority lock requests were waiting. + */ + ulong m_hog_lock_count; + public: MDL_lock(const MDL_key *key_arg) : key(key_arg), + m_hog_lock_count(0), m_ref_usage(0), m_ref_release(0), m_is_destroyed(FALSE), @@ -484,6 +495,15 @@ public: } virtual void notify_conflicting_locks(MDL_context *ctx); + /* + In scoped locks, only IX lock request would starve because of X/S. But that + is practically very rare case. So just return 0 from this function. 
+ */ + virtual bitmap_t hog_lock_types_bitmap() const + { + return 0; + } + private: static const bitmap_t m_granted_incompatible[MDL_TYPE_END]; static const bitmap_t m_waiting_incompatible[MDL_TYPE_END]; @@ -536,6 +556,18 @@ public: } virtual void notify_conflicting_locks(MDL_context *ctx); + /* + To prevent starvation, these lock types that are only granted + max_write_lock_count times in a row while other lock types are + waiting. + */ + virtual bitmap_t hog_lock_types_bitmap() const + { + return (MDL_BIT(MDL_SHARED_NO_WRITE) | + MDL_BIT(MDL_SHARED_NO_READ_WRITE) | + MDL_BIT(MDL_EXCLUSIVE)); + } + private: static const bitmap_t m_granted_incompatible[MDL_TYPE_END]; static const bitmap_t m_waiting_incompatible[MDL_TYPE_END]; @@ -1267,6 +1299,41 @@ void MDL_lock::reschedule_waiters() { MDL_lock::Ticket_iterator it(m_waiting); MDL_ticket *ticket; + bool skip_high_priority= false; + bitmap_t hog_lock_types= hog_lock_types_bitmap(); + + if (m_hog_lock_count >= max_write_lock_count) + { + /* + If number of successively granted high-prio, strong locks has exceeded + max_write_lock_count give a way to low-prio, weak locks to avoid their + starvation. + */ + + if ((m_waiting.bitmap() & ~hog_lock_types) != 0) + { + /* + Even though normally when m_hog_lock_count is non-0 there is + some pending low-prio lock, we still can encounter situation + when m_hog_lock_count is non-0 and there are no pending low-prio + locks. This, for example, can happen when a ticket for pending + low-prio lock was removed from waiters list due to timeout, + and reschedule_waiters() is called after that to update the + waiters queue. m_hog_lock_count will be reset to 0 at the + end of this call in such case. + + Note that it is not an issue if we fail to wake up any pending + waiters for weak locks in the loop below. This would mean that + all of them are either killed, timed out or chosen as a victim + by deadlock resolver, but have not managed to remove ticket + from the waiters list yet. 
After tickets will be removed from + the waiters queue there will be another call to + reschedule_waiters() with pending bitmap updated to reflect new + state of waiters queue. + */ + skip_high_priority= true; + } + } /* Find the first (and hence the oldest) waiting request which @@ -1288,7 +1355,16 @@ void MDL_lock::reschedule_waiters() */ while ((ticket= it++)) { - if (can_grant_lock(ticket->get_type(), ticket->get_ctx())) + /* + Skip high-prio, strong locks if earlier we have decided to give way to + low-prio, weaker locks. + */ + if (skip_high_priority && + ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0)) + continue; + + if (can_grant_lock(ticket->get_type(), ticket->get_ctx(), + skip_high_priority)) { if (! ticket->get_ctx()->m_wait.set_status(MDL_wait::GRANTED)) { @@ -1302,6 +1378,13 @@ void MDL_lock::reschedule_waiters() */ m_waiting.remove_ticket(ticket); m_granted.add_ticket(ticket); + + /* + Increase counter of successively granted high-priority strong locks, + if we have granted one. + */ + if ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0) + m_hog_lock_count++; } /* If we could not update the wait slot of the waiter, @@ -1313,6 +1396,24 @@ void MDL_lock::reschedule_waiters() */ } } + + if ((m_waiting.bitmap() & ~hog_lock_types) == 0) + { + /* + Reset number of successively granted high-prio, strong locks + if there are no pending low-prio, weak locks. + This ensures: + - That m_hog_lock_count is correctly reset after strong lock + is released and weak locks are granted (or there are no + other lock requests). + - That situation when SNW lock is granted along with some SR + locks, but SW locks are still blocked are handled correctly. + - That m_hog_lock_count is zero in most cases when there are no pending + weak locks (see comment at the start of this method for example of + exception). This allows to save on checks at the start of this method. 
+ */ + m_hog_lock_count= 0; + } } @@ -1467,8 +1568,9 @@ MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END] = Check if request for the metadata lock can be satisfied given its current state. - @param type_arg The requested lock type. - @param requestor_ctx The MDL context of the requestor. + @param type_arg The requested lock type. + @param requestor_ctx The MDL context of the requestor. + @param ignore_lock_priority Ignore lock priority. @retval TRUE Lock request can be satisfied @retval FALSE There is some conflicting lock. @@ -1480,19 +1582,21 @@ MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END] = bool MDL_lock::can_grant_lock(enum_mdl_type type_arg, - MDL_context *requestor_ctx) const + MDL_context *requestor_ctx, + bool ignore_lock_priority) const { bool can_grant= FALSE; bitmap_t waiting_incompat_map= incompatible_waiting_types_bitmap()[type_arg]; bitmap_t granted_incompat_map= incompatible_granted_types_bitmap()[type_arg]; + /* New lock request can be satisfied iff: - There are no incompatible types of satisfied requests in other contexts - There are no waiting requests which have higher priority - than this request. + than this request when priority was not ignored. */ - if (! (m_waiting.bitmap() & waiting_incompat_map)) + if (ignore_lock_priority || !(m_waiting.bitmap() & waiting_incompat_map)) { if (! 
(m_granted.bitmap() & granted_incompat_map)) can_grant= TRUE; @@ -1788,7 +1892,7 @@ MDL_context::try_acquire_lock_impl(MDL_request *mdl_request, ticket->m_lock= lock; - if (lock->can_grant_lock(mdl_request->type, this)) + if (lock->can_grant_lock(mdl_request->type, this, false)) { lock->m_granted.add_ticket(ticket); diff --git a/sql/mdl.h b/sql/mdl.h index 5c1af23f5e2..d30d30ac2fa 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -859,4 +859,10 @@ extern mysql_mutex_t LOCK_open; extern ulong mdl_locks_cache_size; static const ulong MDL_LOCKS_CACHE_SIZE_DEFAULT = 1024; +/* + Metadata locking subsystem tries not to grant more than + max_write_lock_count high-prio, strong locks successively, + to avoid starving out weak, low-prio locks. +*/ +extern "C" ulong max_write_lock_count; #endif -- cgit v1.2.1 From 5ad8292c63a89db63a54f9cfed6ceb3a35656f7b Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Tue, 7 Aug 2012 18:58:19 +0530 Subject: Bug#13928675 MYSQL CLIENT COPYRIGHT NOTICE MUST SHOW 2012 INSTEAD OF 2011 * Added a new macro to hold the current year : COPYRIGHT_NOTICE_CURRENT_YEAR * Modified ORACLE_WELCOME_COPYRIGHT_NOTICE macro to take the initial year as parameter and pick current year from the above mentioned macro. --- sql/gen_lex_hash.cc | 6 +++--- sql/mysqld.cc | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index 9814814e8db..f126afb9015 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -1,5 +1,5 @@ /* - Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -350,7 +350,7 @@ static void usage(int version) my_progname, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); if (version) return; - puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000, 2011")); + puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("This program generates a perfect hashing function for the sql_lex.cc"); printf("Usage: %s [OPTIONS]\n\n", my_progname); my_print_help(my_long_options); @@ -453,7 +453,7 @@ int main(int argc,char **argv) printf("/*\n\n Do " "not " "edit " "this " "file " "directly!\n\n*/\n"); puts("/*"); - puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000, 2011")); + puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("*/"); /* Broken up to indicate that it's not advice to you, gentle reader. */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d6397280e0d..bc8d4162272 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -7652,7 +7652,7 @@ static void usage(void) if (!default_collation_name) default_collation_name= (char*) default_charset_info->name; print_version(); - puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000, 2011")); + puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("Starts the MySQL database server.\n"); printf("Usage: %s [OPTIONS]\n", my_progname); if (!opt_verbose) -- cgit v1.2.1 From 6592afd5c28c0e7519d76c8c894658a4a17515c0 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Thu, 9 Aug 2012 12:51:37 +0200 Subject: Bug#14342883: SELECT QUERY RETURNS NOT ALL ROWS THAT ARE EXPECTED For non range/list partitioned tables (i.e. HASH/KEY): When prune_partitions finds a multi-range list (or in this test '<>') for a field of the partition index, even if it cannot make any use of the multi-range, it will continue with the next field of the partition index and use that for pruning (even if it the previous field could not be used). 
This results in partitions being pruned away, leaving partitions that only match the last field in the partition index, and will exclude partitions which might match any previous fields. Fixed by skipping rest of partitioning key fields/parts if current key field/part could not be used. Also notice it is the order of the fields in the CREATE TABLE statement that triggers this bug, not the order of fields in primary/unique key or PARTITION BY KEY (). It must not be the last field in the partitioning expression that is not equal (or have a non single point range). I.e. the partitioning index is created with the same field order as in the CREATE TABLE. And for the bug to appear the last field must be a single point and some previous field must be a multi-point range. --- sql/opt_range.cc | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 03f444c22b5..8d221af392b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3071,27 +3071,28 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) ppar->cur_subpart_fields+= ppar->is_subpart_keypart[key_tree_part]; *(ppar->arg_stack_end++)= key_tree; + if (ignore_part_fields) + { + /* + We come here when a condition on the first partitioning + fields led to evaluating the partitioning condition + (due to finding a condition of the type a < const or + b > const). Thus we must ignore the rest of the + partitioning fields but we still want to analyse the + subpartitioning fields. 
+ */ + if (key_tree->next_key_part) + res= find_used_partitions(ppar, key_tree->next_key_part); + else + res= -1; + goto pop_and_go_right; + } + if (key_tree->type == SEL_ARG::KEY_RANGE) { if (ppar->part_info->get_part_iter_for_interval && key_tree->part <= ppar->last_part_partno) { - if (ignore_part_fields) - { - /* - We come here when a condition on the first partitioning - fields led to evaluating the partitioning condition - (due to finding a condition of the type a < const or - b > const). Thus we must ignore the rest of the - partitioning fields but we still want to analyse the - subpartitioning fields. - */ - if (key_tree->next_key_part) - res= find_used_partitions(ppar, key_tree->next_key_part); - else - res= -1; - goto pop_and_go_right; - } /* Collect left and right bound, their lengths and flags */ uchar *min_key= ppar->cur_min_key; uchar *max_key= ppar->cur_max_key; @@ -3332,6 +3333,13 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) res= -1; goto pop_and_go_right; } + /* + No meaning in continuing with rest of partitioning key parts. + Will try to continue with subpartitioning key parts. + */ + ppar->ignore_part_fields= true; + did_set_ignore_part_fields= true; + goto process_next_key_part; } } -- cgit v1.2.1 From 2f30b34095e286877cda7156ae9622a4154147bd Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 9 Aug 2012 15:34:52 +0400 Subject: Bug #14409015 MEMORY LEAK WHEN REFERENCING OUTER FIELD IN HAVING When resolving outer fields, Item_field::fix_outer_fields() creates new Item_refs for each execution of a prepared statement, so these must be allocated in the runtime memroot. The memroot switching before resolving JOIN::having causes these to be allocated in the statement root, leaking memory for each PS execution. sql/item_subselect.cc: addon, fix for 11829691, item could be created in runtime memroot, so we need to use real_item instead. 
--- sql/item.cc | 7 ++++++- sql/item_subselect.cc | 2 +- sql/sql_select.cc | 4 ---- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 356fe4827c8..63215179ac6 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6010,7 +6010,12 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) if (from_field != not_found_field) { Item_field* fld; - if (!(fld= new Item_field(thd, last_checked_context, from_field))) + Query_arena backup, *arena; + arena= thd->activate_stmt_arena_if_needed(&backup); + fld= new Item_field(thd, last_checked_context, from_field); + if (arena) + thd->restore_active_arena(arena, &backup); + if (!fld) goto error; thd->change_item_tree(reference, fld); mark_as_dependent(thd, last_checked_context->select_lex, diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 46d49797b9c..2c91d0573c1 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1135,7 +1135,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, } else { - Item *item= (Item*) select_lex->item_list.head(); + Item *item= (Item*) select_lex->item_list.head()->real_item(); if (select_lex->table_list.elements) { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index c097c4d16ef..042e7563d42 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -528,8 +528,6 @@ JOIN::prepare(Item ***rref_pointer_array, if (having) { - Query_arena backup, *arena; - arena= thd->activate_stmt_arena_if_needed(&backup); nesting_map save_allow_sum_func= thd->lex->allow_sum_func; thd->where="having clause"; thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level; @@ -539,8 +537,6 @@ JOIN::prepare(Item ***rref_pointer_array, having->check_cols(1))); select_lex->having_fix_field= 0; select_lex->having= having; - if (arena) - thd->restore_active_arena(arena, &backup); if (having_fix_rc || thd->is_error()) DBUG_RETURN(-1); /* purecov: inspected */ -- cgit v1.2.1 From 03bfc41bb83210ae4bdf16e6650f6168a2111ac0 Mon Sep 17 00:00:00 
2001 From: Sujatha Sivakumar Date: Tue, 14 Aug 2012 14:11:01 +0530 Subject: Bug#13596613:SHOW SLAVE STATUS GIVES WRONG OUTPUT WITH MASTER-MASTER AND USING SET USE Problem: ======= In a master-master set-up, a master can show a wrong 'SHOW SLAVE STATUS' output. Requirements: - master-master - log_slave_updates This is caused when using SET user-variables and then using it to perform writes. From then on the master that performed the insert will have a SHOW SLAVE STATUS that is wrong and it will never get updated until a write happens on the other master. On "Master A" the "exec_master_log_pos" is not getting updated. Analysis: ======== Slave receives a "User_var" event from the master and after applying the event, when "log_slave_updates" option is enabled the slave tries to write this applied event into its own binary log. At the time of writing this event the slave should use the "originating server-id". But in the above case the server always logs the "user var events" by using its global server-id. Due to this in a "master-master" replication when the event comes back to the originating server the "User_var_event" doesn't get skipped. "User_var_events" are context based events and they are always followed by a query event which marks their end of group. Due to the above mentioned problem with "User_var_event" logging the "User_var_event" never gets skipped whereas its corresponding "query_event" gets skipped. Hence the "User_var" event always waits for the next "query event" and the "Exec_master_log_position" does not get updated properly. Fix: === `MYSQL_BIN_LOG::write' function is used to write events into binary log. Within this function a new object for "User_var_log_event" is created and this new object is used to write the "User_var" event in the binlog. "User var" event is inherited from "Log_event". This "Log_event" has different overloaded constructors. 
When a "THD" object is present "Log_event(thd,...)" constructor should be used to initialise the objects and in the absence of a valid "THD" object "Log_event()" minimal constructor should be used. In the above-mentioned problem the default minimal constructor was always used, which is incorrect. This minimal constructor is replaced with "Log_event(thd,...)". sql/log_event.h: Replaced the default constructor with another constructor which takes "THD" object as an argument. --- sql/log_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/log_event.h b/sql/log_event.h index e755b6a5a41..5030e1c6f3d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -2487,7 +2487,7 @@ public: User_var_log_event(THD* thd_arg, char *name_arg, uint name_len_arg, char *val_arg, ulong val_len_arg, Item_result type_arg, uint charset_number_arg) - :Log_event(), name(name_arg), name_len(name_len_arg), val(val_arg), + :Log_event(thd_arg,0,0), name(name_arg), name_len(name_len_arg), val(val_arg), val_len(val_len_arg), type(type_arg), charset_number(charset_number_arg), deferred(false) { is_null= !val; } -- cgit v1.2.1 From bcee9f1896ab6015e77ea88fde5317f50edaead7 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Wed, 15 Aug 2012 14:31:26 +0200 Subject: Bug#13025132 - PARTITIONS USE TOO MUCH MEMORY The buffer for the current read row from each partition (m_ordered_rec_buffer) used for sorted reads was allocated on open and freed when the ha_partition handler was closed or destroyed. For tables with many partitions and big records this could take up too much valuable memory. Solution is to only allocate the memory when it is needed and free it when no longer needed. I.e. allocate it in index_init and free it in index_end (and to handle failures also free it on reset, close etc.) Also only allocating needed memory, according to partitioning pruning. Manually tested that it does not use as much memory and releases it after queries. 
--- sql/ha_partition.cc | 126 +++++++++++++++++++++++++++++++++++++--------------- sql/ha_partition.h | 13 +++++- 2 files changed, 100 insertions(+), 39 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 77eb8285245..a60a5b2d6dd 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -320,7 +320,7 @@ ha_partition::~ha_partition() for (i= 0; i < m_tot_parts; i++) delete m_file[i]; } - my_free((char*) m_ordered_rec_buffer, MYF(MY_ALLOW_ZERO_PTR)); + destroy_record_priority_queue(); clear_handler_file(); DBUG_VOID_RETURN; @@ -2594,7 +2594,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { char *name_buffer_ptr; int error= HA_ERR_INITIALIZATION; - uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); @@ -2612,32 +2611,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->reclength; - alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS); - alloc_len+= table_share->max_key_length; - if (!m_ordered_rec_buffer) - { - if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) - { - DBUG_RETURN(error); - } - { - /* - We set-up one record per partition and each record has 2 bytes in - front where the partition id is written. This is used by ordered - index_read. - We also set-up a reference to the first record for temporary use in - setting up the scan. 
- */ - char *ptr= (char*)m_ordered_rec_buffer; - uint i= 0; - do - { - int2store(ptr, i); - ptr+= m_rec_length + PARTITION_BYTES_IN_POS; - } while (++i < m_tot_parts); - m_start_key.key= (const uchar*)ptr; - } - } /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) @@ -2657,7 +2630,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (m_is_clone_of) { - uint i; + uint i, alloc_len; DBUG_ASSERT(m_clone_mem_root); /* Allocate an array of handler pointers for the partitions handlers. */ alloc_len= (m_tot_parts + 1) * sizeof(handler*); @@ -2733,12 +2706,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) being opened once. */ clear_handler_file(); - /* - Initialize priority queue, initialized to reading forward. - */ - if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS, - 0, key_rec_cmp, (void*)this))) - goto err_handler; /* Use table_share->ha_data to share auto_increment_value among all handlers @@ -2861,7 +2828,7 @@ int ha_partition::close(void) DBUG_ENTER("ha_partition::close"); DBUG_ASSERT(table->s == table_share); - delete_queue(&m_queue); + destroy_record_priority_queue(); bitmap_free(&m_bulk_insert_started); if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); @@ -4073,6 +4040,87 @@ int ha_partition::rnd_pos_by_record(uchar *record) subset of the partitions are used, then only use those partitions. */ + +/** + Setup the ordered record buffer and the priority queue. +*/ + +bool ha_partition::init_record_priority_queue() +{ + DBUG_ENTER("ha_partition::init_record_priority_queue"); + DBUG_ASSERT(!m_ordered_rec_buffer); + /* + Initialize the ordered record buffer. + */ + if (!m_ordered_rec_buffer) + { + uint map_len, alloc_len; + uint used_parts= 0; + /* Allocate an array for mapping used partitions to their record buffer. 
*/ + map_len= m_tot_parts * PARTITION_BYTES_IN_POS; + alloc_len= map_len; + /* Allocate record buffer for each used partition. */ + alloc_len+= bitmap_bits_set(&m_part_info->used_partitions) * + (m_rec_length + PARTITION_BYTES_IN_POS); + /* Allocate a key for temporary use when setting up the scan. */ + alloc_len+= table_share->max_key_length; + + if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) + DBUG_RETURN(true); + + /* + We set-up one record per partition and each record has 2 bytes in + front where the partition id is written. This is used by ordered + index_read. + We also set-up a reference to the first record for temporary use in + setting up the scan. + No need to initialize the full map, it should only be used partitions + that will be read, so it is better to not set them to find possible + bugs through valgrind. + */ + uint16 *map= (uint16*) m_ordered_rec_buffer; + char *ptr= (char*) m_ordered_rec_buffer + map_len; + uint16 i= 0; + do + { + if (bitmap_is_set(&m_part_info->used_partitions, i)) + { + map[i]= used_parts++; + int2store(ptr, i); + ptr+= m_rec_length + PARTITION_BYTES_IN_POS; + } + } while (++i < m_tot_parts); + m_start_key.key= (const uchar*)ptr; + /* Initialize priority queue, initialized to reading forward. */ + if (init_queue(&m_queue, used_parts, (uint) PARTITION_BYTES_IN_POS, + 0, key_rec_cmp, (void*)m_curr_key_info)) + { + my_free(m_ordered_rec_buffer, MYF(0)); + m_ordered_rec_buffer= NULL; + DBUG_RETURN(true); + } + } + DBUG_RETURN(false); +} + + +/** + Destroy the ordered record buffer and the priority queue. 
+*/ + +void ha_partition::destroy_record_priority_queue() +{ + DBUG_ENTER("ha_partition::destroy_record_priority_queue"); + if (m_ordered_rec_buffer) + { + delete_queue(&m_queue); + my_free(m_ordered_rec_buffer, MYF(0)); + m_ordered_rec_buffer= NULL; + } + DBUG_VOID_RETURN; +} + + /* Initialize handler before start of index scan @@ -4114,6 +4162,10 @@ int ha_partition::index_init(uint inx, bool sorted) } else m_curr_key_info[1]= NULL; + + if (init_record_priority_queue()) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + /* Some handlers only read fields as specified by the bitmap for the read set. For partitioned handlers we always require that the @@ -4188,11 +4240,11 @@ int ha_partition::index_end() do { int tmp; - /* TODO RONM: Change to index_end() when code is stable */ if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) if ((tmp= (*file)->ha_index_end())) error= tmp; } while (*(++file)); + destroy_record_priority_queue(); DBUG_RETURN(error); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 7e6b062846a..a7e072a3b77 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -517,6 +517,8 @@ public: virtual int read_range_next(); private: + bool init_record_priority_queue(); + void destroy_record_priority_queue(); int common_index_read(uchar * buf, bool have_start_key); int common_first_last(uchar * buf); int partition_scan_set_up(uchar * buf, bool idx_read_flag); @@ -524,8 +526,15 @@ private: int handle_unordered_scan_next_partition(uchar * buf); uchar *queue_buf(uint part_id) { - return (m_ordered_rec_buffer + - (part_id * (m_rec_length + PARTITION_BYTES_IN_POS))); + uint16 *part_id_map= (uint16*) m_ordered_rec_buffer; + /* Offset to the partition's record buffer in number of partitions. */ + uint offset= part_id_map[part_id]; + /* + Return the pointer to the partition's record buffer. + First skip the partition id map, and then add the offset. 
+ */ + return (m_ordered_rec_buffer + m_tot_parts * PARTITION_BYTES_IN_POS + + (offset * (m_rec_length + PARTITION_BYTES_IN_POS))); } uchar *rec_buf(uint part_id) { -- cgit v1.2.1 From 5aec4e2b3bbcaea33d719e2e4e94665c4856e413 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 17 Aug 2012 13:14:04 +0400 Subject: Backporting Bug 14100466 from 5.6. --- sql/spatial.cc | 11 ++++++----- sql/spatial.h | 8 ++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'sql') diff --git a/sql/spatial.cc b/sql/spatial.cc index 0d2dd81c71e..07f28855987 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -525,12 +525,13 @@ uint Gis_line_string::init_from_wkb(const char *wkb, uint len, const char *wkb_end; Gis_point p; - if (len < 4) + if (len < 4 || + (n_points= wkb_get_uint(wkb, bo)) < 1 || + n_points > max_n_points) return 0; - n_points= wkb_get_uint(wkb, bo); proper_length= 4 + n_points * POINT_DATA_SIZE; - if (!n_points || len < proper_length || res->reserve(proper_length)) + if (len < proper_length || res->reserve(proper_length)) return 0; res->q_append(n_points); @@ -1072,9 +1073,9 @@ uint Gis_multi_point::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, Gis_point p; const char *wkb_end; - if (len < 4) + if (len < 4 || + (n_points= wkb_get_uint(wkb, bo)) > max_n_points) return 0; - n_points= wkb_get_uint(wkb, bo); proper_size= 4 + n_points * (WKB_HEADER_SIZE + POINT_DATA_SIZE); if (len < proper_size || res->reserve(proper_size)) diff --git a/sql/spatial.h b/sql/spatial.h index 4159d93c7a7..68a6c889615 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -379,6 +379,10 @@ public: class Gis_line_string: public Geometry { + // Maximum number of points in LineString that can fit into String + static const uint32 max_n_points= + (uint32) (UINT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) / + POINT_DATA_SIZE; public: Gis_line_string() {} /* Remove gcc warning */ virtual ~Gis_line_string() {} /* Remove gcc warning */ @@ -435,6 +439,10 @@ public: class 
Gis_multi_point: public Geometry { + // Maximum number of points in MultiPoint that can fit into String + static const uint32 max_n_points= + (uint32) (UINT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) / + (WKB_HEADER_SIZE + POINT_DATA_SIZE); public: Gis_multi_point() {} /* Remove gcc warning */ virtual ~Gis_multi_point() {} /* Remove gcc warning */ -- cgit v1.2.1 From 1ffecedfc3e6ecdfa068c01a588cbe1ceca14ec2 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 17 Aug 2012 14:25:32 +0200 Subject: Bug#13025132 - PARTITIONS USE TOO MUCH MEMORY Additional patch to remove the part_id -> ref_buffer offset. The partitioning id and the associate record buffer can be found without having to calculate it. By initializing it for each used partition, and then reuse the key-buffer from the queue, it is not needed to have such map. --- sql/ha_partition.cc | 31 +++++++++++++------------------ sql/ha_partition.h | 17 ----------------- 2 files changed, 13 insertions(+), 35 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index a60a5b2d6dd..e7629681e02 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4055,13 +4055,9 @@ bool ha_partition::init_record_priority_queue() if (!m_ordered_rec_buffer) { uint map_len, alloc_len; - uint used_parts= 0; - /* Allocate an array for mapping used partitions to their record buffer. */ - map_len= m_tot_parts * PARTITION_BYTES_IN_POS; - alloc_len= map_len; + uint used_parts= bitmap_bits_set(&m_part_info->used_partitions); /* Allocate record buffer for each used partition. */ - alloc_len+= bitmap_bits_set(&m_part_info->used_partitions) * - (m_rec_length + PARTITION_BYTES_IN_POS); + alloc_len= used_parts * (m_rec_length + PARTITION_BYTES_IN_POS); /* Allocate a key for temporary use when setting up the scan. */ alloc_len+= table_share->max_key_length; @@ -4074,18 +4070,13 @@ bool ha_partition::init_record_priority_queue() index_read. 
We also set-up a reference to the first record for temporary use in setting up the scan. - No need to initialize the full map, it should only be used partitions - that will be read, so it is better to not set them to find possible - bugs through valgrind. */ - uint16 *map= (uint16*) m_ordered_rec_buffer; - char *ptr= (char*) m_ordered_rec_buffer + map_len; + char *ptr= (char*) m_ordered_rec_buffer; uint16 i= 0; do { if (bitmap_is_set(&m_part_info->used_partitions, i)) { - map[i]= used_parts++; int2store(ptr, i); ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } @@ -4984,6 +4975,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) uint i; uint j= 0; bool found= FALSE; + uchar *part_rec_buf_ptr= m_ordered_rec_buffer; DBUG_ENTER("ha_partition::handle_ordered_index_scan"); m_top_entry= NO_CURRENT_PART_ID; @@ -4994,7 +4986,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) { if (!(bitmap_is_set(&(m_part_info->used_partitions), i))) continue; - uchar *rec_buf_ptr= rec_buf(i); + uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS; int error; handler *file= m_file[i]; @@ -5041,12 +5033,13 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) /* Initialize queue without order first, simply insert */ - queue_element(&m_queue, j++)= (uchar*)queue_buf(i); + queue_element(&m_queue, j++)= part_rec_buf_ptr; } else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { DBUG_RETURN(error); } + part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } if (found) { @@ -5109,18 +5102,19 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) { int error; uint part_id= m_top_entry; + uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; handler *file= m_file[part_id]; DBUG_ENTER("ha_partition::handle_ordered_next"); if (m_index_scan_type == partition_read_range) { error= file->read_range_next(); - memcpy(rec_buf(part_id), table->record[0], m_rec_length); + 
memcpy(rec_buf, table->record[0], m_rec_length); } else if (!is_next_same) - error= file->index_next(rec_buf(part_id)); + error= file->index_next(rec_buf); else - error= file->index_next_same(rec_buf(part_id), m_start_key.key, + error= file->index_next_same(rec_buf, m_start_key.key, m_start_key.length); if (error) { @@ -5163,10 +5157,11 @@ int ha_partition::handle_ordered_prev(uchar *buf) { int error; uint part_id= m_top_entry; + uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; handler *file= m_file[part_id]; DBUG_ENTER("ha_partition::handle_ordered_prev"); - if ((error= file->index_prev(rec_buf(part_id)))) + if ((error= file->index_prev(rec_buf))) { if (error == HA_ERR_END_OF_FILE) { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index a7e072a3b77..16d8f27bd71 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -524,23 +524,6 @@ private: int partition_scan_set_up(uchar * buf, bool idx_read_flag); int handle_unordered_next(uchar * buf, bool next_same); int handle_unordered_scan_next_partition(uchar * buf); - uchar *queue_buf(uint part_id) - { - uint16 *part_id_map= (uint16*) m_ordered_rec_buffer; - /* Offset to the partition's record buffer in number of partitions. */ - uint offset= part_id_map[part_id]; - /* - Return the pointer to the partition's record buffer. - First skip the partition id map, and then add the offset. - */ - return (m_ordered_rec_buffer + m_tot_parts * PARTITION_BYTES_IN_POS + - (offset * (m_rec_length + PARTITION_BYTES_IN_POS))); - } - uchar *rec_buf(uint part_id) - { - return (queue_buf(part_id) + - PARTITION_BYTES_IN_POS); - } int handle_ordered_index_scan(uchar * buf, bool reverse_order); int handle_ordered_next(uchar * buf, bool next_same); int handle_ordered_prev(uchar * buf); -- cgit v1.2.1 From 5d83889791c138955ec0ab61967e8d0dcdede871 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Mon, 20 Aug 2012 12:39:36 +0200 Subject: Bug#13025132 - PARTITIONS USE TOO MUCH MEMORY pre-push fix, removed unused variable. 
--- sql/ha_partition.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index e7629681e02..4e6f5984934 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4054,7 +4054,7 @@ bool ha_partition::init_record_priority_queue() */ if (!m_ordered_rec_buffer) { - uint map_len, alloc_len; + uint alloc_len; uint used_parts= bitmap_bits_set(&m_part_info->used_partitions); /* Allocate record buffer for each used partition. */ alloc_len= used_parts * (m_rec_length + PARTITION_BYTES_IN_POS); -- cgit v1.2.1 From a16e00a6c4bcb4308ce07e56438151a5ad135027 Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Thu, 23 Aug 2012 16:29:41 +0200 Subject: Bug#14463247 ORDER BY SUBQUERY REFERENCING OUTER ALIAS FAILS Documentation for class Item_outer_ref was wrong: (*ref) may point to Item_field as well (see e.g. Item_outer_ref::fix_fields) So this casting in get_store_key() was wrong: (*(Item_ref**)((Item_ref*)keyuse->val)->ref)->ref_type() --- sql/item.h | 1 + sql/sql_select.cc | 38 ++++++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/item.h b/sql/item.h index bd43689e91b..71eb725e76e 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2676,6 +2676,7 @@ public: resolved is a grouping one. After it has been fixed the ref field will point to either an Item_ref or an Item_direct_ref object which will be used to access the field. + The ref field may also point to an Item_field instance. See also comments for the fix_inner_refs() and the Item_field::fix_outer_field() functions. 
*/ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 7169a834ff7..808af9d69ab 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -6019,19 +6019,33 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, key_part->length, keyuse->val); } - else if (keyuse->val->type() == Item::FIELD_ITEM || - (keyuse->val->type() == Item::REF_ITEM && - ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF && - (*(Item_ref**)((Item_ref*)keyuse->val)->ref)->ref_type() == - Item_ref::DIRECT_REF && - keyuse->val->real_item()->type() == Item::FIELD_ITEM)) + + Item_field *field_item= NULL; + if (keyuse->val->type() == Item::FIELD_ITEM) + field_item= static_cast(keyuse->val->real_item()); + else if (keyuse->val->type() == Item::REF_ITEM) + { + Item_ref *item_ref= static_cast(keyuse->val); + if (item_ref->ref_type() == Item_ref::OUTER_REF) + { + if ((*item_ref->ref)->type() == Item::FIELD_ITEM) + field_item= static_cast(item_ref->real_item()); + else if ((*(Item_ref**)(item_ref)->ref)->ref_type() + == Item_ref::DIRECT_REF + && + item_ref->real_item()->type() == Item::FIELD_ITEM) + field_item= static_cast(item_ref->real_item()); + } + } + if (field_item) return new store_key_field(thd, - key_part->field, - key_buff + maybe_null, - maybe_null ? key_buff : 0, - key_part->length, - ((Item_field*) keyuse->val->real_item())->field, - keyuse->val->full_name()); + key_part->field, + key_buff + maybe_null, + maybe_null ? key_buff : 0, + key_part->length, + field_item->field, + keyuse->val->full_name()); + return new store_key_item(thd, key_part->field, key_buff + maybe_null, -- cgit v1.2.1 From 17695cb4ffcddb9634a9e27c459eab943ceae36e Mon Sep 17 00:00:00 2001 From: Gopal Shankar Date: Fri, 24 Aug 2012 09:51:42 +0530 Subject: Bug#14364558 ASSERT `TABLE_LIST->PRELOCKING_PLACEHOLDER==FALSE' FAILED IN CHECK_LOCK_AND_ST Problem: -------- lock_tables() is supposed to invoke check_lock_and_start_stmt() for TABLE_LIST which are directly used by top level statement. 
TABLE_LIST->prelocking_placeholder is set only for TABLE_LIST which are used indirectly by stored programs invoked by top level statement. Hence check_lock_and_start_stmt() should have TABLE_LIST->prelocking_placeholder==false always, but it is observed that this assert fails. The failure is found during RQG test rqg_signal_resignal. Analysis: --------- open_tables() invokes open_and_process_routines() where it finds all the TABLE_LIST that belong to the routine and adds it to thd->lex->query_tables. During this process if the open_and_process_routines() fail for some reason, we are supposed to chop-off all the TABLE_LIST found during calls to open_and_process_routines(). But, in practice this is not happening. thd->lex->query_tables_own_last is supposed to point to a node in thd->lex->query_tables, which would be the first TABLE_LIST used indirectly by stored programs invoked by top level statement. This is found to be not set correctly when we plan to chop-off TABLE_LIST's, when open_and_process_routines() failed. close_tables_for_reopen() does chop-off all the TABLE_LIST added after thd->lex->query_tables_own_last. This is invoked upon error in open_and_process_routines(). This call would not work as expected as thd->lex->query_tables_own_last is not set, or is not set correctly. Further, when open_tables() restarts the process of finding TABLE_LIST belonging to stored programs, and as the thd->lex->query_tables_own_last points to an incorrect node, there is a possibility of a new iteration setting the thd->lex->query_tables_own_last past some old nodes that belong to stored programs, added earlier and not removed. Later when open_tables() completes, lock_tables() ends up invoking check_lock_and_start_stmt() for TABLE_LIST which belong to stored programs, which is not expected behavior and hence we hit the assert TABLE_LIST->prelocking_placeholder==false. 
Due to above behavior, if a user application tries to execute a SQL statement which invokes some stored function and if the lock grant on stored function fails due to a deadlock, then mysqld crashes. Fix: ---- open_tables() remembers save_query_tables_last which points to thd-lex->query_tables_last before calls to open_and_process_routines(). If there is no known thd->lex->query_tables_own_last set, we are now setting thd->lex->query_tables_own_last to save_query_tables_last. This will make sure that the call to close_tables_for_reopen() will chop-off the list correctly, in other words we now remove all the nodes added to thd->lex->query_tables, by previous calls to open_and_process_routines(). Further, it is found that the problem exists starting from 5.5, due to a code refactoring effort related to open_tables(). Hence, the fix will be pushed in 5.5, 5.6 and trunk. --- sql/sql_base.cc | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/sql_base.cc b/sql/sql_base.cc index efd9c4dfd4a..a7d04f12341 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4955,8 +4955,6 @@ restart: */ if (thd->locked_tables_mode <= LTM_LOCK_TABLES) { - bool need_prelocking= FALSE; - TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last; /* Process elements of the prelocking set which are present there since parsing stage or were added to it by invocations of @@ -4969,10 +4967,19 @@ restart: for (Sroutine_hash_entry *rt= *sroutine_to_open; rt; sroutine_to_open= &rt->next, rt= rt->next) { + bool need_prelocking= false; + TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last; + error= open_and_process_routine(thd, thd->lex, rt, prelocking_strategy, has_prelocking_list, &ot_ctx, &need_prelocking); + if (need_prelocking && ! thd->lex->requires_prelocking()) + thd->lex->mark_as_requiring_prelocking(save_query_tables_last); + + if (need_prelocking && ! 
*start) + *start= thd->lex->query_tables; + if (error) { if (ot_ctx.can_recover_from_failed_open()) @@ -4993,12 +5000,6 @@ restart: goto err; } } - - if (need_prelocking && ! thd->lex->requires_prelocking()) - thd->lex->mark_as_requiring_prelocking(save_query_tables_last); - - if (need_prelocking && ! *start) - *start= thd->lex->query_tables; } } @@ -5271,6 +5272,12 @@ static bool check_lock_and_start_stmt(THD *thd, thr_lock_type lock_type; DBUG_ENTER("check_lock_and_start_stmt"); + /* + Prelocking placeholder is not set for TABLE_LIST that + are directly used by TOP level statement. + */ + DBUG_ASSERT(table_list->prelocking_placeholder == false); + /* TL_WRITE_DEFAULT and TL_READ_DEFAULT are supposed to be parser only types of locks so they should be converted to appropriate other types -- cgit v1.2.1 From df2bdd6063e1a9a05be0048a309288dc4d7a8ce4 Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Fri, 24 Aug 2012 10:17:08 +0200 Subject: Bug#14498355: DEPRECATION WARNINGS SHOULD NOT CONTAIN MYSQL VERSION NUMBERS If a system variable was declared as deprecated without mention of an alternative, the message would look funny, e.g. for @@delayed_insert_limit: Warning 1287 '@@delayed_insert_limit' is deprecated and will be removed in MySQL . The message was meant to display the version number, but it's not possible to give one when declaring a system variable. The fix does two things: 1) The definition of the message ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT is changed so that it does not display a version number. I.e. in English the message now reads: Warning 1287 The syntax '@@delayed_insert_limit' is deprecated and will be removed in a future version. 2) The message ER_WARN_DEPRECATED_SYNTAX_WITH_VER is discontinued in favor of ER_WARN_DEPRECATED_SYNTAX for system variables. This change was already done in versions 5.6 and above as part of wl#5265. This part is simply back-ported from the worklog. 
--- sql/set_var.cc | 37 ++++++++++------------ sql/set_var.h | 4 +-- sql/share/errmsg-utf8.txt | 4 +-- sql/sql_plugin.cc | 2 +- sql/sys_vars.cc | 12 +++---- sql/sys_vars.h | 80 +++++++++++++++++++++++------------------------ 6 files changed, 68 insertions(+), 71 deletions(-) (limited to 'sql') diff --git a/sql/set_var.cc b/sql/set_var.cc index 4cdee8e1258..231fbb47d35 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -134,9 +134,9 @@ void sys_var_end() put your additional checks here @param on_update_func a function to be called at the end of sys_var::update, any post-update activity should happen here - @param deprecated_version if not 0 - when this variable will go away - @param substitute if not 0 - what one should use instead when this - deprecated variable + @param substitute If non-NULL, this variable is deprecated and the + string describes what one should use instead. If an empty string, + the variable is deprecated but no replacement is offered. @param parse_flag either PARSE_EARLY or PARSE_NORMAL */ sys_var::sys_var(sys_var_chain *chain, const char *name_arg, @@ -146,12 +146,12 @@ sys_var::sys_var(sys_var_chain *chain, const char *name_arg, PolyLock *lock, enum binlog_status_enum binlog_status_arg, on_check_function on_check_func, on_update_function on_update_func, - uint deprecated_version, const char *substitute, - int parse_flag) : + const char *substitute, int parse_flag) : next(0), binlog_status(binlog_status_arg), flags(flags_arg), m_parse_flag(parse_flag), show_val_type(show_val_type_arg), guard(lock), offset(off), on_check(on_check_func), on_update(on_update_func), + deprecation_substitute(substitute), is_os_charset(FALSE) { /* @@ -177,12 +177,6 @@ sys_var::sys_var(sys_var_chain *chain, const char *name_arg, option.value= (uchar **)global_var_ptr(); option.def_value= def_val; - deprecated.version= deprecated_version; - deprecated.substitute= substitute; - DBUG_ASSERT((deprecated_version != 0) || (substitute == 0)); - 
DBUG_ASSERT(deprecated_version % 100 == 0); - DBUG_ASSERT(!deprecated_version || MYSQL_VERSION_ID < deprecated_version); - if (chain->last) chain->last->next= this; else @@ -277,21 +271,24 @@ bool sys_var::set_default(THD *thd, enum_var_type type) void sys_var::do_deprecated_warning(THD *thd) { - if (deprecated.version) + if (deprecation_substitute != NULL) { - char buf1[NAME_CHAR_LEN + 3], buf2[10]; + char buf1[NAME_CHAR_LEN + 3]; strxnmov(buf1, sizeof(buf1)-1, "@@", name.str, 0); - my_snprintf(buf2, sizeof(buf2), "%d.%d", deprecated.version/100/100, - deprecated.version/100%100); - uint errmsg= deprecated.substitute - ? ER_WARN_DEPRECATED_SYNTAX_WITH_VER - : ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT; + + /* + if deprecation_substitute is an empty string, + there is no replacement for the syntax + */ + uint errmsg= deprecation_substitute[0] == '\0' + ? ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT + : ER_WARN_DEPRECATED_SYNTAX; if (thd) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX, ER(errmsg), - buf1, buf2, deprecated.substitute); + buf1, deprecation_substitute); else - sql_print_warning(ER_DEFAULT(errmsg), buf1, buf2, deprecated.substitute); + sql_print_warning(ER_DEFAULT(errmsg), buf1, deprecation_substitute); } } diff --git a/sql/set_var.h b/sql/set_var.h index 041c40fdca4..f0d90cb0d63 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -82,7 +82,7 @@ protected: ptrdiff_t offset; ///< offset to the value from global_system_variables on_check_function on_check; on_update_function on_update; - struct { uint version; const char *substitute; } deprecated; + const char *const deprecation_substitute; bool is_os_charset; ///< true if the value is in character_set_filesystem public: @@ -91,7 +91,7 @@ public: enum get_opt_arg_type getopt_arg_type, SHOW_TYPE show_val_type_arg, longlong def_val, PolyLock *lock, enum binlog_status_enum binlog_status_arg, on_check_function on_check_func, on_update_function on_update_func, - uint 
deprecated_version, const char *substitute, int parse_flag); + const char *substitute, int parse_flag); virtual ~sys_var() {} diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 263dd0ffeb7..e7ef0f74f1e 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -6343,8 +6343,8 @@ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT ER_PATH_LENGTH eng "The path specified for %.64s is too long." ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT - eng "The syntax '%s' is deprecated and will be removed in MySQL %s." - ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt." + eng "'%s' is deprecated and will be removed in a future release." + ger "'%s' ist veraltet und wird in einer zukünftigen Version entfernt werden." ER_WRONG_NATIVE_TABLE_STRUCTURE eng "Native table '%-.64s'.'%-.64s' has the wrong structure" diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 47827e0e567..1dbc76537da 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -223,7 +223,7 @@ public: (plugin_var_arg->flags & PLUGIN_VAR_THDLOCAL ? SESSION : GLOBAL) | (plugin_var_arg->flags & PLUGIN_VAR_READONLY ? 
READONLY : 0), 0, -1, NO_ARG, pluginvar_show_type(plugin_var_arg), 0, 0, - VARIABLE_NOT_IN_BINLOG, 0, 0, 0, 0, PARSE_NORMAL), + VARIABLE_NOT_IN_BINLOG, NULL, NULL, NULL, PARSE_NORMAL), plugin_var(plugin_var_arg), orig_pluginvar_name(plugin_var_arg->name) { plugin_var->name= name_arg; } sys_var_pluginvar *cast_pluginvar() { return this; } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index e0523989f9d..06f97af2ade 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -142,7 +142,7 @@ static bool update_keycache_param(THD *thd, KEY_CACHE *key_cache, #define PFS_TRAILING_PROPERTIES \ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL), \ - 0, NULL, sys_var::PARSE_EARLY + NULL, sys_var::PARSE_EARLY static Sys_var_mybool Sys_pfs_enabled( "performance_schema", @@ -1288,7 +1288,7 @@ static Sys_var_harows Sys_sql_max_join_size( SESSION_VAR(max_join_size), NO_CMD_LINE, VALID_RANGE(1, HA_POS_ERROR), DEFAULT(HA_POS_ERROR), BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(fix_max_join_size), DEPRECATED(70000, 0)); + ON_UPDATE(fix_max_join_size), DEPRECATED("")); static Sys_var_ulong Sys_max_long_data_size( "max_long_data_size", @@ -1707,7 +1707,7 @@ static Sys_var_ulong Sys_rpl_recovery_rank( GLOBAL_VAR(rpl_recovery_rank), CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, ULONG_MAX), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0), - DEPRECATED(70000, 0)); + DEPRECATED("")); static Sys_var_ulong Sys_range_alloc_block_size( "range_alloc_block_size", @@ -2288,7 +2288,7 @@ static Sys_var_mybool Sys_engine_condition_pushdown( CMD_LINE(OPT_ARG, OPT_ENGINE_CONDITION_PUSHDOWN), DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(fix_engine_condition_pushdown), - DEPRECATED(70000, "'@@optimizer_switch'")); + DEPRECATED("'@@optimizer_switch'")); static Sys_var_plugin Sys_default_storage_engine( "default_storage_engine", "The default storage engine for new tables", @@ -2963,7 +2963,7 @@ static Sys_var_mybool 
Sys_log( "log", "Alias for --general-log. Deprecated", GLOBAL_VAR(opt_log), NO_CMD_LINE, DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(fix_log_state), DEPRECATED(70000, "'@@general_log'")); + ON_UPDATE(fix_log_state), DEPRECATED("'@@general_log'")); static Sys_var_mybool Sys_slow_query_log( "slow_query_log", @@ -2980,7 +2980,7 @@ static Sys_var_mybool Sys_log_slow( "Alias for --slow-query-log. Deprecated", GLOBAL_VAR(opt_slow_log), NO_CMD_LINE, DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(fix_log_state), DEPRECATED(70000, "'@@slow_query_log'")); + ON_UPDATE(fix_log_state), DEPRECATED("'@@slow_query_log'")); static bool fix_log_state(sys_var *self, THD *thd, enum_var_type type) { diff --git a/sql/sys_vars.h b/sql/sys_vars.h index a69a91f7eb7..ca6e7d40b0e 100644 --- a/sql/sys_vars.h +++ b/sql/sys_vars.h @@ -58,7 +58,7 @@ @@foreign_key_checks <-> OPTION_NO_FOREIGN_KEY_CHECKS */ #define REVERSE(X) ~(X) -#define DEPRECATED(X, Y) X, Y +#define DEPRECATED(X) X #define session_var(THD, TYPE) (*(TYPE*)session_var_ptr(THD)) #define global_var(TYPE) (*(TYPE*)global_var_ptr()) @@ -108,11 +108,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOWT, def_val, lock, binlog_status_arg, - on_check_func, on_update_func, deprecated_version, + on_check_func, on_update_func, substitute, parse_flag) { option.var_type= ARGT; @@ -196,11 +196,11 @@ public: ulonglong def_val, PolyLock *lock, enum binlog_status_enum binlog_status_arg, on_check_function on_check_func, on_update_function on_update_func, - uint deprecated_version, const char *substitute, int parse_flag= PARSE_NORMAL) + const char *substitute, int parse_flag= PARSE_NORMAL) : 
sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, show_val_type_arg, def_val, lock, binlog_status_arg, on_check_func, - on_update_func, deprecated_version, substitute, parse_flag) + on_update_func, substitute, parse_flag) { for (typelib.count= 0; values[typelib.count]; typelib.count++) /*no-op */; typelib.name=""; @@ -263,11 +263,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_typelib(name_arg, comment, flag_args, off, getopt, SHOW_CHAR, values, def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute) + substitute) { option.var_type= GET_ENUM; global_var(ulong)= def_val; @@ -310,12 +310,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : Sys_var_typelib(name_arg, comment, flag_args, off, getopt, SHOW_MY_BOOL, bool_values, def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { option.var_type= GET_BOOL; global_var(my_bool)= def_val; @@ -366,12 +366,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_CHAR_PTR, (intptr)def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { is_os_charset= is_os_charset_arg == IN_FS_CHARSET; 
/* @@ -460,7 +460,7 @@ public: : sys_var(&all_sys_vars, name_arg, comment, sys_var::READONLY+sys_var::ONLY_SESSION, 0, -1, NO_ARG, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG, - NULL, NULL, 0, NULL, PARSE_NORMAL) + NULL, NULL, NULL, PARSE_NORMAL) { is_os_charset= is_os_charset_arg == IN_FS_CHARSET; option.var_type= GET_STR; @@ -533,10 +533,10 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_charptr(name_arg, comment, flag_args, off, sizeof(char*), getopt, is_os_charset_arg, def_val, lock, binlog_status_arg, - on_check_func, on_update_func, deprecated_version, substitute) + on_check_func, on_update_func, substitute) { global_var(LEX_STRING).length= strlen(def_val); DBUG_ASSERT(size == sizeof(LEX_STRING)); @@ -573,12 +573,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, 0, getopt.id, getopt.arg_type, SHOW_CHAR, (intptr)def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { option.var_type= GET_NO_ARG; } bool do_check(THD *thd, set_var *var) { @@ -658,11 +658,11 @@ public: enum binlog_status_enum binlog_status_arg, on_check_function on_check_func, keycache_update_function on_update_func, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_ulonglong(name_arg, comment, flag_args, off, size, getopt, min_val, max_val, def_val, block_size, lock, binlog_status_arg, on_check_func, 0, - deprecated_version, substitute), + substitute), keycache_update(on_update_func) { option.var_type|= GET_ASK_ADDR; 
@@ -726,12 +726,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_DOUBLE, (longlong) double2ulonglong(def_val), lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { option.var_type= GET_DOUBLE; option.min_value= (longlong) double2ulonglong(min_val); @@ -791,11 +791,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_uint(name_arg, comment, SESSION, off, size, getopt, min_val, max_val, def_val, block_size, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute) + substitute) { } uchar *session_value_ptr(THD *thd, LEX_STRING *base) { @@ -833,11 +833,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_typelib(name_arg, comment, flag_args, off, getopt, SHOW_CHAR, values, def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute) + substitute) { option.var_type= GET_FLAGSET; global_var(ulonglong)= def_val; @@ -944,11 +944,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_typelib(name_arg, comment, flag_args, off, getopt, SHOW_CHAR, values, def_val, 
lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute) + substitute) { option.var_type= GET_SET; global_var(ulonglong)= def_val; @@ -1050,12 +1050,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_CHAR, (intptr)def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag), + substitute, parse_flag), plugin_type(plugin_type_arg) { option.var_type= GET_STR; @@ -1164,12 +1164,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, 0, getopt.id, getopt.arg_type, SHOW_CHAR, (intptr)def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { DBUG_ASSERT(scope() == ONLY_SESSION); option.var_type= GET_NO_ARG; @@ -1257,11 +1257,11 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_typelib(name_arg, comment, flag_args, off, getopt, SHOW_MY_BOOL, bool_values, def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute) + substitute) { option.var_type= GET_BOOL; reverse_semantics= my_count_bits(bitmask_arg) > 1; @@ -1330,11 +1330,11 @@ public: on_check_function on_check_func, session_special_update_function update_func_arg, 
session_special_read_function read_func_arg, - uint deprecated_version=0, const char *substitute=0) + const char *substitute=0) : Sys_var_ulonglong(name_arg, comment, flag_args, 0, sizeof(ulonglong), getopt, min_val, max_val, 0, block_size, lock, binlog_status_arg, on_check_func, 0, - deprecated_version, substitute), + substitute), read_func(read_func_arg), update_func(update_func_arg) { DBUG_ASSERT(scope() == ONLY_SESSION); @@ -1383,12 +1383,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_CHAR, 0, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { DBUG_ASSERT(scope() == GLOBAL); DBUG_ASSERT(getopt.id == -1); @@ -1452,12 +1452,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_CHAR, (intptr)def_val, lock, binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag), + substitute, parse_flag), name_offset(name_off) { option.var_type= GET_STR; @@ -1525,12 +1525,12 @@ public: enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, on_check_function on_check_func=0, on_update_function on_update_func=0, - uint deprecated_version=0, const char *substitute=0, + const char *substitute=0, int parse_flag= PARSE_NORMAL) : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id, getopt.arg_type, SHOW_CHAR, (intptr)def_val, lock, 
binlog_status_arg, on_check_func, on_update_func, - deprecated_version, substitute, parse_flag) + substitute, parse_flag) { DBUG_ASSERT(getopt.id == -1); DBUG_ASSERT(size == sizeof(Time_zone *)); -- cgit v1.2.1 From 8c239b09a27c3510b675e6a8b9370a25ffb6986d Mon Sep 17 00:00:00 2001 From: Ashish Agarwal Date: Fri, 24 Aug 2012 14:55:47 +0530 Subject: Bug#14363985: MYSQLD CRASHED WHEN DISABL AND ENABLE AUDI PLUGIN WHEN DDL OPERATION HAPPENING PROBLEM: While unloading the plugin, state is not checked before it is to be reaped. This can lead to simultaneous free of plugin memory by more than one thread. Multiple deallocation leads to server crash. In the present bug two threads deallocate the alog_log plugin. SOLUTION: A check is added to ensure that only one thread is unloading the plugin. NOTE: No mtr test is added as it requires multiple threads to access critical section. debug_sync cannot be used in the current senario because we dont have access to thread pointer in some of the plugin functions. IMHO no test case in the current time frame. 
--- sql/sql_plugin.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 47827e0e567..14668125599 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1900,7 +1900,8 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name) mysql_audit_acquire_plugins(thd, MYSQL_AUDIT_GENERAL_CLASS); mysql_mutex_lock(&LOCK_plugin); - if (!(plugin= plugin_find_internal(name, MYSQL_ANY_PLUGIN))) + if (!(plugin= plugin_find_internal(name, MYSQL_ANY_PLUGIN)) || + plugin->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_DYING)) { my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str); goto err; -- cgit v1.2.1 From a619bfad30c13207fb0453a85af5740846186900 Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Tue, 28 Aug 2012 16:13:03 +0200 Subject: Bug#14549809 LINKING PROBLEM IN 5.5.28 BUILDS WITH THREADPOOL PLUGIN The use of Thread_iterator did not work on windows (linking problems). Solution: Change the interface between the thread_pool and the server to only use simple free functions. 
This patch is for 5.5 only (mimicks similar solution in 5.6) --- sql/mysqld.cc | 15 +++++++++++++++ sql/sql_list.h | 4 ++++ 2 files changed, 19 insertions(+) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d391918c42c..cfc74804f4e 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -601,6 +601,21 @@ I_List threads; Rpl_filter* rpl_filter; Rpl_filter* binlog_filter; +THD *first_global_thread() +{ + if (threads.is_empty()) + return NULL; + return threads.head(); +} + +THD *next_global_thread(THD *thd) +{ + if (threads.is_last(thd)) + return NULL; + struct ilink *next= thd->next; + return static_cast(next); +} + struct system_variables global_system_variables; struct system_variables max_system_variables; struct system_status_var global_status_var; diff --git a/sql/sql_list.h b/sql/sql_list.h index 769f44274de..c68ecc07e11 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -585,6 +585,9 @@ public: inline void empty() { first= &last; last.prev= &first; } base_ilist() { empty(); } inline bool is_empty() { return first == &last; } + // Returns true if p is the last "real" object in the list, + // i.e. p->next points to the sentinel. + inline bool is_last(ilink *p) { return p->next == NULL || p->next == &last; } inline void append(ilink *a) { first->prev= &a->next; @@ -660,6 +663,7 @@ class I_List :private base_ilist { public: I_List() :base_ilist() {} + inline bool is_last(T *p) { return base_ilist::is_last(p); } inline void empty() { base_ilist::empty(); } inline bool is_empty() { return base_ilist::is_empty(); } inline void append(T* a) { base_ilist::append(a); } -- cgit v1.2.1 From 50e8ac0b831f9cc02bdc7cbe3b465c295b453d5d Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Wed, 5 Sep 2012 17:40:13 +0200 Subject: Bug#13734987 MEMORY LEAK WITH I_S/SHOW AND VIEWS WITH SUBQUERY In fill_schema_table_by_open(): free item list before restoring active arena. 
sql/sql_show.cc: Replaced i_s_arena.free_items with DBUG_ASSERT(i_s_arena.free_list == NULL) (there's nothing to free in that list) --- sql/sql_show.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 1b0f94ce18e..7847fe5b510 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3164,8 +3164,9 @@ end: /* Restore original LEX value, statement's arena and THD arena values. */ lex_end(thd->lex); - if (i_s_arena.free_list) - i_s_arena.free_items(); + // Free items, before restoring backup_arena below. + DBUG_ASSERT(i_s_arena.free_list == NULL); + thd->free_items(); /* For safety reset list of open temporary tables before closing -- cgit v1.2.1 From 792efd59bce2413191d5620c6f0815e21f18b628 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 20 Sep 2012 12:48:59 +0300 Subject: MDEV-521 fix. After pullout item during single row subselect transformation it should be fixed properly. --- sql/item_subselect.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index c86deecb813..6fc1a591594 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1007,11 +1007,9 @@ Item_singlerow_subselect::select_transformer(JOIN *join) } substitution= select_lex->item_list.head(); /* - as far as we moved content to upper level, field which depend of - 'upper' select is not really dependent => we remove this dependence + as far as we moved content to upper level we have to fix dependences & Co */ - substitution->walk(&Item::remove_dependence_processor, 0, - (uchar *) select_lex->outer_select()); + substitution->fix_after_pullout(select_lex->outer_select(), &substitution); } DBUG_RETURN(false); } -- cgit v1.2.1 From 37155bf74a29a57829cc13947ac346b0f2c8fd28 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 26 Sep 2012 15:30:08 +0200 Subject: Fix some failures in 5.1 Buildbot: - Fix some warnings in newer GCC (-Werror 
...). - Fix wrong STACK_DIRECTION detected by configure due to compiler inlining. --- sql/mysqld.cc | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 629ffe26f2c..66cdc3eba1c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5991,7 +5991,7 @@ struct my_option my_long_options[] = {"debug-crc-break", OPT_DEBUG_CRC, "Call my_debug_put_break_here() if crc matches this number (for debug).", &opt_my_crc_dbug_check, &opt_my_crc_dbug_check, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~(ulong) 0L, 0, 0, 0}, + 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~(ulonglong) 0, 0, 0, 0}, {"debug-flush", OPT_DEBUG_FLUSH, "Default debug log with flush after write", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-assert-if-crashed-table", OPT_DEBUG_ASSERT_IF_CRASHED_TABLE, @@ -6221,7 +6221,7 @@ each time the SQL thread starts.", #ifdef HAVE_MMAP {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.", &opt_tc_log_size, &opt_tc_log_size, 0, GET_ULONG, - REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, (longlong) ULONG_MAX, 0, + REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, (ulonglong) ULONG_MAX, 0, TC_LOG_PAGE_SIZE, 0}, #endif {"log-update", OPT_UPDATE_LOG, @@ -6785,12 +6785,12 @@ thread is in the relay logs.", "during a transaction. If you often use big, multi-statement " "transactions you can increase this to get more performance.", &binlog_cache_size, &binlog_cache_size, 0, GET_ULONG, - REQUIRED_ARG, 32*1024L, IO_SIZE, (longlong) ULONG_MAX, 0, IO_SIZE, 0}, + REQUIRED_ARG, 32*1024L, IO_SIZE, (ulonglong) ULONG_MAX, 0, IO_SIZE, 0}, {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, "Size of tree cache used in bulk insert optimization. 
Note that this " "is a limit per thread.", &global_system_variables.bulk_insert_buff_size, &max_system_variables.bulk_insert_buff_size, - 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, (longlong) ULONG_MAX, 0, 1, 0}, + 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, (ulonglong) ULONG_MAX, 0, 1, 0}, {"connect_timeout", OPT_CONNECT_TIMEOUT, "The number of seconds the mysqld server is waiting for a connect packet " "before responding with 'Bad handshake'.", &connect_timeout, &connect_timeout, @@ -6815,7 +6815,7 @@ thread is in the relay logs.", "will check if there are any SELECT statements pending. If so, it allows " "these to execute before continuing.", &delayed_insert_limit, &delayed_insert_limit, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_LIMIT, 1, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, DELAYED_LIMIT, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", &delayed_insert_timeout, &delayed_insert_timeout, 0, @@ -6825,7 +6825,7 @@ thread is in the relay logs.", "If the queue becomes full, any client that does INSERT DELAYED will wait " "until there is room in the queue again.", &delayed_queue_size, &delayed_queue_size, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"div_precision_increment", OPT_DIV_PRECINCREMENT, "Precision of the result of '/' operator will be increased on that value.", &global_system_variables.div_precincrement, @@ -6865,7 +6865,7 @@ thread is in the relay logs.", "The maximum length of the result of function group_concat.", &global_system_variables.group_concat_max_len, &max_system_variables.group_concat_max_len, 0, GET_ULONG, - REQUIRED_ARG, 1024, 4, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, 1024, 4, (ulonglong) ULONG_MAX, 0, 1, 0}, {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the 
server waits for activity on an interactive " "connection before closing it.", @@ -6876,7 +6876,7 @@ thread is in the relay logs.", "The size of the buffer that is used for full joins.", &global_system_variables.join_buff_size, &max_system_variables.join_buff_size, 0, GET_ULONG, - REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, (longlong) ULONG_MAX, + REQUIRED_ARG, 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, (ulonglong) ULONG_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, {"keep_files_on_create", OPT_KEEP_FILES_ON_CREATE, "Don't overwrite stale .MYD and .MYI even if no directory is specified.", @@ -6897,7 +6897,7 @@ thread is in the relay logs.", "This specifies the percentage ratio of that number of hits to the total " "number of blocks in key cache.", &dflt_key_cache_var.param_age_threshold, 0, 0, - (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, 300, 100, (longlong) ULONG_MAX, + (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, 300, 100, (ulonglong) ULONG_MAX, 0, 100, 0}, {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "The default size of key cache blocks.", @@ -6918,7 +6918,7 @@ thread is in the relay logs.", "this to reduce output on slow query log)", &global_system_variables.log_slow_rate_limit, &max_system_variables.log_slow_rate_limit, 0, GET_ULONG, - REQUIRED_ARG, 1, 1, ~0L, 0, 1L, 0}, + REQUIRED_ARG, 1, 1, ~0ULL, 0, 1L, 0}, {"log-slow-verbosity", OPT_LOG_SLOW_VERBOSITY, "Choose how verbose the messages to your slow log will be. Multiple flags " "allowed in a comma-separated string. 
[query_plan, innodb]", @@ -6972,7 +6972,7 @@ thread is in the relay logs.", "If there is more than this number of interrupted connections from a host " "this host will be blocked from further connections.", &max_connect_errors, &max_connect_errors, 0, GET_ULONG, - REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, // Default max_connections of 151 is larger than Apache's default max // children, to avoid "too many connections" error in a common setup {"max_connections", OPT_MAX_CONNECTIONS, @@ -7041,7 +7041,7 @@ thread is in the relay logs.", "Maximum number of temporary tables a client can keep open at a time.", &global_system_variables.max_tmp_tables, &max_system_variables.max_tmp_tables, 0, GET_ULONG, - REQUIRED_ARG, 32, 1, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, 32, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"max_user_connections", OPT_MAX_USER_CONNECTIONS, "The maximum number of active connections for a single user (0 = no limit).", &max_user_connections, &max_user_connections, 0, GET_UINT, @@ -7054,12 +7054,12 @@ thread is in the relay logs.", "Don't log queries which examine less than min_examined_row_limit rows to file.", &global_system_variables.min_examined_row_limit, &max_system_variables.min_examined_row_limit, 0, GET_ULONG, - REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1L, 0}, + REQUIRED_ARG, 0, 0, (ulonglong) ULONG_MAX, 0, 1L, 0}, {"multi_range_count", OPT_MULTI_RANGE_COUNT, "Number of key ranges to request at once.", &global_system_variables.multi_range_count, &max_system_variables.multi_range_count, 0, - GET_ULONG, REQUIRED_ARG, 256, 1, (longlong) ULONG_MAX, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 256, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", &opt_myisam_block_size, &opt_myisam_block_size, 0, GET_ULONG, REQUIRED_ARG, @@ -7095,7 +7095,7 @@ thread is in the relay logs.", 
"disables parallel repair.", &global_system_variables.myisam_repair_threads, &max_system_variables.myisam_repair_threads, 0, - GET_ULONG, REQUIRED_ARG, 1, 1, (longlong) ULONG_MAX, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 1, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"myisam_sort_buffer_size", OPT_MYISAM_SORT_BUFFER_SIZE, "The buffer that is allocated when sorting the index when doing a REPAIR " "or when creating indexes with CREATE INDEX or ALTER TABLE.", @@ -7126,7 +7126,7 @@ thread is in the relay logs.", "If a read on a communication port is interrupted, retry this many times before giving up.", &global_system_variables.net_retry_count, &max_system_variables.net_retry_count,0, - GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, (longlong) ULONG_MAX, + GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, (ulonglong) ULONG_MAX, 0, 1, 0}, {"net_write_timeout", OPT_NET_WRITE_TIMEOUT, "Number of seconds to wait for a block to be written to a connection before " @@ -7192,19 +7192,19 @@ thread is in the relay logs.", "Allocation block size for query parsing and execution.", &global_system_variables.query_alloc_block_size, &max_system_variables.query_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (ulonglong) ULONG_MAX, 0, 1024, 0}, #ifdef HAVE_QUERY_CACHE {"query_cache_limit", OPT_QUERY_CACHE_LIMIT, "Don't cache results that are bigger than this.", &query_cache_limit, &query_cache_limit, 0, GET_ULONG, - REQUIRED_ARG, 1024*1024L, 0, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, 1024*1024L, 0, (ulonglong) ULONG_MAX, 0, 1, 0}, {"query_cache_min_res_unit", OPT_QUERY_CACHE_MIN_RES_UNIT, "Minimal size of unit in which space for results is allocated (last unit " "will be trimmed after writing all result data).", &query_cache_min_res_unit, &query_cache_min_res_unit, 0, GET_ULONG, REQUIRED_ARG, QUERY_CACHE_MIN_RESULT_DATA_SIZE, - 0, (longlong) ULONG_MAX, 0, 1, 0}, + 0, 
(ulonglong) ULONG_MAX, 0, 1, 0}, #endif /*HAVE_QUERY_CACHE*/ {"query_cache_size", OPT_QUERY_CACHE_SIZE, "The memory allocated to store results from old queries.", @@ -7228,13 +7228,13 @@ thread is in the relay logs.", &global_system_variables.query_prealloc_size, &max_system_variables.query_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE, - (longlong) ULONG_MAX, 0, 1024, 0}, + (ulonglong) ULONG_MAX, 0, 1024, 0}, {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, "Allocation block size for storing ranges during optimization.", &global_system_variables.range_alloc_block_size, &max_system_variables.range_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE, - (longlong) ULONG_MAX, 0, 1024, 0}, + (ulonglong) ULONG_MAX, 0, 1024, 0}, {"read_buffer_size", OPT_RECORD_BUFFER, "Each thread that does a sequential scan allocates a buffer of this size " "for each table it scans. If you do many sequential scans, you may want " @@ -7303,7 +7303,7 @@ thread is in the relay logs.", "Synchronously flush binary log to disk after every #th event. " "Use 0 (default) to disable synchronous flushing.", &sync_binlog_period, &sync_binlog_period, 0, GET_ULONG, - REQUIRED_ARG, 0, 0, (longlong) ULONG_MAX, 0, 1, 0}, + REQUIRED_ARG, 0, 0, (ulonglong) ULONG_MAX, 0, 1, 0}, {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. 
Enabled by default.", &opt_sync_frm, &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, @@ -7348,7 +7348,7 @@ thread is in the relay logs.", {"thread_stack", OPT_THREAD_STACK, "The stack size for each thread.", &my_thread_stack_size, &my_thread_stack_size, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, - (sizeof(void*)<=4)?1024L*128L: ((256-16)*1024L), (longlong) ULONG_MAX, 0, 1024, 0}, + (sizeof(void*)<=4)?1024L*128L: ((256-16)*1024L), (ulonglong) ULONG_MAX, 0, 1024, 0}, { "time_format", OPT_TIME_FORMAT, "The TIME format (for future).", &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], @@ -7364,13 +7364,13 @@ thread is in the relay logs.", "Allocation block size for transactions to be stored in binary log.", &global_system_variables.trans_alloc_block_size, &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (longlong) ULONG_MAX, 0, 1024, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, (ulonglong) ULONG_MAX, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, "Persistent buffer for transactions to be stored in binary log.", &global_system_variables.trans_prealloc_size, &max_system_variables.trans_prealloc_size, 0, GET_ULONG, - REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, (longlong) ULONG_MAX, 0, + REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, (ulonglong) ULONG_MAX, 0, 1024, 0}, {"thread_handling", OPT_THREAD_HANDLING, "Define threads usage for handling queries: " -- cgit v1.2.1 From 8c2bb705f11956cdc0acb67182466a903dcdd19b Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Thu, 27 Sep 2012 13:18:07 +0500 Subject: MDEV-495 backport --ignore-db-dir. The feature was backported from MySQL 5.6. Some code was added to make commands as SELECT * FROM ignored_db.t1; CALL ignored_db.proc(); USE ignored_db; to take that option into account. per-file comments: mysql-test/r/ignore_db_dirs_basic.result test result added. 
mysql-test/t/ignore_db_dirs_basic-master.opt options for the test, actually the set of --ignore-db-dir lines. mysql-test/t/ignore_db_dirs_basic.test test for the feature. Same test from 5.6 was taken as a basis, then tests for SELECT, CALL etc were added. per-file comments: sql/mysql_priv.h MDEV-495 backport --ignore-db-dir. interface for db_name_is_in_ignore_list() added. sql/mysqld.cc MDEV-495 backport --ignore-db-dir. --ignore-db-dir handling. sql/set_var.cc MDEV-495 backport --ignore-db-dir. the @@ignore_db_dirs variable added. sql/sql_show.cc MDEV-495 backport --ignore-db-dir. check if the directory is ignored. sql/sql_show.h MDEV-495 backport --ignore-db-dir. interface added for opt_ignored_db_dirs. sql/table.cc MDEV-495 backport --ignore-db-dir. check if the directory is ignored. --- sql/mysql_priv.h | 2 + sql/mysqld.cc | 35 ++++++- sql/set_var.cc | 3 + sql/sql_show.cc | 281 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ sql/sql_show.h | 9 ++ sql/table.cc | 3 + 6 files changed, 332 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 14810fc7119..5f20e9f0591 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -2834,6 +2834,8 @@ bool load_collation(MEM_ROOT *mem_root, CHARSET_INFO *dflt_cl, CHARSET_INFO **cl); +bool db_name_is_in_ignore_db_dirs_list(const char *dbase); + #endif /* MYSQL_SERVER */ extern "C" int test_if_data_home_dir(const char *dir); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 26fd22ef95b..e6653e410cb 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -33,6 +33,7 @@ #include #include "debug_sync.h" #include "log_event.h" +#include "sql_show.h" #include "../storage/myisam/ha_myisam.h" @@ -1447,6 +1448,7 @@ void clean_up(bool print_message) #endif my_tz_free(); my_database_names_free(); + ignore_db_dirs_free(); #ifndef NO_EMBEDDED_ACCESS_CHECKS servers_free(1); acl_free(1); @@ -3332,6 +3334,9 @@ static int init_common_variables(const char *conf_file_name, int argc, 
mysql_init_variables()) return 1; + if (ignore_db_dirs_init()) + return 1; + #ifdef HAVE_TZNAME { struct tm tm_tmp; @@ -3677,6 +3682,12 @@ You should consider changing lower_case_table_names to 1 or 2", files_charset_info : &my_charset_bin); + if (ignore_db_dirs_process_additions()) + { + sql_print_error("An error occurred while storing ignore_db_dirs to a hash."); + return 1; + } + return 0; } @@ -5999,7 +6010,8 @@ enum options_mysqld OPT_MAX_LONG_DATA_SIZE, OPT_MASTER_VERIFY_CHECKSUM, OPT_SLAVE_SQL_VERIFY_CHECKSUM, - OPT_QUERY_CACHE_STRIP_COMMENTS + OPT_QUERY_CACHE_STRIP_COMMENTS, + OPT_IGNORE_DB_DIRECTORY }; @@ -6288,6 +6300,11 @@ struct my_option my_long_options[] = each time the SQL thread starts.", &opt_init_slave, &opt_init_slave, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ignore-db-dir", OPT_IGNORE_DB_DIRECTORY, + "Specifies a directory to add to the ignore list when collecting " + "database names from the datadir. Put a blank argument to reset " + "the list accumulated so far.", 0, 0, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"language", 'L', "Client error messages in given language. 
May be given as a full path.", &language_ptr, &language_ptr, 0, GET_STR, REQUIRED_ARG, @@ -9286,6 +9303,22 @@ mysqld_get_one_option(int optid, case OPT_MAX_LONG_DATA_SIZE: max_long_data_size_used= true; break; + + + case OPT_IGNORE_DB_DIRECTORY: + if (*argument == 0) + ignore_db_dirs_reset(); + else + { + if (push_ignored_db_dir(argument)) + { + sql_print_error("Can't start server: " + "cannot process --ignore-db-dir=%.*s", + FN_REFLEN, argument); + return 1; + } + } + break; } return 0; } diff --git a/sql/set_var.cc b/sql/set_var.cc index c52e415e657..94a1b6a1cef 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -67,6 +67,7 @@ #include #include #include "events.h" +#include "sql_show.h" // opt_ignore_db_dirs /* WITH_NDBCLUSTER_STORAGE_ENGINE */ #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE @@ -1013,6 +1014,8 @@ static sys_var_readonly sys_in_transaction(&vars, "in_transaction", OPT_SESSION, SHOW_BOOL, in_transaction); +static sys_var_const_str_ptr sys_ignore_db_dirs(&vars, "ignore_db_dirs", + &opt_ignore_db_dirs); bool sys_var::check(THD *thd, set_var *var) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 0a23a95bbcf..562ccea488f 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -379,6 +379,284 @@ bool mysqld_show_privileges(THD *thd) } +/** Hash of LEX_STRINGs used to search for ignored db directories. */ +static HASH ignore_db_dirs_hash; + +/** + An array of LEX_STRING pointers to collect the options at + option parsing time. +*/ +static DYNAMIC_ARRAY ignore_db_dirs_array; + +/** + A value for the read only system variable to show a list of + ignored directories. +*/ +char *opt_ignore_db_dirs= NULL; + +/** + This flag is ON if: + - the list of ignored directories is not empty + + - and some of the ignored directory names + need no tablename-to-filename conversion. + Otherwise, if the name of the directory contains + unconditional characters like '+' or '.', they + never can match the database directory name. 
So the + db_name_is_in_ignore_db_dirs_list() can just return at once. +*/ +static bool skip_ignored_dir_check= TRUE; + +/** + Sets up the data structures for collection of directories at option + processing time. + We need to collect the directories in an array first, because + we need the character sets initialized before setting up the hash. + + @return state + @retval TRUE failed + @retval FALSE success +*/ + +bool +ignore_db_dirs_init() +{ + return my_init_dynamic_array(&ignore_db_dirs_array, sizeof(LEX_STRING *), + 0, 0); +} + + +/** + Retrieves the key (the string itself) from the LEX_STRING hash members. + + Needed by hash_init(). + + @param data the data element from the hash + @param out len_ret Placeholder to return the length of the key + @param unused + @return a pointer to the key +*/ + +static uchar * +db_dirs_hash_get_key(const uchar *data, size_t *len_ret, + my_bool __attribute__((unused))) +{ + LEX_STRING *e= (LEX_STRING *) data; + + *len_ret= e->length; + return (uchar *) e->str; +} + + +/** + Wrap a directory name into a LEX_STRING and push it to the array. + + Called at option processing time for each --ignore-db-dir option. + + @param path the name of the directory to push + @return state + @retval TRUE failed + @retval FALSE success +*/ + +bool +push_ignored_db_dir(char *path) +{ + LEX_STRING *new_elt; + char *new_elt_buffer; + size_t path_len= strlen(path); + + if (!path_len || path_len >= FN_REFLEN) + return true; + + // No need to normalize, it's only a directory name, not a path. + if (!my_multi_malloc(0, + &new_elt, sizeof(LEX_STRING), + &new_elt_buffer, path_len + 1, + NullS)) + return true; + new_elt->str= new_elt_buffer; + memcpy(new_elt_buffer, path, path_len); + new_elt_buffer[path_len]= 0; + new_elt->length= path_len; + return insert_dynamic(&ignore_db_dirs_array, (uchar*) &new_elt); +} + + +/** + Clean up the directory ignore options accumulated so far. 
+ + Called at option processing time for each --ignore-db-dir option + with an empty argument. +*/ + +void +ignore_db_dirs_reset() +{ + LEX_STRING **elt; + while (NULL!= (elt= (LEX_STRING **) pop_dynamic(&ignore_db_dirs_array))) + if (elt && *elt) + my_free(*elt, MYF(0)); +} + + +/** + Free the directory ignore option variables. + + Called at server shutdown. +*/ + +void +ignore_db_dirs_free() +{ + if (opt_ignore_db_dirs) + { + my_free(opt_ignore_db_dirs, MYF(0)); + opt_ignore_db_dirs= NULL; + } + ignore_db_dirs_reset(); + delete_dynamic(&ignore_db_dirs_array); + my_hash_free(&ignore_db_dirs_hash); +} + + +/** + Initialize the ignore db directories hash and status variable from + the options collected in the array. + + Called when option processing is over and the server's in-memory + structures are fully initialized. + + @return state + @retval TRUE failed + @retval FALSE success +*/ + +static void dispose_db_dir(void *ptr) +{ + my_free(ptr, MYF(0)); +} + + +bool +ignore_db_dirs_process_additions() +{ + ulong i; + size_t len; + char *ptr; + LEX_STRING *dir; + + + DBUG_ASSERT(opt_ignore_db_dirs == NULL); + + skip_ignored_dir_check= TRUE; + + if (my_hash_init(&ignore_db_dirs_hash, + lower_case_table_names ? + character_set_filesystem : &my_charset_bin, + 0, 0, 0, db_dirs_hash_get_key, + dispose_db_dir, + HASH_UNIQUE)) + return true; + + /* len starts from 1 because of the terminating zero. */ + len= 1; + for (i= 0; i < ignore_db_dirs_array.elements; i++) + { + get_dynamic(&ignore_db_dirs_array, (uchar *) &dir, i); + len+= dir->length + 1; // +1 for the comma + if (skip_ignored_dir_check) + { + char buff[FN_REFLEN]; + uint buff_len; + buff_len= tablename_to_filename(dir->str, buff, sizeof(buff)); + skip_ignored_dir_check= strcmp(dir->str, buff) != 0; + } + } + + /* No delimiter for the last directory. 
*/ + if (len > 1) + len--; + + /* +1 the terminating zero */ + ptr= opt_ignore_db_dirs= (char *) my_malloc(len + 1, MYF(0)); + if (!ptr) + return true; + + /* Make sure we have an empty string to start with. */ + *ptr= 0; + + for (i= 0; i < ignore_db_dirs_array.elements; i++) + { + get_dynamic(&ignore_db_dirs_array, (uchar *) &dir, i); + if (my_hash_insert(&ignore_db_dirs_hash, (uchar *) dir)) + return true; + ptr= strnmov(ptr, dir->str, dir->length); + if (i + 1 < ignore_db_dirs_array.elements) + ptr= strmov(ptr, ","); + + /* + Set the transferred array element to NULL to avoid double free + in case of error. + */ + dir= NULL; + set_dynamic(&ignore_db_dirs_array, (uchar *) &dir, i); + } + + /* make sure the string is terminated */ + DBUG_ASSERT(ptr - opt_ignore_db_dirs <= (ptrdiff_t) len); + *ptr= 0; + + /* + It's OK to empty the array here as the allocated elements are + referenced through the hash now. + */ + reset_dynamic(&ignore_db_dirs_array); + + return false; +} + + +/** + Check if a directory name is in the hash of ignored directories. + + @return search result + @retval TRUE found + @retval FALSE not found +*/ + +static inline bool +is_in_ignore_db_dirs_list(const char *directory) +{ + return ignore_db_dirs_hash.records && + NULL != my_hash_search(&ignore_db_dirs_hash, (const uchar *) directory, + strlen(directory)); +} + + +/** + Check if a database name is in the hash of ignored directories. 
+ + @return search result + @retval TRUE found + @retval FALSE not found +*/ + +bool +db_name_is_in_ignore_db_dirs_list(const char *directory) +{ + char buff[FN_REFLEN]; + uint buff_len; + + if (skip_ignored_dir_check) + return 0; + + buff_len= tablename_to_filename(directory, buff, sizeof(buff)); + + return my_hash_search(&ignore_db_dirs_hash, (uchar *) buff, buff_len)!=NULL; +} + + /*************************************************************************** List all column types ***************************************************************************/ @@ -552,6 +830,9 @@ find_files(THD *thd, List *files, const char *db, if (!MY_S_ISDIR(file->mystat->st_mode)) continue; + if (is_in_ignore_db_dirs_list(file->name)) + continue; + file_name_len= filename_to_tablename(file->name, uname, sizeof(uname)); if (wild) { diff --git a/sql/sql_show.h b/sql/sql_show.h index fec73122e8b..e3077c560d7 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -41,4 +41,13 @@ int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); int copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table); + +/* Handle the ignored database directories list for SHOW/I_S. 
*/ +bool ignore_db_dirs_init(); +void ignore_db_dirs_free(); +void ignore_db_dirs_reset(); +bool ignore_db_dirs_process_additions(); +bool push_ignored_db_dir(char *path); +extern char *opt_ignore_db_dirs; + #endif /* SQL_SHOW_H */ diff --git a/sql/table.cc b/sql/table.cc index 2be94c55205..0247dc167ee 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3197,6 +3197,9 @@ bool check_db_name(LEX_STRING *org_name) if (lower_case_table_names && name != any_db) my_casedn_str(files_charset_info, name); + if (db_name_is_in_ignore_db_dirs_list(name)) + return 1; + return check_table_name(name, name_length, check_for_path_chars); } -- cgit v1.2.1 From e290d2bed52b0d5d930a0bc3362008ddb27915a6 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 28 Sep 2012 09:54:43 +0200 Subject: Fix compiler warnings that breaks build (-Werror). --- sql/sql_show.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 562ccea488f..e01e75db00f 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -568,8 +568,7 @@ ignore_db_dirs_process_additions() if (skip_ignored_dir_check) { char buff[FN_REFLEN]; - uint buff_len; - buff_len= tablename_to_filename(dir->str, buff, sizeof(buff)); + (void) tablename_to_filename(dir->str, buff, sizeof(buff)); skip_ignored_dir_check= strcmp(dir->str, buff) != 0; } } -- cgit v1.2.1 From 66bd2b56fce6be2a083ef04b593fb0f8e644d5b4 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Sat, 29 Sep 2012 22:44:13 -0700 Subject: Fixed LP bug #1058071 (mdev-564). In some rare cases when the value of the system variable join_buffer_size was set to a number less than 256 the function JOIN_CACHE::set_constants determined the size of an offset in the join buffer equal to 1 though the minimal join buffer required more than 256 bytes. This could cause a crash of the server when records from the join buffer were read. 
--- sql/sql_join_cache.cc | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 5c803f85c49..1121c591c3b 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -680,7 +680,23 @@ void JOIN_CACHE::set_constants() uint len= length + fields*sizeof(uint)+blobs*sizeof(uchar *) + (prev_cache ? prev_cache->get_size_of_rec_offset() : 0) + sizeof(ulong); - buff_size= max(join->thd->variables.join_buff_size, 2*len); + /* + The values of size_of_rec_ofs, size_of_rec_len, size_of_fld_ofs, + base_prefix_length, pack_length, pack_length_with_blob_ptrs + will be recalculated later in this function when we get the estimate + for the actual value of the join buffer size. + */ + size_of_rec_ofs= size_of_rec_len= size_of_fld_ofs= 4; + base_prefix_length= (with_length ? size_of_rec_len : 0) + + (prev_cache ? prev_cache->get_size_of_rec_offset() : 0); + pack_length= (with_length ? size_of_rec_len : 0) + + (prev_cache ? prev_cache->get_size_of_rec_offset() : 0) + + length + fields*sizeof(uint); + pack_length_with_blob_ptrs= pack_length + blobs*sizeof(uchar *); + min_buff_size= 0; + min_records= 1; + buff_size= max(join->thd->variables.join_buff_size, + get_min_join_buffer_size()); size_of_rec_ofs= offset_size(buff_size); size_of_rec_len= blobs ? 
size_of_rec_ofs : offset_size(len); size_of_fld_ofs= size_of_rec_len; @@ -753,19 +769,24 @@ ulong JOIN_CACHE::get_min_join_buffer_size() if (!min_buff_size) { size_t len= 0; + size_t len_last= 0; for (JOIN_TAB *tab= start_tab; tab != join_tab; tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS)) { len+= tab->get_max_used_fieldlength(); + len_last=+ tab->get_used_fieldlength(); } - len+= get_record_max_affix_length() + get_max_key_addon_space_per_record(); - size_t min_sz= len*min_records; + size_t len_addon= get_record_max_affix_length() + + get_max_key_addon_space_per_record(); + len+= len_addon; + len_last+= len_addon; + size_t min_sz= len*(min_records-1) + len_last; + min_sz+= pack_length_with_blob_ptrs; size_t add_sz= 0; for (uint i=0; i < min_records; i++) add_sz+= join_tab_scan->aux_buffer_incr(i+1); avg_aux_buffer_incr= add_sz/min_records; min_sz+= add_sz; - min_sz+= pack_length_with_blob_ptrs; set_if_bigger(min_sz, 1); min_buff_size= min_sz; } -- cgit v1.2.1 From b0d11675fb46f5db458896a9a17f03bd53d98e88 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 5 Oct 2012 12:26:55 +0300 Subject: Fix of MDEV-589. The problem was in incorrect detection of merged views in Item_direct_view_ref::used_tables() . --- sql/item.cc | 4 ++-- sql/table.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 18a86aa2d1a..98c27266415 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9175,7 +9175,7 @@ table_map Item_direct_view_ref::used_tables() const { return get_depended_from() ? OUTER_REF_TABLE_BIT : - ((view->merged || !view->table) ? + ((view->is_merged_derived() || view->merged || !view->table) ? (*ref)->used_tables() : view->table->map); } @@ -9184,7 +9184,7 @@ table_map Item_direct_view_ref::not_null_tables() const { return get_depended_from() ? 0 : - ((view->merged || !view->table) ? 
(*ref)->not_null_tables() : view->table->map); } diff --git a/sql/table.h b/sql/table.h index 8ce73162aaf..a2d3aa73f7e 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1600,6 +1600,7 @@ struct TABLE_LIST /* TRUE <=> derived table should be filled right after optimization. */ bool fill_me; /* TRUE <=> view/DT is merged. */ + /* TODO: replace with derived_type */ bool merged; bool merged_for_insert; /* TRUE <=> don't prepare this derived table/view as it should be merged.*/ -- cgit v1.2.1 From 3012a5d5ce37f3fee35d24750938f69586d38e13 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 5 Oct 2012 14:24:38 +0200 Subject: MDEV-3796 various RPM problems cmake/cpack_rpm.cmake: * mark all cnf files with %config(noreplace) * add the forgotten postun script sql/sys_vars.cc: 0 for a string variable means "no default". But datadir has the default value. support-files/rpm/server-postin.sh: * use mysqld --help to determine the correct datadir in the presence of my.cnf files (better than my_print_defaults, because it considers the correct group set). * Only create users, and chown/chmod if it's a fresh install, not an upgrade. 
* only run mysql_install_db if datadir does not exist --- sql/sys_vars.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 6de285086a2..f9e43712f31 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -568,7 +568,7 @@ static Sys_var_ulong Sys_connect_timeout( static Sys_var_charptr Sys_datadir( "datadir", "Path to the database root directory", READ_ONLY GLOBAL_VAR(mysql_real_data_home_ptr), - CMD_LINE(REQUIRED_ARG, 'h'), IN_FS_CHARSET, DEFAULT(0)); + CMD_LINE(REQUIRED_ARG, 'h'), IN_FS_CHARSET, DEFAULT(mysql_real_data_home)); #ifndef DBUG_OFF static Sys_var_dbug Sys_dbug( -- cgit v1.2.1 From a9f9296891a6ca611878b930f5932f4dc3d9f2a6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 8 Oct 2012 13:06:20 +0200 Subject: sort status variables --- sql/mysqld.cc | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index e56b5123a7c..1ac96120b9a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -6930,6 +6930,7 @@ SHOW_VAR status_vars[]= { {"Aborted_clients", (char*) &aborted_threads, SHOW_LONG}, {"Aborted_connects", (char*) &aborted_connects, SHOW_LONG}, {"Access_denied_errors", (char*) offsetof(STATUS_VAR, access_denied_errors), SHOW_LONG_STATUS}, + {"Binlog_bytes_written", (char*) offsetof(STATUS_VAR, binlog_bytes_written), SHOW_LONGLONG_STATUS}, {"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG}, {"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG}, {"Binlog_stmt_cache_disk_use",(char*) &binlog_stmt_cache_disk_use, SHOW_LONG}, @@ -6937,7 +6938,6 @@ SHOW_VAR status_vars[]= { {"Busy_time", (char*) offsetof(STATUS_VAR, busy_time), SHOW_DOUBLE_STATUS}, {"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONGLONG_STATUS}, {"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS}, - {"Binlog_bytes_written", (char*) offsetof(STATUS_VAR, binlog_bytes_written), 
SHOW_LONGLONG_STATUS}, {"Com", (char*) com_status_vars, SHOW_ARRAY}, {"Compression", (char*) &show_net_compression, SHOW_FUNC}, {"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH}, @@ -6963,14 +6963,11 @@ SHOW_VAR status_vars[]= { {"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS}, {"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS}, {"Handler_discover", (char*) offsetof(STATUS_VAR, ha_discover_count), SHOW_LONG_STATUS}, - {"Handler_icp_attempts", (char*) offsetof(STATUS_VAR, ha_icp_attempts), SHOW_LONG_STATUS}, {"Handler_icp_match", (char*) offsetof(STATUS_VAR, ha_icp_match), SHOW_LONG_STATUS}, - {"Handler_mrr_init", (char*) offsetof(STATUS_VAR, ha_mrr_init_count), SHOW_LONG_STATUS}, {"Handler_mrr_key_refills", (char*) offsetof(STATUS_VAR, ha_mrr_key_refills_count), SHOW_LONG_STATUS}, {"Handler_mrr_rowid_refills", (char*) offsetof(STATUS_VAR, ha_mrr_rowid_refills_count), SHOW_LONG_STATUS}, - {"Handler_prepare", (char*) offsetof(STATUS_VAR, ha_prepare_count), SHOW_LONG_STATUS}, {"Handler_read_first", (char*) offsetof(STATUS_VAR, ha_read_first_count), SHOW_LONG_STATUS}, {"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), SHOW_LONG_STATUS}, @@ -6996,12 +6993,12 @@ SHOW_VAR status_vars[]= { {"Open_table_definitions", (char*) &show_table_definitions, SHOW_FUNC}, {"Open_tables", (char*) &show_open_tables, SHOW_FUNC}, {"Opened_files", (char*) &my_file_total_opened, SHOW_LONG_NOFLUSH}, - {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS}, {"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS}, + {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS}, {"Opened_views", (char*) offsetof(STATUS_VAR, opened_views), SHOW_LONG_STATUS}, {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC}, - {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS}, {"Rows_read", 
(char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS}, + {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS}, {"Rows_tmp_read", (char*) offsetof(STATUS_VAR, rows_tmp_read), SHOW_LONGLONG_STATUS}, #ifdef HAVE_QUERY_CACHE {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH}, @@ -7025,9 +7022,9 @@ SHOW_VAR status_vars[]= { {"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS}, {"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG}, #ifdef HAVE_REPLICATION - {"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC}, {"Slave_heartbeat_period", (char*) &show_heartbeat_period, SHOW_FUNC}, {"Slave_received_heartbeats",(char*) &show_slave_received_heartbeats, SHOW_FUNC}, + {"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC}, {"Slave_running", (char*) &show_slave_running, SHOW_FUNC}, #endif {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, -- cgit v1.2.1 From 72ab07c1cba0565a8ef043931610a2510a85cfd5 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 14 Oct 2012 19:29:31 +0300 Subject: MDEV-746: Merged mysql fix of the bug LP:1002546 & MySQL Bug#13651009. Empty result after reading const tables now works for subqueries. 
--- sql/sql_select.cc | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index c6bddbf9a28..2a6c60af85f 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1065,11 +1065,9 @@ JOIN::optimize() DBUG_RETURN(1); // error == -1 } if (const_table_map != found_const_table_map && - !(select_options & SELECT_DESCRIBE) && - (!conds || - !(conds->used_tables() & RAND_TABLE_BIT) || - select_lex->master_unit() == &thd->lex->unit)) // upper level SELECT + !(select_options & SELECT_DESCRIBE)) { + // There is at least one empty const table zero_result_cause= "no matching row in const table"; DBUG_PRINT("error",("Error: %s", zero_result_cause)); error= 0; @@ -12204,6 +12202,17 @@ int safe_index_read(JOIN_TAB *tab) } +/** + Reads content of constant table + + @param tab table + @param pos position of table in query plan + + @retval 0 ok, one row was found or one NULL-complemented row was created + @retval -1 ok, no row was found and no NULL-complemented row was created + @retval 1 error +*/ + static int join_read_const_table(JOIN_TAB *tab, POSITION *pos) { @@ -12295,6 +12304,16 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) } +/** + Read a constant table when there is at most one matching row, using a table + scan. 
+ + @param tab Table to read + + @retval 0 Row was found + @retval -1 Row was not found + @retval 1 Got an error (other than row not found) during read +*/ static int join_read_system(JOIN_TAB *tab) { @@ -12326,12 +12345,9 @@ join_read_system(JOIN_TAB *tab) @param tab Table to read - @retval - 0 Row was found - @retval - -1 Row was not found - @retval - 1 Got an error (other than row not found) during read + @retval 0 Row was found + @retval -1 Row was not found + @retval 1 Got an error (other than row not found) during read */ static int -- cgit v1.2.1 From 4304dbc464d425e54b0d802568838592cb625b26 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 9 Oct 2012 17:36:02 +0300 Subject: MDEV-616 fix (MySQL fix accepted) --- sql/item_func.h | 9 ++++++ sql/sql_select.cc | 90 +++++++++++++++++++++++++++++++++++-------------------- 2 files changed, 67 insertions(+), 32 deletions(-) (limited to 'sql') diff --git a/sql/item_func.h b/sql/item_func.h index 27f20dac002..4a31eb2da4e 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1441,6 +1441,15 @@ public: :Item_func(b), cached_result_type(INT_RESULT), entry(NULL), entry_thread_id(0), name(a) {} + Item_func_set_user_var(Item_func_set_user_var *item) + :Item_func(item), cached_result_type(item->cached_result_type), + entry(item->entry), entry_thread_id(item->entry_thread_id), + value(item->value), decimal_buff(item->decimal_buff), + null_item(item->null_item), save_result(item->save_result), + name(item->name) + { + //fixed= 1; + } enum Functype functype() const { return SUSERVAR_FUNC; } double val_real(); longlong val_int(); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2a6c60af85f..cc377500b2c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -16237,40 +16237,66 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, res_selected_fields.empty(); res_all_fields.empty(); - uint i, border= all_fields.elements - elements; - for (i= 0; (item= it++); i++) + uint border= all_fields.elements - elements; 
+ for (uint i= 0; (item= it++); i++) { Field *field; - - if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) || - (item->type() == Item::FUNC_ITEM && - ((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC)) + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) item_field= item; - else + else if (item->type() == Item::FIELD_ITEM) + item_field= item->get_tmp_table_item(thd); + else if (item->type() == Item::FUNC_ITEM && + ((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC) { - if (item->type() == Item::FIELD_ITEM) + field= item->get_tmp_table_field(); + if (field != NULL) { - item_field= item->get_tmp_table_item(thd); + /* + Replace "@:=" with "@:=". Otherwise, + we would re-evaluate , and if expression were + a subquery, this would access already-unlocked tables. + */ + Item_func_set_user_var* suv= + new Item_func_set_user_var((Item_func_set_user_var*) item); + Item_field *new_field= new Item_field(field); + if (!suv || !new_field || suv->fix_fields(thd, (Item**)&suv)) + DBUG_RETURN(true); // Fatal error + ((Item *)suv)->name= item->name; + /* + We are replacing the argument of Item_func_set_user_var after its + value has been read. The argument's null_value should be set by + now, so we must set it explicitly for the replacement argument + since the null_value may be read without any preceeding call to + val_*(). 
+ */ + new_field->update_null_value(); + List<Item> list; + list.push_back(new_field); + suv->set_arguments(list); + item_field= suv; } - else if ((field= item->get_tmp_table_field())) + else + item_field= item; + } + else if ((field= item->get_tmp_table_field())) + { + if (item->type() == Item::SUM_FUNC_ITEM && field->table->group) + item_field= ((Item_sum*) item)->result_item(field); + else + item_field= (Item*) new Item_field(field); + if (!item_field) + DBUG_RETURN(true); // Fatal error + + if (item->real_item()->type() != Item::FIELD_ITEM) + field->orig_table= 0; + item_field->name= item->name; + if (item->type() == Item::REF_ITEM) { - if (item->type() == Item::SUM_FUNC_ITEM && field->table->group) - item_field= ((Item_sum*) item)->result_item(field); - else - item_field= (Item*) new Item_field(field); - if (!item_field) - DBUG_RETURN(TRUE); // Fatal error - - if (item->real_item()->type() != Item::FIELD_ITEM) - field->orig_table= 0; - item_field->name= item->name; - if (item->type() == Item::REF_ITEM) - { - Item_field *ifield= (Item_field *) item_field; - Item_ref *iref= (Item_ref *) item; - ifield->table_name= iref->table_name; - ifield->db_name= iref->db_name; - } + Item_field *ifield= (Item_field *) item_field; + Item_ref *iref= (Item_ref *) item; + ifield->table_name= iref->table_name; + ifield->db_name= iref->db_name; + } #ifndef DBUG_OFF if (!item_field->name) { @@ -16282,20 +16308,20 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, item_field->name= sql_strmake(str.ptr(),str.length()); } #endif - } - else - item_field= item; } + else + item_field= item; + res_all_fields.push_back(item_field); ref_pointer_array[((i < border)? 
all_fields.elements-i-1 : i-border)]= item_field; } List_iterator_fast<Item> itr(res_all_fields); - for (i= 0; i < border; i++) + for (uint i= 0; i < border; i++) itr++; itr.sublist(res_selected_fields, elements); - DBUG_RETURN(FALSE); + DBUG_RETURN(false); } -- cgit v1.2.1 From d2d6c8b8e8c973f6201b3472b7f451a9e16d0d44 Mon Sep 17 00:00:00 2001 From: Sergey Petrunya Date: Wed, 10 Oct 2012 09:21:22 +0400 Subject: Backport of: olav.sandstaa@oracle.com-20120516074923-vd0dhp183vqcp2ql .. into MariaDB 5.3 Fix for Bug#12667154 SAME QUERY EXEC AS WHERE SUBQ GIVES DIFFERENT RESULTS ON IN() & NOT IN() COMP #3 This bug causes a wrong result in mysql-trunk when ICP is used and bad performance in mysql-5.5 and mysql-trunk. Using the query from bug report to explain what happens and causes the wrong result from the query when ICP is enabled: 1. The t3 table contains four records. The outer query will read these and for each of these it will execute the subquery. 2. Before the first execution of the subquery it will be optimized. In this case the important is what happens to the first table t1: -make_join_select() will call the range optimizer which decides that t1 should be accessed using a range scan on the k1 index It creates a QUICK_RANGE_SELECT object for this. -As the last part of optimization the ICP code pushes the condition down to the storage engine for table t1 on the k1 index. This produces the following information in the explain for this table: 2 DEPENDENT SUBQUERY t1 range k1 k1 5 NULL 3 Using index condition; Using filesort Note the use of filesort. 3. The first execution of the subquery does (among other things) due to the need for sorting: a. Call create_sort_index() which again will call find_all_keys(): b. find_all_keys() will read the required keys for all qualifying rows from the storage engine. To do this it checks if it has a quick-select for the table. It will use the quick-select for reading records. 
In this case it will read four records from the storage engine (based on the range criteria). The storage engine will evaluate the pushed index condition for each record. c. At the end of create_sort_index() there is code that cleans up a lot of stuff on the join tab. One of the things that is cleaned is the select object. The result of this is that the quick-select object created in make_join_select is deleted. 4. The second execution of the subquery does the same as the first but the result is different: a. Call create_sort_index() which again will call find_all_keys() (same as for the first execution) b. find_all_keys() will read the keys from the storage engine. To do this it checks if it has a quick-select for the table. Now there is NO quick-select object(!) (since it was deleted in step 3c). So find_all_keys defaults to read the table using a table scan instead. So instead of reading the four relevant records in the range it reads the entire table (6 records). It then evaluates the table's condition (and here it goes wrong). Since the entire condition has been pushed down to the storage engine using ICP all 6 records qualify. (Note that the storage engine will not evaluate the pushed index condition in this case since it was pushed for the k1 index and now we do a table scan without any index being used). The result is that here we return six qualifying key values instead of four due to not evaluating the table's condition. c. As above. 5. The two last execution of the subquery will also produce wrong results for the same reason. Summary: The problem occurs due to all but the first executions of the subquery is done as a table scan without evaluating the table's condition (which is pushed to the storage engine on a different index). This is caused by the create_sort_index() function deleting the quick-select object that should have been used for executing the subquery as a range scan. 
Note that this bug in addition to causing wrong results also can result in bad performance due to executing the subquery using a table scan instead of a range scan. This is an issue in MySQL 5.5. The fix for this problem is to avoid that the Quick-select-object that the optimizer created is deleted when create_sort_index() is doing clean-up of the join-tab. This will ensure that the quick-select object and the corresponding pushed index condition will be available and used by all following executions of the subquery. --- sql/sql_select.cc | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5f2e97d57c1..438cec61c6d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -18687,6 +18687,14 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, /* Currently ORDER BY ... LIMIT is not supported in subqueries. */ DBUG_ASSERT(join->group_list || !join->is_in_subquery()); + /* + If we have a select->quick object that is created outside of + create_sort_index() and this is part of a subquery that + potentially can be executed multiple times then we should not + delete the quick object on exit from this function. 
+ */ + bool keep_quick= select && select->quick && join->join_tab_save; + /* When there is SQL_BIG_RESULT do not sort using index for GROUP BY, and thus force sorting on disk unless a group min-max optimization @@ -18738,6 +18746,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, get_quick_select_for_ref(thd, table, &tab->ref, tab->found_records)))) goto err; + DBUG_ASSERT(!keep_quick); } } @@ -18769,10 +18778,25 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tablesort_result_cache= table->sort.io_cache; table->sort.io_cache= NULL; - - select->cleanup(); // filesort did select - table->quick_keys.clear_all(); // as far as we cleanup select->quick - table->intersect_keys.clear_all(); + /* + If a quick object was created outside of create_sort_index() + that might be reused, then do not call select->cleanup() since + it will delete the quick object. + */ + if (!keep_quick) + { + select->cleanup(); + /* + The select object should now be ready for the next use. If it + is re-used then there exists a backup copy of this join tab + which has the pointer to it. The join tab will be restored in + JOIN::reset(). So here we just delete the pointer to it. + */ + tab->select= NULL; + // If we deleted the quick select object we need to clear quick_keys + table->quick_keys.clear_all(); + } + // Restore the output resultset table->sort.io_cache= tablesort_result_cache; } tab->set_select_cond(NULL, __LINE__); -- cgit v1.2.1 From 362c2bca3e170031d33622f27d978c9570d0a9f5 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 10 Oct 2012 22:42:50 +0300 Subject: Fix of MDEV-3799. Find left table in right join (which turned to left join by reordering tables in join list but phisical order of tables of SELECT left as it was). 
--- sql/table.cc | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'sql') diff --git a/sql/table.cc b/sql/table.cc index 0247dc167ee..faa248f43f6 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -4462,7 +4462,16 @@ TABLE *TABLE_LIST::get_real_join_table() tbl= (tbl->view != NULL ? tbl->view->select_lex.get_table_list() : tbl->derived->first_select()->get_table_list()); + + /* find left table in outer join on this level */ + while(tbl->outer_join & JOIN_TYPE_RIGHT) + { + DBUG_ASSERT(tbl->next_local); + tbl= tbl->next_local; + } + } + return tbl->table; } -- cgit v1.2.1 From 8215ce4695e743d313e92f4d30f412f79958439c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 11 Oct 2012 12:09:21 +0300 Subject: MDEV-3804: MySQL fix for bug#11765413 removed (we have better and more general fix for the problem). Test suite added. --- sql/item.cc | 2 +- sql/sql_select.cc | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 4d80a153785..e0e7a4288da 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6839,7 +6839,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) if (from_field != not_found_field) { Item_field* fld; - if (!(fld= new Item_field(thd, last_checked_context, from_field))) + if (!(fld= new Item_field(from_field))) goto error; thd->change_item_tree(reference, fld); mark_as_dependent(thd, last_checked_context->select_lex, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 57bb9a311d7..b4489ddc81e 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -718,8 +718,6 @@ JOIN::prepare(Item ***rref_pointer_array, if (having) { - Query_arena backup, *arena; - arena= thd->activate_stmt_arena_if_needed(&backup); nesting_map save_allow_sum_func= thd->lex->allow_sum_func; thd->where="having clause"; thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level; @@ -735,9 +733,6 @@ JOIN::prepare(Item ***rref_pointer_array, (having->fix_fields(thd, &having) || having->check_cols(1))); 
select_lex->having_fix_field= 0; - select_lex->having= having; - if (arena) - thd->restore_active_arena(arena, &backup); if (having_fix_rc || thd->is_error()) DBUG_RETURN(-1); /* purecov: inspected */ -- cgit v1.2.1 From e47cdfdfb6b2b6512b13fa097ee092638e05266a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 12 Oct 2012 16:44:54 +0300 Subject: MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields Fix by Sergey Petrunia. This patch only prevents the evaluation of expensive subqueries during optimization. The crash reported in this bug has been fixed by some other patch. The fix is to call value->is_null() only when !value->is_expensive(), because is_null() may trigger evaluation of the Item, which in turn triggers subquery evaluation if the Item is a subquery. --- sql/sql_select.cc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b4489ddc81e..13c1f499edb 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3876,8 +3876,10 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, new_fields->null_rejecting); } else if (old->eq_func && new_fields->eq_func && - ((old->val->const_item() && old->val->is_null()) || - new_fields->val->is_null())) + ((old->val->const_item() && !old->val->is_expensive() && + old->val->is_null()) || + (!new_fields->val->is_expensive() && + new_fields->val->is_null()))) { /* field = expression OR field IS NULL */ old->level= and_level; @@ -3891,7 +3893,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, Remember the NOT NULL value unless the value does not depend on other tables. 
*/ - if (!old->val->used_tables() && old->val->is_null()) + if (!old->val->used_tables() && !old->val->is_expensive() && + old->val->is_null()) old->val= new_fields->val; } else -- cgit v1.2.1 From 4b4d74b517e8adb07bee4c1a0c1b05447afac086 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Thu, 18 Oct 2012 11:19:28 +0200 Subject: Do not DBUG_PRINT uninitialized variable. This avoid false positive from runtime checks in debug builds (Windows). --- sql/handler.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/handler.cc b/sql/handler.cc index fb10be678a0..356ae330201 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2621,8 +2621,7 @@ int handler::update_auto_increment() if (unlikely(nr == ULONGLONG_MAX)) DBUG_RETURN(HA_ERR_AUTOINC_ERANGE); - DBUG_PRINT("info",("auto_increment: %llu nb_reserved_values: %llu", - nr, nb_reserved_values)); + DBUG_PRINT("info",("auto_increment: %llu",nr)); /* Store field without warning (Warning will be printed by insert) */ save_count_cuted_fields= thd->count_cuted_fields; @@ -2640,6 +2639,8 @@ int handler::update_auto_increment() } if (append) { + DBUG_PRINT("info",("nb_reserved_values: %llu",nb_reserved_values)); + auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values, variables->auto_increment_increment); auto_inc_intervals_count++; -- cgit v1.2.1 From 97a1c53c8141d473b87dc8048c19868e8531db9e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 25 Oct 2012 15:50:10 +0300 Subject: MDEV-3812: Remove unneeded extra call to engine->exec() in Item_subselect::exec, remove enum store_key_result This task fixes an ineffeciency that is a remainder from MySQL 5.0/5.1. There, subqueries were optimized in a lazy manner, when executed for the first time. During this lazy optimization it may happen that the server finds a more efficient subquery engine, and substitute the current engine of the query being executed with the new engine. This required re-execution of the engine. 
MariaDB 5.3 pre-optimizes subqueries in almost all cases, and the engine is chosen in most cases, except when subquery materialization found that it must use partial matching. In this case, the current code was performing one extra re-execution although it was not needed at all. The patch performs the re-execution only if the engine was changed while executing. In addition the patch performs small cleanup by removing "enum store_key_result" because it is essentially a boolean, and the code that uses it already maps it to a boolean. --- sql/item_subselect.cc | 23 +++++++++++++---------- sql/item_subselect.h | 5 +---- sql/sql_select.h | 28 +++++++++++++--------------- 3 files changed, 27 insertions(+), 29 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index e26c3a47912..04151ebbdb2 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -49,7 +49,7 @@ Item_subselect::Item_subselect(): used_tables_cache(0), have_to_be_excluded(0), const_item_cache(1), inside_first_fix_fields(0), done_first_fix_fields(FALSE), expr_cache(0), forced_const(FALSE), substitution(0), engine(0), eliminated(FALSE), - engine_changed(0), changed(0), is_correlated(FALSE) + changed(0), is_correlated(FALSE) { DBUG_ENTER("Item_subselect::Item_subselect"); DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this)); @@ -623,6 +623,8 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery, bool Item_subselect::exec() { + subselect_engine *org_engine= engine; + DBUG_ENTER("Item_subselect::exec"); /* @@ -644,11 +646,14 @@ bool Item_subselect::exec() #ifndef DBUG_OFF ++exec_counter; #endif - if (engine_changed) + if (engine != org_engine) { - engine_changed= 0; - res= exec(); - DBUG_RETURN(res); + /* + If the subquery engine changed during execution due to lazy subquery + optimization, or because the original engine found a more efficient other + engine, re-execute the subquery with the new engine. 
+ */ + DBUG_RETURN(exec()); } DBUG_RETURN(res); } @@ -3141,10 +3146,8 @@ int subselect_single_select_engine::exec() DBUG_RETURN(1); /* purecov: inspected */ } } - if (item->engine_changed) - { + if (item->engine_changed(this)) DBUG_RETURN(1); - } } if (select_lex->uncacheable && select_lex->uncacheable != UNCACHEABLE_EXPLAIN @@ -3318,13 +3321,13 @@ bool subselect_uniquesubquery_engine::copy_ref_key(bool skip_constants) for (store_key **copy= tab->ref.key_copy ; *copy ; copy++) { - enum store_key::store_key_result store_res; + bool store_res; if (skip_constants && (*copy)->store_key_is_const()) continue; store_res= (*copy)->copy(); tab->ref.key_err= store_res; - if (store_res == store_key::STORE_KEY_FATAL) + if (store_res) { /* Error converting the left IN operand to the column type of the right diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 2a64c63a1be..1da129380e7 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -122,8 +122,6 @@ public: */ bool eliminated; - /* changed engine indicator */ - bool engine_changed; /* subquery is transformed */ bool changed; @@ -200,9 +198,9 @@ public: { old_engine= engine; engine= eng; - engine_changed= 1; return eng == 0; } + bool engine_changed(subselect_engine *eng) { return engine != eng; } /* True if this subquery has been already evaluated. Implemented only for single select and union subqueries only. 
@@ -260,7 +258,6 @@ public: st_select_lex*, st_select_lex*, Field*, Item*, Item_ident*); friend bool convert_join_subqueries_to_semijoins(JOIN *join); - }; /* single value subselect */ diff --git a/sql/sql_select.h b/sql/sql_select.h index 118a684ab62..f465b08e910 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1459,7 +1459,6 @@ class store_key :public Sql_alloc { public: bool null_key; /* TRUE <=> the value of the key has a null part */ - enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; enum Type { FIELD_STORE_KEY, ITEM_STORE_KEY, CONST_ITEM_STORE_KEY }; store_key(THD *thd, Field *field_arg, uchar *ptr, uchar *null, uint length) :null_key(0), null_ptr(null), err(0) @@ -1496,9 +1495,9 @@ public: @details this function makes sure truncation warnings when preparing the key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). */ - enum store_key_result copy() + bool copy() { - enum store_key_result result; + bool result; THD *thd= to_field->table->in_use; enum_check_fields saved_count_cuted_fields= thd->count_cuted_fields; ulonglong sql_mode= thd->variables.sql_mode; @@ -1520,7 +1519,7 @@ public: uchar *null_ptr; uchar err; - virtual enum store_key_result copy_inner()=0; + virtual bool copy_inner()=0; }; @@ -1552,7 +1551,7 @@ class store_key_field: public store_key } protected: - enum store_key_result copy_inner() + bool copy_inner() { TABLE *table= copy_field.to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -1569,7 +1568,7 @@ class store_key_field: public store_key copy_field.do_copy(©_field); dbug_tmp_restore_column_map(table->write_set, old_map); null_key= to_field->is_null(); - return err != 0 ? 
STORE_KEY_FATAL : STORE_KEY_OK; + return test(err); } }; @@ -1599,12 +1598,12 @@ public: const char *name() const { return "func"; } protected: - enum store_key_result copy_inner() + bool copy_inner() { TABLE *table= to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - int res= FALSE; + int res= 0; /* It looks like the next statement is needed only for a simplified @@ -1623,11 +1622,10 @@ public: we need to check for errors executing it and react accordingly */ if (!res && table->in_use->is_error()) - res= 1; /* STORE_KEY_FATAL */ + res= 1; dbug_tmp_restore_column_map(table->write_set, old_map); null_key= to_field->is_null() || item->null_value; - return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL : - (store_key_result) res); + return ((err != 0 || res < 0 || res > 2) ? true : test(res)); } }; @@ -1653,7 +1651,7 @@ public: bool store_key_is_const() { return true; } protected: - enum store_key_result copy_inner() + bool copy_inner() { int res; if (!inited) @@ -1665,18 +1663,18 @@ protected: if ((res= item->save_in_field(to_field, 1))) { if (!err) - err= res < 0 ? 1 : res; /* 1=STORE_KEY_FATAL */ + err= res < 0 ? 1 : res; } /* Item::save_in_field() may call Item::val_xxx(). And if this is a subquery we need to check for errors executing it and react accordingly */ if (!err && to_field->table->in_use->is_error()) - err= 1; /* STORE_KEY_FATAL */ + err= 1; dbug_tmp_restore_column_map(table->write_set, old_map); } null_key= to_field->is_null() || item->null_value; - return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); + return (err > 2 ? true : test(err)); } }; -- cgit v1.2.1 From 974abc7ad832131e59d310730722bb4ecdb460e8 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 27 Oct 2012 00:56:14 +0300 Subject: MDEV-3812 This patch undoes the removal of enum store_key_result by the previous patch for mdev-3812. 
--- sql/item_subselect.cc | 4 ++-- sql/sql_select.h | 28 +++++++++++++++------------- 2 files changed, 17 insertions(+), 15 deletions(-) (limited to 'sql') diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 04151ebbdb2..265db0055ad 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -3321,13 +3321,13 @@ bool subselect_uniquesubquery_engine::copy_ref_key(bool skip_constants) for (store_key **copy= tab->ref.key_copy ; *copy ; copy++) { - bool store_res; + enum store_key::store_key_result store_res; if (skip_constants && (*copy)->store_key_is_const()) continue; store_res= (*copy)->copy(); tab->ref.key_err= store_res; - if (store_res) + if (store_res == store_key::STORE_KEY_FATAL) { /* Error converting the left IN operand to the column type of the right diff --git a/sql/sql_select.h b/sql/sql_select.h index f465b08e910..118a684ab62 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1459,6 +1459,7 @@ class store_key :public Sql_alloc { public: bool null_key; /* TRUE <=> the value of the key has a null part */ + enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; enum Type { FIELD_STORE_KEY, ITEM_STORE_KEY, CONST_ITEM_STORE_KEY }; store_key(THD *thd, Field *field_arg, uchar *ptr, uchar *null, uint length) :null_key(0), null_ptr(null), err(0) @@ -1495,9 +1496,9 @@ public: @details this function makes sure truncation warnings when preparing the key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). 
*/ - bool copy() + enum store_key_result copy() { - bool result; + enum store_key_result result; THD *thd= to_field->table->in_use; enum_check_fields saved_count_cuted_fields= thd->count_cuted_fields; ulonglong sql_mode= thd->variables.sql_mode; @@ -1519,7 +1520,7 @@ public: uchar *null_ptr; uchar err; - virtual bool copy_inner()=0; + virtual enum store_key_result copy_inner()=0; }; @@ -1551,7 +1552,7 @@ class store_key_field: public store_key } protected: - bool copy_inner() + enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -1568,7 +1569,7 @@ class store_key_field: public store_key copy_field.do_copy(©_field); dbug_tmp_restore_column_map(table->write_set, old_map); null_key= to_field->is_null(); - return test(err); + return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } }; @@ -1598,12 +1599,12 @@ public: const char *name() const { return "func"; } protected: - bool copy_inner() + enum store_key_result copy_inner() { TABLE *table= to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - int res= 0; + int res= FALSE; /* It looks like the next statement is needed only for a simplified @@ -1622,10 +1623,11 @@ public: we need to check for errors executing it and react accordingly */ if (!res && table->in_use->is_error()) - res= 1; + res= 1; /* STORE_KEY_FATAL */ dbug_tmp_restore_column_map(table->write_set, old_map); null_key= to_field->is_null() || item->null_value; - return ((err != 0 || res < 0 || res > 2) ? true : test(res)); + return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL : + (store_key_result) res); } }; @@ -1651,7 +1653,7 @@ public: bool store_key_is_const() { return true; } protected: - bool copy_inner() + enum store_key_result copy_inner() { int res; if (!inited) @@ -1663,18 +1665,18 @@ protected: if ((res= item->save_in_field(to_field, 1))) { if (!err) - err= res < 0 ? 1 : res; + err= res < 0 ? 
1 : res; /* 1=STORE_KEY_FATAL */ } /* Item::save_in_field() may call Item::val_xxx(). And if this is a subquery we need to check for errors executing it and react accordingly */ if (!err && to_field->table->in_use->is_error()) - err= 1; + err= 1; /* STORE_KEY_FATAL */ dbug_tmp_restore_column_map(table->write_set, old_map); } null_key= to_field->is_null() || item->null_value; - return (err > 2 ? true : test(err)); + return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } }; -- cgit v1.2.1 From fb90c36284cab73c37355310bed33609fabf8aa2 Mon Sep 17 00:00:00 2001 From: Michael Widenius Date: Wed, 31 Oct 2012 23:49:51 +0200 Subject: Fixed MDEV-612, LP:1010759 - Valgrind error ha_maria::check_if_incompatible_data on mysql-test/r/partition.result: Added test case mysql-test/t/partition.test: Added test case sql/ha_partition.cc: Removed printing of not initialized variable storage/maria/ha_maria.cc: Don't copy variables that are not initialized --- sql/ha_partition.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 9d6e82b0356..aafa2448d85 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4347,8 +4347,8 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key) DBUG_ENTER("ha_partition::common_index_read"); LINT_INIT(key_len); /* used if have_start_key==TRUE */ - DBUG_PRINT("info", ("m_ordered %u m_ordered_scan_ong %u have_start_key %u", - m_ordered, m_ordered_scan_ongoing, have_start_key)); + DBUG_PRINT("info", ("m_ordered: %u have_start_key: %u", + m_ordered, have_start_key)); if (have_start_key) { -- cgit v1.2.1 From 7714739b2d6a50c4ca69421c0e19a9e08ff3b5c7 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 1 Nov 2012 14:54:33 -0700 Subject: Fixed bug mdev-585 (LP bug #637962) If, when executing a query with ORDER BY col LIMIT n, the optimizer chose an index-merge scan to access the table containing col while there existed an index defined over col 
then optimizer did not consider the possibility of using an alternative range scan by this index to avoid filesort. This could cause a performance degradation if the optimizer flag index_merge was set up to 'on'. --- sql/sql_select.cc | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 438cec61c6d..122c19ee73d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -18079,15 +18079,18 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit_arg, */ if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || - quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) - goto use_filesort; - ref_key= select->quick->index; - ref_key_parts= select->quick->used_key_parts; + ref_key= MAX_KEY; + else + { + ref_key= select->quick->index; + ref_key_parts= select->quick->used_key_parts; + } } - if (ref_key >= 0) + if (ref_key >= 0 && ref_key != MAX_KEY) { /* We come here when there is a REF key. @@ -18229,7 +18232,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit_arg, else keys= usable_keys; - if (ref_key >= 0 && table->covering_keys.is_set(ref_key)) + if (ref_key >= 0 && ref_key != MAX_KEY && + table->covering_keys.is_set(ref_key)) ref_key_quick_rows= table->quick_rows[ref_key]; read_time= join->best_positions[tablenr].read_time; -- cgit v1.2.1 From 4ffc9c3b01459a2904a7154a6c750d128864fc7b Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 2 Nov 2012 10:43:52 +0100 Subject: MDEV-531 : Warning: Forcing close of thread ... in rpl_binlog_index Use post_kill_notification in for one_thread_per_connection scheduler, the same as already used in threadpool, to reliably wake a thread stuck in read() or in different poll() variations. 
--- sql/scheduler.cc | 24 ++++++++++++++++++++++++ sql/scheduler.h | 2 +- sql/threadpool_common.cc | 2 +- sql/threadpool_unix.cc | 18 +----------------- sql/threadpool_win.cc | 16 ---------------- 5 files changed, 27 insertions(+), 35 deletions(-) (limited to 'sql') diff --git a/sql/scheduler.cc b/sql/scheduler.cc index 78a1a2a32bb..0ae4121ef4c 100644 --- a/sql/scheduler.cc +++ b/sql/scheduler.cc @@ -79,11 +79,34 @@ void scheduler_init() { scheduler_wait_sync_end); } + +/** + Kill notification callback, used by one-thread-per-connection + and threadpool scheduler. + + Wakes up a thread that is stuck in read/poll/epoll/event-poll + routines used by threadpool, such that subsequent attempt to + read from client connection will result in IO error. +*/ + +void post_kill_notification(THD *thd) +{ + DBUG_ENTER("post_kill_notification"); + if (current_thd == thd || thd->system_thread) + DBUG_VOID_RETURN; + + if (thd->net.vio) + vio_shutdown(thd->net.vio, SHUT_RD); + DBUG_VOID_RETURN; +} + /* Initialize scheduler for --thread-handling=one-thread-per-connection */ #ifndef EMBEDDED_LIBRARY + + void one_thread_per_connection_scheduler(scheduler_functions *func, ulong *arg_max_connections, uint *arg_connection_count) @@ -95,6 +118,7 @@ void one_thread_per_connection_scheduler(scheduler_functions *func, func->init_new_connection_thread= init_new_connection_handler_thread; func->add_connection= create_thread_to_handle_connection; func->end_thread= one_thread_per_connection_end; + func->post_kill_notification= post_kill_notification; } #endif diff --git a/sql/scheduler.h b/sql/scheduler.h index 82bba5abe65..4e200e86d74 100644 --- a/sql/scheduler.h +++ b/sql/scheduler.h @@ -78,7 +78,7 @@ void one_thread_per_connection_scheduler(scheduler_functions *func, void one_thread_scheduler(scheduler_functions *func); extern void scheduler_init(); - +extern void post_kill_notification(THD *); /* To be used for pool-of-threads (implemeneted differently on various OSs) */ diff --git 
a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 7e5bbd11c69..6b956768287 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -257,7 +257,7 @@ static scheduler_functions tp_scheduler_functions= tp_add_connection, // add_connection tp_wait_begin, // thd_wait_begin tp_wait_end, // thd_wait_end - tp_post_kill_notification, // post_kill_notification + post_kill_notification, // post_kill_notification NULL, // end_thread tp_end // end }; diff --git a/sql/threadpool_unix.cc b/sql/threadpool_unix.cc index f5ea771883d..da38d64fa4d 100644 --- a/sql/threadpool_unix.cc +++ b/sql/threadpool_unix.cc @@ -173,7 +173,6 @@ static int create_worker(thread_group_t *thread_group); static void *worker_main(void *param); static void check_stall(thread_group_t *thread_group); static void connection_abort(connection_t *connection); -void tp_post_kill_notification(THD *thd); static void set_wait_timeout(connection_t *connection); static void set_next_timeout_check(ulonglong abstime); static void print_pool_blocked_message(bool); @@ -444,7 +443,7 @@ static void timeout_check(pool_timer_t *timer) /* Wait timeout exceeded, kill connection. 
*/ mysql_mutex_lock(&thd->LOCK_thd_data); thd->killed = KILL_CONNECTION; - tp_post_kill_notification(thd); + post_kill_notification(thd); mysql_mutex_unlock(&thd->LOCK_thd_data); } else @@ -1258,21 +1257,6 @@ static void connection_abort(connection_t *connection) } -/** - MySQL scheduler callback : kill connection -*/ - -void tp_post_kill_notification(THD *thd) -{ - DBUG_ENTER("tp_post_kill_notification"); - if (current_thd == thd || thd->system_thread) - DBUG_VOID_RETURN; - - if (thd->net.vio) - vio_shutdown(thd->net.vio, SHUT_RD); - DBUG_VOID_RETURN; -} - /** MySQL scheduler callback: wait begin */ diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc index 6359f81cd2b..72e03da2453 100644 --- a/sql/threadpool_win.cc +++ b/sql/threadpool_win.cc @@ -544,22 +544,6 @@ void tp_end(void) } } -/** - Notify pool about connection being killed. -*/ -void tp_post_kill_notification(THD *thd) -{ - if (current_thd == thd) - return; /* There is nothing to do.*/ - - if (thd->system_thread) - return; /* Will crash if we attempt to kill system thread. */ - - Vio *vio= thd->net.vio; - - vio_shutdown(vio, SD_BOTH); - -} /* Handle read completion/notification. -- cgit v1.2.1 From 39e7072d64f5ff36d61bf81970ec398f8d937cfd Mon Sep 17 00:00:00 2001 From: Sergey Petrunya Date: Sun, 4 Nov 2012 19:09:46 +0400 Subject: MDEV-536: LP:1050806 - different result for a query using subquery, and MDEV-567: Wrong result from a query with correlated subquery if ICP is allowed: backport the fix developed for SHOW EXPLAIN: revision-id: psergey@askmonty.org-20120719115219-212cxmm6qvf0wlrb branch nick: 5.5-show-explain-r21 timestamp: Thu 2012-07-19 15:52:19 +0400 BUG#992942 & MDEV-325: Pre-liminary commit for testing and adjust it so that it handles DS-MRR scans correctly. 
--- sql/opt_range.cc | 7 +++ sql/sql_select.cc | 146 +++++++++++++++++++++++++++++++++++++++++++++++++----- sql/sql_select.h | 10 ++++ 3 files changed, 151 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 791af42521a..b98edeb15db 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -10944,6 +10944,13 @@ int QUICK_RANGE_SELECT::reset() DBUG_ENTER("QUICK_RANGE_SELECT::reset"); last_range= NULL; cur_range= (QUICK_RANGE**) ranges.buffer; + + if (file->inited == handler::RND) + { + /* Handler could be left in this state by MRR */ + if ((error= file->ha_rnd_end())) + DBUG_RETURN(error); + } if (file->inited == handler::NONE) { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a4b2d376e05..b6b1259ca49 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -10603,9 +10603,22 @@ void JOIN::cleanup(bool full) if (full) { + JOIN_TAB *sort_tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + if (pre_sort_join_tab) + { + if (sort_tab && sort_tab->select == pre_sort_join_tab->select) + { + pre_sort_join_tab->select= NULL; + } + else + clean_pre_sort_join_tab(); + } + for (tab= first_linear_tab(this, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) + { tab->cleanup(); + } } else { @@ -18849,6 +18862,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, TABLE *table; SQL_SELECT *select; JOIN_TAB *tab; + int err= 0; + bool quick_created= FALSE; DBUG_ENTER("create_sort_index"); if (join->table_count == join->const_tables) @@ -18856,18 +18871,61 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tab= join->join_tab + join->const_tables; table= tab->table; select= tab->select; + + JOIN_TAB *save_pre_sort_join_tab= NULL; + if (join->pre_sort_join_tab) + { + /* + we've already been in this function, and stashed away the original access + method in join->pre_sort_join_tab, restore it now. 
+ */ + + /* First, restore state of the handler */ + if (join->pre_sort_index != MAX_KEY) + { + if (table->file->ha_index_or_rnd_end()) + goto err; + if (join->pre_sort_idx_pushed_cond) + { + table->file->idx_cond_push(join->pre_sort_index, + join->pre_sort_idx_pushed_cond); + } + } + else + { + if (table->file->ha_index_or_rnd_end() || + table->file->ha_rnd_init(TRUE)) + goto err; + } + + /* Second, restore access method parameters */ + tab->records= join->pre_sort_join_tab->records; + tab->select= join->pre_sort_join_tab->select; + tab->select_cond= join->pre_sort_join_tab->select_cond; + tab->type= join->pre_sort_join_tab->type; + tab->read_first_record= join->pre_sort_join_tab->read_first_record; + + save_pre_sort_join_tab= join->pre_sort_join_tab; + join->pre_sort_join_tab= NULL; + } + else + { + /* + Save index #, save index condition. Do it right now, because MRR may + */ + if (table->file->inited == handler::INDEX) + { + join->pre_sort_index= table->file->active_index; + join->pre_sort_idx_pushed_cond= table->file->pushed_idx_cond; + // no need to save key_read + } + else + join->pre_sort_index= MAX_KEY; + } /* Currently ORDER BY ... LIMIT is not supported in subqueries. */ DBUG_ASSERT(join->group_list || !join->is_in_subquery()); - /* - If we have a select->quick object that is created outside of - create_sort_index() and this is part of a subquery that - potentially can be executed multiple times then we should not - delete the quick object on exit from this function. 
- */ - bool keep_quick= select && select->quick && join->join_tab_save; - /* When there is SQL_BIG_RESULT do not sort using index for GROUP BY, and thus force sorting on disk unless a group min-max optimization @@ -18919,7 +18977,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, get_quick_select_for_ref(thd, table, &tab->ref, tab->found_records)))) goto err; - DBUG_ASSERT(!keep_quick); + quick_created= TRUE; } } @@ -18935,7 +18993,27 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, table->sort.found_records=filesort(thd, table,join->sortorder, length, select, filesort_limit, 0, &examined_rows); + + if (quick_created) + { + /* This will delete the quick select. */ + select->cleanup(); + } + + if (!join->pre_sort_join_tab) + { + if (save_pre_sort_join_tab) + join->pre_sort_join_tab= save_pre_sort_join_tab; + else if (!(join->pre_sort_join_tab= (JOIN_TAB*)thd->alloc(sizeof(JOIN_TAB)))) + goto err; + } + + *(join->pre_sort_join_tab)= *tab; + + /*TODO: here, close the index scan, cancel index-only read. 
*/ tab->records= table->sort.found_records; // For SQL_CALC_ROWS +#if 0 + /* MariaDB doesn't need the following: */ if (select) { /* @@ -18952,6 +19030,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tablesort_result_cache= table->sort.io_cache; table->sort.io_cache= NULL; + // select->cleanup(); // filesort did select /* If a quick object was created outside of create_sort_index() that might be reused, then do not call select->cleanup() since @@ -18974,18 +19053,61 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, // Restore the output resultset table->sort.io_cache= tablesort_result_cache; } +#endif + tab->select=NULL; tab->set_select_cond(NULL, __LINE__); - tab->last_inner= 0; - tab->first_unmatched= 0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; + tab->table->file->ha_index_or_rnd_end(); + + if (err) + goto err; + tab->join->examined_rows+=examined_rows; - table->disable_keyread(); // Restore if we used indexes DBUG_RETURN(table->sort.found_records == HA_POS_ERROR); err: DBUG_RETURN(-1); } + +void JOIN::clean_pre_sort_join_tab() +{ + //TABLE *table= pre_sort_join_tab->table; + /* + Note: we can come here for fake_select_lex object. That object will have + the table already deleted by st_select_lex_unit::cleanup(). + We rely on that fake_select_lex didn't have quick select. + */ +#if 0 + if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick) + { + /* + We need to preserve tablesort's output resultset here, because + QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT (called by + SQL_SELECT::cleanup()) may free it assuming it's the result of the quick + select operation that we no longer need. Note that all the other parts of + this data structure are cleaned up when + QUICK_INDEX_MERGE_SELECT::get_next encounters end of data, so the next + SQL_SELECT::cleanup() call changes sort.io_cache alone. 
+ */ + IO_CACHE *tablesort_result_cache; + + tablesort_result_cache= table->sort.io_cache; + table->sort.io_cache= NULL; + pre_sort_join_tab->select->cleanup(); + table->quick_keys.clear_all(); // as far as we cleanup select->quick + table->intersect_keys.clear_all(); + table->sort.io_cache= tablesort_result_cache; + } +#endif + //table->disable_keyread(); // Restore if we used indexes + if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick) + { + pre_sort_join_tab->select->cleanup(); + } +} + + /***************************************************************************** Remove duplicates from tmp table This should be recoded to add a unique index to the table and remove diff --git a/sql/sql_select.h b/sql/sql_select.h index 118a684ab62..e4687b4f00c 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -897,6 +897,14 @@ protected: public: JOIN_TAB *join_tab, **best_ref; + + /* + Saved join_tab for pre_sorting. create_sort_index() will save here.. + */ + JOIN_TAB *pre_sort_join_tab; + uint pre_sort_index; + Item *pre_sort_idx_pushed_cond; + void clean_pre_sort_join_tab(); /* For "Using temporary+Using filesort" queries, JOIN::join_tab can point to @@ -1279,6 +1287,8 @@ public: outer_ref_cond= pseudo_bits_cond= NULL; in_to_exists_where= NULL; in_to_exists_having= NULL; + + pre_sort_join_tab= NULL; } int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num, -- cgit v1.2.1 From e08f4f16303c234c82c397414c24c931b679f84a Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Sun, 11 Nov 2012 11:47:44 -0800 Subject: Fixed bug mdev-3851. Any ref access to a table by a key fully extended by the components of the primary key should be actually an eq_ref access. 
--- sql/sql_select.cc | 22 ++++++++++++++-------- sql/table.cc | 5 +++-- 2 files changed, 17 insertions(+), 10 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b6b1259ca49..30f84142c0c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -5378,7 +5378,8 @@ best_access_path(JOIN *join, !ref_or_null_part) { /* use eq key */ max_key_part= (uint) ~0; - if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME || + test(key_flags & HA_EXT_NOSAME)) { tmp = prev_record_reads(join->positions, idx, found_ref); records=1.0; @@ -7966,18 +7967,23 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, *ref_key=0; // end_marker if (j->type == JT_FT) DBUG_RETURN(0); + ulong key_flags= j->table->actual_key_flags(keyinfo); if (j->type == JT_CONST) j->table->const_table= 1; - else if (((j->table->actual_key_flags(keyinfo) & - (HA_NOSAME | HA_NULL_PART_KEY)) - != HA_NOSAME) || + else if (((key_flags & (HA_NOSAME | HA_NULL_PART_KEY))!= HA_NOSAME) || keyparts != j->table->actual_n_key_parts(keyinfo) || null_ref_key) { - /* Must read with repeat */ - j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF; - j->ref.null_ref_key= null_ref_key; - j->ref.null_ref_part= null_ref_part; + if (test(key_flags & HA_EXT_NOSAME) && keyparts == keyinfo->ext_key_parts && + !null_ref_key) + j->type= JT_EQ_REF; + else + { + /* Must read with repeat */ + j->type= null_ref_key ? 
JT_REF_OR_NULL : JT_REF; + j->ref.null_ref_key= null_ref_key; + j->ref.null_ref_part= null_ref_part; + } } else if (keyuse_uses_no_tables) { diff --git a/sql/table.cc b/sql/table.cc index c0e27b9a962..9f94054a1e6 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -981,7 +981,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, keyinfo->ext_key_part_map= 0; if (share->use_ext_keys && i) { - keyinfo->ext_key_flags= keyinfo->flags | HA_NOSAME; keyinfo->ext_key_part_map= 0; for (j= 0; j < first_key_parts && keyinfo->ext_key_parts < MAX_REF_PARTS; @@ -1002,7 +1001,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, keyinfo->ext_key_parts++; keyinfo->ext_key_part_map|= 1 << j; } - } + } + if (j == first_key_parts) + keyinfo->ext_key_flags= keyinfo->flags | HA_NOSAME | HA_EXT_NOSAME; } share->ext_key_parts+= keyinfo->ext_key_parts; } -- cgit v1.2.1 From 4d442610524154c767f290f327832538ea1d04a4 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 19 Nov 2012 19:29:27 -0800 Subject: Fixed bug mdev-622 (LP bug #1002508). Back-ported the fix and the test case for bug 13528826 from mysql-5.6. --- sql/sql_select.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 30f84142c0c..86742c6ac4a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -22831,6 +22831,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, ha_rows table_records= table->file->stats.records; bool group= join && join->group && order == join->group_list; ha_rows ref_key_quick_rows= HA_POS_ERROR; + const bool has_limit= (select_limit_arg != HA_POS_ERROR); /* If not used with LIMIT, only use keys if the whole query can be @@ -22962,7 +22963,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, be included into the result set. 
*/ if (select_limit > table_records/rec_per_key) - select_limit= table_records; + select_limit= table_records; else select_limit= (ha_rows) (select_limit*rec_per_key); } /* group */ @@ -23044,7 +23045,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, *new_key= best_key; *new_key_direction= best_key_direction; - *new_select_limit= best_select_limit; + *new_select_limit= has_limit ? best_select_limit : table_records; if (new_used_key_parts != NULL) *new_used_key_parts= best_key_parts; -- cgit v1.2.1 From d473199744812ee4af52edfa4b85610d834802ca Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 8 Nov 2012 14:17:53 +0100 Subject: MDEV-258 audit plugin only see queries if general log is enabled --- sql/log.cc | 30 ++++++++++++++---------------- sql/sql_audit.h | 13 +++++++------ 2 files changed, 21 insertions(+), 22 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index effe0e36705..b583c6dfac4 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1279,12 +1279,6 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command, DBUG_ASSERT(thd); - lock_shared(); - if (!opt_log) - { - unlock(); - return 0; - } user_host_len= make_user_name(thd, user_host_buff); current_time= my_hrtime(); @@ -1295,15 +1289,19 @@ bool LOGGER::general_log_write(THD *thd, enum enum_server_command command, command_name[(uint) command].length, query, query_length); - while (*current_handler) - error|= (*current_handler++)-> - log_general(thd, current_time, user_host_buff, - user_host_len, thd->thread_id, - command_name[(uint) command].str, - command_name[(uint) command].length, - query, query_length, - thd->variables.character_set_client) || error; - unlock(); + if (opt_log && log_command(thd, command)) + { + lock_shared(); + while (*current_handler) + error|= (*current_handler++)-> + log_general(thd, current_time, user_host_buff, + user_host_len, thd->thread_id, + command_name[(uint) command].str, + command_name[(uint) 
command].length, + query, query_length, + thd->variables.character_set_client) || error; + unlock(); + } return error; } @@ -5333,7 +5331,7 @@ bool general_log_write(THD *thd, enum enum_server_command command, const char *query, uint query_length) { /* Write the message to the log if we want to log this king of commands */ - if (logger.log_command(thd, command)) + if (logger.log_command(thd, command) || mysql_audit_general_enabled()) return logger.general_log_write(thd, command, query, query_length); return FALSE; diff --git a/sql/sql_audit.h b/sql/sql_audit.h index 51c695d091d..02a63852955 100644 --- a/sql/sql_audit.h +++ b/sql/sql_audit.h @@ -53,6 +53,11 @@ static inline uint make_user_name(THD *thd, char *buf) sctx->ip ? sctx->ip : "", "]", NullS) - buf; } +static inline bool mysql_audit_general_enabled() +{ + return mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK; +} + /** Call audit plugins of GENERAL audit class, MYSQL_AUDIT_GENERAL_LOG subtype. @@ -72,8 +77,7 @@ void mysql_audit_general_log(THD *thd, time_t time, const char *cmd, uint cmdlen, const char *query, uint querylen) { -#ifndef EMBEDDED_LIBRARY - if (mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK) + if (mysql_audit_general_enabled()) { CHARSET_INFO *clientcs= thd ? thd->variables.character_set_client : global_system_variables.character_set_client; @@ -82,7 +86,6 @@ void mysql_audit_general_log(THD *thd, time_t time, 0, time, user, userlen, cmd, cmdlen, query, querylen, clientcs, 0); } -#endif } /** @@ -101,8 +104,7 @@ static inline void mysql_audit_general(THD *thd, uint event_subtype, int error_code, const char *msg) { -#ifndef EMBEDDED_LIBRARY - if (mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK) + if (mysql_audit_general_enabled()) { time_t time= my_time(0); uint msglen= msg ? 
strlen(msg) : 0; @@ -130,7 +132,6 @@ void mysql_audit_general(THD *thd, uint event_subtype, error_code, time, user, userlen, msg, msglen, query.str(), query.length(), query.charset(), rows); } -#endif } #define MYSQL_AUDIT_NOTIFY_CONNECTION_CONNECT(thd) mysql_audit_notify(\ -- cgit v1.2.1 From 53578613e96bb471446e226dbab61c2152232f56 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 8 Nov 2012 16:49:07 +0100 Subject: MDEV-259 audit plugin does not see sub-statements --- sql/sp_head.cc | 3 +-- sql/sql_audit.h | 15 +++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/sp_head.cc b/sql/sp_head.cc index f3ba0073c69..0d92a68a2d4 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -3113,8 +3113,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) (the order of query cache and subst_spvars calls is irrelevant because queries with SP vars can't be cached) */ - if (unlikely((thd->variables.option_bits & OPTION_LOG_OFF)==0)) - general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); + general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); if (query_cache_send_result_to_client(thd, thd->query(), thd->query_length()) <= 0) diff --git a/sql/sql_audit.h b/sql/sql_audit.h index 02a63852955..b2ce31f1d26 100644 --- a/sql/sql_audit.h +++ b/sql/sql_audit.h @@ -37,8 +37,16 @@ extern void mysql_audit_acquire_plugins(THD *thd, uint event_class); #ifndef EMBEDDED_LIBRARY extern void mysql_audit_notify(THD *thd, uint event_class, uint event_subtype, ...); + +static inline bool mysql_audit_general_enabled() +{ + return mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK; +} + #else -#define mysql_audit_notify(...) +static inline void mysql_audit_notify(THD *thd, uint event_class, + uint event_subtype, ...) { } +#define mysql_audit_general_enabled() 0 #endif extern void mysql_audit_release(THD *thd); @@ -53,11 +61,6 @@ static inline uint make_user_name(THD *thd, char *buf) sctx->ip ? 
sctx->ip : "", "]", NullS) - buf; } -static inline bool mysql_audit_general_enabled() -{ - return mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK; -} - /** Call audit plugins of GENERAL audit class, MYSQL_AUDIT_GENERAL_LOG subtype. -- cgit v1.2.1 From 49c8d8b2e613e7c28663df0234e6d98d727eaebd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 9 Nov 2012 13:07:32 +0200 Subject: MDEV-3810 fix. The problem is that memory alocated by copy_andor_structure() well be freed, but if level of SELECT_LEX it will be excluded (in case of merge derived tables and view) then sl->where/having will not be updated here but still can be accessed (so it will be access to freed memory). (patch by Sanja) --- sql/sql_prepare.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 9e8a6b941c6..d91d03d24ee 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2447,14 +2447,24 @@ void reinit_stmt_before_use(THD *thd, LEX *lex) */ if (sl->prep_where) { - sl->where= sl->prep_where->copy_andor_structure(thd); + /* + We need this rollback because memory allocated in + copy_andor_structure() will be freed + */ + thd->change_item_tree((Item**)&sl->where, + sl->prep_where->copy_andor_structure(thd)); sl->where->cleanup(); } else sl->where= NULL; if (sl->prep_having) { - sl->having= sl->prep_having->copy_andor_structure(thd); + /* + We need this rollback because memory allocated in + copy_andor_structure() will be freed + */ + thd->change_item_tree((Item**)&sl->having, + sl->prep_having->copy_andor_structure(thd)); sl->having->cleanup(); } else -- cgit v1.2.1 From 094f4cf77890c5a747a57cf2bed149b0b6933507 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Fri, 9 Nov 2012 23:51:51 -0800 Subject: Fixed bug mdev-3845. If triggers are used for an insert/update/delete statement than the values of all virtual columns must be computed as any of them may be used by the triggers. 
--- sql/mysql_priv.h | 3 ++- sql/sql_base.cc | 17 +++++++++++++---- sql/sql_delete.cc | 4 +++- sql/sql_table.cc | 2 +- sql/sql_update.cc | 8 ++++++-- sql/table.cc | 21 ++++++++++++--------- sql/table.h | 7 +++++++ 7 files changed, 44 insertions(+), 18 deletions(-) (limited to 'sql') diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index addb29861fc..73b8ad86426 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1366,7 +1366,8 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length, bool allow_rowid, uint *cached_field_index_ptr); Field * find_field_in_table_sef(TABLE *table, const char *name); -int update_virtual_fields(THD *thd, TABLE *table, bool ignore_stored= FALSE); +int update_virtual_fields(THD *thd, TABLE *table, + enum enum_vcol_update_mode vcol_update_mode= VCOL_UPDATE_FOR_READ); #endif /* MYSQL_SERVER */ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index f45403696be..32a5bd8356f 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -8460,7 +8460,9 @@ fill_record(THD * thd, List &fields, List &values, { if (vcol_table->vfield) { - if (update_virtual_fields(thd, vcol_table, TRUE)) + if (update_virtual_fields(thd, vcol_table, + vcol_table->triggers ? VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_WRITE)) goto err; } } @@ -8524,7 +8526,9 @@ fill_record_n_invoke_before_triggers(THD *thd, List &fields, if (item_field && item_field->field && (table= item_field->field->table) && table->vfield) - result= update_virtual_fields(thd, table, TRUE); + result= update_virtual_fields(thd, table, + table->triggers ? VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_WRITE); } } return result; @@ -8604,7 +8608,10 @@ fill_record(THD *thd, Field **ptr, List &values, bool ignore_errors) } /* Update virtual fields*/ thd->abort_on_warning= FALSE; - if (table->vfield && update_virtual_fields(thd, table, TRUE)) + if (table->vfield && + update_virtual_fields(thd, table, + table->triggers ? 
VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_WRITE)) goto err; thd->abort_on_warning= abort_on_warning_saved; DBUG_RETURN(thd->is_error()); @@ -8657,7 +8664,9 @@ fill_record_n_invoke_before_triggers(THD *thd, Field **ptr, { TABLE *table= (*ptr)->table; if (table->vfield) - result= update_virtual_fields(thd, table, TRUE); + result= update_virtual_fields(thd, table, + table->triggers ? VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_WRITE); } return result; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 72a5b9703b3..90ca139d17b 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -313,7 +313,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, while (!(error=info.read_record(&info)) && !thd->killed && ! thd->is_error()) { - update_virtual_fields(thd, table); + update_virtual_fields(thd, table, + triggers_applicable ? VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_READ); thd->examined_row_count++; // thd->is_error() is tested to disallow delete row on error if (!select || select->skip_record(thd) > 0) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 6b709915283..9b9ee0a743d 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -8066,7 +8066,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, copy_ptr->do_copy(copy_ptr); } prev_insert_id= to->file->next_insert_id; - update_virtual_fields(thd, to, TRUE); + update_virtual_fields(thd, to, VCOL_UPDATE_FOR_WRITE); if (thd->is_error()) { error= 1; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index b3c001849b5..56b2c508fbc 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -490,7 +490,9 @@ int mysql_update(THD *thd, while (!(error=info.read_record(&info)) && !thd->killed) { - update_virtual_fields(thd, table); + update_virtual_fields(thd, table, + table->triggers ? 
VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_READ); thd->examined_row_count++; if (!select || (error= select->skip_record(thd)) > 0) { @@ -605,7 +607,9 @@ int mysql_update(THD *thd, while (!(error=info.read_record(&info)) && !thd->killed) { - update_virtual_fields(thd, table); + update_virtual_fields(thd, table, + table->triggers ? VCOL_UPDATE_ALL : + VCOL_UPDATE_FOR_READ); thd->examined_row_count++; if (!select || select->skip_record(thd) > 0) { diff --git a/sql/table.cc b/sql/table.cc index d950b7a3a4e..5c1e27b87c7 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -5500,22 +5500,25 @@ size_t max_row_length(TABLE *table, const uchar *data) @param thd Thread handle @param table The TABLE object - @param for_write Requests to compute only fields needed for write + @param vcol_update_mode Specifies what virtual column are computed @details The function computes the values of the virtual columns of the table and stores them in the table record buffer. - Only fields from vcol_set are computed, and, when the flag for_write is not - set to TRUE, a virtual field is computed only if it's not stored. - The flag for_write is set to TRUE for row insert/update operations. - + If vcol_update_mode is set to VCOL_UPDATE_ALL then all virtual column are + computed. Otherwise, only fields from vcol_set are computed: all of them, + if vcol_update_mode is set to VCOL_UPDATE_FOR_WRITE, and, only those with + the stored_in_db flag set to false, if vcol_update_mode is equal to + VCOL_UPDATE_FOR_READ. 
+ @retval 0 Success @retval >0 Error occurred when storing a virtual field value */ -int update_virtual_fields(THD *thd, TABLE *table, bool for_write) +int update_virtual_fields(THD *thd, TABLE *table, + enum enum_vcol_update_mode vcol_update_mode) { DBUG_ENTER("update_virtual_fields"); Field **vfield_ptr, *vfield; @@ -5529,9 +5532,9 @@ int update_virtual_fields(THD *thd, TABLE *table, bool for_write) { vfield= (*vfield_ptr); DBUG_ASSERT(vfield->vcol_info && vfield->vcol_info->expr_item); - /* Only update those fields that are marked in the vcol_set bitmap */ - if (bitmap_is_set(table->vcol_set, vfield->field_index) && - (for_write || !vfield->stored_in_db)) + if ((bitmap_is_set(table->vcol_set, vfield->field_index) && + (vcol_update_mode == VCOL_UPDATE_FOR_WRITE || !vfield->stored_in_db)) || + vcol_update_mode == VCOL_UPDATE_ALL) { /* Compute the actual value of the virtual fields */ error= vfield->vcol_info->expr_item->save_in_field(vfield, 0); diff --git a/sql/table.h b/sql/table.h index c1a4e952f7b..80544ef3d89 100644 --- a/sql/table.h +++ b/sql/table.h @@ -156,6 +156,13 @@ enum frm_type_enum enum release_type { RELEASE_NORMAL, RELEASE_WAIT_FOR_DROP }; +enum enum_vcol_update_mode +{ + VCOL_UPDATE_FOR_READ= 0, + VCOL_UPDATE_FOR_WRITE, + VCOL_UPDATE_ALL +}; + typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filesort */ -- cgit v1.2.1 From e679dfcafc630e2dbe506d0001322055d7684e03 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Nov 2012 19:56:51 +0100 Subject: followup fixes for MySQL Bug #13889741: HANDLE_FATAL_SIGNAL IN _DB_ENTER_ | HANDLE_FATAL_SIGNAL IN STRNLEN --- sql/sql_acl.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 89b70032642..020aa042722 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1353,14 +1353,20 @@ ulong acl_get(const char *host, const char *ip, acl_entry *entry; DBUG_ENTER("acl_get"); - 
VOID(pthread_mutex_lock(&acl_cache->lock)); - end=strmov((tmp_db=strmov(strmov(key, ip ? ip : "")+1,user)+1),db); + tmp_db= strmov(strmov(key, ip ? ip : "") + 1, user) + 1; + end= strnmov(tmp_db, db, key + sizeof(key) - tmp_db); + + if (end >= key + sizeof(key)) // db name was truncated + DBUG_RETURN(0); // no privileges for an invalid db name + if (lower_case_table_names) { my_casedn_str(files_charset_info, tmp_db); db=tmp_db; } key_length= (size_t) (end-key); + + VOID(pthread_mutex_lock(&acl_cache->lock)); if (!db_is_pattern && (entry=(acl_entry*) acl_cache->search((uchar*) key, key_length))) { @@ -4331,11 +4337,17 @@ static bool check_grant_db_routine(THD *thd, const char *db, HASH *hash) bool check_grant_db(THD *thd,const char *db) { Security_context *sctx= thd->security_ctx; - char helping [SAFE_NAME_LEN + USERNAME_LENGTH+2]; + char helping [SAFE_NAME_LEN + USERNAME_LENGTH+2], *end; uint len; bool error= TRUE; - len= (uint) (strmov(strmov(helping, sctx->priv_user) + 1, db) - helping) + 1; + end= strmov(helping, sctx->priv_user) + 1; + end= strnmov(end, db, helping + sizeof(helping) - end); + + if (end >= helping + sizeof(helping)) // db name was truncated + return 1; // no privileges for an invalid db name + + len= (uint) (end - helping) + 1; rw_rdlock(&LOCK_grant); -- cgit v1.2.1 From 632dc05ded27eeb0976e7a67310749ab4635614b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 17 Nov 2012 19:04:13 +0100 Subject: MDEV-3850 too early pthread_mutex_unlock in TC_LOG_MMAP::log_xid --- sql/log.cc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index 4c21ac4c571..eb248c63b19 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -5710,12 +5710,9 @@ int TC_LOG_MMAP::log_xid(THD *thd, my_xid xid) pthread_mutex_unlock(&LOCK_active); pthread_mutex_lock(&p->lock); p->waiters++; - for (;;) + while (p->state == DIRTY && syncing) { - int not_dirty = p->state != DIRTY; pthread_mutex_unlock(&p->lock); - if (not_dirty 
|| !syncing) - break; pthread_cond_wait(&p->cond, &LOCK_sync); pthread_mutex_lock(&p->lock); } -- cgit v1.2.1 From 60a7b05871121987f4156405e33f93530e159b74 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Nov 2012 15:38:27 +0200 Subject: MDEV-3801 Reproducible sub select join crash on 5.3.8 and 5.3.9 Properly drop all unused keys. Patch by Igor Babaev. --- sql/sql_select.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index bf01e42f3c0..1e57f11e399 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8982,7 +8982,7 @@ void JOIN::drop_unused_derived_keys() JOIN_TAB *tab; for (tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab; - tab= next_linear_tab(this, tab, WITHOUT_BUSH_ROOTS)) + tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { TABLE *table=tab->table; -- cgit v1.2.1 From 47c5018f592b61b5e000842bdf5862ff458de488 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 20 Nov 2012 13:28:53 +0100 Subject: MDEV-3861: assertions in TC_LOG_MMAP. Fix some problems in the TC_LOG_MMAP commit processing, which could lead to assertions in some cases. Problems are mostly reproducible in MariaDB 10.0 with asynchroneous commit checkpoints, but most of the problems were present in earlier versions also. --- sql/log.cc | 19 ++++++++++--------- sql/log.h | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index eb248c63b19..e44f8ff3067 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -5550,8 +5550,9 @@ int TC_LOG_MMAP::open(const char *opt_name) syncing= 0; active=pages; + DBUG_ASSERT(npages >= 2); pool=pages+1; - pool_last=pages+npages-1; + pool_last_ptr= &((pages+npages-1)->next); return 0; @@ -5582,8 +5583,8 @@ void TC_LOG_MMAP::get_active_from_pool() do { best_p= p= &pool; - if ((*p)->waiters == 0) // can the first page be used ? - break; // yes - take it. + if ((*p)->waiters == 0 && (*p)->free > 0) // can the first page be used ? 
+ break; // yes - take it. best_free=0; // no - trying second strategy for (p=&(*p)->next; *p; p=&(*p)->next) @@ -5600,10 +5601,10 @@ void TC_LOG_MMAP::get_active_from_pool() safe_mutex_assert_owner(&LOCK_active); active=*best_p; - if ((*best_p)->next) // unlink the page from the pool - *best_p=(*best_p)->next; - else - pool_last=*best_p; + /* Unlink the page from the pool. */ + if (!(*best_p)->next) + pool_last_ptr= best_p; + *best_p=(*best_p)->next; pthread_mutex_unlock(&LOCK_pool); pthread_mutex_lock(&active->lock); @@ -5764,8 +5765,8 @@ int TC_LOG_MMAP::sync() /* page is synced. let's move it to the pool */ pthread_mutex_lock(&LOCK_pool); - pool_last->next=syncing; - pool_last=syncing; + (*pool_last_ptr)=syncing; + pool_last_ptr=&(syncing->next); syncing->next=0; syncing->state= err ? ERROR : POOL; pthread_cond_signal(&COND_pool); // in case somebody's waiting diff --git a/sql/log.h b/sql/log.h index f42ef514307..9fb233fed86 100644 --- a/sql/log.h +++ b/sql/log.h @@ -81,7 +81,7 @@ class TC_LOG_MMAP: public TC_LOG my_off_t file_length; uint npages, inited; uchar *data; - struct st_page *pages, *syncing, *active, *pool, *pool_last; + struct st_page *pages, *syncing, *active, *pool, **pool_last_ptr; /* note that, e.g. 
LOCK_active is only used to protect 'active' pointer, to protect the content of the active page -- cgit v1.2.1 From 13ba0dd286f3296bfbbd202fa76d47770734b472 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 17 Nov 2012 16:50:15 +0100 Subject: MDEV-736 LP:1004615 - Unexpected warnings "Encountered illegal value '' when converting to DECIMAL" on a query with aggregate functions and GROUP BY fix: don't call field->val_decimal() if the field->is_null() because the buffer at field->ptr might not hold a valid decimal value sql/item_sum.cc: do not call field->val_decimal() if the field->is_null() storage/maria/ma_blockrec.c: cleanup storage/maria/ma_rrnd.c: cleanup strings/decimal.c: typo --- sql/item_sum.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 86aef19efc0..1c83f0e2422 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -2149,9 +2149,8 @@ Item_sum_hybrid::min_max_update_int_field() void Item_sum_hybrid::min_max_update_decimal_field() { - /* TODO: optimize: do not get result_field in case of args[0] is NULL */ my_decimal old_val, nr_val; - const my_decimal *old_nr= result_field->val_decimal(&old_val); + const my_decimal *old_nr; const my_decimal *nr= args[0]->val_decimal(&nr_val); if (!args[0]->null_value) { @@ -2159,16 +2158,17 @@ Item_sum_hybrid::min_max_update_decimal_field() old_nr=nr; else { + old_nr= result_field->val_decimal(&old_val); bool res= my_decimal_cmp(old_nr, nr) > 0; /* (cmp_sign > 0 && res) || (!(cmp_sign > 0) && !res) */ if ((cmp_sign > 0) ^ (!res)) old_nr=nr; } result_field->set_notnull(); + result_field->store_decimal(old_nr); } else if (result_field->is_null(0)) result_field->set_null(); - result_field->store_decimal(old_nr); } -- cgit v1.2.1 From 0f8450b2fb0f3f21a1829254b19bd61c46d09ead Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 27 Nov 2012 00:45:29 +0100 Subject: MDEV-3885 - connection suicide via mysql_kill() causes assertion 
in server Assertion happened because sql_kill did not set OK status in diagnostic area in the case of connection suicide (id to kill == thd->thread_id), issued via COM_PROCESS_KILL, e.g. using mysql_kill() This patch ensures that diagnostic area is initialized in this specific case. --- sql/sql_parse.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d2eb6ae8d1f..087c7903dc7 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6717,7 +6717,7 @@ void sql_kill(THD *thd, ulong id, killed_state state) uint error; if (!(error= kill_one_thread(thd, id, state))) { - if (! thd->killed) + if ((!thd->killed) || (thd->thread_id == id)) my_ok(thd); } else -- cgit v1.2.1 From 0497ecc2c8a3ff362fdb2e69c558063c23d2e885 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 27 Nov 2012 12:34:13 +0100 Subject: fix regression in sp_notembedded after MDEV-3885 --- sql/sql_parse.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 087c7903dc7..acd4873b212 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6717,7 +6717,7 @@ void sql_kill(THD *thd, ulong id, killed_state state) uint error; if (!(error= kill_one_thread(thd, id, state))) { - if ((!thd->killed) || (thd->thread_id == id)) + if ((!thd->killed) || (thd->thread_id == id && thd->killed >= KILL_CONNECTION)) my_ok(thd); } else -- cgit v1.2.1 From 5fddd4a7f09c7cb7ae2171f284130b1316aa6235 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 27 Nov 2012 15:47:08 +0100 Subject: Fix yet another regression after MDEV-3885.
If connection kills itself (or own query), it will get an error consistently, with both COM_PROCESSKILL and with "KILL [QUERY] id" --- sql/sql_parse.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index acd4873b212..5dac052b749 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6717,8 +6717,10 @@ void sql_kill(THD *thd, ulong id, killed_state state) uint error; if (!(error= kill_one_thread(thd, id, state))) { - if ((!thd->killed) || (thd->thread_id == id && thd->killed >= KILL_CONNECTION)) + if ((!thd->killed)) my_ok(thd); + else + my_error(killed_errno(thd->killed), MYF(0), id); } else my_error(error, MYF(0), id); -- cgit v1.2.1 From 5e345281e3599c793fdea771d0f23eb19f22d601 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 4 Dec 2012 16:06:07 -0800 Subject: Fixed bug mdev-3888. When inserting a record with update on duplicate keys the server calls the ha_index_read_idx_map handler function to look for the record that violates unique key constraints. The third parameter of this call should mark only the base components of the index where the server searches for the record. Possible hidden components of the primary key are to be unmarked.
--- sql/sql_insert.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 3c3b9f85727..231671d172b 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1677,9 +1677,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } } key_copy((uchar*) key,table->record[0],table->key_info+key_nr,0); + key_part_map keypart_map= (1 << table->key_info[key_nr].key_parts) - 1; if ((error= (table->file->ha_index_read_idx_map(table->record[1], key_nr, (uchar*) key, - HA_WHOLE_KEY, + keypart_map, HA_READ_KEY_EXACT)))) goto err; } -- cgit v1.2.1 From b8b875cb796743240bed71857eae73d37f03c28f Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Nov 2012 21:22:44 +0200 Subject: Fix of MDEV-3874: Server crashes in Item_field::print on a SELECT from a MERGE view with materialization+semijoin, subquery, ORDER BY. The problem was that in debugging binaries it try to print item to assign human readable name to the item. But subquery item was already freed (join_free/cleanup with full cleanup) so Item_field refers to temporary table which memory had been already freed. --- sql/sql_select.cc | 13 +++++++++++++ sql/sql_select.h | 3 +++ 2 files changed, 16 insertions(+) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ceb8827c790..8f7fdab4ed3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2074,6 +2074,7 @@ JOIN::reinit() ULL(0)); first_record= 0; + cleaned= false; if (exec_tmp_table1) { @@ -10623,6 +10624,7 @@ void JOIN::cleanup(bool full) { tab->cleanup(); } + cleaned= true; } else { @@ -22409,6 +22411,17 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(STRING_WITH_LEN("select ")); + if (join && join->cleaned) + { + /* + JOIN already cleaned up so it is dangerous to print items + because temporary tables they pointed on could be freed. 
+ */ + str->append('#'); + str->append(select_number); + return; + } + /* First add options */ if (options & SELECT_STRAIGHT_JOIN) str->append(STRING_WITH_LEN("straight_join ")); diff --git a/sql/sql_select.h b/sql/sql_select.h index e4687b4f00c..bac29b96c5a 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1141,6 +1141,8 @@ public: bool skip_sort_order; bool need_tmp, hidden_group_fields; + /* TRUE if there was full cleunap of the JOIN */ + bool cleaned; DYNAMIC_ARRAY keyuse; Item::cond_result cond_value, having_value; List all_fields; ///< to store all fields that used in query @@ -1268,6 +1270,7 @@ public: zero_result_cause= 0; optimized= 0; initialized= 0; + cleaned= 0; cond_equal= 0; having_equal= 0; exec_const_cond= 0; -- cgit v1.2.1