| author    | Alexander Barkov <bar@mariadb.org> | 2017-11-03 09:29:21 +0400 |
| committer | Alexander Barkov <bar@mariadb.org> | 2017-11-03 09:29:21 +0400 |
| commit    | 5fc79476b522e6b2f5cd91ed79565fc3a09e9dd0 (patch) | |
| tree      | 92a82b5f5735d7ec089032e2ce07a89d077c43ed | |
| parent    | 7a63a7dc6d0c959c48b88dbea8e848c7bf4a0b88 (diff) | |
| parent    | beac522b550b0c8b9c7a4451bf1b851e83fd828f (diff) | |
| download  | mariadb-git-5fc79476b522e6b2f5cd91ed79565fc3a09e9dd0.tar.gz | |
Merge remote-tracking branch 'origin/10.2' into bb-10.2-ext
| -rw-r--r-- | mysql-test/r/having.result                 |  73 |
| -rw-r--r-- | mysql-test/t/having.test                   |  80 |
| -rw-r--r-- | sql/item_geofunc.cc                        |   2 |
| -rw-r--r-- | sql/sql_select.cc                          | 183 |
| -rw-r--r-- | sql/sql_select.h                           |   1 |
| -rw-r--r-- | storage/innobase/handler/handler0alter.cc |   2 |
| -rw-r--r-- | storage/innobase/os/os0file.cc             |  44 |
7 files changed, 278 insertions, 107 deletions
diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result
index 4a429e1b119..8a8d6e7b2aa 100644
--- a/mysql-test/r/having.result
+++ b/mysql-test/r/having.result
@@ -727,3 +727,76 @@ A COUNT(*)
 DROP VIEW v1;
 DROP TABLE t1;
 End of 10.1 tests
+#
+# MDEV-14093: GROUP BY with HAVING over function + ORDER BY
+#
+CREATE TABLE _authors (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+name VARCHAR(100),
+some_field MEDIUMINT(8) UNSIGNED,
+PRIMARY KEY (id),
+index(some_field)
+);
+CREATE TABLE _books (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+title VARCHAR(100),
+PRIMARY KEY (id)
+);
+CREATE TABLE _books2authors (
+author_id MEDIUMINT(8) DEFAULT 0,
+book_id MEDIUMINT(8) DEFAULT 0,
+index(author_id),
+index(book_id)
+);
+INSERT INTO _authors (name, some_field) VALUES
+('author1', 1),('author2', 2),('author3', 3);
+INSERT INTO _books (title) VALUES
+('book1'),('book2'),('book3');
+INSERT INTO _books2authors (author_id, book_id) VALUES
+(2,1),(3,2),(3,3);
+SELECT A.id,
+GROUP_CONCAT(B.title ORDER BY B.title DESC SEPARATOR ',') AS books,
+some_field-1 AS having_field
+FROM _authors A
+LEFT JOIN _books2authors B2A FORCE INDEX(author_id)
+ON B2A.author_id = A.id
+LEFT JOIN
+_books B ON B.id = B2A.book_id
+GROUP BY A.id
+HAVING having_field < 1
+ORDER BY having_field ASC;
+id books having_field
+1 NULL 0
+DROP TABLE _authors, _books, _books2authors;
+#
+# Bug#17055185: WRONG RESULTS WHEN RUNNING A SELECT THAT INCLUDE
+# A HAVING BASED ON A FUNCTION.
+#
+CREATE TABLE series (
+val INT(10) UNSIGNED NOT NULL
+);
+INSERT INTO series VALUES(1);
+CREATE FUNCTION next_seq_value() RETURNS INT
+BEGIN
+DECLARE next_val INT;
+SELECT val INTO next_val FROM series;
+UPDATE series SET val=mod(val + 1, 2);
+RETURN next_val;
+END;
+|
+CREATE TABLE t1 (t INT, u INT, KEY(t));
+INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16),
+(16, 17), (17, 17);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
+GROUP BY t HAVING r = 1 ORDER BY t1.u;
+t r
+10 1
+12 1
+15 1
+17 1
+DROP TABLE t1;
+DROP FUNCTION next_seq_value;
+DROP TABLE series;
diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test
index 160b347f870..3d8f7dc42b7 100644
--- a/mysql-test/t/having.test
+++ b/mysql-test/t/having.test
@@ -758,3 +758,83 @@ DROP VIEW v1;
 DROP TABLE t1;
 
 --echo End of 10.1 tests
+
+--echo #
+--echo # MDEV-14093: GROUP BY with HAVING over function + ORDER BY
+--echo #
+
+CREATE TABLE _authors (
+  id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+  name VARCHAR(100),
+  some_field MEDIUMINT(8) UNSIGNED,
+  PRIMARY KEY (id),
+  index(some_field)
+);
+
+CREATE TABLE _books (
+  id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+  title VARCHAR(100),
+  PRIMARY KEY (id)
+);
+CREATE TABLE _books2authors (
+  author_id MEDIUMINT(8) DEFAULT 0,
+  book_id MEDIUMINT(8) DEFAULT 0,
+  index(author_id),
+  index(book_id)
+);
+
+INSERT INTO _authors (name, some_field) VALUES
+('author1', 1),('author2', 2),('author3', 3);
+
+INSERT INTO _books (title) VALUES
+('book1'),('book2'),('book3');
+
+INSERT INTO _books2authors (author_id, book_id) VALUES
+(2,1),(3,2),(3,3);
+
+SELECT A.id,
+  GROUP_CONCAT(B.title ORDER BY B.title DESC SEPARATOR ',') AS books,
+  some_field-1 AS having_field
+FROM _authors A
+  LEFT JOIN _books2authors B2A FORCE INDEX(author_id)
+  ON B2A.author_id = A.id
+  LEFT JOIN
+  _books B ON B.id = B2A.book_id
+GROUP BY A.id
+HAVING having_field < 1
+ORDER BY having_field ASC;
+
+DROP TABLE _authors, _books, _books2authors;
+
+--echo #
+--echo # Bug#17055185: WRONG RESULTS WHEN RUNNING A SELECT THAT INCLUDE
+--echo # A HAVING BASED ON A FUNCTION.
+--echo #
+
+# Generate series 1, 0, 1, 0....
+CREATE TABLE series (
+  val INT(10) UNSIGNED NOT NULL
+);
+INSERT INTO series VALUES(1);
+
+DELIMITER |;
+CREATE FUNCTION next_seq_value() RETURNS INT
+BEGIN
+  DECLARE next_val INT;
+  SELECT val INTO next_val FROM series;
+  UPDATE series SET val=mod(val + 1, 2);
+  RETURN next_val;
+END;
+|
+DELIMITER ;|
+
+CREATE TABLE t1 (t INT, u INT, KEY(t));
+INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16),
+                     (16, 17), (17, 17);
+ANALYZE TABLE t1;
+SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
+  GROUP BY t HAVING r = 1 ORDER BY t1.u;
+
+DROP TABLE t1;
+DROP FUNCTION next_seq_value;
+DROP TABLE series;
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 16d4cf7cefa..83d8d2516f0 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -135,7 +135,7 @@ String *Item_func_geometry_from_json::val_str(String *str)
   {
     String *sv= args[1]->val_str(&tmp_js);
     my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0),
-             "option", sv->c_ptr(), "ST_GeometryFromJSON");
+             "option", sv->c_ptr_safe(), "ST_GeometryFromJSON");
     null_value= 1;
     return 0;
   }
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 8cd4ec36a55..46b8d7dc9a8 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2260,6 +2260,76 @@ derived_exit:
   DBUG_RETURN(0);
 }
 
+/**
+  Add having condition as a where clause condition of the given temp table.
+
+  @param tab   Table to which having condition is added.
+
+  @returns false if success, true if error.
+*/
+
+bool JOIN::add_having_as_table_cond(JOIN_TAB *tab)
+{
+  tmp_having->update_used_tables();
+  table_map used_tables= tab->table->map | OUTER_REF_TABLE_BIT;
+
+  /* If tmp table is not used then consider conditions of const table also */
+  if (!need_tmp)
+    used_tables|= const_table_map;
+
+  DBUG_ENTER("JOIN::add_having_as_table_cond");
+
+  Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables,
+                                             (table_map) 0, false,
+                                             false, false);
+  if (sort_table_cond)
+  {
+    if (!tab->select)
+    {
+      if (!(tab->select= new SQL_SELECT))
+        DBUG_RETURN(true);
+      tab->select->head= tab->table;
+    }
+    if (!tab->select->cond)
+      tab->select->cond= sort_table_cond;
+    else
+    {
+      if (!(tab->select->cond=
+            new (thd->mem_root) Item_cond_and(thd,
+                                              tab->select->cond,
+                                              sort_table_cond)))
+        DBUG_RETURN(true);
+    }
+    if (tab->pre_idx_push_select_cond)
+    {
+      if (sort_table_cond->type() == Item::COND_ITEM)
+        sort_table_cond= sort_table_cond->copy_andor_structure(thd);
+      if (!(tab->pre_idx_push_select_cond=
+            new (thd->mem_root) Item_cond_and(thd,
+                                              tab->pre_idx_push_select_cond,
+                                              sort_table_cond)))
+        DBUG_RETURN(true);
+    }
+    if (tab->select->cond && !tab->select->cond->fixed)
+      tab->select->cond->fix_fields(thd, 0);
+    if (tab->pre_idx_push_select_cond && !tab->pre_idx_push_select_cond->fixed)
+      tab->pre_idx_push_select_cond->fix_fields(thd, 0);
+    tab->select->pre_idx_push_select_cond= tab->pre_idx_push_select_cond;
+    tab->set_select_cond(tab->select->cond, __LINE__);
+    tab->select_cond->top_level_item();
+    DBUG_EXECUTE("where",print_where(tab->select->cond,
+                                     "select and having",
+                                     QT_ORDINARY););
+
+    having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0,
+                                ~used_tables, false, false, false);
+    DBUG_EXECUTE("where",
+                 print_where(having, "having after sort", QT_ORDINARY););
+  }
+
+  DBUG_RETURN(false);
+}
+
 
 /**
   Set info for aggregation tables
@@ -2289,6 +2359,7 @@
   TABLE *exec_tmp_table= NULL;
   bool distinct= false;
   bool keep_row_order= false;
+  bool is_having_added_as_table_cond= false;
   DBUG_ENTER("JOIN::make_aggr_tables_info");
 
   const bool has_group_by= this->group;
@@ -2475,29 +2546,6 @@
     if (exec_tmp_table->distinct)
       optimize_distinct();
 
-    /*
-      We don't have to store rows in temp table that doesn't match HAVING if:
-      - we are sorting the table and writing complete group rows to the
-        temp table.
-      - We are using DISTINCT without resolving the distinct as a GROUP BY
-        on all columns.
-
-      If having is not handled here, it will be checked before the row
-      is sent to the client.
-
-      In the case of window functions however, we *must* make sure to not
-      store any rows which don't match HAVING within the temp table,
-      as rows will end up being used during their computation.
-    */
-    if (having &&
-        (sort_and_group || (exec_tmp_table->distinct && !group_list) ||
-         select_lex->have_window_funcs()))
-    {
-      /* Attach HAVING to tmp table's condition */
-      curr_tab->having= having;
-      having= NULL; /* Already done */
-    }
-
     /* Change sum_fields reference to calculated fields in tmp_table */
     items1= ref_ptr_array_slice(2);
     if ((sort_and_group || curr_tab->table->group ||
@@ -2525,6 +2573,38 @@
     curr_tab->fields= &tmp_fields_list1;
     set_postjoin_aggr_write_func(curr_tab);
 
+    /*
+      If having is not handled here, it will be checked before the row is sent
+      to the client.
+    */
+    if (tmp_having &&
+        (sort_and_group || (exec_tmp_table->distinct && !group_list) ||
+         select_lex->have_window_funcs()))
+    {
+      /*
+        If there is no select distinct and there are no window functions
+        then move the having to table conds of tmp table.
+        NOTE : We cannot apply having after distinct or window functions
+        If columns of having are not part of select distinct,
+        then distinct may remove rows which can satisfy having.
+        In the case of window functions we *must* make sure to not
+        store any rows which don't match HAVING within the temp table,
+        as rows will end up being used during their computation.
+      */
+      if (!select_distinct && !select_lex->have_window_funcs() &&
+          add_having_as_table_cond(curr_tab))
+        DBUG_RETURN(true);
+      is_having_added_as_table_cond= tmp_having != having;
+
+      /*
+        Having condition which we are not able to add as tmp table conds are
+        kept as before. And, this will be applied before storing the rows in
+        tmp table.
+      */
+      curr_tab->having= having;
+      having= NULL; // Already done
+    }
+
     tmp_table_param.func_count= 0;
     tmp_table_param.field_count+= tmp_table_param.func_count;
     if (sort_and_group || curr_tab->table->group)
@@ -2714,60 +2794,11 @@
       DBUG_PRINT("info",("Sorting for send_result_set_metadata"));
       THD_STAGE_INFO(thd, stage_sorting_result);
       /* If we have already done the group, add HAVING to sorted table */
-      if (tmp_having && !group_list && !sort_and_group)
+      if (tmp_having && !is_having_added_as_table_cond &&
+          !group_list && !sort_and_group)
       {
-        // Some tables may have been const
-        tmp_having->update_used_tables();
-        table_map used_tables= (const_table_map | curr_tab->table->map);
-
-        Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables,
-                                                   (table_map) 0, false,
-                                                   false, false);
-        if (sort_table_cond)
-        {
-          if (!curr_tab->select)
-          {
-            if (!(curr_tab->select= new SQL_SELECT))
-              DBUG_RETURN(true);
-            curr_tab->select->head= curr_tab->table;
-          }
-          if (!curr_tab->select->cond)
-            curr_tab->select->cond= sort_table_cond;
-          else
-          {
-            if (!(curr_tab->select->cond=
-                  new (thd->mem_root) Item_cond_and(thd, curr_tab->select->cond,
-                                                    sort_table_cond)))
-              DBUG_RETURN(true);
-          }
-          if (curr_tab->pre_idx_push_select_cond)
-          {
-            if (sort_table_cond->type() == Item::COND_ITEM)
-              sort_table_cond= sort_table_cond->copy_andor_structure(thd);
-            if (!(curr_tab->pre_idx_push_select_cond=
-                  new (thd->mem_root) Item_cond_and(thd,
-                                                    curr_tab->pre_idx_push_select_cond,
-                                                    sort_table_cond)))
-              DBUG_RETURN(true);
-          }
-          if (curr_tab->select->cond && !curr_tab->select->cond->fixed)
-            curr_tab->select->cond->fix_fields(thd, 0);
-          if (curr_tab->pre_idx_push_select_cond &&
-              !curr_tab->pre_idx_push_select_cond->fixed)
-            curr_tab->pre_idx_push_select_cond->fix_fields(thd, 0);
-          curr_tab->select->pre_idx_push_select_cond=
-            curr_tab->pre_idx_push_select_cond;
-          curr_tab->set_select_cond(curr_tab->select->cond, __LINE__);
-          curr_tab->select_cond->top_level_item();
-          DBUG_EXECUTE("where",print_where(curr_tab->select->cond,
-                                           "select and having",
-                                           QT_ORDINARY););
-
-          having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0,
-                                      ~used_tables, false, false, false);
-          DBUG_EXECUTE("where",
-                       print_where(having, "having after sort", QT_ORDINARY););
-        }
+        if (add_having_as_table_cond(curr_tab))
+          DBUG_RETURN(true);
       }
 
       if (group)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 1a94ec2894f..9eb7a72c987 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1709,6 +1709,7 @@ private:
 
   void optimize_distinct();
   void cleanup_item_list(List<Item> &items) const;
+  bool add_having_as_table_cond(JOIN_TAB *tab);
   bool make_aggr_tables_info();
 };
 
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index e959190e2d6..dc529fe6a48 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -4523,7 +4523,7 @@ prepare_inplace_alter_table_dict(
                 if (alt_opt.encryption != opt.encryption
                     || alt_opt.encryption_key_id
                     != opt.encryption_key_id) {
-                        key_id = alt_opt.encryption_key_id;
+                        key_id = uint32_t(alt_opt.encryption_key_id);
                         mode = fil_encryption_t(alt_opt.encryption);
                 }
         }
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index cc687816ae8..03793a1d064 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -5382,13 +5382,21 @@ fallback:
         } while (err == EINTR
                  && srv_shutdown_state == SRV_SHUTDOWN_NONE);
 
-        if (err) {
+        switch (err) {
+        case 0:
+                return true;
+        default:
                 ib::error() << "preallocating " << size
                             << " bytes for file " << name
                             << " failed with error " << err;
+                /* fall through */
+        case EINTR:
+                errno = err;
+                return false;
+        case EINVAL:
+                /* fall back to the code below */
+                break;
         }
-        errno = err;
-        return(!err);
 
 # endif /* HAVE_POSIX_ALLOCATE */
 #endif /* _WIN32*/
@@ -5410,14 +5418,9 @@ fallback:
         memset(buf, 0, buf_size);
 
         os_offset_t     current_size = os_file_get_size(file);
-        bool    write_progress_info =
-                (size - current_size >= (os_offset_t) 100 << 20);
-
-        if (write_progress_info) {
-                ib::info() << "Progress in MB:";
-        }
 
-        while (current_size < size) {
+        while (current_size < size
+               && srv_shutdown_state == SRV_SHUTDOWN_NONE) {
                 ulint   n_bytes;
 
                 if (size - current_size < (os_offset_t) buf_size) {
@@ -5433,32 +5436,15 @@ fallback:
                         request, name, file, buf, current_size, n_bytes);
 
                 if (err != DB_SUCCESS) {
-
-                        ut_free(buf2);
-                        return(false);
-                }
-
-                /* Print about progress for each 100 MB written */
-                if (write_progress_info &&
-                    ((current_size + n_bytes) / (100 << 20)
-                     != current_size / (100 << 20))) {
-
-                        fprintf(stderr, " %lu00",
-                                (ulong) ((current_size + n_bytes)
-                                         / (100 << 20)));
+                        break;
                 }
 
                 current_size += n_bytes;
         }
 
-        if (write_progress_info) {
-
-                fprintf(stderr, "\n");
-        }
-
         ut_free(buf2);
 
-        return(os_file_flush(file));
+        return(current_size >= size && os_file_flush(file));
 }
 
 /** Truncates a file to a specified size in bytes.
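
For readers skimming the sql_select.cc hunks: the merge factors the old inline HAVING-pushdown code into JOIN::add_having_as_table_cond() and adds the is_having_added_as_table_cond flag so the same HAVING is not attached to the sorted temporary table a second time. The SQL below is only a sketch, condensed from the Bug#17055185 test case added above for running in an interactive client (mysqltest's DELIMITER |; syntax replaced by the plain client command; expected rows taken from the new having.result). It shows why re-evaluating HAVING after the ORDER BY sort gives wrong results when the HAVING item calls a stored function with side effects; as in the test, the function is non-deterministic, so with binary logging enabled you may need log_bin_trust_function_creators=1.

    -- Sketch only: condensed from the mysql-test case included in this commit.
    -- next_seq_value() returns 1, 0, 1, 0, ... on successive calls, so it must be
    -- evaluated exactly once per group while the temporary table is filled;
    -- re-checking HAVING after the sort would call it again and filter the wrong groups.
    CREATE TABLE series (val INT UNSIGNED NOT NULL);
    INSERT INTO series VALUES (1);

    DELIMITER |
    CREATE FUNCTION next_seq_value() RETURNS INT
    BEGIN
      DECLARE next_val INT;
      SELECT val INTO next_val FROM series;
      UPDATE series SET val = MOD(val + 1, 2);
      RETURN next_val;
    END|
    DELIMITER ;

    CREATE TABLE t1 (t INT, u INT, KEY(t));
    INSERT INTO t1 VALUES
      (10,10),(11,11),(12,12),(12,13),(14,15),(15,16),(16,17),(17,17);

    -- Expected result (from the new having.result): t = 10, 12, 15, 17, each with r = 1.
    SELECT t, next_seq_value() AS r
    FROM t1 FORCE INDEX(t)
    GROUP BY t
    HAVING r = 1
    ORDER BY t1.u;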