author      Oleksandr Byelkin <sanja@mariadb.com>       2019-09-26 09:49:50 +0200
committer   Oleksandr Byelkin <sanja@mariadb.com>       2019-10-13 09:40:41 +0200
commit      eb0804ef5e7eeb059bb193c3c6787e8a4188d34d (patch)
tree        7a159b51f5ddd8d936185cb61b66a3c1c535e2d0
parent      833637144178dcae60e7bb732dd373679f32d853 (diff)
download    mariadb-git-eb0804ef5e7eeb059bb193c3c6787e8a4188d34d.tar.gz
MDEV-18553: MDEV-16327 prerequisites part 1: isolation of LIMIT/OFFSET handling
-rw-r--r--   sql/group_by_handler.cc |  8
-rw-r--r--   sql/group_by_handler.h  |  1
-rw-r--r--   sql/item_subselect.cc   |  2
-rw-r--r--   sql/opt_subselect.cc    |  9
-rw-r--r--   sql/sql_class.cc        | 44
-rw-r--r--   sql/sql_class.h         |  6
-rw-r--r--   sql/sql_derived.cc      |  2
-rw-r--r--   sql/sql_error.cc        | 10
-rw-r--r--   sql/sql_insert.cc       |  7
-rw-r--r--   sql/sql_lex.cc          | 10
-rw-r--r--   sql/sql_lex.h           |  5
-rw-r--r--   sql/sql_limit.h         | 80
-rw-r--r--   sql/sql_parse.cc        | 13
-rw-r--r--   sql/sql_profile.cc      | 10
-rw-r--r--   sql/sql_repl.cc         | 10
-rw-r--r--   sql/sql_select.cc       | 93
-rw-r--r--   sql/sql_tvc.cc          |  2
-rw-r--r--   sql/sql_union.cc        | 40
18 files changed, 198 insertions, 154 deletions
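
The centerpiece of the patch is the new Select_limit_counters class in sql/sql_limit.h: instead of every result sink decrementing unit->offset_limit_cnt by hand, callers now go through check_and_move_offset(), get_select_limit() and set_unlimited(). Below is a condensed, standalone sketch of the class together with an illustrative driver loop; the driver and the ha_rows/HA_POS_ERROR definitions are simplified stand-ins, not server code.

```cpp
// Standalone sketch, not server code: ha_rows and HA_POS_ERROR are simplified
// stand-ins for the definitions the server gets from its own headers.
#include <cstdint>
#include <cstdio>

typedef uint64_t ha_rows;
static const ha_rows HA_POS_ERROR= ~(ha_rows) 0;

// Condensed from the new sql/sql_limit.h: one object owns the runtime LIMIT
// and OFFSET counters that previously lived directly on st_select_lex_unit.
class Select_limit_counters
{
  ha_rows offset_limit_cnt_start, select_limit_cnt, offset_limit_cnt;
public:
  Select_limit_counters()
    : offset_limit_cnt_start(0), select_limit_cnt(0), offset_limit_cnt(0) {}

  void set_limit(ha_rows limit, ha_rows offset)
  {
    offset_limit_cnt_start= offset;
    select_limit_cnt= limit;
    // The executor counts rows before the OFFSET is applied, so the stored
    // limit is LIMIT + OFFSET, clamped to "unlimited" on overflow.
    if (select_limit_cnt + offset_limit_cnt_start >= select_limit_cnt)
      select_limit_cnt+= offset_limit_cnt_start;
    else
      select_limit_cnt= HA_POS_ERROR;
    reset();
  }
  void reset() { offset_limit_cnt= offset_limit_cnt_start; }
  void set_unlimited() { select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; }
  bool is_unlimited() const { return select_limit_cnt == HA_POS_ERROR; }
  // Replaces the open-coded "if (offset_limit_cnt) { offset_limit_cnt--; ... }"
  // blocks in the various ::send_data() implementations.
  bool check_and_move_offset()
  {
    if (offset_limit_cnt)
    {
      offset_limit_cnt--;
      return true;                      // row is still inside the OFFSET
    }
    return false;
  }
  ha_rows get_select_limit() const { return select_limit_cnt; }
};

// Illustrative driver: how a result sink and the row-sending loop cooperate
// for LIMIT 3 OFFSET 2; rows 2, 3 and 4 are the only ones "sent".
int main()
{
  Select_limit_counters lim;
  lim.set_limit(/* limit */ 3, /* offset */ 2);

  ha_rows send_records= 0;
  for (int row= 0; row < 10; row++)
  {
    if (!lim.check_and_move_offset())   // the sink skips the OFFSET rows itself
      printf("send row %d\n", row);
    if (++send_records >= lim.get_select_limit())
      break;                            // the stored limit already includes the offset
  }
  return 0;
}
```

Folding the overflow-safe LIMIT + OFFSET computation into set_limit() is what lets st_select_lex_unit::set_limit() in sql_lex.cc shrink to a single call, as the diff below shows.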
diff --git a/sql/group_by_handler.cc b/sql/group_by_handler.cc index 326aad439ef..cb1462fce0e 100644 --- a/sql/group_by_handler.cc +++ b/sql/group_by_handler.cc @@ -40,7 +40,7 @@ int Pushdown_query::execute(JOIN *join) { int err; ha_rows max_limit; - ha_rows *reset_limit= 0; + bool reset_limit= FALSE; Item **reset_item= 0; THD *thd= handler->thd; TABLE *table= handler->table; @@ -52,11 +52,11 @@ int Pushdown_query::execute(JOIN *join) if (store_data_in_temp_table) { max_limit= join->tmp_table_param.end_write_records; - reset_limit= &join->unit->select_limit_cnt; + reset_limit= TRUE; } else { - max_limit= join->unit->select_limit_cnt; + max_limit= join->unit->lim.get_select_limit(); if (join->unit->fake_select_lex) reset_item= &join->unit->fake_select_lex->select_limit; } @@ -112,7 +112,7 @@ int Pushdown_query::execute(JOIN *join) break; // LIMIT reached join->do_send_rows= 0; // Calculate FOUND_ROWS() if (reset_limit) - *reset_limit= HA_POS_ERROR; + join->unit->lim.set_unlimited(); if (reset_item) *reset_item= 0; } diff --git a/sql/group_by_handler.h b/sql/group_by_handler.h index 108ebc989d9..97ee44d73d3 100644 --- a/sql/group_by_handler.h +++ b/sql/group_by_handler.h @@ -56,6 +56,7 @@ struct Query ORDER *order_by; Item *having; // LIMIT + //ha_rows select_limit_cnt, offset_limit_cnt; }; class group_by_handler diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 96344c0968b..c9675b227a1 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -2744,7 +2744,7 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg) join_arg->thd->change_item_tree(&unit->global_parameters()->select_limit, new (thd->mem_root) Item_int(thd, (int32) 1)); - unit->select_limit_cnt= 1; + unit->lim.set_single_row(); DBUG_RETURN(false); } diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 44cb524d1b8..516730e25d3 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -5714,11 +5714,8 @@ int select_value_catcher::send_data(List<Item> &items) DBUG_ASSERT(!assigned); DBUG_ASSERT(items.elements == n_elements); - if (unit->offset_limit_cnt) - { // Using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // Using limit offset,count Item *val_item; List_iterator_fast<Item> li(items); @@ -6574,7 +6571,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables) Set the limit of this JOIN object as well, because normally its being set in the beginning of JOIN::optimize, which was already done. */ - select_limit= in_subs->unit->select_limit_cnt; + select_limit= in_subs->unit->lim.get_select_limit(); } else if (in_subs->test_strategy(SUBS_IN_TO_EXISTS)) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 7c1d186fce1..1e969cba637 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3016,11 +3016,8 @@ int select_send::send_data(List<Item> &items) DBUG_ENTER("select_send::send_data"); /* unit is not set when using 'delete ... 
returning' */ - if (unit && unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(FALSE); - } + if (unit && unit->lim.check_and_move_offset()) + DBUG_RETURN(FALSE); // using limit offset,count if (thd->killed == ABORT_QUERY) DBUG_RETURN(FALSE); @@ -3285,11 +3282,8 @@ int select_export::send_data(List<Item> &items) String tmp(buff,sizeof(buff),&my_charset_bin),*res; tmp.length(0); - if (unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // using limit offset,count if (thd->killed == ABORT_QUERY) DBUG_RETURN(0); row_count++; @@ -3545,11 +3539,8 @@ int select_dump::send_data(List<Item> &items) Item *item; DBUG_ENTER("select_dump::send_data"); - if (unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // using limit offset,count if (thd->killed == ABORT_QUERY) DBUG_RETURN(0); @@ -3588,11 +3579,8 @@ int select_singlerow_subselect::send_data(List<Item> &items) MYF(current_thd->lex->ignore ? ME_WARNING : 0)); DBUG_RETURN(1); } - if (unit->offset_limit_cnt) - { // Using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // Using limit offset,count if (thd->killed == ABORT_QUERY) DBUG_RETURN(0); List_iterator_fast<Item> li(items); @@ -3729,11 +3717,8 @@ int select_exists_subselect::send_data(List<Item> &items) { DBUG_ENTER("select_exists_subselect::send_data"); Item_exists_subselect *it= (Item_exists_subselect *)item; - if (unit->offset_limit_cnt) - { // Using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // Using limit offset,count if (thd->killed == ABORT_QUERY) DBUG_RETURN(0); it->value= 1; @@ -4138,12 +4123,9 @@ int select_dumpvar::send_data(List<Item> &items) { DBUG_ENTER("select_dumpvar::send_data"); - if (unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } - if (row_count++) + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // using limit offset,count + if (row_count++) { my_message(ER_TOO_MANY_ROWS, ER_THD(thd, ER_TOO_MANY_ROWS), MYF(0)); DBUG_RETURN(1); diff --git a/sql/sql_class.h b/sql/sql_class.h index 8ca74457273..17ca2ad3947 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5196,9 +5196,9 @@ public: /* this method is called just before the first row of the table can be read */ virtual void prepare_to_read_rows() {} - void reset_offset_limit() + void remove_offset_limit() { - unit->offset_limit_cnt= 0; + unit->lim.remove_offset(); } /* @@ -6009,7 +6009,7 @@ public: */ DBUG_ASSERT(false); /* purecov: inspected */ } - void reset_offset_limit_cnt() + void remove_offset_limit() { // EXPLAIN should never output to a select_union_direct DBUG_ASSERT(false); /* purecov: inspected */ diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 06e82263524..78e87fe737f 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -1218,7 +1218,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) { SELECT_LEX *first_select= unit->first_select(); unit->set_limit(unit->global_parameters()); - if (unit->select_limit_cnt == HA_POS_ERROR) + if (unit->lim.is_unlimited()) first_select->options&= ~OPTION_FOUND_ROWS; lex->current_select= first_select; diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 
e6dcfed0412..c38ecfff6e6 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -783,7 +783,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) const Sql_condition *err; SELECT_LEX *sel= thd->lex->first_select_lex(); SELECT_LEX_UNIT *unit= &thd->lex->unit; - ulonglong idx= 0; + ha_rows idx; Protocol *protocol=thd->protocol; DBUG_ENTER("mysqld_show_warnings"); @@ -808,14 +808,14 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) Diagnostics_area::Sql_condition_iterator it= thd->get_stmt_da()->sql_conditions(); - while ((err= it++)) + for (idx= 1; (err= it++) ; idx++) { /* Skip levels that the user is not interested in */ if (!(levels_to_show & ((ulong) 1 << err->get_level()))) continue; - if (++idx <= unit->offset_limit_cnt) - continue; - if (idx > unit->select_limit_cnt) + if (unit->lim.check_and_move_offset()) + continue; // using limit offset,count + if (idx > unit->lim.get_select_limit()) break; protocol->prepare_for_resend(); protocol->store(warning_level_names[err->get_level()].str, diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 211ab7e4f4f..bc729a33ba1 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3857,11 +3857,8 @@ int select_insert::send_data(List<Item> &values) DBUG_ENTER("select_insert::send_data"); bool error=0; - if (unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - DBUG_RETURN(0); - } + if (unit->lim.check_and_move_offset()) + DBUG_RETURN(0); // using limit offset,count if (unlikely(thd->killed == ABORT_QUERY)) DBUG_RETURN(0); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 1245242d7aa..f60c396d064 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -2350,8 +2350,7 @@ void st_select_lex_unit::init_query() { init_query_common(); set_linkage(GLOBAL_OPTIONS_TYPE); - select_limit_cnt= HA_POS_ERROR; - offset_limit_cnt= 0; + lim.set_unlimited(); union_distinct= 0; prepared= optimized= optimized_2= executed= 0; bag_set_op_optimized= 0; @@ -3494,12 +3493,7 @@ void st_select_lex_unit::set_limit(st_select_lex *sl) { DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare()); - offset_limit_cnt= sl->get_offset(); - select_limit_cnt= sl->get_limit(); - if (select_limit_cnt + offset_limit_cnt >= select_limit_cnt) - select_limit_cnt+= offset_limit_cnt; - else - select_limit_cnt= HA_POS_ERROR; + lim.set_limit(sl->get_limit(), sl->get_offset()); } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 98bec985a64..08f4932ac97 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -32,6 +32,7 @@ #include "sp.h" // enum stored_procedure_type #include "sql_tvc.h" #include "item.h" +#include "sql_limit.h" // Select_limit_counters /* Used for flags of nesting constructs */ #define SELECT_NESTING_MAP_SIZE 64 @@ -829,6 +830,8 @@ void create_explain_query(LEX *lex, MEM_ROOT *mem_root); void create_explain_query_if_not_exists(LEX *lex, MEM_ROOT *mem_root); bool print_explain_for_slow_log(LEX *lex, THD *thd, String *str); + + class st_select_lex_unit: public st_select_lex_node { protected: TABLE_LIST result_table_list; @@ -908,7 +911,7 @@ public: //node on which we should return current_select pointer after parsing subquery st_select_lex *return_to; /* LIMIT clause runtime counters */ - ha_rows select_limit_cnt, offset_limit_cnt; + Select_limit_counters lim; /* not NULL if unit used in subselect, point to subselect item */ Item_subselect *item; /* diff --git a/sql/sql_limit.h b/sql/sql_limit.h new file mode 100644 index 00000000000..8e0c920dffc --- /dev/null +++ b/sql/sql_limit.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2019, MariaDB Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + + +#ifndef INCLUDES_MARIADB_SQL_LIMIT_H +#define INCLUDES_MARIADB_SQL_LIMIT_H +/** + LIMIT/OFFSET parameters for execution. +*/ + +class Select_limit_counters +{ + ha_rows offset_limit_cnt_start, + select_limit_cnt, offset_limit_cnt; + + public: + Select_limit_counters(): + offset_limit_cnt_start(0), + select_limit_cnt(0), offset_limit_cnt(0) + {}; + + void set_limit(ha_rows limit, ha_rows offset) + { + offset_limit_cnt_start= offset; + select_limit_cnt= limit; + if (select_limit_cnt + offset_limit_cnt_start >= + select_limit_cnt) + select_limit_cnt+= offset_limit_cnt_start; + else + select_limit_cnt= HA_POS_ERROR; + reset(); + } + + void set_single_row() + { + offset_limit_cnt= offset_limit_cnt_start= 0; + select_limit_cnt= 1; + } + + void reset() + { + offset_limit_cnt= offset_limit_cnt_start; + } + + bool is_unlimited() + { return select_limit_cnt == HA_POS_ERROR; } + void set_unlimited() + { select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; } + + bool check_and_move_offset() + { + if (offset_limit_cnt) + { + offset_limit_cnt--; + return TRUE; + } + return FALSE; + } + void remove_offset() { offset_limit_cnt= 0; } + + ha_rows get_select_limit() + { return select_limit_cnt; } + ha_rows get_offset_limit() + { return offset_limit_cnt; } +}; + + +#endif // INCLUDES_MARIADB_SQL_LIMIT_H diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 45b539c97d9..c921e2137e6 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4350,7 +4350,7 @@ mysql_execute_command(THD *thd) select_lex->where, select_lex->order_list.elements, select_lex->order_list.first, - unit->select_limit_cnt, + unit->lim.get_select_limit(), lex->ignore, &found, &updated); MYSQL_UPDATE_DONE(res, found, updated); /* mysql_update return 2 if we need to switch to multi-update */ @@ -4672,7 +4672,7 @@ mysql_execute_command(THD *thd) res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, - unit->select_limit_cnt, select_lex->options, + unit->lim.get_select_limit(), select_lex->options, lex->result ? 
lex->result : sel_result); if (replaced_protocol) @@ -5518,7 +5518,8 @@ mysql_execute_command(THD *thd) res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str, lex->insert_list, lex->ha_rkey_mode, select_lex->where, - unit->select_limit_cnt, unit->offset_limit_cnt); + unit->lim.get_select_limit(), + unit->lim.get_offset_limit()); break; case SQLCOM_BEGIN: @@ -6095,8 +6096,8 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) /* Do like the original select_describe did: remove OFFSET from the top-level LIMIT - */ - result->reset_offset_limit(); + */ + result->remove_offset_limit(); if (lex->explain_json) { lex->explain->print_explain_json(result, lex->analyze_stmt); @@ -7668,7 +7669,7 @@ void mysql_init_multi_delete(LEX *lex) lex->sql_command= SQLCOM_DELETE_MULTI; mysql_init_select(lex); lex->first_select_lex()->select_limit= 0; - lex->unit.select_limit_cnt= HA_POS_ERROR; + lex->unit.lim.set_unlimited(); lex->first_select_lex()->table_list. save_and_clear(&lex->auxiliary_table_list); lex->query_tables= 0; diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc index 5949287ea8d..40e7908ac1d 100644 --- a/sql/sql_profile.cc +++ b/sql/sql_profile.cc @@ -402,7 +402,7 @@ bool PROFILING::show_profiles() MEM_ROOT *mem_root= thd->mem_root; SELECT_LEX *sel= thd->lex->first_select_lex(); SELECT_LEX_UNIT *unit= &thd->lex->unit; - ha_rows idx= 0; + ha_rows idx; Protocol *protocol= thd->protocol; void *iterator; DBUG_ENTER("PROFILING::show_profiles"); @@ -426,9 +426,9 @@ bool PROFILING::show_profiles() unit->set_limit(sel); - for (iterator= history.new_iterator(); + for (iterator= history.new_iterator(), idx= 1; iterator != NULL; - iterator= history.iterator_next(iterator)) + iterator= history.iterator_next(iterator), idx++) { prof= history.iterator_value(iterator); @@ -436,9 +436,9 @@ bool PROFILING::show_profiles() double query_time_usecs= prof->m_end_time_usecs - prof->m_start_time_usecs; - if (++idx <= unit->offset_limit_cnt) + if (unit->lim.check_and_move_offset()) continue; - if (idx > unit->select_limit_cnt) + if (idx > unit->lim.get_select_limit()) break; protocol->prepare_for_resend(); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index a0f952955d5..82d105a869f 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -4013,7 +4013,7 @@ bool mysql_show_binlog_events(THD* thd) if (binary_log->is_open()) { SELECT_LEX_UNIT *unit= &thd->lex->unit; - ha_rows event_count, limit_start, limit_end; + ha_rows event_count; my_off_t pos = MY_MAX(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly char search_file_name[FN_REFLEN], *name; const char *log_file_name = lex_mi->log_file_name; @@ -4028,8 +4028,6 @@ bool mysql_show_binlog_events(THD* thd) } unit->set_limit(thd->lex->current_select); - limit_start= unit->offset_limit_cnt; - limit_end= unit->select_limit_cnt; name= search_file_name; if (log_file_name) @@ -4108,7 +4106,7 @@ bool mysql_show_binlog_events(THD* thd) description_event, opt_master_verify_checksum)); ) { - if (event_count >= limit_start && + if (!unit->lim.check_and_move_offset() && ev->net_send(protocol, linfo.log_file_name, pos)) { errmsg = "Net error"; @@ -4142,11 +4140,11 @@ bool mysql_show_binlog_events(THD* thd) pos = my_b_tell(&log); - if (++event_count >= limit_end) + if (++event_count >= unit->lim.get_select_limit()) break; } - if (unlikely(event_count < limit_end && log.error)) + if (unlikely(event_count < unit->lim.get_select_limit() && log.error)) { errmsg = "Wrong offset or I/O error"; mysql_mutex_unlock(log_lock); diff --git a/sql/sql_select.cc 
b/sql/sql_select.cc index 7039c70db05..db6c725dda4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1749,7 +1749,7 @@ JOIN::optimize_inner() { DBUG_ENTER("JOIN::optimize"); subq_exit_fl= false; - do_send_rows = (unit->select_limit_cnt) ? 1 : 0; + do_send_rows = (unit->lim.get_select_limit()) ? 1 : 0; DEBUG_SYNC(thd, "before_join_optimize"); @@ -1824,9 +1824,9 @@ JOIN::optimize_inner() DBUG_RETURN(-1); row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR : - unit->select_limit_cnt); + unit->lim.get_select_limit()); /* select_limit is used to decide if we are likely to scan the whole table */ - select_limit= unit->select_limit_cnt; + select_limit= unit->lim.get_select_limit(); if (having || (select_options & OPTION_FOUND_ROWS)) select_limit= HA_POS_ERROR; #ifdef HAVE_REF_TO_FIELDS // Not done yet @@ -2054,9 +2054,10 @@ JOIN::optimize_inner() thd->change_item_tree(&sel->having, having); } if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || - (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) + (!unit->lim.get_select_limit() && + !(select_options & OPTION_FOUND_ROWS))) { /* Impossible cond */ - if (unit->select_limit_cnt) + if (unit->lim.get_select_limit()) { DBUG_PRINT("info", (having_value == Item::COND_FALSE ? "Impossible HAVING" : "Impossible WHERE")); @@ -3601,7 +3602,7 @@ bool JOIN::make_aggr_tables_info() */ sort_tab->filesort->limit= (has_group_by || (join_tab + top_join_tab_count > curr_tab + 1)) ? - select_limit : unit->select_limit_cnt; + select_limit : unit->lim.get_select_limit(); } if (!only_const_tables() && !join_tab[const_tables].filesort && @@ -3986,8 +3987,7 @@ JOIN::reinit() { DBUG_ENTER("JOIN::reinit"); - unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ? - select_lex->offset_limit->val_uint() : 0); + unit->lim.reset(); first_record= false; group_sent= false; @@ -5513,7 +5513,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, if (double rr= join->best_positions[i].records_read) records= COST_MULT(records, rr); ha_rows rows= records > HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records; - set_if_smaller(rows, unit->select_limit_cnt); + set_if_smaller(rows, unit->lim.get_select_limit()); join->select_lex->increase_derived_records(rows); } } @@ -7970,7 +7970,7 @@ best_access_path(JOIN *join, if (!best_key && idx == join->const_tables && s->table == join->sort_by_table && - join->unit->select_limit_cnt >= records) + join->unit->lim.get_select_limit() >= records) { trace_access_scan.add("use_tmp_table", true); join->sort_by_table= (TABLE*) 1; // Must use temporary table @@ -11465,7 +11465,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) !tab->loosescan_match_tab && // (1) ((cond && (!tab->keys.is_subset(tab->const_keys) && i > 0)) || (!tab->const_keys.is_clear_all() && i == join->const_tables && - join->unit->select_limit_cnt < + join->unit->lim.get_select_limit() < join->best_positions[i].records_read && !(join->select_options & OPTION_FOUND_ROWS)))) { @@ -11489,7 +11489,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) (join->select_options & OPTION_FOUND_ROWS ? HA_POS_ERROR : - join->unit->select_limit_cnt), 0, + join->unit->lim.get_select_limit()), 0, FALSE, FALSE, FALSE) < 0) { /* @@ -11503,7 +11503,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) (join->select_options & OPTION_FOUND_ROWS ? 
HA_POS_ERROR : - join->unit->select_limit_cnt),0, + join->unit->lim.get_select_limit()),0, FALSE, FALSE, FALSE) < 0) DBUG_RETURN(1); // Impossible WHERE } @@ -19791,31 +19791,31 @@ do_select(JOIN *join, Procedure *procedure) HAVING will be checked after processing aggregate functions, But WHERE should checked here (we alredy have read tables). Notice that make_join_select() splits all conditions in this case - into two groups exec_const_cond and outer_ref_cond. - If join->table_count == join->const_tables then it is - sufficient to check only the condition pseudo_bits_cond. - */ - DBUG_ASSERT(join->outer_ref_cond == NULL); - if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int()) - { - // HAVING will be checked by end_select - error= (*end_select)(join, 0, 0); - if (error >= NESTED_LOOP_OK) - error= (*end_select)(join, 0, 1); + into two groups exec_const_cond and outer_ref_cond. + If join->table_count == join->const_tables then it is + sufficient to check only the condition pseudo_bits_cond. + */ + DBUG_ASSERT(join->outer_ref_cond == NULL); + if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int()) + { + // HAVING will be checked by end_select + error= (*end_select)(join, 0, 0); + if (error >= NESTED_LOOP_OK) + error= (*end_select)(join, 0, 1); - /* - If we don't go through evaluate_join_record(), do the counting - here. join->send_records is increased on success in end_send(), - so we don't touch it here. - */ - join->join_examined_rows++; - DBUG_ASSERT(join->join_examined_rows <= 1); - } - else if (join->send_row_on_empty_set()) + /* + If we don't go through evaluate_join_record(), do the counting + here. join->send_records is increased on success in end_send(), + so we don't touch it here. + */ + join->join_examined_rows++; + DBUG_ASSERT(join->join_examined_rows <= 1); + } + else if (join->send_row_on_empty_set()) + { + if (!join->having || join->having->val_int()) { - if (!join->having || join->having->val_int()) - { - List<Item> *columns_list= (procedure ? &join->procedure_fields_list : + List<Item> *columns_list= (procedure ? 
&join->procedure_fields_list : join->fields); rc= join->result->send_data(*columns_list) > 0; } @@ -21499,7 +21499,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } ++join->send_records; - if (join->send_records >= join->unit->select_limit_cnt && + if (join->send_records >= join->unit->lim.get_select_limit() && !join->do_send_rows) { /* @@ -21517,7 +21517,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); } } - if (join->send_records >= join->unit->select_limit_cnt && + if (join->send_records >= join->unit->lim.get_select_limit() && join->do_send_rows) { if (join->select_options & OPTION_FOUND_ROWS) @@ -21658,13 +21658,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) DBUG_RETURN(NESTED_LOOP_OK); - if (join->send_records >= join->unit->select_limit_cnt && + if (join->send_records >= join->unit->lim.get_select_limit() && join->do_send_rows) { if (!(join->select_options & OPTION_FOUND_ROWS)) DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely join->do_send_rows=0; - join->unit->select_limit_cnt = HA_POS_ERROR; + join->unit->lim.set_unlimited(); } else if (join->send_records >= join->fetch_limit) { @@ -21749,7 +21749,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!(join->select_options & OPTION_FOUND_ROWS)) DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); join->do_send_rows=0; - join->unit->select_limit_cnt = HA_POS_ERROR; + join->unit->lim.set_unlimited(); } } } @@ -23111,8 +23111,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, (tab->join->select_options & OPTION_FOUND_ROWS) ? HA_POS_ERROR : - tab->join->unit->select_limit_cnt,TRUE, - TRUE, FALSE, FALSE) <= 0; + tab->join->unit-> + lim.get_select_limit(), + TRUE, TRUE, FALSE, FALSE) <= 0; if (res) { select->cond= save_cond; @@ -23213,7 +23214,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, select->test_quick_select(join->thd, tmp_map, 0, join->select_options & OPTION_FOUND_ROWS ? 
HA_POS_ERROR : - join->unit->select_limit_cnt, + join->unit->lim.get_select_limit(), TRUE, FALSE, FALSE, FALSE); if (cond_saved) @@ -23646,7 +23647,7 @@ JOIN_TAB::remove_duplicates() if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) { // only const items with no OPTION_FOUND_ROWS - join->unit->select_limit_cnt= 1; // Only send first row + join->unit->lim.set_single_row(); // Only send first row DBUG_RETURN(false); } @@ -25845,7 +25846,7 @@ int JOIN::rollup_send_data(uint idx) copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]); if ((!having || having->val_int())) { - if (send_records < unit->select_limit_cnt && do_send_rows && + if (send_records < unit->lim.get_select_limit() && do_send_rows && (res= result->send_data(rollup.fields[i])) > 0) return 1; if (!res) diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index eea14d7dfc2..f4a99392149 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -386,7 +386,7 @@ bool table_value_constr::exec(SELECT_LEX *sl) while ((elem= li++)) { - if (send_records >= sl->master_unit()->select_limit_cnt) + if (send_records >= sl->master_unit()->lim.get_select_limit()) break; int rc= result->send_data(*elem); if (!rc) diff --git a/sql/sql_union.cc b/sql/sql_union.cc index e3c5508e947..c10742737b5 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -111,11 +111,8 @@ int select_unit::send_data(List<Item> &values) { int rc= 0; int not_reported_error= 0; - if (unit->offset_limit_cnt) - { // using limit offset,count - unit->offset_limit_cnt--; - return 0; - } + if (unit->lim.check_and_move_offset()) + return 0; // using limit offset,count if (thd->killed == ABORT_QUERY) return 0; if (table->no_rows_with_nulls) @@ -607,12 +604,8 @@ int select_unit_ext::send_data(List<Item> &values) int rc= 0; int not_reported_error= 0; int find_res; - if (unit->offset_limit_cnt) - { - /* using limit offset,count */ - unit->offset_limit_cnt--; + if (unit->lim.check_and_move_offset()) return 0; - } if (thd->killed == ABORT_QUERY) return 0; if (table->no_rows_with_nulls) @@ -1358,8 +1351,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, else { sl->join->result= result; - select_limit_cnt= HA_POS_ERROR; - offset_limit_cnt= 0; + lim.set_unlimited(); if (!sl->join->procedure && result->prepare(sl->join->fields_list, this)) { @@ -2046,7 +2038,7 @@ bool st_select_lex_unit::optimize() if (sl->tvc) { sl->tvc->select_options= - (select_limit_cnt == HA_POS_ERROR || sl->braces) ? + (lim.is_unlimited() || sl->braces) ? sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; if (sl->tvc->optimize(thd)) { @@ -2066,13 +2058,13 @@ bool st_select_lex_unit::optimize() set_limit(sl); if (sl == global_parameters() || describe) { - offset_limit_cnt= 0; + lim.remove_offset(); /* We can't use LIMIT at this stage if we are using ORDER BY for the whole query */ if (sl->order_list.first || describe) - select_limit_cnt= HA_POS_ERROR; + lim.set_unlimited(); } /* @@ -2081,7 +2073,7 @@ bool st_select_lex_unit::optimize() Otherwise, SQL_CALC_FOUND_ROWS should be done on all sub parts. */ sl->join->select_options= - (select_limit_cnt == HA_POS_ERROR || sl->braces) ? + (lim.is_unlimited() || sl->braces) ? 
sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; saved_error= sl->join->optimize(); @@ -2161,13 +2153,13 @@ bool st_select_lex_unit::exec() set_limit(sl); if (sl == global_parameters() || describe) { - offset_limit_cnt= 0; + lim.remove_offset(); /* We can't use LIMIT at this stage if we are using ORDER BY for the whole query */ if (sl->order_list.first || describe) - select_limit_cnt= HA_POS_ERROR; + lim.set_unlimited(); } /* @@ -2178,14 +2170,14 @@ bool st_select_lex_unit::exec() if (sl->tvc) { sl->tvc->select_options= - (select_limit_cnt == HA_POS_ERROR || sl->braces) ? + (lim.is_unlimited() || sl->braces) ? sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; saved_error= sl->tvc->optimize(thd); } else { sl->join->select_options= - (select_limit_cnt == HA_POS_ERROR || sl->braces) ? + (lim.is_unlimited() || sl->braces) ? sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; saved_error= sl->join->optimize(); } @@ -2208,9 +2200,7 @@ bool st_select_lex_unit::exec() } if (!sl->tvc) saved_error= sl->join->error; - offset_limit_cnt= (ha_rows)(sl->offset_limit ? - sl->offset_limit->val_uint() : - 0); + lim.reset(); if (likely(!saved_error)) { examined_rows+= thd->get_examined_row_count(); @@ -2237,8 +2227,8 @@ bool st_select_lex_unit::exec() DBUG_RETURN(1); } } - if (found_rows_for_union && !sl->braces && - select_limit_cnt != HA_POS_ERROR) + if (found_rows_for_union && !sl->braces && + !lim.is_unlimited()) { /* This is a union without braces. Remember the number of rows that |