Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc  270
1 file changed, 212 insertions(+), 58 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 99a23d78934..741e8afec1d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -122,7 +122,7 @@ static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
 static enum_nested_loop_state
 evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
-                     int error, my_bool *report_error);
+                     int error);
 static enum_nested_loop_state
 evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab);
 static enum_nested_loop_state
@@ -223,6 +223,7 @@ static void select_describe(JOIN *join, bool need_tmp_table,bool need_order,
                             bool distinct, const char *message=NullS);
 static Item *remove_additional_cond(Item* conds);
 static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab);
+static bool test_if_ref(Item_field *left_item,Item *right_item);

 /*
@@ -263,8 +264,8 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
                       result, unit, select_lex);
   }
   DBUG_PRINT("info",("res: %d report_error: %d", res,
-                     thd->net.report_error));
-  res|= thd->net.report_error;
+                     thd->is_error()));
+  res|= thd->is_error();
   if (unlikely(res))
     result->abort();
@@ -491,7 +492,7 @@ JOIN::prepare(Item ***rref_pointer_array,
                          (having->fix_fields(thd, &having) ||
                           having->check_cols(1)));
     select_lex->having_fix_field= 0;
-    if (having_fix_rc || thd->net.report_error)
+    if (having_fix_rc || thd->is_error())
       DBUG_RETURN(-1);  /* purecov: inspected */
     thd->lex->allow_sum_func= save_allow_sum_func;
   }
@@ -571,11 +572,12 @@ JOIN::prepare(Item ***rref_pointer_array,
   /*
-    Check if one one uses a not constant column with group functions and no GROUP BY.
+    Check if there are references to un-aggregated columns when computing aggregate functions with implicit grouping (there is no GROUP BY).
     TODO: Add check of calculation of GROUP functions and fields:
           SELECT COUNT(*)+table.col1 from table1;
   */
+
   if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY)
   {
     if (!group_list)
     {
@@ -589,6 +591,13 @@ JOIN::prepare(Item ***rref_pointer_array,
       else if (!(flag & 2) && !item->const_during_execution())
         flag|=2;
     }
+    if (having)
+    {
+      if (having->with_sum_func)
+        flag |= 1;
+      else if (!having->const_during_execution())
+        flag |= 2;
+    }
     if (flag == 3)
     {
       my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS,
@@ -672,9 +681,6 @@ err:
     without "checking NULL", remove the predicates that were pushed down
     into the subquery.

-    We can remove the equalities that will be guaranteed to be true by the
-    fact that subquery engine will be using index lookup.
-
     If the subquery compares scalar values, we can remove the condition that
     was wrapped into trig_cond (it will be checked when needed by the subquery
     engine)
@@ -684,6 +690,12 @@ err:
     and non-NULL values, we'll do a full table scan and will rely on the
     equalities corresponding to non-NULL parts of left tuple to filter out
     non-matching records.
+
+    TODO: We can remove the equalities that will be guaranteed to be true by the
+    fact that subquery engine will be using index lookup. This must be done only
+    for cases where there are no conversion errors of significance, e.g. 257
+    that is searched in a byte. But this requires homogenization of the return
+    codes of all Field*::store() methods.
 */

 void JOIN::remove_subq_pushed_predicates(Item **where)
@@ -691,17 +703,13 @@ void JOIN::remove_subq_pushed_predicates(Item **where)
   if (conds->type() == Item::FUNC_ITEM &&
       ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC &&
       ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM &&
-      ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM)
+      ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM &&
+      test_if_ref ((Item_field *)((Item_func *)conds)->arguments()[1],
+                   ((Item_func *)conds)->arguments()[0]))
   {
     *where= 0;
     return;
   }
-  if (conds->type() == Item::COND_ITEM &&
-      ((class Item_func *)this->conds)->functype() ==
-      Item_func::COND_AND_FUNC)
-  {
-    *where= remove_additional_cond(conds);
-  }
 }
@@ -817,7 +825,7 @@ JOIN::optimize()
   }
   conds= optimize_cond(this, conds, join_list, &cond_value);
-  if (thd->net.report_error)
+  if (thd->is_error())
   {
     error= 1;
     DBUG_PRINT("error",("Error from optimize_cond"));
@@ -826,7 +834,7 @@ JOIN::optimize()
   {
     having= optimize_cond(this, having, join_list, &having_value);
-    if (thd->net.report_error)
+    if (thd->is_error())
     {
       error= 1;
       DBUG_PRINT("error",("Error from optimize_cond"));
@@ -1031,7 +1039,7 @@ JOIN::optimize()
   {
     ORDER *org_order= order;
     order=remove_const(this, order,conds,1, &simple_order);
-    if (thd->net.report_error)
+    if (thd->is_error())
     {
       error= 1;
       DBUG_PRINT("error",("Error from remove_const"));
@@ -1073,10 +1081,19 @@ JOIN::optimize()
      We have found that grouping can be removed since groups correspond to
      only one row anyway, but we still have to guarantee correct result
      order. The line below effectively rewrites the query from GROUP BY
-     <fields> to ORDER BY <fields>. One exception is if skip_sort_order is
-     set (see above), then we can simply skip GROUP BY.
-    */
-    order= skip_sort_order ? 0 : group_list;
+     <fields> to ORDER BY <fields>. There are two exceptions:
+     - if skip_sort_order is set (see above), then we can simply skip
+       GROUP BY;
+     - we can only rewrite ORDER BY if the ORDER BY fields are 'compatible'
+       with the GROUP BY ones, i.e. either one is a prefix of another.
+       We only check if the ORDER BY is a prefix of GROUP BY. In this case
+       test_if_subpart() copies the ASC/DESC attributes from the original
+       ORDER BY fields.
+       If GROUP BY is a prefix of ORDER BY, then it is safe to leave
+       'order' as is.
+    */
+    if (!order || test_if_subpart(group_list, order))
+        order= skip_sort_order ? 0 : group_list;
    /*
      If we have an IGNORE INDEX FOR GROUP BY(fields) clause, this must be
      rewritten to IGNORE INDEX FOR ORDER BY(fields).
@@ -1162,7 +1179,7 @@ JOIN::optimize()
     group_list= remove_const(this, (old_group_list= group_list), conds,
                              rollup.state == ROLLUP::STATE_NONE,
                              &simple_group);
-    if (thd->net.report_error)
+    if (thd->is_error())
     {
       error= 1;
       DBUG_PRINT("error",("Error from remove_const"));
@@ -1185,7 +1202,7 @@ JOIN::optimize()
   {
     group_list= procedure->group= remove_const(this, procedure->group,
                                                conds, 1, &simple_group);
-    if (thd->net.report_error)
+    if (thd->is_error())
     {
       error= 1;
       DBUG_PRINT("error",("Error from remove_const"));
@@ -1242,7 +1259,7 @@ JOIN::optimize()
   {
     if (!having)
     {
-      Item *where= 0;
+      Item *where= conds;
       if (join_tab[0].type == JT_EQ_REF &&
          join_tab[0].ref.items[0]->name == in_left_expr_name)
      {
@@ -2098,10 +2115,10 @@ JOIN::exec()
        }
      }
    }
-    /* XXX: When can we have here thd->net.report_error not zero? */
-    if (thd->net.report_error)
+    /* XXX: When can we have here thd->is_error() not zero? */
+    if (thd->is_error())
    {
-      error= thd->net.report_error;
+      error= thd->is_error();
      DBUG_VOID_RETURN;
    }
    curr_join->having= curr_join->tmp_having;
@@ -2307,7 +2324,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
      join->having_history= (join->having?join->having:join->tmp_having);
    }

-    if (thd->net.report_error)
+    if (thd->is_error())
      goto err;

    join->exec();
@@ -2333,7 +2350,7 @@ err:
  {
    thd->proc_info="end";
    err|= select_lex->cleanup();
-    DBUG_RETURN(err || thd->net.report_error);
+    DBUG_RETURN(err || thd->is_error());
  }
  DBUG_RETURN(join->error);
 }
@@ -2349,6 +2366,11 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
 {
  int error;
  DBUG_ENTER("get_quick_record_count");
+#ifndef EMBEDDED_LIBRARY                      // Avoid compiler warning
+  uchar buff[STACK_BUFF_ALLOC];
+#endif
+  if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
+    DBUG_RETURN(0);                           // Fatal error flag is set
  if (select)
  {
    select->head=table;
@@ -3706,7 +3728,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
  {
    KEYUSE key_end,*prev,*save_pos,*use;

-    qsort(keyuse->buffer,keyuse->elements,sizeof(KEYUSE),
+    my_qsort(keyuse->buffer,keyuse->elements,sizeof(KEYUSE),
          (qsort_cmp) sort_keyuse);
    bzero((char*) &key_end,sizeof(key_end)); /* Add for easy testing */
@@ -3717,7 +3739,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
    found_eq_constant=0;
    for (i=0 ; i < keyuse->elements-1 ; i++,use++)
    {
-      if (!use->used_tables)
+      if (!use->used_tables && use->optimize != KEY_OPTIMIZE_REF_OR_NULL)
        use->table->const_key_parts[use->key]|= use->keypart_map;
      if (use->keypart != FT_KEYPART)
      {
@@ -4475,8 +4497,9 @@ choose_plan(JOIN *join, table_map join_tables)
    Apply heuristic: pre-sort all access plans with respect to the number of
    records accessed.
  */
-  qsort(join->best_ref + join->const_tables, join->tables - join->const_tables,
-        sizeof(JOIN_TAB*), straight_join?join_tab_cmp_straight:join_tab_cmp);
+  my_qsort(join->best_ref + join->const_tables,
+           join->tables - join->const_tables, sizeof(JOIN_TAB*),
+           straight_join ? join_tab_cmp_straight : join_tab_cmp);

  if (straight_join)
  {
@@ -4523,6 +4546,17 @@ choose_plan(JOIN *join, table_map join_tables)
    ptr1  pointer to first JOIN_TAB object
    ptr2  pointer to second JOIN_TAB object

+  NOTES
+    The order relation implemented by join_tab_cmp() is not transitive,
+    i.e. it is possible to choose such a, b and c that (a < b) && (b < c)
+    but (c < a). This implies that result of a sort using the relation
+    implemented by join_tab_cmp() depends on the order in which
+    elements are compared, i.e. the result is implementation-specific.
+    Example:
+      a: dependent = 0x0 table->map = 0x1 found_records = 3 ptr = 0x907e6b0
+      b: dependent = 0x0 table->map = 0x2 found_records = 3 ptr = 0x907e838
+      c: dependent = 0x6 table->map = 0x10 found_records = 2 ptr = 0x907ecd0
+
  RETURN
    1  if first is bigger
    -1 if second is bigger
@@ -6282,10 +6316,9 @@ make_join_readinfo(JOIN *join, ulonglong options)
        ordered. If there is a temp table the ordering is done as a last
        operation and doesn't prevent join cache usage.
      */
-      if (!ordered_set && !join->need_tmp &&
-          ((table == join->sort_by_table &&
-            (!join->order || join->skip_sort_order)) ||
-           (join->sort_by_table == (TABLE *) 1 && i != join->const_tables)))
+      if (!ordered_set && !join->need_tmp &&
+          (table == join->sort_by_table ||
+           (join->sort_by_table == (TABLE *) 1 && i != join->const_tables)))
        ordered_set= 1;

      tab->sorted= sorted;
@@ -6424,7 +6457,15 @@ make_join_readinfo(JOIN *join, ulonglong options)
      else if (!table->covering_keys.is_clear_all() &&
               !(tab->select && tab->select->quick))
      {                                 // Only read index tree
-       tab->index=find_shortest_key(table, & table->covering_keys);
+        /*
+          See bug #26447: "Using the clustered index for a table scan
+          is always faster than using a secondary index".
+        */
+        if (table->s->primary_key != MAX_KEY &&
+            table->file->primary_key_is_clustered())
+          tab->index= table->s->primary_key;
+        else
+          tab->index=find_shortest_key(table, & table->covering_keys);
        tab->read_first_record= join_read_first;
        tab->type=JT_NEXT;              // Read with index_first / index_next
      }
@@ -6651,7 +6692,7 @@ void JOIN::cleanup(bool full)
      for (tab= join_tab, end= tab+tables; tab != end; tab++)
      {
        if (tab->table)
-         tab->table->file->ha_index_or_rnd_end();
+          tab->table->file->ha_index_or_rnd_end();
      }
    }
  }
@@ -9185,9 +9226,43 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
      new_field->set_derivation(item->collation.derivation);
    break;
  case DECIMAL_RESULT:
-    new_field= new Field_new_decimal(item->max_length, maybe_null, item->name,
-                                     item->decimals, item->unsigned_flag);
+  {
+    uint8 dec= item->decimals;
+    uint8 intg= ((Item_decimal *) item)->decimal_precision() - dec;
+    uint32 len= item->max_length;
+
+    /*
+      Trying to put too many digits overall in a DECIMAL(prec,dec)
+      will always throw a warning. We must limit dec to
+      DECIMAL_MAX_SCALE however to prevent an assert() later.
+    */
+
+    if (dec > 0)
+    {
+      signed int overflow;
+
+      dec= min(dec, DECIMAL_MAX_SCALE);
+
+      /*
+        If the value still overflows the field with the corrected dec,
+        we'll throw out decimals rather than integers. This is still
+        bad and of course throws a truncation warning.
+        +1: for decimal point
+      */
+
+      overflow= my_decimal_precision_to_length(intg + dec, dec,
+                                               item->unsigned_flag) - len;
+
+      if (overflow > 0)
+        dec= max(0, dec - overflow); // too long, discard fract
+      else
+        len -= item->decimals - dec; // corrected value fits
+    }
+
+    new_field= new Field_new_decimal(len, maybe_null, item->name,
+                                     dec, item->unsigned_flag);
    break;
+  }
  case ROW_RESULT:
  default:
    // This case should never be choosen
@@ -9345,6 +9420,36 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
    }
    /* Fall through */
  case Item::FUNC_ITEM:
+    if (((Item_func *) item)->functype() == Item_func::FUNC_SP)
+    {
+      Item_func_sp *item_func_sp= (Item_func_sp *) item;
+      Field *sp_result_field= item_func_sp->get_sp_result_field();
+
+      if (make_copy_field)
+      {
+        DBUG_ASSERT(item_func_sp->result_field);
+        *from_field= item_func_sp->result_field;
+      }
+      else
+      {
+        *((*copy_func)++)= item;
+      }
+
+      Field *result_field=
+        create_tmp_field_from_field(thd,
+                                    sp_result_field,
+                                    item_func_sp->name,
+                                    table,
+                                    NULL,
+                                    convert_blob_length);
+
+      if (modify_item)
+        item->set_result_field(result_field);
+
+      return result_field;
+    }
+
+    /* Fall through */
  case Item::COND_ITEM:
  case Item::FIELD_AVG_ITEM:
  case Item::FIELD_STD_ITEM:
@@ -9580,7 +9685,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  table->keys_in_use_for_query.init();

  table->s= share;
-  init_tmp_table_share(share, "", 0, tmpname, tmpname);
+  init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
  share->blob_field= blob_field;
  share->blob_ptr_size= mi_portable_sizeof_char_ptr;
  share->db_low_byte_first=1;                // True for HEAP and MyISAM
@@ -10521,7 +10626,8 @@ Next_select_func setup_end_select_func(JOIN *join)
  /* Set up select_end */
  if (table)
  {
-    if (table->group && tmp_tbl->sum_func_count)
+    if (table->group && tmp_tbl->sum_func_count &&
+        !tmp_tbl->precomputed_group_by)
    {
      if (table->s->keys)
      {
@@ -10620,6 +10726,15 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
      error= (*end_select)(join, 0, 0);
      if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT)
        error= (*end_select)(join, 0, 1);
+
+      /*
+        If we don't go through evaluate_join_record(), do the counting
+        here. join->send_records is increased on success in end_send(),
+        so we don't touch it here.
+      */
+      join->examined_rows++;
+      join->thd->row_count++;
+      DBUG_ASSERT(join->examined_rows <= 1);
    }
    else if (join->send_row_on_empty_set())
    {
@@ -10682,7 +10797,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
    DBUG_PRINT("error",("Error: do_select() failed"));
  }
 #endif
-  DBUG_RETURN(join->thd->net.report_error ? -1 : rc);
+  DBUG_RETURN(join->thd->is_error() ? -1 : rc);
 }
@@ -10836,7 +10951,6 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
  int error;
  enum_nested_loop_state rc;
-  my_bool *report_error= &(join->thd->net.report_error);
  READ_RECORD *info= &join_tab->read_record;

  if (join->resume_nested_loop)
@@ -10868,13 +10982,13 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
    join->thd->row_count= 0;

    error= (*join_tab->read_first_record)(join_tab);
-    rc= evaluate_join_record(join, join_tab, error, report_error);
+    rc= evaluate_join_record(join, join_tab, error);
  }

  while (rc == NESTED_LOOP_OK)
  {
    error= info->read_record(info);
-    rc= evaluate_join_record(join, join_tab, error, report_error);
+    rc= evaluate_join_record(join, join_tab, error);
  }

  if (rc == NESTED_LOOP_NO_MORE_ROWS &&
@@ -10898,13 +11012,13 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)

 static enum_nested_loop_state
 evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
-                     int error, my_bool *report_error)
+                     int error)
 {
  bool not_used_in_distinct=join_tab->not_used_in_distinct;
  ha_rows found_records=join->found_records;
  COND *select_cond= join_tab->select_cond;

-  if (error > 0 || (*report_error))             // Fatal error
+  if (error > 0 || (join->thd->is_error()))     // Fatal error
    return NESTED_LOOP_ERROR;
  if (error < 0)
    return NESTED_LOOP_NO_MORE_ROWS;
@@ -12154,8 +12268,12 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
    Item *ref_item=part_of_refkey(field->table,field);
    if (ref_item && ref_item->eq(right_item,1))
    {
+      right_item= right_item->real_item();
      if (right_item->type() == Item::FIELD_ITEM)
        return (field->eq_def(((Item_field *) right_item)->field));
+      /* remove equalities injected by IN->EXISTS transformation */
+      else if (right_item->type() == Item::CACHE_ITEM)
+        return ((Item_cache *)right_item)->eq_def (field);
      if (right_item->const_item() && !(right_item->is_null()))
      {
        /*
@@ -14374,13 +14492,31 @@ calc_group_buffer(JOIN *join,ORDER *group)
                                     group_item->decimals);
      break;
    case STRING_RESULT:
+    {
+      enum enum_field_types type= group_item->field_type();
      /*
-        Group strings are taken as varstrings and require an length field.
-        A field is not yet created by create_tmp_field()
-        and the sizes should match up.
+        As items represented as DATE/TIME fields in the group buffer
+        have STRING_RESULT result type, we increase the length
+        by 8 as maximum pack length of such fields.
      */
-      key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
+      if (type == MYSQL_TYPE_TIME ||
+          type == MYSQL_TYPE_DATE ||
+          type == MYSQL_TYPE_DATETIME ||
+          type == MYSQL_TYPE_TIMESTAMP)
+      {
+        key_length+= 8;
+      }
+      else
+      {
+        /*
+          Group strings are taken as varstrings and require an length field.
+          A field is not yet created by create_tmp_field()
+          and the sizes should match up.
+        */
+        key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
+      }
      break;
+    }
    default:
      /* This case should never be choosen */
      DBUG_ASSERT(0);
@@ -15773,6 +15909,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
    /* Add "rows" field to item_list. */
    if (table_list->schema_table)
    {
+      /* in_rows */
+      if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+        item_list.push_back(item_null);
+      /* rows */
      item_list.push_back(item_null);
    }
    else
@@ -15841,7 +15981,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
      {
        if (tab->use_quick == 2)
        {
-         char buf[MAX_KEY/8+1];
+          /* 4 bits per 1 hex digit + terminating '\0' */
+          char buf[MAX_KEY / 4 + 1];
          extra.append(STRING_WITH_LEN("; Range checked for each "
                                       "record (index map: 0x"));
          extra.append(tab->keys.print(buf));
@@ -15997,7 +16138,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
                      first->options | thd->options | SELECT_DESCRIBE,
                      result, unit, first);
  }
-  DBUG_RETURN(res || thd->net.report_error);
+  DBUG_RETURN(res || thd->is_error());
 }

@@ -16151,8 +16292,21 @@ void TABLE_LIST::print(THD *thd, String *str)
    }
    if (my_strcasecmp(table_alias_charset, cmp_name, alias))
    {
+      char t_alias_buff[MAX_ALIAS_NAME];
+      const char *t_alias= alias;
+
      str->append(' ');
-      append_identifier(thd, str, alias, strlen(alias));
+      if (lower_case_table_names== 1)
+      {
+        if (alias && alias[0])
+        {
+          strmov(t_alias_buff, alias);
+          my_casedn_str(files_charset_info, t_alias_buff);
+          t_alias= t_alias_buff;
+        }
+      }
+
+      append_identifier(thd, str, t_alias, strlen(t_alias));
    }
    if (index_hints)
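The sketches below are illustrative notes added for readability; they are not part of the patch, and every helper name in them is made up. First, the GROUP BY-to-ORDER BY rewrite in JOIN::optimize() (hunk at line 1081) is now guarded by a prefix test: the rewrite only happens when there is no ORDER BY, or when the ORDER BY columns form a prefix of the GROUP BY columns, since ordering by the longer GROUP BY list then still satisfies the requested order. A minimal model of that prefix check, with the ORDER/GROUP lists reduced to vectors of column names:

#include <string>
#include <vector>

// Hypothetical stand-in for the server's test_if_subpart(); the real function
// walks ORDER linked lists and also copies the ASC/DESC flags of the matched
// ORDER BY elements onto the GROUP BY elements.
static bool order_is_prefix_of_group(const std::vector<std::string> &group_by,
                                     const std::vector<std::string> &order_by)
{
  if (order_by.size() > group_by.size())
    return false;                    // ORDER BY longer than GROUP BY: no rewrite
  for (size_t i= 0; i < order_by.size(); i++)
    if (order_by[i] != group_by[i])
      return false;                  // mismatch in some position: no rewrite
  return true;
}

// With GROUP BY (a,b) and ORDER BY (a), rewriting to ORDER BY (a,b) still
// returns rows ordered by a, so the rewrite is safe; with ORDER BY (b) it is
// not, and the original 'order' must be kept.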
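The NOTES block added above join_tab_cmp() (hunk at line 4546) states that the comparison is not transitive, so the sorted order depends on which pairs the sort routine happens to compare; switching from qsort() to my_qsort() presumably pins that choice to one implementation instead of the platform's libc. The toy program below reproduces the a/b/c example from the comment with a comparator of the same shape (dependencies first, then row estimates, then address) and shows the cycle a < b, b < c, c < a. The struct and values are illustrative only, not the server's JOIN_TAB.

#include <cstdint>
#include <cstdio>

// Cut-down stand-in for JOIN_TAB with just the fields the example uses.
struct Tab
{
  uint64_t dependent;      // bitmap of tables this one depends on
  uint64_t map;            // bitmap bit of this table
  uint64_t found_records;  // estimated rows
};

// Same shape as join_tab_cmp(): dependency wins, then row count, then address.
static int tab_cmp(const Tab *jt1, const Tab *jt2)
{
  if (jt1->dependent & jt2->map) return 1;   // jt1 must go after jt2
  if (jt2->dependent & jt1->map) return -1;  // jt2 must go after jt1
  if (jt1->found_records > jt2->found_records) return 1;
  if (jt1->found_records < jt2->found_records) return -1;
  return jt1 < jt2 ? -1 : 1;
}

int main()
{
  // Values taken from the Example lines in the NOTES block.
  Tab t[3]= { {0x0, 0x1, 3},     // a
              {0x0, 0x2, 3},     // b
              {0x6, 0x10, 2} };  // c depends on b's table (bit 0x2)
  const Tab *a= &t[0], *b= &t[1], *c= &t[2];
  // Prints -1 -1 -1: a < b, b < c, and yet c < a, so the relation is cyclic
  // and the result of sorting {a,b,c} depends on the comparison order.
  std::printf("cmp(a,b)=%d cmp(b,c)=%d cmp(c,a)=%d\n",
              tab_cmp(a, b), tab_cmp(b, c), tab_cmp(c, a));
  return 0;
}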
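The DECIMAL_RESULT branch of create_tmp_field_from_item() (hunk at line 9226) now clamps the scale to DECIMAL_MAX_SCALE and, if the corrected precision still does not fit into the item's max_length, drops fractional digits before touching integer digits. A standalone sketch of that arithmetic follows; precision_to_length() is a rough stand-in for my_decimal_precision_to_length() and DECIMAL_MAX_SCALE_GUESS for the real constant, so treat the exact numbers as illustrative.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>

static const uint32_t DECIMAL_MAX_SCALE_GUESS= 30;  // assumed value of DECIMAL_MAX_SCALE

// Rough stand-in for my_decimal_precision_to_length(): digits, plus the
// decimal point when there is a fractional part, plus a sign byte if signed.
static uint32_t precision_to_length(uint32_t precision, uint32_t dec, bool unsigned_flag)
{
  return precision + (dec ? 1 : 0) + (unsigned_flag ? 0 : 1);
}

// Mirrors the clamping in the hunk: returns the (len, dec) pair the temporary
// Field_new_decimal would be created with.
static std::pair<uint32_t, uint32_t>
fit_decimal(uint32_t len, uint32_t precision, uint32_t decimals, bool unsigned_flag)
{
  uint32_t dec= decimals;
  uint32_t intg= precision - dec;
  if (dec > 0)
  {
    dec= std::min(dec, DECIMAL_MAX_SCALE_GUESS);
    int overflow= (int) precision_to_length(intg + dec, dec, unsigned_flag) - (int) len;
    if (overflow > 0)
      dec= (uint32_t) std::max(0, (int) dec - overflow);  // too long: discard fraction
    else
      len-= decimals - dec;                               // corrected value fits
  }
  return std::make_pair(len, dec);
}

int main()
{
  // An item claiming 40 fractional digits out of 45 total, 47 bytes long:
  // the scale is clamped to 30 and the length shrinks by the 10 dropped digits.
  std::pair<uint32_t, uint32_t> r= fit_decimal(47, 45, 40, false);
  std::printf("len=%u dec=%u\n", r.first, r.second);
  return 0;
}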
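Finally, the EXPLAIN buffer for "Range checked for each record (index map: 0x...)" (hunk at line 15981) grows from MAX_KEY/8+1 to MAX_KEY/4+1 because the key map is printed in hexadecimal: one hex digit covers 4 key bits, not 8, exactly as the added comment says. A small check of the sizing, with MAX_KEY_GUESS standing in for the server's MAX_KEY:

#include <cstdio>

static const unsigned MAX_KEY_GUESS= 64;        // assumed value of MAX_KEY

int main()
{
  char buf[MAX_KEY_GUESS / 4 + 1];              // 4 bits per hex digit + '\0'
  unsigned long long key_map= 0x5;              // keys 0 and 2 are candidates
  std::snprintf(buf, sizeof(buf), "%llx", key_map);
  // With 64 possible keys the map needs up to 16 hex digits; the old /8
  // sizing only left room for 8 digits plus the terminator.
  std::printf("index map: 0x%s (buffer fits %zu hex digits)\n",
              buf, sizeof(buf) - 1);
  return 0;
}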