Diffstat (limited to 'sql')
-rw-r--r--  sql/filesort.cc   | 66
-rw-r--r--  sql/mysql_priv.h  |  4
-rw-r--r--  sql/opt_range.cc  | 15
-rw-r--r--  sql/records.cc    | 43
-rw-r--r--  sql/sql_acl.cc    |  7
-rw-r--r--  sql/sql_delete.cc |  4
-rw-r--r--  sql/sql_help.cc   |  8
-rw-r--r--  sql/sql_select.cc |  2
-rw-r--r--  sql/sql_table.cc  |  2
-rw-r--r--  sql/sql_udf.cc    |  2
-rw-r--r--  sql/sql_update.cc |  4
11 files changed, 120 insertions, 37 deletions
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 70fc6937b59..f56e5b3a771 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -402,6 +402,56 @@ static byte *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
   DBUG_RETURN(tmp);
 }
 
+#ifndef DBUG_OFF
+/*
+  Print a text, SQL-like record representation into dbug trace.
+
+  Note: this function is a work in progress: at the moment
+   - column read bitmap is ignored (can print garbage for unused columns)
+   - there is no quoting
+*/
+static void dbug_print_record(TABLE *table, bool print_rowid)
+{
+  char buff[1024];
+  Field **pfield;
+  String tmp(buff,sizeof(buff),&my_charset_bin);
+  DBUG_LOCK_FILE;
+
+  fprintf(DBUG_FILE, "record (");
+  for (pfield= table->field; *pfield ; pfield++)
+    fprintf(DBUG_FILE, "%s%s", (*pfield)->field_name, (pfield[1])? ", ":"");
+  fprintf(DBUG_FILE, ") = ");
+
+  fprintf(DBUG_FILE, "(");
+  for (pfield= table->field; *pfield ; pfield++)
+  {
+    Field *field= *pfield;
+
+    if (field->is_null())
+      fwrite("NULL", sizeof(char), 4, DBUG_FILE);
+
+    if (field->type() == MYSQL_TYPE_BIT)
+      (void) field->val_int_as_str(&tmp, 1);
+    else
+      field->val_str(&tmp);
+
+    fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE);
+    if (pfield[1])
+      fwrite(", ", sizeof(char), 2, DBUG_FILE);
+  }
+  fprintf(DBUG_FILE, ")");
+  if (print_rowid)
+  {
+    fprintf(DBUG_FILE, " rowid ");
+    for (uint i=0; i < table->file->ref_length; i++)
+    {
+      fprintf(DBUG_FILE, "%x", (uchar)table->file->ref[i]);
+    }
+  }
+  fprintf(DBUG_FILE, "\n");
+  DBUG_UNLOCK_FILE;
+}
+#endif
 
 /*
   Search after sort_keys and write them into tempfile.
@@ -475,25 +525,23 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
                              current_thd->variables.read_buff_size);
   }
 
-  READ_RECORD read_record_info;
   if (quick_select)
   {
     if (select->quick->reset())
       DBUG_RETURN(HA_POS_ERROR);
-    init_read_record(&read_record_info, current_thd, select->quick->head,
-                     select, 1, 1);
   }
 
   for (;;)
   {
     if (quick_select)
     {
-      if ((error= read_record_info.read_record(&read_record_info)))
+      if ((error= select->quick->get_next()))
       {
         error= HA_ERR_END_OF_FILE;
         break;
       }
       file->position(sort_form->record[0]);
+      DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
     }
     else                                        /* Not quick-select */
     {
@@ -550,15 +598,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
     if (thd->net.report_error)
       break;
   }
-  if (quick_select)
-  {
-    /*
-      index_merge quick select uses table->sort when retrieving rows, so free
-      resoures it has allocated.
-    */
-    end_read_record(&read_record_info);
-  }
-  else
+  if (!quick_select)
   {
     (void) file->extra(HA_EXTRA_NO_CACHE);      /* End cacheing of records */
     if (!next_pos)
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 607c06f55d2..029977c89b9 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1557,8 +1557,8 @@ ulonglong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
 int test_if_number(char *str,int *res,bool allow_wildcards);
 void change_byte(byte *,uint,char,char);
 void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
-                      SQL_SELECT *select,
-                      int use_record_cache, bool print_errors);
+                      SQL_SELECT *select, int use_record_cache,
+                      bool print_errors, bool disable_rr_cache);
 void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
                           bool print_error, uint idx);
 void end_read_record(READ_RECORD *info);
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 17a4701b515..95cda371673 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -6494,7 +6494,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
   QUICK_RANGE_SELECT* cur_quick;
   int result;
   Unique *unique;
-  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
+  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
 
   /* We're going to just read rowids. */
   if (head->file->extra(HA_EXTRA_KEYREAD))
@@ -6565,13 +6565,17 @@
   }
 
 
-  /* ok, all row ids are in Unique */
+  /*
+    Ok all rowids are in the Unique now. The next call will initialize
+    head->sort structure so it can be used to iterate through the rowids
+    sequence.
+  */
   result= unique->get(head);
   delete unique;
   doing_pk_scan= FALSE;
-  /* start table scan */
-  init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
-  /* index_merge currently doesn't support "using index" at all */
+
+  /* Start the rnd_pos() scan. */
+  init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE);
   head->file->extra(HA_EXTRA_NO_KEYREAD);
 
   DBUG_RETURN(result);
@@ -6601,6 +6605,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
   {
     result= HA_ERR_END_OF_FILE;
     end_read_record(&read_record);
+    free_io_cache(head);
     /* All rows from Unique have been retrieved, do a clustered PK scan */
     if (pk_quick_select)
     {
diff --git a/sql/records.cc b/sql/records.cc
index f61efc13034..d5c3a421cd9 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -72,11 +72,47 @@ void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
 }
 
 
-/* init struct for read with info->read_record */
+/*
+  init struct for read with info->read_record
+
+  SYNOPSIS
+    init_read_record()
+      info              OUT read structure
+      thd               Thread handle
+      table             Table the data [originally] comes from.
+      select            SQL_SELECT structure. We may select->quick or
+                        select->file as data source
+      use_record_cache  Call file->extra_opt(HA_EXTRA_CACHE,...)
+                        if we're going to do sequential read and some
+                        additional conditions are satisfied.
+      print_error       Copy this to info->print_error
+      disable_rr_cache  Don't use rr_from_cache (used by sort-union
+                        index-merge which produces rowid sequences that
+                        are already ordered)
+
+  DESCRIPTION
+    This function sets up reading data via one of the methods:
+
+    rr_unpack_from_tempfile  Unpack full records from sequential file
+    rr_unpack_from_buffer    ... or from buffer
+
+    rr_from_tempfile  Read rowids from tempfile and get full records
+                      with handler->rnd_pos() calls.
+    rr_from_pointers  ... or get rowids from buffer
+
+    rr_from_cache     Read a bunch of rowids from file, sort them,
+                      get records in rowid order, return, repeat.
+
+    rr_quick          Get data from QUICK_*_SELECT
+
+    rr_sequential     Sequentially scan the table using
+                      handler->rnd_next() calls
+*/
 
 void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
                       SQL_SELECT *select,
-                      int use_record_cache, bool print_error)
+                      int use_record_cache, bool print_error,
+                      bool disable_rr_cache)
 {
   IO_CACHE *tempfile;
   DBUG_ENTER("init_read_record");
@@ -121,7 +157,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
       it doesn't make sense to use cache - we don't read from the table
       and table->sort.io_cache is read sequentially
     */
-    if (!table->sort.addon_field &&
+    if (!disable_rr_cache &&
+        !table->sort.addon_field &&
         ! (specialflag & SPECIAL_SAFE_MODE) &&
         thd->variables.read_rnd_buff_size &&
         !(table->file->table_flags() & HA_FAST_KEY_READ) &&
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 7592986ef81..e12fbb9843a 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -205,7 +205,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
   acl_cache->clear(1);                          // Clear locked hostname cache
 
   init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
-  init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0);
+  init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0,
+                   FALSE);
   VOID(my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50));
   while (!(read_record_info.read_record(&read_record_info)))
   {
@@ -253,7 +254,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
   end_read_record(&read_record_info);
   freeze_size(&acl_hosts);
 
-  init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0);
+  init_read_record(&read_record_info,thd,table=tables[1].table,NULL,1,0,FALSE);
   VOID(my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100));
   password_length= table->field[2]->field_length /
     table->field[2]->charset()->mbmaxlen;
@@ -426,7 +427,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
   end_read_record(&read_record_info);
   freeze_size(&acl_users);
 
-  init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0);
+  init_read_record(&read_record_info,thd,table=tables[2].table,NULL,1,0,FALSE);
   VOID(my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100));
   while (!(read_record_info.read_record(&read_record_info)))
   {
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 213da1d49e8..38f89683065 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -214,7 +214,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
     DBUG_RETURN(TRUE);
   }
   if (usable_index==MAX_KEY)
-    init_read_record(&info,thd,table,select,1,1);
+    init_read_record(&info, thd, table, select, 1, 1, FALSE);
   else
     init_read_record_idx(&info, thd, table, 1, usable_index);
 
@@ -772,7 +772,7 @@ int multi_delete::do_deletes()
     }
 
     READ_RECORD info;
-    init_read_record(&info,thd,table,NULL,0,1);
+    init_read_record(&info, thd, table, NULL, 0, 1, FALSE);
     /*
       Ignore any rows not found in reference tables as they may already have
       been deleted by foreign key handling
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 69a257a9d37..6036a687274 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -181,7 +181,7 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
   int count= 0;
 
   READ_RECORD read_record_info;
-  init_read_record(&read_record_info, thd, topics, select,1,0);
+  init_read_record(&read_record_info, thd, topics, select, 1, 0, FALSE);
   while (!read_record_info.read_record(&read_record_info))
   {
     if (!select->cond->val_int())               // Doesn't match like
@@ -221,7 +221,7 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
   int count= 0;
 
   READ_RECORD read_record_info;
-  init_read_record(&read_record_info, thd, keywords, select,1,0);
+  init_read_record(&read_record_info, thd, keywords, select, 1, 0, FALSE);
   while (!read_record_info.read_record(&read_record_info) && count<2)
   {
     if (!select->cond->val_int())               // Dosn't match like
@@ -346,7 +346,7 @@ int search_categories(THD *thd, TABLE *categories,
 
   DBUG_ENTER("search_categories");
 
-  init_read_record(&read_record_info, thd, categories, select,1,0);
+  init_read_record(&read_record_info, thd, categories, select,1,0,FALSE);
   while (!read_record_info.read_record(&read_record_info))
   {
     if (select && !select->cond->val_int())
@@ -380,7 +380,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
   DBUG_ENTER("get_all_items_for_category");
 
   READ_RECORD read_record_info;
-  init_read_record(&read_record_info, thd, items, select,1,0);
+  init_read_record(&read_record_info, thd, items, select,1,0,FALSE);
   while (!read_record_info.read_record(&read_record_info))
   {
     if (!select->cond->val_int())
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d1e5837329b..60ba7591726 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -11309,7 +11309,7 @@ join_init_read_record(JOIN_TAB *tab)
   if (tab->select && tab->select->quick && tab->select->quick->reset())
     return 1;
   init_read_record(&tab->read_record, tab->join->thd, tab->table,
-                   tab->select,1,1);
+                   tab->select,1,1, FALSE);
   return (*tab->read_record.read_record)(&tab->read_record);
 }
 
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index f1de63892d5..1724513eb8f 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -4177,7 +4177,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
     current query id
   */
   from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
-  init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
+  init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
   if (ignore)
     to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
   thd->row_count= 0;
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 34ca18d5c39..849d152d93b 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -176,7 +176,7 @@ void udf_init()
   }
 
   table= tables.table;
-  init_read_record(&read_record_info, new_thd, table, NULL,1,0);
+  init_read_record(&read_record_info, new_thd, table, NULL,1,0,FALSE);
   while (!(error = read_record_info.read_record(&read_record_info)))
   {
     DBUG_PRINT("info",("init udf record"));
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 6830564111b..7d47659fbcc 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -358,7 +358,7 @@ int mysql_update(THD *thd,
         Full index scan must be started with init_read_record_idx
       */
       if (used_index == MAX_KEY || (select && select->quick))
-        init_read_record(&info,thd,table,select,0,1);
+        init_read_record(&info, thd, table, select, 0, 1, FALSE);
       else
         init_read_record_idx(&info, thd, table, 1, used_index);
 
@@ -422,7 +422,7 @@ int mysql_update(THD *thd,
 
     if (select && select->quick && select->quick->reset())
       goto err;
-    init_read_record(&info,thd,table,select,0,1);
+    init_read_record(&info, thd, table, select, 0, 1, FALSE);
     updated= found= 0;
     thd->count_cuted_fields= CHECK_FIELD_WARN;  /* calc cuted fields */
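
For orientation, the sketch below re-traces, from the caller's side, how the extended init_read_record() signature is driven by the opt_range.cc hunks above (read_keys_and_merge() plus get_next()). It is illustrative only, not part of the patch: the wrapper function name is hypothetical, the usual server internals (THD, TABLE, Unique, READ_RECORD from mysql_priv.h) are assumed to be in scope, and error handling is simplified.

  /* Hypothetical helper, for illustration only: scan rows in rowid order
     after a sort-union index_merge has collected rowids into a Unique. */
  static int scan_rows_in_rowid_order(THD *thd, TABLE *head, Unique *unique)
  {
    READ_RECORD read_record;
    int error;

    /* unique->get() leaves the merged, duplicate-free rowids in head->sort */
    if (unique->get(head))
      return 1;                          /* simplified error handling */

    init_read_record(&read_record, thd, head,
                     (SQL_SELECT*) 0,    /* no select: rows come via rnd_pos() */
                     1,                  /* use_record_cache */
                     1,                  /* print_error */
                     TRUE);              /* disable_rr_cache: the rowid sequence
                                            is already sorted, so rr_from_cache's
                                            read-sort-fetch batching would only
                                            add overhead */

    while (!(error= read_record.read_record(&read_record)))
    {
      /* head->record[0] now holds the next row, in rowid order */
    }

    end_read_record(&read_record);
    free_io_cache(head);                 /* as added in
                                            QUICK_INDEX_MERGE_SELECT::get_next() */
    return 0;
  }

Note also that the dbug_print_record() helper added to filesort.cc is compiled only in debug builds (#ifndef DBUG_OFF) and is reached through the DBUG_EXECUTE_IF("debug_filesort", ...) point in find_all_keys(), so it produces trace output only when the "debug_filesort" keyword is present in the server's debug flag set.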